{
"source": "johnnoone/aiovault",
"score": 2
}
#### File: aiovault/aiovault/client.py
```python
from . import v1
from .request import Request
from .util import task, extract_id
class Vault(v1.SysEndpoint):
def __init__(self, addr, token=None, cert=None, verify=True):
token = extract_id(token)
self.req_handler = Request(addr, 'v1', token=token,
cert=cert, verify=verify)
@property
def audit(self):
return v1.AuditEndpoint(self.req_handler)
@property
def auth(self):
return v1.AuthEndpoint(self.req_handler)
@property
def lease(self):
return v1.LeaseEndpoint(self.req_handler)
@property
def policy(self):
return v1.PolicyEndpoint(self.req_handler)
@property
def raw(self):
return v1.RawEndpoint(self.req_handler)
@property
def seal(self):
return v1.SealEndpoint(self.req_handler)
@property
def secret(self):
return v1.SecretEndpoint(self.req_handler)
@task
def login(self, *args, **kwargs):
return self.auth.login(*args, **kwargs)
@task
def read(self, path, **kwargs):
method = kwargs.pop('method', 'GET')
response = yield from self.req_handler(method, path, **kwargs)
return response
@task
def write(self, path, **kwargs):
method = kwargs.pop('method', 'POST')
response = yield from self.req_handler(method, path, **kwargs)
return response
@task
def delete(self, path, **kwargs):
method = kwargs.pop('method', 'DELETE')
response = yield from self.req_handler(method, path, **kwargs)
return response
def __repr__(self):
return '<Vault(addr=%r)>' % self.req_handler.addr
```
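A minimal usage sketch of the facade above, mirroring the test suite: it assumes a Vault dev server reachable at the (hypothetical) address shown, a root token, and the `yield from`-era asyncio/aiohttp stack this project targets.
```python
import asyncio

from aiovault import Vault


@asyncio.coroutine
def demo():
    # hypothetical dev-server address and root token
    client = Vault('http://127.0.0.1:8200', token='example-root-token')
    yield from client.write('/secret/hello', json={'data': 'world', 'lease': '1h'})
    response = yield from client.read('/secret/hello')
    result = yield from response.json()
    return result['data']['data']


loop = asyncio.get_event_loop()
print(loop.run_until_complete(demo()))  # -> 'world'
```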
#### File: aiovault/aiovault/policy.py
```python
from collections.abc import MutableMapping
class Rules(MutableMapping):
def __init__(self, *, name, rules=None):
"""
Parameters:
name (str): The policy name
rules (obj): List of :ref:`Rule` or a dict.
"""
self.name = name
self.rules = {}
if isinstance(rules, dict):
self.rules.update(rules)
elif isinstance(rules, (list, set)):
for rule in rules:
self.__setitem__(*rule)
elif isinstance(rules, tuple):
self.__setitem__(*rules)
def __getitem__(self, path):
return self.rules[path]
def __setitem__(self, path, policy):
if isinstance(policy, str):
policy = {
'policy': policy
}
self.rules[path] = policy
def __delitem__(self, path):
del self.rules[path]
def __iter__(self):
return iter(self.rules)
def __len__(self):
return len(self.rules)
def __eq__(self, other):
if isinstance(other, Rules):
            other = other.rules
return self.rules == other
def __repr__(self):
return '<Rules(name=%r, rules=%r)>' % (self.name, self.rules)
```
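`Rules` behaves like an ordinary mutable mapping; a short, server-free sketch showing how plain string values are normalized into `{'policy': ...}` dicts:
```python
from aiovault.policy import Rules

rules = Rules(name='dev', rules=[('secret/*', 'write')])
rules['sys/*'] = 'deny'                      # strings are wrapped by __setitem__
assert rules['sys/*'] == {'policy': 'deny'}
assert len(rules) == 2
assert rules == {'secret/*': {'policy': 'write'}, 'sys/*': {'policy': 'deny'}}
```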
#### File: aiovault/aiovault/request.py
```python
import asyncio
import json
import os.path
import ssl
from .exceptions import BadToken, DownError, HTTPError, InvalidRequest
from .exceptions import InvalidPath, InternalServerError, RateLimitExceeded
from .exceptions import Unauthorized
from aiohttp import ClientSession, TCPConnector
class Request:
def __init__(self, addr, version, token=None, cert=None, verify=True):
self.addr = addr
self.version = version
self._token = token
cookies = {}
if self._token:
cookies.setdefault('token', self._token)
connector, context, ca = None, None, None
if verify:
if isinstance(verify, str):
verify, ca = True, verify
else:
verify = False
if addr.startswith('https://') or cert:
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.options |= ssl.OP_NO_SSLv2
context.options |= ssl.OP_NO_SSLv3
if cert:
certfile, keyfile = cert
context.load_cert_chain(certfile, keyfile)
if ca:
context.verify_mode = ssl.CERT_REQUIRED
if os.path.isdir(ca):
context.load_verify_locations(capath=ca)
else:
context.load_verify_locations(cafile=ca)
else:
context.verify_mode = ssl.CERT_NONE
if verify:
connector = TCPConnector(verify_ssl=True, ssl_context=context)
else:
connector = TCPConnector(verify_ssl=False)
self.session = ClientSession(cookies=cookies, connector=connector)
@property
def token(self):
return self._token
@token.setter
def token(self, value):
self._token = value
self.session._update_cookies({'token': value})
@asyncio.coroutine
def request(self, method, path, **kwargs):
url = '%s/%s%s' % (self.addr, self.version, path)
for field in ('params', 'data', 'json'):
if field in kwargs and isinstance(kwargs[field], dict):
kwargs[field] = no_null(kwargs[field])
data = kwargs.pop('json', None)
if data is not None:
kwargs['data'] = json.dumps(data)
headers = kwargs.setdefault('headers', {})
headers['Content-Type'] = 'application/json'
response = yield from self.session.request(method, url, **kwargs)
if response.status in (200, 204):
return response
if response.headers['Content-Type'] == 'application/json':
data = yield from response.json()
else:
data = yield from response.text()
if response.status == 400:
raise InvalidRequest(data)
if response.status == 401:
raise Unauthorized(data)
if response.status == 403:
raise BadToken(data)
if response.status == 404:
raise InvalidPath(data)
if response.status == 429:
raise RateLimitExceeded(data)
if response.status == 500:
raise InternalServerError(data)
if response.status == 503:
raise DownError(data)
raise HTTPError(data, response.status)
__call__ = request
def no_null(data):
return {k: v for k, v in data.items() if v is not None}
```
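The `no_null` helper drops `None`-valued keys before query parameters or JSON bodies are serialized, which is why the endpoints can pass every optional argument straight through; a quick self-contained check (assuming the package and aiohttp are importable):
```python
from aiovault.request import no_null

payload = {'id': None, 'policies': ['root'], 'lease': None, 'display_name': 'token'}
assert no_null(payload) == {'policies': ['root'], 'display_name': 'token'}
```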
#### File: aiovault/aiovault/util.py
```python
import asyncio
import inspect
import os.path
import re
from base64 import b64decode, b64encode
from datetime import timedelta
from functools import partial, wraps
__all__ = ['convert_duration', 'format_duration', 'format_policies', 'task']
def base64_decode(data):
"""Decode a Base64 encodedstring"""
return b64decode(data.encode('utf-8')).decode('utf-8')
def base64_encode(data):
"""Encode a string using Base64"""
return b64encode(data.encode('utf-8')).decode('utf-8')
def format_duration(obj):
"""Converts obj to consul duration"""
if obj is None:
return None
if isinstance(obj, str):
return obj
if isinstance(obj, int):
return '%ss' % obj
if isinstance(obj, timedelta):
return '%ss' % int(obj.total_seconds())
raise ValueError('wrong type %r' % obj)
def convert_duration(obj):
"""Parse an api duration to timedelta"""
if isinstance(obj, str):
        matches = re.match(r'''
((?P<hours>\d+)h)?
((?P<minutes>\d+)m)?
((?P<seconds>\d+)s)?
''', obj, re.X)
if matches:
h = int(matches.group('hours') or 0)
m = int(matches.group('minutes') or 0)
s = int(matches.group('seconds') or 0)
return timedelta(hours=h, minutes=m, seconds=s)
if isinstance(obj, int):
return timedelta(seconds=obj)
return obj
def format_policies(obj):
if isinstance(obj, (list, set, tuple)):
obj = ','.join(str(element) for element in obj)
elif obj:
obj = str(obj)
return obj
def task(func=None, *, loop=None):
"""Transforms func into an asyncio task."""
if not func:
if not loop:
raise ValueError('loop is required')
return partial(task, loop=loop)
if getattr(func, '_is_task', False):
return func
coro = asyncio.coroutine(func)
if inspect.ismethod(func):
@wraps(func)
def wrapper(self, *arg, **kwargs):
l = loop or self.loop
            return asyncio.ensure_future(coro(self, *arg, **kwargs), loop=l)
else:
@wraps(func)
def wrapper(*arg, **kwargs):
            return asyncio.ensure_future(coro(*arg, **kwargs), loop=loop)
wrapper._is_task = True
return wrapper
def mark_task(func):
"""Mark function as a defacto task (for documenting purpose)"""
func._is_task = True
return func
class lazy_property:
"""
    Meant to be used for lazy evaluation of an object attribute.
    The property should represent immutable data, as it replaces itself.
"""
def __init__(self, fget):
self.fget = fget
self.func_name = fget.__name__
self.__name__ = fget.__name__
self.__doc__ = fget.__doc__
def __get__(self, obj, cls):
        if obj is not None:
value = self.fget(obj)
setattr(obj, self.func_name, value)
return value
return self
def ok(response):
response.close()
return response.status == 204
try:
# python >= 3.4
from contextlib import suppress
except ImportError:
class suppress:
"""Context manager to suppress specified exceptions
After the exception is suppressed, execution proceeds with the next
statement following the with statement.
with suppress(FileNotFoundError):
os.remove(somefile)
# Execution still resumes here if the file was already removed
"""
def __init__(self, *exceptions):
self._exceptions = exceptions
def __enter__(self):
pass
def __exit__(self, exctype, excinst, exctb):
# Unlike isinstance and issubclass, CPython exception handling
# currently only looks at the concrete type hierarchy (ignoring
# the instance and subclass checking hooks). While Guido considers
# that a bug rather than a feature, it's a fairly hard one to fix
# due to various internal implementation details. suppress provides
# the simpler issubclass based semantics, rather than trying to
# exactly reproduce the limitations of the CPython interpreter.
#
# See http://bugs.python.org/issue12029 for more details
return exctype is not None and issubclass(exctype, self._exceptions) # noqa
class Path(str):
"""Poorsman path maker.
"""
def __call__(self, *args):
obj = os.path.join(self, *args)
return Path(obj)
def extract_id(obj):
return getattr(obj, 'id', obj)
def extract_name(obj):
return getattr(obj, 'name', obj)
```
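A sketch exercising the pure helpers above; nothing here needs an event loop or a server (path joining shown for a POSIX platform):
```python
from datetime import timedelta

from aiovault.util import (Path, base64_decode, base64_encode,
                           convert_duration, format_duration, format_policies)

assert base64_decode(base64_encode('s3cr3t')) == 's3cr3t'
assert format_duration(timedelta(hours=1)) == '3600s'
assert convert_duration('1h30m') == timedelta(hours=1, minutes=30)
assert format_policies(['root', 'dev']) == 'root,dev'

token_path = Path('/auth/token')
assert token_path('lookup', 'some-id') == '/auth/token/lookup/some-id'
```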
#### File: auth/backends/bases.py
```python
from abc import ABCMeta, abstractmethod
from aiovault.util import ok, task, Path
class AuthBackend(metaclass=ABCMeta):
def __init__(self, name, type, req_handler):
self.name = name
self.type = type
self.req_handler = req_handler
@property
def path(self):
return Path('/auth/%s' % self.name)
@task
@abstractmethod
def login(self):
"""Performs login
"""
@task
def enable(self, description=None):
"""Enable backend
Parameters:
description (str): A human-friendly description of the auth backend
Returns:
bool
"""
method = 'POST'
path = '/sys/auth/%s' % self.name
data = {'type': self.type,
'description': description}
response = yield from self.req_handler(method, path, json=data)
return ok(response)
@task
def disable(self):
"""Disable backend
Returns:
bool
"""
method = 'DELETE'
path = '/sys/auth/%s' % self.name
response = yield from self.req_handler(method, path)
return ok(response)
def __repr__(self):
return '<%s(name=%r)>' % (self.__class__.__name__, self.name)
```
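`AuthBackend` only requires subclasses to provide `login`; a minimal sketch of a hypothetical backend (the module path is assumed from the file layout, and the `None` request handler is just a stand-in for illustration):
```python
from aiovault.util import task
from aiovault.v1.auth.backends.bases import AuthBackend  # path assumed from the file layout


class DummyBackend(AuthBackend):
    """Hypothetical backend, only to illustrate subclassing."""

    @task
    def login(self, **credentials):
        # a real backend would POST the credentials to self.path('login')
        return credentials


backend = DummyBackend(name='dummy', type='dummy', req_handler=None)
assert str(backend.path('login')) == '/auth/dummy/login'
assert repr(backend) == "<DummyBackend(name='dummy')>"
```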
#### File: v1/auth/__init__.py
```python
from .backends import load_backend
from collections.abc import Mapping
from aiovault.exceptions import BadToken, InvalidPath
from aiovault.token import ReadToken, LoginToken
from aiovault.util import extract_name, extract_id
from aiovault.util import ok, task, Path, format_duration
__all__ = ['authenticate', 'AuthEndpoint', 'AuthCollection']
class AuthEndpoint:
def __init__(self, req_handler):
self.req_handler = req_handler
@property
def path(self):
return Path('/sys/auth')
@property
def token_path(self):
return Path('/auth/token')
@task
def items(self):
"""Lists all the enabled auth backends
Returns:
AuthCollection
"""
method = 'GET'
path = self.path
response = yield from self.req_handler(method, path)
result = yield from response.json()
return AuthCollection(result, self.req_handler)
def load(self, name, *, type=None):
"""Returns auth backend
Parameters:
name (str): The auth backend name
Returns
AuthBackend
"""
type = type or getattr(name, 'type', name)
name = extract_name(name)
return load_backend(type, {
'name': name,
'type': type,
'req_handler': self.req_handler
})
@task
def login(self, name, *, type=None, **credentials):
"""Login
Parameters:
name (str): The name of mount
type (str): The name of the auth backend type, such as ``github``
credentials (str): Login credentials
Returns
AuthBackend
"""
backend = self.load(name, type=type)
try:
token = yield from backend.login(**credentials)
return token
except AttributeError:
return NotImplemented
@task
def enable(self, name, *, type=None, description=None):
"""Enable and load a new auth backend
Parameters:
name (str): The name of mount
type (str): The name of the auth backend type, such as ``github``
description (str): A human-friendly description of the auth backend
Returns
AuthBackend
"""
backend = self.load(name, type=type)
enabled = yield from backend.enable(description)
if enabled:
return backend
@task
def disable(self, name):
"""Disable the auth backend at the given mount point
Parameters:
name (str): The name of mount
"""
method = 'DELETE'
path = self.path(name)
response = yield from self.req_handler(method, path)
return ok(response)
@task
def create(self, *, id=None, policies=None, metadata=None, no_parent=None,
lease=None, display_name=None, num_uses=None):
"""Creates a new token.
        Certain options are only available when called by a root token.
Parameters:
id (str): The ID of the client token. Can only be specified by a
root token. Otherwise, the token ID is a randomly
generated UUID.
policies (list): A list of policies for the token. This must be a
subset of the policies belonging to the token
making the request, unless root. If not specified,
defaults to all the policies of the calling token.
metadata (dict): A map of string to string valued metadata.
This is passed through to the audit backends.
no_parent (bool): If true and set by a root caller, the token will
not have the parent token of the caller. This
creates a token with no parent.
lease (str): The lease period of the token, provided as "1h", where
hour is the largest suffix. If not provided, the token
is valid indefinitely.
display_name (str): The display name of the token. Defaults to
"token".
num_uses (int): The maximum uses for the given token. This can be
used to create a one-time-token or limited use
token. Defaults to no limit.
Returns:
LoginToken: The client token
"""
method = 'POST'
path = self.token_path('create')
data = {'id': id,
'policies': policies,
'metadata': metadata,
'no_parent': no_parent,
'lease': format_duration(lease),
'display_name': display_name,
'num_uses': num_uses}
response = yield from self.req_handler(method, path, json=data)
result = yield from response.json()
return LoginToken(**result)
@task
def lookup_self(self):
"""Returns information about the current client token.
Returns:
ReadToken: The current client token
"""
method = 'GET'
path = self.token_path('lookup-self')
response = yield from self.req_handler(method, path)
result = yield from response.json()
return ReadToken(**result)
@task
def lookup(self, token):
"""Returns information about a client token.
Parameters:
token (str): The token ID
Returns:
ReadToken: The client token
"""
token = extract_id(token)
method = 'GET'
path = self.token_path('lookup', token)
try:
response = yield from self.req_handler(method, path)
result = yield from response.json()
return ReadToken(**result)
except (InvalidPath, BadToken):
            raise KeyError('%r does not exist' % token)
@task
def revoke(self, token):
"""Revokes a token and all child tokens.
When the token is revoked, all secrets generated with it are also
revoked.
Parameters:
token (str): The token ID
"""
token = extract_id(token)
method = 'POST'
path = self.token_path('revoke', token)
response = yield from self.req_handler(method, path)
result = yield from response.json()
return result
@task
def revoke_orphan(self, token):
"""Revokes a token but not its child tokens.
When the token is revoked, all secrets generated with it are also
revoked. All child tokens are orphaned, but can be revoked
        subsequently using :py:meth:`revoke`.
Parameters:
token (str): The token ID
"""
token = extract_id(token)
method = 'POST'
path = self.token_path('revoke-orphan', token)
response = yield from self.req_handler(method, path)
result = yield from response.json()
return result
@task
def revoke_prefix(self, prefix):
"""Revokes all tokens generated at a given prefix, along with child
tokens, and all secrets generated using those tokens. Uses include
revoking all tokens generated by a credential backend during a
suspected compromise.
Parameters:
            prefix (str): The token prefix
"""
method = 'POST'
path = self.token_path('revoke-prefix', prefix)
response = yield from self.req_handler(method, path)
return ok(response)
@task
def renew(self, token, increment=None):
"""Renews a lease associated with a token.
This is used to prevent the expiration of a token, and the automatic
revocation of it.
Parameters:
token (str): The token ID
increment (int): An optional requested lease increment can be
provided. This increment may be ignored.
Returns:
LoginToken: The client token
"""
token = extract_id(token)
method = 'POST'
path = self.token_path('renew', token)
data = {'increment': increment}
response = yield from self.req_handler(method, path, json=data)
result = yield from response.json()
return LoginToken(**result)
class AuthCollection(Mapping):
def __init__(self, backends, req_handler):
self.backends = backends
self.req_handler = req_handler
def __getitem__(self, name):
path = '%s/' % name
return load_backend(self.backends[path]['type'], {
'name': name,
'type': self.backends[path]['type'],
'req_handler': self.req_handler
})
def __iter__(self):
for key in self.backends.keys():
yield key[:-1]
def __len__(self):
return len(self.backends)
def __repr__(self):
data = tuple(self.backends.keys())
return '<AuthCollection{!r}>'.format(data)
```
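A sketch chaining the token helpers above; it assumes `client` is a `Vault` instance bound to a running dev server.
```python
import asyncio


@asyncio.coroutine
def token_lifecycle(client):
    # create a short-lived token, inspect it, renew it, then revoke it
    token = yield from client.auth.create(policies=['root'], lease='1h',
                                          display_name='demo')
    info = yield from client.auth.lookup(token)   # extract_id() accepts the token object
    yield from client.auth.renew(token, increment=3600)
    yield from client.auth.revoke(token)
    return info
```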
#### File: aiovault/v1/lease.py
```python
from aiovault.objects import Value
from aiovault.util import format_duration, ok, task
class LeaseEndpoint:
def __init__(self, req_handler):
self.req_handler = req_handler
@task
def renew(self, lease_id, increment=None):
"""Renew a secret, requesting to extend the lease.
Parameters:
lease_id (str): The lease id
increment (int): A requested amount of time in seconds
to extend the lease. This is advisory.
Returns:
Value
"""
method = 'PUT'
path = '/sys/renew/%s' % lease_id
data = {'increment': format_duration(increment)}
response = yield from self.req_handler(method, path, data=data)
result = yield from response.json()
return Value(**result)
@task
def revoke(self, lease_id):
"""Revoke a secret immediately.
Parameters:
lease_id (str): The lease id
Returns:
bool
"""
method = 'PUT'
path = '/sys/revoke/%s' % lease_id
response = yield from self.req_handler(method, path)
return ok(response)
@task
def revoke_prefix(self, path_prefix):
"""Revoke all secrets generated under a given prefix immediately.
Parameters:
path_prefix (str): The path prefix
Returns:
bool
"""
method = 'PUT'
path = '/sys/revoke-prefix/%s' % path_prefix
response = yield from self.req_handler(method, path)
return ok(response)
```
#### File: secret/backends/consul.py
```python
from .bases import SecretBackend
from aiovault.exceptions import InvalidPath, InvalidRequest
from aiovault.objects import Value
from aiovault.util import base64_encode, ok, task, format_duration
class ConsulBackend(SecretBackend):
@task
def config_access(self, address, token):
"""Configures the access information for Consul.
This is a root protected endpoint.
Parameters:
address (str): The address of the Consul instance,
provided as scheme://host:port
token (str): The Consul ACL token to use.
Must be a management type token.
        Returns:
bool
"""
method = 'POST'
path = self.path('config/access')
scheme = None
if address.startswith('https://'):
scheme, address = 'https', address[8:]
elif address.startswith('http://'):
scheme, address = 'http', address[7:]
data = {'address': address,
'token': token,
'scheme': scheme}
response = yield from self.req_handler(method, path, json=data)
return ok(response)
@task
def read_role(self, name):
"""Queries a Consul role definition.
Parameters:
name (str): The role name
        Returns:
Value
"""
method = 'GET'
path = self.path('roles', name)
try:
response = yield from self.req_handler(method, path)
result = yield from response.json()
return Value(**result)
except (InvalidPath, InvalidRequest):
            raise KeyError('%r does not exist' % name)
@task
def write_role(self, name, *, policy, lease=None):
"""Creates or updates the Consul role definition.
Parameters:
name (str): The role name
            policy (str): The Consul ACL policy.
            lease (str): An optional lease duration, e.g. ``1h``.
Returns:
bool
"""
method = 'POST'
path = self.path('roles', name)
data = {'policy': base64_encode(policy),
'lease': format_duration(lease)}
response = yield from self.req_handler(method, path, json=data)
return ok(response)
@task
def delete_role(self, name):
"""Deletes a Consul role definition.
Parameters:
name (str): The role name
Returns:
bool
"""
method = 'DELETE'
path = self.path('roles', name)
response = yield from self.req_handler(method, path)
return ok(response)
@task
def creds(self, name):
"""Generates a dynamic Consul token based on the role definition.
Parameters:
name (str): The role name
        Returns:
Value
"""
method = 'GET'
path = self.path('creds', name)
response = yield from self.req_handler(method, path)
result = yield from response.json()
return Value(**result)
```
#### File: aiovault/tests/test_aws.py
```python
from aiovault import Vault
from conftest import async_test
import pytest
AWS_POLICY = '''{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "Stmt1426528957000",
"Effect": "Allow",
"Action": [
"noop:noop"
],
"Resource": [
"*"
]
}
]
}'''
@async_test
def test_basic(dev_server, env):
try:
access_key = env.AWS_ACCESS_KEY_ID
secret_key = env.AWS_SECRET_ACCESS_KEY
region = env.AWS_DEFAULT_REGION
except AttributeError:
        pytest.skip('AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY '
                    'or AWS_DEFAULT_REGION missing')
client = Vault(dev_server.addr, token=dev_server.root_token)
mounted, backend = yield from client.secret.mount('aws')
configured = yield from backend.config_root(access_key=access_key,
secret_key=secret_key,
region=region)
assert configured
configured = yield from backend.config_lease(lease='1m',
lease_max='1m')
assert configured
configured = yield from backend.write_role('foo', policy=AWS_POLICY)
assert configured
data = yield from backend.creds('foo')
assert 'access_key' in data
assert 'secret_key' in data
# TODO destroy the new account with boto
@async_test
def test_crud(dev_server, env):
try:
access_key = env.AWS_ACCESS_KEY_ID
secret_key = env.AWS_SECRET_ACCESS_KEY
region = env.AWS_DEFAULT_REGION
except AttributeError:
        pytest.skip('AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY '
                    'or AWS_DEFAULT_REGION missing')
client = Vault(dev_server.addr, token=dev_server.root_token)
mounted, backend = yield from client.secret.mount('aws')
configured = yield from backend.config_root(access_key=access_key,
secret_key=secret_key,
region=region)
assert configured
configured = yield from backend.config_lease(lease='1m', lease_max='1m')
assert configured
configured = yield from backend.write_role('test', policy=AWS_POLICY)
assert configured
role = yield from backend.read_role('test')
assert 'policy' in role
deleted = yield from backend.delete_role('test')
assert deleted
with pytest.raises(KeyError):
yield from backend.read_role('test')
```
#### File: aiovault/tests/test_cert.py
```python
from aiovault import Vault
from conftest import async_test
import os.path
import pytest
HERE = os.path.dirname(os.path.abspath(__file__))
@pytest.mark.xfail
@async_test
def test_cert(server, env):
crt_file = os.path.join(HERE, 'certs', 'client.crt')
csr_file = os.path.join(HERE, 'certs', 'client.csr')
key_file = os.path.join(HERE, 'certs', 'client.key')
ca_path = os.path.join(HERE, 'certs')
client = Vault(server.addr, cert=[server.csr, server.key])
state = yield from client.initialize(secret_shares=5, secret_threshold=3)
yield from client.seal.unseal(state.keys)
yield from client.audit.enable('file', path='/tmp/aiovault.log')
backend = yield from client.auth.enable('cert')
assert backend.__repr__() == "<CertBackend(name='cert')>"
with open(csr_file) as file:
written = yield from backend.write_cert('foo',
certificate=file.read(),
policies=['pierre', 'pol'])
assert written
data = yield from backend.read_cert('foo')
assert 'pierre' in data['policies']
# TODO does not work with Vault v0.1.2
# return
client = Vault(server.addr, cert=[crt_file, key_file], verify=crt_file)
backend = client.auth.load('cert')
res = yield from backend.login()
print(res)
```
#### File: aiovault/tests/test_consul.py
```python
from aiovault import Vault
from conftest import async_test
from datetime import timedelta
import pytest
CONSUL_POLICY = """
key "" {
policy = "write"
}
"""
@async_test
def test_basic(dev_server, consul):
client = Vault(dev_server.addr, token=dev_server.root_token)
mounted, backend = yield from client.secret.mount('consul')
assert mounted
store = client.secret.load('consul')
configured = yield from store.config_access(address=consul.address,
token=consul.acl_master_token)
assert configured
configured = yield from store.write_role('foo', policy=CONSUL_POLICY)
assert configured
data = yield from store.creds('foo')
assert 'token' in data
@async_test
def test_crud(dev_server, consul):
client = Vault(dev_server.addr, token=dev_server.root_token)
mounted, backend = yield from client.secret.mount('consul')
assert mounted
store = client.secret.load('consul')
configured = yield from store.config_access(address=consul.address,
token=consul.acl_master_token)
assert configured
configured = yield from store.write_role('foo', policy=CONSUL_POLICY)
assert configured
role = yield from store.read_role('foo')
assert 'policy' in role
deleted = yield from store.delete_role('foo')
assert deleted
with pytest.raises(KeyError):
yield from store.read_role('foo')
@async_test
def test_lease(dev_server, consul):
client = Vault(dev_server.addr, token=dev_server.root_token)
mounted, backend = yield from client.secret.mount('consul')
assert mounted
store = client.secret.load('consul')
configured = yield from store.config_access(address=consul.address,
token=consul.acl_master_token)
assert configured
configured = yield from store.write_role('foo',
policy=CONSUL_POLICY,
lease='6h')
assert configured
role = yield from store.read_role('foo')
assert role['lease'] == timedelta(hours=6)
deleted = yield from store.delete_role('foo')
assert deleted
with pytest.raises(KeyError):
yield from store.read_role('foo')
```
#### File: aiovault/tests/test_ldap.py
```python
from aiovault import Vault
from conftest import async_test
import pytest
@async_test
def test_ldap(dev_server):
client = Vault(dev_server.addr, token=dev_server.root_token)
backend = yield from client.auth.enable('ldap')
configured = yield from backend.configure(url='ldap://ldap.forumsys.com',
userattr='uid',
userdn='dc=example,dc=com',
groupdn='dc=example,dc=com')
assert configured
    written = yield from backend.write_group(name='scientists', policies='foo')
    assert written
token = yield from backend.login(username='tesla', password='password')
assert token['metadata']['username'] == 'tesla'
@async_test
def test_ldap_crud(dev_server):
client = Vault(dev_server.addr, token=dev_server.root_token)
backend = yield from client.auth.enable('ldap')
configured = yield from backend.configure(url='ldap://ldap.forumsys.com',
userattr='uid',
userdn='dc=example,dc=com',
groupdn='dc=example,dc=com')
assert configured
    written = yield from backend.write_group(name='g1', policies='foo')
    assert written
data = yield from backend.read_group(name='g1')
assert data['policies'] == {'foo'}
deleted = yield from backend.delete_group(name='g1')
assert deleted
with pytest.raises(KeyError):
yield from backend.read_group(name='g1')
```
#### File: aiovault/tests/test_lease.py
```python
from aiovault import Vault
from conftest import async_test
@async_test
def test_renew(dev_server):
client = Vault(dev_server.addr, token=dev_server.root_token)
response = yield from client.write('/secret/foo', json={
'data': 'bar', 'lease': '1h'
})
assert response.status == 204
response = yield from client.read('/secret/foo')
result = yield from response.json()
renewed = yield from client.lease.renew(result['lease_id'])
assert renewed
@async_test
def test_revoke(dev_server):
client = Vault(dev_server.addr, token=dev_server.root_token)
revoked = yield from client.lease.revoke('foo/1234')
assert revoked
@async_test
def test_revoke_prefix(dev_server):
client = Vault(dev_server.addr, token=dev_server.root_token)
revoked = yield from client.lease.revoke_prefix('foo/1234')
assert revoked
```
#### File: aiovault/tests/test_seal.py
```python
from aiovault import Vault
from conftest import async_test
@async_test
def test_initial_status(dev_server):
client = Vault(dev_server.addr, token=dev_server.root_token)
response = yield from client.seal.status()
assert response.sealed == dev_server.sealed
assert response.threshold == dev_server.threshold
assert response.shares == dev_server.shares
assert response.progress == dev_server.progress
@async_test
def test_seal(dev_server):
client = Vault(dev_server.addr, token=dev_server.root_token)
status = yield from client.seal.status()
assert status.sealed is False
sealed = yield from client.seal.seal()
assert sealed is True
status = yield from client.seal.status()
assert status.sealed is True
status = yield from client.seal.unseal(dev_server.unseal_key)
assert status.sealed is False
```
#### File: aiovault/tests/tls.py
```python
import configparser
import ipaddress
import json
import os
import os.path
import subprocess
from collections import OrderedDict
here = os.path.dirname(os.path.abspath(__file__))
def generate_keys(directory, crt, csr, key, names):
# 1. generate configuration file
env = os.environ.copy()
config = configparser.ConfigParser()
config.optionxform = str
config['req'] = {
'default_bits': '1024',
'distinguished_name': 'req_distinguished_name',
'req_extensions': 'v3_req',
'prompt': 'no',
}
config['req_distinguished_name'] = {
'C': 'GB',
'ST': 'Test State or Province',
'L': 'Test Locality',
'O': 'AIOVault Name',
'OU': 'AIOVault Testing',
'CN': 'AIOVault',
'emailAddress': '<EMAIL>',
}
config['v3_req'] = OrderedDict([
('basicConstraints', 'CA:TRUE'),
('keyUsage', 'nonRepudiation,digitalSignature,keyEncipherment'),
('subjectAltName', '@alt_names'),
])
dns, ip = set(), set()
for name in names:
try:
ipaddress.IPv4Address(name)
ip.add(name)
except ipaddress.AddressValueError:
try:
ipaddress.IPv6Address(name)
ip.add(name)
except ipaddress.AddressValueError:
dns.add(name)
config['alt_names'] = OrderedDict([])
for i, name in enumerate(sorted(dns), start=1):
config['alt_names']['DNS.%s' % i] = name
for i, name in enumerate(sorted(ip), start=1):
config['alt_names']['IP.%s' % i] = name
config_filename = os.path.join(directory, 'openssl.ini')
with open(config_filename, 'w') as file:
config.write(file)
env['OPENSSL_CONF'] = config_filename
# 2. generate keys
key_filename = os.path.join(directory, key)
csr_filename = os.path.join(directory, csr)
crt_filename = os.path.join(directory, crt)
args = [
'openssl', 'req',
'-x509',
'-nodes',
'-days', '365',
'-newkey', 'rsa:2048',
'-keyout', key_filename,
'-out', csr_filename,
'-config', config_filename
]
proc = subprocess.Popen(args,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
cwd=directory)
stdout, stderr = proc.communicate()
if not proc.returncode:
print('created:', key_filename)
print('created:', csr_filename)
else:
raise Exception(stderr)
args = [
'openssl', 'x509',
'-in', csr_filename,
'-extfile', config_filename,
'-extensions', 'v3_req',
'-signkey', key_filename,
'-out', crt_filename
]
proc = subprocess.Popen(args,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
cwd=directory)
stdout, stderr = proc.communicate()
if not proc.returncode:
print('created:', crt_filename)
else:
raise Exception(stderr.decode('utf-8'))
def generate_config(directory, config, crt, key):
data = {
'backend': {
'inmem': {}
},
'listener': {
'tcp': {
'address': '127.0.0.1:8200',
'tls_cert_file': crt,
'tls_key_file': key
}
}
}
filename = os.path.join(directory, config)
with open(filename, 'w') as file:
file.write(json.dumps(data, indent=2))
def run_server(directory, config):
args = ['vault', 'server', '-config', config]
proc = subprocess.Popen(args, cwd=directory)
print(proc)
def handle_keys(args, parser):
crt = '%s.crt' % args.name
csr = '%s.csr' % args.name
key = '%s.key' % args.name
names = args.names or ['127.0.0.1', 'example.com']
generate_keys(args.directory, crt, csr, key, names)
def handle_config(args, parser):
config = 'server.json'
crt = 'server.crt'
key = 'server.key'
generate_config(args.directory, config, crt, key)
def handle_server(args, parser):
config = 'server.json'
run_server(args.directory, config)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser('generate tls and server config')
parser.add_argument('--directory', default='certs')
parser.add_argument('--name', default='server')
subparsers = parser.add_subparsers(title='commands')
parser_a = subparsers.add_parser('tls', help='generate keys')
parser_a.set_defaults(handle=handle_keys)
parser_a.add_argument('names', nargs='*', help='dns and IP SANs')
parser_b = subparsers.add_parser('configuration', help='generate config')
parser_b.set_defaults(handle=handle_config)
parser_c = subparsers.add_parser('run', help='run server')
parser_c.set_defaults(handle=handle_server)
try:
args = parser.parse_args()
args.directory = os.path.join(here, args.directory)
os.makedirs(args.directory, exist_ok=True)
args.handle(args, parser)
except AttributeError:
parser.print_help()
```
{
"source": "johnnoone/cooperate",
"score": 3
}
#### File: cooperate/cooperate/concurrency.py
```python
import math
__all__ = ['Concurrency']
class Concurrency:
def __init__(self, *, size=None, part=None):
if size and part:
raise ValueError('size and part are mutually exclusive')
self.size = size
self.part = part
def batch(self, collection):
if self.size:
return self.size
if self.part:
return math.ceil(len(collection) / 100 * self.part)
return len(collection)
def __repr__(self):
if self.size:
return '<Concurrency(size=%r)>' % self.size
if self.part:
return '<Concurrency(part=%r)>' % self.part
return '<Concurrency>'
```
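`Concurrency` turns either an absolute `size` or a percentage `part` into a batch size; a quick check:
```python
from cooperate.concurrency import Concurrency

jobs = list(range(10))
assert Concurrency(size=3).batch(jobs) == 3     # fixed batch size
assert Concurrency(part=25).batch(jobs) == 3    # ceil(25% of 10)
assert Concurrency().batch(jobs) == 10          # default: run everything at once
```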
#### File: cooperate/cooperate/__main__.py
```python
import asyncio
import asyncio.subprocess
import signal
import functools
from .cli import get_parser
from .renderers import StatusRenderer # noqa
from aioutils import Group, Pool
def broadcast(args):
loop = asyncio.get_event_loop()
for signame in ('SIGINT', 'SIGTERM'):
loop.add_signal_handler(getattr(signal, signame),
functools.partial(ask_exit, loop, signame))
env = dict(args.env or [])
renderer = StatusRenderer()
def printer(future, node, command):
data = renderer.render(future, node, command)
print(data)
nodes = args.nodes or []
jobs = args.mode(nodes, args.commands)
if args.batch:
pooler = Pool(args.batch.batch(jobs))
else:
pooler = Group()
for node, command in jobs:
task = pooler.spawn(node.run(command, env=env))
render = functools.partial(printer, node=node, command=command)
task.add_done_callback(render)
pooler.join()
loop.close()
def ask_exit(loop, signame):
print("# got signal %s: exit" % signame)
loop.stop()
def run():
parser, ns, remains = get_parser()
args = parser.parse_args(remains, namespace=ns)
broadcast(args)
if __name__ == '__main__':
run()
```
{
"source": "johnnoone/django-admincommand",
"score": 4
}
#### File: django-admincommand/admincommand/utils.py
```python
def generate_instance_name(name):
out = name[0].lower()
for char in name[1:]:
if char.isupper():
out += "_%s" % char.lower()
else:
out += char
return out
def generate_human_name(name):
out = name[0]
for char in name[1:]:
if char.isupper():
out += " %s" % char.lower()
else:
out += char
return out
```
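A quick check of the two name helpers (import path taken from the file header):
```python
from admincommand.utils import generate_human_name, generate_instance_name

assert generate_instance_name('ComputePi') == 'compute_pi'
assert generate_human_name('ComputePi') == 'Compute pi'
```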
#### File: management/commands/pi.py
```python
from django.core.management.base import BaseCommand
def arccot(x, unity):
sum = xpower = unity // x
n = 3
sign = -1
while 1:
xpower = xpower // (x * x)
term = xpower // n
if not term:
break
sum += sign * term
sign = -sign
n += 2
return sum
def pi(digits):
unity = 10 ** (digits + 10)
pi = 4 * (4 * arccot(5, unity) - arccot(239, unity))
return pi // 10 ** 10
class Command(BaseCommand):
help = "Compute pi number"
def add_arguments(self, parser):
parser.add_argument("argument")
def handle(self, *args, **options):
arg = options["argument"]
r = str(pi(int(arg)))
r = r[0] + "." + r[1:]
self.stdout.write("pi(%s) = %s" % (arg, r))
```
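The command evaluates Machin's formula, pi = 16*arctan(1/5) - 4*arctan(1/239), in fixed-point integer arithmetic, carrying ten guard digits that are truncated at the end. A quick sanity check, assuming the `pi` helper defined above is in scope (e.g. pasted into a REPL):
```python
assert str(pi(5)) == '314159'          # digits of pi without the decimal point
assert str(pi(10)) == '31415926535'
```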
{
"source": "johnnoone/django-rayures",
"score": 2
}
#### File: johnnoone/django-rayures/conftest.py
```python
import django
import os
import pytest
from django.conf import settings
@pytest.fixture
def publishable_key(settings):
return settings.STRIPE_PUBLISHABLE_KEY
@pytest.fixture
def endpoint_secret(settings):
return settings.STRIPE_ENDPOINT_SECRET
def pytest_configure():
DEFAULT_SETTINGS = dict(
INSTALLED_APPS=[
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rayures',
],
DATABASES={
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'dev',
'USER': 'postgres',
'PASSWORD': '<PASSWORD>',
'HOST': '127.0.0.1',
'PORT': '5433',
}
},
SITE_ID=1,
ROOT_URLCONF="rayures.urls",
SECRET_KEY="notasecret",
STRIPE_API_KEY=os.getenv("STRIPE_SECRET_KEY", "DUMMY"),
STRIPE_PUBLISHABLE_KEY=os.getenv("STRIPE_PUBLISHABLE_KEY", "DUMMY_PUBLISHABLE"),
STRIPE_ENDPOINT_SECRET=os.getenv("STRIPE_ENDPOINT_SECRET", "DUMMY")
)
settings.configure(**DEFAULT_SETTINGS)
django.setup()
@pytest.fixture
def vcr_config():
return {
"filter_headers": [("authorization", "DUMMY")],
}
```
#### File: src/rayures/fields.py
```python
from .utils import price_from_stripe, dt_from_stripe
from django.db import models
from django.db.models.expressions import Col
from django.db.models.lookups import IExact
from django.db.models.fields.related_lookups import RelatedLookupMixin
from django.db.models.signals import post_init
from django.contrib.postgres.fields import JSONField as JSONFieldBase
__all__ = ['IntegerField']
class DatetimeProxy:
def __init__(self, source, field_name):
self.source = source
self.path = source.split('.')
self.field_name = field_name
def __get__(self, obj, type=None):
if obj is None:
return obj
try:
value = obj.data
for p in self.path:
value = value.get(p, MISSING)
if value is MISSING:
return
except AttributeError:
return
if value is not None:
return dt_from_stripe(value)
class CharProxy:
def __init__(self, source, field_name):
self.source = source
self.path = source.split('.')
self.field_name = field_name
def __get__(self, obj, type=None):
if obj is None:
return obj
try:
value = obj.data
for p in self.path:
value = value.get(p, MISSING)
if value is MISSING:
return
except AttributeError:
return
if value is not None:
return str(value)
class IntegerProxy:
def __init__(self, source, field_name):
self.source = source
self.path = source.split('.')
self.field_name = field_name
def __get__(self, obj, type=None):
if obj is None:
return obj
try:
value = obj.data
for p in self.path:
value = value.get(p, MISSING)
if value is MISSING:
return
except AttributeError:
return
if value is not None:
return int(value)
class PriceProxy:
def __init__(self, source, field_name):
self.source = source
self.path = source.split('.')
self.currency_path = self.path[:-1] + ['currency']
self.field_name = field_name
def __get__(self, obj, type=None):
if obj is None:
return obj
try:
value = obj.data
for p in self.path:
value = value.get(p, MISSING)
if value is MISSING:
return
currency = obj.data
for p in self.currency_path:
currency = currency.get(p, MISSING)
if currency is MISSING:
return
except AttributeError:
return
if value is not None:
# TODO: convert the value to local
return price_from_stripe(value, currency)
class BooleanProxy:
def __init__(self, source, field_name):
self.source = source
self.path = source.split('.')
self.field_name = field_name
def __get__(self, obj, type=None):
if obj is None:
return obj
try:
value = obj.data
for p in self.path:
value = value.get(p, MISSING)
if value is MISSING:
return
except AttributeError:
return
if value is not None:
return bool(value)
class HashProxy:
def __init__(self, source, field_name):
self.source = source
self.path = source.split('.')
self.field_name = field_name
def __get__(self, obj, type=None):
if obj is None:
return obj
try:
value = obj.data
for p in self.path:
value = value.get(p, MISSING)
if value is MISSING:
return
except AttributeError:
return
return value
class StripeCol(Col):
def as_sql(self, compiler, connection):
qn = compiler.quote_name_unless_alias
*prev, last = ["data"] + [f"'{p}'" for p in self.target.source.split('.')]
field = '->'.join(prev) + '->>' + last
# cast for now
field = "%s.%s" % (qn(self.alias), field)
if isinstance(self.target, DateTimeField):
field = f'to_timestamp(({field})::text::double precision)'
elif isinstance(self.target, IntegerField):
field = f'({field})::text::numeric'
elif isinstance(self.target, BooleanField):
field = f'({field})::text::bool'
elif isinstance(self.target, HashField):
field = '->'.join(prev) + '->' + last
field = "%s.%s" % (qn(self.alias), field)
else:
field = f'({field})::text'
return field, []
class StripeField(models.Field):
proxy = None
def __init__(self, *args, source, **kwargs):
"""
Parameters:
source (str): the path in data JSON
"""
self.source = source
kwargs['null'] = True
kwargs['editable'] = False
kwargs['serialize'] = False
super().__init__(*args, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
kwargs["source"] = self.source
del kwargs["editable"]
del kwargs["serialize"]
return name, path, args, kwargs
def contribute_to_class(self, cls, name):
self.name = name
self.verbose_name = name.replace('_', ' ')
self.field_name = name
self.attname = name
self.model = cls
self.concrete = False
self.column = f'__{self.source}__'
self.proxy = type(self).proxy(self.source, self.attname)
cls._meta.add_field(self, private=True)
if not getattr(cls, self.attname, None):
setattr(cls, self.attname, self.proxy)
if not cls._meta.abstract:
post_init.connect(self.rebound_fields, sender=cls)
def rebound_fields(self, instance, *args, **kwargs):
self.rebound(instance)
def rebound(self, instance):
value = self.proxy.__get__(instance)
setattr(instance, self.attname, value)
def get_col(self, alias, output_field=None):
col = super().get_col(alias, output_field)
if isinstance(col, Col):
col.__class__ = StripeCol
return col
# def select_format(self, compiler, sql, params):
# sql, params = super().select_format(compiler, sql, params)
# print('select_format', self, sql, params)
# return sql, params
# def get_lookup(self, name):
# result = super().get_lookup(name)
# print('get_lookup', self, result, name)
# # get_lookup rayures.Coupon.created_at <class 'django.db.models.lookups.GreaterThanOrEqual'> gte
# # get_lookup rayures.Coupon.created_at <class 'django.db.models.lookups.LessThan'> lt
# return result
# def get_transform(self, name):
# result = super().get_transform(name)
# print('get_transform', self, result, name)
# return result
class IntegerField(StripeField, models.IntegerField):
# description = _("String (up to %(max_length)s)")
proxy = IntegerProxy
def get_internal_type(self):
return 'IntegerField'
class CharField(StripeField, models.CharField):
# description = _("String (up to %(max_length)s)")
proxy = CharProxy
# def __init__(self, *args, **kwargs):
# """
# Parameters:
# source (str): the path in data JSON
# """
# kwargs['max_length'] = 2000
# super().__init__(*args, **kwargs)
def get_internal_type(self):
return 'CharField'
class DateTimeField(StripeField, models.DateTimeField):
# description = _("String (up to %(max_length)s)")
proxy = DatetimeProxy
def get_internal_type(self):
return 'DateTimeField'
class PriceField(StripeField):
# description = _("String (up to %(max_length)s)")
proxy = PriceProxy
class BooleanField(StripeField, models.NullBooleanField):
# description = _("String (up to %(max_length)s)")
proxy = BooleanProxy
def get_internal_type(self):
return 'NullBooleanField'
class HashField(StripeField, JSONFieldBase):
# description = _("String (up to %(max_length)s)")
proxy = HashProxy
def get_internal_type(self):
return 'JSONField'
MISSING = object()
class ForeignKey(models.ForeignKey):
proxy = CharProxy
def __init__(self, to, related_name=None, related_query_name=None,
limit_choices_to=None, parent_link=False, to_field=None,
source=None,
**kwargs):
kwargs['to'] = to
kwargs['related_name'] = related_name
kwargs['related_query_name'] = related_query_name
kwargs['limit_choices_to'] = limit_choices_to
kwargs['parent_link'] = parent_link
kwargs['to_field'] = to_field
# forced
kwargs['db_constraint'] = False
kwargs['db_index'] = False
kwargs['null'] = True
kwargs['default'] = None
kwargs['on_delete'] = models.DO_NOTHING
# our
kwargs['editable'] = False
self.source = source
super().__init__(**kwargs)
def get_col(self, alias, output_field=None):
col = super().get_col(alias, output_field)
if isinstance(col, Col):
col.__class__ = StripeCol
return col
def contribute_to_class(self, cls, name, private_only=False, **kwargs):
self.proxy = type(self).proxy(self.source, name)
return super().contribute_to_class(cls, name, private_only=True, **kwargs)
def rebound_fields(self, instance, *args, **kwargs):
self.rebound(instance)
def rebound(self, instance):
print('instance, self.attname', instance, self.attname)
value = self.proxy.__get__(instance)
setattr(instance, self.attname, value)
@ForeignKey.register_lookup
class RelatedIExact(RelatedLookupMixin, IExact):
pass
```
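A hypothetical sketch of how these virtual fields might be declared on a model: the model name, field sources, and the plain `data` JSON column are illustrative assumptions, not the actual rayures models.
```python
from django.contrib.postgres.fields import JSONField
from django.db import models

from rayures.fields import BooleanField, CharField, DateTimeField, PriceField


class Coupon(models.Model):
    # hypothetical model: the Stripe* fields read from the raw payload in `data`
    id = models.CharField(max_length=255, primary_key=True)
    data = JSONField(default=dict)

    name = CharField(source='name', max_length=2000)    # CharProxy -> data['name']
    created_at = DateTimeField(source='created')        # DatetimeProxy / to_timestamp in SQL
    amount_off = PriceField(source='amount_off')        # PriceProxy pairs value with sibling 'currency'
    valid = BooleanField(source='valid')                # BooleanProxy -> bool

    class Meta:
        app_label = 'rayures'
```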
#### File: src/rayures/instrumentation.py
```python
import stripe
from contextlib import contextmanager
class InstrumentedClient(stripe.http_client.HTTPClient):
def __init__(self, client):
self.client = client
self.calls = []
def __getattr__(self, name):
return getattr(self.client, name)
def request(self, method, url, headers, post_data=None):
data = [method, url, -1, None, None]
try:
content, status_code, headers = self.client.request(method, url, headers, post_data)
data[2] = status_code
if 'Request-Id' in headers:
data[3] = headers['Request-Id']
except Exception as err:
data[4] = str(err)
raise err
finally:
self.calls.append(data)
return content, status_code, headers
@contextmanager
def instrument_client(client=None):
from stripe import verify_ssl_certs as verify
from stripe import proxy
previous_client = client or stripe.default_http_client or \
stripe.http_client.new_default_http_client(
verify_ssl_certs=verify, proxy=proxy)
try:
instrumented_client = InstrumentedClient(previous_client)
stripe.default_http_client = instrumented_client
yield instrumented_client.calls
finally:
stripe.default_http_client = previous_client
```
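A sketch of the instrumentation context manager above; it assumes the `stripe` SDK is already configured with an API key, and simply swaps in the wrapping client for the duration of the block:
```python
import stripe

from rayures.instrumentation import instrument_client

with instrument_client() as calls:
    stripe.Customer.list(limit=1)   # any SDK call made here is recorded

for method, url, status, request_id, error in calls:
    print(method, url, status, request_id, error)
```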
#### File: src/rayures/integration.py
```python
class BaseCustomerFinder:
def find(self, request):
"""Find customer based on request.
Must return a rayure.Customer instance or None
"""
raise NotImplementedError
```
#### File: management/commands/replay_events.py
```python
from django.apps import apps
from django.core.management.base import BaseCommand
from rayures.events import dispatch
class Command(BaseCommand):
help = 'Sync stripe events'
def handle(self, *args, **options):
# TODO: option to select only the one that have only failed or never processed
cls = apps.get_model('rayures', 'Event')
qs = cls.objects.all()
# qs = qs.filter(type__startswith='coupon.')
for event in qs.order_by('created_at'):
print(event, event.created_at, event.type)
dispatch(event)
```
#### File: management/commands/sync_stripe_events.py
```python
import stripe
from django.core.management.base import BaseCommand
from rayures.reconciliation import reconciliate_event
class Command(BaseCommand):
help = 'Sync stripe events'
def handle(self, *args, **options):
for obj in stripe.Event.all(limit=1000).auto_paging_iter():
obj, created = reconciliate_event(obj, persist=True)
status = 'created' if created else 'updated'
self.stdout.write(f'{type(obj)._stripe_object} {obj.id} {status}')
```
#### File: src/rayures/reconciliation.py
```python
import stripe
from . import models
from .utils import dt_from_stripe
from collections import namedtuple
from django.contrib.contenttypes.models import ContentType
from django.db import transaction
from django.db.models.functions import Now
from django.utils import timezone
from email.utils import parsedate_to_datetime
def squeeze_state(state, *,
created=None,
deleted=None,
now=None,
api_version=None,
request_id=None,
idempotency_key=None) -> dict:
opts = {
'data': state,
'created': created,
'deleted': deleted,
'now': now,
'api_version': api_version,
'request_id': request_id,
'idempotency_key': idempotency_key,
'id': None,
'object': None,
'account': None,
}
last_response = None
last_response_date = None
if isinstance(state, stripe.stripe_object.StripeObject):
# bingo! best alternative
last_response = state.last_response
opts.update({
'account': state.stripe_account,
'object': getattr(type(state), "class_name", None),
'id': state.get('id', None)
})
if state.stripe_version:
opts["api_version"] = state.stripe_version
elif isinstance(state, stripe.stripe_response.StripeResponse):
last_response = state
if last_response:
opts.update({
'data': last_response.data,
'request_id': opts['request_id'] or last_response.request_id,
'idempotency_key': last_response.idempotency_key,
'api_version': opts['api_version'] or last_response.headers.get('Stripe-Version', None),
})
dt = last_response.headers.get('Date', None)
if dt:
opts['now'] = last_response_date = parsedate_to_datetime(dt)
opts['object'] = opts['data'].get('object', opts['object']) # no object smells bad
opts['id'] = opts['data'].get('id', opts['id']) # no id can be invoice.upcoming
opts['deleted'] = deleted = opts['data'].get('deleted', deleted) is True
if not deleted and opts['created'] is None:
# let's try to find if it's a creation by comparing created attrs with Date header
if ('created' in opts['data']) and last_response_date:
dt1 = dt_from_stripe(state['created'])
opts['created'] = dt1 == last_response_date
return opts
def squeeze_event(event) -> dict:
opts = {"source": "webhook"}
if isinstance(event, dict):
opts.update({
"event_id": event.get("id", None)
})
if isinstance(event, stripe.Event):
opts.update({
"now": event["created"], # FIXME: make it datetime,
"api_version": event["api_version"],
"state": event["data"]["object"],
"event_id": event.id
})
if event["request"] is not None:
opts.update({
"request_id": event["request"]["id"],
"idempotency_key": event["request"]["idempotency_key"],
})
event_type = event["type"]
elif isinstance(event, models.Event):
opts.update({
"now": event.created_at,
"api_version": event.api_version,
"request_id": event.request_id,
"idempotency_key": event.idempotency_key,
"state": event.data["data"]["object"],
"event_id": event.id
})
event_type = event.type
else:
raise NotImplementedError("whoot?")
opts["created"] = event_type.endswith('.created')
opts["deleted"] = event_type.endswith('.deleted')
return opts
Reconciliation = namedtuple('Reconciliation', 'instance, persisted')
def reconciliate_by_event(event, *, persist=None) -> Reconciliation:
opts = squeeze_event(event)
opts.setdefault("persist", persist)
# we are good so far
rec = reconciliate(**opts)
# TODO: link non persisted obj, like Discount and invoice.upcoming, customer.discount.created
obj = rec.instance
cls = type(obj)
if rec.persisted and cls in models.PersistedModel.__subclasses__():
event.content_type = ContentType.objects.get_for_model(cls)
event.object_id = obj.id
event.save(update_fields=['content_type', 'object_id'])
return rec
def handle_volative_model(data, object, **opts):
cls = {'balance': models.Balance, 'discount': models.Discount}.get(object)
if cls:
instance = cls(data)
return Reconciliation(instance, False)
raise NotImplementedError(f'Tristesse {object}')
def handle_incoming_model(data, object, now, deleted, created, api_version, **opts):
cls = models.PERSISTED_MODELS.get(object)
instance = cls(data=data, api_version=api_version)
if created: # stripe does not give a delete time, try to stick to almost near
instance.created_at = now or timezone.now()
if deleted: # stripe does not give a delete time, try to stick to almost near
instance.deleted_at = now or timezone.now()
instance.rebound_fields()
# TODO: do we care about meta?
return Reconciliation(instance, False)
def handle_persistable_model(data, object, id, persist, api_version, now, deleted, created, **opts):
cls = models.PERSISTED_MODELS.get(object)
defaults = {
'api_version': api_version
}
# 99.999% of the time, deletion are a total mess
if not data.get('deleted', None):
defaults['data'] = data
def load_instance(stripe_id, defaults, cls, qs, created, deleted, now):
instance = qs.filter(id=stripe_id).first()
newborn = not instance
if instance:
for k, v in defaults.items():
setattr(instance, k, v)
else:
instance = cls(id=stripe_id, **defaults)
instance.created_at = now
if created: # stripe does not give always a create time, try to stick to almost near
instance.created_at = getattr(instance, 'created_at', None) or now or timezone.now()
if deleted: # stripe does not give a delete time, try to stick to almost near
instance.deleted_at = getattr(instance, 'deleted_at', None) or now or timezone.now()
else:
instance.updated_at = now
instance.rebound_fields()
return instance, newborn
if persist is True:
with transaction.atomic():
qs = cls.objects.select_for_update(of=('self',))
instance, newborn = load_instance(id, defaults, cls, qs, created, deleted, now)
instance.save()
return Reconciliation(instance, True)
else:
qs = cls.objects
instance, newborn = load_instance(id, defaults, cls, qs, created, deleted, now)
# TODO: do we care about meta?
return Reconciliation(instance, False)
def reconciliate_event(state, *,
persist=None,
api_version=None,
request_id=None,
idempotency_key=None) -> Reconciliation:
opts = squeeze_state(state,
api_version=api_version,
request_id=request_id,
idempotency_key=idempotency_key)
opts.setdefault('persist', persist)
opts.setdefault('source', "webhook")
return handle_persistable_model(**opts)
def reconciliate(state, *,
created=None,
deleted=None,
persist=None,
source="sdk",
now=None,
api_version=None,
request_id=None,
idempotency_key=None,
event_id=None) -> Reconciliation:
opts = squeeze_state(state,
created=created,
deleted=deleted,
now=now,
api_version=api_version,
request_id=request_id,
idempotency_key=idempotency_key)
opts.setdefault('persist', persist)
opts.setdefault('source', source)
opts.setdefault('event_id', event_id)
if opts['object'] not in models.PERSISTED_MODELS:
rec = handle_volative_model(**opts)
elif opts['id'] is None:
rec = handle_incoming_model(**opts)
else:
rec = handle_persistable_model(**opts)
# FIXME: handle meta now!
if rec.persisted and opts['object'] not in ('event',):
handle_meta(rec.instance, **opts)
return rec
def handle_meta(instance, *, created, deleted, event_id, request_id, idempotency_key, source, **opts):
cls = type(instance)
content_type = ContentType.objects.get_for_model(cls)
defaults = {
'event_id': event_id,
'request_id': request_id,
'idempotency_key': idempotency_key,
'source': source
}
if created:
defaults['created_at'] = Now()
defaults['deleted_at'] = None
if deleted:
defaults['deleted_at'] = Now()
else:
defaults['updated_at'] = Now()
meta, created = models.RayureMeta.objects.update_or_create(id=instance.id,
content_type=content_type,
defaults=defaults)
return meta
```
#### File: src/rayures/stripe_webhooks.py
```python
import logging
from .events import listen
from .models import PersistedModel
logger = logging.getLogger('rayures')
@listen('*', position=10)
def persist_obj(event, obj):
try:
if isinstance(obj, PersistedModel):
obj.save()
logger.info(f'persisted {obj}', extra={'obj': obj})
else:
logger.info(f'ephemeral {obj}', extra={'obj': obj})
except Exception as error:
logger.error(f'failed to persist {obj.id}: {error}', extra={'obj': obj})
```
{
"source": "johnnoone/facts",
"score": 2
}
#### File: facts/facts/conf.py
```python
import os.path
class Settings:
def __init__(self):
self.entry_point = os.path.expanduser('facts.graft')
self.userfacts = os.path.expanduser('~/.facts/user.yml')
self.userpath = os.path.expanduser('~/.facts/grafts')
settings = Settings()
```
#### File: facts/tests/test_targeting.py
```python
import pytest
from facts.targeting import Target, NotFound, WrongType
def test_match():
obj1 = {'foo': 42, 'bar': ['baz', 'qux']}
obj2 = {'one': 42, 'two': {'three': 4}}
target = Target('foo')
assert target.match(obj1) is True
assert target.match(obj2) is False
target = Target('foo:42')
assert target.match(obj1) is True
assert target.match(obj2) is False
target = Target('bar:baz')
assert target.match(obj1) is True
assert target.match(obj2) is False
target = Target('bar:qux')
assert target.match(obj1) is True
assert target.match(obj2) is False
target = Target('two:three:4')
assert target.match(obj1) is False
assert target.match(obj2) is True
def test_read():
obj1 = {'foo': 42, 'bar': ['baz', 'qux']}
obj2 = {'one': 42, 'two': {'three': 4}}
target = Target('foo')
assert target.read(obj1) == 42
with pytest.raises(NotFound):
target.read(obj2)
target = Target('foo:42')
with pytest.raises(WrongType):
target.read(obj1)
with pytest.raises(NotFound):
target.read(obj2)
target = Target('bar:baz')
with pytest.raises(WrongType):
target.read(obj1)
with pytest.raises(NotFound):
target.read(obj2)
target = Target('bar:qux')
with pytest.raises(WrongType):
target.read(obj1)
with pytest.raises(NotFound):
target.read(obj2)
target = Target('two:three')
with pytest.raises(NotFound):
target.read(obj1)
assert target.read(obj2) == 4
def test_read_2():
obj = {'one': {'two': None}}
target = Target('one:two:three')
with pytest.raises(NotFound):
target.read(obj)
def test_write_1():
obj1 = {'foo': 42}
target = Target('foo')
obj2 = target.write(obj1, 'bar')
assert target.read(obj1) == 42
assert target.read(obj2) == 'bar'
obj3 = target.delete(obj1)
assert 'foo' not in obj3
def test_write_2():
obj1 = {'foo': {'bar': 'baz'}}
target = Target('foo:bar')
obj2 = target.write(obj1, 'qux')
assert target.read(obj1) == 'baz'
assert target.read(obj2) == 'qux'
obj3 = target.delete(obj1)
assert 'baz' not in obj3['foo']
def test_write_3():
obj1 = {'foo': ['one', 'two']}
target = Target('foo:-')
obj2 = target.write(obj1, 'bar')
assert obj2 == {'foo': ['one', 'two', 'bar']}
obj3 = target.delete(obj1)
assert obj3 == {'foo': ['one']}
def test_write_4():
obj1 = {'foo': ['one', 'two']}
target = Target('foo:1')
obj2 = target.write(obj1, 'bar')
assert obj2 == {'foo': ['one', 'bar']}
obj3 = target.delete(obj1)
assert obj3 == {'foo': ['one']}
def test_write_5():
obj1 = {'foo': [None, {'bar': 'baz'}]}
target = Target('foo:1:bar')
obj2 = target.write(obj1, 'qux')
assert obj2 == {'foo': [None, {'bar': 'qux'}]}
obj3 = target.delete(obj1)
assert obj3 == {'foo': [None, {}]}
def test_write_6():
obj1 = {'top': {'foo': 'bar'}}
target = Target('top')
obj2 = target.write(obj1, {'baz': 'qux'})
assert obj2 == {'top': {'baz': 'qux'}}
obj3 = target.write(obj1, {'baz': 'qux'}, merge=True)
assert obj3 == {'top': {'foo': 'bar', 'baz': 'qux'}}
```
#### File: facts/tests/test_user_grafts.py
```python
import pytest
from facts.grafts import user_grafts
from collections.abc import Mapping
@pytest.mark.asyncio
async def test_user():
data = await user_grafts.user_data_info()
assert isinstance(data.value, Mapping)
``` |
{
"source": "johnnoone/py-retry",
"score": 2
} |
#### File: py-retry/tests/test_coro.py
```python
import pytest
from itertools import cycle
from retrying import retry, Backoff, ExponentialBackoff, RandBackoff
from retrying import RetryError, MaxRetriesError, TimeoutError, TryAgain
from unittest.mock import Mock
from datetime import timedelta
class Dumb(Exception):
pass
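# The stub's return value drives coro's behaviour: 'again' raises TryAgain,
# 'dumb' raises Dumb, 'exception' raises a bare Exception, anything else is
# returned as the result.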
async def coro(stub):
action = stub()
if action == 'again':
raise TryAgain
if action == 'dumb':
raise Dumb()
if action == 'exception':
raise Exception()
return action
@pytest.fixture
def mock():
return Mock()
@pytest.mark.asyncio
async def test_inherits_docs():
@retry
def example():
"""Docstring"""
assert example.__name__ == 'example'
assert example.__doc__ == 'Docstring'
@pytest.mark.asyncio
async def test_retry_until_success(mock):
sentinel = 'ok'
mock.side_effect = [
'dumb',
'exception',
sentinel,
'exception',
sentinel
]
assert (await retry(coro)(mock)) == sentinel
assert mock.call_count == 3
@pytest.mark.asyncio
async def test_on_exception(mock):
sentinel = 'dumb'
def callback(error, ctx):
return not isinstance(error, Dumb)
mock.side_effect = ['exception', sentinel, sentinel]
with pytest.raises(Dumb):
await retry(coro, on_exception=callback)(mock)
assert mock.call_count == 2
@pytest.mark.asyncio
async def test_wrap_exception(mock):
sentinel = 'dumb'
def callback(error, ctx):
return not isinstance(error, Dumb)
mock.side_effect = ['exception', sentinel, sentinel]
with pytest.raises(RetryError):
await retry(coro, on_exception=callback, wrap_exception=True)(mock)
assert mock.call_count == 2
@pytest.mark.asyncio
async def test_on_result(mock):
sentinel = 'bar'
def callback(result, ctx):
return result == 'foo'
mock.side_effect = ['foo', 'foo', sentinel, sentinel]
assert (await retry(coro, on_result=callback)(mock)) == sentinel
assert mock.call_count == 3
@pytest.mark.asyncio
async def test_on_global(mock):
sentinel = 'bar'
def callback(result, exception, ctx):
return result == 'foo'
mock.side_effect = ['foo', 'foo', sentinel, sentinel]
assert (await retry(coro, on_global=callback)(mock)) == sentinel
assert mock.call_count == 3
@pytest.mark.asyncio
async def test_on_global_caused_runtime_error(mock):
def callback(result, exception, ctx):
raise Exception('No reason')
mock.side_effect = ['foo']
with pytest.raises(RuntimeError):
await retry(coro, on_global=callback)(mock)
@pytest.mark.asyncio
async def test_max_tries(mock):
mock.side_effect = cycle(['dumb', 'exception'])
with pytest.raises(MaxRetriesError):
await retry(coro, max_tries=4)(mock)
assert mock.call_count == 4
@pytest.mark.asyncio
async def test_backoff(mock):
mock.side_effect = cycle(['dumb', 'exception', 'foo'])
await retry(coro, backoff=Backoff(.001))(mock)
assert mock.call_count == 3
@pytest.mark.asyncio
async def test_exponential_backoff(mock):
mock.side_effect = cycle(['dumb', 'exception', 'foo'])
await retry(coro, backoff=ExponentialBackoff(.001))(mock)
assert mock.call_count == 3
@pytest.mark.asyncio
async def test_rand_backoff(mock):
mock.side_effect = cycle(['dumb', 'exception', 'foo'])
await retry(coro, backoff=RandBackoff(timedelta(seconds=.1),
timedelta(seconds=.2)))(mock)
assert mock.call_count == 3
@pytest.mark.asyncio
async def test_custom_backoff(mock):
mock.side_effect = cycle(['dumb', 'exception', 'foo'])
await retry(coro, backoff=cycle([timedelta(seconds=.001)]))(mock)
assert mock.call_count == 3
@pytest.mark.asyncio
async def test_backoff_on_context(mock):
def callback(result, ctx):
assert isinstance(ctx.backoff, Backoff)
return result != 'bar'
mock.side_effect = cycle(['foo', 'bar'])
assert (await retry(coro, on_result=callback, max_tries=4)(mock)) == 'bar'
assert mock.call_count == 2
@pytest.mark.asyncio
async def test_change_backoff_on_context(mock):
def callback(result, ctx):
ctx.backoff = Backoff(seconds=.1)
return result != 'bar'
mock.side_effect = cycle(['foo', 'bar'])
assert (await retry(coro, on_result=callback, max_tries=4)(mock)) == 'bar'
assert mock.call_count == 2
@pytest.mark.asyncio
async def test_timeout(mock):
def callback(result, ctx):
return True
mock.side_effect = cycle(['foo', 'bar'])
with pytest.raises(TimeoutError):
await retry(coro,
giveup_after=timedelta(seconds=1),
on_result=callback, backoff=Backoff(seconds=1))(mock)
assert mock.call_count >= 1
@pytest.mark.asyncio
async def test_reraise_on_maxtries_throws_value_error(mock):
def callback(result, ctx):
return True
mock.side_effect = cycle(['foo', 'bar'])
with pytest.raises(ValueError):
await retry(coro, on_result=callback, max_tries=4, reraise=True)(mock)
@pytest.mark.asyncio
async def test_reraise_on_maxtries_has_effect(mock):
mock.side_effect = cycle(['dumb'])
with pytest.raises(Dumb):
await retry(coro, max_tries=4, reraise=True)(mock)
assert mock.call_count >= 4
@pytest.mark.asyncio
async def test_reraise_on_timeout_throws_value_error(mock):
def callback(result, ctx):
return True
mock.side_effect = cycle(['foo', 'bar'])
with pytest.raises(ValueError):
await retry(coro,
giveup_after=timedelta(seconds=1),
on_result=callback,
backoff=Backoff(seconds=1),
reraise=True)(mock)
@pytest.mark.asyncio
async def test_reraise_on_timeout_has_effect(mock):
mock.side_effect = cycle(['dumb'])
with pytest.raises(Dumb):
await retry(coro,
giveup_after=timedelta(seconds=1),
backoff=Backoff(seconds=1),
reraise=True)(mock)
@pytest.mark.asyncio
async def test_try_again_timeout(mock):
mock.side_effect = cycle(['again'])
with pytest.raises(TimeoutError):
await retry(coro,
giveup_after=timedelta(seconds=1),
backoff=Backoff(seconds=1))(mock)
@pytest.mark.asyncio
async def test_try_again_max_retries(mock):
mock.side_effect = cycle(['again'])
with pytest.raises(MaxRetriesError):
await retry(coro, max_tries=4)(mock)
``` |
{
"source": "johnnoone/starlette",
"score": 2
} |
#### File: starlette/middleware/sessions.py
```python
import functools
import json
import typing
from base64 import b64decode, b64encode
import itsdangerous
from itsdangerous.exc import BadTimeSignature, SignatureExpired
from starlette.datastructures import MutableHeaders, Secret
from starlette.requests import Request
from starlette.types import ASGIApp, ASGIInstance, Message, Receive, Scope, Send
class SessionMiddleware:
def __init__(
self,
app: ASGIApp,
secret_key: typing.Union[str, Secret],
session_cookie: str = "session",
max_age: int = 14 * 24 * 60 * 60, # 14 days, in seconds
) -> None:
self.app = app
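        # TimestampSigner embeds a signed timestamp, so unsign(..., max_age=...)
        # can reject stale cookies as well as tampered ones.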
self.signer = itsdangerous.TimestampSigner(str(secret_key))
self.session_cookie = session_cookie
self.max_age = max_age
def __call__(self, scope: Scope) -> ASGIInstance:
if scope["type"] in ("http", "websocket"):
request = Request(scope)
if self.session_cookie in request.cookies:
data = request.cookies[self.session_cookie].encode("utf-8")
try:
data = self.signer.unsign(data, max_age=self.max_age)
scope["session"] = json.loads(b64decode(data))
except (BadTimeSignature, SignatureExpired):
scope["session"] = {}
else:
scope["session"] = {}
return functools.partial(self.asgi, scope=scope)
return self.app(scope) # pragma: no cover
async def asgi(self, receive: Receive, send: Send, scope: Scope) -> None:
was_empty_session = not scope["session"]
inner = self.app(scope)
async def sender(message: Message) -> None:
if message["type"] == "http.response.start":
if scope["session"]:
# We have session data to persist.
data = b64encode(json.dumps(scope["session"]).encode("utf-8"))
data = self.signer.sign(data)
headers = MutableHeaders(scope=message)
header_value = "%s=%s; path=/" % (
self.session_cookie,
data.decode("utf-8"),
)
headers.append("Set-Cookie", header_value)
elif not was_empty_session:
# The session has been cleared.
headers = MutableHeaders(scope=message)
header_value = "%s=%s" % (
self.session_cookie,
"null; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT",
)
headers.append("Set-Cookie", header_value)
await send(message)
await inner(receive, sender)
``` |
{
"source": "JohnNooney/HadoopAutoProvisioner",
"score": 3
} |
#### File: flask-api/endpoints/builder.py
```python
from flask_restful import Resource, reqparse
import docker
import os
import json
import yaml
import ast
import subprocess
# This endpoint takes care of building the Hadoop cluster from a docker-compose file
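# Typical wiring sketch (route path assumed, not part of this file):
#   api.add_resource(Builder, '/builder')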
class Builder(Resource):
def __init__(self):
self.origin = 'http://localhost:3000'
self.status = 200
self.payload = "default"
self.baseYamlFile = './hadoop-cluster/base-docker-compose.yml'
self.newYamlFile = './hadoop-cluster/docker-compose.yml'
self.yaml = {}
        self.receivedData = {}
def get(self):
self.status = 200
self.readStoredDict()
        if self.receivedData:
            self.payload = {"payload": self.receivedData}
else:
self.payload = {"payload": "none"}
return self.payload, \
self.status, \
{'Access-Control-Allow-Origin': self.origin} # return data and 200 OK code
def post(self):
parser = reqparse.RequestParser()
parser.add_argument('data', required=False)
parser.add_argument('type', required=True)
args = parser.parse_args() # parse arguments to dictionary
try:
print("POST incoming type: ", args)
if args['type'] == "container":
print("starting container...")
containerId = self.startContainer("test cmd")
self.status = 200
self.payload = {"containerId": containerId}
elif args['type'] == "cluster":
print("starting cluster...")
containerId = self.startCluster(args['data'])
self.status = 200
self.payload = {"clusterConf": "Successfully started"}
elif args['type'] == "stop":
print("stopping cluster...")
self.stopCluster()
self.status = 200
self.payload = {"clusterConf": "Successfully Stopped"}
except Exception as e:
print("Unable to accept request, error: ", e)
self.status = 400
self.payload = "Error: Unable to accept request. Check server. "
finally:
return {'received': args, 'payload': self.payload}, \
self.status, \
{'Access-Control-Allow-Origin': self.origin}
# method for pre-flight requests. Checks to make sure browser can communicate
def options(self):
return {'Allow': 'POST, GET, OPTIONS'}, \
self.status, \
{'Access-Control-Allow-Origin': self.origin,
'Access-Control-Allow-Methods': 'POST, GET, OPTIONS',
'Access-Control-Allow-Headers': 'Content-Type',
'Content-Type': 'application/json'}
def startContainer(self, cmd):
# client = docker.from_env()
# container = client.containers.run("johnnoon74/getting-started", detach=True)
# print(container.id)
result = subprocess.check_output(['docker', 'run', '-d', 'johnnoon74/getting-started'])
print("subprocess response: " + result.decode())
return result.decode()
        # return container.id
    # if run in a container, stopping will also need to happen in the container
def startCluster(self, vars):
dict = ast.literal_eval(vars)
print("dict: ", dict)
print("Cluster Name: ", dict["name_node_cluster_name"])
# map each dict value to environment variables to be used in the docker compose
self.writeEnv(dict)
# load base yaml file and append new data based on request from UI
self.loadYaml()
self.writeYaml(dict)
# TODO: surround this with a try/catch later on
        # docker compose -f ./hadoop-cluster/docker-compose.yml up -d
result = subprocess.check_output(['docker', 'compose', '-f', 'hadoop-cluster/docker-compose.yml', 'up', '-d'])
print("subprocess response: " + result.decode())
self.storeDict(dict)
return result.decode()
# return "test"
def stopCluster(self):
result = subprocess.check_output(['docker', 'compose', '-f', 'hadoop-cluster/docker-compose.yml', 'down', '-v'])
print("subprocess response: " + result.decode())
self.deleteDict()
return result.decode()
# return "test"
# store dictionary sent by UI. Used to restore state
def storeDict(self, dict):
print("writing cached json for UI state...")
with open("cached-state.json", "w") as f:
json.dump(dict, f)
def readStoredDict(self):
print("reading cached json for UI state...")
try:
with open("cached-state.json", 'r') as f:
                self.receivedData = json.load(f)
                print("read json: ", self.receivedData)
except FileNotFoundError:
print("No saved cached state")
            self.receivedData = {}
# delete stored dictionary
def deleteDict(self):
os.remove("cached-state.json")
# write Hadoop cluster data to .env file
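    # The variable names appear to follow the Hadoop-on-Docker convention where
    # YARN_CONF_foo_bar maps to the yarn-site.xml property foo.bar and '___'
    # stands in for '-' (e.g. yarn.timeline-service.enabled); treated here as an
    # assumption about the uhopper images.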
def writeEnv(self, dict):
with open("./hadoop-cluster/user_hadoop.env", "w") as f:
f.write("CLUSTER_NAME="+dict["name_node_cluster_name"]+"\n")
if 'extras_historyserver' in dict:
if dict['extras_historyserver']:
f.write("YARN_CONF_yarn_log_server_url=http://historyserver.hadoop:8188/applicationhistory/logs/"+"\n")
f.write("YARN_CONF_yarn_timeline___service_enabled=true"+"\n")
f.write("YARN_CONF_yarn_timeline___service_generic___application___history_enabled=true"+"\n")
f.write("YARN_CONF_yarn_resourcemanager_system___metrics___publisher_enabled=true"+"\n")
f.write("YARN_CONF_yarn_resourcemanager_hostname=resourcemanager.hadoop"+"\n")
f.write("YARN_CONF_yarn_timeline___service_hostname=resourcemanager.hadoop"+"\n")
# load yaml from docker-compose only first time
def loadYaml(self):
if not self.yaml:
with open(self.baseYamlFile, 'r') as f:
self.yaml = yaml.load(f, Loader=yaml.FullLoader)
# reset yaml dict to starting data
def resetYaml(self):
# write yaml to file
with open(self.baseYamlFile, 'w') as f:
yaml.dump(self.yaml, f)
# used to modify yaml in order to add resources to cluster as necessary
def writeYaml(self, dict):
self.resetYaml()
yamlData = self.yaml
print(yamlData)
preUpdateServices = yamlData['services']
dataNodeYaml = {}
volumesYaml = {}
resourceManagerNodeYaml = {}
historyServerYaml = {}
nodeManagerYaml = {}
sparkYaml = {}
# based on how many worker nodes requested
for i in range(int(dict['data_node_workers'])):
# DataNode modifier
dataNodeYaml = {**dataNodeYaml, 'datanode' + str(i + 1): {'image': 'uhopper/hadoop-datanode',
'hostname': 'datanode' + str(i + 1) + '.hadoop',
'networks': ['hadoop'],
'depends_on': ['namenode'],
'volumes': ['datanode-vol'+str(i + 1)+':/hadoop/dfs/data'],
'env_file': ['./hadoop.env', './user_hadoop.env']}}
volumesYaml = {**volumesYaml, 'datanode-vol'+str(i + 1): {}}
# based on if the resource manager was enabled
if 'yarn_resource_manager' in dict:
resourceManagerNodeYaml = {'resourcemanager': {'depends_on': ['namenode'], 'env_file': ['./hadoop.env', './user_hadoop.env'],
'hostname': 'resourcemanager.hadoop',
'image': 'uhopper/hadoop-resourcemanager',
'networks': ['hadoop'], 'ports': ['8088:8088']}}
# based on if the history server is enabled
if 'extras_historyserver' in dict:
if dict['extras_historyserver']:
historyServerYaml = {'historyserver': {'depends_on': ['namenode'], 'env_file': ['./hadoop.env', './user_hadoop.env'],
'hostname': 'historyserver.hadoop',
'image': 'uhopper/hadoop-historyserver',
'networks': ['hadoop'],}}
# based on how many node managers requested
if 'yarn_node_managers' in dict:
for i in range(int(dict['yarn_node_managers'])):
# Node manager modifier
nodeManagerYaml = {**nodeManagerYaml,
'nodemanager' + str(i + 1): {'depends_on': ['namenode', 'resourcemanager'],
'env_file': ['./hadoop.env', './user_hadoop.env'],
'hostname': 'nodemanager' + str(i + 1) + '.hadoop',
'image': 'uhopper/hadoop-nodemanager',
'networks': ['hadoop'], 'ports': [str(8042+i)+':8042']}} # increment port forward
# based on if spark was enabled
if 'extras_spark' in dict:
sparkYaml = {
'spark': {'command': 'tail -f /var/log/dmesg', 'env_file': ['./hadoop.env', './user_hadoop.env'], 'hostname': 'spark.hadoop',
'image': 'uhopper/hadoop-spark', 'networks': ['hadoop'],
'ports': ['4040:4040', '9000:9000', '8080:8080']}}
# combine data node yaml with the services already in the docker-compose
newYamlData = {'services': {**dataNodeYaml, **resourceManagerNodeYaml, **historyServerYaml, **nodeManagerYaml, **sparkYaml, **preUpdateServices},
'volumes': {**volumesYaml, 'namenode-vol':{}}}
# merge data in full docker-compose yaml
yamlData.update(newYamlData)
# write yaml to file
with open(self.newYamlFile, 'w') as f:
yaml.dump(yamlData, f)
``` |
{
"source": "JohnNooney/rpi-cast-aws",
"score": 3
} |
#### File: rpi-cast-aws/rpi/gpiocontroller.py
```python
import json
import datetime
import subprocess as sp
import boto3
import time
import sys
from gpiozero import LED
from gpiozero import Button
# Red led = gpio 24
# Green led = gpio 18
# Button = gpio 3
class RpiHandler:
def __init__(self, table_name, username):
self.state = False
self.start_time = datetime.datetime.now()
self.green_led = LED(18)
self.red_led = LED(24)
self.button = Button(3)
self.counter = 0
self.restarts = 0
self.table_name = table_name
self.user = username
self.session_status = ""
    # build a log entry in the specified json format
def generate_log(self):
time_delta = datetime.datetime.now() - self.start_time
        data_json = {
            "RpiDateTime": str(self.start_time),
            "RpiUser": self.user,
            "RpiSession": str(self.counter),
            "RpiSessionStatus": self.session_status,
            "RpiDuration": str(time_delta),
            "RpiFault": "none",
            "RpiRestarts": str(self.restarts),
        }
        return data_json
def handle(self):
print("button press")
self.state = not self.state
self.counter+=1
table = self.dynamo_get_table(self.table_name)
if self.state:
# turn on green LED
print("Green LED on.")
self.green_led.on()
self.red_led.off()
self.session_status = "active"
# construct log
print("Sending initial log to AWS...")
data = self.generate_log()
print("generated log: ", data)
# send log to aws
self.dynamo_put(table, data)
# blink led
self.green_led.off()
time.sleep(.5)
self.green_led.on()
# start AirPlay server as background process
print("Starting AirPlay Server...")
sp.Popen("/home/pi/Downloads/RPiPlay/build/rpiplay", shell=True, stdout=sp.PIPE, stderr=sp.PIPE)
print("Check your IPhone for RPiPlay in the AirPlay menu.")
print("To turn off AirPlay Server press the button again.")
else:
# turn on red LED
print("Red LED on.")
self.green_led.off()
self.red_led.on()
self.session_status = "inactive"
# stop airplay server
print("Stopping AirPlay Server...")
            sp.run(["pkill", "-f", "rpiplay"])
print("AirPlay server stopped.")
# construct log
print("Sending concluding log to AWS...")
data = self.generate_log()
print("generated log: ", data)
# submit log
self.dynamo_put(table, data)
print("To start the AirPlay server again press the button.")
self.restarts+=1
def dynamo_get_table(self, name):
# Get the service resource.
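        # boto3 resolves credentials/region from its default chain
        # (environment variables, ~/.aws config files, or an attached IAM role).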
dynamodb = boto3.resource('dynamodb')
# Instantiate a table resource object without actually
# creating a DynamoDB table. Note that the attributes of this table
# are lazy-loaded: a request is not made nor are the attribute
# values populated until the attributes
# on the table resource are accessed or its load() method is called.
table = dynamodb.Table(name)
# Print out some data about the table.
# This will cause a request to be made to DynamoDB and its attribute
# values will be set based on the response.
print(table.creation_date_time)
return table
    # put item in the specified json format
def dynamo_put(self, table, data):
request = table.put_item(Item=data)
print(request)
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
print("Welcome to the RPi and AWS controller!")
print("press your button to start the AirPlay server")
flag = True
username = "test user"
if len(sys.argv) > 1:
username = str(sys.argv[1])
rpi = RpiHandler("rpi-aws-log", username)
    rpi.button.when_pressed = rpi.handle
    while flag:
        time.sleep(1)
``` |
{
"source": "johnnovak/johnnovak.site",
"score": 2
} |
#### File: johnnovak.site/photo/generate-albums.py
```python
import collections
import os
import shutil
import sys
import yaml
from collections import OrderedDict
from jinja2 import Environment, FileSystemLoader
from optparse import OptionParser
from os.path import join as joinpath
from PIL import Image
from pprint import pprint
VERBOSE = False
CATEGORIES_CONFIG_FNAME = '_categories.yaml'
ALBUMS_CONFIG_FNAME = '_albums.yaml'
# rename 'photos' to 'images'
PHOTOS_CONFIG_FNAME = '_photos.yaml'
ALBUM_IMAGE_FNAME = '_album.jpg'
# {{{ YAML ORDERDICT EXTENSION
# From: http://stackoverflow.com/a/21048064
def dict_representer(dumper, data):
return dumper.represent_mapping(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, data.iteritems())
yaml.add_representer(collections.OrderedDict, dict_representer)
#def dict_constructor(loader, node):
# return collections.OrderedDict(loader.construct_pairs(node))
#
#yaml.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, dict_constructor)
# }}}
# {{{ UTILS
class ConfigException(Exception):
pass
def info(msg):
if VERBOSE:
print msg
def warn(msg):
print 'WARNING:', msg
def read_image_size(fname):
im = Image.open(fname)
return im.size
def read_yaml(fname):
stream = file(fname, 'r')
return yaml.load(stream)
def write_yaml(data, fname):
stream = yaml.dump(data, default_flow_style=False)
stream = stream.replace('\n- ', '\n\n- ')
f = open(fname, 'w')
f.write(stream)
def write_file(data, fname):
outf = open(fname, 'w')
outf.write(data.encode('utf8'))
outf.close()
def get_categories_config_fname(path):
return joinpath(path, CATEGORIES_CONFIG_FNAME)
def get_albums_config_fname(path):
return joinpath(path, ALBUMS_CONFIG_FNAME)
def get_photos_config_fname(path):
return joinpath(path, PHOTOS_CONFIG_FNAME)
def get_category_path(path, category):
return joinpath(path, category['name'])
def get_album_path(path, album):
return joinpath(path,
album['category'], album['section'], album['name'])
def get_image_path(path, album, image):
return joinpath(get_album_path(path, album), image['filename'])
def get_album_image_fname(path, album):
return joinpath(get_album_path(path, album), ALBUM_IMAGE_FNAME)
def format_date(date):
return date.replace('-', '–')
# }}}
# {{{ CREATE CONFIG
def create_configs(path):
categories = build_categories_config(path)
write_categories_config(categories, path)
info('Creating album configs')
create_all_album_configs(path)
def build_categories_config(path):
info('Building categories config')
categories = []
for fname in os.listdir(path):
category_path = joinpath(path, fname)
if not fname.startswith('_') and os.path.isdir(category_path):
category_name = fname
info(" Category found: '%s'" % category_name)
cat = OrderedDict([
('name', category_name),
('title', category_name)
])
categories.append(cat)
return categories
def write_categories_config(config, path):
fname = get_categories_config_fname(path)
info(" Writing categories config '%s'\n" % fname)
write_yaml(config, fname)
def create_all_album_configs(path):
for category_name in os.listdir(path):
category_path = joinpath(path, category_name)
if os.path.isdir(category_path):
info(" Creating album configs for category: '%s'" % category_name)
create_album_configs_for_category(category_path, category_name)
def create_album_configs_for_category(category_path, category_name):
for album_name in os.listdir(category_path):
album_path = joinpath(category_path, album_name)
if os.path.isdir(album_path):
info(" Album found: '%s'" % album_name)
album = build_album_config(album_path, category_name, album_name)
write_album_config(album_path, album)
def write_album_config(path, config):
fname = get_album_config_fname(path)
info(" Writing album config: '%s'\n" % fname)
write_yaml(config, fname)
def build_album_config(album_path, category_name, album_name):
album = OrderedDict()
album['name'] = album_name
album['date'] = ''
album['category'] = category_name
images = []
for fname in os.listdir(album_path):
if fname.endswith('.jpg') and fname != ALBUM_IMAGE_FNAME:
info(" Image found: '%s'" % fname)
img = OrderedDict([
('filename', fname),
('title', os.path.splitext(fname)[0]),
('location', ''),
('date', '')
])
images.append(img)
album['images'] = images
return album
# }}}
# {{{ LOAD CONFIG
def load_config(path):
categories = load_categories_config(path)
config = load_all_album_configs(path, categories)
return config
def load_categories_config(path):
fname = get_categories_config_fname(path)
info("Loading categories config '%s'" % fname)
return read_yaml(fname)
def load_all_album_configs(path, categories):
configs = OrderedDict()
for category in categories:
category_name = category['name']
configs[category_name] = {
'name': category_name,
'title': category['title'],
'albums': load_album_configs_in_category(path, category_name)
}
return configs
def load_album_configs_in_category(basepath, category_name):
config = load_albums_config(basepath, category_name)
section_indices = []
for idx, item in enumerate(config):
if 'section' in item:
section_indices.append(idx)
for album in item['albums']:
album['section'] = item['section']
load_album_config_in_category(basepath, category_name, album)
else:
item['section'] = ''
load_album_config_in_category(basepath, category_name, item)
# Flatten sections within categories
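    # e.g. [{'section': 's', 'albums': [a1, a2]}, a3] -> [a3, a1, a2]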
idx_adjust = 0
for idx in section_indices:
i = idx + idx_adjust
for album in config[i]['albums']:
config.append(album)
del config[i]
idx_adjust = idx_adjust - 1
return config
def load_album_config_in_category(basepath, category_name, album):
album['category'] = category_name
album_path = get_album_path(basepath, album)
album['images'] = load_images_config(album_path)
def load_albums_config(basepath, category_name):
fname = get_albums_config_fname(joinpath(basepath, category_name))
config = read_yaml(fname)
return config
def load_images_config(album_path):
fname = get_photos_config_fname(album_path)
info("Loading images config '%s'" % fname)
config = read_yaml(fname)
return config
# }}}
# {{{ PROCESS CONFIG
def process_images(album, input_dir):
for image in album['images']:
fname = get_image_path(input_dir, album, image)
info(" Reading image dimensions: '%s'" % fname)
# }}}
# {{{ GENERATE OUTPUT
def generate_albums(config, input_dir, output_dir, basepath):
env = Environment(loader=FileSystemLoader(joinpath(input_dir,
'_templates')))
generate_album_pages(env, config, output_dir, basepath)
copy_default_album_page(config, output_dir)
generate_photo_pages(env, config, input_dir, output_dir, basepath)
copy_images(config, input_dir, output_dir)
generate_about_page(env, config, output_dir, basepath)
def get_categories(config):
return config.values()
def get_albums(category):
return category['albums']
def get_images(album):
return album['images']
def generate_album_pages(env, config, output_dir, basepath):
template = env.get_template('album.html')
for category in get_categories(config):
dirname = get_category_path(output_dir, category)
info("\nCreating category directory '%s'" % dirname)
os.mkdir(dirname)
html = template.render(
page='albums', basepath=basepath,
current_category=category['name'],
categories=assign_categories(config, basepath),
albums=assign_albums(category, basepath)
)
fname = joinpath(dirname, 'index.html')
info("Writing album page '%s'" % fname)
write_file(html, fname)
def copy_default_album_page(config, output_dir):
src_dir = get_category_path(output_dir, config.items()[0][1])
srcpath = joinpath(src_dir, 'index.html')
    info("Copying default album page '%s' to '%s'" % (srcpath, output_dir))
shutil.copy2(srcpath, output_dir)
def generate_photo_pages(env, config, input_dir, output_dir, basepath):
template = env.get_template('photo.html')
for category in get_categories(config):
for album in get_albums(category):
dirname = get_album_path(output_dir, album)
info("\nCreating album directory '%s'" % dirname)
os.makedirs(dirname)
html = template.render(
page='photo', basepath=basepath,
current_category=category['name'],
categories=assign_categories(config, basepath),
photos=assign_photos(album, input_dir)
)
fname = joinpath(dirname, 'index.html')
info("Writing photo page '%s'" % fname)
write_file(html, fname)
def copy_images(config, input_dir, output_dir):
for category in get_categories(config):
info("\nCopying images in category '%s'" % category['name'])
for album in get_albums(category):
info("\n Copying images in album '%s'" % album['name'])
info(" Copying album image")
shutil.copy2(get_album_image_fname(input_dir, album),
get_album_image_fname(output_dir, album))
for image in get_images(album):
srcpath = get_image_path(input_dir, album, image)
destpath = get_image_path(output_dir, album, image)
info(" Copying image '%s' to '%s" % (srcpath, destpath))
shutil.copy2(srcpath, destpath)
def assign_categories(config, basepath):
c = []
for category in get_categories(config):
c.append({
'href': get_category_path(basepath, category) + '/',
'caption': category['title'],
'name': category['name']
})
return c
def assign_albums(category, basepath):
a = []
for album in get_albums(category):
caption = album['title']
if 'date' in album and album['date']:
caption += ', ' + format_date(str(album['date']))
a.append({
'href': get_album_path(basepath, album) + '/',
'img_href': get_album_image_fname(basepath, album),
'caption': caption,
'section': album['section']
})
return a
def assign_photos(album, input_dir):
i = []
for image in album['images']:
image_id = os.path.splitext(image['filename'])[0]
image_fname = joinpath(get_album_path(input_dir, album),
image['filename'])
(width, height) = read_image_size(image_fname)
caption = image['title']
if 'location' in image and image['location']:
caption += ' — ' + image['location']
if 'date' in image and image['date']:
caption += ', ' + str(image['date'])
i.append({
'id': image_id,
'href': image['filename'],
'caption': caption,
'width': width
})
return i
def generate_about_page(env, config, output_dir, basepath):
template = env.get_template('about.html')
about_dir = joinpath(output_dir, 'about')
info("Creating directory '%s'" % about_dir)
os.mkdir(about_dir)
html = template.render(page='about', basepath=basepath,
categories=assign_categories(config, basepath))
fname = joinpath(about_dir, 'index.html')
info("Writing about page '%s'" % fname)
write_file(html, fname)
# }}}
# {{{ MAIN
def main():
usage = ('\n'
' 1. Generate default configuration files:\n'
' %prog -c [OPTIONS] INPUT-DIR\n\n'
' 2. Generate site:\n'
' %prog [OPTIONS] INPUT-DIR OUTPUT-DIR')
parser = OptionParser(usage=usage)
parser.add_option('-c', '--create-configs',
default=False, action='store_true',
help='create default configuration files')
parser.add_option('-p', '--basepath',
default='/',
help='basepath for absolute URLs (default: /)')
parser.add_option('-v', '--verbose',
default=False, action='store_true',
help='talk more')
options, args = parser.parse_args()
if options.verbose:
global VERBOSE
VERBOSE = True
if options.create_configs:
if len(args) == 0:
parser.error('input directory must be specified')
return 2
input_dir = args[0]
create_configs(input_dir)
else:
if len(args) == 0:
parser.error('input and output directories must be specified')
return 2
if len(args) == 1:
parser.error('output directory must be specified')
return 2
input_dir = args[0]
output_dir = args[1]
basepath = '/' + options.basepath.strip('/')
config = load_config(input_dir)
generate_albums(config, input_dir, output_dir, basepath)
return 0
# }}}
if __name__ == '__main__':
try:
sys.exit(main())
except KeyboardInterrupt:
sys.exit(1)
# vim:et ts=4 sts=4 sw=4 foldmethod=marker
``` |
{
"source": "johnnovak/twyg",
"score": 2
} |
#### File: doc/examples/generate-examples.py
```python
import os
from twyg import get_scale_factor, generate_output
DATA_PATH = '../../example-data'
OUT_PATH = '.'
DA = 'google-analytics'
DC = 'cocoa'
DG = 'goals'
DM = 'metrics'
DN = 'animals'
DS = 'six-thinking-hats'
DU = 'guitars'
DW = 'wind-instruments'
DY = 'synthesis'
configs = [
{'boxes': [{ 'data': DC, 'colors': 'kelp' },
{ 'data': DG, 'colors': '' },
{ 'data': DM, 'colors': 'moon' }]
},
{'bubbles': [{ 'data': DA, 'colors': 'inca' },
{ 'data': DM, 'colors': '' },
{ 'data': DS, 'colors': 'neo' }]
},
{'edge' : [{ 'data': DC, 'colors': 'aqua' },
{ 'data': DG, 'colors': 'azure' },
{ 'data': DU, 'colors': '' }]
},
{'flowchart': [{ 'data': DA, 'colors': 'inca' },
{ 'data': DM, 'colors': '' },
{ 'data': DW, 'colors': 'jelly' }]
},
{'hive': [{ 'data': DG, 'colors': 'jelly' },
{ 'data': DS, 'colors': '' },
{ 'data': DY, 'colors': 'mango' }]
},
{'ios': [{ 'data': DM, 'colors': 'milkshake' },
{ 'data': DS, 'colors': 'honey' },
{ 'data': DY, 'colors': '' }]
},
{'jellyfish': [{ 'data': DU, 'colors': '' },
{ 'data': DY, 'colors': 'quartz' },
{ 'data': DN, 'colors': 'colors21' }]
},
{'junction1': [{ 'data': DN, 'colors': 'forest' },
{ 'data': DM, 'colors': 'clay' },
{ 'data': DW, 'colors': '' }]
},
{'junction2': [{ 'data': DN, 'colors': 'mango' },
{ 'data': DU, 'colors': '' },
{ 'data': DW, 'colors': 'salmon' }]
},
{'lines': [{ 'data': DN, 'colors': '' },
{ 'data': DA, 'colors': 'merlot' },
{ 'data': DM, 'colors': 'azure' }]
},
{'modern': [{ 'data': DN, 'colors': '' },
{ 'data': DM, 'colors': 'mustard' },
{ 'data': DY, 'colors': 'cobalt' }]
},
{'nazca': [{ 'data': DC, 'colors': 'earth' },
{ 'data': DM, 'colors': 'aqua' },
{ 'data': DY, 'colors': '' }]
},
{'rounded': [{ 'data': DG, 'colors': '' },
{ 'data': DA, 'colors': 'orbit' },
{ 'data': DM, 'colors': 'grape' }]
},
{'square': [{ 'data': DN, 'colors': 'quartz' },
{ 'data': DC, 'colors': 'crayons' },
{ 'data': DU, 'colors': '' }]
},
{'synapse': [{ 'data': DC, 'colors': 'kelp' },
{ 'data': DA, 'colors': 'mint' },
{ 'data': DM, 'colors': '' }]
},
{'tron': [{ 'data': DC, 'colors': '' },
{ 'data': DM, 'colors': 'mellow' },
{ 'data': DY, 'colors': 'colors21' }]
}
]
def generate_examples(outformat, dpi):
for c in configs:
config_fname = c.keys()[0]
params = c.values()[0]
for p in params:
data_fname = os.path.join(DATA_PATH, p['data'] + '.json')
colorscheme = p['colors']
out_fname = [config_fname]
if colorscheme:
out_fname.append(colorscheme)
out_fname.append(os.path.basename(os.path.splitext(data_fname)[0]))
out_fname = os.path.join(OUT_PATH, outformat,
'-'.join(out_fname) + '.' + outformat)
print "Generating '%s'..." % out_fname,
            scale = get_scale_factor(dpi, 1.0)
generate_output(data_fname, config_fname, out_fname, outformat,
colorscheme=colorscheme, scale=scale)
print 'OK'
generate_examples('pdf', 72)
generate_examples('svg', 72)
generate_examples('ps', 72)
generate_examples('png', 150)
```
#### File: doc/figures/connections-curve-cx.py
```python
import os, sys
from fig import *
from twyg.cairowrapper import context as ctx
def drawconn(ctx, linewidth_start, linewidth_end, x1, y1, x2, y2,
cx1, cx2, cy1, cy2):
ctx.strokewidth(linewidth_end)
x2 -= linewidth_end / 2
cx1 = (x2 - x1) * cx1
cx2 = (x2 - x1) * cx2
cy1 = (y2 - y1) * cy1
cy2 = (y2 - y1) * cy2
startwidth = linewidth_start - 1
sw = startwidth / 2.
p1x = x1 + cx1
p1y = y1 + cy1
p2x = x2 - cx2
p2y = y2 - cy2
ctx.beginpath(x1, y1 - sw)
ctx.curveto(p1x, p1y, p2x, p2y, x2, y2)
ctx.curveto(p2x, p2y, p1x, p1y, x1, y1 + sw)
ctx.endpath()
ctx.nostroke()
ctx.fill(1, 0, 0)
ctx.oval(p1x - 3, p1y - 3, 6, 6)
ctx.fill(0, 1, 0)
ctx.oval(p2x - 3, p2y - 3, 6, 6)
init_surface(500, 150, scale=0.8)
ctx.background(ctx.color(1))
ctx.stroke(.3)
ctx.fill(.3)
#drawconn(ctx, 20, 3,
# 20, 20, 250, 120,
# 0.7, 0.28, 0.1, 0.2)
drawconn(ctx, 3, 3,
20, 20, 250, 120,
0.2, 0, 1.0, 0.0)
ctx.writesurface()
```
#### File: doc/source-images/twyg-tree-logo.py
```python
background(None)
size(590, 430)
lsystem = ximport("lsystem")
stroke(0.122, 0.545, 0.553, 1)
def segment(length, generations, time, id):
if generations > 0:
strokewidth(generations ** 2.1)
line(0, 0, 0, -length)
tree = lsystem.strong()
tree.segment = segment
tree.draw(290, 390, 6)
```
#### File: twyg/twyg/cairowrapper.py
```python
import cairo, colorsys, math
class Color(object):
def __init__(self, c1, c2, c3, a, mode='rgb'):
c1 = min(max(0.0, c1), 1.0)
c2 = min(max(0.0, c2), 1.0)
c3 = min(max(0.0, c3), 1.0)
a = min(max(0.0, a), 1.0)
if mode == 'rgb':
self.r = c1
self.g = c2
self.b = c3
self.a = a
self._update_hsv()
elif mode == 'hsv':
self.h = c1
self.s = c2
self.v = c3
self.a = a
self._update_rgb()
else:
raise ValueError, 'Invalid color mode: ' + mode
def __repr__(self):
return 'Color(r=%.3f, g=%.3f, b=%.3f, a=%.3f)' % (self.r, self.g, self.b, self.a)
def copy(self):
return Color(self.r, self.g, self.b, self.a)
def rgba(self):
return (self.r, self.g, self.b, self.a)
def darken(self, step=0.1):
return Color(self.h, self.s, self.v - step, self.a, mode='hsv')
def lighten(self, step=0.1):
return Color(self.h, self.s, self.v + step, self.a, mode='hsv')
def blend(self, clr, factor=0.5):
r = self.r * (1.0 - factor) + clr.r * factor
g = self.g * (1.0 - factor) + clr.g * factor
b = self.b * (1.0 - factor) + clr.b * factor
a = self.a * (1.0 - factor) + clr.a * factor
return Color(r, g, b, a)
def _update_hsv(self):
self.h, self.s, self.v = colorsys.rgb_to_hsv(self.r, self.g, self.b)
def _update_rgb(self):
self.r, self.g, self.b = colorsys.hsv_to_rgb(self.h, self.s, self.v)
def color(*args):
# Only K(A) & RGB(A) modes are supported, HSB(A) & CMYK(A) are not
n = len(args)
if n == 1:
r = g = b = args[0]
a = 1.0
elif n == 2:
r = g = b = args[0]
a = args[1]
elif n == 3:
r, g, b = args
a = 1.0
elif n == 4:
r, g, b, a = args
else:
raise ValueError, "Invalid color value: '%s'" % args
r = min(max(0.0, r), 1.0)
g = min(max(0.0, g), 1.0)
b = min(max(0.0, b), 1.0)
a = min(max(0.0, a), 1.0)
return Color(r, g, b, a)
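# e.g. color(0.5) -> opaque mid-grey, color(1, 0, 0, 0.5) -> half-transparent red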
#=============================================================================#
#= NODEBOX COMMANDS =#
#=============================================================================#
class Context(object):
def __init__(self):
self._backgroundcolor = None
self._fillcolor = None
self._strokecolor = None
self._strokewidth = 1.0
self._autoclosepath = True
self._fontname = 'Helvetica'
self._fontsize = 12.0
self._lineheight = 1.5
self._shadow = False
self._shadow_dx = 0
self._shadow_dy = 0
self._shadow_radius = 3
self._shadow_color = color(0, 0, 0, 1)
self._shadow_blur_passes = 2
self._bitmap_dpi = 150
# TODO call on init
    def init(self):
self.font(self._fontname, self._fontsize)
self.strokewidth(self._strokewidth)
### SHAPE #################################################################
def rect(self, x, y, width, height, roundness=0.0, draw=True):
# Negative width & height behaviour not implemented
# Formula for rounded rectangle taken from NodeBox 1 source code
c = self._ctx
if roundness == 0:
c.rectangle(x, y, width, height)
else:
curve = min(width * roundness, height * roundness)
xw = x + width
yh = y + height
c.move_to(x, y + curve)
c.curve_to(x, y, x, y, x + curve, y)
c.line_to(xw - curve, y)
c.curve_to(xw, y, xw, y, xw, y + curve)
c.line_to(xw, yh - curve)
c.curve_to(xw, yh, xw, yh, xw - curve, yh)
c.line_to(x + curve, yh)
c.curve_to(x, yh, x, yh, x, yh - curve)
c.close_path()
if draw:
self._draw()
else:
path = c.copy_path()
c.new_path()
return path
def oval(self, x, y, width, height, draw=True):
c = self._ctx
# Negative width & height behaviour not implemented
if width == 0 or height == 0:
return
cx = x + width / 2.
cy = y + height / 2.
r = width / 2.
yscale = float(height) / width
c.new_path()
c.save()
c.scale(1, yscale)
c.arc(cx, cy / yscale, r, 0, 2 * math.pi)
c.restore()
if draw:
self._draw()
else:
path = c.copy_path()
c.new_path()
return path
def line(self, x1, y1, x2, y2, draw=True):
c = self._ctx
c.move_to(x1, y1)
c.line_to(x2, y2)
if draw:
self._draw_stroke()
else:
path = c.copy_path()
c.new_path()
return path
def arrow(x, y, width, type, draw=True):
raise NotImplementedError
def star(x, y, points=20, outer=100, inner=50, draw=True):
raise NotImplementedError
### PATH ##################################################################
def beginpath(self, x, y):
self._ctx.move_to(x, y)
def moveto(self, x, y):
self._ctx.move_to(x, y)
def lineto(self, x, y):
self._ctx.line_to(x, y)
def curveto(self, x1, y1, x2, y2, x3, y3):
self._ctx.curve_to(x1, y1, x2, y2, x3, y3)
def findpath(list, curvature=1.0):
raise NotImplementedError
def endpath(self, draw=True):
if self._autoclosepath:
self._ctx.close_path()
if draw:
self._draw()
else:
path = self._ctx.copy_path()
self._ctx.new_path()
return path
def drawpath(self, path):
self._ctx.append_path(path)
self._draw()
def beginclip(self, path):
self._ctx.save()
self._ctx.new_path()
self._ctx.append_path(path)
self._ctx.clip()
def endclip(self):
self._ctx.restore()
def autoclosepath(self, close=True):
self._autoclosepath = close
### TRANSFORM #############################################################
def transform(mode):
raise NotImplementedError
def translate(self, x, y):
self._ctx.translate(x, y)
def rotate(self, degrees=0.0, radians=0.0):
if degrees != 0:
radians = degrees * math.pi / 180
        self._ctx.rotate(radians)
def scale(self, x, y=None):
        if y is None:
            y = x
self._ctx.scale(x, y)
def skew(x, y=None):
raise NotImplementedError
def push(self):
self._ctx.save()
def pop(self):
self._ctx.restore()
def reset(self):
self._ctx.identity_matrix()
### COLOR #################################################################
def outputmode(self, mode):
# Not implemented; always RGB
raise NotImplementedError
def colormode(self, mode):
pass
def color(self, *args):
return color(*args)
def fill(self, *args):
self._fillcolor = self._make_color_obj(*args)
def nofill(self):
self._fillcolor = None
def stroke(self, *args):
self._strokecolor = self._make_color_obj(*args)
def nostroke(self):
self._strokecolor = None
def strokewidth(self, width):
self._ctx.set_line_width(width)
def background(self, *args):
# Transparent background
if len(args) == 1 and args[0] == None:
return
col = self._make_color_obj(*args)
self._backgroundcolor = col
c = self._ctx
c.set_source_rgba(*col.rgba())
c.rectangle(0, 0, self._width, self._height)
c.fill()
### TYPOGRAPHY ############################################################
def font(self, fontname, fontsize=None):
self._ctx.select_font_face(fontname, cairo.FONT_SLANT_NORMAL,
cairo.FONT_WEIGHT_NORMAL)
self._fontname = fontname
if fontsize:
self.fontsize(fontsize)
def fontsize(self, fontsize):
self._ctx.set_font_size(fontsize)
self._fontsize = fontsize
def text(self, txt, x, y):
# width, height & outline not implemented
c = self._ctx
c.set_source_rgba(*self._fillcolor.rgba())
c.move_to(x, y)
c.show_text(txt)
def textpath(txt, x, y, width=None, height=1000000):
raise NotImplementedError
def textwidth(self, txt):
width, height = self.textmetrics(txt)
return width
def textheight(self, txt):
width, height = self.textmetrics(txt)
return height
def textmetrics(self, txt):
(ascent, descent, height,
max_x_advance, max_y_advance) = self._ctx.font_extents()
linewidth = self._ctx.text_extents(txt)[4]
return linewidth, height + descent
def lineheight(self, height=None):
if height:
self._lineheight = height
return self._lineheight
def align(self, align):
raise NotImplementedError
### IMAGE #################################################################
def image(path, x, y, width=None, height=None, alpha=1.0, data=None):
raise NotImplementedError
def imagesize(path):
raise NotImplementedError
### UTILITY ###############################################################
def size(w, h):
raise NotImplementedError
def var(name, type, default, min, max):
raise NotImplementedError
def random(v1=None, v2=None):
raise NotImplementedError
def choice(list):
raise NotImplementedError
def grid(cols, rows, colsize=1, rowsize=1):
raise NotImplementedError
def files(path):
raise NotImplementedError
def autotext(xml):
raise NotImplementedError
#=========================================================================#
#= COLORS LIBRARY =#
#=========================================================================#
def rgba_color(self, c):
return self.color(*c)
def gradientfill(self, path, clr1, clr2, dx=0.0, dy=0.0,
type='linear',spread=1.0):
c = self._ctx
c.append_path(path)
x1, y1, x2, y2 = c.fill_extents()
pat = cairo.LinearGradient(0, y1, 0, y2)
pat.add_color_stop_rgba(1, *clr1.rgba())
pat.add_color_stop_rgba(0, *clr2.rgba())
if self._shadow:
self._draw_shadow()
c.set_source(pat)
if self._strokecolor:
c.fill_preserve()
c.set_source_rgba(*self._strokecolor.rgba())
c.stroke()
else:
c.fill()
def shadow(self, dx=0.0, dy=0.0, blur=3.0, clr=color(0, 0, 0, 1)):
self._shadow_dx = dx
self._shadow_dy = dy
self._shadow_radius = blur / 2
self._shadow_color = clr
self._shadow = True
def noshadow(self):
self._shadow = False
#=========================================================================#
#= HELPER FUNCTIONS =#
#=========================================================================#
def initsurface(self, w, h, fmt, fname=None, scale=1.0):
self._width = w
self._height = h
w *= scale
h *= scale
if fmt == 'pdf':
self._surface = cairo.PDFSurface(fname, w, h)
elif fmt == 'svg':
self._surface = cairo.SVGSurface(fname, w, h)
elif fmt == 'png':
w = int(w + .5)
h = int(h + .5)
self._surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, w, h)
elif fmt == 'ps':
self._surface = cairo.PSSurface(fname, w, h)
else:
raise ValueError, "Invalid output format: '%s'" % (fmt)
self._format = fmt
self._filename = fname
self._ctx = cairo.Context(self._surface)
self._ctx.scale(scale, scale)
def writesurface(self):
if self._format == 'png':
self._surface.write_to_png(self._filename)
else:
self._ctx.show_page()
def _make_color_obj(self, *args):
if len(args) == 1 and type(args[0]).__name__ == 'Color':
return args[0]
else:
return self.color(*args)
def _draw_stroke(self):
c = self._ctx
if self._strokecolor:
c.set_source_rgba(*self._strokecolor.rgba())
c.stroke()
def _draw(self):
c = self._ctx
if self._fillcolor:
if self._shadow:
self._draw_shadow()
c.set_source_rgba(*self._fillcolor.rgba())
if self._strokecolor:
c.fill_preserve()
c.set_source_rgba(*self._strokecolor.rgba())
c.stroke()
else:
c.fill()
else:
self._draw_stroke()
def _draw_shadow(self):
c = self._ctx
img, padding = self._render_bitmap_shadow()
x1, y1, x2, y2 = c.fill_extents()
dpi_scale = 72.0 / self._bitmap_dpi
c.save()
c.set_source_rgba(*self._shadow_color.rgba())
c.translate(x1 + self._shadow_dx, y1 + self._shadow_dy)
c.scale(dpi_scale, dpi_scale)
c.translate(-padding, -padding)
c.mask_surface(img, 0, 0)
c.restore()
def _render_bitmap_shadow(self):
# 'Moving average' subpixel resolution box filter implementation
# based on Ryg's posts on fast blurs:
#
# http://fgiesen.wordpress.com/2012/07/30/fast-blurs-1/
# http://fgiesen.wordpress.com/2012/08/01/fast-blurs-2/
#
# Note: Shadows doesn't work properly for SVG output as shadow
# bitmaps don't get translated correctly but are all drawn at
# the origin.
dpi_scale = self._bitmap_dpi / 72.0
radius = self._shadow_radius * dpi_scale
# With 3 passes we get a good approximation of Gaussian blur
# within a 3% error margin (bicubic blur), which is good enough
# for practical purposes.
# 1 - box filter
# 2 - triangle filter
# 3 - piecewise quadratic filter
# 4 - piecewise cubic filter
passes = self._shadow_blur_passes
# Integer part of radius
m = int(radius)
# Fractional part of radius
alpha = radius - m
scale = 1.0 / (2 * radius + 1)
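        # e.g. radius=4.6 gives m=4, alpha=0.6 and a window weight of
        # 1 / (2 * 4.6 + 1) ~= 0.098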
# Calculate the padding required for the blur around the shape's
# bounding box. As we don't do any boundary checks when applying
# the filter, negative index values will wrap around to the end
# of the image buffer. Therefore, we need to make the padding a
# slightly larger than the blur radius to avoid visible wrapping
# effects around the edges, hence the 1.5 multiplier.
padding = int((m+2) * passes * 1.5 + 0.5)
# Calculate shape extents. x1, y1 will hold the offset from the
# origin.
c = self._ctx
x1, y1, x2, y2 = c.fill_extents()
# Add some extra padding (3) to the sides
width = int((x2 - x1) * dpi_scale + padding * 2 + 0.5) + 3
height = int((y2 - y1) * dpi_scale + padding * 2 + 0.5) + 3
# As we don't do any boundary checks when applying the filter,
# the buffer needs to be made N rows larger to prevent index out
# of range exceptions, where N is the maximum sampling radius
# (m+2 in this case). The buffer will be in ARGB32 format, so we
# need 4 bytes per pixel.
data = bytearray(width * (height + m+2) * 4)
# Create an image surface backed by our bytebuffer
img = cairo.ImageSurface.create_for_data(data, cairo.FORMAT_ARGB32,
width, height)
imgctx = cairo.Context(img)
# Draw the shape to be blurred offset from the origin, so
# there's space around it for the blur.
offsx = int(-x1 * dpi_scale + padding + 0.5)
offsy = int(-y1 * dpi_scale + padding + 0.5)
imgctx.translate(offsx, offsy)
imgctx.scale(dpi_scale, dpi_scale)
imgctx.append_path(c.copy_path())
# Draw the shape with full opacity; the alpha value will be used
# later when we blit the blurred image onto the target surface.
col = self._shadow_color.copy()
col.a = 1.0
imgctx.set_source_rgba(*col.rgba())
imgctx.fill()
# Horizontal passes (blur the alpha channel only)
row = bytearray(width * 4)
for y in range(0, height):
for p in range(passes):
yoffs = y * width * 4 + 3
sum_ = data[yoffs]
for x in range(m):
sum_ += data[yoffs - x*4] + data[yoffs + x*4]
sum_ += alpha * data[yoffs - m*4] + data[yoffs + m*4]
for x in range(width):
a = int(sum_ * scale)
row[x*4] = a
a = data[yoffs + (x+m+1)*4]
b = data[yoffs + (x+m+2)*4]
sum_ += a + alpha * (b - a)
a = data[yoffs + (x-m)*4]
b = data[yoffs + (x-m-1)*4]
sum_ -= a + alpha * (b - a)
data[yoffs:yoffs + width*4] = row
# Vertical passes (blur the alpha channel only)
col = bytearray(height)
for x in range(width):
for p in range(passes):
xoffs = x*4+3
sum_ = data[xoffs]
for y in range(m):
sum_ += data[xoffs - y*width*4] + data[xoffs + y*width*4]
sum_ += alpha * data[xoffs - m*width*4] + data[xoffs + m*width*4]
for y in range(0, height):
a = int(sum_ * scale)
col[y] = a
a = data[xoffs + (y+m+1)*width*4]
b = data[xoffs + (y+m+2)*width*4]
sum_ += a + alpha * (b - a)
a = data[xoffs + (y-m)*width*4]
b = data[xoffs + (y-m-1)*width*4]
sum_ -= a + alpha * (b - a)
for y in range(1, height - 1):
data[xoffs + y*width*4] = col[y]
return img, padding
context = Context()
```
#### File: twyg/twyg/cmdline.py
```python
import os
import sys
import traceback
from optparse import OptionParser
from twyg import get_scale_factor, generate_output
from twyg.common import validate_margins
def exit_error(msg):
print >>sys.stderr, sys.argv[0] + ': ' + msg
sys.exit(1)
def main():
usage = 'Usage: %prog [OPTIONS] DATAFILE OUTFILE'
parser = OptionParser(usage=usage)
parser.add_option('-c', '--config',
default='default',
dest='config', metavar='NAME',
help='configuration to use [default: %default]')
parser.add_option('-o', '--colorscheme',
dest='colorscheme', metavar='NAME',
help='colorscheme to use')
parser.add_option('-d', '--dpi',
default='72.0', type='float',
help=('output resolution (PNG) or shadow rasterisation '
'resolution (PDF, PS and SVG) [default: %default]'))
parser.add_option('-m', '--margin',
default='10%,5%',
help=('margins in TOP,RIGHT,BOTTOM,LEFT or VERT,HORIZ '
'or MARGIN format, either as absolute units '
'(points) or percentages [default: %default]'))
parser.add_option('-v', '--verbose',
default=False, action='store_true',
help='display stack trace on error')
parser.add_option('-s', '--scale',
default='1.0', type='float',
help=('scale factor (multiplier or percentage) '
'[default: %default]'))
options, args = parser.parse_args()
if len(args) == 0:
parser.error('input and output files must be specified')
return 2
if len(args) == 1:
parser.error('output file must be specified')
return 2
datafile = args[0]
outfile = args[1]
ext = os.path.splitext(outfile)[1][1:].lower()
if ext in ('pdf', 'png', 'ps', 'svg'):
options.outformat = ext
else:
parser.error('invalid output format: %s' % ext)
return 2
if options.dpi <= 0:
parser.error('DPI value must be greater than 0')
return 2
if options.scale <= 0:
parser.error('scale value must be greater than 0')
return 2
# Validate margin values
margins = options.margin.split(',')
try:
validate_margins(margins)
except ValueError, e:
parser.error(e)
return 2
try:
scale = get_scale_factor(options.dpi, options.scale)
generate_output(datafile, options.config, outfile, options.outformat,
colorscheme=options.colorscheme, scale=scale,
margins=margins)
except Exception, e:
exit_error(traceback.format_exc() if options.verbose else str(e))
return 0
```
#### File: twyg/twyg/config.py
```python
import math, os, re, sys
import operator as _operator
# TODO what happens in NodeBox?
from pkg_resources import resource_filename
try:
# Python 2.7+
from collections import OrderedDict
except ImportError:
# Python 2.4-2.6 & Nodebox 1
from twyg.ordereddict import OrderedDict
from twyg.css3colors import color_to_rgba, colornames
from twyg.tree import Direction
import twyg.common
CONF_EXT = '.twg'
DEFAULTS_DIR = 'defaults'
COLORS_DIR = 'colors'
CONFIGS_DIR = 'configs'
DEFAULT_LEVEL = '$defaultLevel'
# This is a high-level description of the config parsing process:
#
# 1. Load configuration (``loadconfig``)
#
# 1.1 Read in the whole config file and tokenize it line by line using
# regexps (``_tokenize_file``)
#
# 1.2 Build config data structure by running an FSM parser on the
# resulting tokens (``buildconfig``). @copy and @include directives
# are fully resolved during the parsing process:
#
# - @copy directives are expanded.
#
# - If an @include directive is encountered, the referenced config file
# is loaded and tokenized by ``_tokenize_file`` and then recursively
# parsed by ``buildconfig``.
#
# 2. TODO
#
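# Rough usage sketch (filename hypothetical, names as defined below):
#   tokens = tokenize(open('style.twg').read(), file='style.twg')
#   config = buildconfig(tokens)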
class ConfigError(Exception):
""" Exception for displaying configuration error messages in a
normalized format.
"""
def __init__(self, msg, token=None, file=None, line=None, col=None):
self.msg = msg
if token:
self.file = token.file
self.line = token.line
self.col = token.col
else:
self.file = file
self.line = line
self.col = col
def __str__(self):
return ("Error in configuration file '%s' on line %s at column %s: "
"\n %s" % (self.file, self.line, self.col, self.msg))
##############################################################################
# Tokenizer
##############################################################################
class Pattern(object):
""" Token ID and regexp pattern pair used for tokenization.
"""
def __init__(self, id, pattern):
self.id = id
self.pattern = pattern
# Tokenization rules used for tokenizing a config file.
# The ordering is important.
rules = [
Pattern('(whitespace)', r'^([ \t]+)'),
Pattern('(comment)', r'^(--.*)'),
Pattern('(section)', r'^\[([a-zA-Z]+)\]'),
Pattern('(level)', r'^{([a-zA-Z][a-zA-Z0-9_]*)}'),
Pattern('(directive)', r'^@([a-zA-Z]+)'),
Pattern('(float)', r'^([0-9]+\.[0-9]*|[0-9]*\.[0-9]+)'),
Pattern('(percent)', r'^([0-9][0-9]*%)'),
Pattern('(integer)', r'^([0-9][0-9]*)'),
Pattern('(operator)', r'^(\+|-|\*|/|\^|\(|\)|\[|\]|\.|,)'),
Pattern('(name)', r'^([a-zA-Z][a-zA-Z0-9_]*)'),
Pattern('(hexcolor)', r'^(#[a-zA-Z0-9]+)'),
Pattern('(string)', r'^"([^"\\]*(?:\\.[^"\\]*)*)"')
]
def tokenize(config, file=None, flat=False):
""" Convert a configuration into a list of tokens that can then be
parsed further.
"""
    lines = config.split('\n')
def linenum(line_nr, flat):
return line_nr if flat else line_nr + 1
tokens = []
for line_nr, line in enumerate(lines):
line = line.strip()
if not line:
sym = symbol('(newline)')
tokens.append(sym())
continue
col = 1
while line:
found = False
for r in rules:
m = re.match(r.pattern, line)
if m:
val = m.group(1)
id = r.id
if id != '(whitespace)' and id != '(comment)':
if id == '(operator)':
sym = symbol_table.get(val)
if not sym:
raise ConfigError(
"Syntax error: unknown operator: '%s'"
% val, file=file,
line=linenum(line_nr, flat), col=col)
else:
sym = symbol_table[id]
s = sym()
s.value = val
s.file = file
s.line = linenum(line_nr, flat)
s.col = col
tokens.append(s)
end = m.end(0)
line = line[end:]
col += end
found = True
break
if not found:
raise ConfigError("Syntax error",
file=file, line=linenum(line_nr, flat),
col=col)
sym = symbol('(newline)')
tokens.append(sym())
return tokens
##############################################################################
# Config level FSM parser
##############################################################################
def buildconfig(tokens, cwd=None, state='start', config=None, curr=None,
curr_section=None, curr_level=None, section_props=False,
prev_configs=[]):
""" Build the final config dict from the results of the config file
tokenization step.
The implementation is a simple FSM parser with some internal state.
Most of the complexity comes from the error handling and the
building of meaningful error messages.
@copy directives are fully expanded. If an @include directive is
encountered, tokenize the included config file and recursively call
buildconfig on the result.
Below is a simple config file and the corresponding data structure
as built by this function. Note that the tokenization step is not
handled by this function.
[connection]
style junction
linewidth 3
cornerRadius 10
cornerStyle rounded
[node]
{normal}
style rect
strokeWidth 3
cornerRadius 40
{root}
@copy normal
levelDepthMax 0
cornerRadius 80
{leaf}
@copy normal
levelNumChildrenMax 0
cornerRadius 1
-----------------------------------------------------------------
{
'connection': OrderedDict([
('style', [((name) junction), ((end) None)]),
('linewidth', [((integer) 3), ((end) None)]),
('cornerRadius', [((integer) 10), ((end) None)]),
('cornerStyle', [((name) rounded), ((end) None)])
        ]),
'node': OrderedDict([
('normal', {
'style': [((name) rect), ((end) None)],
'strokeWidth': [((integer) 3), ((end) None)],
'cornerRadius': [((integer) 40), ((end) None)]
}),
('root', {
'style': [((name) rect), ((end) None)],
'strokeWidth': [((integer) 3), ((end) None)],
'levelDepthMax': [((integer) 0), ((end) None)],
'cornerRadius': [((integer) 80), ((end) None)]
}),
('leaf', {
'style': [((name) rect), ((end) None)],
'strokeWidth': [((integer) 3), ((end) None)],
'levelNumChildrenMax': [((integer) 0), ((end) None)],
'cornerRadius': [((integer) 1), ((end) None)]
})
])
}
"""
def isliteral(id):
return id in ('(operator)', '(float)', '(percent)', '(integer)',
'(name)', '(hexcolor)', '(string)')
# TODO Python bug ???
if not config:
config = dict()
for t in tokens:
if state == 'start':
if t.id == '(newline)':
pass
elif t.id == '(section)':
state = 'section'
curr_section = t.value
# The order of the levels within a section must be
# retained
curr = config[curr_section] = OrderedDict()
curr_level = None
section_props = False
else:
raise ConfigError('Configuration must start with a '
'section definition', t)
elif state == 'section':
if t.id == '(newline)':
state = 'in_section'
else:
raise ConfigError("Section definition '%s' must be followed "
'by a newline' % curr_section, t)
elif state == 'in_section':
if t.id == '(newline)':
pass
elif t.id == '(name)':
state = 'property'
name = t.value
value = []
if not curr_level:
section_props = True
elif t.id == '(section)':
section = t.value
if section in config:
raise ConfigError('Duplicate section definition '
"'%s'" % section, t)
state = 'section'
curr_section = section
# The order of the levels within a section must be
# retained
curr = config[curr_section] = OrderedDict()
curr_level = None
section_props = False
elif t.id == '(level)':
level = t.value
if section_props:
raise ConfigError("Invalid level definition '%s' in "
"section '%s':\n"
"\tlevel definitions are "
"not allowed after section level "
"properties"
% (level, curr_section), t)
if level in config[curr_section]:
raise ConfigError("Duplicate level name '%s' in "
"section '%s'"
% (level, curr_section), t)
state = 'level'
curr_level = level
curr = config[curr_section][curr_level] = {}
elif t.id == '(directive)':
d = t.value
if d not in ('include', 'copy'):
raise ConfigError("Invalid directive: '%s'" % d, t)
state = 'directive'
name = t.value
value = []
else:
raise ConfigError('Property name, level definition or '
'directive expected', t)
elif state == 'level':
if t.id == '(newline)':
state = 'in_section'
else:
raise ConfigError("Level definition '%s' in section '%s' "
'must be followed by a newline'
% (curr_level, curr_section), t)
elif state == 'directive':
if t.id == '(newline)':
if not value:
p = prevtoken
raise ConfigError("Missing parameter for directive '%s'"
% name, p)
state = 'in_section'
param = ''.join([v.value for v in value])
if name == 'include':
try:
configpath = include_path(os.path.join(cwd, param))
if configpath in prev_configs:
raise ConfigError(
"Error while processing '%s' directive:\n"
"\tCircular reference detected when "
"attempting to include '%s'"
% (name, configpath), prevtoken)
tokens, cwd = _tokenize_file(configpath, flat=False)
except IOError, e:
raise ConfigError(
"Error while processing '%s' directive:\n"
"\t%s: '%s'" % (name, e.strerror, e.filename),
prevtoken)
prev_configs.append(configpath)
buildconfig(tokens, cwd, state, config, curr,
curr_section, curr_level, section_props,
prev_configs)
elif name == 'copy':
level = param
if level not in config[curr_section]:
t = prevtoken
raise ConfigError(
"Error while processing '%s' directive:\n"
"\tLevel '%s' does not exist in section '%s'"
% (name, level, curr_section), t)
curr.update(config[curr_section][level])
elif isliteral(t.id):
value.append(t)
else:
raise ConfigError('Invalid directive syntax', t)
elif state == 'property':
if t.id == '(newline)':
raise ConfigError("Missing property expressions for property "
"'%s'" % name, prevtoken)
if t.isoperator('['):
state = 'array'
value.append(t)
elif isliteral(t.id):
state = 'in_property'
value.append(t)
else:
raise ConfigError("Property expressions cannot start with "
"'%s'" % t.value, t)
elif state == 'in_property':
if isliteral(t.id):
value.append(t)
elif t.id == '(newline)':
state = 'in_section'
sym = symbol('(end)')
value.append(sym())
curr[name] = value
else:
raise ConfigError("Syntax error in property expressions '%s'"
% name, t)
elif state == 'array':
if t.id == '(newline)':
pass
elif t.isoperator('['):
raise ConfigError('Arrays cannot be nested', t)
elif t.isoperator(']'):
state = 'end_array'
value.append(t)
elif isliteral(t.id):
value.append(t)
else:
raise ConfigError("Syntax error in property expressions '%s'"
% name, t)
elif state == 'end_array':
if t.id == '(newline)':
state = 'in_section'
sym = symbol('(end)')
value.append(sym())
curr[name] = value
else:
                raise ConfigError("End of array symbol ']' must be followed "
'by a newline', t)
prevtoken = t
return config
def _tokenize_file(file, flat=False):
""" Tokenize a config file.
Returns the list of tokens and the directory the config file resides
in (this will be used for processing the @include directives).
"""
f = open(file)
config = f.read()
if flat:
config = '[default]\n' + config
tokens = tokenize(config, file, flat=flat)
cwd = os.path.dirname(file)
return tokens, cwd
def loaddefaults(defaults):
return loadconfig(defaults_path(defaults), flat=True)
def loadconfig(file, flat=False):
""" Tokenize a config file.
If ``flat`` is true, all properties will be placed in a section
called 'default'. This should only be used when tokenizing config
defaults that don't contain a section definition.
# TODO's
See buildconfig for a detailed description of the returned config
data structure.
"""
tokens, cwd = _tokenize_file(file, flat)
config = buildconfig(tokens, cwd=cwd, prev_configs=[file])
if flat:
config = config['default']
return config
def find_config(paths, name):
""" Find a config file.
``paths`` contains a list of search paths, including the name of the
    config. The function first tries to find the config as specified in
    the path, then tries with the CONF_EXT extension appended at the end.
"""
for p in paths:
if os.path.exists(p):
return p
p2 = p + CONF_EXT
if os.path.exists(p2):
return p2
raise ConfigError("Cannot open %s file: '%s'" % (name, p))
def defaults_path(configname):
conf = os.path.join(DEFAULTS_DIR, configname)
home_conf = os.path.join(twyg.common.TWYG_HOME, conf)
paths = [
home_conf,
resource_filename(__name__, conf)
]
return find_config(paths, 'defaults config')
def colors_path(configname):
colors_conf = os.path.join(COLORS_DIR, configname)
home_colors_conf = os.path.join(twyg.common.TWYG_HOME, colors_conf)
paths = [
configname,
home_colors_conf,
resource_filename(__name__, colors_conf)
]
return find_config(paths, 'colorscheme config')
def config_path(configname):
configs_conf = os.path.join(CONFIGS_DIR, configname)
home_configs_conf = os.path.join(twyg.common.TWYG_HOME, configs_conf)
paths = [
configname,
home_configs_conf,
resource_filename(__name__, configs_conf)
]
return find_config(paths, 'config')
def include_path(configname):
return find_config([configname], 'included config')
##############################################################################
# Pratt expression parser
##############################################################################
# Top-down operator-precedence parser based heavily on <NAME>'s
# excellent article on Pratt parsers:
#
# http://effbot.org/zone/simple-top-down-parsing.htm
#
# Added type checking, function calls and extensive error reporting on
# my own.
#
# Further references:
#
# http://eli.thegreenplace.net/2010/01/02/top-down-operator-precedence-parsing/
# http://javascript.crockford.com/tdop/tdop.html
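#
# A hedged usage sketch of the parser defined below (kept as a comment: the
# color helpers rely on the drawing context ``_ctx`` that twyg.common injects
# at runtime, so this is not meant to run at import time):
#
#   toks = tokenize('max(10, 2 * 21)')[:-1]   # drop the trailing (newline)
#   toks.append(symbol('(end)')())            # property values end with (end)
#   tree = parse_expr(toks)
#   value = eval_expr(tree)                   # -> 42.0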
def parsecolor(mode, *components):
""" Helper function to parse colors specified by their individual
component values using the CSS3 color parser.
"""
s = ', '.join([str(a) for a in components])
if mode:
s = mode + '(' + s + ')'
return _ctx.color(*color_to_rgba(s))
# Functions that are available in a config file
function_table = {
'abs': abs,
'ceil': math.ceil,
'floor': math.floor,
'log': math.log,
'log10': math.log10,
'max': max,
'min': min,
'pow': pow,
'round': round,
'sqrt': math.sqrt,
'rgb': lambda r, g, b: parsecolor('rgb', r, g, b),
'rgba': lambda r, g, b, a: parsecolor('rgba', r, g, b, a),
'hsl': lambda h, s, l: parsecolor('hsl', h, s, l),
'hsla': lambda h, s, l, a: parsecolor('hsla', h, s, l, a)
}
variable_table_defaults = {
}
variable_table = {
}
def init_variable_table_defaults():
# Make all named CSS3 colors available as color.<colorname> in the
# config file
def inject_css3_colors():
global variable_table_defaults
class Colors:
pass
col = Colors()
for name in colornames.keys():
setattr(col, name, parsecolor(None, name))
variable_table_defaults['color'] = col
inject_css3_colors()
class SymbolBase(object):
id = None
value = None
first = second = None
def nud(self):
raise ConfigError("Syntax error: '%s'" % self.value, self)
def led(self, *args):
raise ConfigError("Syntax error: unknown operator: '%s'"
% self.value, self)
def __repr__(self):
if self.id == '(operator)':
out = ["'" + self.value + "'", self.first, self.second]
out = map(str, filter(None, out))
return "(" + " ".join(out) + ")"
else:
return '(%s %s)' % (self.id, self.value)
def isoperator(self, op):
return self.id == '(operator)' and self.value == op
symbol_table = {}
def symbol(id, value=None, bp=0):
key = value if value else id
if key in symbol_table:
s = symbol_table[key]
else:
class s(SymbolBase): pass
s.__name__ = 'symbol-' + key
s.id = id
s.lbp = bp
s.value = value
symbol_table[key] = s
s.lbp = max(bp, s.lbp)
return s
def operator(op, bp=None):
return symbol('(operator)', op, bp)
def infix(op, bp):
def led(self, left):
self.first = left
self.second = expression(bp)
return self
operator(op, bp).led = led
def prefix(op, bp):
def nud(self):
self.first = expression(bp)
self.second = None
return self
operator(op).nud = nud
def method(s):
assert issubclass(s, SymbolBase)
def bind(fn):
setattr(s, fn.__name__, fn)
return bind
# Grammar description
infix('+', 10); infix('-', 10)
infix('*', 20); infix('/', 20); infix('%', 20)
prefix('+', 30); prefix('-', 30)
operator('.', 40); operator('[', 40); operator('(', 40)
@method(symbol('('))
def nud(self):
expr = expression()
advance(')')
return expr
operator(')'); operator(',')
@method(symbol('('))
def led(self, left):
self.first = left
self.second = []
if not token.isoperator(')'):
while 1:
self.second.append(expression())
if not token.isoperator(','):
break
advance(',')
advance(')')
return self
@method(symbol('.'))
def led(self, left):
self.first = left
self.second = token
advance()
return self
operator(']')
@method(symbol('['))
def nud(self):
self.first = []
if not token.isoperator(']'):
while 1:
if token.isoperator(']'):
break
self.first.append(expression())
if not token.isoperator(','):
break
advance(',')
advance(']')
return self
symbol('(section)')
symbol('(level)')
symbol('(directive)')
symbol('(newline)')
symbol('(end)')
nud = lambda self: self
symbol('(float)').nud = nud
symbol('(percent)').nud = nud
symbol('(integer)').nud = nud
symbol('(name)').nud = nud
symbol('(hexcolor)').nud = nud
symbol('(string)').nud = nud
# Evaluation rules
opnames = {
'add': '+',
'pos': '+',
'sub': '-',
'neg': '-',
'mul': '*',
'div': '/'
}
def unaryop(t, op):
try:
a = t.first.eval()
return op(a)
except TypeError, e:
raise ConfigError("Cannot use operator '%s' on type '%s'"
% (opnames[op.__name__], type(a).__name__), t)
def binaryop(t, op):
try:
a = t.first.eval()
b = t.second.eval()
# Ensure that an int/int division always results in a float
# result
if type(b) == int:
b = float(b)
return op(a, b)
except TypeError, e:
raise ConfigError("Cannot use operator '%s' on types '%s' and '%s'"
% (opnames[op.__name__], type(a).__name__,
type(b).__name__), t)
@method(symbol('+'))
def eval(self):
if self.second:
return binaryop(self, _operator.add)
else:
return unaryop(self, _operator.pos)
@method(symbol('-'))
def eval(self):
if self.second:
return binaryop(self, _operator.sub)
else:
return unaryop(self, _operator.neg)
symbol('*').eval = lambda self: binaryop(self, _operator.mul)
symbol('/').eval = lambda self: binaryop(self, _operator.div)
def isfunction(o):
return type(o).__name__ in ('function', 'instancemethod',
'builtin_function_or_method')
@method(symbol('('))
def eval(self):
if self.first.isoperator('.'):
dot_op = self.first
obj, attr = dot_operator(dot_op)
if not hasattr(obj, attr):
raise ConfigError("'%s' has no method named '%s'" %
(dot_op.first.value, attr), dot_op.second)
fn = getattr(obj, attr)
else:
fn = self.first.value
if fn not in function_table:
raise ConfigError("Function '%s' does not exist" % fn, self.first)
fn = function_table[fn]
args = self.second
a = [x.eval() for x in args]
try:
return fn(*a)
except TypeError, e:
raise ConfigError(str(e).capitalize(), self)
@method(symbol('.'))
def eval(self):
obj, attr = dot_operator(self)
if not hasattr(obj, attr):
raise ConfigError("'%s' has no property named '%s'"
% (self.first.value, attr), self.second)
a = getattr(obj, attr)
if isfunction(a):
raise ConfigError("'%s' is a method of '%s'; it cannot be used as "
'a property ' % (attr, self.first.value),
self.second)
return a
def dot_operator(t):
i = t.first.id
v = t.first.value
if i == '(name)':
if v not in variable_table:
raise ConfigError("Variable '%s' does not exist" % v, t)
obj = variable_table[v]
else:
obj = t.first.eval()
attr = t.second.value
return obj, attr
@method(symbol('['))
def eval(self):
args = self.first
a = [x.eval() for x in args]
return a
symbol('(float)').eval = lambda self: float(self.value)
symbol('(integer)').eval = lambda self: int(self.value)
symbol('(percent)').eval = lambda self: self.value
symbol('(hexcolor)').eval = lambda self: parsecolor(None, self.value)
symbol('(string)').eval = lambda self: self.value.replace('\\"', '"')
@method(symbol('(name)'))
def eval(self):
v = self.value
if v not in variable_table:
raise ConfigError("Variable '%s' does not exist" % v, self)
return variable_table[v]
# Pratt parser
def nexttoken():
global token, lasttoken
t = token
if t.id != '(end)':
lasttoken = t
token = next()
return t
def expression(rbp=0):
global token, lasttoken
t = nexttoken()
left = t.nud()
while rbp < token.lbp:
t = nexttoken()
left = t.led(left)
return left
def advance(value=None, id='(operator)'):
global token
if value and not (token.id == id and token.value == value):
raise ConfigError("Syntax error: expected '%s'" % value, lasttoken)
token = next()
def parse_expr(expr):
global next, token
next = (x for x in expr).next
token = next()
try:
e = expression()
except StopIteration:
raise ConfigError("Premature end of expression", lasttoken)
if token.id != '(end)':
raise ConfigError("Expression should have ended at this point", token)
return e
def eval_expr(expr, vars={}):
global variable_table
if not variable_table_defaults:
init_variable_table_defaults()
variable_table = dict(variable_table_defaults)
variable_table.update(vars)
return expr.eval()
##############################################################################
# Levels
##############################################################################
class Level(object):
""" Class for holding and evaluating level selector rules. """
def __init__(self, levelname, config={}):
self.levelname = levelname
# The ordinal numbers of the first four enum values must be
# identical to those of the Direction enum.
Level.orientation = ('top', 'right', 'bottom', 'left', 'any')
properties = {
'levelDepthMin': (NumberProperty, {'min': 0}),
'levelDepthMax': (NumberProperty, {'min': 0}),
'levelNumChildrenMin': (NumberProperty, {'min': 0}),
'levelNumChildrenMax': (NumberProperty, {'min': 0}),
'levelOrientation': (EnumProperty,
{'values': Level.orientation})
}
self._props = Properties(properties, 'level.twg', config,
extra_prop_warning=False)
self._eval()
def __repr__(self):
return self.levelname
def _eval(self):
E = self._props.eval
self.depth_min = E('levelDepthMin')
self.depth_max = E('levelDepthMax')
self.numchildren_min = E('levelNumChildrenMin')
self.numchildren_max = E('levelNumChildrenMax')
o = E('levelOrientation')
if o == 'any':
self.orientation = -1
else:
self.orientation = Level.orientation.index(o)
def selects(self, node, layout):
""" Check if the this level's selector rules select a given
node.
The layout object must be passed to determine the orientation of
the node in certain layouts.
"""
depth = node.depth()
numchildren = len(node.getchildren())
o = layout.node_orientation(node)
ok = ( depth >= self.depth_min
and depth <= self.depth_max
and numchildren >= self.numchildren_min
and numchildren <= self.numchildren_max)
# -1 stands for 'any' orientation, which means the orientation
# can be any valid value, so we don't need to do the orientation
# filtering
if self.orientation >= 0:
            ok = ok and self.orientation == o
return ok
def createlevel(levelname, config):
""" Create Level object from a config and then deletes all level
related properties.
"""
level = Level(levelname, config)
for k in level._props._properties.keys():
if k in config:
del config[k]
return level
class SectionLevel(object):
""" Placeholder to keep level descriptions and drawer objects
together. """
def __init__(self, level, drawer):
self.level = level
self.drawer = drawer
def __repr__(self):
return '{%s}: %s' % (self.level, self.drawer)
STYLE = 'style'
LAYOUT_CONFIG = 'layout'
NODE_CONFIG = 'node'
CONNECTION_CONFIG = 'connection'
COLOR_CONFIG = 'color'
##############################################################################
# Properties
##############################################################################
class Property(object):
def __init__(self, name):
self.name = name
def eval(self, vars):
self.value = eval_expr(self.expr, vars)
self._validate()
return self.value
class StringProperty(Property):
def _validate(self):
if type(self.value) not in (str, unicode):
raise ConfigError("Property '%s' must evaluate to a string"
% self.name, self.expr[0])
class NumberProperty(Property):
def __init__(self, name, min=None, max=None):
super(NumberProperty, self).__init__(name)
self.min = min
self.max = max
def _validate(self):
if type(self.value) not in (int, float):
raise ConfigError("Property '%s' must evaluate to a number"
% self.name, self.expr)
        if self.min is not None and self.value < self.min:
raise ConfigError(
"Number property '%s' must have a value greater "
"than %s" % (self.name, self.min), self.expr)
        if self.max is not None and self.value > self.max:
raise ConfigError("Number property '%s' must have a value less "
"than %s" % (self.name, self.max), self.expr)
class ColorProperty(Property):
def _validate(self):
if type(self.value).__name__ != 'Color':
raise ConfigError("Property '%s' must evaluate to a color"
% self.name, self.expr)
class EnumProperty(Property):
def __init__(self, name, values):
super(EnumProperty, self).__init__(name)
self.values = values
def eval(self, vars):
enumvars = {}
for value, name in enumerate(self.values):
enumvars[name] = value
vars.update(enumvars)
n = eval_expr(self.expr, vars)
if type(n) not in (int, float):
raise ConfigError(
("Enum property '%s' must evaluate to a numeric value"
% self.name), self.expr)
n = int(round(n))
if n < 0 or n >= len(self.values):
raise ConfigError(
("Enum property '%s' evaluated to an invalid "
"numeric value: %s" % (self.name, n)), self.expr)
self.value = self.values[n]
return self.value
class BooleanProperty(Property):
def eval(self, vars):
vars = {'no': 0, 'off': 0, 'false': 0, 'yes': 1, 'on': 1, 'true': 1}
n = eval_expr(self.expr, vars)
if type(n) not in (int, float):
raise ConfigError(
("Boolean property '%s' must evaluate to a numeric value"
% self.name), self.expr)
self.value = True if n > 0.0 else False
return self.value
class ArrayProperty(Property):
def __init__(self, name, type):
super(ArrayProperty, self).__init__(name)
self.type = type
def _validate(self):
# TODO array element type validation
if type(self.value) != list:
raise ValueError
class Properties(object):
""" Class for managing configuration properties. """
def __init__(self, properties, defaults, config, extra_prop_warning=True):
"""
Load and parse the default config file ``default`` and merge it
with the configuration ``config`` (defaults will be
overwritten).
The ``properties`` dict contains the list of allowed properties
where the key is the name of the property and the value a
two-element tuple of which the first element is the class of the
property and the second element the property's extra parameters
(note that some property types have mandatory extra parameters,
e.g. ArrayProperty). For example:
{
'fontName': (StringProperty, {}),
'fontSizes': (ArrayProperty, {'type': NumberProperty})
}
Warn on property names that are not listed in the ``properties``
dict if ``extra_prop_warning`` is True.
"""
c = loaddefaults(defaults)
c.update(config)
config = c
# Build properties dictionary
self._properties = {}
for name, prop_params in properties.iteritems():
# The first parameter is the property class, the second the
# optional constructor parameters
prop_class, opts = prop_params
self._properties[name] = prop_class(name, **opts)
for name, prop in self._properties.iteritems():
if name not in config:
raise ConfigError("Missing property: '%s'" % name)
e = parse_expr(config[name])
# print '>>>', name, ':', e
prop.expr = e
prop.name = name
if extra_prop_warning:
self._warn_extra_props(config)
def _warn_extra_props(self, config):
extra_props = set(config.keys()) - set(self._properties.keys())
for p in extra_props:
token = config[p][0]
#TODO make displaying warnings optional? print to stdout?
print >>sys.stderr, (
"Warning: Unknown property '%s' in configuration "
"file '%s' on line %s" % (p, token.file, token.line))
def eval(self, name, scope=None, vars={}):
""" Evaluate the value of a property.
``name`` is the name of the property, ``scope`` the object in
whose context the property is to be evaluated and ``vars``
contains a dict of variable name and value pairs that will be
injected into the evaluation scope.
"""
if name not in self._properties:
# TODO more detailed error message
raise AttributeError("Property '%s' does not exist" % name)
p = self._properties[name]
if scope:
for propname, varname in scope.property_mappings.iteritems():
if hasattr(scope, propname):
vars[varname] = getattr(scope, propname)
# TODO triggered by 'basecolor' -- why?
# else:
# raise ConfigError("Variable '%s' is not evaluated "
# "at this point" % (varname))
return p.eval(vars)
##############################################################################
# Utils
##############################################################################
def format_paramvalue_error(configname, paramname, value, correct_type):
msg = ("Invalid %s parameter value: %s: %s ('%s', should be '%s')"
% (configname, paramname, value, correct_type))
return msg
def get_stylename(configname, config):
if STYLE not in config:
        raise ConfigError("Style must be specified in '%s'" % configname)
expr = config[STYLE]
if len(expr) == 2 and expr[0].id == '(name)' and expr[1].id == '(end)':
stylename = expr[0].value
else:
raise ConfigError("Invalid style name", expr[0])
if not (type(stylename) == str or type(stylename) == unicode):
        raise ConfigError(
            format_paramvalue_error(configname, STYLE, stylename, str))
return stylename
```
#### File: twyg/twyg/connection.py
```python
import os
from twyg.common import createpath
from twyg.config import (Properties, NumberProperty,
EnumProperty, ColorProperty)
from twyg.geom import Vector2
from twyg.geomutils import arcpath
from twyg.tree import Direction, opposite_dir
# TODO util function in common?
def defaults_path(conf):
return os.path.join('connection', conf)
class CurveConnectionDrawer(object):
def __init__(self, config={}):
properties = {
'nodeLineWidthStart': (NumberProperty, {'min': 0.0}),
'nodeLineWidthEnd': (NumberProperty, {'min': 0.0}),
'nodeCx1Factor': (NumberProperty, {}),
'nodeCx2Factor': (NumberProperty, {}),
'nodeCy1Factor': (NumberProperty, {}),
'nodeCy2Factor': (NumberProperty, {})
}
self._props = Properties(properties, defaults_path('curve'), config)
def _eval_func(self, node):
return lambda name: self._props.eval(name, node)
def draw(self, node):
"""
Draw a curved connection between a node and its child nodes.
"""
E = self._eval_func(node)
if node.isleaf():
return
_ctx.autoclosepath(True)
_ctx.stroke(node.connectioncolor)
_ctx.fill(node.connectioncolor)
children = node.children
for child in children:
linewidth = E('nodeLineWidthEnd')
_ctx.strokewidth(linewidth)
direction = child.direction()
opp_direction = opposite_dir(direction)
x1, y1 = node.connection_point(direction)
x2, y2 = child.connection_point(opp_direction)
if direction == Direction.Left:
x2 -= linewidth / 2
elif direction == Direction.Right:
x2 += linewidth / 2
if len(children) == 1:
_ctx.line(x1, y1, x2, y2)
else:
cx1 = (x2 - x1) * E('nodeCx1Factor')
cx2 = (x2 - x1) * E('nodeCx2Factor')
cy1 = (y2 - y1) * E('nodeCy1Factor')
cy2 = (y2 - y1) * E('nodeCy2Factor')
p1x = x1 + cx1
p1y = y1 + cy1
p2x = x2 - cx2
p2y = y2 - cy2
startwidth = E('nodeLineWidthStart') - 1
sw = startwidth / 2.
_ctx.beginpath(x1, y1 - sw)
_ctx.curveto(p1x, p1y, p2x, p2y, x2, y2)
_ctx.curveto(p2x, p2y, p1x, p1y, x1, y1 + sw)
_ctx.endpath()
class JunctionConnectionDrawer(object):
def __init__(self, config={}):
corner_styles = ('square', 'beveled', 'rounded')
junction_styles = ('none', 'square', 'disc', 'diamond')
junction_sign = ('none', 'plus', 'minus')
properties = {
'lineWidth': (NumberProperty, {'min': 0.0}),
'junctionXFactor': (NumberProperty, {}),
'cornerStyle': (EnumProperty, {'values': corner_styles}),
'cornerRadius': (NumberProperty, {'min': 0.0}),
'cornerPad': (NumberProperty, {'min': 0.0}),
'junctionStyle': (EnumProperty,{'values': junction_styles}),
'junctionRadius': (NumberProperty, {'min': 0.0}),
'junctionFillColor': (ColorProperty, {}),
'junctionStrokeWidth': (NumberProperty, {'min': 0.0}),
'junctionStrokeColor': (ColorProperty, {}),
'junctionSign': (EnumProperty,
{'values': junction_sign}),
'junctionSignSize': (NumberProperty, {'min': 0.0}),
'junctionSignStrokeWidth': (NumberProperty, {'min': 0.0}),
'junctionSignColor': (ColorProperty, {})
}
self._props = Properties(properties, defaults_path('junction'),
config)
def _eval_func(self, node):
return lambda name: self._props.eval(name, node)
def draw(self, node):
if node.isroot():
self._draw(node, Direction.Left)
self._draw(node, Direction.Right)
else:
self._draw(node)
def _draw(self, node, direction=None):
"""
        Draw a junction-style connection between a node and its child
        nodes on the given side.
"""
E = self._eval_func(node)
children = node.getchildren(direction)
if not children:
return
linewidth = E('lineWidth')
_ctx.autoclosepath(True)
_ctx.stroke(node.connectioncolor)
_ctx.fill(node.connectioncolor)
_ctx.strokewidth(linewidth)
firstchild = children[0]
lastchild = children[-1]
direction = firstchild.direction()
opp_direction = opposite_dir(direction)
x1, y1 = node.connection_point(direction)
xfirst, yfirst = firstchild.connection_point(opp_direction)
# Special case: draw straight line if there's only one child
if len(children) == 1:
_ctx.line(x1, y1, xfirst, yfirst)
return
# Calculate junction point position
jx = x1 + (xfirst - x1) * E('junctionXFactor')
jy = y1
# Draw line from parent node to junction point
_ctx.line(x1, y1, jx, jy)
# Limit first & last corner radius to the available area
ylast = lastchild.connection_point(opp_direction)[1]
ysecond = children[1].connection_point(opp_direction)[1]
ypenultimate = children[-2].connection_point(opp_direction)[1]
# Starting corner radius
cornerPad = E('cornerPad')
r = min(E('cornerRadius'), abs(jx - xfirst) - cornerPad)
r = max(r, 0)
# Adjusted first (top) corner radius
r1 = min(r, abs(yfirst - jy) - cornerPad)
r1 = max(r1, 0)
if ysecond < jy:
r1 = min(r, abs(yfirst - ysecond) - cornerPad)
r1 = max(r1, 0)
# Adjusted last (bottom) corner radius
r2 = min(r, abs(ylast - jy) - cornerPad)
r2 = max(r2, 0)
if ypenultimate > jy:
r2 = min(r, abs(ylast - ypenultimate) - cornerPad)
r2 = max(r2, 0)
# Draw main branch as a single path to ensure line continuity
p1 = Vector2(jx, yfirst + r1)
p2 = Vector2(jx, ylast - r2)
segments = [[p1, p2]]
corner_style = E('cornerStyle')
for i, child in enumerate(children):
direction = child.direction()
opp_direction = opposite_dir(direction)
x2, y2 = child.connection_point(opp_direction)
if direction == Direction.Left:
x2 -= linewidth / 2
elif direction == Direction.Right:
x2 += linewidth / 2
# Draw corners
if direction == Direction.Left:
a1 = 90
da = -90
dx1 = r1 * 2
dx2 = r2 * 2
else:
a1 = da = 90
dx1 = dx2 = 0
x1 = jx
if child is firstchild:
x1 += -r1 if direction == Direction.Left else r1
if (corner_style == 'square' or abs(y2 - jy) < .001):
p1 = Vector2(jx, y2)
p2 = Vector2(jx, y2 + r1)
segments.insert(0, [p1, p2])
p1 = Vector2(x1, y2)
p2 = Vector2(jx, y2)
segments.insert(0, [p1, p2])
elif corner_style == 'beveled':
p1 = Vector2(x1, y2)
p2 = Vector2(jx, y2 + r1)
segments.insert(0, [p1, p2])
elif corner_style == 'rounded':
arc = arcpath(jx - dx1, y2, r1 * 2, r1 * 2, a1, da)
segments = arc + segments
p1 = Vector2(x2, y2)
p2 = Vector2(x1, y2)
segments.insert(0, [p1, p2])
elif child is lastchild:
x1 += -r2 if direction == Direction.Left else r2
if (corner_style == 'square' or abs(y2 - jy) < .001):
p1 = Vector2(jx, y2 - r2)
p2 = Vector2(jx, y2)
segments.append([p1, p2])
p1 = Vector2(jx, y2)
p2 = Vector2(x1, y2)
segments.append([p1, p2])
elif corner_style == 'beveled':
p1 = Vector2(jx, y2 - r2)
p2 = Vector2(x1, y2)
segments.append([p1, p2])
elif corner_style == 'rounded':
arc = arcpath(jx - dx2, y2 - r2 * 2, r2 * 2, r2 * 2,
a1 + da, da)
segments = segments + arc
p1 = Vector2(x1, y2)
p2 = Vector2(x2, y2)
segments.append([p1, p2])
else:
_ctx.line(x1, y2, x2, y2)
# Draw main branch path
_ctx.nofill()
path = createpath(_ctx, segments, close=False)
_ctx.drawpath(path)
# Draw junction point
style = E('junctionStyle')
if style == 'none':
return
r = E('junctionRadius')
r2 = r / 2.
_ctx.fill(E('junctionFillColor'))
_ctx.stroke(E('junctionStrokeColor'))
_ctx.strokewidth(E('junctionStrokeWidth'))
if style == 'square':
_ctx.rect(jx - r2, jy - r2, r, r)
elif style == 'disc':
_ctx.oval(jx - r2, jy - r2, r, r)
elif style == 'diamond':
_ctx.beginpath(jx, jy - r2)
_ctx.lineto(jx + r2, jy)
_ctx.lineto(jx, jy + r2)
_ctx.lineto(jx - r2, jy)
_ctx.lineto(jx, jy - r2)
_ctx.endpath()
# Draw junction sign
sign = E('junctionSign')
if sign == 'none':
return
_ctx.stroke(E('junctionSignColor'))
d = E('junctionSignSize') / 2.
_ctx.strokewidth(E('junctionSignStrokeWidth'))
if sign in ('minus', 'plus'):
_ctx.line(jx - d, jy, jx + d, jy)
if sign == 'plus':
_ctx.line(jx, jy - d, jx, jy + d)
_conndrawer_map = {
'curve': CurveConnectionDrawer,
'junction': JunctionConnectionDrawer
}
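# Hedged usage sketch: the drawer class is looked up from the ``style``
# property of the ``[connection]`` config section, roughly:
#   drawer = conndrawer_by_name('junction')(config)
#   drawer.draw(node)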
def conndrawer_by_name(name):
if name in _conndrawer_map:
return _conndrawer_map[name]
else:
        raise ValueError('Unrecognized connection drawer name: %s' % name)
```
#### File: twyg/twyg/css3colors.py
```python
import re, colorsys
# SVG 1.0 color keyword names
# ---------------------------
# Adapted from http://en.wikipedia.org/wiki/Web_colors#X11_color_names
# See also http://www.w3.org/TR/SVG/types.html#ColorKeywords
colornames = {
# Pink colors
'pink': (255, 192, 203),
'lightpink': (255, 182, 193),
'hotpink': (255, 105, 180),
'deeppink': (255, 20, 147),
'palevioletred': (219, 112, 147),
'mediumvioletred': (199, 21, 133),
# Red colors
'lightsalmon': (255, 160, 122),
'salmon': (250, 128, 114),
'darksalmon': (233, 150, 122),
'lightcoral': (240, 128, 128),
'indianred': (205, 92, 92),
'crimson': (220, 20, 60),
'firebrick': (178, 34, 34),
'darkred': (139, 0, 0),
'red': (255, 0, 0),
# Orange colors
'orangered': (255, 69, 0),
'tomato': (255, 99, 71),
'coral': (255, 127, 80),
'darkorange': (255, 140, 0),
'orange': (255, 165, 0),
'gold': (255, 215, 0),
# Yellow colors
'yellow': (255, 255, 0),
'lightyellow': (255, 255, 224),
'lemonchiffon': (255, 250, 205),
'lightgoldenrodyellow': (250, 250, 210),
'papayawhip': (255, 239, 213),
'moccasin': (255, 228, 181),
'peachpuff': (255, 218, 185),
'palegoldenrod': (238, 232, 170),
'khaki': (240, 230, 140),
'darkkhaki': (189, 183, 107),
# Brown colors
'cornsilk': (255, 248, 220),
'blanchedalmond': (255, 235, 205),
'bisque': (255, 228, 196),
'navajowhite': (255, 222, 173),
'wheat': (245, 222, 179),
'burlywood': (222, 184, 135),
'tan': (210, 180, 140),
'rosybrown': (188, 143, 143),
'sandybrown': (244, 164, 96),
'goldenrod': (218, 165, 32),
'darkgoldenrod': (184, 134, 11),
'peru': (205, 133, 63),
'chocolate': (210, 105, 30),
'saddlebrown': (139, 69, 19),
'sienna': (160, 82, 45),
'brown': (165, 42, 42),
'maroon': (128, 0, 0),
# Green colors
'darkolivegreen': ( 85, 107, 47),
'olive': (128, 128, 0),
'olivedrab': (107, 142, 35),
'yellowgreen': (154, 205, 50),
'limegreen': ( 50, 205, 50),
'lime': ( 0, 255, 0),
'lawngreen': (124, 252, 0),
'chartreuse': (127, 255, 0),
'greenyellow': (173, 255, 47),
'springgreen': ( 0, 255, 127),
'mediumspringgreen': ( 0, 250, 154),
'lightgreen': (144, 238, 144),
'palegreen': (152, 251, 152),
'darkseagreen': (143, 188, 143),
'mediumseagreen': ( 60, 179, 113),
'seagreen': ( 46, 139, 87),
'forestgreen': ( 34, 139, 34),
'green': ( 0, 128, 0),
'darkgreen': ( 0, 100, 0),
# Cyan colors
'mediumaquamarine': (102, 205, 170),
'aqua': ( 0, 255, 255),
'cyan': ( 0, 255, 255),
'lightcyan': (224, 255, 255),
'paleturquoise': (175, 238, 238),
'aquamarine': (127, 255, 212),
'turquoise': ( 64, 224, 208),
'mediumturquoise': ( 72, 209, 204),
'darkturquoise': ( 0, 206, 209),
'lightseagreen': ( 32, 178, 170),
'cadetblue': ( 95, 158, 160),
'darkcyan': ( 0, 139, 139),
'teal': ( 0, 128, 128),
# Blue colors
'lightsteelblue': (176, 196, 222),
'powderblue': (176, 224, 230),
'lightblue': (173, 216, 230),
'skyblue': (135, 206, 235),
'lightskyblue': (135, 206, 250),
'deepskyblue': ( 0, 191, 255),
'dodgerblue': ( 30, 144, 255),
'cornflowerblue': (100, 149, 237),
'steelblue': ( 70, 130, 180),
'royalblue': ( 65, 105, 225),
'blue': ( 0, 0, 255),
'mediumblue': ( 0, 0, 205),
'darkblue': ( 0, 0, 139),
'navy': ( 0, 0, 128),
'midnightblue': ( 25, 25, 112),
# Purple colors
'lavender': (230, 230, 250),
'thistle': (216, 191, 216),
'plum': (221, 160, 221),
'violet': (238, 130, 238),
'orchid': (218, 112, 214),
'fuchsia': (255, 0, 255),
'magenta': (255, 0, 255),
'mediumorchid': (186, 85, 211),
'mediumpurple': (147, 112, 219),
'blueviolet': (138, 43, 226),
'darkviolet': (148, 0, 211),
'darkorchid': (153, 50, 204),
'darkmagenta': (139, 0, 139),
'purple': (128, 0, 128),
'indigo': ( 75, 0, 130),
'darkslateblue': ( 72, 61, 139),
'slateblue': (106, 90, 205),
'mediumslateblue': (123, 104, 238),
# White/Gray/Black colors
'white': (255, 255, 255),
'snow': (255, 250, 250),
'honeydew': (240, 255, 240),
'mintcream': (245, 255, 250),
'azure': (240, 255, 255),
'aliceblue': (240, 248, 255),
'ghostwhite': (248, 248, 255),
'whitesmoke': (245, 245, 245),
'seashell': (255, 245, 238),
'beige': (245, 245, 220),
'oldlace': (253, 245, 230),
'floralwhite': (255, 250, 240),
'ivory': (255, 255, 240),
'antiquewhite': (250, 235, 215),
'linen': (250, 240, 230),
'lavenderblush': (255, 240, 245),
'mistyrose': (255, 228, 225),
'gainsboro': (220, 220, 220),
'lightgray': (211, 211, 211),
'silver': (192, 192, 192),
'darkgray': (169, 169, 169),
'gray': (128, 128, 128),
'dimgray': (105, 105, 105),
'lightslategray': (119, 136, 153),
'slategray': (112, 128, 144),
'darkslategray': ( 47, 79, 79),
'black': ( 0, 0, 0)
}
# Precompile regular expressions for rgb(a) & hsl(a) format matching
i = '\s*([-+]?\d+)\s*' # int
p = '\s*([-+]?\d+)\%\s*' # percent
f = '\s*([-+]?\d*\.?\d+)\s*' # float
_re_rgb = re.compile('rgb\(%s,%s,%s\)' % (i, i, i))
_re_rgb_p = re.compile('rgb\(%s,%s,%s\)' % (p, p, p))
_re_rgba = re.compile('rgba\(%s,%s,%s,%s\)' % (i, i, i, f))
_re_rgba_p = re.compile('rgba\(%s,%s,%s,%s\)' % (p, p, p, f))
_re_hsl = re.compile('hsl\(%s,%s,%s\)' % (i, p, p))
_re_hsla = re.compile('hsla\(%s,%s,%s,%s\)' % (i, p, p, f))
del i, p, f
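# For illustration, all of the following inputs are normalised by
# ``color_to_rgba`` below into (r, g, b, a) floats in the 0..1 range:
#   'coral', '#ff7f50', '#f75', 'rgb(255, 127, 80)',
#   'rgba(100%, 50%, 31%, 0.5)', 'hsl(16, 100%, 66%)', 'hsla(16, 100%, 66%, 1)'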
def _parse_hex(col):
if len(col) == 0:
raise ValueError
if col[0] == '#':
col = col[1:]
if len(col) == 3:
r = int(col[0], 16) / 15.
g = int(col[1], 16) / 15.
b = int(col[2], 16) / 15.
return r, g, b
elif len(col) == 6:
r = int(col[0:2], 16) / 255.
g = int(col[2:4], 16) / 255.
b = int(col[4:6], 16) / 255.
return r, g, b
else:
raise ValueError
def _conv_rgb(c):
return min(max(0, float(c)), 255) / 255.
def _conv_percent(p):
return min(max(0, float(p)), 100) / 100.
def _conv_alpha(a):
return min(max(0, float(a)), 1)
def _conv_hue(h):
return float(h) / 360
def color_to_rgba(col):
# Convert to string to handle hex colors consisting of decimal
# digits only correctly
col = str(col).strip()
a = 1.0
if col in colornames:
r, g, b = colornames[col]
return r / 255., g / 255., b / 255., a
try:
r, g, b = _parse_hex(col)
return r, g, b, a
except ValueError:
pass
# rgb(r, g, b)
m = _re_rgb.match(col)
if m:
r, g, b = m.groups()
r = _conv_rgb(r)
g = _conv_rgb(g)
b = _conv_rgb(b)
return r, g, b, a
# rgb(r%, g%, b%)
m = _re_rgb_p.match(col)
if m:
r, g, b = m.groups()
r = _conv_percent(r)
g = _conv_percent(g)
b = _conv_percent(b)
return r, g, b, a
# rgba(r, g, b, a)
m = _re_rgba.match(col)
if m:
r, g, b, a = m.groups()
r = _conv_rgb(r)
g = _conv_rgb(g)
b = _conv_rgb(b)
a = _conv_alpha(a)
return r, g, b, a
# rgba(r%, g%, b%, a)
m = _re_rgba_p.match(col)
if m:
r, g, b, a = m.groups()
r = _conv_percent(r)
g = _conv_percent(g)
b = _conv_percent(b)
a = _conv_alpha(a)
return r, g, b, a
# hsl(h, s, l)
m = _re_hsl.match(col)
if m:
h, s, l = m.groups()
h = _conv_hue(h)
s = _conv_percent(s)
l = _conv_percent(l)
r, g, b = colorsys.hls_to_rgb(h, l, s)
return r, g, b, a
# hsla(h, s, l, a)
m = _re_hsla.match(col)
if m:
h, s, l, a = m.groups()
h = _conv_hue(h)
s = _conv_percent(s)
l = _conv_percent(l)
a = _conv_alpha(a)
r, g, b = colorsys.hls_to_rgb(h, l, s)
return r, g, b, a
    raise ValueError('Invalid color: %s' % col)
def rgba_to_color(r, g, b, a, format='rgba'):
r = min(max(r, 0), 1)
g = min(max(g, 0), 1)
b = min(max(b, 0), 1)
a = min(max(a, 0), 1)
if format == 'hex':
return '#%02x%02x%02x' % (r * 255 + .5, g * 255 + .5, b * 255 + .5)
if format == 'rgb':
return 'rgb(%.0f, %.0f, %.0f)' % (r * 255, g * 255, b * 255)
if format == 'rgba':
return 'rgba(%.0f, %.0f, %.0f, %.3f)' % (r * 255, g * 255, b * 255, a)
if format == 'rgb_p':
return 'rgb(%.0f%%, %.0f%%, %.0f%%)' % (r * 100, g * 100, b * 100)
if format == 'rgba_p':
return ('rgba(%.0f%%, %.0f%%, %.0f%%, %.3f)'
% (r * 100, g * 100, b * 100, a))
if format == 'hsl':
h, l, s = colorsys.rgb_to_hls(r, g, b)
return 'hsl(%.0f, %.0f%%, %.0f%%)' % (h * 360, s * 100, l * 100)
if format == 'hsla':
h, l, s = colorsys.rgb_to_hls(r, g, b)
return ('hsla(%.0f, %.0f%%, %.0f%%, %.3f)'
% (h * 360, s * 100, l * 100, a))
    raise ValueError('Invalid color format: %s' % format)
```
#### File: twyg/tests/geom_test.py
```python
import math, os, sys, unittest
sys.path.append(os.path.join('..'))
from twyg.geom import Vector2
deg = math.degrees
rad = math.radians
class TestEvalExpr(unittest.TestCase):
def assert_equals(self, a, b):
self.assertTrue(abs(a - b) < 1e-12)
def test_constructor_cartesian1(self):
v = Vector2(3, -4)
self.assert_equals(5, v.m)
self.assert_equals(53.13010235415598, deg(v.a))
def test_constructor_cartesian2(self):
v = Vector2(4, -4)
self.assert_equals(5.6568542494923806, v.m)
self.assert_equals(45.0, deg(v.a))
def test_normalize(self):
v = Vector2(4, -4)
self.assert_equals(5.65685424949238, v.m)
self.assert_equals(45.0, deg(v.a))
v.normalize()
self.assert_equals(1.0, v.m)
self.assert_equals(45.0, deg(v.a))
def test_rotate_positive(self):
v = Vector2(4, -4)
v.rotate(rad(-15))
self.assert_equals(30.0, deg(v.a))
def test_rotate_negative(self):
v = Vector2(4, -4)
v.rotate(rad(30))
self.assert_equals(75.0, deg(v.a))
def test_constructor_polar(self):
v = Vector2(angle=rad(30), m=1)
self.assert_equals(30.0, deg(v.a))
self.assert_equals(1.0, v.m)
self.assert_equals(0.86602540378443, v.x)
self.assert_equals(-0.5, v.y)
def test_constructor_copy(self):
v1 = Vector2(angle=rad(30), m=1)
v2 = Vector2(v1)
self.assert_equals(v2.x, v1.x)
self.assert_equals(v2.y, v1.y)
self.assert_equals(v2.m, v1.m)
self.assert_equals(v2.a, v1.a)
def test_scalar_multiply_right(self):
v = Vector2(3, 2)
m, a = v.m, v.a
v = v * 2
self.assert_equals(a, v.a)
self.assert_equals(m * 2, v.m)
def test_scalar_multiply_left(self):
v = Vector2(3, 2)
m, a = v.m, v.a
v = 2 * v
self.assert_equals(a, v.a)
self.assert_equals(m * 2, v.m)
def test_scalar_multiply_and_assign(self):
v = Vector2(3, 2)
m, a = v.m, v.a
v *= 2
self.assert_equals(a, v.a)
self.assert_equals(m * 2, v.m)
def test_scalar_divide_and_assign(self):
v = Vector2(3, 2)
m, a = v.m, v.a
v /= 2
self.assert_equals(a, v.a)
self.assert_equals(m / 2, v.m)
def test_scalar_divide_right(self):
v = Vector2(3, 2)
m, a = v.m, v.a
v = v / 2
self.assert_equals(a, v.a)
self.assert_equals(m / 2, v.m)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "johnnv1/accelerate",
"score": 2
} |
#### File: src/accelerate/kwargs_handlers.py
```python
import copy
from dataclasses import dataclass
from datetime import timedelta
from typing import Optional
class KwargsHandler:
"""
Internal mixin that implements a `to_kwargs()` method for a dataclass.
"""
def to_dict(self):
return copy.deepcopy(self.__dict__)
def to_kwargs(self):
"""
Returns a dictionary containing the attributes with values different from the default of this class.
"""
default_dict = self.__class__().to_dict()
this_dict = self.to_dict()
return {k: v for k, v in this_dict.items() if default_dict[k] != v}
@dataclass
class DistributedDataParallelKwargs(KwargsHandler):
"""
Use this object in your [`Accelerator`] to customize how your model is wrapped in a
`torch.nn.parallel.DistributedDataParallel`. Please refer to the documentation of this
[wrapper](https://pytorch.org/docs/stable/generated/torch.nn.parallel.DistributedDataParallel.html) for more
information on each argument.
<Tip warning={true}>
`gradient_as_bucket_view` is only available in PyTorch 1.7.0 and later versions.
</Tip>"""
dim: int = 0
broadcast_buffers: bool = True
bucket_cap_mb: int = 25
find_unused_parameters: bool = False
check_reduction: bool = False
gradient_as_bucket_view: bool = False
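# A short usage sketch (hedged): kwargs handlers are passed to the
# `Accelerator` constructor, e.g.
#   from accelerate import Accelerator
#   ddp_kwargs = DistributedDataParallelKwargs(find_unused_parameters=True)
#   accelerator = Accelerator(kwargs_handlers=[ddp_kwargs])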
@dataclass
class GradScalerKwargs(KwargsHandler):
"""
Use this object in your [`Accelerator`] to customize the behavior of mixed precision, specifically how the
`torch.cuda.amp.GradScaler` used is created. Please refer to the documentation of this
[scaler](https://pytorch.org/docs/stable/amp.html?highlight=gradscaler) for more information on each argument.
<Tip warning={true}>
`GradScaler` is only available in PyTorch 1.5.0 and later versions.
</Tip>"""
init_scale: float = 65536.0
growth_factor: float = 2.0
backoff_factor: float = 0.5
growth_interval: int = 2000
enabled: bool = True
@dataclass
class InitProcessGroupKwargs(KwargsHandler):
"""
Use this object in your [`Accelerator`] to customize the initialization of the distributed processes. Please refer
to the documentation of this
[method](https://pytorch.org/docs/stable/distributed.html#torch.distributed.init_process_group) for more
information on each argument.
"""
init_method: Optional[str] = None
timeout: timedelta = timedelta(seconds=1800)
```
#### File: src/accelerate/optimizer.py
```python
import inspect
import torch
from packaging import version
from .state import AcceleratorState, DistributedType, is_tpu_available
from .utils import honor_type
if is_tpu_available():
import torch_xla.core.xla_model as xm
def move_to_device(state, device):
if isinstance(state, (list, tuple)):
return honor_type(state, (move_to_device(t, device) for t in state))
elif isinstance(state, dict):
return type(state)({k: move_to_device(v, device) for k, v in state.items()})
elif isinstance(state, torch.Tensor):
return state.to(device)
return state
class AcceleratedOptimizer(torch.optim.Optimizer):
"""
Internal wrapper around a torch optimizer.
Args:
optimizer (`torch.optim.optimizer.Optimizer`):
The optimizer to wrap.
device_placement (`bool`, *optional*, defaults to `True`):
Whether or not the optimizer should handle device placement. If so, it will place the state dictionary of
`optimizer` on the right device.
scaler (`torch.cuda.amp.grad_scaler.GradScaler`, *optional*):
The scaler to use in the step function if training with mixed precision.
"""
def __init__(self, optimizer, device_placement=True, scaler=None):
self.optimizer = optimizer
self.scaler = scaler
self.accelerator_state = AcceleratorState()
self.device_placement = device_placement
self._is_overflow = False
# Handle device placement
if device_placement:
state_dict = self.optimizer.state_dict()
if self.accelerator_state.distributed_type == DistributedType.TPU:
xm.send_cpu_data_to_device(state_dict, self.accelerator_state.device)
else:
state_dict = move_to_device(state_dict, self.accelerator_state.device)
self.optimizer.load_state_dict(state_dict)
@property
def state(self):
return self.optimizer.state
@state.setter
def state(self, state):
self.optimizer.state = state
@property
def param_groups(self):
return self.optimizer.param_groups
@param_groups.setter
def param_groups(self, param_groups):
self.optimizer.param_groups = param_groups
@property
def defaults(self):
return self.optimizer.defaults
@defaults.setter
def defaults(self, defaults):
self.optimizer.defaults = defaults
def add_param_group(self, param_group):
self.optimizer.add_param_group(param_group)
def load_state_dict(self, state_dict):
if self.accelerator_state.distributed_type == DistributedType.TPU and self.device_placement:
xm.send_cpu_data_to_device(state_dict, self.accelerator_state.device)
self.optimizer.load_state_dict(state_dict)
def state_dict(self):
return self.optimizer.state_dict()
def zero_grad(self, set_to_none=None):
if version.parse(torch.__version__) < version.parse("1.7.0"):
if set_to_none is not None:
raise ValueError(
"`set_to_none` for Optimizer.zero_grad` was introduced in PyTorch 1.7.0 and can't be used for "
f"earlier versions (found version {torch.__version__})."
)
self.optimizer.zero_grad()
else:
accept_arg = "set_to_none" in inspect.signature(self.optimizer.zero_grad).parameters
if accept_arg:
if set_to_none is None:
set_to_none = False
self.optimizer.zero_grad(set_to_none=set_to_none)
else:
if set_to_none is not None:
                    raise ValueError("`set_to_none` for `Optimizer.zero_grad` is not supported by this optimizer.")
self.optimizer.zero_grad()
def step(self, closure=None):
if self.accelerator_state.distributed_type == DistributedType.TPU:
optimizer_args = {"closure": closure} if closure is not None else {}
xm.optimizer_step(self.optimizer, optimizer_args=optimizer_args)
elif self.scaler is not None:
scale_before = self.scaler.get_scale()
self.scaler.step(self.optimizer, closure)
self.scaler.update()
scale_after = self.scaler.get_scale()
# If we reduced the loss scale, it means the optimizer step was skipped because of gradient overflow.
self._is_overflow = scale_after < scale_before
else:
self.optimizer.step(closure)
def _switch_parameters(self, parameters_map):
for param_group in self.optimizer.param_groups:
param_group["params"] = [parameters_map.get(p, p) for p in param_group["params"]]
@property
def is_overflow(self):
"""Whether or not the optimizer step was done, or skipped because of gradient overflow."""
return self._is_overflow
``` |
{
"source": "johnnv1/CCAgT_dataset_utils",
"score": 3
} |
#### File: CCAgT_utils/converters/LabelBox.py
```python
from __future__ import annotations
from typing import Any
import numpy as np
import pandas as pd
from shapely.geometry import Point
from shapely.geometry import Polygon
from CCAgT_utils.converters.CCAgT import CCAgT
from CCAgT_utils.utils import basename
# Just remove the class, everything can be just functions
class LabelBox():
def __init__(
self,
raw_labelbox: list[dict[str, Any]],
categories_map: list[dict[str, Any]] | None = None,
) -> None:
if not isinstance(raw_labelbox, list):
raise ValueError('Expected a list of dictionary that represents raw labelbox data!')
expected_data = set({'ID', 'External ID', 'Skipped', 'Reviews', 'Label'})
for it in raw_labelbox:
if not all(i in it for i in expected_data):
if 'Skipped' not in it:
                    raise KeyError(f'Expected keys not found: each item must have `Skipped` or {expected_data}')
self.raw_labelbox = raw_labelbox[:]
self.categories_map = None
if isinstance(categories_map, list):
self.categories_map = categories_map[:]
@property
def raw_dataframe(self) -> pd.DataFrame:
return pd.DataFrame(self.raw_labelbox)
def __check_or_instance_categories_map(
self,
categories_map: list[dict[str, Any]] | None,
) -> bool:
if categories_map is None:
if self.categories_map is None:
                raise ValueError('You need to instantiate or pass the categories_map as a parameter first!')
elif isinstance(categories_map, list):
if self.categories_map is not None:
                print('The categories map will be overwritten!')
self.categories_map = categories_map
if isinstance(self.categories_map, list):
self.schematic_to_id = {x['labelbox_schemaId']: int(x['id']) for x in self.categories_map}
return True
else:
            raise ValueError('Some problems occurred while instantiating the categories map!')
def __remove_duplicated_labels(
self,
df: pd.DataFrame,
) -> pd.DataFrame:
duplicated_idx = df['image_name'].duplicated(keep=False)
df_duplicated = df.loc[duplicated_idx, :].copy()
if df_duplicated.empty:
return df
def hasreview(reviews: list[dict[str, Any]]) -> bool:
for x in reviews:
if x['score'] == 1:
return True
else:
return False
# Check the labels that has review
df_duplicated['have_review'] = df_duplicated.apply(lambda row: hasreview(row['Reviews']), axis=1)
# Count the quantity of labels for each row
df_duplicated['len'] = df_duplicated.apply(lambda row: len(row['Label']['objects']), axis=1)
# Sort the DF by the quantity of labels
df_duplicated = df_duplicated.sort_values(['image_name', 'len'], ascending=False)
        # Drop the duplicated labels; keep the first one, which is the one with the most labels
df_to_keep = df_duplicated.drop_duplicates(['image_name'], keep='first')
id_to_remove = df_duplicated.loc[~df_duplicated['ID'].isin(df_to_keep['ID'].to_numpy()), 'ID']
        # Also remove the rows that have no approving review
id_to_remove = pd.concat([id_to_remove, df_duplicated.loc[~df_duplicated['have_review'], 'ID']])
df_without_duplicated = df[~df['ID'].isin(id_to_remove)].copy()
return df_without_duplicated
def __explode_objects(
self,
df: pd.DataFrame,
) -> pd.DataFrame:
df['objects'] = df.apply(lambda row: row['Label']['objects'], axis=1)
df = df.explode('objects')
df = df.reset_index()
df = df.drop(['index', 'Label'], axis=1)
return df
@staticmethod
def labelbox_to_shapely(object: dict[str, Any]) -> Polygon | Point | np.nan:
keys = object.keys()
if 'polygon' in keys:
polygon = object['polygon']
geometry = Polygon(np.array([(p['x'], p['y']) for p in polygon]))
elif 'point' in keys:
point = object['point']
geometry = Point(np.array([point['x'], point['y']]))
else:
geometry = np.NaN
return geometry
def __transform_geometry(
self,
df: pd.DataFrame,
) -> pd.DataFrame:
df['geometry'] = df['objects'].apply(lambda obj: self.labelbox_to_shapely(obj))
df_out = df.dropna(axis=0, subset=['geometry'])
if df.shape != df_out.shape:
print(f'Some NaN geometries have been deleted! Original shape = {df.shape} | out shape = {df_out.shape}')
if df_out.empty:
            raise ValueError('Data without valid geometries! After transforming the geometries the dataframe is empty.')
return df_out
def __prepare_data(
self,
df: pd.DataFrame,
) -> pd.DataFrame:
# Drop ignored images at labelling process
df = df.drop(df[df['Skipped']].index)
# Drop irrelevant columns
df = df.drop(
[
'DataRow ID', 'Labeled Data', 'Created By', 'Project Name', 'Dataset Name', 'Created At', 'Updated At',
'Seconds to Label', 'Agreement', 'Benchmark Agreement', 'Benchmark ID', 'View Label',
'Has Open Issues', 'Skipped',
], axis=1, errors='ignore',
)
# Get image names
df['image_name'] = df.apply(lambda row: basename(row['External ID']), axis=1)
df = df.drop(['External ID'], axis=1)
# Remove duplicated labels
df = self.__remove_duplicated_labels(df)
# Explode annotations to each row
df = self.__explode_objects(df)
# Transform labelbox annotation to a geometry
df = self.__transform_geometry(df)
# Map category IDs
df['category_id'] = df.apply(lambda row: self.schematic_to_id[row['objects']['schemaId']], axis=1)
df = df.drop(['ID', 'objects', 'Reviews'], axis=1)
return df
def to_CCAgT(
self,
categories_map: list[dict[str, Any]] | None = None,
) -> CCAgT:
self.__check_or_instance_categories_map(categories_map)
self.df = self.__prepare_data(self.raw_dataframe)
CCAgT_anns = CCAgT(self.df)
return CCAgT_anns
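# Hedged usage sketch (the file name and categories_map contents are
# hypothetical):
#   import json
#   with open('labelbox_export.json') as f:
#       raw = json.load(f)
#   categories_map = [{'labelbox_schemaId': '<schema id>', 'id': 1}]
#   ccagt_anns = LabelBox(raw, categories_map).to_CCAgT()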
```
#### File: CCAgT_dataset_utils/CCAgT_utils/describe.py
```python
from __future__ import annotations
import multiprocessing
import os
from dataclasses import asdict
from dataclasses import dataclass
from typing import Any
from typing import Tuple
from typing import Union
import numpy as np
import pandas as pd
from PIL import Image
from CCAgT_utils.categories import Categories
from CCAgT_utils.categories import CategoriesInfos
from CCAgT_utils.constants import STRUCTURE
from CCAgT_utils.converters.CCAgT import CCAgT
from CCAgT_utils.converters.CCAgT import read_parquet
from CCAgT_utils.utils import find_files
from CCAgT_utils.utils import get_traceback
R = Union[float, Tuple[float, float, float]]
@dataclass
class Statistics:
mean: R = 0.
std: R = 0.
max: R = 0.
min: R = 0.
count: int = 0
def join_stats(self, results: Statistics) -> None:
if self.count == 0:
self.mean = results.mean
self.std = results.std
self.max = results.max
self.min = results.min
else:
self.mean = np.mean([self.mean, results.mean], axis=0)
self.std = np.mean([self.std, results.std], axis=0)
self.max = np.max([self.max, results.max], axis=0)
self.min = np.min([self.min, results.min], axis=0)
self.count += results.count
def to_dict(self) -> dict[str, R | int]:
return asdict(self)
def __str__(self) -> str:
_mean = f'Mean: {self.mean:.2f}'
_std = f'std: {self.std:.2f}'
_max = f'Max: {self.max:.2f}'
_min = f'Min: {self.min:.2f}'
_count = f'Quantity: {self.count}'
return f'{_count} | {_mean} | {_std} | {_max} | {_min}'
def from_list(itens: list[int | float]) -> Statistics:
_mean = np.mean(itens)
_std = np.std(itens)
_max = np.max(itens)
_min = np.min(itens)
return Statistics(_mean, _std, _max, _min, count=len(itens))
def from_array(array: np.ndarray) -> Statistics:
axis = (0, 1)
_mean = np.mean(array, axis=axis)
_std = np.std(array, axis=axis)
_max = np.max(array, axis=axis)
_min = np.min(array, axis=axis)
return Statistics(_mean, _std, _max, _min, count=1)
@get_traceback
def single_core_from_image_files(filenames: list[str]) -> Statistics:
if len(filenames) == 0:
raise ValueError('It was expected a list of filenames with at least one value.')
out_stats = Statistics()
for filename in filenames:
out_stats.join_stats(
from_array(
np.asarray(
Image.open(filename),
),
),
)
return out_stats
def from_image_files(
images_dir: str,
extensions: str | tuple[str, ...] = '.jpg',
selection: set[str] = set(),
) -> Statistics:
"""From a directory path with images, will generate the stats of all
images. The statistics generated are: mean, std, max, and min.
Parameters
----------
images_dir : str
Path for the directories that contains the images of interest.
extensions : str | tuple[str, ...], optional
The extensions of the images files, by default '.jpg'
selection : set[str], optional
The images basenames (with extension) of selected to compute
the statistics, by default set([]) (all images will be used)
Returns
-------
dict[str, float | tuple[float, ...]]
Will a dict where the key is the name of the statistics and the
value is the computed statistic.
"""
all_images = find_files(images_dir, extensions, True, selection)
all_filenames = list(all_images.values())
cpu_num = multiprocessing.cpu_count()
workers = multiprocessing.Pool(processes=cpu_num)
filenames_splitted = np.array_split(all_filenames, cpu_num)
print(
f'Starting to compute statistics for {len(all_filenames)} ({extensions}) files using {cpu_num} cores with '
f'{len(filenames_splitted[0])} files per core...',
)
processes = []
for filenames in filenames_splitted:
if len(filenames) == 0:
continue # pragma: no cover
p = workers.apply_async(single_core_from_image_files, (filenames.tolist(),))
processes.append(p)
out_stats = Statistics()
for p in processes:
out_stats.join_stats(p.get())
print(f'Successfully computed the statistics of {out_stats.count} files with {len(processes)} processes!')
return out_stats
def annotations_per_image(
ccagt: CCAgT,
categories_infos: CategoriesInfos,
) -> pd.DataFrame:
df = ccagt.df
df_describe_images = df.groupby(['image_id', 'category_id']).size().reset_index().rename(columns={0: 'count'})
df_describe_images = df_describe_images.pivot(columns=['category_id'], index='image_id')
df_describe_images = df_describe_images.rename({c.id: c.name.upper() for c in categories_infos}, axis=1)
df_describe_images['qtd_annotations'] = df_describe_images.sum(axis=1)
df_describe_images = df_describe_images.fillna(0)
df_describe_images['NORs'] = df_describe_images[
'count',
Categories.CLUSTER.name,
] + df_describe_images[
'count',
Categories.SATELLITE.name,
]
return df_describe_images
def ccagt_annotations(
ccagt: CCAgT,
categories_infos: CategoriesInfos,
) -> dict[str, Any]:
df = ccagt.df
ann_count = {cat.name: df.loc[df['category_id'] == cat.id, 'area'].shape[0] for cat in categories_infos}
qtd_ann = df.shape[0]
ann_dist = {cat_name: qtd_cat / qtd_ann for cat_name, qtd_cat in ann_count.items()}
area_stats = {
cat.name: from_list(df.loc[df['category_id'] == cat.id, 'area'].tolist())
for cat in categories_infos if ann_count[cat.name] > 0
}
qtd_images = df['image_id'].nunique()
qtd_slides = df['slide_id'].nunique()
return {
'qtd_images': qtd_images,
'qtd_slide': qtd_slides,
'qtd_annotations': qtd_ann,
'qtd_annotations_categorical': ann_count,
'dist_annotations': ann_dist,
'area_stats': area_stats,
}
def tvt_annotations_as_df(
train: dict[str, Any],
valid: dict[str, Any],
test: dict[str, Any],
) -> tuple[pd.DataFrame, ...]:
out = {}
out['train'] = train
out['validation'] = valid
out['test'] = test
folds = out.keys()
df_qtd = pd.DataFrame({
'fold': folds,
'images': [out[f]['qtd_images'] for f in folds],
'slides': [out[f]['qtd_slide'] for f in folds],
'annotations': [out[f]['qtd_annotations'] for f in folds],
})
df_qtd_categorical = pd.DataFrame([
{
'fold': f,
**{
k: v for k, v in out[f]['qtd_annotations_categorical'].items()
if k != Categories.BACKGROUND.name
},
}
for f in folds
])
df_qtd = pd.merge(df_qtd, df_qtd_categorical, on='fold')
df_qtd.loc['total'] = df_qtd.sum()
df_qtd.loc[df_qtd.index == 'total', 'fold'] = 'total'
total_images = df_qtd.loc[df_qtd['fold'] == 'total', 'images'].tolist()[0]
total_ann = df_qtd.loc[df_qtd['fold'] == 'total', 'annotations'].tolist()[0]
df_dist = pd.DataFrame({
'fold': folds,
'% images': [out[f]['qtd_images'] / total_images for f in folds],
'% annotations': [out[f]['qtd_annotations'] / total_ann for f in folds],
})
df_dist_categorical = pd.DataFrame([
{
'fold': f,
**{
f'% {k}': v / out[f]['qtd_annotations']
for k, v in out[f]['qtd_annotations_categorical'].items()
if k != Categories.BACKGROUND.name
},
}
for f in folds
])
df_dist = pd.merge(df_dist, df_dist_categorical, on='fold')
df_area = pd.DataFrame()
for f in folds:
_df = pd.DataFrame([{'category': k, **v.to_dict()} for k, v in out[f]['area_stats'].items()])
_df = _df.set_index('category').transpose()
_df['fold'] = f
df_area = pd.concat([df_area, _df])
return df_qtd, df_dist, df_area
def dataset(
ccagt_path: str,
categories_infos: CategoriesInfos,
dataset_dir: str,
extensions: tuple[str, ...] = ('.jpg', '.png'),
) -> None:
ccagt = read_parquet(ccagt_path)
name = os.path.basename(os.path.normpath(dataset_dir))
images_dir = os.path.join(dataset_dir, STRUCTURE['i'])
masks_dir = os.path.join(dataset_dir, STRUCTURE['m'])
print(f'Dataset name: `{name}` | Location: `{dataset_dir}`')
print(f'From the annotations file ({ccagt_path}) -')
if ccagt.df.shape[0] == 0:
print('There are no annotations!')
else:
desc = ccagt_annotations(ccagt, categories_infos)
print(f'Quantity of images: {desc["qtd_images"]}')
print(f'Quantity of slides: {desc["qtd_slide"]}')
print(f'Quantity of annotations: {desc["qtd_annotations"]}')
for cat_name, qtd in desc['qtd_annotations_categorical'].items():
dist = desc['dist_annotations'][cat_name]
print(f' > Quantity of annotations for {cat_name}: {qtd} - {dist*100:.2f}%')
print('Statistics of the area of each category...')
for cat_name, area_stats in desc['area_stats'].items():
print(f' > Statistics of area for {cat_name}: {area_stats}')
images_quantity = len(find_files(images_dir, extensions, True))
masks_quantity = len(find_files(masks_dir, extensions, True))
print('On disk data -')
print(f'Total of images: {images_quantity} - at `{images_dir}`')
print(f'Total of masks: {masks_quantity} - at `{masks_dir}`')
def categorical_mask(mask: np.ndarray) -> dict[int, int]:
unique, counts = np.unique(mask, return_counts=True)
return dict(zip(unique, counts))
@get_traceback
def single_core_from_mask_files(
filenames: list[str],
) -> dict[int, int]:
if len(filenames) == 0:
raise ValueError('Expected a list of filenames with at least one value.')
out = {cat.value: 0 for cat in Categories}
for filename in filenames:
counts = categorical_mask(
np.asarray(
Image.open(filename).convert('L'),
),
)
out = {k: v + counts[k] if k in counts else v for k, v in out.items()}
return out
def from_mask_files(
masks_dir: str,
extensions: str | tuple[str, ...] = '.png',
selection: set[str] = set(),
) -> dict[str, int]:
all_masks = find_files(masks_dir, extensions, True, selection)
all_filenames = list(all_masks.values())
cpu_num = multiprocessing.cpu_count()
workers = multiprocessing.Pool(processes=cpu_num)
filenames_splitted = np.array_split(all_filenames, cpu_num)
print(
f'Starting to count the pixel quantity for {len(all_filenames)} ({extensions}) files using {cpu_num} cores with '
f'{len(filenames_splitted[0])} files per core...',
)
processes = []
for filenames in filenames_splitted:
if len(filenames) == 0:
continue # pragma: no cover
p = workers.apply_async(single_core_from_mask_files, (filenames.tolist(),))
processes.append(p)
out = {cat.value: 0 for cat in Categories}
for p in processes:
counts = p.get()
out = {k: v + counts[k] if k in counts else v for k, v in out.items()}
n_files = len(all_masks)
print(f'Successfully computed the pixel quantity of each category from {n_files} files with {len(processes)} processes!')
out_by_names = {str(Categories(k).name): int(v) for k, v in out.items()}
return out_by_names
```
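As a quick illustration of the `Statistics` helpers defined in `describe.py`, here is a small sketch with synthetic data (the random images are placeholders):
```python
import numpy as np

from CCAgT_utils.describe import Statistics, from_array, from_list

# Scalar statistics from a flat list of values
s = from_list([1.0, 2.0, 3.0, 4.0])
print(s)  # e.g. "Quantity: 4 | Mean: 2.50 | std: 1.12 | Max: 4.00 | Min: 1.00"

# Per-channel statistics aggregated over two synthetic RGB images
img_a = np.random.randint(0, 256, size=(32, 32, 3), dtype=np.uint8)
img_b = np.random.randint(0, 256, size=(32, 32, 3), dtype=np.uint8)

stats = Statistics()
stats.join_stats(from_array(img_a))  # the first result just initializes the fields
stats.join_stats(from_array(img_b))  # later results are averaged / min-maxed in
print(stats.count)                   # 2
```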
#### File: CCAgT_dataset_utils/CCAgT_utils/main.py
```python
from __future__ import annotations
import argparse
import ast
import os
import shutil
import sys
from typing import Sequence
from CCAgT_utils import describe
from CCAgT_utils import slice
from CCAgT_utils.categories import CategoriesInfos
from CCAgT_utils.categories import read_json
from CCAgT_utils.constants import STRUCTURE
from CCAgT_utils.constants import VERSION
from CCAgT_utils.converters.CCAgT import read_parquet
from CCAgT_utils.converters.utils import ccagt_generate_masks
from CCAgT_utils.prepare import ccagt_dataset
from CCAgT_utils.prepare import extract_image_and_annotations_by_category
from CCAgT_utils.utils import basename
from CCAgT_utils.utils import find_files
def _add_create_subdataset_options(parser: argparse.ArgumentParser) -> None:
parser.add_argument(
'--name',
metavar='SUBDATASET_NAME',
required=True,
help='The name of the subdataset that will be generated',
)
parser.add_argument(
'--original',
metavar='PATH_FOR_ORIGINAL_DATASET',
required=True,
help=(
'Path for the original dataset. It is expected that this directory has the subdirectories '
'`images/` and `masks/`, and `CCAgT.parquet.gzip`'
),
)
parser.add_argument(
'--output',
metavar='PATH_TO_WRITE_THE_SUBDATASETS',
required=True,
help=(
'Path to write the new subdataset. It will create a directory with this subdataset name, and '
'subdirectories `images/` and `masks/`'
),
)
group_edit_image = parser.add_argument_group(
'Define the type of images wanted at the end. If none of these is chosen,'
' the original data will just be copied!',
)
group_ex = group_edit_image.add_mutually_exclusive_group(required=False)
group_ex.add_argument(
'--slice-images',
nargs=2,
metavar='HORIZONTAL_VERTICAL',
type=int,
help=(
'Slice the images into smaller parts. Needs the number of slices '
'desired for the horizontal and vertical split of the images.'
),
)
group_ex.add_argument(
'--extract',
nargs='*',
type=int,
metavar='CATEGORY_ID',
help=(
'Extract sub-images based on one category. Will generate one image for each '
'instance of the desired category.'
),
)
group_extract = parser.add_argument_group('Arguments to use with `--extract` option')
group_extract.add_argument(
'--paddings',
default=0,
help=(
'In percent (float values) or pixels (integer values), the size of the padding to '
'apply. Only works with --extract'
),
)
group_clean = parser.add_argument_group('To clear the images and/or annotations for the subdataset')
group_ex_clean = group_clean.add_mutually_exclusive_group(required=False)
group_ex_clean.add_argument(
'--remove-images-without',
nargs='*',
type=int,
metavar='CATEGORY_ID',
help=(
'Remove from this subdataset the images that do not have the '
'categories passed as argument.'
),
)
group_ex_clean.add_argument(
'--remove-annotations-different',
nargs='*',
type=int,
metavar='CATEGORY_ID',
help=(
'Remove from this subdataset the annotations whose categories '
'differ from the ones passed as argument.'
),
)
check_group = parser.add_argument_group('Process of checking the annotation and images.')
check_group.add_argument(
'--check-if-all-have-at-least-one-of',
nargs='*',
metavar='CATEGORY_ID',
type=int,
help=(
'Check if all images have at least one of the categories passed as '
'argument. Will print information about it.'
),
)
check_group.add_argument(
'--no-check-images',
action='store_false',
help=(
'Will not check if all images in the new dataset have at least one annotation. By default '
'the process checks if all images have at least one annotation.'
),
)
check_group.add_argument(
'--delete',
action='store_true',
help='Will delete images without annotation, or without the chosen categories',
)
group_kwargs = parser.add_argument_group('Extra arguments if desired')
group_kwargs.add_argument(
'--generate-masks',
action='store_true',
help='To generate the masks for semantic segmentation based on the new annotations',
)
group_kwargs.add_argument(
'--labels',
help=(
'Path for the CCAgT file with the labels. By default will look at '
'`PATH_FOR_ORIGINAL_DATASET/CCAgT.parquet.gzip`'
),
)
group_kwargs.add_argument(
'--aux-file',
help='Path for the CCAgT auxiliary file; by default the default Categories Information will be used!',
)
group_kwargs.add_argument(
'--extensions',
nargs='*',
default=('.jpg', '.png'),
help='The extensions to look for when searching for the images and masks.',
)
group_kwargs.add_argument(
'--overwrite',
action='store_true',
help='With this option, if a dataset with this name already exists, it will be overwritten!',
)
def create_subdataset(
*,
name: str,
original_dir: str,
output_base: str,
slice_images: tuple[int, ...] | None,
extract: set[int] | None,
categories_to_keep: tuple[int, set[int]] | None,
categories_to_check: set[int] | None,
delete: bool,
generate_masks: bool,
CCAgT_path: str | None,
paddings: str,
check_if_images_have_annotations: bool,
extensions: tuple[str, ...],
aux_file_path: str | None,
overwrite: bool,
) -> int:
output_dir = os.path.join(output_base, name)
output_images_dir = os.path.join(output_dir, STRUCTURE['i'])
output_masks_dir = os.path.join(output_dir, STRUCTURE['m'])
output_annotations_path = os.path.join(output_dir, STRUCTURE['l'])
if overwrite and os.path.isdir(output_dir):
print(f'Deleting all outdated data from `{output_dir}`...')
shutil.rmtree(output_dir)
elif (os.path.isdir(output_images_dir) or os.path.isdir(output_masks_dir)):
print(f'A dataset with name={name} already exists at {output_base}!', file=sys.stderr)
return 1
if CCAgT_path is None:
CCAgT_path = os.path.join(original_dir, STRUCTURE['l'])
input_images_dir = os.path.join(original_dir, STRUCTURE['i'])
if not os.path.isfile(CCAgT_path):
print(f'The annotations file was not found at `{CCAgT_path}`!', file=sys.stderr)
return 1
if not os.path.isdir(input_images_dir):
print(f'The original data was not found at `{input_images_dir}`!', file=sys.stderr)
return 1
if isinstance(aux_file_path, str):
categories_infos = read_json(aux_file_path)
else:
categories_infos = CategoriesInfos()
print('------------------------')
print(f'Loading the original annotations file from `{CCAgT_path}`...')
ccagt_annotations = read_parquet(CCAgT_path)
if isinstance(categories_to_keep, tuple):
_choice_to_delete, _cats_to_keep = categories_to_keep
if _choice_to_delete == 0:
# --remove-images-without
print(f'Deleting images that do not have at least one annotation with the categories: {_cats_to_keep}')
_idx_with_categories = ccagt_annotations.df['category_id'].isin(_cats_to_keep)
images_with_categories = set(ccagt_annotations.df.loc[_idx_with_categories, 'image_id'].unique())
ccagt_annotations.df = ccagt_annotations.df[ccagt_annotations.df['image_id'].isin(images_with_categories)]
elif _choice_to_delete == 1:
# --remove-annotations-different
print(f'Deleting annotations whose category is not in: {_cats_to_keep}')
ccagt_annotations.df = ccagt_annotations.df[ccagt_annotations.df['category_id'].isin(_cats_to_keep)]
else:
print('Unexpected choice for the type of removal process.', file=sys.stderr)
return 1
else:
print('No removal process chosen, skipping.')
if ccagt_annotations.df.shape[0] == 0:
print('The annotations file has no annotations, finishing the process!', file=sys.stderr)
return 1
os.makedirs(output_dir, exist_ok=True)
print(f'Saving the annotations to `{output_annotations_path}`...')
ccagt_annotations.to_parquet(output_annotations_path)
print('------------------------')
if isinstance(slice_images, tuple):
# --slice-images
print(f'Generating images and annotations by splitting the originals into {slice_images} (horizontal, vertical) parts...')
slice.images_and_annotations(
input_images_dir,
output_annotations_path,
output_dir,
output_annotations_path,
slice_images[0], slice_images[1],
extension=extensions,
look_recursive=True,
)
elif extract is not None:
# --extract
print(f'Creating images and annotations for each instance of the categories {extract}')
print(' > If the categories `Nucleus` or `Overlapped nuclei` are present, the NORs annotations will also be kept.')
extract_image_and_annotations_by_category(
input_images_dir,
output_dir,
extract,
output_annotations_path,
ast.literal_eval(paddings),
extensions,
True,
)
else:
# if not choice between --slice-images and --extract, will just copy
# TODO: copy with multiprocess
print('The images and masks of the subdataset will be copied from the original dataset!')
print('Copying image files...')
shutil.copytree(input_images_dir, output_images_dir)
print('------------------------')
print(f'Loading the annotations file from `{output_annotations_path}`...')
ccagt_annotations = read_parquet(output_annotations_path)
print('------------------------')
ccagt_annotations = ccagt_dataset(ccagt_annotations, categories_infos, do_fit_geometries=False)
print('------------------------')
images_without_the_categories: set[str] = set({})
if isinstance(categories_to_check, set):
# --check-if-all-have-at-least-one-of
print(f'Checking if all images have at least one of the categories {categories_to_check}...')
images_names = set(ccagt_annotations.df['image_name'].unique())
images_names_filtered = set(
ccagt_annotations.df.loc[
ccagt_annotations.df['category_id'].isin(categories_to_check),
'image_name',
].unique(),
)
images_without_the_categories = images_names.difference(images_names_filtered)
if len(images_without_the_categories) > 0:
print(
(
f'A total of {len(images_without_the_categories)} files do not have at least one of the categories '
f'{categories_to_check}'
),
file=sys.stderr,
)
else:
print(f'Everything is OK, 0 files are missing at least one of the categories {categories_to_check}')
images_without_the_annotations: set[str] = set({})
if check_if_images_have_annotations:
# --no-check-images to skip this
print('Checking if there is any image without annotation...')
all_images = {basename(k): v for k, v in find_files(output_images_dir, extensions, True).items()}
images_with_annotation = set(ccagt_annotations.df['image_name'].unique())
images_without_the_annotations = set(all_images.keys()).difference(images_with_annotation)
if len(images_without_the_annotations) > 0:
print(
(f'A total of {len(images_without_the_annotations)} files do not have at least one annotation'),
file=sys.stderr,
)
else:
print('Everything is OK, 0 files are without any annotation')
if delete and (len(images_without_the_annotations) > 0 or len(images_without_the_categories) > 0):
# --delete
if len(images_without_the_categories) > 0:
print(f'Will delete images that do not have at least one of the categories {categories_to_check}')
ccagt_annotations.df = ccagt_annotations.df[
~ccagt_annotations.df['image_name'].isin(images_without_the_categories)
]
if len(images_without_the_annotations) > 0:
print('Will delete images that do not have at least one annotation')
ccagt_annotations.df = ccagt_annotations.df[
~ccagt_annotations.df['image_name'].isin(images_without_the_annotations)
]
basenames_to_delete = set(images_without_the_categories).union(set(images_without_the_annotations))
print(f'Finding the images to delete at `{output_images_dir}`...')
all_images = {basename(k): v for k, v in find_files(output_images_dir, extensions, True).items()}
print(f'Deleting a total of {len(basenames_to_delete)} images...')
for bn in basenames_to_delete:
os.remove(all_images[bn])
print('------------------------')
print(f'Saving the annotations to {output_annotations_path}...')
ccagt_annotations.to_parquet(output_annotations_path)
if generate_masks:
print('------------------------')
print('Generating masks for semantic segmentation...')
ccagt_generate_masks(output_annotations_path, output_masks_dir, True)
print('\n------------------------')
print('Creation of the subdataset finished!')
describe.dataset(output_annotations_path, categories_infos, output_dir, extensions)
return 0
def main(argv: Sequence[str] | None = None) -> int:
argv = argv if argv is not None else sys.argv[1:]
parser = argparse.ArgumentParser(prog='CCAgT_converter')
# https://stackoverflow.com/a/8521644/812183
parser.add_argument('-V', '--version', action='version', version=f'%(prog)s {VERSION}')
subparsers = parser.add_subparsers(dest='command')
create_subdataset_parser = subparsers.add_parser(
'create-subdataset',
help=(
'Based on the original dataset, create a customized version of '
'the dataset with the desired modifications. Examples: slice the '
'images into smaller parts, select just images with some category,'
' or extract images of a specific category.'
),
)
_add_create_subdataset_options(create_subdataset_parser)
help = subparsers.add_parser('help', help='Show help for a specific command.')
help.add_argument('help_cmd', nargs='?', help='Command to show help for.')
if len(argv) == 0:
argv = ['help']
args = parser.parse_args(argv)
if args.command == 'help' and args.help_cmd:
parser.parse_args([args.help_cmd, '--help'])
elif args.command == 'help':
parser.parse_args(['--help'])
if args.command == 'create-subdataset' and args.name != '':
categories_to_keep = None
if args.remove_images_without is not None:
categories_to_keep = (0, set(args.remove_images_without))
elif args.remove_annotations_different is not None:
categories_to_keep = (1, set(args.remove_annotations_different))
categories_to_check = None
if args.check_if_all_have_at_least_one_of is not None:
categories_to_check = set(args.check_if_all_have_at_least_one_of)
slice_images = None if args.slice_images is None else tuple(args.slice_images)
extract = None if args.extract is None else set(args.extract)
return create_subdataset(
name=str(args.name),
original_dir=str(args.original),
output_base=str(args.output),
slice_images=slice_images,
extract=extract,
categories_to_keep=categories_to_keep,
categories_to_check=categories_to_check,
delete=args.delete,
generate_masks=args.generate_masks,
CCAgT_path=args.labels,
paddings=args.paddings,
check_if_images_have_annotations=args.no_check_images,
extensions=tuple(args.extensions),
aux_file_path=args.aux_file,
overwrite=args.overwrite,
)
return 1
if __name__ == '__main__':
raise SystemExit(main())
```
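A hedged example of driving the `create-subdataset` entry point above programmatically; the paths are placeholders and the original dataset is assumed to follow the `STRUCTURE` layout (`images/`, `masks/`, `CCAgT.parquet.gzip`):
```python
from CCAgT_utils.main import main

# Hypothetical paths; with a real dataset this builds a subdataset that keeps
# only annotations of category 1 and regenerates the semantic masks.
exit_code = main([
    'create-subdataset',
    '--name', 'nucleus-only',
    '--original', '/data/CCAgT',
    '--output', '/data/subdatasets',
    '--remove-annotations-different', '1',
    '--generate-masks',
])
print('finished with exit code', exit_code)
```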
#### File: CCAgT_utils/types/annotation.py
```python
from __future__ import annotations
import collections
from copy import copy
from dataclasses import dataclass
from shapely.geometry import MultiPolygon
from shapely.geometry import Polygon
from CCAgT_utils.categories import CategoriesInfos
from CCAgT_utils.types.colors import Color
from CCAgT_utils.types.colors import random_color_from_base
@dataclass
class Annotation:
geometry: Polygon | MultiPolygon
category_id: int
iscrowd: int = 0
color: Color = random_color_from_base(Color(0, 0, 0), 255)
@property
def bbox(self) -> BBox:
return bounds_to_BBox(self.geometry.bounds, self.category_id)
@property
def _geo_type(self) -> str:
return self.geometry.geom_type
@property
def coco_bbox(self) -> list[float | int]:
bbox = self.bbox
return [bbox.x_init, bbox.y_init, bbox.width, bbox.height]
def __iter__(self) -> Annotation:
self._idx = 0
if self._geo_type == 'MultiPolygon':
self._geometries = list(self.geometry.geoms)
elif self._geo_type == 'Polygon':
self._geometries = [self.geometry]
else:
raise TypeError(f'Unexpected geometry type (`{self._geo_type}`) - expected `MultiPolygon` or `Polygon`')
return self
def __next__(self) -> Polygon:
if self._idx < len(self._geometries):
out = self._geometries[self._idx]
self._idx += 1
return out
else:
raise StopIteration
def copy(self) -> Annotation:
return copy(self)
@dataclass
class BBox:
x_init: int
y_init: int
width: int
height: int
category_id: int
@property
def x_end(self) -> int:
return self.x_init + self.width
@property
def y_end(self) -> int:
return self.y_init + self.height
@property
def upper_left_point(self) -> tuple[int, int]:
return (self.x_init, self.y_init)
@property
def upper_right_point(self) -> tuple[int, int]:
return (self.x_end, self.y_init)
@property
def bottom_right_point(self) -> tuple[int, int]:
return (self.x_end, self.y_end)
@property
def bottom_left_point(self) -> tuple[int, int]:
return (self.x_init, self.y_end)
@property
def coords(self) -> list[tuple[int, int]]:
return [
self.upper_left_point,
self.upper_right_point,
self.bottom_right_point,
self.bottom_left_point,
]
@property
def xy(self) -> tuple[list[int], list[int]]:
_x, _y = zip(*self.coords)
return (list(_x), list(_y))
@property
def slice_x(self) -> slice:
return slice(self.x_init, self.x_end)
@property
def slice_y(self) -> slice:
return slice(self.y_init, self.y_end)
def center_point(self) -> tuple[int, int]:
return (
self.x_init + self.width // 2,
self.y_init + self.height // 2,
)
def area(self) -> int | float:
return self.width * self.height
def to_polygon(self) -> Polygon:
return Polygon(self.coords)
def fit_inside(self, bounds: tuple[int, int, int, int]) -> None:
min_x, min_y, max_x, max_y = bounds
self.x_init = min_x if self.x_init < min_x else self.x_init
self.y_init = min_y if self.y_init < min_y else self.y_init
self.width = max_x - self.x_init if self.x_end > max_x else self.width
self.height = max_y - self.y_init if self.y_end > max_y else self.height
def add_padding(self, padding: int | float = 0, bounds: tuple[int, int, int, int] = (0, 0, 0, 0)) -> None:
if padding != 0:
if isinstance(padding, int):
self.width += padding * 2
self.height += padding * 2
self.x_init -= padding
self.y_init -= padding
elif isinstance(padding, float):
self.x_init = int(self.x_init - (self.width * padding))
self.y_init = int(self.y_init - (self.height * padding))
self.width = int(self.width * (1 + padding * 2))
self.height = int(self.height * (1 + padding * 2))
else:
raise TypeError('Unexpected value for the padding! Use int or float values')
self.fit_inside(bounds)
def bounds_to_BBox(bounds: tuple[float], category_id: int) -> BBox:
b = tuple(int(i) for i in bounds)
min_x, min_y, max_x, max_y = b
return BBox(min_x, min_y, max_x - min_x, max_y - min_y, category_id)
def count_BBox_categories(
items: list[BBox],
categories_infos: CategoriesInfos,
) -> dict[str, int]:
c: dict[str, int] = collections.defaultdict(int)
for bbox in items:
cat_name = categories_infos[bbox.category_id].name
c[cat_name] += 1
return c
```
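A short illustration of the `BBox` helpers defined above, with made-up coordinates:
```python
from CCAgT_utils.types.annotation import BBox, bounds_to_BBox

bb = BBox(x_init=10, y_init=20, width=30, height=40, category_id=1)
print(bb.coords)   # [(10, 20), (40, 20), (40, 60), (10, 60)]
print(bb.area())   # 1200

# Grow the box by 5 px on every side, clipped to a 100x100 image
bb.add_padding(5, bounds=(0, 0, 100, 100))
print(bb.to_polygon().bounds)  # (5.0, 15.0, 45.0, 65.0)

# Rebuild an equivalent box from shapely-style bounds
same = bounds_to_BBox((10, 20, 40, 60), category_id=1)
```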
#### File: CCAgT_utils/types/checkers.py
```python
from __future__ import annotations
def is_2d(shape: tuple[int]) -> bool:
"""Verify if the shape is at a 2D shape expected (n X m)
where n and m can be any integer that represents height (rows) and
width (columns) size.
Parameters
----------
shape : tuple
A tuple of the size for each axis. In general from
`np.ndarray.shape`
Returns
-------
bool
True if the shape matches (n x m) - i.e. has just 2 axes -
otherwise False
"""
if len(shape) == 2:
return True
return False
def is_rgb_shape(shape: tuple[int]) -> bool:
"""Verify if the shape is at a RGB shape expected (n X m x 3)
where n and m can be any integer that represents height (rows) and
width (columns) size.
Parameters
----------
shape : tuple
A tuple of the size for each axis. In general from
`np.ndarray.shape`
Returns
-------
bool
True if the shape matches (n x m x 3), otherwise False
"""
if len(shape) == 3:
if shape[-1] == 3:
return True
return False
```
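A quick sanity check of the shape helpers above:
```python
import numpy as np

from CCAgT_utils.types.checkers import is_2d, is_rgb_shape

mask = np.zeros((100, 120), dtype=np.uint8)
image = np.zeros((100, 120, 3), dtype=np.uint8)

assert is_2d(mask.shape) and not is_2d(image.shape)
assert is_rgb_shape(image.shape) and not is_rgb_shape(mask.shape)
```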
#### File: CCAgT_utils/types/colors.py
```python
from __future__ import annotations
from dataclasses import dataclass
import numpy as np
@dataclass
class Color:
Red: int
Green: int
Blue: int
Alpha: int = 255
size: int = 8
def __post_init__(self) -> None:
self._max_value = 2 ** self.size - 1
_error_suf = f'value exceeds the max value for the color size! (bits size={self.size} -> max_value={self._max_value})'
if self.Red > self._max_value:
raise ValueError(f'The Red {_error_suf}')
if self.Green > self._max_value:
raise ValueError(f'The Green {_error_suf}')
if self.Blue > self._max_value:
raise ValueError(f'The Blue {_error_suf}')
if self.Alpha > self._max_value:
raise ValueError(f'The Alpha {_error_suf}')
@property
def rgb(self) -> tuple[int, int, int]:
return (self.Red, self.Green, self.Blue)
@property
def rgba(self) -> tuple[int, int, int, int]:
return (self.Red, self.Green, self.Blue, self.Alpha)
@property
def rgb_normalized(self) -> tuple[float, float, float]:
return (
self.Red / self._max_value,
self.Green / self._max_value,
self.Blue / self._max_value,
)
@property
def rgba_normalized(self) -> tuple[float, float, float, float]:
return (
self.Red / self._max_value,
self.Green / self._max_value,
self.Blue / self._max_value,
self.Alpha / self._max_value,
)
def from_tuple(t: tuple[int, int, int], alpha: int = 255, size: int = 8) -> Color:
return Color(t[0], t[1], t[2], alpha, size)
def hex_to_rgb(hex: str) -> tuple[int, ...]:
hex = hex.lstrip('#')
hlen = len(hex)
return tuple(int(hex[i:i + hlen // 3], 16) for i in range(0, hlen, hlen // 3))
# Based on https://github.com/cocodataset/panopticapi/blob/7bb4655548f98f3fedc07bf37e9040a992b054b0/panopticapi/utils.py#L43
def random_color_from_base(base: Color, max_dist: int = 30) -> Color:
base_rgb = base.rgb
new_color = base_rgb + np.random.randint(low=-max_dist, high=max_dist + 1, size=3)
new_color = np.maximum(0, np.minimum(base._max_value, new_color))
return from_tuple((int(new_color[0]), int(new_color[1]), int(new_color[2])), size=base.size)
```
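Example use of the `Color` helpers above; the hex value is arbitrary:
```python
from CCAgT_utils.types.colors import Color, from_tuple, hex_to_rgb, random_color_from_base

c = Color(21, 62, 125)
print(c.rgb)              # (21, 62, 125)
print(c.rgba_normalized)  # channels scaled to [0, 1] using the 8-bit max value

c2 = from_tuple(hex_to_rgb('#153e7d'))        # build a Color from a hex string
c3 = random_color_from_base(c, max_dist=10)   # small random perturbation of the base color
```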
#### File: CCAgT_dataset_utils/CCAgT_utils/utils.py
```python
from __future__ import annotations
import functools
import os
import traceback
from enum import Enum
from typing import Callable
from typing import TypeVar
from CCAgT_utils.constants import FILENAME_SEP
from CCAgT_utils.constants import STRUCTURE
R = TypeVar('R')
def basename(filename: str, with_extension: bool = False) -> str:
"""From a full filename get the basename with or not with the
extension.
Parameters
----------
filename : str
A full filename
with_extension : bool, optional
Flag to return the basename with extension, if True return
the basename with the file extension, else will return just the
basename, by default False
Returns
-------
str
The basename of the # with or not the file extension
"""
bn = os.path.basename(filename)
if with_extension:
return bn
else:
return os.path.splitext(bn)[0]
def get_traceback(f: Callable[..., R]) -> Callable[..., R]:
"""Decorator for print an error that occurs inside of some process
Parameters
----------
f : Callable
The function that will be decorated, need to be a function called
by a worker.
Returns
-------
Callable
The return of the function if all runs fine
Raises
------
e
Will capture the exception from the process using the `traceback`
print.
"""
@functools.wraps(f)
def wrapper(*args: object, **kwargs: object) -> R:
try:
return f(*args, **kwargs)
except Exception as e:
print('Caught exception in worker thread:')
traceback.print_exc()
raise e
return wrapper
class FILENAME_ITEM(Enum):
slide = 0
tile_id = 1
x_position_raw = 2
y_position_raw = 3
def items_from_filename(filename: str) -> list[str]:
"""From a full filename get the itens/infos at the basename
Parameters
----------
filename : str
A full filename to an image or mask of CCAgT dataset
Returns
-------
list
A list with the 4 pieces of information contained in the basename
"""
bn = basename(filename)
items = bn.split(FILENAME_SEP)
return items
def slide_from_filename(filename: str) -> str:
"""Based on a filename get the slide ID information
Parameters
----------
filename : str
A full filename to an image or mask of CCAgT dataset
Returns
-------
str
The slide ID of the filename
"""
return items_from_filename(filename)[FILENAME_ITEM.slide.value]
def find_files(
dir_path: str,
extension: str | tuple[str, ...],
look_recursive: bool = False,
selection: set[str] = set(),
) -> dict[str, str]:
"""Find all files into at the path and subdirectories
Parameters
----------
dir_path : str
Path of the base directory to look
extension : str | tuple[str]
Extension of the dessired files
Returns
-------
dict[str, str]
A dict with the filename as key and the relative path for the
file
"""
if look_recursive:
files = {
file: os.path.join(path, file) for path, _, files in os.walk(dir_path) for file in files
if file.endswith(extension) and (len(selection) == 0 or file in selection)
}
else:
files = {
file: os.path.join(dir_path, file) for file in os.listdir(dir_path)
if file.endswith(extension) and (len(selection) == 0 or file in selection)
}
return files
def create_structure(dir_path: str, slides: set[str]) -> None:
dir_images = os.path.join(dir_path, STRUCTURE['i'])
dir_masks = os.path.join(dir_path, STRUCTURE['m'])
for slide in slides:
os.makedirs(os.path.join(dir_images, slide), exist_ok=True)
os.makedirs(os.path.join(dir_masks, slide), exist_ok=True)
```
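An illustrative use of the filename helpers above. The example path assumes the CCAgT naming convention `<slide>_<tile>_<x>_<y>` with an underscore as `FILENAME_SEP`; both the path and that assumption are placeholders:
```python
from CCAgT_utils.utils import basename, find_files, slide_from_filename

fn = '/data/CCAgT/images/A/A_11_000_000.jpg'  # hypothetical file path
print(basename(fn))             # 'A_11_000_000'
print(basename(fn, True))       # 'A_11_000_000.jpg'
print(slide_from_filename(fn))  # 'A' (assuming FILENAME_SEP == '_')

# dict: filename (with extension) -> full path
jpgs = find_files('/data/CCAgT/images', '.jpg', look_recursive=True)
print(len(jpgs), 'images found')
```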
#### File: CCAgT_utils/visualization/main.py
```python
from __future__ import annotations
import argparse
import sys
from typing import Sequence
from CCAgT_utils import categories
from CCAgT_utils.converters import CCAgT
from CCAgT_utils.visualization import _show
def main(argv: Sequence[str] | None = None) -> int:
argv = argv if argv is not None else sys.argv[1:]
parser = argparse.ArgumentParser(prog='CCAgT_visualization')
subparsers = parser.add_subparsers(dest='command')
show_parser = subparsers.add_parser('show', help='To show the image with the bounding boxes.')
show_parser.add_argument(
'-l',
'--labels-file',
help='Path for the CCAgT file with the labels.',
required=True,
)
show_parser.add_argument(
'-a',
'--aux-file',
help='Path for the categories auxiliary/helper file. A JSON file is expected.',
required=True,
metavar='HELPER_FILE_PATH',
)
show_parser.add_argument(
'-t',
'--plot-type',
help='The type of plots desired.',
default='image-with-boxes',
choices=['image-with-boxes', 'image-and-mask', 'image-with-boxes-and-mask'],
)
show_parser.add_argument(
'-i',
'--images-names',
help='Filenames of the images to plot. If nothing is passed, all images will be plotted.',
default=[],
nargs='+',
)
show_parser.add_argument(
'-d',
'--dir-path',
help='Path for a directory that has the images.',
default='./',
)
show_parser.add_argument(
'-m',
'--dir-masks-path',
help='Path for a directory that has the masks.',
default='./',
)
show_parser.add_argument(
'-s',
'--shuffle-images',
help='To shuffle the image order',
default=True,
)
show_parser.add_argument(
'-e',
'--image-extension',
help='Define the extension file of the images.',
default='.jpg',
)
show_parser.add_argument(
'--mask-extension',
help='Define the extension file of the masks.',
default='.png',
)
show_parser.add_argument(
'-r',
'--look-recursive',
help='Define whether to look into the subdirectories of --dir-path to find the images.',
default=True,
)
help = subparsers.add_parser('help', help='Show help for a specific command.')
help.add_argument('help_cmd', nargs='?', help='Command to show help for.')
if len(argv) == 0:
argv = ['show']
args = parser.parse_args(argv)
if args.command == 'help' and args.help_cmd:
parser.parse_args([args.help_cmd, '--help'])
elif args.command == 'help':
parser.parse_args(['--help'])
if args.command == 'show' and (args.labels_file != '' and args.aux_file != ''):
CCAgT_ann = CCAgT.read_parquet(args.labels_file)
CCAgT_helper = categories.read_json(args.aux_file)
if args.plot_type == 'image-with-boxes' and args.dir_path != '':
return _show.image_with_boxes(
CCAgT_ann,
CCAgT_helper,
args.dir_path,
args.image_extension,
args.images_names,
args.shuffle_images,
args.look_recursive,
)
elif args.plot_type == 'image-and-mask' and args.dir_path != '':
return _show.image_and_mask(
CCAgT_ann,
CCAgT_helper,
args.dir_path,
args.dir_masks_path,
args.image_extension,
args.mask_extension,
args.images_names,
args.shuffle_images,
args.look_recursive,
)
elif args.plot_type == 'image-with-boxes-and-mask' and args.dir_path != '':
return _show.image_with_boxes_and_mask(
CCAgT_ann,
CCAgT_helper,
args.dir_path,
args.dir_masks_path,
args.image_extension,
args.mask_extension,
args.images_names,
args.shuffle_images,
args.look_recursive,
)
return 1
if __name__ == '__main__':
raise SystemExit(main())
```
#### File: CCAgT_utils/visualization/plot.py
```python
from __future__ import annotations
from typing import Any
import matplotlib.pyplot as plt
import numpy as np
import PIL.Image
from matplotlib import patches
from matplotlib import patheffects
from CCAgT_utils.categories import CategoriesInfos
from CCAgT_utils.types.annotation import BBox
from CCAgT_utils.types.mask import Mask
def bbox(
boxes: list[BBox],
ax: plt.pyplot.Axes,
categories_infos: CategoriesInfos,
fontsize: int = 8,
write_names: bool = True,
without_text: set[int] = set({}),
) -> plt.pyplot.Axes:
for bb in boxes:
ax.add_patch(
patches.Rectangle(
(bb.x_init, bb.y_init),
bb.width,
bb.height,
fill=False,
edgecolor=categories_infos[bb.category_id].color.rgba_normalized,
lw=1,
),
)
if write_names and bb.category_id not in without_text:
ax.text(
bb.x_init,
bb.y_init,
categories_infos[bb.category_id].name,
verticalalignment='top',
color=categories_infos[bb.category_id].color.rgba_normalized,
clip_on=True,
fontsize=fontsize,
).set_path_effects([
patheffects.Stroke(linewidth=4, foreground='black'),
patheffects.Normal(),
])
return ax
def image_with_boxes(
image: np.ndarray | PIL.Image.Image,
boxes: list[BBox],
ax: plt.pyplot.Axes,
categories_infos: CategoriesInfos,
**kwargs: Any
) -> plt.pyplot.Axes:
ax.imshow(image)
ax = bbox(boxes, ax, categories_infos, **kwargs)
ax.set_axis_off()
return ax
def mask_with_color(
mask: Mask,
ax: plt.pyplot.Axes,
categories_infos: CategoriesInfos,
colorized: bool = False,
**kwargs: Any
) -> plt.pyplot.Axes:
if colorized:
msk_rgb = mask.colorized(categories_infos)
ax.imshow(msk_rgb, **kwargs)
else:
mask_categories = mask.unique_ids
ax.imshow(
mask.categorical,
cmap=mask.cmap(categories_infos),
vmax=max(mask_categories),
vmin=min(mask_categories),
interpolation='nearest',
**kwargs
)
ax.set_axis_off()
return ax
def create_handles(
categories_infos: CategoriesInfos,
selected_categories: set[int] = set({}),
) -> list[patches.Patch]:
if len(selected_categories) > 0:
categories_id = selected_categories
else:
categories_id = set(categories_infos.keys())
return [
patches.Patch(
color=categories_infos.get_color(cat_id).rgba_normalized,
label=categories_infos.get_name(cat_id),
) for cat_id in categories_id
]
```
#### File: tests/converters/labelbox_annotations_test.py
```python
from __future__ import annotations
import numpy as np
import pandas as pd
import pytest
from shapely.geometry import Point
from shapely.geometry import Polygon
from CCAgT_utils.converters.LabelBox import LabelBox
def test_labelbox_to_dataFrame(lbb_ann, lbb_raw_sample_complete):
raw_df = pd.DataFrame(lbb_raw_sample_complete)
assert lbb_ann.raw_dataframe.equals(raw_df)
assert lbb_ann.raw_dataframe.shape[0] == len(lbb_raw_sample_complete)
def test_init_without_raw():
with pytest.raises(ValueError):
LabelBox(raw_labelbox=None)
def test_init_without_categories_map(lbb_raw_sample_complete):
lbb_ann = LabelBox(lbb_raw_sample_complete)
assert lbb_ann.categories_map is None
def test_init_without_expected_data():
with pytest.raises(KeyError):
LabelBox(raw_labelbox=[{'ID': 'a2', 'External ID': 'tmp/A_xxx'}])
def test_instance_categories_map(lbb_ann, categories_aux_data):
assert lbb_ann.categories_map == categories_aux_data
def test_labelbox_object_to_shapely():
obj = {'polygon': [{'x': 10, 'y': 10}, {'x': 20, 'y': 20}, {'x': 25, 'y': 15}, {'x': 15, 'y': 10}]}
pol_list = [(p['x'], p['y']) for p in obj['polygon']]
pol = Polygon(pol_list)
assert LabelBox.labelbox_to_shapely(obj) == pol
obj_nan = {'multiline': [{'x': 1, 'y': 1}]}
assert np.isnan(LabelBox.labelbox_to_shapely(obj_nan))
def test_labelbox_object_to_shapely_point():
obj = {'point': {'x': 10, 'y': 10}}
point = Point([10, 10])
assert LabelBox.labelbox_to_shapely(obj) == point
def test_to_CCAgT(lbb_ann, lbb_raw_expected_ccagt_df):
ccagt_ann = lbb_ann.to_CCAgT()
assert ccagt_ann.df.equals(lbb_raw_expected_ccagt_df)
def test_to_CCAgT_check_categories_maps(lbb_ann, categories_aux_data, lbb_raw_expected_ccagt_df):
ccagt_ann = lbb_ann.to_CCAgT(categories_aux_data)
assert ccagt_ann.df.equals(lbb_raw_expected_ccagt_df)
ccagt_ann = lbb_ann.to_CCAgT({})
assert ccagt_ann.df.equals(lbb_raw_expected_ccagt_df)
lbb_ann.categories_map = None
ccagt_ann = lbb_ann.to_CCAgT(categories_aux_data)
assert ccagt_ann.df.equals(lbb_raw_expected_ccagt_df)
lbb_ann.categories_map = None
with pytest.raises(ValueError):
lbb_ann.to_CCAgT(None)
lbb_ann.categories_map = None
with pytest.raises(ValueError):
lbb_ann.to_CCAgT('a')
def test_to_CCAgT_with_duplicated_image(
lbb_ann,
categories_aux_data,
lbb_raw_single_satellite,
lbb_raw_expected_ccagt_df,
):
sample = lbb_raw_single_satellite.copy()
sample.update(ID='otherID-99x')
lbb_ann.raw_labelbox.append(sample)
ccagt_ann = lbb_ann.to_CCAgT(categories_aux_data)
assert ccagt_ann.df.equals(lbb_raw_expected_ccagt_df)
sample = lbb_raw_single_satellite.copy()
sample.update(ID='otherID-99x', Reviews=[{'score': 0, 'labelId': 'otherID-99x'}])
lbb_ann.raw_labelbox.append(sample)
ccagt_ann = lbb_ann.to_CCAgT(categories_aux_data)
assert ccagt_ann.df.equals(lbb_raw_expected_ccagt_df)
sample = lbb_raw_single_satellite.copy()
del sample['Label']['objects'][0]['point']
lbb_ann.raw_labelbox = [sample]
with pytest.raises(ValueError):
lbb_ann.to_CCAgT(categories_aux_data)
```
#### File: tests/types/annotation_test.py
```python
from __future__ import annotations
import copy
import pytest
from shapely.geometry import MultiPolygon
from shapely.geometry import Polygon
from CCAgT_utils.categories import CategoriesInfos
from CCAgT_utils.types import annotation
@pytest.fixture
def bbox_params():
x = {
'x_init': 100,
'y_init': 500,
'width': 35,
'height': 88,
'category_id': 1,
}
x['x_end'] = x['x_init'] + x['width']
x['y_end'] = x['y_init'] + x['height']
return x
@pytest.fixture
def bbox_example(bbox_params):
params = bbox_params.copy()
del params['x_end']
del params['y_end']
return annotation.BBox(**params)
def test_x_end(bbox_example, bbox_params):
assert bbox_example.x_end == bbox_params['x_end']
def test_y_end(bbox_example, bbox_params):
assert bbox_example.y_end == bbox_params['y_end']
def test_upper_left_point(bbox_example, bbox_params):
assert bbox_example.upper_left_point == (bbox_params['x_init'], bbox_params['y_init'])
def test_bottom_left_point(bbox_example, bbox_params):
assert bbox_example.bottom_left_point == (bbox_params['x_init'], bbox_params['y_end'])
def test_upper_right_point(bbox_example, bbox_params):
assert bbox_example.upper_right_point == (bbox_params['x_end'], bbox_params['y_init'])
def test_bottom_right_point(bbox_example, bbox_params):
assert bbox_example.bottom_right_point == (bbox_params['x_end'], bbox_params['y_end'])
def test_coords(bbox_example, bbox_params):
x_init = bbox_params['x_init']
y_init = bbox_params['y_init']
x_end = bbox_params['x_end']
y_end = bbox_params['y_end']
assert bbox_example.coords == [
(x_init, y_init), (x_end, y_init),
(x_end, y_end), (x_init, y_end),
]
def test_xy(bbox_example, bbox_params):
x_init = bbox_params['x_init']
y_init = bbox_params['y_init']
x_end = bbox_params['x_end']
y_end = bbox_params['y_end']
assert bbox_example.xy == (
[x_init, x_end, x_end, x_init],
[y_init, y_init, y_end, y_end],
)
def test_center_point(bbox_example, bbox_params):
x_center = bbox_params['x_init'] + (bbox_params['width']) // 2
y_center = bbox_params['y_init'] + (bbox_params['height']) // 2
assert bbox_example.center_point() == (x_center, y_center)
def test_area(bbox_example, bbox_params):
assert bbox_example.area() == bbox_params['width'] * bbox_params['height']
def test_to_polygon(bbox_example, bbox_params):
x_init = bbox_params['x_init']
y_init = bbox_params['y_init']
x_end = bbox_params['x_end']
y_end = bbox_params['y_end']
coords = [
(x_init, y_init), (x_end, y_init),
(x_end, y_end), (x_init, y_end),
]
p = Polygon(coords)
assert bbox_example.to_polygon().equals(p)
def test_bounds_to_BBox(bbox_example, bbox_params):
bounds = (
bbox_params['x_init'], bbox_params['y_init'],
bbox_params['x_end'], bbox_params['y_end'],
)
assert bbox_example == annotation.bounds_to_BBox(bounds, bbox_params['category_id'])
def test_slices(bbox_example, bbox_params):
assert bbox_example.slice_y == slice(bbox_params['y_init'], bbox_params['y_end'])
assert bbox_example.slice_x == slice(bbox_params['x_init'], bbox_params['x_end'])
def test_count_BBox_categories(bbox_example):
cat_id_example = bbox_example.category_id
bbox_example1 = copy.copy(bbox_example)
bbox_example1.category_id = cat_id_example + 1
cat_id_example1 = bbox_example1.category_id
items = [
bbox_example, bbox_example, bbox_example,
bbox_example1, bbox_example1,
]
categories_infos = CategoriesInfos([
{'name': 'Nucleus', 'id': cat_id_example, 'color': (0, 0, 0)},
{'name': 'Cluster', 'id': cat_id_example1, 'color': (0, 0, 0)},
{'name': 'Satellite', 'id': cat_id_example1 + 1, 'color': (0, 0, 0)},
])
counter = annotation.count_BBox_categories(items, categories_infos)
assert counter == {'Nucleus': 3, 'Cluster': 2}
def test_fit_inside(bbox_example, bbox_params):
x_init, y_init, x_end, y_end = (
bbox_params['x_init'], bbox_params['y_init'],
int(bbox_params['x_end'] * 0.8), int(bbox_params['y_end'] * 0.8),
)
bbox_example.fit_inside((x_init, y_init, x_end, y_end))
assert bbox_example.x_init == x_init
assert bbox_example.y_init == y_init
assert bbox_example.x_end == x_end
assert bbox_example.y_end == y_end
def test_add_padding(bbox_example):
bbox_example1 = copy.copy(bbox_example)
bbox_example1.add_padding()
assert bbox_example1.coords == bbox_example.coords
with pytest.raises(TypeError):
bbox_example.add_padding('1')
@pytest.mark.parametrize('padding', [1, 5, 50, 5000])
def test_add_padding_in_pixel(bbox_example, bbox_params, padding):
x_init_expected = bbox_params['x_init'] - padding
y_init_expected = bbox_params['y_init'] - padding
x_end_expected = bbox_params['x_end'] + padding
y_end_expected = bbox_params['y_end'] + padding
bbox_example.add_padding(
padding, (
x_init_expected - 100,
y_init_expected - 100,
x_end_expected + 100,
y_end_expected + 100,
),
)
assert bbox_example.x_init == x_init_expected
assert bbox_example.y_init == y_init_expected
assert bbox_example.x_end == x_end_expected
assert bbox_example.y_end == y_end_expected
@pytest.mark.parametrize('padding', [.1, .5, 5.0, 50.00])
def test_add_padding_in_percentage(bbox_example, bbox_params, padding):
x_init_expected = int(bbox_params['x_init'] - (bbox_params['width'] * padding))
y_init_expected = int(bbox_params['y_init'] - (bbox_params['height'] * padding))
x_end_expected = int(bbox_params['x_end'] + (bbox_params['width'] * padding))
y_end_expected = int(bbox_params['y_end'] + (bbox_params['height'] * padding))
bbox_example.add_padding(
padding, (
x_init_expected - 100,
y_init_expected - 100,
x_end_expected + 100,
y_end_expected + 100,
),
)
assert bbox_example.x_init == x_init_expected
assert bbox_example.y_init == y_init_expected
assert bbox_example.x_end == x_end_expected
assert bbox_example.y_end == y_end_expected
def test_annotation_bbox(nucleus_ex):
ann = annotation.Annotation(nucleus_ex, 1)
assert ann.bbox.to_polygon().equals(nucleus_ex)
def test_annotation_geo_type(nucleus_ex):
ann = annotation.Annotation(nucleus_ex, 1)
geo_type = ann._geo_type
assert geo_type == 'Polygon'
def test_annotation_coco_bbox(nucleus_ex):
ann = annotation.Annotation(nucleus_ex, 1)
min_x, min_y, max_x, max_y = nucleus_ex.bounds
assert ann.coco_bbox == [min_x, min_y, int(max_x) - int(min_x), int(max_y) - int(min_y)]
def test_annotation_iter(nucleus_ex, cluster_ex):
ann = annotation.Annotation(nucleus_ex, 1)
assert len([geo for geo in ann]) == 1
mult_p = MultiPolygon([nucleus_ex, cluster_ex])
ann = annotation.Annotation(mult_p, 1)
assert len([geo for geo in ann]) == 2
def test_annotation_iter_wrong_geo(satellite_ex):
with pytest.raises(TypeError):
iter(annotation.Annotation(satellite_ex, 3))
```
#### File: tests/visualization/conftest.py
```python
from __future__ import annotations
import matplotlib.pyplot as plt
import pytest
from CCAgT_utils.types.annotation import BBox
@pytest.fixture
def boxes():
return [
BBox(10, 200, 50, 125, 1),
BBox(20, 290, 10, 10, 2),
BBox(50, 250, 8, 9, 3),
]
@pytest.fixture
def remove_plt_show(monkeypatch):
monkeypatch.setattr(plt, 'show', lambda: None)
```
#### File: tests/visualization/plot_test.py
```python
from __future__ import annotations
import matplotlib.pyplot as plt
import pytest
from matplotlib.testing.decorators import image_comparison
from CCAgT_utils.visualization import plot
@pytest.mark.slow
@image_comparison(baseline_images=['boxes_plot'], extensions=['png'])
def test_bbox(boxes, categories_infos, shape):
plt.close('all')
_, ax = plt.subplots()
ax.set_xlim([0, shape[1]])
ax.set_ylim([shape[0], 0])
ax.set_axis_off()
plot.bbox(boxes, ax, categories_infos, write_names=True, without_text=set({3}))
@pytest.mark.slow
@image_comparison(baseline_images=['image_with_boxes_plot'], extensions=['png'])
def test_image_with_boxes(rgb_image, boxes, categories_infos):
plt.close('all')
_, ax = plt.subplots()
plot.image_with_boxes(rgb_image, boxes, ax, categories_infos, write_names=False)
@pytest.mark.slow
@image_comparison(baseline_images=['mask_with_color_plot'], extensions=['png'])
def test_mask_with_color_cmap(mask, categories_infos):
plt.close('all')
_, ax = plt.subplots(figsize=(16, 9), dpi=300)
plot.mask_with_color(mask, ax, categories_infos)
@pytest.mark.slow
@image_comparison(baseline_images=['mask_with_color_plot'], extensions=['png'])
def test_mask_with_color_colorized(mask, categories_infos):
plt.close('all')
_, ax = plt.subplots(figsize=(16, 9), dpi=300)
plot.mask_with_color(mask, ax, categories_infos, colorized=True)
def test_create_handles(categories_infos):
handles1 = plot.create_handles(categories_infos)
assert len(handles1) == len(categories_infos)
assert handles1[0].get_label() == categories_infos[0].name
assert tuple(handles1[0].get_edgecolor()) == categories_infos[0].color.rgba_normalized
selected_categories = [2, 3]
handles2 = plot.create_handles(categories_infos, selected_categories)
assert len(handles2) == len(selected_categories)
assert handles2[0].get_label() == categories_infos[2].name
assert tuple(handles2[1].get_edgecolor()) == categories_infos[3].color.rgba_normalized
```
#### File: tests/visualization/_show_test.py
```python
from __future__ import annotations
import shutil
import pytest
from CCAgT_utils.visualization import _show
from testing import create
@pytest.mark.slow
def test_image_and_mask(remove_plt_show, ccagt_ann_multi, categories_infos, shape):
names = ccagt_ann_multi.df['image_name'].unique()
with create.ImageMaskFiles(shape[0], shape[1], names) as paths:
_, mask_dir, image_dir = paths
_params = {
'CCAgT_ann': ccagt_ann_multi,
'categories_infos': categories_infos,
'dir_path': image_dir,
'dir_mask_path': mask_dir,
'images_extension': '.jpg',
'masks_extension': '.png',
}
out = _show.image_and_mask(**_params, images_names=['example'])
out1 = _show.image_and_mask(**_params)
out2 = _show.image_and_mask(**_params, look_recursive=False, shuffle_images=False)
assert out == 0
assert out1 == 0
assert out2 == 0
@pytest.mark.slow
def test_image_and_mask_not_found(capsys, ccagt_ann_multi, categories_infos, shape):
names = ccagt_ann_multi.df['image_name'].unique()
with create.ImageMaskFiles(shape[0], shape[1], names) as paths:
_, mask_dir, image_dir = paths
_params = {
'CCAgT_ann': ccagt_ann_multi,
'categories_infos': categories_infos,
'dir_path': image_dir,
'dir_mask_path': mask_dir,
'images_extension': '.jpg',
'masks_extension': '.png',
}
shutil.rmtree(mask_dir)
out1 = _show.image_and_mask(**_params, look_recursive=False, shuffle_images=False)
_, err1 = capsys.readouterr()
shutil.rmtree(image_dir)
out2 = _show.image_and_mask(**_params, look_recursive=False, shuffle_images=False)
_, err2 = capsys.readouterr()
assert out1 == 0
assert 'Not found the mask' in err1
assert out2 == 0
assert 'Not found the image' in err2
@pytest.mark.slow
def test_image_with_boxes(ccagt_ann_multi, categories_infos, shape, remove_plt_show):
names = ccagt_ann_multi.df['image_name'].unique()
with create.ImageMaskFiles(shape[0], shape[1], names, create_mask=False) as paths:
_, _, image_dir = paths
_params = {
'CCAgT_ann': ccagt_ann_multi,
'categories_infos': categories_infos,
'dir_path': image_dir,
'images_extension': '.jpg',
}
out = _show.image_with_boxes(**_params)
out1 = _show.image_with_boxes(**_params, images_names=['example'])
out2 = _show.image_with_boxes(**_params, look_recursive=False, shuffle_images=False)
shutil.rmtree(image_dir)
assert out == 0
assert out1 == 0
assert out2 == 0
@pytest.mark.slow
def test_image_with_boxes_not_found(capsys, ccagt_ann_multi, categories_infos, shape, remove_plt_show):
names = ccagt_ann_multi.df['image_name'].unique()
with create.ImageMaskFiles(shape[0], shape[1], names, create_mask=False) as paths:
_, _, image_dir = paths
_params = {
'CCAgT_ann': ccagt_ann_multi,
'categories_infos': categories_infos,
'dir_path': image_dir,
'images_extension': '.jpg',
}
shutil.rmtree(image_dir)
out = _show.image_with_boxes(**_params, look_recursive=False, shuffle_images=False)
_, err = capsys.readouterr()
assert out == 0
assert 'Not found the image' in err
@pytest.mark.slow
def test_image_with_boxes_and_mask(remove_plt_show, ccagt_ann_multi, categories_infos, shape):
names = ccagt_ann_multi.df['image_name'].unique()
with create.ImageMaskFiles(shape[0], shape[1], names) as paths:
_, mask_dir, image_dir = paths
_params = {
'CCAgT_ann': ccagt_ann_multi,
'categories_infos': categories_infos,
'dir_path': image_dir,
'dir_mask_path': mask_dir,
'images_extension': '.jpg',
'masks_extension': '.png',
}
out = _show.image_with_boxes_and_mask(**_params, images_names=['example'])
out1 = _show.image_with_boxes_and_mask(**_params)
out2 = _show.image_with_boxes_and_mask(**_params, look_recursive=False, shuffle_images=False)
assert out == 0
assert out1 == 0
assert out2 == 0
def test_image_with_boxes_and_mask_not_found(capsys, ccagt_ann_multi, categories_infos, shape):
names = ccagt_ann_multi.df['image_name'].unique()
with create.ImageMaskFiles(shape[0], shape[1], names) as paths:
_, mask_dir, image_dir = paths
_params = {
'CCAgT_ann': ccagt_ann_multi,
'categories_infos': categories_infos,
'dir_path': image_dir,
'dir_mask_path': mask_dir,
'images_extension': '.jpg',
'masks_extension': '.png',
}
shutil.rmtree(mask_dir)
out1 = _show.image_with_boxes_and_mask(**_params, look_recursive=False, shuffle_images=False)
_, err1 = capsys.readouterr()
shutil.rmtree(image_dir)
out2 = _show.image_with_boxes_and_mask(**_params, look_recursive=False, shuffle_images=False)
_, err2 = capsys.readouterr()
assert out1 == 0
assert 'Not found the mask' in err1
assert out2 == 0
assert 'Not found the image' in err2
``` |
{
"source": "johnnv1/compensator_projects",
"score": 2
} |
#### File: Make-Controls/core/catrf.py
```python
from .misc import *
def catrf(e_esp, Kp_MA, G_MA, MFd, MFseg, phase_MA, wout_MA):
"""
e_esp : expected steady-state error
Kp_MA : open-loop gain of the plant
G_MA : open-loop transfer function of the plant
MFd : desired phase margin
MFseg : safety phase margin
phase_MA: Bode response (a list) of the open-loop plant phase
wout_MA : Bode response (a list) of the open-loop plant frequencies
"""
# Determine the controller gain
Kc = get_kc_rf(e_esp, Kp_MA)
print(f"Kc = {Kc}")
print("*********************************************\n")
# Check the system behavior with only the controller gain
Cat = Kc
Gma_Cat = G_MA*Cat
bode(G_MA)
[mag_Cat,phase_Cat,wout_Cat] = bode(Gma_Cat)
print("*********************************************\n")
# Determine the location of the compensator frequency response (FR), Wcd
Wcd = get_Wcd(phase_MA, MFd, MFseg, wout_MA)
print(f"Wcd = {Wcd}")
print("*********************************************\n")
# Determine the compensator parameter a
a = get_a_at(mag_Cat, wout_MA, Wcd)
print(f"a = {a}")
print("*********************************************\n")
# Determine the compensator parameter T
T = get_T_at(Wcd)
print(f"T = {T}")
print("*********************************************\n")
# Assemble the controller with the determined parameters
numC = np.array([T, 1], dtype=float)
denC = np.array([T*a, 1], dtype=float)
C = tf(float(Kc)*numC, denC) # Lag controller
print(f"Controlador em atraso = Kc * (T*s+1)/(T*a*s+1) = ")
print(f"\t= {Kc} * ({round(T,2)}*s+1)/({round(T,2)}*{round(a,2)}*s+1) = \t{C}")
# Plot the locations of the controller poles and zeros
#plot_c(polesDominant, zero_c, polo_c)
# Return the controller
return C
```
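A hedged usage sketch of the lag ("atraso") design routine above. The plant, the desired steady-state error and the phase margins are made-up example values, and the import path `core.catrf` assumes the `Make-Controls/core` package is importable; the helper functions (`get_kc_rf`, `get_Wcd`, ...) come from `core.misc`:
```python
from control.matlab import bode, dcgain, tf

from core.catrf import catrf

G_MA = tf([10.0], [1.0, 3.0, 2.0])      # example open-loop plant
mag_MA, phase_MA, wout_MA = bode(G_MA)  # open-loop frequency response
Kp_MA = dcgain(G_MA)                    # open-loop (position) gain

C = catrf(
    e_esp=0.05,     # desired steady-state error
    Kp_MA=Kp_MA,
    G_MA=G_MA,
    MFd=50,         # desired phase margin (assumed to be in degrees)
    MFseg=5,        # safety margin added on top
    phase_MA=phase_MA,
    wout_MA=wout_MA,
)
```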
#### File: Make-Controls/core/cavrf.py
```python
from .misc import *
def cavrf(e_esp, Kp_MA, G_MA, MFd, MFseg, mag_MA, wout_MA):
"""
e_esp : expected steady-state error
Kp_MA : open-loop gain of the plant
G_MA : open-loop transfer function of the plant
MFd : desired phase margin
MFseg : safety phase margin
mag_MA : Bode response (a list) of the open-loop plant magnitude
wout_MA : Bode response (a list) of the open-loop plant frequencies
"""
# Determine the controller gain
Kc = get_kc_rf(e_esp, Kp_MA)
print(f"Kc = {Kc}")
print("*********************************************\n")
# Check the system behavior with only the controller gain
Cav = Kc
Gma_Cav = G_MA*Cav
bode(G_MA)
bode(Gma_Cav)
[gm, pm, wcg, wcp] = margin(Gma_Cav) # Check the system PM and GM (PM = phase margin, GM = gain margin)
MFkc = pm # phase margin after applying Kc
phiMax = MFd - MFkc + MFseg # in degrees
print(f"φ_max = {phiMax}")
print("*********************************************\n")
# Determine the compensator parameter a
a = get_a_av(phiMax, deg=True)
print(f"a = {a}")
print("*********************************************\n")
# Determine the location of the compensator frequency response (FR), Wm
C_jWm, Wm = get_Wm(Kc,a,mag_MA, wout_MA)
print(f"C(jWm) = {C_jWm}")
print(f"Wm = {Wm}")
print("*********************************************\n")
    # Determine the compensator parameter T
T = get_T_av(a, Wm)
print(f"T = {T}")
print("*********************************************\n")
    # Build the controller from the computed parameters
numC = np.array([T, 1], dtype=float)
denC = np.array([T*a, 1], dtype=float)
    C = tf(float(Kc)*numC, denC)  # lead controller
    print("Lead controller = Kc * (T*s+1)/(T*a*s+1) =")
    print(f"\t= {Kc} * ({round(T,2)}*s+1)/({round(T,2)}*{round(a,2)}*s+1) = \t{C}")
    # Plot the controller pole/zero locations
    #plot_c(polesDominant, zero_c, polo_c)
    # Return the controller
return C
```
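The lead design follows the same pattern but consumes the magnitude data instead of the phase data (again, the plant, specs, and import path are assumptions for illustration):
```python
from control.matlab import tf, bode
from core.cavrf import cavrf  # hypothetical import path

G_MA = tf([2], [1, 3, 2])               # illustrative plant
mag_MA, phase_MA, wout_MA = bode(G_MA)
C_lead = cavrf(e_esp=0.05, Kp_MA=1.0, G_MA=G_MA, MFd=50, MFseg=5,
               mag_MA=mag_MA, wout_MA=wout_MA)
```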
#### File: Make-Controls/core/misc.py
```python
from control.matlab import *  # control-systems library
import numpy as np  # numerical/math functions
import matplotlib as mpl
import matplotlib.pyplot as plt  # for plotting
from sympy import *  # symbolic variables and equation solving
# ---------------------------------------------------------------------------------------------------------------------------------------------------------------------
# Helper functions for evaluating the controllers
def testControl(G_MA, C, step, ramp, t, dist_p, dist_n, stepInfoB = True):
    # Inspect the behaviour of the system with a given controller
print("<NAME>")
CG_MA = C * G_MA
print(f"C(s) * G(s) = {CG_MA}")
print("*********************************************")
print("<NAME>")
G_MF = feedback(CG_MA, 1)
print(f"Gmf = {G_MF}")
print("*********************************************")
    # Check the system's step-response information
if stepInfoB:
infoG_MF = stepinfo(G_MF)
print(f"stepinfo: ")
for info in infoG_MF:
print(f"\t{info}: {infoG_MF[info]}")
    # Check the frequency response
    bode(G_MA)
    bode(CG_MA)
    print("Root locus of C(s) * G(s):")
    rlocusCG_MA = rlocus(CG_MA)
    # response to the inputs and root locus of the plant
y_step = lsim(G_MF, step, t)
y_ramp = lsim(G_MF, ramp, t)
plt.figure() # create a plot figure
plt.subplot(2, 1, 1) # (rows, columns, panel number)
plt.plot(t, step)
plt.plot(t, y_step[0])
plt.legend(["R", "Gmf"])
plt.subplot(2, 1, 2)
plt.plot(t, ramp)
plt.plot(t, y_ramp[0])
plt.legend(["R", "Gmf"])
plt.show()
print("*********************************************")
ymf_step2 = lsim(G_MF, dist_p, t);
plt.figure()
plt.clf()
plt.plot(t, dist_p)
plt.plot(t, ymf_step2[0])
plt.legend(["R", "Gmf"])
plt.ylabel("Amplitude")
plt.show()
    # build the closed-loop system with disturbances
    Gmf = feedback(CG_MA, 1);
    Gd1 = feedback(G_MA, CG_MA); # disturbance between the controller and the plant
    Gd2 = feedback(1, CG_MA); # disturbance at the plant output
    # check the system response to a step
yma_step = lsim(G_MA, step, t);
ymf_step = lsim(G_MF, step, t);
yd1_dist = lsim(Gd1, dist_p, t);
yd2_dist = lsim(Gd2, dist_n, t);
yr_step = ymf_step[0]+yd1_dist[0]+yd2_dist[0]
    # ----- error computation
yr = step
er = yr-yr_step
u = lsim(C, er, t)
print("*********************************************")
plt.figure()
plt.clf()
plt.subplot(4, 1, 1)
plt.plot(t, step)
plt.plot(t, dist_p)
plt.plot(t, dist_n)
plt.plot(t, yr_step)
plt.legend(["R","dist_p", "dist_n", "Gmf"])
plt.ylabel("Amplitude")
#plt.show()
#plt.figure()
plt.subplot(4, 1, 2)
plt.plot(t, er)
plt.legend("e")
plt.ylabel("Erro")
#plt.show()
#plt.figure()
plt.subplot(4, 1, 3)
plt.plot(t, u[0])
plt.legend("u")
plt.ylabel("Controle")
#plt.show()
#plt.figure()
plt.subplot(4, 1, 4)
plt.plot(t, dist_p)
plt.plot(t, dist_n)
plt.legend(["dist_p", "dist_n"])
plt.ylabel("Amplitude")
plt.show()
print("*********************************************")
    ev_MF = ramp[-1] - y_ramp[0][-1]  # ramp-tracking error after adding the controller
    print(f"ev(∞) = {ev_MF}")
    ep_MF = step[-1] - y_step[0][-1]  # step-tracking error after adding the controller
    print(f"ep(∞) = {ep_MF}")
    print("*********************************************")
    print("Root locus of Gmf")
rlocusG_MF = rlocus(G_MF)
def plot_c(polesDominant, zero_c, polo_c):
plt.figure()
plt.scatter(polesDominant[0].real,polesDominant[0].imag, color='red')
plt.scatter(polesDominant[1].real,polesDominant[1].imag, color='red')
plt.scatter(-abs(zero_c).real,-abs(zero_c).imag, color='blue')
plt.scatter(-abs(polo_c).real,-abs(polo_c).imag, color='green', marker='X')
plt.legend(["Polo dominante +", "Polo dominante -", "Zero controlador", "Polo controlador"])
plt.grid(color='black', linestyle='-', linewidth=0.5)
plt.show()
def plot_cavat(polesDominant, zero_cav, polo_cav, zero_cat, polo_cat):
plt.scatter(polesDominant[0].real,polesDominant[0].imag, color='red')
plt.scatter(polesDominant[1].real,polesDominant[1].imag, color='red')
plt.scatter(-abs(zero_cav).real,-abs(zero_cav).imag, color='blue')
plt.scatter(-abs(polo_cav).real,-abs(polo_cav).imag, color='green', marker='X')
plt.scatter(-abs(zero_cat).real,-abs(zero_cat).imag, color='black')
plt.scatter(-abs(polo_cat).real,-abs(polo_cat).imag, color='gray', marker='X')
plt.legend(["Polo dominante +", "Polo dominante -", "Zero controlador avanço", "Polo controlador avanço", "Zero controlador atraso", "Polo controlador atraso"])
plt.grid(color='black', linestyle='-', linewidth=0.5)
plt.show()
# ---------------------------------------------------------------------------------------------------------------------------------------------------------------------
# Functions for the root-locus controller design
def get_kc_lr(e_esp, gaindc):
    # Determine the compensator gain, Kc
Kp_s = symbols('Kp_s')
eq_Kp = [(1/(1+Kp_s)) - e_esp]
Kp = solve(eq_Kp, Kp_s)
Kp = Kp[Kp_s]
Kc_s = symbols('Kc_s')
eq_Kc = [((Kc_s*gaindc) - Kp)]
Kc = solve(eq_Kc, Kc_s)
Kc = Kc[Kc_s]
#print(f"Kc = {Kc}")
return Kp, Kc
def get_psi(Mp_esp, Mp_folga):
    # Determine the damping ratio, psi = ξ
psi = np.arange(0, 1, 0.01)
MpVetor = 100*np.exp(((-1*psi*np.pi)/np.sqrt(1-psi**2)))
MpLoc = np.where(MpVetor>=(Mp_esp-Mp_folga))[-1][-1] + 1
psi = psi[MpLoc]
#print(f"ξ = {psi}")
return psi
def get_wn(ts_esp, psi):
    # Determine the natural frequency, Wn
Wn_s = symbols('Wn_s')
eq_Wn = [4/(psi*Wn_s)-ts_esp]
Wn = solve(eq_Wn, Wn_s)
Wn = Wn[Wn_s]
#print(f"Wn = {Wn}\t rad/s")
return Wn
def get_paramOrd2(psi,Wn):
    # Parameters of a 2nd-order system
sigma = psi * Wn
Wd = Wn * np.sqrt(1-psi**2)
#print(f"σ = {sigma}")
#print(f"Wd = {Wd}")
return [sigma, Wd]
def get_poleDominant(sigma, Wd):
s = []
s.append(complex(-sigma, Wd))
s.append(complex(-sigma, -Wd))
return s
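# Example of chaining the functions above into a second-order design (illustrative specs):
#   psi = get_psi(Mp_esp=10, Mp_folga=0)        # damping ratio for ~10% overshoot
#   Wn = get_wn(ts_esp=2, psi=psi)              # natural frequency for a 2 s settling time
#   sigma, Wd = get_paramOrd2(psi, Wn)
#   s1, s2 = get_poleDominant(sigma, Wd)        # desired dominant closed-loop poles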
def get_phiByCF(polos_MA, polesDominant, zero_c):
    # Determine phi from the phase condition
    # Angle between the dominant pole and the controller zero
thetaZero = np.arctan2(polesDominant[0].imag, polesDominant[0].real - zero_c.real)
thetaZeroD = np.degrees(thetaZero)
#print(f"Angulo entre o zero do controlador e o polo dominante = ϴ =")
#print(f"\t= {round(thetaZeroD,4)}º = {round(thetaZero,4)} rad")
    # Angle between the dominant pole and the controller pole
phiPolo = []
textSum = "("
for k in range(len(polos_MA)):
phiPolo.append(np.arctan2(polesDominant[0].imag, polesDominant[0].real - polos_MA[k]))
print(f"φ{k} = {round(phiPolo[-1], 4)} rad")
textSum += f"+ {round(np.degrees(phiPolo[-1]), 3)} "
textSum += ")"
phiPoloD = np.degrees(phiPolo)
phiPolo_C = (180 - np.sum(phiPoloD) + thetaZeroD)
#print(polos_MA)
#print(f"Angulo entre o polo do controlador e o polo dominante = φ{len(polos_MA)} =")
#print(f"\t= 180 - {textSum} + {round(thetaZeroD,3)} =")
#print(f"\t= {round(phiPolo_C, 4)}º = {round(np.radians(phiPolo_C),4)} rad")
return [textSum, thetaZero, thetaZeroD, phiPolo_C]
def get_posPole(polesDominant, phiPolo_C, zero_c):
d_s = symbols('d_s')
eq_d = [polesDominant[0].imag/d_s - np.tan(np.radians(phiPolo_C))]
d = solve(eq_d, d_s)
return (-1*(abs(d[d_s]) + abs(zero_c.real)))
def get_KcByCM(polesDominant, polos_MA, zeros_MA, zero_c, polo_c, Kp_MA):
    h = []  # distances from the poles to the dominant pole
    c = []  # distances from the zeros to the dominant pole
    # compute the distances between the poles and the dominant poles
for k in range(len(polos_MA)):
h.append(np.sqrt((abs(polesDominant[0].imag)-abs(polos_MA[k].imag))**2 + (abs(polesDominant[0].real) - abs(polos_MA[k].real))**2))
h.append(np.sqrt((abs(polesDominant[0].imag) - abs(polo_c.imag))**2 + (abs(polesDominant[0].real) - abs(polo_c.real))**2))
    # compute the distances between the zeros and the dominant poles
for k in range(len(zeros_MA)):
c.append(np.sqrt((abs(polesDominant[0].imag)-abs(zeros_MA[k].imag))**2 + (abs(polesDominant[0].real) - abs(zeros_MA[k].real))**2))
c.append(np.sqrt((abs(polesDominant[0].imag) - abs(zero_c.imag))**2 + (abs(polesDominant[0].real) - abs(zero_c.real))**2))
    # compute the controller gain
Kc = np.prod(h) / (np.prod(c)*Kp_MA)
#print(f"Kc = {Kc}")
return Kc
# ---------------------------------------------------------------------------------------------------------------------------------------------------------------------
# Functions for the frequency-response controller design
def get_kc_rf(e_esp, Kp_MA):
    # Determine the compensator gain, Kc
Kv_min = 1 / e_esp
Kp = Kp_MA
Kc = Kv_min / Kp
#print(f"Kc = {Kc}")
return Kc
# ---------------------------------------------------------------------------------------------------------------------------------------------------------------------
# Functions for the lead controller designed in the frequency domain
def get_a_av(phiMax, deg=True):
if deg:
return (1-np.sin(np.radians(phiMax))) / (1+np.sin(np.radians(phiMax)))
else:
return (1-np.sin(phiMax)) / (1+np.sin(phiMax))
def get_Wm(Kc,a,mag_MA, wout_MA):
    C_jwm = 20 * np.log10(Kc/np.sqrt(a))  # in dB
    magDb = 20 * np.log10(mag_MA)
    # The point where the magnitude crosses the line [-C_jwm -C_jwm] corresponds to the frequency Wm
    # find that crossing point
magDbLoc = np.where(magDb >= -float(abs(C_jwm)))[-1][-1]
Wm = wout_MA[magDbLoc]
#print(f"C(jWm) = {C_jwm}")
#print(f"Wm = {Wm}")
return [C_jwm, Wm ]
def get_T_av(a, Wm):
return 1 /(np.sqrt(a)*Wm)
# ---------------------------------------------------------------------------------------------------------------------------------------------------------------------
# Functions for the lag controller designed in the frequency domain
def get_Wcd(phase_MA, MFd, MFseg, wout_MA):
    phaseLoc = np.where(180+np.degrees(phase_MA) >= (MFseg+MFd))[-1][0] - 1  # first select the array (np.where returns a 1-tuple), then pick the item (0 == first, -1 == last)
Wcd = wout_MA[phaseLoc]
#print(f"C(jWm) = {C_jwm}")
#print(f"Wcd = {Wcd}")
return Wcd
def get_a_at(mag_Cat, wout_MA, Wcd):
magDb_Cat = 20*np.log10(mag_Cat)
wLoc = np.where(wout_MA >= Wcd)[-1][-1]-1
KcG_WCD = magDb_Cat[wLoc]
return 10**(abs(KcG_WCD)/20)
def get_T_at(Wcd):
return 10 /(Wcd)
``` |
{
"source": "johnnwallace/PAVE_RC",
"score": 3
} |
#### File: johnnwallace/PAVE_RC/HumPro.py
```python
from machine import Pin, UART
import random
import numpy as np
class HumPro:
def __init__(
self, crespPin, bePin, cmdPin, ctsPin, txPin, rxPin, modeIndPin, buttonPin
):
self.CRESP = Pin(crespPin, Pin.IN) # CRESP pin (FOR INTERRUPT)
self.BE = Pin(bePin, Pin.IN) # BE pin (CAN BE READ THROUGH LSTATUS IF NEEDED)
self.CMD = Pin(cmdPin, Pin.OUT) # CMD pin
self.CTS = Pin(ctsPin, Pin.IN) # CTS pin
self.TX = Pin(txPin, Pin.OUT) # TX pin
self.RX = Pin(rxPin, Pin.IN) # RX pin
self.MODE_IND = Pin(modeIndPin, Pin.IN) # MODE_IND pin
self.BUTTON = Pin(buttonPin, Pin.IN) # button pin
self.uart = UART(1, 9600, tx=self.TX, rx=self.RX) # initialize UART
# attach interrupt handlers
self.CRESP.irq(trigger=Pin.IRQ_RISING, handler=self.readData)
self.BUTTON.irq(trigger=Pin.IRQ_RISING, handler=self.transmitRandNumber)
# used to configure the HumPRO's settings
def configure(self):
        if self.MODE_IND.value() == 0:
self.CMD.value(0)
def transmitData(self, data):
        if self.CTS.value() == 0:
self.CMD.value(1)
self.uart.write(data + "\n") # prints a line of data to HumPRO
self.CMD.value(0)
# hold until HumPRO buffer is empty, indicating all data has been transmitted
while True:
            if self.BE.value() == 1:
# ADD EEXFLAG REGISTER SAMPLING HERE --> RETURN TRUE IF NO ERRORS, FALSE IF ERRORS
return
    # used to read data from the uart connection with the HumPRO
    # (IRQ handlers are invoked with the triggering Pin, hence the extra argument)
    def readData(self, pin=None):
        print(self.uart.readline())
    def transmitRandNumber(self, pin=None):  # also wired as the button IRQ handler
num = self.generateRandom()
self.transmitData(num)
print(num)
def generateRandom(self):
num = 0
for i in range(10):
num += random.randint(0, 9)
num * -10
return num
```
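A rough instantiation sketch for the class above; the pin numbers are placeholders and depend entirely on how the HumPRO module is wired to the board:
```python
# Pin assignments below are purely illustrative.
radio = HumPro(crespPin=2, bePin=3, cmdPin=4, ctsPin=5,
               txPin=6, rxPin=7, modeIndPin=8, buttonPin=9)
radio.configure()
radio.transmitData("hello")
```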
#### File: johnnwallace/PAVE_RC/throttle.py
```python
import math
import numpy as np
from steering import PIDController as PID
maxLateral = 20  # assuming a maximum lateral acceleration of 20 m/s/s
maxLongitudinal = 30  # assuming a maximum longitudinal acceleration of 30 m/s/s
class Circle:
    def perpBis(point1, point2):
        # slope and intercept of the perpendicular bisector of the segment point1-point2
        m1 = -1 / ((point2[1] - point1[1]) / (point2[0] - point1[0]))
        mp1 = (point1 + point2) / 2  # midpoint of the segment
        b1 = mp1[1] - m1 * mp1[0]
        return m1, b1
def getCircle(points):
# find 2 perpendicular bisectors
# check if y1 == y0 or y2 == y0 or x1 == x0 or x2 == x0
if (
np.array_equal(points[:, 0], points[:, 1])
or np.array_equal(points[:, 0], points[:, 2])
or np.array_equal(points[:, 1], points[:, 2])
):
raise ValueError("Two or more points are the same")
# find intersection of 2 perpendicular bisectors, considering cases when slope = 0 or is undefined
if points[1, 0] == points[1, 1]:
xIntersect = (points[0, 0] + points[0, 1]) / 2
elif points[1, 0] == points[1, 2]:
xIntersect = (points[0, 0] + points[0, 2]) / 2
else:
m1, b1 = Circle.perpBis(points[:, 0], points[:, 1])
m2, b2 = Circle.perpBis(points[:, 0], points[:, 2])
xIntersect = (b2 - b1) / (m1 - m2)
if points[1, 0] == points[1, 1]:
yIntersect = (points[1, 0] + points[1, 1]) / 2
elif points[1, 0] == points[1, 2]:
yIntersect = (points[1, 0] + points[1, 2]) / 2
else:
m1, b1 = Circle.perpBis(points[:, 0], points[:, 1])
m2, b2 = Circle.perpBis(points[:, 0], points[:, 2])
yIntersect = (b2 - b1) / (m1 - m2)
radius = math.sqrt(
(points[0, 2] - xIntersect) ** 2 + (points[1, 2] - yIntersect) ** 2
)
return (xIntersect, yIntersect, radius)
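    # Example: the points (-5, 0), (0, -5) and (5, 0) all lie on the circle of
    # radius 5 centred at the origin, so getCircle returns (0.0, 0.0, 5.0).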
def getCentripetal(
points, velocity
): # get centripetal acceleration from points given a velocity
radius = Circle.getCircle(points)[2]
return velocity * velocity / radius
def getVelo(
points, accel
): # get velocity from points given a centripetal acceleration
radius = Circle.getCircle(points)[2]
return math.sqrt(accel * radius)
class Throttle:
def __init__(self, maxLat, maxLon, points, lookAheadTime, Kp, Ki, Kd):
self.maxLat = maxLat # maximum lateral acceleration before capsizing
self.maxLon = maxLon # maximum longitudinal acceleration
self.points = points
self.lookAheadTime = lookAheadTime
self.current = 0
self.controller = PID(0, Kp, Ki, Kd)
    def getAccel(self, velocity):
        # centripetal acceleration required at the look-ahead point
        lookAheadDist = int(velocity * self.lookAheadTime)
        return Circle.getCentripetal(
            self.points[:, lookAheadDist * 10 : lookAheadDist * 10 + 3], velocity
        )  # multiply lookAheadDist by 10 because path point spacing is 10 cm. This can be changed to a variable in a path class, for example.
    def getMaxVelo(self, velocity):
        # maximum velocity at the look-ahead point before exceeding self.maxLat
        lookAheadDist = int(velocity * self.lookAheadTime)
        return Circle.getVelo(
            self.points[:, lookAheadDist * 10 : lookAheadDist * 10 + 3], self.maxLat
        )
def update(self, accel, velo, dt):
actualVelo = (
accel * self.lookAheadTime + velo
) # predict velocity at lookahead point
        desiredVelo = self.getMaxVelo(
            velo
        )  # get maximum velocity before capsizing at lookahead distance
self.controller.updateError(actualVelo, dt)
self.controller.updateSetpoint(desiredVelo)
return self.controller.evaluate()
points = np.array([(-5, 0, 5), (0, -5, 0)])
velocity = 10
print(Circle.getCentripetal(points, velocity))
``` |
{
"source": "JohnNWarila/apex",
"score": 2
} |
#### File: apex/cassie/dynamics_random.py
```python
from .cassiemujoco import pd_in_t, state_out_t, CassieSim, CassieVis
from .trajectory import CassieTrajectory
from math import floor
import numpy as np
import os
import random
import pickle
class CassieEnv_rand_dyn:
def __init__(self, traj, simrate=60, clock_based=False, state_est=False):
self.sim = CassieSim("./cassie/cassiemujoco/cassie.xml")
self.vis = None
self.clock_based = clock_based
self.state_est = state_est
if clock_based:
self.observation_space = np.zeros(42)
if self.state_est:
self.observation_space = np.zeros(48) # Size for use with state est
else:
self.observation_space = np.zeros(80)
if self.state_est:
self.observation_space = np.zeros(86) # Size for use with state est
self.action_space = np.zeros(10)
dirname = os.path.dirname(__file__)
if traj == "walking":
traj_path = os.path.join(dirname, "trajectory", "stepdata.bin")
elif traj == "stepping":
traj_path = os.path.join(dirname, "trajectory", "more-poses-trial.bin")
self.trajectory = CassieTrajectory(traj_path)
self.P = np.array([100, 100, 88, 96, 50])
self.D = np.array([10.0, 10.0, 8.0, 9.6, 5.0])
self.u = pd_in_t()
# TODO: should probably initialize this to current state
self.cassie_state = state_out_t()
self.simrate = simrate # simulate X mujoco steps with same pd target
# 60 brings simulation from 2000Hz to roughly 30Hz
self.time = 0 # number of time steps in current episode
self.phase = 0 # portion of the phase the robot is in
self.counter = 0 # number of phase cycles completed in episode
# NOTE: a reference trajectory represents ONE phase cycle
# should be floor(len(traj) / simrate) - 1
# should be VERY cautious here because wrapping around trajectory
        # badly can cause asymmetrical/bad gaits
self.phaselen = floor(len(self.trajectory) / self.simrate) - 1
# see include/cassiemujoco.h for meaning of these indices
self.pos_idx = [7, 8, 9, 14, 20, 21, 22, 23, 28, 34]
self.vel_idx = [6, 7, 8, 12, 18, 19, 20, 21, 25, 31]
self.speed = 0
# maybe make ref traj only send relevant idxs?
ref_pos, ref_vel = self.get_ref_state(self.phase)
self.prev_action = ref_pos[self.pos_idx]
self.phase_add = 1
# Record default dynamics parameters
self.default_damping = self.sim.get_dof_damping()
self.default_mass = self.sim.get_body_mass()
self.default_ipos = self.sim.get_body_ipos()
#print(self.default_damping)
#print(self.default_mass)
#print(self.default_ipos)
#input()
def step_simulation(self, action):
# maybe make ref traj only send relevant idxs?
ref_pos, ref_vel = self.get_ref_state(self.phase + self.phase_add)
target = action + ref_pos[self.pos_idx]
self.u = pd_in_t()
for i in range(5):
# TODO: move setting gains out of the loop?
# maybe write a wrapper for pd_in_t ?
self.u.leftLeg.motorPd.pGain[i] = self.P[i]
self.u.rightLeg.motorPd.pGain[i] = self.P[i]
self.u.leftLeg.motorPd.dGain[i] = self.D[i]
self.u.rightLeg.motorPd.dGain[i] = self.D[i]
self.u.leftLeg.motorPd.torque[i] = 0 # Feedforward torque
self.u.rightLeg.motorPd.torque[i] = 0
self.u.leftLeg.motorPd.pTarget[i] = target[i]
self.u.rightLeg.motorPd.pTarget[i] = target[i + 5]
self.u.leftLeg.motorPd.dTarget[i] = 0
self.u.rightLeg.motorPd.dTarget[i] = 0
self.cassie_state = self.sim.step_pd(self.u)
def step(self, action):
for _ in range(self.simrate):
self.step_simulation(action)
height = self.sim.qpos()[2]
self.time += 1
self.phase += self.phase_add
if self.phase > self.phaselen:
self.phase = 0
self.counter += 1
# Early termination
done = not(height > 0.4 and height < 3.0)
reward = self.compute_reward()
# TODO: make 0.3 a variable/more transparent
if reward < 0.3:
done = True
return self.get_full_state(), reward, done, {}
def reset(self, randomize=True):
# Randomize dynamics:
if randomize:
damp = self.default_damping
weak_factor = 1
strong_factor = 1
pelvis_damp_range = [[damp[0], damp[0]], [damp[1], damp[1]], [damp[2], damp[2]], [damp[3], damp[3]], [damp[4], damp[4]], [damp[5], damp[5]]] # 0->5
hip_damp_range = [[damp[6]/weak_factor, damp[6]*weak_factor], [damp[7]/weak_factor, damp[7]*weak_factor], [damp[8]/weak_factor, damp[8]*weak_factor]] # 6->8 and 19->21
achilles_damp_range = [[damp[9]/weak_factor, damp[9]*weak_factor], [damp[10]/weak_factor, damp[10]*weak_factor], [damp[11]/weak_factor, damp[11]*weak_factor]] # 9->11 and 22->24
knee_damp_range = [[damp[12]/weak_factor, damp[12]*weak_factor]] # 12 and 25
shin_damp_range = [[damp[13]/weak_factor, damp[13]*weak_factor]] # 13 and 26
tarsus_damp_range = [[damp[14], damp[14]*strong_factor]] # 14 and 27
heel_damp_range = [[damp[15], damp[15]]] # 15 and 28
fcrank_damp_range = [[damp[16]/weak_factor, damp[16]*weak_factor]] # 16 and 29
prod_damp_range = [[damp[17], damp[17]]] # 17 and 30
foot_damp_range = [[damp[18]/weak_factor, damp[18]*weak_factor]] # 18 and 31
side_damp = hip_damp_range + achilles_damp_range + knee_damp_range + shin_damp_range + tarsus_damp_range + heel_damp_range + fcrank_damp_range + prod_damp_range + foot_damp_range
damp_range = pelvis_damp_range + side_damp + side_damp
damp_noise = [np.random.uniform(a, b) for a, b in damp_range]
print(damp_noise - self.default_damping)
#nbody layout:
# 0: worldbody (zero)
# 1: pelvis
# 2: left hip roll
# 3: left hip yaw
# 4: left hip pitch
# 5: left achilles rod
# 6: left knee
# 7: left knee spring
# 8: left shin
# 9: left tarsus
# 10: left heel spring
            # 11: left foot crank
# 12: left plantar rod
# 13: left foot
# 14: right hip roll
# 15: right hip yaw
# 16: right hip pitch
# 17: right achilles rod
# 18: right knee
# 19: right knee spring
# 20: right shin
# 21: right tarsus
# 22: right heel spring
# 23: right foot crank
# 24: right plantar rod
# 25: right foot
hi = 1.2
lo = 0.8
m = self.default_mass
pelvis_mass_range = [[lo*m[1], hi*m[1]]] # 1
hip_mass_range = [[lo*m[2], hi*m[2]], # 2->4 and 14->16
[lo*m[3], hi*m[3]],
[lo*m[4], hi*m[4]]]
achilles_mass_range = [[lo*m[5], hi*m[5]]] # 5 and 17
knee_mass_range = [[lo*m[6], hi*m[6]]] # 6 and 18
knee_spring_mass_range = [[lo*m[7], hi*m[7]]] # 7 and 19
shin_mass_range = [[lo*m[8], hi*m[8]]] # 8 and 20
tarsus_mass_range = [[lo*m[9], hi*m[9]]] # 9 and 21
heel_spring_mass_range = [[lo*m[10], hi*m[10]]] # 10 and 22
fcrank_mass_range = [[lo*m[11], hi*m[11]]] # 11 and 23
prod_mass_range = [[lo*m[12], hi*m[12]]] # 12 and 24
foot_mass_range = [[lo*m[13], hi*m[13]]] # 13 and 25
side_mass = hip_mass_range + achilles_mass_range + knee_mass_range + knee_spring_mass_range + shin_mass_range + tarsus_mass_range + heel_spring_mass_range + fcrank_mass_range + prod_mass_range + foot_mass_range
mass_range = [[0, 0]] + pelvis_mass_range + side_mass + side_mass
mass_noise = [np.random.uniform(a, b) for a, b in mass_range]
delta = 0.001
com_noise = [0, 0, 0] + [self.default_ipos[i] + np.random.uniform(-delta, delta) for i in range(3, len(self.default_ipos))]
"""
pelvis_com_range = [[0.05066, 0.05066], [0.000346, 0.000346], [0.02841, 0.02841]] # 3->5
left_hip_com_range = [[-0.01793, -0.01793], [0.0001, 0.0001], [-0.04428, -0.04428], [0.0, 0.0], [-1e-5, -1e-5], [-0.034277, -0.034277], [0.05946, 0.05946], [0.00005, 0.00005], [-0.03581, -0.03581]] # 6->14
right_hip_com_range = [[-0.01793, -0.01793], [0.0001, 0.0001], [-0.04428, -0.04428], [0.0, 0.0], [ 1e-5, 1e-5], [-0.034277, -0.034277], [0.05946, 0.05946], [0.00005, 0.00005], [ 0.03581, 0.03581]] # 42->50
achilles_com_range = [[0.24719, 0.24719], [0.0, 0.0], [0.0, 0.0]] # 15->17 and 51->53
left_knee_com_range = [[0.023, 0.023], [0.03207, 0.03207], [-0.002181, -0.002181]] # 18->20
right_knee_com_range = [[0.023, 0.023], [0.03207, 0.03207], [ 0.002181, 0.002181]] # 54->56
knee_spring_com_range = [[0.0836, 0.0836], [0.0034, 0.0034], [0.0, 0.0]] # 21->23 and 57->59
left_shin_com_range = [[0.18338, 0.18338], [0.001169, 0.001169], [ 0.0002123, 0.0002123]] # 24->26
right_shin_com_range = [[0.18338, 0.18338], [0.001169, 0.001169], [-0.0002123, -0.0002123]] # 60->62
left_tarsus_com_range = [[0.11046, 0.11046], [-0.03058, -0.03058], [-0.00131, -0.00131]] # 27->29
right_tarsus_com_range = [[0.11046, 0.11046], [-0.03058, -0.03058], [ 0.00131, 0.00131]] # 63->65
heel_com_range = [[0.081, 0.081], [0.0022, 0.0022], [0.0, 0.0]] # 30->32 and 66->68
left_fcrank_com_range = [[0.00493, 0.00493], [0.00002, 0.00002], [-0.00215, -0.00215]] # 33->35 and 69->71
right_fcrank_com_range = [[0.00493, 0.00493], [0.00002, 0.00002], [ 0.00215, 0.00215]] # 33->35 and 69->71
prod_com_range = [[0.17792, 0.17792], [0.0, 0.0], [0.0, 0.0]] # 36->38 and 72->74
left_foot_com_range = [[0.00474, 0.00474], [0.02748, 0.02748], [-0.00014, -0.00014]] # 39->41 and 75->77
right_foot_com_range = [[0.00474, 0.00474], [0.02748, 0.02748], [ 0.00014, 0.00014]] # 39->41 and 75->77
left_com = left_hip_com_range + achilles_com_range + left_knee_com_range + knee_spring_com_range + left_shin_com_range + left_tarsus_com_range + heel_com_range + left_fcrank_com_range + prod_com_range + left_foot_com_range
right_com = right_hip_com_range + achilles_com_range + right_knee_com_range + knee_spring_com_range + right_shin_com_range + right_tarsus_com_range + heel_com_range + right_fcrank_com_range + prod_com_range + right_foot_com_range
com_range = [[0, 0], [0, 0], [0, 0]] + pelvis_com_range + left_com + right_com
com_noise = [np.random.uniform(a, b) for a, b in com_range]
"""
self.sim.set_dof_damping(np.clip(damp_noise, 0, None))
self.sim.set_body_mass(np.clip(mass_noise, 0, None))
self.sim.set_body_ipos(np.clip(com_noise, 0, None))
self.phase = random.randint(0, self.phaselen)
self.time = 0
self.counter = 0
qpos, qvel = self.get_ref_state(self.phase)
self.sim.set_qpos(qpos)
self.sim.set_qvel(qvel)
# Need to reset u? Or better way to reset cassie_state than taking step
self.cassie_state = self.sim.step_pd(self.u)
self.speed = (random.randint(0, 10)) / 10
# maybe make ref traj only send relevant idxs?
ref_pos, ref_vel = self.get_ref_state(self.phase)
self.prev_action = ref_pos[self.pos_idx]
return self.get_full_state()
# used for plotting against the reference trajectory
def reset_for_test(self):
self.phase = 0
self.time = 0
self.counter = 0
qpos, qvel = self.get_ref_state(self.phase)
self.sim.set_qpos(qpos)
self.sim.set_qvel(qvel)
# maybe make ref traj only send relevant idxs?
ref_pos, ref_vel = self.get_ref_state(self.phase)
self.prev_action = ref_pos[self.pos_idx]
# Need to reset u? Or better way to reset cassie_state than taking step
self.cassie_state = self.sim.step_pd(self.u)
return self.get_full_state()
def set_joint_pos(self, jpos, fbpos=None, iters=5000):
"""
Kind of hackish.
This takes a floating base position and some joint positions
and abuses the MuJoCo solver to get the constrained forward
kinematics.
There might be a better way to do this, e.g. using mj_kinematics
"""
# actuated joint indices
joint_idx = [7, 8, 9, 14, 20,
21, 22, 23, 28, 34]
# floating base indices
fb_idx = [0, 1, 2, 3, 4, 5, 6]
for _ in range(iters):
qpos = np.copy(self.sim.qpos())
qvel = np.copy(self.sim.qvel())
qpos[joint_idx] = jpos
if fbpos is not None:
qpos[fb_idx] = fbpos
self.sim.set_qpos(qpos)
self.sim.set_qvel(0 * qvel)
self.sim.step_pd(pd_in_t())
# NOTE: this reward is slightly different from the one in Xie et al
# see notes for details
def compute_reward(self):
qpos = np.copy(self.sim.qpos())
qvel = np.copy(self.sim.qvel())
ref_pos, ref_vel = self.get_ref_state(self.phase)
# TODO: should be variable; where do these come from?
# TODO: see magnitude of state variables to gauge contribution to reward
weight = [0.15, 0.15, 0.1, 0.05, 0.05, 0.15, 0.15, 0.1, 0.05, 0.05]
joint_error = 0
com_error = 0
orientation_error = 0
spring_error = 0
# each joint pos
for i, j in enumerate(self.pos_idx):
target = ref_pos[j]
actual = qpos[j]
joint_error += 30 * weight[i] * (target - actual) ** 2
# center of mass: x, y, z
for j in [0, 1, 2]:
target = ref_pos[j]
actual = qpos[j]
# NOTE: in Xie et al y target is 0
com_error += (target - actual) ** 2
# COM orientation: qx, qy, qz
for j in [4, 5, 6]:
target = ref_pos[j] # NOTE: in Xie et al orientation target is 0
actual = qpos[j]
orientation_error += (target - actual) ** 2
# left and right shin springs
for i in [15, 29]:
target = ref_pos[i] # NOTE: in Xie et al spring target is 0
actual = qpos[i]
spring_error += 1000 * (target - actual) ** 2
reward = 0.5 * np.exp(-joint_error) + \
0.3 * np.exp(-com_error) + \
0.1 * np.exp(-orientation_error) + \
0.1 * np.exp(-spring_error)
# reward = np.sign(qvel[0])*qvel[0]**2
# desired_speed = 3.0
# speed_diff = np.abs(qvel[0] - desired_speed)
# if speed_diff > 1:
# speed_diff = speed_diff**2
# reward = 20 - speed_diff
return reward
# get the corresponding state from the reference trajectory for the current phase
def get_ref_state(self, phase=None):
if phase is None:
phase = self.phase
if phase > self.phaselen:
phase = 0
pos = np.copy(self.trajectory.qpos[phase * self.simrate])
# this is just setting the x to where it "should" be given the number
# of cycles
# pos[0] += (self.trajectory.qpos[-1, 0] - self.trajectory.qpos[0, 0]) * self.counter
# ^ should only matter for COM error calculation,
# gets dropped out of state variable for input reasons
###### Setting variable speed #########
pos[0] *= self.speed
pos[0] += (self.trajectory.qpos[-1, 0]- self.trajectory.qpos[0, 0])* self.counter * self.speed
###### ########
# setting lateral distance target to 0?
# regardless of reference trajectory?
pos[1] = 0
vel = np.copy(self.trajectory.qvel[phase * self.simrate])
vel[0] *= self.speed
return pos, vel
def get_full_state(self):
qpos = np.copy(self.sim.qpos())
qvel = np.copy(self.sim.qvel())
ref_pos, ref_vel = self.get_ref_state(self.phase + self.phase_add)
# TODO: maybe convert to set subtraction for clarity
# {i for i in range(35)} -
# {0, 10, 11, 12, 13, 17, 18, 19, 24, 25, 26, 27, 31, 32, 33}
# this is everything except pelvis x and qw, achilles rod quaternions,
# and heel spring/foot crank/plantar rod angles
# note: x is forward dist, y is lateral dist, z is height
# makes sense to always exclude x because it is in global coordinates and
# irrelevant to phase-based control. Z is inherently invariant to
# trajectory despite being global coord. Y is only invariant to straight
# line trajectories.
# [ 0] Pelvis y
# [ 1] Pelvis z
# [ 2] Pelvis orientation qw
# [ 3] Pelvis orientation qx
# [ 4] Pelvis orientation qy
# [ 5] Pelvis orientation qz
# [ 6] Left hip roll (Motor [0])
# [ 7] Left hip yaw (Motor [1])
# [ 8] Left hip pitch (Motor [2])
# [ 9] Left knee (Motor [3])
# [10] Left shin (Joint [0])
# [11] Left tarsus (Joint [1])
# [12] Left foot (Motor [4], Joint [2])
# [13] Right hip roll (Motor [5])
# [14] Right hip yaw (Motor [6])
# [15] Right hip pitch (Motor [7])
# [16] Right knee (Motor [8])
# [17] Right shin (Joint [3])
# [18] Right tarsus (Joint [4])
# [19] Right foot (Motor [9], Joint [5])
pos_index = np.array([1,2,3,4,5,6,7,8,9,14,15,16,20,21,22,23,28,29,30,34])
# [ 0] Pelvis x
# [ 1] Pelvis y
# [ 2] Pelvis z
# [ 3] Pelvis orientation wx
# [ 4] Pelvis orientation wy
# [ 5] Pelvis orientation wz
# [ 6] Left hip roll (Motor [0])
# [ 7] Left hip yaw (Motor [1])
# [ 8] Left hip pitch (Motor [2])
# [ 9] Left knee (Motor [3])
# [10] Left shin (Joint [0])
# [11] Left tarsus (Joint [1])
# [12] Left foot (Motor [4], Joint [2])
# [13] Right hip roll (Motor [5])
# [14] Right hip yaw (Motor [6])
# [15] Right hip pitch (Motor [7])
# [16] Right knee (Motor [8])
# [17] Right shin (Joint [3])
# [18] Right tarsus (Joint [4])
# [19] Right foot (Motor [9], Joint [5])
vel_index = np.array([0,1,2,3,4,5,6,7,8,12,13,14,18,19,20,21,25,26,27,31])
if self.clock_based:
#qpos[self.pos_idx] -= ref_pos[self.pos_idx]
#qvel[self.vel_idx] -= ref_vel[self.vel_idx]
clock = [np.sin(2 * np.pi * self.phase / self.phaselen),
np.cos(2 * np.pi * self.phase / self.phaselen)]
ext_state = clock
else:
ext_state = np.concatenate([ref_pos[pos_index], ref_vel[vel_index]])
# Use state estimator
robot_state = np.concatenate([
[self.cassie_state.pelvis.position[2] - self.cassie_state.terrain.height], # pelvis height
self.cassie_state.pelvis.orientation[:], # pelvis orientation
self.cassie_state.motor.position[:], # actuated joint positions
self.cassie_state.pelvis.translationalVelocity[:], # pelvis translational velocity
self.cassie_state.pelvis.rotationalVelocity[:], # pelvis rotational velocity
self.cassie_state.motor.velocity[:], # actuated joint velocities
self.cassie_state.pelvis.translationalAcceleration[:], # pelvis translational acceleration
self.cassie_state.joint.position[:], # unactuated joint positions
self.cassie_state.joint.velocity[:] # unactuated joint velocities
])
if self.state_est:
return np.concatenate([robot_state,
ext_state])
else:
return np.concatenate([qpos[pos_index],
qvel[vel_index],
ext_state])
def render(self):
if self.vis is None:
self.vis = CassieVis(self.sim, "./cassie/cassiemujoco/cassie.xml")
return self.vis.draw(self.sim)
```
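A minimal driving loop for the environment above, assuming the `cassiemujoco` bindings and trajectory files resolve at the relative paths used in `__init__` (the module path is also assumed):
```python
import numpy as np
from cassie.dynamics_random import CassieEnv_rand_dyn  # assumed module path

env = CassieEnv_rand_dyn("walking", simrate=60, clock_based=True, state_est=True)
obs = env.reset()  # randomizes damping, masses, and CoM offsets
for _ in range(100):
    action = np.zeros(10)  # zero offsets around the reference trajectory
    obs, reward, done, _ = env.step(action)
    if done:
        obs = env.reset()
```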
#### File: apex/deprecated/logging.py
```python
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
plt.style.use('ggplot')
from functools import partial
import os.path as osp, shutil, time, atexit, os, subprocess, hashlib, sys
import configparser
from collections import OrderedDict
import numpy as np
import ray
matplotlib.rcParams.update({'font.size': 8})
#from scipy.signal import medfilt
class Logger():
def __init__(self, args, env_name, viz=True, viz_list=[]):
self.ansi = dict(
gray=30,
red=31,
green=32,
yellow=33,
blue=34,
magenta=35,
cyan=36,
white=37,
crimson=38
)
self.name = args.name
self.viz_list = ["all"]
self.args = args
if viz:
from visdom import Visdom
self.viz = Visdom(port=args.viz_port)
self.env = env_name
self.plots = {}
else:
self.viz = None
self.output_dir = self._get_directory(args)
self.init = True
self.header = []
self.current_row = {}
def _get_directory(self, args):
"""Use hyperparms to set a directory to output diagnostic files."""
#get hyperparameters as dictionary
arg_dict = args.__dict__
assert "seed" in arg_dict, \
"You must provide a 'seed' key in your command line arguments"
assert "logdir" in arg_dict, \
"You must provide a 'logdir' key in your command line arguments."
#sort the keys so the same hyperparameters will always have the same hash
arg_dict = OrderedDict(sorted(arg_dict.items(), key=lambda t: t[0]))
#remove seed so it doesn't get hashed, store value for filename
# same for logging directory
seed = str(arg_dict.pop("seed"))
logdir = str(arg_dict.pop('logdir'))
# get a unique hash for the hyperparameter settings, truncated at 10 chars
arg_hash = hashlib.md5(str(arg_dict).encode('ascii')).hexdigest()[0:10]
output_dir = osp.join(logdir, arg_hash)
# create a directory with the hyperparm hash as its name, if it doesn't
# already exist.
os.makedirs(output_dir, exist_ok=True)
# create a file for this seed, this is where output will be logged
filename = "seed" + seed + ".log"
# currently logged-to directories will be pre-pended with "ACTIVE_"
active_path = osp.join(output_dir, filename)
# Create a file with all the hyperparam settings in plaintext
info_path = osp.join(output_dir, "experiment.info")
self._generate_info_file(open(info_path, 'w'), arg_dict)
print(self._colorize("Logging data to %s" % active_path,
'green', bold=True))
return active_path
def record(self, key, val, x_val, title_name, x_var_name="Timesteps", split_name="train"):
"""
Log some diagnostic value in current iteration.
Call this exactly once for each diagnostic, every iteration
"""
if self.init:
self.header.append(key)
# if self.viz is not None:
# self.wins.append(None)
else:
assert key in self.header, \
"Key %s not in header. All keys must be set in first iteration" % key
assert key not in self.current_row, \
"You already set key %s this iteration. Did you forget to call dump()?" % key
self.current_row[key] = val
if self.viz is not None:
self.plot(key, x_var_name, split_name, title_name, x_val, val)
def dump(self):
"""Write all of the diagnostics from the current iteration"""
vals = []
sys.stdout.write("-" * 37 + "\n")
for key in self.header:
val = self.current_row.get(key, "")
if hasattr(val, "__float__"):
valstr = "%8.3g" % val
else:
valstr = val
sys.stdout.write("| %15s | %15s |" % (key, valstr) + "\n")
vals.append(float(val))
sys.stdout.write("-" * 37 + "\n")
sys.stdout.flush()
output_file = None
if self.init:
output_file = open(self.output_dir, "w")
output_file.write("\t".join(self.header))
output_file.write("\n")
else:
output_file = open(self.output_dir, "a")
output_file.write("\t".join(map(str, vals)))
output_file.write("\n")
output_file.flush()
output_file.close()
self.current_row.clear()
self.init = False
# if self.viz is not None:
# self.plot()
def config_monitor(self, config_path=None):
if config_path is None:
config_path = os.path.join(os.path.dirname(__file__), "../config/monitor.ini")
config = configparser.ConfigParser()
config.read(config_path)
return config["monitor"]
def plot(self, var_name, x_var_name, split_name, title_name, x, y):
if var_name not in self.plots:
self.plots[var_name] = self.viz.line(X=np.array([x,x]), Y=np.array([y,y]), env=self.env, opts=dict(
legend=[split_name],
title=title_name,
xlabel=x_var_name,
ylabel=var_name
))
else:
self.viz.line(X=np.array([x]), Y=np.array([y]), env=self.env, win=self.plots[var_name], name=split_name, update = 'append')
def _load_data(self):
log_file = open(self.output_dir, 'r')
header = log_file.readline().rstrip('\n').split('\t')
data = []
for line in log_file:
vals = line.rstrip('\n').split('\t')
vals = [float(val) for val in vals]
data.append(vals)
data = np.array(data)
log_file.close()
return data, header
def _generate_info_file(self, file, arg_dict):
for key, val in arg_dict.items():
file.write("%s: %s" % (key, val))
file.write('\n')
def _colorize(self, string, color, bold=False, highlight=False):
"""Format string to print with color 'color' if printed to unix terminal."""
attr = []
num = self.ansi[color]
if highlight:
num += 10
attr.append(str(num))
if bold:
attr.append('1')
return '\x1b[%sm%s\x1b[0m' % (';'.join(attr), string)
```
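A hedged usage sketch for the logger; the namespace fields shown are the ones `__init__` and `_get_directory` actually read, and every name, path, and value is illustrative:
```python
import argparse

args = argparse.Namespace(name="test-run", seed=0, logdir="/tmp/logs")
logger = Logger(args, env_name="Cassie-v0", viz=False)  # viz=False avoids the visdom dependency
for it in range(3):
    logger.record("Return", 100.0 + it, x_val=it, title_name="Episode return")
    logger.dump()
```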
#### File: deprecated/tests/test_ppo.py
```python
from collections import defaultdict
import pytest
import numpy as np
import torch
from rl.policies import GaussianMLP
from rl.algos import PPO
class SampleTesterEnv:
def __init__(self, obs_dim, action_dim, done_state=10, gamma=0.99):
"""
        A simple environment that unit tests whether the
        experience buffer and trajectory sampling code are
producing the correct output. This is to test for things
like off-by-one errors in the experience buffers or
reward-to-go calculations.
In other words:
Makes sure the "experience table" is of the form:
--------------------
s_0 | a_0 | r_0
--------------------
. . .
. . .
. . .
--------------------
s_T | a_T | r_T
--------------------
s_T+1 | |
--------------------
where entries are defined by the MDP transitions:
s_0 -> (s_1, a_0, r_0) -> ... -> (s_T+1, a_T, r_T)
"""
self.observation_space = np.zeros(obs_dim)
self.action_space = np.zeros(action_dim)
self.state = 0
self.obs_dim = obs_dim
# TODO: add GAE support?
self.gamma = gamma
self.done = done_state
self.actions = []
def step(self, action):
self.state += 1
output = np.ones(shape=(1, self.obs_dim)) * self.state
done = (self.state % self.done) == 0
#print("real: ", done)
# the first reward corresponds to the second state
reward = np.ones(shape=(1, 1)) * (self.state - 1)
self.actions.append(action.squeeze(0)) # TODO
return output, reward, done, None
def reset(self):
self.state = 0
output = np.ones(shape=(1, self.obs_dim)) * self.state
return output
@pytest.mark.parametrize("num_steps, obs_dim, action_dim", [
(5, 1, 1),
(10, 1, 1),
(25, 1, 1),
(10, 80, 10),
(30, 80, 10),
(35, 80, 10)
])
def test_ppo_sample(num_steps, obs_dim, action_dim):
# useful for debugging
np.set_printoptions(threshold=10000)
torch.set_printoptions(threshold=10000)
# TODO: test value est bootstrap for truncated trajectories
gamma = 0.99
env = SampleTesterEnv(obs_dim=obs_dim, action_dim=action_dim, gamma=gamma)
policy = GaussianMLP(obs_dim, action_dim)
# don't need to specify args that don't affect ppo.sample()
args = defaultdict(lambda: None, {'gamma': gamma})
algo = PPO(args)
memory = algo.sample(env, policy, num_steps, 100)
states, actions, rewards, returns = map(torch.Tensor,
(memory.states, memory.actions, memory.rewards, memory.returns)
)
num_steps = states.shape[0]
assert states.shape == (num_steps, obs_dim)
assert actions.shape == (num_steps, action_dim)
    assert rewards.shape == (num_steps, 1)
    assert returns.shape == (num_steps, 1)
expected_states = np.array([(np.ones(shape=(obs_dim,)) * (s % env.done)) for s in range(num_steps)])
assert np.allclose(states, expected_states)
expected_rewards = np.array([(np.ones(shape=(1)) * (s % env.done)) for s in range(num_steps)])
assert np.allclose(rewards, expected_rewards)
expected_actions = np.array(env.actions)
assert np.allclose(actions, expected_actions)
expected_returns, R = [], 0
for r in reversed(expected_rewards):
R = R * gamma + r
expected_returns.insert(0, R.copy())
if r == 0: # this only happens on initial state, so restart the return
R = 0
expected_returns = np.array(expected_returns)
assert np.allclose(returns, expected_returns)
@pytest.mark.parametrize("num_steps, obs_dim, action_dim", [
(5, 1, 1),
(10, 1, 1),
(25, 1, 1),
(10, 80, 10),
(30, 80, 10),
(35, 80, 10)
])
def test_ppo_sample_parallel(num_steps, obs_dim, action_dim):
# useful for debugging
np.set_printoptions(threshold=10000)
torch.set_printoptions(threshold=10000)
# TODO: test value est bootstrap for truncated trajectories
gamma = 0.99
from functools import partial
env = SampleTesterEnv(obs_dim=obs_dim, action_dim=action_dim, gamma=gamma)
env_fn = partial(
SampleTesterEnv,
obs_dim=obs_dim,
action_dim=action_dim,
gamma=gamma
)
policy = GaussianMLP(obs_dim, action_dim)
# don't need to specify args that don't affect ppo.sample()
args = defaultdict(lambda: None, {'gamma': gamma, 'num_procs': 4})
algo = PPO(args)
memory = algo.sample_parallel(env_fn, policy, num_steps, 100)
expected_memory = algo.sample(env, policy, 40, 100)
#breakpoint()
assert np.allclose(memory.states, expected_memory.states)
#assert np.allclose(memory.actions, expected_memory.actions)
assert np.allclose(memory.rewards, expected_memory.rewards)
#assert np.allclose(memory.returns, expected_memory.returns)
assert np.allclose(memory.returns, expected_memory.returns)
assert np.allclose(memory.ep_returns, expected_memory.ep_returns)
assert np.allclose(memory.ep_lens, expected_memory.ep_lens)
test_ppo_sample_parallel(5, 1, 1)
```
#### File: rl/envs/wrappers.py
```python
import numpy as np
import torch
# Gives a vectorized interface to a single environment
class WrapEnv:
def __init__(self, env_fn):
self.env = env_fn()
def __getattr__(self, attr):
return getattr(self.env, attr)
def step(self, action):
state, reward, done, info = self.env.step(action[0])
return np.array([state]), np.array([reward]), np.array([done]), np.array([info])
def render(self):
self.env.render()
def reset(self):
return np.array([self.env.reset()])
# TODO: this is probably a better case for inheritance than for a wrapper
# Gives an interface to exploit mirror symmetry
class SymmetricEnv:
def __init__(self, env_fn, mirrored_obs=None, mirrored_act=None, obs_fn=None, act_fn=None):
assert (bool(mirrored_act) ^ bool(act_fn)) and (bool(mirrored_obs) ^ bool(obs_fn)), \
"You must provide either mirror indices or a mirror function, but not both, for \
observation and action."
if mirrored_act:
self.act_mirror_matrix = torch.Tensor(_get_symmetry_matrix(mirrored_act))
elif act_fn:
assert callable(act_fn), "Action mirror function must be callable"
self.mirror_action = act_fn
if mirrored_obs:
self.obs_mirror_matrix = torch.Tensor(_get_symmetry_matrix(mirrored_obs))
elif obs_fn:
assert callable(obs_fn), "Observation mirror function must be callable"
self.mirror_observation = obs_fn
self.env = env_fn()
def __getattr__(self, attr):
return getattr(self.env, attr)
def mirror_action(self, action):
return action @ self.act_mirror_matrix
def mirror_observation(self, obs):
return obs @ self.obs_mirror_matrix
# To be used when there is a clock in the observation. In this case, the mirrored_obs vector inputted
# when the SymmeticEnv is created should not move the clock input order. The indices of the obs vector
# where the clocks are located need to be inputted.
def mirror_clock_observation(self, obs, clock_inds):
print(obs.shape)
print(self.obs_mirror_matrix.shape)
mirror_obs = obs @ self.obs_mirror_matrix
        clock = mirror_obs[:, clock_inds]
# print("clock: ", clock)
for i in range(np.shape(clock)[1]):
mirror_obs[:, clock_inds[i]] = np.sin(np.arcsin(clock[:, i]) + np.pi)
return mirror_obs
def _get_symmetry_matrix(mirrored):
numel = len(mirrored)
mat = np.zeros((numel, numel))
for (i, j) in zip(np.arange(numel), np.abs(np.array(mirrored).astype(int))):
mat[i, j] = np.sign(mirrored[i])
return mat
```
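To make the mirror-matrix convention concrete, here is a small sketch of `_get_symmetry_matrix` (import path assumed). Note that each entry's sign comes from `np.sign`, so an index of exactly 0 would zero out that component; a fractional value such as 0.1 keeps the sign while `astype(int)` still maps it to index 0:
```python
import torch
from rl.envs.wrappers import _get_symmetry_matrix  # assumed module path

# [1, 0.1, -2] means: swap components 0 and 1, negate component 2.
mat = torch.Tensor(_get_symmetry_matrix([1, 0.1, -2]))
action = torch.tensor([[10.0, 20.0, 30.0]])
print(action @ mat)  # tensor([[20., 10., -30.]])
```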
#### File: rl/policies/actor.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import sqrt
from rl.policies.base import Net
LOG_STD_HI = 2
LOG_STD_LO = -20
class Actor(Net):
def __init__(self):
super(Actor, self).__init__()
def forward(self):
raise NotImplementedError
def get_action(self):
raise NotImplementedError
class Linear_Actor(Actor):
def __init__(self, state_dim, action_dim, hidden_size=32):
super(Linear_Actor, self).__init__()
self.l1 = nn.Linear(state_dim, hidden_size)
self.l2 = nn.Linear(hidden_size, action_dim)
self.action_dim = action_dim
for p in self.parameters():
p.data = torch.zeros(p.shape)
def forward(self, state):
a = self.l1(state)
a = self.l2(a)
self.action = a
return a
def get_action(self):
return self.action
# Actor network for gaussian mlp
class Gaussian_FF_Actor(Actor): # more consistent with other actor naming conventions
def __init__(self, state_dim, action_dim, layers=(256, 256), env_name=None, nonlinearity=torch.nn.functional.relu, fixed_std=None, bounded=False, normc_init=True, obs_std=None, obs_mean=None):
super(Gaussian_FF_Actor, self).__init__()
self.actor_layers = nn.ModuleList()
self.actor_layers += [nn.Linear(state_dim, layers[0])]
for i in range(len(layers)-1):
self.actor_layers += [nn.Linear(layers[i], layers[i+1])]
self.means = nn.Linear(layers[-1], action_dim)
if fixed_std is None: # probably don't want to use this for ppo, always use fixed std
self.log_stds = nn.Linear(layers[-1], action_dim)
self.learn_std = True
else:
self.fixed_std = fixed_std
self.learn_std = False
self.action = None
self.action_dim = action_dim
self.env_name = env_name
self.nonlinearity = nonlinearity
self.obs_std = obs_std
self.obs_mean = obs_mean
# weight initialization scheme used in PPO paper experiments
self.normc_init = normc_init
self.bounded = bounded
self.init_parameters()
self.train()
def init_parameters(self):
if self.normc_init:
self.apply(normc_fn)
self.means.weight.data.mul_(0.01)
def _get_dist_params(self, state):
if self.training == False:
state = (state - self.obs_mean) / self.obs_std
x = state
for l in self.actor_layers:
x = self.nonlinearity(l(x))
x = self.means(x)
if self.bounded:
mean = torch.tanh(x)
else:
mean = x
if self.learn_std:
sd = torch.clamp(self.log_stds(x), LOG_STD_LO, LOG_STD_HI).exp()
else:
sd = self.fixed_std
return mean, sd
def forward(self, state, deterministic=True):
mu, sd = self._get_dist_params(state)
if not deterministic:
self.action = torch.distributions.Normal(mu, sd).sample()
else:
self.action = mu
return self.action
def get_action(self):
return self.action
def distribution(self, inputs):
mu, sd = self._get_dist_params(inputs)
return torch.distributions.Normal(mu, sd)
class FF_Actor(Actor):
def __init__(self, state_dim, action_dim, layers=(256, 256), env_name=None, nonlinearity=F.relu, max_action=1):
super(FF_Actor, self).__init__()
self.actor_layers = nn.ModuleList()
self.actor_layers += [nn.Linear(state_dim, layers[0])]
for i in range(len(layers)-1):
self.actor_layers += [nn.Linear(layers[i], layers[i+1])]
self.network_out = nn.Linear(layers[-1], action_dim)
self.action = None
self.action_dim = action_dim
self.env_name = env_name
self.nonlinearity = nonlinearity
self.initialize_parameters()
self.max_action = max_action
def forward(self, state, deterministic=True):
x = state
for idx, layer in enumerate(self.actor_layers):
x = self.nonlinearity(layer(x))
self.action = torch.tanh(self.network_out(x))
return self.action * self.max_action
def get_action(self):
return self.action
class LSTM_Actor(Actor):
def __init__(self, input_dim, action_dim, layers=(128, 128), env_name=None, nonlinearity=torch.tanh, max_action=1):
super(LSTM_Actor, self).__init__()
self.actor_layers = nn.ModuleList()
        self.actor_layers += [nn.LSTMCell(input_dim, layers[0])]
for i in range(len(layers)-1):
self.actor_layers += [nn.LSTMCell(layers[i], layers[i+1])]
        self.network_out = nn.Linear(layers[-1], action_dim)
self.action = None
self.action_dim = action_dim
self.init_hidden_state()
self.env_name = env_name
self.nonlinearity = nonlinearity
self.is_recurrent = True
self.max_action = max_action
def get_hidden_state(self):
return self.hidden, self.cells
def set_hidden_state(self, data):
if len(data) != 2:
print("Got invalid hidden state data.")
exit(1)
self.hidden, self.cells = data
def init_hidden_state(self, batch_size=1):
self.hidden = [torch.zeros(batch_size, l.hidden_size) for l in self.actor_layers]
self.cells = [torch.zeros(batch_size, l.hidden_size) for l in self.actor_layers]
def forward(self, x, deterministic=True):
dims = len(x.size())
if dims == 3: # if we get a batch of trajectories
self.init_hidden_state(batch_size=x.size(1))
y = []
for t, x_t in enumerate(x):
for idx, layer in enumerate(self.actor_layers):
c, h = self.cells[idx], self.hidden[idx]
self.hidden[idx], self.cells[idx] = layer(x_t, (h, c))
x_t = self.hidden[idx]
y.append(x_t)
x = torch.stack([x_t for x_t in y])
else:
if dims == 1: # if we get a single timestep (if not, assume we got a batch of single timesteps)
x = x.view(1, -1)
for idx, layer in enumerate(self.actor_layers):
h, c = self.hidden[idx], self.cells[idx]
self.hidden[idx], self.cells[idx] = layer(x, (h, c))
x = self.hidden[idx]
x = self.nonlinearity(self.network_out(x))
if dims == 1:
x = x.view(-1)
        self.action = x
return self.action
def get_action(self):
return self.action
class Gaussian_LSTM_Actor(Actor):
def __init__(self, state_dim, action_dim, layers=(128, 128), env_name=None, nonlinearity=F.tanh, normc_init=False, max_action=1, fixed_std=None):
super(Gaussian_LSTM_Actor, self).__init__()
self.actor_layers = nn.ModuleList()
self.actor_layers += [nn.LSTMCell(state_dim, layers[0])]
for i in range(len(layers)-1):
self.actor_layers += [nn.LSTMCell(layers[i], layers[i+1])]
        self.network_out = nn.Linear(layers[-1], action_dim)
self.action = None
self.action_dim = action_dim
self.init_hidden_state()
self.env_name = env_name
self.nonlinearity = nonlinearity
self.max_action = max_action
self.is_recurrent = True
if fixed_std is None:
self.log_stds = nn.Linear(layers[-1], action_dim)
self.learn_std = True
else:
self.fixed_std = fixed_std
self.learn_std = False
if normc_init:
self.initialize_parameters()
self.act = self.forward
def _get_dist_params(self, state):
if self.training == False:
state = (state - self.obs_mean) / self.obs_std
dims = len(state.size())
x = state
if dims == 3: # if we get a batch of trajectories
self.init_hidden_state(batch_size=x.size(1))
action = []
y = []
for t, x_t in enumerate(x):
for idx, layer in enumerate(self.actor_layers):
c, h = self.cells[idx], self.hidden[idx]
self.hidden[idx], self.cells[idx] = layer(x_t, (h, c))
x_t = self.hidden[idx]
y.append(x_t)
x = torch.stack([x_t for x_t in y])
else:
if dims == 1: # if we get a single timestep (if not, assume we got a batch of single timesteps)
x = x.view(1, -1)
for idx, layer in enumerate(self.actor_layers):
h, c = self.hidden[idx], self.cells[idx]
self.hidden[idx], self.cells[idx] = layer(x, (h, c))
x = self.hidden[idx]
if dims == 1:
x = x.view(-1)
mu = self.network_out(x)
if self.learn_std:
sd = torch.clamp(self.log_stds(x), LOG_STD_LO, LOG_STD_HI).exp()
else:
sd = self.fixed_std
return mu, sd
def init_hidden_state(self, batch_size=1):
self.hidden = [torch.zeros(batch_size, l.hidden_size) for l in self.actor_layers]
self.cells = [torch.zeros(batch_size, l.hidden_size) for l in self.actor_layers]
def forward(self, state, deterministic=True):
mu, sd = self._get_dist_params(state)
if not deterministic:
self.action = torch.distributions.Normal(mu, sd).sample()
else:
self.action = mu
return self.action
def distribution(self, inputs):
mu, sd = self._get_dist_params(inputs)
return torch.distributions.Normal(mu, sd)
def get_action(self):
return self.action
## Initialization scheme for gaussian mlp (from ppo paper)
# NOTE: the fact that this has the same name as a parameter caused a NASTY bug
# apparently "if <function_name>" evaluates to True in python...
def normc_fn(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
m.weight.data.normal_(0, 1)
m.weight.data *= 1 / torch.sqrt(m.weight.data.pow(2).sum(1, keepdim=True))
if m.bias is not None:
m.bias.data.fill_(0)
GaussianMLP_Actor = Gaussian_FF_Actor # for legacy code compatibility
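# Minimal usage sketch (dimensions and the fixed std are illustrative):
#   actor = Gaussian_FF_Actor(state_dim=80, action_dim=10, fixed_std=torch.ones(10) * 0.1)
#   stochastic_action = actor(torch.zeros(1, 80), deterministic=False)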
``` |
{
"source": "johnny161/Text-Clustering",
"score": 3
} |
#### File: Text-Clustering/Kmeans/kmeans_cluster.py
```python
import os, sys
from sklearn.cluster import KMeans
from sklearn import feature_extraction
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics import silhouette_score
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
import numpy as np
'''vectorize the input documents'''
def tfidf_vector(corpus_path):
corpus_train=[]
#
target_train=[]
for line in open(corpus_path):
line = line.strip().split('\t')
if len(line) == 2:
words = line[1]
category = line[0]
target_train.append(category)
corpus_train.append(words)
print ("build train-corpus done!!")
count_v1 = CountVectorizer(max_df = 0.4, min_df = 0.01)
counts_train = count_v1.fit_transform(corpus_train)
word_dict = {}
for index, word in enumerate(count_v1.get_feature_names()):#出现3次以上的关键词
word_dict[index] = word
print ("the shape of train is " + repr(counts_train.shape))
tfidftransformer = TfidfTransformer()
tfidf_train = tfidftransformer.fit_transform(counts_train)
return tfidf_train, word_dict
'''topic cluster'''
def cluster_kmeans(tfidf_train, word_dict, cluster_docs, cluster_keywords, num_cluster):
f_docs = open(cluster_docs, 'w+')
km = KMeans(n_clusters = num_clusters)
km.fit(tfidf_train)
clusters = km.labels_.tolist()
cluster_dict = {}
order_centroids = km.cluster_centers_.argsort()[:, ::-1]
doc = 1
for cluster in clusters:
f_docs.write(str(doc) + ',' + str(cluster) + '\n')
doc += 1
if cluster not in cluster_dict:
cluster_dict[cluster] = 1
else:
cluster_dict[cluster] += 1
f_docs.close()
for idx in range(num_cluster): # 每个聚类的数量
print ("cluster" + str(idx + 1) + ': ' + str(cluster_dict[idx]))
cluster = 1
f_clusterwords = open(cluster_keywords, 'w+')
for ind in order_centroids: # 每个聚类选 50 个词
words = []
for index in ind[:10]:
words.append(word_dict[index])
print (cluster,','.join(words))
f_clusterwords.write(str(cluster) + '\t' + ','.join(words) + '\n')
cluster += 1
print ('*****' * 5)
f_clusterwords.close()
visualization(tfidf_train.toarray(), km.labels_)
'''select the best cluster num'''
def best_kmeans(tfidf_matrix, word_dict):
import matplotlib.pyplot as plt
# from matplotlib.font_manager import FontProperties
from scipy.spatial.distance import cdist
import numpy as np
K = range(1, 50)
meandistortions = []
for k in K:
print (k, '****'*5)
kmeans = KMeans(n_clusters = k)
kmeans.fit(tfidf_matrix)
meandistortions.append(sum(np.min(cdist(tfidf_matrix.toarray(), kmeans.cluster_centers_, 'euclidean'), axis=1)) /\
tfidf_matrix.shape[0])
plt.plot(K, meandistortions, 'bx-')
plt.grid(True)
plt.xlabel('Number of clusters')
plt.ylabel('Average within-cluster sum of squares')
plt.title('Eibow for Kmeans clustering')
plt.show()
'''calculate Silhouette Coefficient'''
def cal_silhouette_coef(tfidf_train):
weight = tfidf_train.toarray()
Scores = []
for k in range(2, 50):
km = KMeans(n_clusters = k)
km.fit(weight)
Scores.append(silhouette_score(weight, km.labels_, metric='euclidean'))
X = range(2, 50)
plt.xlabel('K-value')
plt.ylabel('Silhouette-Coefficient')
plt.plot(X, Scores, 'o-')
plt.show()
'''visualization'''
def visualization(tfidf_train, labels_):
tsne = TSNE(n_components=2)
decomposition_data = tsne.fit_transform(tfidf_train)
x = []
y = []
for i in decomposition_data:
x.append(i[0])
y.append(i[1])
fig = plt.figure(figsize=(10, 10))
ax = plt.axes()
plt.scatter(x, y, c=labels_, marker="x")
plt.title("k = 15")
plt.xticks(())
plt.yticks(())
plt.show()
plt.savefig('./figure/sample.png', aspect=1)
if __name__ == '__main__':
corpus_train = "./corpus_train.txt"
cluster_docs = "./cluster_result_document.txt"
cluster_keywords = "./cluster_result_keyword.txt"
num_clusters = 15
tfidf_train, word_dict = tfidf_vector(corpus_train)
# cal_silhouette_coef(tfidf_train) # judge which K-value to take
# best_kmeans(tfidf_train, word_dict)
cluster_kmeans(tfidf_train, word_dict, cluster_docs, cluster_keywords, num_clusters)
``` |
{
"source": "johnny1up/eng_archive",
"score": 2
} |
#### File: Ska/engarchive/update_server_sync.py
```python
import argparse
import gzip
import pickle
import re
import shutil
from itertools import count
from pathlib import Path
import numpy as np
import pyyaks.context
import pyyaks.logger
import tables
from Chandra.Time import DateTime
from Ska.DBI import DBI
from astropy.table import Table
from . import fetch
from . import file_defs
from .utils import get_date_id, STATS_DT
sync_files = pyyaks.context.ContextDict('update_server_sync.sync_files')
sync_files.update(file_defs.sync_files)
def get_options(args=None):
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--sync-root",
default=".",
help="Root directory for sync files (default='.')")
parser.add_argument("--content",
action='append',
help="Content type to process [match regex] (default = all)")
parser.add_argument("--max-days",
type=float,
default=1.5,
help="Max number of days of files per sync directory (default=1.5)")
parser.add_argument("--max-lookback",
type=float,
default=60,
help="Maximum number of days to look back from --date-stop (default=60)")
parser.add_argument("--log-level",
default=20,
help="Logging level")
parser.add_argument("--date-start",
help="Start process date (default=NOW - max-lookback)")
parser.add_argument("--date-stop",
help="Stop process date (default=NOW)")
return parser.parse_args(args)
def update_msid_contents_pkl(logger):
"""
Update the `msid_contents.pkl` file to contain a dict of the msid:content pairs.
:return: None
"""
filename = Path(sync_files['msid_contents'].abs)
# Check if an existing version of the file is the same and do not overwrite
# in that case.
if filename.exists():
with gzip.open(filename, 'rb') as fh:
msid_contents = pickle.load(fh)
if msid_contents == fetch.content:
return
logger.info(f'Writing contents pickle {filename}')
with gzip.open(filename, 'wb') as fh:
pickle.dump(fetch.content, fh, protocol=-1)
def main(args=None):
# Setup for updating the sync repository
opt = get_options(args)
sync_files.basedir = opt.sync_root
# Set up logging
loglevel = int(opt.log_level)
logger = pyyaks.logger.get_logger(name='cheta_update_server_sync', level=loglevel,
format="%(asctime)s %(message)s")
if opt.content:
contents = opt.content
else:
contents = set(fetch.content.values())
for content in sorted(contents):
update_sync_repo(opt, logger, content)
# Make the main msid_contents.pkl file
update_msid_contents_pkl(logger)
def remove_outdated_sync_files(opt, logger, index_tbl):
"""
Remove the sync data dirs and index file rows which correspond to data
that is more than opt.max_lookback days older than opt.date_stop (typically
NOW).
:param opt: options
:param logger: logger
:param index_tbl: table containing sync repo entries
:return: mask of rows that were removed
"""
min_time = (DateTime(opt.date_stop) - opt.max_lookback).secs
# Ephemeris files are time stamped around a month before current date,
# so leave them around for couple months longer.
if re.search(r'ephem\d$', str(fetch.ft['content'])):
min_time -= 60 * 86400
remove_mask = np.zeros(len(index_tbl), dtype=bool)
# Iterate over all but the last row of the table, removing any
# directories for updates from before `min_time`. Leaving the last
# row gives a direct record of when the last update occurred, but is
# benign from the perspective of updating the client archive.
for idx, row in zip(range(len(index_tbl) - 1), index_tbl):
if row['filetime0'] < min_time:
fetch.ft['date_id'] = row['date_id']
remove_mask[idx] = True
data_dir = sync_files['data_dir'].abs
if Path(data_dir).exists():
logger.info(f'Removing sync directory {data_dir}')
shutil.rmtree(data_dir)
return remove_mask
def update_sync_repo(opt, logger, content):
"""
:param opt: argparse options
:param logger: logger instance
:param content: content type
:return:
"""
# File types context dict
ft = fetch.ft
ft['content'] = content
index_file = Path(sync_files['index'].abs)
index_tbl = update_index_file(index_file, opt, logger)
if index_tbl is None:
# Index table was not created, nothing more to do here
logger.warning(f'No index table for {content}')
return
for row in index_tbl:
ft = fetch.ft
ft['date_id'] = row['date_id']
update_sync_data_full(content, logger, row)
update_sync_data_stat(content, logger, row, '5min')
update_sync_data_stat(content, logger, row, 'daily')
remove_mask = remove_outdated_sync_files(opt, logger, index_tbl)
if np.any(remove_mask):
index_tbl = index_tbl[~remove_mask]
logger.info(f'Writing {len(index_tbl)} row(s) to index file {index_file}')
index_tbl.write(index_file, format='ascii.ecsv')
def get_row_from_archfiles(archfiles):
# Make a row that encapsulates info for this setup of data updates. The ``date_id`` key is a
# date like 2019-02-20T2109z, human-readable and Windows-friendly (no :) for a unique
# identifier for this set of updates.
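# For example (formatting assumed from the description above): a filetime whose
# FITS date is 2019-02-20T21:09:13 would map to date_id '2019-02-20T2109z'.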
date_id = get_date_id(DateTime(archfiles[0]['filetime']).fits)
row = {'filetime0': archfiles[0]['filetime'],
'filetime1': archfiles[-1]['filetime'],
'date_id': date_id,
'row0': archfiles[0]['rowstart'],
'row1': archfiles[-1]['rowstop']}
return row
def check_index_tbl_consistency(index_tbl):
"""
Check for consistency of the index table.
:param index_tbl: index table (astropy Table)
:return msg: inconsistency message or None
"""
filetimes = []
for row in index_tbl:
filetimes.append(row['filetime0'])
filetimes.append(row['filetime1'])
if np.any(np.diff(filetimes) < 0):
msg = 'filetime values not monotonically increasing'
return msg
for idx, row0, row1 in zip(count(), index_tbl[:-1], index_tbl[1:]):
if row0['row1'] != row1['row0']:
msg = f'rows not contiguous at table date0={index_tbl["date_id"][idx]}'
return msg
# No problems
return None
def update_index_file(index_file, opt, logger):
"""Update the top-level index file of data available in the sync archive
:param index_file: Path of index ECSV file
:param opt: options
:param logger: output logger
:return: index table (astropy Table)
"""
if index_file.exists():
# Start time of last update contained in the sync repo (if it exists), but do not look
# back more than max_lookback days. This is relevant for rarely sampled
# content like cpe1eng.
filetime0 = (DateTime(opt.date_stop) - opt.max_lookback).secs
index_tbl = Table.read(index_file)
if len(index_tbl) == 0:
# Need to start with a fresh index_tbl since the string column will end up
# with a length=1 string (date_id) and add_row later will give the wrong result.
index_tbl = None
else:
filetime0 = max(filetime0, index_tbl['filetime1'][-1])
else:
# For initial index file creation use the --date-start option
index_tbl = None
filetime0 = DateTime(opt.date_start).secs
max_secs = int(opt.max_days * 86400)
time_stop = DateTime(opt.date_stop).secs
# Step through the archfile files entries and collect them into groups of up
# to --max-days based on file time stamp (which is an integer in CXC secs).
rows = []
filename = fetch.msid_files['archfiles'].abs
logger.debug(f'Opening archfiles {filename}')
with DBI(dbi='sqlite', server=filename) as dbi:
while True:
filetime1 = min(filetime0 + max_secs, time_stop)
logger.verbose(f'select from archfiles '
f'filetime > {DateTime(filetime0).fits[:-4]} {filetime0} '
f'filetime <= {DateTime(filetime1).fits[:-4]} {filetime1} '
)
archfiles = dbi.fetchall(f'select * from archfiles '
f'where filetime > {filetime0} '
f'and filetime <= {filetime1} '
f'order by filetime ')
# Found new archfiles? If so get a new index table row for them.
if len(archfiles) > 0:
rows.append(get_row_from_archfiles(archfiles))
filedates = DateTime(archfiles['filetime']).fits
logger.verbose(f'Got {len(archfiles)} archfiles rows from '
f'{filedates[0]} to {filedates[-1]}')
filetime0 = filetime1
# Stop if already queried out to the end of desired time range
if filetime1 >= time_stop:
break
if not rows:
logger.info(f'No updates available for content {fetch.ft["content"]}')
return index_tbl
# Create table from scratch or add new rows. In normal processing there
# will just be one row per run.
if index_tbl is None:
index_tbl = Table(rows)
else:
for row in rows:
index_tbl.add_row(row)
if not index_file.parent.exists():
logger.info(f'Making directory {index_file.parent}')
index_file.parent.mkdir(exist_ok=True, parents=True)
msg = check_index_tbl_consistency(index_tbl)
if msg:
msg += '\n'
msg += '\n'.join(index_tbl.pformat(max_lines=-1, max_width=-1))
logger.error(f'Index table inconsistency: {msg}')
return None
logger.info(f'Writing {len(rows)} row(s) to index file {index_file}')
index_tbl.write(index_file, format='ascii.ecsv')
return index_tbl
def update_sync_data_full(content, logger, row):
"""
Update full-resolution sync data including archfiles for index table ``row``
This generates a gzipped pickle file with a dict that has sync update values
for all available MSIDs in this chunk of ``content`` telemetry. This has
`archfiles` (structured ndarray of rows) to store archfiles rows and then
{msid}.quality, {msid}.data, {msid}.row0 and {msid}.row1.
:param content: content type
:param logger: global logger
:param row: archfile row
:return: None
"""
ft = fetch.ft
ft['interval'] = 'full'
outfile = Path(sync_files['data'].abs)
if outfile.exists():
logger.debug(f'Skipping {outfile}, already exists')
return
out = {}
msids = list(fetch.all_colnames[content]) + ['TIME']
# row{filetime0} and row{filetime1} are the *inclusive* `filetime` stamps
# for the archfiles to be included in this row. They do not overlap, so
# the selection below must be equality.
with DBI(dbi='sqlite', server=fetch.msid_files['archfiles'].abs) as dbi:
query = (f'select * from archfiles '
f'where filetime >= {row["filetime0"]} '
f'and filetime <= {row["filetime1"]} '
f'order by filetime ')
archfiles = dbi.fetchall(query)
out['archfiles'] = archfiles
# Row slice indexes into full-resolution MSID h5 files. All MSIDs share the
# same row0:row1 range.
row0 = row['row0']
row1 = row['row1']
# Go through each MSID and collect values
n_msids = 0
for msid in msids:
ft['msid'] = msid
filename = fetch.msid_files['msid'].abs
if not Path(filename).exists():
logger.debug(f'No MSID file for {msid} - skipping')
continue
n_msids += 1
with tables.open_file(filename, 'r') as h5:
out[f'{msid}.quality'] = h5.root.quality[row0:row1]
out[f'{msid}.data'] = h5.root.data[row0:row1]
out[f'{msid}.row0'] = row0
out[f'{msid}.row1'] = row1
n_rows = row1 - row0
logger.info(f'Writing {outfile} with {n_rows} rows of data and {n_msids} msids')
outfile.parent.mkdir(exist_ok=True, parents=True)
# TODO: increase compression to max (gzip?)
with gzip.open(outfile, 'wb') as fh:
pickle.dump(out, fh)
def _get_stat_data_from_archive(filename, stat, tstart, tstop, last_row1, logger):
"""
Return stat table rows in the range tstart <= time < tstop.
Also returns the corresponding table row indexes.
:param filename: HDF5 file to read
:param stat: stat (5min or daily)
:param tstart: min time
:param tstop: max time
:param last_row1: row1 for previous index table entry
:param logger: logger
:return:
"""
dt = STATS_DT[stat]
logger.debug(f'_get_stat_data({filename}, {stat}, {DateTime(tstart).fits}, '
f'{DateTime(tstop).fits}, {last_row1})')
with tables.open_file(filename, 'r') as h5:
# Check if tstart is beyond the end of the table. If so, return an empty table
table = h5.root.data
last_index = table[-1]['index']
last_time = (last_index + 0.5) * dt
if tstart > last_time:
logger.debug(f'No available stats data {DateTime(tstart).fits} > '
f'{DateTime(last_time).fits} (returning empty table)')
row0 = row1 = len(table)
table_rows = table[row0:row1]
else:
# Compute approx number of rows from the end for tstart. Normally the index value
# goes in lock step with row, but it can happen that an index is missed because of
# missing data. But if we back up by delta_rows, we are guaranteed to get to at
# least the row corresponding to tstart.
delta_rows = int((last_time - tstart) / dt) + 10
times = (table[-delta_rows:]['index'] + 0.5) * dt
# In the worst case of starting to sync a client archive for a rarely-sampled
# content like cpe1eng or pcad7eng (AOSPASA2CV,) we need to include an extra ``dt``
# on both ends to ensure that the first / last rows are caught. If the last
# full-res sample is either before or after the stat mid-point timestamp then
# stat sample may get dropped. This happened in real life for AOSPASA2CV.
# Having extra rows on front is OK because they just get clipped, and an extra
# row on back is OK because of clipping on the next update (and in normal
# processing we always want the sync archive to have all recent data).
sub_row0, sub_row1 = np.searchsorted(times, [tstart - dt, tstop + dt])
sub_row_offset = len(table) - delta_rows
row0 = sub_row0 + sub_row_offset
row1 = sub_row1 + sub_row_offset
# If we have the last value of row1 (from previous sync entry) then use
# that instead of computed value for row0.
if last_row1 is not None:
row0 = last_row1
table_rows = table[row0:row1] # returns np.ndarray (structured array)
return table_rows, row0, row1
def update_sync_data_stat(content, logger, row, stat):
"""
Update stats (5min, daily) sync data for index table ``row``
:param content: content name (e.g. acis4eng)
:param logger: logger
:param row: one row of the full-res index table
:param stat: stat interval (5min or daily)
:return:
"""
ft = fetch.ft
ft['interval'] = stat
outfile = Path(sync_files['data'].abs)
if outfile.exists():
logger.debug(f'Skipping {outfile}, already exists')
return
# First get the times corresponding to row0 and row1 in the full resolution archive
ft['msid'] = 'TIME'
with tables.open_file(fetch.msid_files['msid'].abs, 'r') as h5:
table = h5.root.data
tstart = table[row['row0']]
# Ensure that table row1 (for tstop) doesn't fall off the edge since the last
# index file row will have row1 exactly equal to the table length.
row1 = min(row['row1'], len(table) - 1)
tstop = table[row1]
out = {}
msids = list(fetch.all_colnames[content] - set(fetch.IGNORE_COLNAMES))
# Get dict of last sync repo row for each MSID. This is keyed as {msid: last_row1},
# where row1 is (as always) the slice row1.
last_rows_filename = sync_files['last_rows'].abs
if Path(last_rows_filename).exists():
logger.verbose(f'Reading {last_rows_filename}')
last_rows = pickle.load(open(last_rows_filename, 'rb'))
else:
last_rows = {}
# Go through each MSID and get the raw HDF5 table data corresponding to the
# time range tstart:tstop found above.
n_rows_set = set()
n_msids = 0
for msid in msids:
last_row1 = last_rows.get(msid)
ft['msid'] = msid
filename = fetch.msid_files['stats'].abs
if not Path(filename).exists():
logger.debug(f'No {stat} stat data for {msid} - skipping')
continue
n_msids += 1
stat_rows, row0, row1 = _get_stat_data_from_archive(
filename, stat, tstart, tstop, last_row1, logger)
logger.verbose(f'Got stat rows {row0} {row1} for stat {stat} {msid}')
n_rows_set.add(row1 - row0)
if row1 > row0:
out[f'{msid}.data'] = stat_rows
out[f'{msid}.row0'] = row0
out[f'{msid}.row1'] = row1
last_rows[msid] = row1
n_rows = n_rows_set.pop() if len(n_rows_set) == 1 else n_rows_set
outfile.parent.mkdir(exist_ok=True, parents=True)
# TODO: increase compression to max (gzip?)
logger.info(f'Writing {outfile} with {n_rows} rows of data and {n_msids} msids')
with gzip.open(outfile, 'wb') as fh:
pickle.dump(out, fh)
# Save the row1 value for each MSID to use as row0 for the next update
logger.verbose(f'Writing {last_rows_filename}')
with open(last_rows_filename, 'wb') as fh:
pickle.dump(last_rows, fh)
if __name__ == '__main__':
main()
``` |
{
"source": "johnny22/Weather_app",
"score": 3
} |
#### File: johnny22/Weather_app/ambient_data_getter.py
```python
import asyncio
from aiohttp import ClientSession
from aioambient import Client
from datetime import date
async def main() -> None:
"""Create a session"""
client = Client("96255560b32d46bf82101c3a42a9213ffae52644090e485eac958e2c4e55e88a", "65a5fd146d2a4bc09640a1fdf8c44887595fb4a5b0504693b8554e12a4ca2d87")
devices = await client.api.get_devices()
print(devices)
asyncio.run(main())
```
#### File: johnny22/Weather_app/database_creater.py
```python
import mysql.connector
from mysql.connector import errorcode
import mysql_config
def create_table(TABLE_NAME, TABLE_LIST):
cnx = mysql.connector.connect(**mysql_config.config)
cursor = cnx.cursor()
#cursor.execute ("DROP TABLE {}".format (TABLE_NAME))
try:
cursor.execute ("USE {}".format('weather_app'))
except mysql.connector.Error as err:
print ("Database {} does not exist.".format('weather_app'))
raise err
try:
cursor.execute('CREATE TABLE ' + TABLE_NAME + ' (' + TABLE_LIST[0] + ')')
print ('Created table {}.'.format(TABLE_NAME))
except mysql.connector.Error as err:
if err.errno == errorcode.ER_TABLE_EXISTS_ERROR:
print ('table was already created')
for column in TABLE_LIST:
try:
print ("Adding column ", column)
cursor.execute('ALTER TABLE ' + TABLE_NAME + ' ADD ' + column)
except mysql.connector.Error as err:
if err.errno == 1060:
print ("column already existed")
cursor.close()
cnx.close()
def create_wunderground_table():
TABLE_NAME = 'wunderground'
ACTION_VAR = 'CREATE TABLE'
TABLE_LIST = [
'date datetime',
'current_pressure decimal(6,2)',
'current_temp decimal(6,2)',
'today_precip decimal(6,2)',
'current_humidity decimal(6,2)'
]
cnx = mysql.connector.connect(**mysql_config.config)
cursor = cnx.cursor()
#cursor.execute ("DROP TABLE {}".format (TABLE_NAME))
try:
cursor.execute ("USE {}".format('weather_app'))
except mysql.connector.Error as err:
print ("Database {} does not exist.".format('weather_app'))
raise err
try:
cursor.execute(ACTION_VAR + ' ' + TABLE_NAME + ' (' + TABLE_LIST[0] + ')')
except mysql.connector.Error as err:
if err.errno == errorcode.ER_TABLE_EXISTS_ERROR:
print ('table was already created')
for column in TABLE_LIST:
try:
cursor.execute('ALTER TABLE ' + TABLE_NAME + ' ADD ' + column)
except mysql.connector.Error as err:
if err.errno == 1060:
print ("column already existed")
cursor.close()
cnx.close()
def create_accuweather_table():
TABLE_NAME = 'accuweather'
TABLE_LIST = ['date datetime',
'hour_precip decimal(6,2)',
'humidity decimal(6,2)',
'temperature decimal(6,2)',
'12_hour_precip decimal(6,2)',
'24_hour_precip decimal(6,2)',
'pressure decimal(6,2)',
'pressure_tendancy VARCHAR(255)',
'apparent_temperature decimal(6,2)',
'indoor_relative_humidity decimal(6,2)',
'feels_like_temperature decimal(6,2)',
'relative_humidity decimal(6,2)',
'wet_bulb_temperature decimal(6,2)',
'wind_direction decimal(6,2)',
'wind_speed decimal(6,2)',
'dew_point decimal(6,2)',
'temperature_max_past_12 decimal(6,2)',
'temperature_min_past_12 decimal(6,2)',
'temperature_max_past_24 decimal(6,2)',
'temperature_min_past_24 decimal(6,2)'
]
cnx = mysql.connector.connect(**mysql_config.config)
cursor = cnx.cursor()
cursor.execute ("DROP TABLE {}".format (TABLE_NAME))
try:
cursor.execute ("USE {}".format('weather_app'))
except mysql.connector.Error as err:
print ("Database {} does not exist.".format('weather_app'))
raise err
try:
print ('creating table ', TABLE_NAME)
cursor.execute('CREATE TABLE ' + TABLE_NAME + ' (' + TABLE_LIST[0] + ')')
except mysql.connector.Error as err:
if err.errno == errorcode.ER_TABLE_EXISTS_ERROR:
print ('table was already created')
for column in TABLE_LIST:
try:
print ("Adding column ", column)
cursor.execute('ALTER TABLE ' + TABLE_NAME + ' ADD ' + column)
except mysql.connector.Error as err:
if err.errno == 1060:
print ("Column already existed")
else: raise err
cursor.close()
cnx.close()
wunderground_column_list = [
'date datetime',
'location VARCHAR(255)',
'current_pressure decimal(6,2)',
'current_temp decimal(6,2)',
'today_precip decimal(6,2)',
'current_humidity decimal(6,2)',
'wind_speed decimal(6,2)',
'wind_direction decimal(6,2)',
'wind_gust decimal(6,2)',
'wind_chill decimal(6,2)',
'dew_point decimal(6,2)'
]
wunderground_forecast_column_list = [
'date_gathered datetime',
'date_forecast datetime',
'max_temp decimal(6,2)',
'min_temp decimal(6,2)',
'qpf decimal(6,2)',
'precip_type_day VARCHAR(255)',
'precip_type_night VARCHAR(255)',
'precip_chance_day decimal(6,2)',
'precip_chance_night decimal(6,2)',
'relative_humidity_day decimal(6,2)',
'relative_humidity_night decimal(6,2)',
'wx_phrase_day VARCHAR(255)',
'wx_phrase_night VARCHAR(255)',
'snow_amount_day decimal(6,2)',
'snow_amount_night decimal(6,2)',
'wind_direction_day decimal(6,2)',
'wind_direction_night decimal(6,2)',
'wind_direction_cardinal_day VARCHAR(255)',
'wind_direction_cardinal_night VARCHAR(255)',
'wind_speed_day decimal(6,2)',
'wind_speed_night decimal(6,2)',
'cloud_cover_chance_day decimal(6,2)',
'cloud_cover_chance_night decimal(6,2)'
]
accuweather_column_list = [
'date datetime',
'location VARCHAR(255)',
'hour_precip decimal(6,2)',
'humidity decimal(6,2)',
'temperature decimal(6,2)',
'12_hour_precip decimal(6,2)',
'24_hour_precip decimal(6,2)',
'pressure decimal(6,2)',
'pressure_tendancy VARCHAR(255)',
'apparent_temperature decimal(6,2)',
'indoor_relative_humidity decimal(6,2)',
'feels_like_temperature decimal(6,2)',
'relative_humidity decimal(6,2)',
'wet_bulb_temperature decimal(6,2)',
'wind_direction decimal(6,2)',
'wind_speed decimal(6,2)',
'dew_point decimal(6,2)',
'temperature_max_past_12 decimal(6,2)',
'temperature_min_past_12 decimal(6,2)',
'temperature_max_past_24 decimal(6,2)',
'temperature_min_past_24 decimal(6,2)'
]
create_table('wunderground', wunderground_column_list)
#create_table('accuweather', accuweather_column_list)
#create_table('wunderground_forecast', wunderground_forecast_column_list)
```
#### File: johnny22/Weather_app/data_storer.py
```python
import datetime
import mysql.connector
from mysql.connector import errorcode
import mysql_config
cnx = mysql.connector.connect(**mysql_config.config)
cursor = cnx.cursor()
def unpack(s):
return " ".join(map(str, s))
def store_data(table, column, data):
sql = "INSERT INTO %s %s VALUE %s" % (table, column, data)
try:
#print (sql)
cursor.execute(sql)
cnx.commit()
except ValueError:
print('bummer')
#print (table, column, data)
def store_list(table, data_dict):
"""This takes a table name and a dictionary of key:value pairs.
It is built to deal with integers with quotes around them, so if a string needs to be stored, it needs to have double quotes"""
#need to change this we should probably have the date created in each data getting location
if table != 'wunderground_forecast':
current_date = str(datetime.datetime.now())[:-4]
data_dict['date'] ="'" + str(current_date) + "'"
#print (data_dict)
column_list = []
for column in data_dict:
column_list.append(column)
column_list = tuple(column_list)
data_list = [data_dict[column] for column in column_list]
data_list = tuple(data_list)
#print ("columns", column_list)
#print (data_list)
translation = {39: None}
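# ord("'") == 39, so translating 39 -> None strips the single quotes that str()
# puts around tuple elements before the tuple text is spliced into the SQL below.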
sql = "INSERT INTO {} ".format(table) + "{} ".format(str(column_list).translate(translation))
sql += " VALUES" + "{} ".format(str(data_list).translate(translation))
#sql = f."INSERT INTO {table} ({unpack(column_list)}) VALUES ({unpack(data_list)})"
#sql = "INSERT INTO {} ({}) VALUES ({})".format(table, *column_list, *data_list)
try:
#sql = "INSERT INTO wunderground (current_temp, current_pressure, today_precip, current_humidity, date) VALUES(25,59,.3,65, '{}')".format(current_date)
#print (sql)
cursor.execute(sql)
cnx.commit()
except ValueError:
print('bummer')
#for column in data_dict:
# store_data(table, column, data_dict[column])
if __name__ == "__main__":
test_dict = {
'current_temp' : '25',
'current_pressure' : 59,
'today_precip' : .3,
'current_humidity' : 65
}
store_list('wunderground', test_dict)
```
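The `store_list` and `store_data` helpers above splice Python values directly into the SQL text and then strip quotes with a translation table, which is fragile and easy to break with unexpected input. Below is a minimal sketch of the same insert using mysql.connector's parameter binding; it reuses the `mysql_config.config` dict from this repo, while the helper name `store_row` is made up for illustration.
```python
# Hedged sketch: parameterized INSERT, letting the driver handle quoting/escaping.
# Assumes the same mysql_config.config and table layout used above.
import mysql.connector
import mysql_config

def store_row(table, data_dict):
    columns = ', '.join(data_dict)
    placeholders = ', '.join(['%s'] * len(data_dict))
    sql = 'INSERT INTO {} ({}) VALUES ({})'.format(table, columns, placeholders)
    cnx = mysql.connector.connect(**mysql_config.config)
    try:
        cursor = cnx.cursor()
        cursor.execute(sql, tuple(data_dict.values()))
        cnx.commit()
    finally:
        cnx.close()
```
Only the values are bound as parameters; table and column names still go through `format`, since placeholders cannot stand in for identifiers.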
#### File: Weather_app/front_end/output_template.py
```python
import jinja2
loader = jinja2.FileSystemLoader('Templates')
Env = jinja2.Environment(loader=loader)
template = Env.get_template('template.html')
#template.globals['return_print_var'] = return_print_var
#template.globals['len'] = len
def render_template(dict_list, forecast_list):
"""This renders the jinja template."""
return template.render(dict_list=dict_list, forecast_list=forecast_list)
``` |
{
"source": "johnny369369/dnspod_api",
"score": 3
} |
#### File: johnny369369/dnspod_api/T_dnsPod.py
```python
import sys,os
from T_dnsApi import *
from Mylogger import *
from All_Params import All_params
class Procedure(Global_Var):
def Add_Domain_And_Record(self):
Domains_List = []
with open('domain_list', 'r+',encoding='utf-8') as domains:
for d in domains.readlines():
Domains_List.append(d.strip())
running = Dns_Add_Domain_Record(self.Login_Token,self.Product)
running.Add_Domain(Domains_List)
running.Add_Record(Domains_List)
def Del_Domain(self):
'''Delete domains'''
Domains = All_params.check_input("Domains you need to delete, separated by commas:")
Domains_List = Domains.split(sep=',')
running = Dns_Del_Domain(self.Login_Token,self.Product)
running.Del_Domain(Domains_List)
def Add_Record(self):
'''Add DNS records'''
Domains = All_params.check_input("Domains to add DNS records for, multiple domains separated by commas:")
Sub_Domians = All_params.check_input("Sub-domains to add, separated by commas:")
Domains_List = Domains.split(sep=',')
running = Dns_Add_Record(self.Login_Token,self.Product)
Record_Type = All_params.check_input("Choose the record type, enter ( A or CNAME ):")
Value = All_params.check_input("Record value to resolve to:")
Record_Line = All_params.check_input("Record line (enter null for the default):")
if Record_Line == 'null':
Record_Line_value = '默认'
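# '默认' ("default") is the literal record-line value that the DNSPod API expects
# for the default line, so it is intentionally left untranslated here.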
else:
Record_Line_value = Record_Line
Sub_Domian_List = Sub_Domians.split(sep=',')
running.Add_Record(Domains_List,Sub_Domian_List,Record_Type,Record_Line_value,Value)
def Alter_Record(self):
'''Modify records'''
Domains = All_params.check_input("Domain whose DNS records you want to modify, so its sub-domain record IDs can be fetched; multiple records (only a single domain can be modified) separated by commas:")
Domains_List = Domains.split(sep=',')
running = Dns_Alter_Record(self.Login_Token,self.Product)
running.Get_Record(Domains_List)
Records = All_params.check_input("IDs of the DNS records to modify; enter the IDs separated by commas:")
Records_List = Records.split(sep=',')
Change = All_params.check_input("Field you want to modify ([sub_domain,record_type,area,value,mx,ttl,status]):")
Change_TO = All_params.check_input("New value for that field, e.g. record_type= A CNAME MX TTL, sub_domain= @ www test |this value corresponds to the field chosen in the previous step| change it to:")
if Change == 'value':
running.Alter_Record(Records_List, Change,Change_TO,Value='')
else:
Value = All_params.check_input("Record value to change to:")
running.Alter_Record(Records_List,Change,Change_TO,Value)
def Del_Record(self):
'''Delete records'''
Domains = All_params.check_input("Domains whose DNS records you want to delete, so their sub-domain record IDs can be fetched, separated by commas:")
Domains_List = Domains.split(sep=',')
running = Dns_Del_Record(self.Login_Token,self.Product)
running.Get_Record(Domains_List)
Records = All_params.check_input("Domain ID and DNS record ID to delete, multiple records separated by commas (format: agvip2003.com=384855336):")
Domains_Records_List = Records.split(sep=',')
running.Del_Record(Domains_Records_List)
def Get_Domain_List(self):
'''Get the domain list'''
running = Dns_Get_Domain_List(self.Login_Token,self.Product)
running.Get_Domain_List()
def Get_Domain_Record_Info(self):
'''Get a domain's DNS records'''
Domains = All_params.check_input("Domains whose records you want to view, so their sub-domain record IDs can be fetched, multiple domains separated by commas:")
Domains_List = Domains.split(sep=',')
running = Dns_Get_Domain_Record_Info(self.Login_Token,self.Product)
running.Get_Domain_Record_Info(Domains_List)
def Add_Domain(self):
'''Add domains'''
Domains = input("Enter the domains you want to add, separated by commas:")
Sub_Domians = input("Enter the sub-domains to add, separated by commas (press Enter to skip):")
Domains_List = Domains.split(sep=',')
running = Dns_Add_Domain(self.Login_Token,self.Product)
if Sub_Domians == '':
running.Add_Domain(Domains_List)
def Get_Domain_Log(self):
'''Query domain logs'''
Domains = All_params.check_input(u'Domains whose logs you want to query, separated by commas:')
Domains_List = Domains.split(sep=',')
running = Dns_Get_Domain_Log(self.Login_Token,self.Product)
running.Get_Domain_Log_Info(Domains_List)
def Batch_Alter_Domain_Record(self):
'''Batch-modify records'''
with open('domain_list', 'r+',encoding='utf-8') as domains:
for domainlist in domains.readlines():
running = Dns_Alter_Domin_Record(self.Login_Token,self.Product)
record_info = running.Record_Info(domainlist)
#value_dict = eval('self.{}'.format(self.Product))
Change = "value"
try:
for record in record_info:
if record['type'] == 'CNAME' and record['name'] == 'www':
Records = record['id']
Change_TO = 'your record'
running.Batch_Alter_Record(Records,Change,Change_TO,Value='')
if record['type'] == 'CNAME' and record['name'] == '@':
Records = record['id']
Change_TO = 'your record'
running.Batch_Alter_Record(Records,Change,Change_TO,Value='')
if record['type'] == 'CNAME' and record['name'] == 'm':
Records = record['id']
Change_TO = 'your record'
running.Batch_Alter_Record(Records,Change,Change_TO,Value='')
if record['type'] == 'CNAME' and record['name'] == 'vip':
Records = record['id']
Change_TO = 'your record'
running.Batch_Alter_Record(Records,Change,Change_TO,Value='')
if record['type'] == 'CNAME' and record['name'] == 'vipm':
Records = record['id']
Change_TO = 'your record'
running.Batch_Alter_Record(Records,Change,Change_TO,Value='')
except Exception as e:
print(e)
finally:
exit(0)
def Get_product_monit(self):
'''Get D-monitoring alarms'''
running = Dns_Get_D_Monit(self.Login_Token,self.Product)
running.D_Monit_Info()
def Get_D_list(self):
'''Get the monitoring list'''
running = Dns_Get_monit_list(self.Login_Token,self.Product)
running.Monit_list()
``` |
{
"source": "johnny5550822/gdax-army",
"score": 3
} |
#### File: gdax-army/lib/BuyStrategier.py
```python
import logging
from lib import Strategier
from lib.utils import *
class BuyStrategier(Strategier):
"""
Provide algorithms and rules to determine if we should buy.
"""
def __init__(self, army, currency, granularity, num_buckets, term_n,
macd_short_n, macd_long_n, time_str
):
Strategier.__init__(self, army, currency, granularity,
num_buckets, term_n, macd_short_n, macd_long_n,
time_str)
# logger
self.logger = setup_logger(__name__, 'logs/%s_log.log' % time_str)
def should_buy(self, option=1):
"""
Determine whether we should buy or not.
"""
if option == 1:
return self._determine_by_ema()
elif option == 2:
return self._determine_by_macd()
return False
def _determine_by_ema(self):
"""
Determine if we should buy in, based on the exponential moving average rule.
"""
# Get current price
price = self.army.get_currency_price(self.currency)
# Get the cloest ema
ema = self._get_cloest_ema(self.term_n)
# log
self.logger.info('Simple EMA: price:$%s, ema:$%s' % (price, ema))
# TODO: simply checking price >= ema and deciding to buy may be wrong
# because we don't know if we are buying at a very high price (e.g., the
# peak), we may have to have a better way to determine if we should
# excute a buy order. For example, (1) we should determine if the trend
# of the price is still going up or not (i.e., pass the peak). (2) Or
# we should let the algorithm to wait from price<=ema to price>=ema,
# the turning point should be a good price to buy
return (price >= ema)
def _determine_by_macd(self):
"""
Determine if we should buy based on MACD. if short_ema > long_ema, then buy.
"""
short_macd_ema, long_macd_ema = self._get_macd_ema()
# log
self.logger.info('MACD: short ema:$%s, long ema:$%s' %
(short_macd_ema, long_macd_ema))
return (short_macd_ema >= long_macd_ema)
```
#### File: gdax-army/lib/SellStrategier.py
```python
from __future__ import division
import logging
from lib import Strategier
from lib.utils import *
class SellStrategier(Strategier):
"""
Provide algorithms and rules to determine if we should sell.
"""
def __init__(self, army, currency, granularity, num_buckets, term_n,
macd_short_n, macd_long_n, time_str
):
Strategier.__init__(self, army, currency, granularity,
num_buckets, term_n, macd_short_n, macd_long_n,
time_str)
# logger
self.logger = setup_logger(__name__, 'logs/%s_log.log' % time_str)
def should_sell(self, buy_order, option=1):
"""
Determine whether we should sell, based on the buy order info.
"""
if option == 1:
return self._determine_by_ema(buy_order)
elif option == 2:
return self._determine_by_macd(buy_order)
elif option == 3:
return (self._determine_by_macd(buy_order) or self._determine_by_gain_percentage(buy_order))
return False
def should_resell(self, buy_order):
"""
Determine whether to resell even if the sell operation fails. If the selling price after deducting the taker fee (0.25%) is greater than the buying price, return True.
TODO: improve it
"""
# taker percentage
taker_fee = 0.0025
# Buy price & current price
buy_price = float(buy_order['price'])
price = self.army.get_currency_price(self.currency)
return price * (1 - taker_fee) > buy_price
def _determine_by_ema(self, buy_order):
"""
Determine if we should sell, based on the exponential moving average rule.
"""
# Buy price
buy_price = float(buy_order['price'])
# Get current price
price = self.army.get_currency_price(self.currency)
# Get the cloest ema
ema = self._get_cloest_ema(self.term_n)
# log
self.logger.info('Simple EMA: price:$%s, ema:$%s' % (price, ema))
# return True
# return (price < ema) and (price > buy_price)
return (price < ema)
def _determine_by_macd(self, buy_order):
"""
Determine if we should sell based on MACD
"""
# Buy price
buy_price = float(buy_order['price'])
# Get current price
price = self.army.get_currency_price(self.currency)
# Get the cloest ema
short_macd_ema, long_macd_ema = self._get_macd_ema()
# log
self.logger.info('MACD: short ema:$%s, long ema:$%s' %
(short_macd_ema, long_macd_ema))
# return. We cannot use price > buy_price because we may run into an infinite while loop for sell. Well, if we assume the stock is always going up, then price > buy_price would make sense, but it will take a long time (e.g., one day) to wait for the cycle to complete.
# return (short_macd_ema < long_macd_ema) and (price > buy_price)
return (short_macd_ema < long_macd_ema)
def _determine_by_gain_percentage(self, buy_order, percentage=0.02):
"""
Determine if we should sell based on gain percentage.
:params buy_order: the buy order
:params percentage: the minimal gain percentage
"""
# Buy price
buy_price = float(buy_order['price'])
# Get current price
price = self.army.get_currency_price(self.currency)
# calculate the percentage
gain_percentage = (price - buy_price) / buy_price
return gain_percentage > percentage
```
#### File: gdax-army/lib/Strategier.py
```python
class Strategier():
"""
The parent of the buy and sell strategiers.
"""
def __init__(self, army, currency, granularity, num_buckets, term_n,
macd_short_n, macd_long_n, time_str
):
# basic
self.army = army
self.currency = currency
# exponential moving average parameters
self.granularity = granularity # e.g., 3600=an hour, some values are
# weird (100, etc). We probably use 60 (1 min), 300 (5 min),
# 1800(30min), 3600(1 hr)
self.num_buckets = num_buckets # total number of buckets we are
# interested in
self.term_n = term_n # the number of buckets that is used to
# calculate the ema, important parameter to determine how sensitive and
# global of the moving average, i.e., the smallest, the more senstive
# to the latest price
# MACD parameters
self.macd_short_n = macd_short_n
self.macd_long_n = macd_long_n
def _get_simple_moving_average(self, df, n=10):
"""
return the simple moving average.
:params df: dataframe or series with one column
:params n: the size of the moving window (number of periods
involved)
10-20 short-term trends
50 mid-term trends
200 long-term trends
"""
return df.rolling(n).mean()
def _get_exponential_moving_average(self, n=10):
"""
return the n-day exponential moving average
:params n: the size of the moving window (number or periods
involved)
10-20 short-term trends
50 mid-term trends
200 long-term trends
"""
time_, low_, high_, mean_, open_, close_, \
volume_ = self.army.get_trade_trends(currency=self.currency,
granularity=self.granularity,
num_buckets=self.num_buckets)
return close_.ewm(span=n).mean()
def _get_cloest_ema(self, n=10):
"""
Get the cloest ema.
"""
# Get the cloest ema
ema = self._get_exponential_moving_average(n)
return ema.iloc[-1]
def _get_macd_ema(self):
"""
Get the macd short and long EMA.
https://www.youtube.com/watch?v=E3KP1WyLITY&index=11&list=PL0I_pt3KKS0
zT0y7gW2CLrSP1xTPYRMK_
"""
# Get the cloest ema for short ema
short_ema = self._get_cloest_ema(self.macd_short_n)
# Get the cloest ema for long ema
long_ema = self._get_cloest_ema(self.macd_long_n)
return short_ema, long_ema
```
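Since the strategies above boil down to comparisons between pandas exponential weighted means, a small self-contained sketch of the EMA / MACD-style crossover on a made-up price series may help; the span values echo the `macd_short_n` / `macd_long_n` idea, but every number below is illustrative only.
```python
# Hedged sketch of the EMA / MACD-crossover comparison used by the strategiers,
# computed on a synthetic price series (all values are illustrative only).
import pandas as pd

close = pd.Series([100.0, 101.5, 99.8, 102.2, 103.0, 104.1, 103.5, 105.0])

short_ema = close.ewm(span=12).mean()   # fast EMA (plays the role of macd_short_n)
long_ema = close.ewm(span=26).mean()    # slow EMA (plays the role of macd_long_n)

# BuyStrategier._determine_by_macd buys when the fast EMA sits above the slow one;
# SellStrategier._determine_by_macd sells on the opposite condition.
should_buy = short_ema.iloc[-1] >= long_ema.iloc[-1]
print(short_ema.iloc[-1], long_ema.iloc[-1], should_buy)
```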
#### File: gdax-army/lib/utils.py
```python
from datetime import datetime
import tzlocal
import logging
import time
from pytz import timezone, utc
def unix_timestamp_to_readable(timestamp):
"""
Convert a unix timestamp to a readable format
params timestamp: unix timestamp
"""
local_timezone = tzlocal.get_localzone() # get pytz timezone
local_time = datetime.fromtimestamp(timestamp, local_timezone)
return local_time.strftime("%Y-%m-%d %H:%M:%S.%f%z (%Z)")
def to_decimal_place(x, decimal_place=2):
"""
Correct the number to some decimal place
:params decimal_place: the number of decimal places to keep
"""
decimal = '{0:.%sf}' % decimal_place
return float(decimal.format(x))
def setup_logger(name, log_file, level=logging.INFO):
"""
Function setup as many loggers as you want
:params log_file: log file location
:params level: logging level
"""
# Set timezone converter
logging.Formatter.converter = _customTime
# set formmater
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
# set FileHandler
handler = logging.FileHandler(log_file)
handler.setFormatter(formatter)
logger = logging.getLogger(name)
logger.setLevel(level)
logger.addHandler(handler)
# set StreamHandler so that you can see in terminal
ch = logging.StreamHandler()
logger.addHandler(ch)
return logger
def _customTime(*args):
utc_dt = utc.localize(datetime.utcnow())
my_tz = timezone("America/Los_Angeles")
converted = utc_dt.astimezone(my_tz)
return converted.timetuple()
def get_current_time():
"""
Get current time in PST.
"""
tz = timezone('America/Los_Angeles')
ct = datetime.now(tz)
return ct.strftime('%Y-%m-%d_%H-%M-%S')
``` |
{
"source": "johnny555/2d3g",
"score": 3
} |
#### File: johnny555/2d3g/main.py
```python
__author__ = 'Admin'
import utils as john
# list of holes in Complete_Geophysics.csv
# ['DD0509' 'DD0541' 'DD0542' 'DD0544' 'DD0551' 'DD0473' 'DD0409' 'DD0415'
# 'DD0980A' 'DD0989' 'DD1000' 'DD0991' 'DD1006' 'DD1005' 'DD1010' 'DD0992'
# 'DD1012' 'DD1013' 'DD1014' 'DD1070' 'DD1073' 'DD1077' 'DD1080' 'DD1081'
# 'DD1083' 'DD1082' 'DD1083A' 'DD1086A' 'DD1091' 'DD1095' 'DD1097' 'DD1098'
# 'DD1099' 'DD1100' 'DD1097A' 'DD1101' 'DD1102' 'DD1103' 'DD1105' 'DD1104'
# 'DD1106' 'DD1107' 'DD1104A' 'DD1108' 'DD1110' 'DD1111' 'DD1112' 'DD1113'
# '\x1a']
def n_holes(df):
return len(df.HOLEID.unique())
def extract_holes(HOLEID):
import pandas as pd
import os
if os.path.isfile('%s.csv'%HOLEID):
subset = pd.read_csv('%s.csv'%HOLEID)
else:
geo = pd.read_csv('Complete_Geophysics.csv')
hole = geo.query('HOLEID == "%s"'%HOLEID)
subset = hole[['DEPTH','DENB','DENL','GRDE', 'LSDU']].sort('DEPTH')
subset.to_csv('%s.csv'%HOLEID, index = False)
return subset
def extract_peak_loc(hole, holeID):
response_th = 1000
# window_size = 1# meters
window_size = 4# meters
peak_flag = [0]*len(hole['DEPTH'])
seam_list = [] # list of holes
nRows = len(hole)
coal_seam_bound_start = False
for i,depth in enumerate(hole['DEPTH']):
if i%200 == 0:
print( '%s progress: %i/%i'%(holeID, i, nRows))
# if depth > 80: # start looking at 80 meters
if depth > 90: # start looking at 80 meters
# get the indexes within the scan window, this is very slow, maybe faster query?
window_idx = hole[(hole['DEPTH'] >= (depth - window_size/2.0)) & ((hole['DEPTH'] <= (depth + window_size/2.0)))].index.tolist()
bottom =depth - window_size/2.0
top = depth + window_size/2.0
# atv = hole.query('DEPTH > @bottom and DEPTH <= @top')['LSDU'].mean()
# print hole['LSDU'][window_idx].mean()
if hole['LSDU'][window_idx].mean() > response_th:
# if hole.query('DEPTH > @bottom and DEPTH <= @top')['LSDU'].mean() > response_th:
peak_flag[i] = 10000
if coal_seam_bound_start == False:
seam_prop = [depth]
coal_seam_bound_start = True
# print 'ich bin hier'
elif coal_seam_bound_start == True:
# print 'ich bin wieder hier'
seam_prop.append(depth) # add the end depth
seam_list.append(seam_prop) # add hole [start end] to hole list
seam_prop = [] # reset hole [start end]
coal_seam_bound_start = False
# if hole['LSDU'][i] > response_th:
# peak_flag[i] = 10000
hole['Flag'] = peak_flag
total_depth = depth
coal_depth = 0
for coal_seam in seam_list:
coal_depth += (coal_seam[1] - coal_seam[0])
coal_percentage = coal_depth/total_depth
# write to txt
f = open('%s.txt'%holeID, 'w')
f.write('Coal Percentage: %s\n'%coal_percentage)
f.write('Coal Depth: %s\n'%coal_depth)
f.write('Total Depth: %s\n'%total_depth)
f.write('Seam Structure: %s'%seam_list)
f.close()
# write to json
out_dict = {}
out_dict['Coal Percentage'] = coal_percentage
out_dict['Coal Depth'] = coal_depth
out_dict['Total Depth'] = total_depth
out_dict['Seam Structure'] = seam_list
import json
with open('%s.json'%holeID,'w') as fp:
json.dump(out_dict, fp)
return seam_list
def extract_seams(bore_id, seam_list = []):
import numpy as np
# depth = seam_list[0][0]
print('Extracting {}'.format(bore_id))
top = 100
bottom = 400
window_size = bottom-top
mid = (top+bottom)/2.0
bin_size = 0.1
try:
df_data = john.get_data(boreid = bore_id, centre_point = mid, window_size = window_size, bin_width = bin_size)
except Exception as e:
print('Exception raised! {}'.format(e))
return
df_data.to_csv('%s_cleandata.csv'%bore_id, index=False)
return df_data
# ['ADEN', 'GRDE', 'DENB', 'LSDU', 'acoustic']
# hole data exist in both geophysics and acoustic scanner
# ['DD0541' 'DD0542' 'DD0551'
# 'DD0980A' 'DD0989' 'DD1000' 'DD0991' 'DD1006' 'DD1005' 'DD1010' 'DD0992'
# 'DD1012' 'DD1013' 'DD1014' 'DD1070' 'DD1073' 'DD1077' 'DD1080' 'DD1081'
# 'DD1083' 'DD1082' 'DD1083A' 'DD1086A' 'DD1091' 'DD1095' 'DD1097' 'DD1098'
# 'DD1099' 'DD1100' 'DD1097A' 'DD1101' 'DD1102' 'DD1103' 'DD1105' 'DD1104'
# 'DD1106' 'DD1107' 'DD1104A' 'DD1108' 'DD1110' 'DD1111' 'DD1112' 'DD1113'
# '\x1a']
if __name__ == '__main__':
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
holeId = [
'DD1097',
'DD1098',
'DD1099',
'DD1100',
'DD1101',
'DD1102',
'DD1103', 'DD1104', 'DD1105', 'DD1106',
'DD1107', 'DD1108',
'DD0541',
'DD0542',
'DD0551',
'DD0980A',
'DD0989',
'DD0991',
'DD0992',
'DD1000',
'DD1005',
'DD1006',
'DD1010',
'DD1012',
'DD1013',
'DD1014']
# extract_seams(bore_id = holeID, seam_list = hole_boundaries)
result = pd.concat([extract_seams(bore_id=h) for h in holeId], ignore_index=True)
result.to_csv('all_data.csv', index=False)
```
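`extract_peak_loc` above re-queries a 4 m depth window around every sample, which is why it needs the periodic progress prints. The same LSDU-threshold idea can be expressed with a vectorized rolling mean, sketched below; the column names match the per-hole CSVs written by `extract_holes`, while converting metres to a sample count assumes roughly uniform depth spacing, which may not hold for every hole.
```python
# Hedged sketch: flag likely coal intervals with a centred rolling mean of LSDU.
# Assumes a DataFrame like the ones extract_holes() writes (DEPTH and LSDU columns)
# and roughly uniform depth sampling.
import pandas as pd

def flag_seams(hole, window_m=4.0, threshold=1000.0, start_depth=90.0):
    depth_step = hole['DEPTH'].diff().median()            # metres per sample
    window_samples = max(int(round(window_m / depth_step)), 1)
    rolling_lsdu = hole['LSDU'].rolling(window_samples, center=True, min_periods=1).mean()
    return (rolling_lsdu > threshold) & (hole['DEPTH'] > start_depth)
```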
#### File: johnny555/2d3g/utils.py
```python
__author__ = 'jvial'
import pandas as pd
import numpy as np
lith = pd.read_csv('corrected_lithology.csv')
geo = pd.read_csv('Complete_Geophysics.csv')
# Read in all ATV data once.
atv_dictionary = {}
print('Read in lith and geo')
def get_label(bore_id, depth, rtype=False):
"""
Function to get the label, will return either a string, nan or None.
I bore_id is unknown it will raise an error.
If we are at a labelled stratigraphy it will return a string.
If we are at an unlabbelled stratigraphy it will return NaN
if we are outside the bounds it will return None.
:param bore_id: A string containing the bore id
:param depth: a float for the depth
:return:
"""
holeid = pd.unique(lith.HOLEID)
if bore_id not in holeid.tolist():
raise Exception('BoreId {} not in corrected lith logs'.format(bore_id))
bore = lith.query('HOLEID == @bore_id and GEOLFROM < @depth and GEOLTO >= @depth')
if bore.shape[0] >= 1:
if rtype:
seam = bore.iloc[0, 4] # Rock type
else:
seam = bore.iloc[0, 5] # The lith_seam is at location 5
else:
seam = None
return seam
cols = ['ADEN', 'AUCS', 'AVOL', 'AXLE', 'AXLN', 'AZID', 'AZIF', 'AZP1', 'BBRG',
'BISI', 'BRAD', 'BRDU', 'BRG1', 'BRG2', 'BRG3', 'BRG4', 'CADE', 'CALD',
'CODE', 'CORF', 'DECR', 'DENB', 'DENL', 'DEPO', 'DIPF', 'FE1', 'FE1C',
'FE1U', 'FE2', 'FMAG', 'GRDE', 'GRNP', 'HVOL', 'LSDU', 'LSN', 'MC2A',
'MC2F', 'MC2U', 'MC4F', 'MC6F', 'MCUF', 'MDTC', 'MSAL', 'P1F', 'P2F',
'P3F', 'PCH1', 'RAD1', 'RAD2', 'RAD3', 'RAD4', 'RPOR', 'SPOR', 'SSN',
'TDEP', 'TDIF', 'TEMP', 'TILD', 'UCS', 'UCSM', 'VDEN', 'VL2A', 'VL2F',
'VL4F', 'VL6F', 'VLUF', 'UCSD', 'TILF', 'GRFE', 'DTCA',
'DTCB', 'DTCC', 'DTCD', 'DTCE', 'DTCF']
atv_cols = ['ATV_AMP[{}]'.format(i) for i in range(144)]  # 'ATV_AMP[0]' .. 'ATV_AMP[143]'
def get_windows(boreid, centre_point, window_size, bin_width):
"""
Function to get data related to the windows around a point.
Note that the first run with a new bore id will need to load
the data from xls (SLOOOOW!) subsequent runs will use a cached
form of this data.
:param bore_id: String of the bore id.
:param centre_point: depth of the centre oint
:param window_size: window size in meters.
:param bin_width: bin width in meters
:return: will return a pandas data frame containing data.
"""
bore = geo.query('HOLEID == @boreid').sort('DEPTH')
if atv_dictionary.get(boreid, None) is None:
print('Need to read the acoustic scanner file')
atv = pd.read_excel('Acoustic Scanner/ATV_Data_{}.xlsx'.format(boreid))
print('done')
atv_dictionary[boreid] = atv
else:
atv = atv_dictionary[boreid]
bottom = centre_point - window_size/2.
top = centre_point + window_size/2.
bore = bore.query('DEPTH > @bottom and DEPTH <= @top').sort('DEPTH')
atv = atv.rename(columns={'MD': 'DEPTH'})
atv = atv.query('DEPTH > @bottom and DEPTH <= @top').sort('DEPTH')
def bin_number(depth):
return np.floor(depth/bin_width)*bin_width
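# e.g. main.py calls get_data with bin_width = 0.1, so depths 123.40-123.49 all
# map to bin 123.4 and groupby(bin_number) averages the readings in 0.1 m slices.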
geo_df = bore.set_index('DEPTH')[cols].groupby(bin_number, axis=0).mean()
atv_df = atv.set_index('DEPTH').groupby(bin_number).mean()
result = pd.concat([geo_df, atv_df], axis=1)
return result
def get_data(boreid, centre_point, window_size, bin_width):
result = get_windows(boreid, centre_point, window_size, bin_width)
result = result.reset_index().rename(columns={'index':'DEPTH'})
result['LABELS'] = result.DEPTH.apply(lambda x: get_label(boreid, x))
result['LABELS_ROCK_TYPE'] = result.DEPTH.apply(lambda x: get_label(boreid, x, rtype=True))
return result
``` |
{
"source": "johnny555/coffee_robot",
"score": 3
} |
#### File: coffee_robot/src/dad_joke.py
```python
import rospy
import os
from move_base_msgs.msg import MoveBaseActionResult
from numpy.random import choice
# Taken from icanhazdadjoke.com
jokes = [
"I'm tired of following my dreams. I'm just going to ask them where they are going and meet up with them later."
"Did you hear about the guy whose whole left side was cut off? He's all right now.",
"Why didn't the skeleton cross the road? Because he had no guts.",
"What did one nut say as he chased another nut? I'm a cashew!",
"Chances are if you' ve seen one shopping center, you've seen a mall.",
"I knew I shouldn't steal a mixer from work, but it was a whisk I was willing to take.",
"How come the stadium got hot after the game? Because all of the fans left.",
"Why was it called the dark ages? Because of all the knights. ",
"A steak pun is a rare medium well done.",
"Why did the tomato blush? Because it saw the salad dressing.",
"Did you hear the joke about the wandering nun? She was a roman catholic.",
"What creature is smarter than a talking parrot? A spelling bee.",
"I'll tell you what often gets over looked... garden fences.",
"Why did the kid cross the playground? To get to the other slide.",
"Why do birds fly south for the winter? Because it's too far to walk.",
"What is a centipedes's favorite Beatle song? I want to hold your hand, hand, hand, hand...",
"My first time using an elevator was an uplifting experience. The second time let me down.",
"To be Frank, I'd have to change my name.",
"Slept like a log last night ... woke up in the fireplace.",
"Why does a Moon-rock taste better than an Earth-rock? Because it's a little meteor."
]
class DadJoke():
def __init__(self):
self.fiducial_pose_sub = rospy.Subscriber('/move_base/result', MoveBaseActionResult,
self.speak_joke)
self.ctrl_c = False
self.rate = rospy.Rate(10) # 10hz
rospy.on_shutdown(self.shutdownhook)
def speak_joke(self, result):
rospy.loginfo('deploy dad joke')
os.system('espeak "Hello, here is your coffee ..."')
os.system('espeak " ' + choice(jokes) + ' " ')
os.system('espeak ' + '"Goodbye"')
def shutdownhook(self):
# works better than the rospy.is_shutdown()
self.ctrl_c = True
if __name__ == '__main__':
rospy.init_node('dad_joke_node', anonymous=True)
joker = DadJoke()
rospy.loginfo('Ready for jokes')
try:
rospy.spin()
except rospy.ROSInterruptException:
rospy.loginfo('exception...')
pass
rospy.loginfo('shutting down')
```
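`speak_joke` above builds its espeak command through `os.system` and string concatenation, so a joke containing a double quote would break the shell command. A hedged alternative using `subprocess` with an argument list (no shell parsing at all) is sketched below; it assumes the same `espeak` binary is on the PATH and the same `jokes` list defined above.
```python
# Hedged sketch: invoke espeak without a shell, so joke text needs no quoting.
# Assumes `espeak` is installed and `jokes` is the list defined above.
import subprocess
from numpy.random import choice

def speak_joke_via_subprocess(jokes):
    subprocess.run(['espeak', 'Hello, here is your coffee ...'])
    subprocess.run(['espeak', choice(jokes)])
    subprocess.run(['espeak', 'Goodbye'])
```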
#### File: coffee_robot/src/pose_init.py
```python
import rospy
from geometry_msgs.msg import PoseWithCovarianceStamped
class InitPose():
def __init__(self):
self.amcl_pose_init = rospy.Publisher('/initialpose', PoseWithCovarianceStamped, queue_size=1)
self.fiducial_pose = None
self.fiducial_pose_sub = rospy.Subscriber('/fiducial_pose', PoseWithCovarianceStamped,
self.get_pose)
self.ctrl_c = False
self.rate = rospy.Rate(10) # 10hz
rospy.on_shutdown(self.shutdownhook)
def get_pose(self, pose):
self.fiducial_pose = pose
def publish_once(self, time=0):
"""
This is because publishing in topics sometimes fails the first time you publish.
In continuous publishing systems, this is no big deal, but in systems that publish only
once, it IS very important.
"""
if self.fiducial_pose is not None:
while not self.ctrl_c:
connections = self.amcl_pose_init.get_num_connections()
if connections > 0:
self.amcl_pose_init.publish(self.fiducial_pose)
rospy.loginfo(self.fiducial_pose)
rospy.loginfo("Pose Published")
break
else:
self.rate.sleep()
def shutdownhook(self):
# works better than the rospy.is_shutdown()
self.ctrl_c = True
if __name__ == '__main__':
rospy.init_node('reset_amcl_pose_node', anonymous=True)
resetter = InitPose()
rospy.loginfo('setting up')
try:
rospy.sleep(1)
resetter.publish_once()
rospy.Timer(rospy.Duration(60), resetter.publish_once)
rospy.spin()
except rospy.ROSInterruptException:
rospy.loginfo('exception...')
pass
rospy.loginfo('shutting down')
``` |
{
"source": "johnny555/Reinforcement-Learning",
"score": 3
} |
#### File: Reinforcement-Learning/Week3/utils.py
```python
import numpy as np
import gym
def test_game(env, agent, test_episodes):
reward_games = []
for _ in range(test_episodes):
obs = env.reset()
rewards = 0
while True:
action = agent.act(obs)
next_obs, reward, done, _ = env.step(action)
obs = next_obs
rewards += reward
if done:
reward_games.append(rewards)
obs = env.reset()
break
return np.mean(reward_games)
```
#### File: Reinforcement-Learning/Week5/PPO.py
```python
import numpy as np
import gym
from tensorboardX import SummaryWriter
import datetime
from collections import namedtuple
from collections import deque
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.nn.utils.clip_grad import clip_grad_norm_
class A2C_policy(nn.Module):
'''
Policy neural network
'''
def __init__(self, input_shape, n_actions):
super(A2C_policy, self).__init__()
self.lp = nn.Sequential(
nn.Linear(input_shape[0], 32),
nn.ReLU(),
nn.Linear(32, 32),
nn.ReLU())
self.mean_l = nn.Linear(32, n_actions[0])
self.mean_l.weight.data.mul_(0.1)
self.var_l = nn.Linear(32, n_actions[0])
self.var_l.weight.data.mul_(0.1)
self.logstd = nn.Parameter(torch.zeros(n_actions[0]))
def forward(self, x):
ot_n = self.lp(x.float())
return F.tanh(self.mean_l(ot_n))
class A2C_value(nn.Module):
'''
Value (critic) neural network
'''
def __init__(self, input_shape):
super(A2C_value, self).__init__()
self.lp = nn.Sequential(
nn.Linear(input_shape[0], 32),
nn.ReLU(),
nn.Linear(32, 32),
nn.ReLU(),
nn.Linear(32, 1))
def forward(self, x):
return self.lp(x.float())
class Env:
'''
Environment class
'''
game_rew = 0
last_game_rew = 0
game_n = 0
last_games_rews = [-200]
n_iter = 0
def __init__(self, env_name, n_steps, gamma, gae_lambda, save_video=False):
super(Env, self).__init__()
# create the new environment
self.env = gym.make(env_name)
self.obs = self.env.reset()
self.n_steps = n_steps
self.action_n = self.env.action_space.shape
self.observation_n = self.env.observation_space.shape[0]
self.gamma = gamma
self.gae_lambda = gae_lambda
# CHANGED
def steps(self, agent_policy, agent_value):
'''
Execute the agent n_steps in the environment
'''
memories = []
for s in range(self.n_steps):
self.n_iter += 1
# get the agent policy
ag_mean = agent_policy(torch.tensor(self.obs))
# get an action following the policy distribution
logstd = agent_policy.logstd.data.cpu().numpy()
action = ag_mean.data.cpu().numpy() + np.exp(logstd) * np.random.normal(size=logstd.shape)
#action = np.random.normal(loc=ag_mean.data.cpu().numpy(), scale=torch.sqrt(ag_var).data.cpu().numpy())
action = np.clip(action, -1, 1)
state_value = float(agent_value(torch.tensor(self.obs)))
# Perform a step in the environment
new_obs, reward, done, _ = self.env.step(action)
# Update the memories with the last interaction
if done:
# change the reward to 0 in case the episode is end
memories.append(Memory(obs=self.obs, action=action, new_obs=new_obs, reward=0, done=done, value=state_value, adv=0))
else:
memories.append(Memory(obs=self.obs, action=action, new_obs=new_obs, reward=reward, done=done, value=state_value, adv=0))
self.game_rew += reward
self.obs = new_obs
if done:
print('#####',self.game_n, 'rew:', int(self.game_rew), int(np.mean(self.last_games_rews[-100:])), np.round(reward,2), self.n_iter)
# reset the environment
self.obs = self.env.reset()
self.last_game_rew = self.game_rew
self.game_rew = 0
self.game_n += 1
self.n_iter = 0
self.last_games_rews.append(self.last_game_rew)
# compute the discount reward of the memories and return it
return self.generalized_advantage_estimation(memories)
def generalized_advantage_estimation(self, memories):
'''
Calculate the advantage-discounted reward as in the paper
'''
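# GAE as implemented below:
#   delta_t = r_t + gamma * V(s_{t+1}) - V(s_t)
#   A_t     = delta_t + gamma * lambda * A_{t+1}
# and the stored 'reward' field becomes A_t + V(s_t), i.e. the return target
# later used for the value-function loss.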
upd_memories = []
run_add = 0
for t in reversed(range(len(memories)-1)):
if memories[t].done:
run_add = memories[t].reward
else:
sigma = memories[t].reward + self.gamma * memories[t+1].value - memories[t].value
run_add = sigma + run_add * self.gamma * self.gae_lambda
## NB: the last memory is missing
# Update the memories with the discounted reward
upd_memories.append(Memory(obs=memories[t].obs, action=memories[t].action, new_obs=memories[t].new_obs, reward=run_add + memories[t].value, done=memories[t].done, value=memories[t].value, adv=run_add))
return upd_memories[::-1]
def log_policy_prob(mean, std, actions):
# policy log probability
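# Gaussian log-density: log N(a; mu, var) = -(a - mu)^2 / (2*var) - log(sqrt(2*pi*var)),
# where var = exp(std), i.e. `std` is treated here as the log of the variance.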
act_log_softmax = -((mean-actions)**2)/(2*torch.exp(std).clamp(min=1e-4)) - torch.log(torch.sqrt(2*math.pi*torch.exp(std)))
return act_log_softmax
def compute_log_policy_prob(memories, nn_policy, device):
'''
Run the policy on the observation in the memory and compute the policy log probability
'''
n_mean = nn_policy(torch.tensor(np.array([m.obs for m in memories], dtype=np.float32)).to(device))
n_mean = n_mean.type(torch.DoubleTensor)
logstd = nn_policy.logstd.type(torch.DoubleTensor)
actions = torch.DoubleTensor(np.array([m.action for m in memories])).to(device)
return log_policy_prob(n_mean, logstd, actions)
def clipped_PPO_loss(memories, nn_policy, nn_value, old_log_policy, adv, epsilon, writer, device):
'''
Clipped PPO loss as in the paper.
It returns the clipped policy loss and the value loss.
'''
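# Clipped surrogate objective (maximized, so pg_loss below is its negative):
#   r_t(theta) = exp(log pi_new(a_t|s_t) - log pi_old(a_t|s_t))
#   L_CLIP     = E[ min(r_t * A_t, clip(r_t, 1 - eps, 1 + eps) * A_t) ]
# plus an MSE value loss against the GAE return targets stored in the memories.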
# state value
rewards = torch.tensor(np.array([m.reward for m in memories], dtype=np.float32)).to(device)
value = nn_value(torch.tensor(np.array([m.obs for m in memories], dtype=np.float32)).to(device))
# Value loss
vl_loss = F.mse_loss(value.squeeze(-1), rewards)
new_log_policy = compute_log_policy_prob(memories, nn_policy, device)
rt_theta = torch.exp(new_log_policy - old_log_policy.detach())
adv = adv.unsqueeze(-1) # add a dimension because rt_theta has shape: [batch_size, n_actions]
pg_loss = -torch.mean(torch.min(rt_theta*adv, torch.clamp(rt_theta, 1-epsilon, 1+epsilon)*adv))
return pg_loss, vl_loss
def test_game(tst_env, agent_policy, test_episodes):
'''
Execute test episodes on the test environment
'''
reward_games = []
steps_games = []
for _ in range(test_episodes):
obs = tst_env.reset()
rewards = 0
steps = 0
while True:
ag_mean = agent_policy(torch.tensor(obs))
action = np.clip(ag_mean.data.cpu().numpy().squeeze(), -1, 1)
next_obs, reward, done, _ = tst_env.step(action)
steps += 1
obs = next_obs
rewards += reward
if done:
reward_games.append(rewards)
steps_games.append(steps)
obs = tst_env.reset()
break
return np.mean(reward_games), np.mean(steps_games)
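# A Memory holds one transition: observation, action, next observation, the GAE
# value target (stored in 'reward'), the done flag, the critic's value estimate,
# and the advantage filled in by generalized_advantage_estimation().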
Memory = namedtuple('Memory', ['obs', 'action', 'new_obs', 'reward', 'done', 'value', 'adv'], rename=False)
# Hyperparameters
ENV_NAME = 'BipedalWalker-v2'
#ENV_NAME = 'BipedalWalkerHardcore-v2'
MAX_ITER = 500000
BATCH_SIZE = 64
PPO_EPOCHS = 7
device = 'cpu'
CLIP_GRADIENT = 0.2
CLIP_EPS = 0.2
TRAJECTORY_SIZE = 2049
GAE_LAMBDA = 0.95
GAMMA = 0.99
## Test Hyperparameters
test_episodes = 5
best_test_result = -1e5
save_video_test = True
N_ITER_TEST = 100
POLICY_LR = 0.0004
VALUE_LR = 0.001
now = datetime.datetime.now()
date_time = "{}_{}.{}.{}".format(now.day, now.hour, now.minute, now.second)
load_model = False
checkpoint_name = "checkpoints/..."
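# Training layout: each outer iteration collects TRAJECTORY_SIZE environment steps,
# then reuses that batch for PPO_EPOCHS epochs of minibatch updates of size BATCH_SIZE
# before gathering new data. The agent is evaluated every N_ITER_TEST iterations.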
if __name__ == '__main__':
# Create the environment
env = Env(ENV_NAME, TRAJECTORY_SIZE, GAMMA, GAE_LAMBDA)
writer_name = 'PPO_'+ENV_NAME+'_'+date_time+'_'+str(POLICY_LR)+'_'+str(VALUE_LR)+'_'+str(TRAJECTORY_SIZE)+'_'+str(BATCH_SIZE)
writer = SummaryWriter(log_dir='content/runs/'+writer_name)
# create the test environment
test_env = gym.make(ENV_NAME)
if save_video_test:
test_env = gym.wrappers.Monitor(test_env, "VIDEOS/TEST_VIDEOS_"+writer_name, video_callable=lambda episode_id: episode_id%10==0)
# initialize the actor-critic NN
agent_policy = A2C_policy(test_env.observation_space.shape, test_env.action_space.shape).to(device)
agent_value = A2C_value(test_env.observation_space.shape).to(device)
# initialize policy and value optimizer
optimizer_policy = optim.Adam(agent_policy.parameters(), lr=POLICY_LR)
optimizer_value = optim.Adam(agent_value.parameters(), lr=VALUE_LR)
# Do you want to load a trained model?
if load_model:
print('> Loading checkpoint {}'.format(checkpoint_name))
checkpoint = torch.load(checkpoint_name)
agent_policy.load_state_dict(checkpoint['agent_policy'])
agent_value.load_state_dict(checkpoint['agent_value'])
optimizer_policy.load_state_dict(checkpoint['optimizer_policy'])
optimizer_value.load_state_dict(checkpoint['optimizer_value'])
experience = []
n_iter = 0
while n_iter < MAX_ITER:
n_iter += 1
batch = env.steps(agent_policy, agent_value)
# Compute the policy probability with the old policy network
old_log_policy = compute_log_policy_prob(batch, agent_policy, device)
# Gather the advantage from the memory..
batch_adv = np.array([m.adv for m in batch])
# .. and normalize it to stabilize network
batch_adv = (batch_adv - np.mean(batch_adv)) / (np.std(batch_adv) + 1e-7)
batch_adv = torch.tensor(batch_adv).to(device)
# variables to accumulate losses
pol_loss_acc = []
val_loss_acc = []
# execute PPO_EPOCHS epochs
for s in range(PPO_EPOCHS):
# compute the loss and optimize over mini batches of size BATCH_SIZE
for mb in range(0, len(batch), BATCH_SIZE):
mini_batch = batch[mb:mb+BATCH_SIZE]
minib_old_log_policy = old_log_policy[mb:mb+BATCH_SIZE]
minib_adv = batch_adv[mb:mb+BATCH_SIZE]
# Compute the PPO clipped loss and the value loss
pol_loss, val_loss = clipped_PPO_loss(mini_batch, agent_policy, agent_value, minib_old_log_policy, minib_adv, CLIP_EPS, writer, device)
# optimize the policy network
optimizer_policy.zero_grad()
pol_loss.backward()
optimizer_policy.step()
# optimize the value network
optimizer_value.zero_grad()
val_loss.backward()
optimizer_value.step()
pol_loss_acc.append(float(pol_loss))
val_loss_acc.append(float(val_loss))
# add scalars to the tensorboard
writer.add_scalar('pg_loss', np.mean(pol_loss_acc), n_iter)
writer.add_scalar('vl_loss', np.mean(val_loss_acc), n_iter)
writer.add_scalar('rew', env.last_game_rew, n_iter)
writer.add_scalar('10rew', np.mean(env.last_games_rews[-100:]), n_iter)
# Test the agent
if n_iter % N_ITER_TEST == 0:
test_rews, test_stps = test_game(test_env, agent_policy, test_episodes)
print(' > Testing..', n_iter,test_rews, test_stps)
# if it achieves the best result so far, save the models
if test_rews > best_test_result:
torch.save({
'agent_policy': agent_policy.state_dict(),
'agent_value': agent_value.state_dict(),
'optimizer_policy': optimizer_policy.state_dict(),
'optimizer_value': optimizer_value.state_dict(),
'test_reward': test_rews
}, 'checkpoints/checkpoint_'+writer_name+'.pth.tar')
best_test_result = test_rews
print('=> Best test!! Reward:{:.2f} Steps:{}'.format(test_rews, test_stps))
writer.add_scalar('test_rew', test_rews, n_iter)
writer.close()
``` |
{
"source": "johnny880624/stancode-project",
"score": 2
} |
#### File: stanCode_projects/anime/my_drawing.py
```python
from campy.graphics.gobjects import GOval, GRect, GLabel
from campy.graphics.gwindow import GWindow
def main():
"""
Facebook logo! xd~
"""
window = GWindow()
big_rect = GRect(300, 300)
big_rect.color = 'dark blue'
big_rect.filled = True
big_rect.fill_color = 'navy'
window.add(big_rect, 100, 100)
label = GLabel('F', 180, 449)
label.font = '-250'
label.color = 'snow'
window.add(label)
label2 = GLabel('facebook', 335, 395)
label2.color = 'ivory'
window.add(label2)
if __name__ == '__main__':
main()
``` |
{
"source": "johnnyapol/heart2spotify",
"score": 3
} |
#### File: johnnyapol/heart2spotify/cache_manager.py
```python
class CacheManager:
def __init__(self):
pass
def update(self, station, new_songs):
raise NotImplementedError("Error: Invocation on abstract CacheManager::update")
from pickle import load, dump
class FlatfileCacheManager(CacheManager):
def __init__(self):
try:
with open(".songcache", "rb") as f:
self.data = load(f)
except:
self.data = dict()
def update(self, station, new_songs):
if station not in self.data:
self.data[station] = set()
diff = new_songs - self.data[station]
self.data[station] |= diff
if len(diff) > 0:
with open(".songcache", "wb") as f:
dump(self.data, f)
return diff
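# Example usage (station name and songs are hypothetical):
#   cache = FlatfileCacheManager()
#   fresh = cache.update("some-station", {"Song A", "Song B"})
#   # 'fresh' contains only the songs not previously cached for that station,
#   # and .songcache is rewritten on disk whenever something new appears.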
``` |
{
"source": "johnnyapol/invasion-tracker-twitter-bot",
"score": 3
} |
#### File: johnnyapol/invasion-tracker-twitter-bot/twitter.py
```python
import tweepy
from tweepy.error import TweepError
class Twitter:
def __init__(self, _conKey, _conSecret, _accessToken, _secretToken):
print ("Authenticating with Twitter!")
creds = tweepy.OAuthHandler(_conKey, _conSecret)
creds.set_access_token(_accessToken, _secretToken)
self.tapi = tweepy.API(creds)
def postTweet(self, tweet):
if len(tweet) > 140:
raise ValueError("Tweet length must not be greater than 140 characters!")
print ("Posting tweet: " + tweet)
try:
self.tapi.update_status(tweet)
print(tweet)
except TweepError as error:
print ("Failed to post tweet, a TweepError has occurred.")
print ( error )
``` |
{
"source": "johnnyapol/saurh",
"score": 3
} |
#### File: saurh/package/package_manager.py
```python
import subprocess
"""End Imports"""
class Package:
def __init__(self, name, description, origin, version):
self._name = name
self._description = description
self._origin = origin
self._version = version
def get_name(self):
return self._name
def get_description(self):
return self._description
def get_origin(self):
return self._origin
def get_version(self):
return self._version
def __str__(self):
return f"{self.get_origin()}/{self.get_name()} (v{self.get_version()}) - {self.get_description()}"
def __repr__(self):
return self.__str__()
class PackageManager:
def __init__(self):
pass
def search(self, package_name):
pass
def install(self, package_name):
pass
def check_for_updates(self):
pass
def invoke_cmd(args, want_output=False):
return subprocess.run(
args, stdout=(subprocess.PIPE if want_output else None), encoding="utf-8"
).stdout
``` |
{
"source": "JohnnyB0Y/code-playground",
"score": 4
} |
#### File: algorithms/problems/double_pointer.py
```python
def domain():
arr1 = [1, 3, 5, 2, 1, 0]
arr2 = [2, 1, 0, 9, 11, 12, 4]
arr1.sort()
arr2.sort()
print(arr1, arr2)
# Test merging
arr = mergeOrderlyArray(arr1, arr2)
print(arr)
# Test finding the element at position k
tmparr = []
for i in range(0, len(arr)):
tmparr.append(findInOrderlyArray(arr1, arr2, i))
print(tmparr)
# Merge two sorted arrays into one sorted array
def mergeOrderlyArray(arr1, arr2):
if len(arr1) <= 0:
return arr2
if len(arr2) <= 0:
return arr1
idx1 = idx2 = 0
arr = []
while idx1 < len(arr1):
# idx2 has reached the end of arr2: append the remaining arr1 elements to arr and leave the loop
if idx2 >= len(arr2):
arr.append(arr1[idx1])
idx1 += 1
else:
while idx2 < len(arr2):
# idx1 has reached the end of arr1: append the remaining arr2 elements to arr and leave the loop
if idx1 >= len(arr1):
arr.append(arr2[idx2])
idx2 += 1
else:
if arr1[idx1] > arr2[idx2]:
arr.append(arr2[idx2])
idx2 += 1
else:
arr.append(arr1[idx1])
idx1 += 1
return arr
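# The k-th element search below merges the two sorted arrays "virtually" with two
# pointers, counting how many positions have been consumed ('loop') until the k-th
# one is reached, so no merged array has to be built.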
# Find the element ranked k-th across two sorted arrays
def findInOrderlyArray(arr1, arr2, k):
if k >= len(arr1) + len(arr2):
return None
if len(arr1) <= 0:
return arr2[k]
if len(arr2) <= 0:
return arr1[k]
idx1 = idx2 = 0
loop = -1
while idx1 < len(arr1):
# idx2 has reached the end of arr2: keep looking in arr1
if idx2 >= len(arr2):
loop += 1 # error ?
if loop == k:
return arr1[idx1]
idx1 += 1
else:
while idx2 < len(arr2):
loop += 1
# arr1 的 idx1 走完,在 arr2 中找
if idx1 >= len(arr1):
if loop == k:
return arr2[idx2]
idx2 += 1
else:
if arr1[idx1] > arr2[idx2]:
if loop == k:
return arr2[idx2]
idx2 += 1
else:
if loop == k:
return arr1[idx1]
idx1 += 1
# Run the tests
domain()
``` |
{
"source": "JohnnyB0Y/GeneratingFile",
"score": 3
} |
#### File: GeneratingFile/helper/file_operation.py
```python
import os
import codecs
import json
__author__ = 'JohnnyB0Y'
class FileOperation:
def __init__(self):
# Current working directory of the .py file
self.current_path = os.getcwd()
# Directory where generated files are saved
self.save_path = os.getcwd() + '/generating_object_files'
# Directory containing the template files
self.file_templates_path = 'file_templates'
# Generate a file with the given content
def write_to_file(self, file_name, content, suffixes):
# Create the directory if it does not exist
dir_path = self.save_path
if not os.path.exists(dir_path):
os.makedirs(dir_path)
file_path = dir_path + '/' + file_name + '.' + suffixes
file = codecs.open(file_path, 'w', encoding='utf-8')
file.write(content)
file.close()
# Print whether it succeeded and the path of the generated file
print('-------------- ok --------------')
# Read the JSON config file and convert it into a dict
def dict_from_json_file(self, file_name, file_path):
if '.json' not in file_name:
file_name += '.json'
if not file_path:
file_path = self.current_path + '/' + self.file_templates_path + '/' + file_name
# print(file_path)
file = codecs.open(file_path, 'r+', encoding='utf-8')
# Dict parsed from the config file
info = json.loads(file.read())
file.close()
return info
# Read a text string from a .txt file
def text_from_txt_file(self, file_name, file_path):
if '.txt' not in file_name:
file_name += '.txt'
if not file_path:
file_path = self.current_path + '/' + self.file_templates_path + '/' + file_name
# print(file_path)
file = codecs.open(file_path, 'r+', encoding='utf-8')
# Get the text string
text = file.read()
file.close()
return text
``` |
{
"source": "JohnnyBannanis/admin_cementerio",
"score": 3
} |
#### File: JohnnyBannanis/admin_cementerio/main.py
```python
from flask import Flask, Response, request, render_template, redirect, url_for, flash
from bson.objectid import ObjectId
import pymongo
import json
app = Flask(__name__)
###################################################
#MONGO DATABASE CONNECTION
try:
mongo = pymongo.MongoClient(
host="localhost",
port=27017,
serverSelectionTimeoutMS = 1000
)
db = mongo.parroquia
mongo.server_info()
except:
print("ERROR - cannot connect to DB")
###################################################
###################################################
#SESSION SETTINGS
app.secret_key = "mysecretkey"
###################################################
@app.route("/")
def index():
return render_template('landing.html')
@app.route("/add")
def add():
return render_template('add.html')
@app.route("/search")
def search():
return render_template('search.html')
@app.route("/difuntos")
def difuntos():
data = list(db.difuntos.find())
for difunto in data:
difunto["_id"] = str(difunto["_id"])
return render_template('index.html', difuntos = data)
@app.route("/difuntos", methods=["POST"])
def crear_difunto():
try:
difunto = {
"rut":request.form["rut"],
"nombre":request.form["nombre"],
"f_defuncion":request.form["f_defuncion"],
"f_nacimiento":request.form["f_nacimiento"],
"sepultura":request.form["sepultura"],
}
db.difuntos.insert_one(difunto)
flash("Registro añadido con exito")
return redirect( url_for("add") )
except Exception as ex:
print(ex)
@app.route("/delete/<id>")
def borrar_difunto(id):
try:
dbResponse = db.difuntos.delete_one(
{"_id":ObjectId(id)}
)
if(dbResponse.deleted_count == 1):
return redirect( url_for("difuntos") )
else:
return redirect( url_for("difuntos") )
except Exception as ex:
print(ex)
@app.route("/edit/<id>")
def edit_difunto(id):
try:
data = list(db.difuntos.find({"_id":ObjectId(id)}) )
print(data)
return render_template('edit.html', difunto = data[0])
except Exception as ex:
print(ex)
@app.route("/update/<id>", methods=["POST"])
def update_difunto(id):
try:
dbResponse = db.difuntos.update_one(
{"_id":ObjectId(id)},
{"$set": {
"rut":request.form["rut"],
"nombre":request.form["nombre"],
"f_defuncion":request.form["f_defuncion"],
"f_nacimiento":request.form["f_nacimiento"],
"sepultura":request.form["sepultura"]}}
)
if(dbResponse.modified_count == 1):
return redirect( url_for("difuntos") )
else:
return redirect( url_for("difuntos") )
except Exception as ex:
print(ex)
@app.route("/search/rut" ,methods=["POST"])
def search_rut():
param = request.form["rut_search"]
data = list(db.difuntos.find({"rut": param}) )
flash("ok")
return render_template('search.html', resultados = data)
@app.route("/search/name" ,methods=["POST"])
def search_name():
param = request.form["name_search"]
data = list(db.difuntos.find({"nombre": param}) )
flash("ok")
return render_template('search.html', resultados = data)
### # ### # ### # ### # ### # ### # ### # ### # ###
### # API # ###
### # ### # ### # ### # ### # ### # ### # ### # ###
@app.route("/api", methods=["GET"])
def get_difuntos():
try:
data = list(db.difuntos.find())
for difunto in data:
difunto["_id"] = str(difunto["_id"])
return Response(
response = json.dumps(data),
status = 200,
mimetype = "application/json"
)
except Exception as ex:
print(ex)
return Response(
response = json.dumps({"message":"Cannot read difuntos"}),
status = 500,
mimetype = "application/json"
)
@app.route("/api", methods=["POST"])
def create_difunto():
try:
difunto = {
"rut":request.form["rut"],
"nombre":request.form["nombre"],
"f_defuncion":request.form["f_defuncion"],
"f_nacimiento":request.form["f_nacimiento"],
"sepultura":request.form["sepultura"],
}
dbResponse = db.difuntos.insert_one(difunto)
return Response(
response = json.dumps(
{"message":"difunto creado",
"id":f"{dbResponse.inserted_id}"}
),
status = 200,
mimetype = "application/json"
)
except Exception as ex:
print(ex)
@app.route("/api/<id>", methods=["PATCH"])
def update(id):
try:
dbResponse = db.difuntos.update_one(
{"_id":ObjectId(id)},
{"$set": {
"rut":request.form["rut"],
"nombre":request.form["nombre"],
"f_defuncion":request.form["f_defuncion"],
"f_nacimiento":request.form["f_nacimiento"],
"sepultura":request.form["sepultura"]}}
)
if(dbResponse.modified_count == 1):
return Response(
response = json.dumps({"message":"User updated"}),
status = 200,
mimetype = "application/json"
)
else:
return Response(
response = json.dumps({"Nothing to update"}),
status = 200,
mimetype = "application/json"
)
except Exception as ex:
print(ex)
return Response(
response = json.dumps({"message":"Cannot update user"}),
status = 500,
mimetype = "application/json"
)
@app.route("/api/<id>", methods=["DELETE"])
def delete_difunto(id):
try:
dbResponse = db.difuntos.delete_one(
{"_id":ObjectId(id)}
)
if(dbResponse.deleted_count == 1):
return Response(
response = json.dumps(
{"message":"difunto deleted", "id" :f"{id}"}
),
status = 200,
mimetype = "application/json"
)
else:
return Response(
response = json.dumps(
{"message":"difunto not found"}
),
status = 200,
mimetype = "application/json"
)
except Exception as ex:
print(ex)
### # ### # ### # ### # ### # ### # ### # ### # ###
if __name__ == "__main__":
app.run(port=80, debug=True)
``` |
{
"source": "JohnnyBannanis/AI_puzzle8",
"score": 3
} |
#### File: AI_puzzle8/code/astar.py
```python
from heapq import heappush,heappop
import numpy as np
import time
from puzzle import Puzzle
class Astar(object):
def __init__(self,_heuristic,_start,_goal):
self.heuristic = _heuristic
self.start = _start
self.goal = _goal
self.parent = dict()
self.path = []
def start_node(self):
return self.start_node
def is_goal(self,node):
return node == self.goal
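# Successors are generated by sliding the blank tile (0) left, right, up or down,
# skipping any move that would leave the board.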
def get_children(self,node):
child_list = set()
dim = node.state.shape[0]
i,j = map(int, np.where(node.state == 0))
if (j > 0):
child = node.state.copy()
child[i,j] = node.state[i,j-1]
child[i,j-1] = 0
p = Puzzle(child)
child_list.add(p)
if (j < dim-1):
child = node.state.copy()
child[i,j] = node.state[i,j+1]
child[i,j+1] = 0
p = Puzzle(child)
child_list.add(p)
if (i > 0):
child = node.state.copy()
child[i,j] = node.state[i-1,j]
child[i-1,j] = 0
p = Puzzle(child)
child_list.add(p)
if (i < dim-1):
child = node.state.copy()
child[i,j] = node.state[i+1,j]
child[i+1,j] = 0
p = Puzzle(child)
child_list.add(p)
return child_list
def heuristic_sel(self,nodo,costoAcumulado):
if (self.heuristic == 'hamming'):
return (nodo).hamming(self.goal) + costoAcumulado
elif (self.heuristic == 'manhattan'):
return (nodo).manhattan(self.goal) + costoAcumulado
def trajectory(self):
if(len(self.parent) == 0):
return []
path = [self.goal]
nodo = self.goal
while self.parent[nodo] != self.start:
nodo = self.parent[nodo]
path.append(nodo)
path = path[::-1]
self.path = path
return path
def print_trajectory(self):
if(len(self.parent) == 0):
print('No existe ruta')
return None
for i in self.path:
print(i.pretty_print())
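# A* search: the agenda is a min-heap of (f, tie_breaker, g, node) tuples ordered by f,
# the selected heuristic value (hamming or manhattan) plus the accumulated cost.
# search() returns a history of (agenda size, expanded set size) pairs per step.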
def search(self):
origen = self.start
historia = [(0,0)]
agenda = []
expandidos = set()
costoAcumulado = 0
contador = 0
if (self.is_goal(origen)):
return(historia)
self.parent[origen] = origen
heu = self.heuristic_sel(origen,costoAcumulado)
heappush(agenda,(heu,contador,costoAcumulado,origen))
while (len(agenda) > 0):
historia.append((len(agenda),len(expandidos)))
heu,c,ca,nodo = heappop(agenda)
expandidos.add(nodo)
if (self.is_goal(nodo)):
#print(historia)
return(historia)
for sucesor in self.get_children(nodo):
if(sucesor not in expandidos):
heu = self.heuristic_sel(sucesor,ca)
nc = []
for i,j,k,l in agenda:
if(i == heu):
nc.append(j)
if(len(nc) == 0):
nc = 0
else:
nc = max(nc) + 1
heappush(agenda,(heu,nc,ca+1,sucesor))
self.parent[sucesor] = nodo
return None
```
#### File: AI_puzzle8/code/idastar.py
```python
from heapq import heappush,heappop
from collections import deque
import numpy as np
import time
import math
from puzzle import Puzzle
class IDAstar(object):
def __init__(self,_heuristic,_start,_goal):
self.heuristic = _heuristic
self.start = _start
self.goal = _goal
self.path = []
def start_node(self):
return self.start_node
def is_goal(self,node):
return node == self.goal
def get_children(self,node):
child_list = set()
dim = node.state.shape[0]
i,j = map(int, np.where(node.state == 0))
if (j > 0):
child = node.state.copy()
child[i,j] = node.state[i,j-1]
child[i,j-1] = 0
p = Puzzle(child)
child_list.add(p)
if (j < dim-1):
child = node.state.copy()
child[i,j] = node.state[i,j+1]
child[i,j+1] = 0
p = Puzzle(child)
child_list.add(p)
if (i > 0):
child = node.state.copy()
child[i,j] = node.state[i-1,j]
child[i-1,j] = 0
p = Puzzle(child)
child_list.add(p)
if (i < dim-1):
child = node.state.copy()
child[i,j] = node.state[i+1,j]
child[i+1,j] = 0
p = Puzzle(child)
child_list.add(p)
return child_list
def heuristic_sel(self,nodo,costoAcumulado):
if (self.heuristic=='hamming'):
return (nodo).hamming(self.goal) + costoAcumulado
elif (self.heuristic=='manhattan'):
return (nodo).manhattan(self.goal) + costoAcumulado
def trajectory(self):
ruta = []
for i,j in self.path:
if (i,j) != (self.start,0):
ruta.append(i)
return ruta
def print_trajectory(self):
for i,j in self.path:
if (i,j) != (self.start,0):
print(i.pretty_print())
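# IDA*: depth-first search bounded by a threshold c on f = g + h. Nodes whose f
# exceeds c are cut off, the smallest exceeding f becomes the next threshold, and
# the search restarts until the goal is found or the bound grows to infinity.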
def search(self):
origen = self.start
historia = [(0,0)]
if (self.is_goal(origen)):
self.path = [(origen,0)]
return(historia)
c = self.heuristic_sel(origen,0)
while True:
minimo = math.inf
agenda = deque()
agenda.append((origen,0))
x = set()
while len(agenda)>0:
historia.append(((len(agenda),len(x))))
n,costo = agenda[-1]
if(self.is_goal(n)):
self.path = agenda
return(historia)
if n not in x:
x.add(n)
sucesores = self.get_children(n)
sucesoresAux = []
for i in sucesores:
heu = self.heuristic_sel(i,costo)
nc = []
for j,k,l in sucesoresAux:
if(j == heu):
nc.append(k)
if(len(nc)==0):
nc = 0
else:
nc = max(nc)+1
heappush(sucesoresAux,(heu,nc,i))
sucesores = sucesoresAux
for s1,s2,s in sucesores:
if s1 <= c:
if s not in x:
agenda.append((s,costo+1))
else:
if(s1 < minimo):
minimo = s1
else:
agenda.pop()
x.remove(n)
c = minimo
if c == math.inf:
self.path = agenda
return(historia)
``` |
{
"source": "johnnybarrels/unicode",
"score": 3
} |
#### File: app-dev/app/controllers.py
```python
from app import app, db
from flask import request
from app.models import User, Course, Test, Question, Result, enrolments, Submission
from app.forms import LoginForm, RegistrationForm, NewTestForm, NewCourseForm, RenameTestForm, QuestionForm, QuestionSubmissionForm, AddStudentToCourseForm, MarkTestForm, FeedbackForm
from flask import render_template, flash, redirect, url_for, request
from flask_login import current_user, login_user, logout_user, login_required
class UserController():
def login():
form = LoginForm()
if form.validate_on_submit():
# Check that user is in db and that password is correct
user = User.query.filter_by(email=form.email.data).first()
if user is None or not user.check_password(form.password.data):
flash('Invalid username or password')
return redirect(url_for('login'))
login_user(user)
if user.is_admin:
return redirect(url_for('admin_portal'))
else:
return redirect(url_for('student_portal'))
# for GET request (browser loading page)
return render_template('index.html', form=form)
def show_portal():
if current_user.is_admin:
course_form = NewCourseForm()
return render_template('admin.html', course_form=course_form)
else:
return render_template('student.html')
def course_view(course_id):
course = Course.query.filter_by(id=course_id).first()
tests = course.tests
form = NewTestForm()
rename_test_form = RenameTestForm()
course_users = course.get_users()
new_test_form = NewTestForm()
course_form = NewCourseForm()
add_student_form = AddStudentToCourseForm()
if current_user.is_admin:
return render_template('admin-course.html', add_student_form=add_student_form,
rename_test_form=rename_test_form, course_users=course_users,
course_form=course_form, new_test_form=new_test_form, course=course,
tests=tests)
else:
live_tests = [test for test in tests if test.is_live]
return render_template('student-course.html', course=course, tests=live_tests)
def logout():
logout_user()
return redirect(url_for('login'))
def register():
form = RegistrationForm()
if form.validate_on_submit():
user = User(first_name=form.first_name.data,
last_name=form.last_name.data, email=form.email.data,
is_admin=0)
# If submitted email is already in db
if User.query.filter_by(email=user.email).first() is not None:
flash('Email is already registered!')
flash('Please log in below')
return redirect(url_for('login'))
user.set_password(form.password.data)
db.session.add(user)
db.session.commit()
flash("You have registered")
flash("Please log in below")
return redirect(url_for('login'))
return render_template('register.html', title="Register", form=form)
class CourseController():
def aggregate_view():
courses = Course.query.all()
return render_template('general-dashboard.html', courses=courses)
def create_test(course_id):
form = NewTestForm()
course = Course.query.filter_by(id=course_id).first()
tests = course.tests
if form.validate_on_submit():
test = Test()
test.name = form.test_name.data
test.course_id = course.id
db.session.add(test)
db.session.commit()
return redirect(url_for('course_view', course_id=course.id))
return redirect(url_for('course_view', course_id=course.id))
def create_course():
course_form = NewCourseForm()
if course_form.validate_on_submit():
course = Course()
course.name = course_form.course_name.data
course.course_code = course_form.course_code.data
db.session.add(course)
current_user.courses.append(course)
db.session.commit()
return redirect(url_for('admin_portal'))
return redirect(url_for('admin_portal', course_form=course_form))
def add_student(course_id):
add_student_form = AddStudentToCourseForm()
course = Course.query.filter_by(id=course_id).first()
if add_student_form.validate_on_submit():
student_email = add_student_form.student_email.data
student = User.query.filter_by(email=student_email).first()
if student:
student.courses.append(course)
db.session.commit()
return redirect(url_for('course_view', course_id=course_id))
return redirect(url_for('course_view', course_id=course_id))
def remove_student(course_id, student_id):
course = Course.query.filter_by(id=course_id).first()
student = User.query.filter_by(id=student_id).first()
if student:
student.courses.remove(course)
db.session.commit()
return redirect(url_for('course_view', course_id=course_id))
return redirect(url_for('course_view', course_id=course_id))
class TestController():
def create_test():
form = NewTestForm()
pass
def delete_test(course_id, test_id):
test = Test.query.filter_by(id=test_id).first()
db.session.delete(test)
db.session.commit()
return redirect(url_for('course_view', course_id=course_id))
def show_test(course_id, test_id):
course = Course.query.filter_by(id=course_id).first()
test = Test.query.filter_by(id=test_id).first()
users = course.get_users()
max_mark = test.get_max_mark()
min_mark = test.get_min_mark()
test_avg = test.get_average_mark()
if current_user.is_admin:
num_results = test.get_num_results()
num_enrolled_students = course.get_num_enrolments()
aggregates = [num_results, num_enrolled_students,
test_avg, max_mark, min_mark]
submitted_users = test.get_submitted_users()
rename_test_form = RenameTestForm()
course_form = NewCourseForm()
return render_template('admin-test-view.html', course=course,
course_form=course_form, test=test,
rename_test_form=rename_test_form,
submitted_users=submitted_users, aggregates=aggregates) # results=results
else:
student_result = test.get_student_result(current_user.id)
aggregates = []
if student_result:
student_perc = round(student_result.score / test.total_marks() * 100, 2)
aggregates = [student_perc,
test_avg, max_mark, min_mark]
return render_template('student-test-view.html',
aggregates=aggregates,
test=test, course=course,
student_result=student_result)
def edit_test_view(course_id, test_id):
course = Course.query.filter_by(id=course_id).first()
test = Test.query.filter_by(id=test_id).first()
questions = test.questions
form = QuestionForm()
course_form = NewCourseForm()
return render_template('admin-test-edit.html', course=course,
test=test, questions=questions,
form=form, course_form=course_form)
def rename_test(course_id, test_id):
test = Test.query.filter_by(id=test_id).first()
form = RenameTestForm()
if form.validate_on_submit():
test.name = form.new_test_name.data
db.session.commit()
redirect(url_for('test_view', course_id=course_id, test_id=test_id))
return redirect(url_for('test_view', course_id=course_id, test_id=test_id))
def toggle_live(course_id, test_id):
test = Test.query.filter_by(id=test_id).first()
if test.is_live:
test.is_live = False
else:
test.is_live = True
db.session.commit()
return redirect(url_for('course_view', course_id=course_id))
def edit_question(course_id, test_id, question_id):
course = Course.query.filter_by(id=course_id).first()
test = Test.query.filter_by(id=test_id).first()
q = Question.query.filter_by(id=question_id).first()
form = QuestionForm()
if form.delete.data:
db.session.delete(q)
db.session.commit()
return redirect(url_for('edit_test_view', course_id=course_id,
test_id=test_id))
if form.validate_on_submit():
if form.save.data:
q.test_id = test_id
q.question_type = int(form.question_type.data)
q.question_string = repr(form.description.data.encode())[2:-1]
q.code_string = repr(form.code_string.data.encode())[2:-1]
q.mcq_1 = form.mcq_1.data
q.mcq_2 = form.mcq_2.data
q.mcq_3 = form.mcq_3.data
q.mcq_4 = form.mcq_4.data
q.mcq_answer = form.mcq_solution.data
q.answer = form.solution.data
q.mark_alloc = form.mark_alloc.data
db.session.commit()
return redirect(url_for('edit_test_view', course_id=course_id,
test_id=test_id))
def new_question(course_id, test_id):
form = QuestionForm()
if form.validate_on_submit():
q = Question()
q.test_id = test_id
q.question_type = int(form.question_type.data)
q.question_string = repr(form.description.data.encode())[2:-1]
q.code_string = repr(form.code_string.data.encode())[2:-1]
q.mcq_1 = form.mcq_1.data
q.mcq_2 = form.mcq_2.data
q.mcq_3 = form.mcq_3.data
q.mcq_4 = form.mcq_4.data
q.mcq_answer = form.mcq_solution.data
q.answer = form.solution.data
q.mark_alloc = form.mark_alloc.data
db.session.add(q)
db.session.commit()
return redirect(url_for('edit_test_view', course_id=course_id, test_id=test_id))
def take_test(course_id, test_id):
course = Course.query.filter_by(id=course_id).first()
test = Test.query.filter_by(id=test_id).first()
questions = test.questions
form = QuestionSubmissionForm()
return render_template('take-test.html', course=course, test=test, questions=questions, form=form)
def new_submission(course_id, test_id, question_id):
q = Question.query.filter_by(id=question_id).first()
sub = q.get_user_submission(current_user.id)
if not sub: # if no existing submission exists
sub = Submission()
sub.user_id = current_user.id
sub.test_id = test_id
sub.question_id = question_id
form = QuestionSubmissionForm()
if form.validate_on_submit():
if q.question_type == 1:
sub.output_sub = form.output_answer.data
elif q.question_type == 2:
sub.mcq_sub = form.mcq_answer.data
elif q.question_type == 3:
sub.code_sub = repr(form.code_answer.data)[1:-1]
db.session.add(sub)
db.session.commit()
return redirect(url_for('take_test', course_id=course_id, test_id=test_id))
def submit_test(course_id, test_id):
test = Test.query.filter_by(id=test_id).first()
user_id = current_user.id
questions = test.questions # Question.query.filter_by(test_id=test_id)
submissions = test.get_user_submissions(user_id)
total = 0
for submission in submissions:
submission.auto_mark()
total += submission.score
result = Result(user_id=user_id, test_id=test_id, score=total)
db.session.add(result)
db.session.commit()
if not any([q.question_type == 3 for q in questions]):
result.needs_marking = False
return redirect(url_for('course_view', course_id=course_id))
def mark_test(course_id, test_id, student_id):
course = Course.query.filter_by(id=course_id).first()
test = Test.query.filter_by(id=test_id).first()
questions = test.questions
student = User.query.filter_by(id=student_id).first()
submissions = test.get_user_submissions(student_id)
course_form = NewCourseForm()
feedback_form = FeedbackForm()
form = MarkTestForm()
return render_template('mark-test.html', course=course,
course_form=course_form, student=student,
test=test, questions=questions,
submissions=submissions, form=form,
feedback_form=feedback_form)
def mark_submission(course_id, test_id, student_id, submission_id):
submission = Submission.query.filter_by(id=submission_id).first()
result = Result.query.filter_by(
test_id=test_id, user_id=student_id).first()
form = MarkTestForm()
if form.validate_on_submit():
score_diff = form.mark.data - submission.score
submission.score = form.mark.data
submission.needs_marking = False
# incrementally storing scores in case a teacher
# doesn't finish marking a student's test
result.score += score_diff
db.session.commit()
return redirect(url_for('mark_test', course_id=course_id, test_id=test_id,
student_id=student_id))
def submit_and_feedback(course_id, test_id, student_id):
test = Test.query.filter_by(id=test_id).first()
result = Result.query.filter_by(
test_id=test_id, user_id=student_id).first()
submissions = test.get_user_submissions(student_id)
form = FeedbackForm()
if form.validate_on_submit():
result.feedback = form.feedback.data
result.score = sum((sub.score for sub in submissions))
if not any(sub.needs_marking for sub in submissions):
result.needs_marking = False
db.session.commit()
return redirect(url_for('test_view', course_id=course_id, test_id=test_id))
```
#### File: app-dev/app/models.py
```python
from app import db, login
from flask_login import UserMixin, LoginManager
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import current_user
from statistics import stdev
@login.user_loader
def load_user(id):
return User.query.get(int(id))
enrolments = db.Table('enrolments',
db.Column('user_id', db.Integer, db.ForeignKey(
'users.id'), primary_key=True),
db.Column('course_id', db.Integer, db.ForeignKey(
'courses.id'), primary_key=True)
)
class User(UserMixin, db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True, nullable=False)
email = db.Column(db.String(64), index=True, unique=True)
password_hash = db.Column(db.String(128))
first_name = db.Column(db.String(32))
last_name = db.Column(db.String(32))
is_admin = db.Column(db.Boolean, nullable=False, default=False)
courses = db.relationship('Course', secondary=enrolments, lazy='subquery',
backref=db.backref('users', lazy=True))
def set_password(self, password):
self.password_hash = generate_password_hash(password)
def check_password(self, password):
return check_password_hash(self.password_hash, password)
def get_test_submissions(self, test_id):
return Submission.query.join(User).filter(
(Submission.user_id == self.id) &
(Submission.test_id == test_id)
).all()
def get_result(self, test_id):
return Result.query.filter_by(user_id=self.id, test_id=test_id).first()
def has_submitted(self, test_id):
return self.get_result(test_id) is not None
def full_name(self):
return f'{self.first_name} {self.last_name}'
def __repr__(self):
return f'<User: {self.email}>'
class Course(db.Model):
__tablename__ = 'courses'
id = db.Column(db.Integer, primary_key=True, nullable=False)
name = db.Column(db.String(64))
course_code = db.Column(db.String(32))
tests = db.relationship('Test', backref='course', lazy=True)
def get_num_enrolments(self):
students = []
for user in self.get_users():
if not user.is_admin:
students.append(user)
return len(students)
def get_users(self):
return User.query.join(enrolments).join(Course).filter(
enrolments.c.course_id == self.id).all()
def __repr__(self):
return f'<Course: {self.name}>'
class Test(db.Model):
__tablename__ = 'tests'
id = db.Column(db.Integer, primary_key=True, nullable=False)
name = db.Column(db.String(64), nullable=False)
is_live = db.Column(db.Boolean, nullable=False, default=False)
course_id = db.Column(db.Integer, db.ForeignKey('courses.id'))
questions = db.relationship('Question', backref='test', lazy=True)
def total_marks(self):
total = sum((question.mark_alloc for question in self.questions))
if total:
return total
else:
return 1
def get_std_dev(self):
all_res = self.get_test_results()
marks = []
for res in all_res:
marks.append(res.score)
if len(marks) > 1:
return round(stdev(marks), 2)
else:
return 0
def get_average_mark(self):
all_res = self.get_test_results()
total = 0
for res in all_res:
total += res.score
print(total)
print(self.total_marks())
return round((total / max(len(all_res), 1)) / self.total_marks() * 100, 2)
def get_max_mark(self):
all_res = self.get_test_results()
all_res.sort(key=lambda r: r.score, reverse=True)
if all_res:
return round((all_res[0].score) / self.total_marks() * 100, 2)
else:
return 0
def get_min_mark(self):
all_res = self.get_test_results()
all_res.sort(key=lambda r: r.score)
if all_res:
return round(all_res[0].score / self.total_marks() * 100, 2)
else:
return 0
def get_num_results(self):
return len(self.get_test_results())
def get_submitted_users(self):
return User.query.join(Submission).join(Test).filter(
Submission.test_id == self.id).all()
def get_user_submissions(self, user_id):
return Submission.query.join(Test).filter(
(Submission.test_id == self.id)
& (Submission.user_id == user_id)).all()
def get_all_submissions(self):
return Submission.query.join(Test).filter(
Submission.test_id == self.id).all()
def has_result(self, user_id):
return Result.query.filter_by(user_id=user_id, test_id=self.id).first()
def get_test_results(self):
return Result.query.filter_by(test_id=self.id).all()
def get_student_result(self, user_id):
return Result.query.filter_by(test_id=self.id, user_id=user_id).first()
def __repr__(self):
return f'<Test: {self.name}>'
class Question(db.Model):
__tablename__ = 'questions'
id = db.Column(db.Integer, primary_key=True, nullable=False)
question_string = db.Column(db.String(256), nullable=False)
code_string = db.Column(db.String(1024))
answer = db.Column(db.String(256))
mcq_1 = db.Column(db.String(128))
mcq_2 = db.Column(db.String(128))
mcq_3 = db.Column(db.String(128))
mcq_4 = db.Column(db.String(128))
mcq_answer = db.Column(db.String(8))
test_id = db.Column(db.Integer, db.ForeignKey('tests.id'))
mark_alloc = db.Column(db.Integer, nullable=False)
# 1 = Output, 2 = MCQ, 3 = Write code
question_type = db.Column(db.Integer, nullable=False, default=1)
submissions = db.relationship('Submission', backref='question', lazy=True)
def get_mcq_options(self):
return [self.mcq_1, self.mcq_2, self.mcq_3, self.mcq_4]
def get_user_submission(self, user_id):
return Submission.query.join(Question).filter(
(Submission.question_id == self.id)
& (Submission.user_id == user_id)).first()
def get_all_submissions(self):
return Submission.query.join(Question).filter(
Submission.question_id == self.id).all()
def __repr__(self):
return f'<Question: {self.question_string}>'
class Submission(db.Model):
__tablename__ = 'submissions'
id = db.Column(db.Integer, primary_key=True, nullable=False)
output_sub = db.Column(db.String(128))
mcq_sub = db.Column(db.String(8))
code_sub = db.Column(db.String(1024))
user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
test_id = db.Column(db.Integer, db.ForeignKey('tests.id'))
question_id = db.Column(db.Integer, db.ForeignKey('questions.id'))
score = db.Column(db.Integer, default=0)
needs_marking = db.Column(db.Boolean, nullable=False, default=True)
def auto_mark(self):
q = Question.query.filter_by(id=self.question_id).first()
if q.question_type == 1:
if self.output_sub == q.answer:
self.score = q.mark_alloc
self.needs_marking = False
elif q.question_type == 2:
if self.mcq_sub == q.mcq_answer:
self.score = q.mark_alloc
self.needs_marking = False
db.session.commit()
def get_question(self):
return Question.query.filter_by(id=self.question_id).first()
def get_result(self):
return Result.query.filter_by()
def __repr__(self):
return f'<Submission: User ID: {self.user_id}, Question ID: {self.question_id}>'
class Result(db.Model):
__tablename__ = 'results'
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('users.id'), nullable=False)
test_id = db.Column(db.Integer, db.ForeignKey('tests.id'), nullable=False)
score = db.Column(db.Integer)
needs_marking = db.Column(db.Boolean, nullable=False, default=True)
feedback = db.Column(db.String(1024))
def get_mark(user_id, test_id):
return Result.query.filter((Result.user_id == user_id) & (Result.test_id == test_id)).first().score
def __repr__(self):
return f'<Result {self.id}, User{self.user_id}, Test {self.test_id}, Score: {self.score}>'
```
#### File: app-dev/Tests/system.py
```python
import unittest, os, time
from app import app, db
from app.models import User, Course, Question, Test, Result
from selenium import webdriver
from config import Config, TestConfig
basedir = os.path.abspath(os.path.dirname(__file__))
class SystemTest(unittest.TestCase):
driver = None
def setUp(self):
self.driver = webdriver.Firefox(executable_path=os.path.join(basedir,'geckodriver'))
if not self.driver:
self.skipTest('Web driver not available')
else:
db.create_all()
db.session.query(User).delete()
#add other models in db
u = User(id = 1, email='<EMAIL>', first_name='testfirst', last_name='testlast')
admin = User(id = 1111, email='<EMAIL>', first_name='admintest', last_name='admintest', is_admin = True)
u.set_password('pw')
admin.set_password('pw')
db.session.add(u)
db.session.add(admin)
db.session.commit()
self.driver.maximize_window()
self.driver.get('http://localhost:5000')
def tearDown(self):
if self.driver:
self.driver.close()
#add other models in db
db.session.commit()
db.session.remove()
def test_login_user(self):
self.driver.get('http://localhost:5000')
time.sleep(1)
email_field = self.driver.find_element_by_id('email')
password_field = self.driver.find_element_by_id('password')
submit_field = self.driver.find_element_by_id('login-btn')
email_field.send_keys('<EMAIL>')
password_field.send_keys('pw')
submit_field.click()
time.sleep(1)
message = self.driver.find_element_by_class_name('portal-msg').get_attribute('innerHTML')
self.assertEqual(message, 'SELECT A COURSE TO VIEW TESTS / VIEW RESULTS')
def test_login_admin(self):
self.driver.get('http://localhost:5000')
time.sleep(1)
email_field = self.driver.find_element_by_id('email')
password_field = self.driver.find_element_by_id('password')
submit_field = self.driver.find_element_by_id('login-btn')
email_field.send_keys('<EMAIL>')
password_field.send_keys('pw')
submit_field.click()
time.sleep(1)
message = self.driver.find_element_by_class_name('portal-msg').get_attribute('innerHTML')
self.assertEqual(message, 'SELECT A COURSE TO CREATE A NEW TEST / VIEW RESULTS')
def test_user_registration(self):
self.driver.get('http://localhost:5000/register')
time.sleep(3)
first_name_field = self.driver.find_element_by_id('first_name')
last_name_field = self.driver.find_element_by_id('last_name')
email_field = self.driver.find_element_by_id('email')
password_field = self.driver.find_element_by_id('password')
password_again_field = self.driver.find_element_by_id('password_again')
registration_field = self.driver.find_element_by_id('login-btn')
first_name_field.send_keys('robot_test')
last_name_field.send_keys('robot_test_last')
email_field.send_keys('<EMAIL>[email protected]')
password_field.send_keys('pw')
password_again_field.send_keys('pw')
time.sleep(1)
registration_field.click()
time.sleep(2)
new_email_field = self.driver.find_element_by_id('email')
new_password_field = self.driver.find_element_by_id('password')
new_submit_field = self.driver.find_element_by_id('login-btn')
new_email_field.send_keys('<EMAIL>')
new_password_field.send_keys('pw')
new_submit_field.click()
time.sleep(2)
message = self.driver.find_element_by_class_name('portal-msg').get_attribute('innerHTML')
self.assertEqual(message, 'SELECT A COURSE TO VIEW TESTS / VIEW RESULTS')
if __name__=='__main__':
unittest.main(verbosity=2)
``` |
{
"source": "johnnybegood/ha-ksenia-lares",
"score": 2
} |
#### File: custom_components/ksenia_lares/base.py
```python
import asyncio
import logging
import re
import aiohttp
from aiohttp.http import RESPONSES
from getmac import get_mac_address
from lxml import etree
from homeassistant.helpers.device_registry import CONNECTION_NETWORK_MAC, format_mac
from .const import DOMAIN, MANUFACTURER
_LOGGER = logging.getLogger(__name__)
class LaresBase:
"""The implementation of the Lares base class."""
def __init__(self, data: dict):
username = data["username"]
password = data["password"]
host = data["host"]
self._auth = aiohttp.BasicAuth(username, password)
self._ip = host
self._port = 4202
self._host = f"http://{host}:{self._port}"
async def info(self):
"""Get general info"""
response = await self.get("info/generalInfo.xml")
if response is None:
return None
mac = get_mac_address(ip=self._ip)
unique_id = str(mac)
if mac is None:
# Fallback to IP addresses when MAC cannot be determined
unique_id = f"{self._ip}:{self._port}"
info = {
"mac": mac,
"id": unique_id,
"name": response.xpath("/generalInfo/productName")[0].text,
"info": response.xpath("/generalInfo/info1")[0].text,
"version": response.xpath("/generalInfo/productHighRevision")[0].text,
"revision": response.xpath("/generalInfo/productLowRevision")[0].text,
"build": response.xpath("/generalInfo/productBuildRevision")[0].text,
}
return info
async def device_info(self):
"""Get device info"""
device_info = await self.info()
if device_info is None:
return None
info = {
"identifiers": {(DOMAIN, device_info["id"])},
"name": device_info["name"],
"manufacturer": MANUFACTURER,
"model": device_info["name"],
"sw_version": f'{device_info["version"]}.{device_info["revision"]}.{device_info["build"]}',
}
mac = device_info["mac"]
if mac is not None:
info["connections"] = {(CONNECTION_NETWORK_MAC, format_mac(mac))}
return info
async def zoneDescriptions(self):
"""Get available zones"""
response = await self.get("zones/zonesDescription48IP.xml")
if response is None:
return None
zones = response.xpath("/zonesDescription/zone")
return [zone.text for zone in zones]
async def zones(self):
"""Get available zones"""
response = await self.get("zones/zonesStatus48IP.xml")
if response is None:
return None
zones = response.xpath("/zonesStatus/zone")
return [
{
"status": zone.find("status").text,
"bypass": zone.find("bypass").text,
"alarm": zone.find("alarm").text,
}
for zone in zones
]
async def get(self, path):
"""Generic send method."""
url = f"{self._host}/xml/{path}"
try:
async with aiohttp.ClientSession(auth=self._auth) as session:
async with session.get(url=url) as response:
xml = await response.read()
content = etree.fromstring(xml)
return content
except aiohttp.ClientConnectorError as conn_err:
_LOGGER.debug("Host %s: Connection error %s", self._host, str(conn_err))
except: # pylint: disable=bare-except
_LOGGER.debug("Host %s: Unknown exception occurred.", self._host)
return
```
#### File: custom_components/ksenia_lares/binary_sensor.py
```python
import asyncio
import datetime
from datetime import timedelta
import logging
import async_timeout
from homeassistant.components.binary_sensor import BinarySensorEntity
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
UpdateFailed,
)
from .base import LaresBase
from .const import (
DEFAULT_TIMEOUT,
ZONE_BYPASS_ON,
ZONE_STATUS_ALARM,
ZONE_STATUS_NOT_USED,
)
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = timedelta(seconds=10)
DEFAULT_DEVICE_CLASS = "motion"
DOOR_DEVICE_CLASS = "door"
async def async_setup_entry(hass, config_entry, async_add_devices):
"""Set up binary sensors attached to a Lares alarm device from a config entry."""
client = LaresBase(config_entry.data)
descriptions = await client.zoneDescriptions()
device_info = await client.device_info()
async def async_update_data():
"""Perform the actual updates."""
async with async_timeout.timeout(DEFAULT_TIMEOUT):
return await client.zones()
coordinator = DataUpdateCoordinator(
hass,
_LOGGER,
name="lares_zones",
update_method=async_update_data,
update_interval=SCAN_INTERVAL,
)
# Fetch initial data so we have data when entities subscribe
await coordinator.async_refresh()
async_add_devices(
LaresSensor(coordinator, idx, descriptions[idx], device_info)
for idx, zone in enumerate(coordinator.data)
)
class LaresSensor(CoordinatorEntity, BinarySensorEntity):
"""An implementation of a Lares door/window/motion sensor."""
def __init__(self, coordinator, idx, description, device_info):
"""Initialize a the switch."""
super().__init__(coordinator)
self._coordinator = coordinator
self._description = description
self._idx = idx
self._device_info = device_info
@property
def unique_id(self):
"""Return Unique ID string."""
return f"lares_zones_{self._idx}"
@property
def name(self):
"""Return the name of this camera."""
return self._description
@property
def is_on(self):
"""Return the state of the sensor."""
return self._coordinator.data[self._idx]["status"] == ZONE_STATUS_ALARM
@property
def available(self):
"""Return True if entity is available."""
status = self._coordinator.data[self._idx]["status"]
return status != ZONE_STATUS_NOT_USED or status == ZONE_BYPASS_ON
@property
def device_class(self):
"""Return the class of this device."""
return DEFAULT_DEVICE_CLASS
@property
def device_info(self):
"""Return basic information of this device."""
return self._device_info
``` |
{
"source": "johnnybigoo/django-30-min",
"score": 2
} |
#### File: tasks/tests/test_tasks_editing.py
```python
import pytest
from django.urls import reverse
from pytest_django.asserts import assertContains
from webdev.tasks.models import Task
@pytest.fixture
def pending_task(db):
return Task.objects.create(name='Task 1', done='False')
@pytest.fixture
def response_with_pending_task(client, pending_task):
resp = client.post(
reverse('tasks:detail', kwargs={'task_id': pending_task.id}),
data={'done': 'true', 'name': f'{pending_task.name}-edited'}
)
return resp
def test_status_code(response_with_pending_task):
assert response_with_pending_task.status_code == 302
def test_task_done(response_with_pending_task):
assert Task.objects.first().done
@pytest.fixture
def task_done(db):
return Task.objects.create(name='Task 1', done='True')
@pytest.fixture
def response_with_task_done(client, task_done):
resp = client.post(
reverse('tasks:detail', kwargs={'task_id': task_done.id}),
data={'name': f'{task_done.name}-edited'}
)
return resp
def test_pending_task(response_with_task_done):
assert not Task.objects.first().done
```
#### File: webdev/tasks/views.py
```python
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from django.urls import reverse
from webdev.tasks.forms import NewTaskForm, TaskForm
from webdev.tasks.models import Task
def home(request):
if request.method == 'POST':
form = NewTaskForm(request.POST)
if form.is_valid():
form.save()
return HttpResponseRedirect(reverse('tasks:home'))
else:
pending_tasks = Task.objects.filter(done=False).all()
tasks_done = Task.objects.filter(done=True).all()
return render(
request, 'tasks/home.html',
{
'form': form,
'pending_tasks': pending_tasks,
'tasks_done': tasks_done,
},
status=400)
pending_tasks = Task.objects.filter(done=False).all()
tasks_done = Task.objects.filter(done=True).all()
return render(request, 'tasks/home.html',
{
'pending_tasks': pending_tasks,
'tasks_done': tasks_done,
})
def detail(request, task_id):
if request.method=='POST':
task = Task.objects.get(id=task_id)
form = TaskForm(request.POST, instance=task)
if form.is_valid():
form.save()
return HttpResponseRedirect(reverse('tasks:home'))
def delete(request, task_id):
if request.method == 'POST':
Task.objects.filter(id=task_id).delete()
return HttpResponseRedirect(reverse('tasks:home'))
``` |
{
"source": "johnnyboiii3020/matchmaking-bot",
"score": 3
} |
#### File: johnnyboiii3020/matchmaking-bot/mybot.py
```python
import discord
import json
import random
import os
from discord.ext import commands
TOKEN = ""
client = commands.Bot(command_prefix = '--')
os.chdir(r'D:\Programming\Projects\Discord bot\jsonFiles')
SoloCounter = 30
SolominCounter = 10
Queueiter = 1
T_Queueiter = 1
TeamCounter = 50
TeamminCounter = 20
extensions = [
"cogs.Matchmaking",
"cogs.Moderator"
]
@client.event
async def on_ready():
botInfo = await client.application_info()
oauthlink = discord.utils.oauth_url(botInfo.id)
print('---------')
print('Username: {}'.format(client.user.name))
print('ID: {}'.format(client.user.id))
print('Server count: {}'.format(str(len(client.servers))))
print('Member count: {}'.format(str(len(set(client.get_all_members())))))
print('OAuth URL: {}'.format(oauthlink))
print('Cogs: {}'.format(client.cogs))
print('---------')
######################### Register Team #################################
@client.command(pass_context = True)
@commands.has_role('Registered')
async def registerTeam( ctx , teamName , player1: discord.Member , player2: discord.Member , player3: discord.Member , player4: discord.Member , player5: discord.Member):
if ctx.message.channel.id == "549911021511245834":
with open('Teams.json' , 'r') as f:
Teams = json.load(f)
players = [player1 , player2 , player3 , player4 , player5]
await update_data_Team(ctx , Teams , teamName , players)
with open('Teams.json' , 'w') as f:
json.dump(Teams , f , indent = 2)
async def update_data_Team(ctx , Teams , teamName , players):
if not teamName in Teams:
Teams[teamName] = {}
Teams[teamName]["teamElo"] = 0
Teams[teamName]["Players"] = []
Role = teamName
await client.create_role(ctx.message.server , name = Role, hoist = True , mentionable = True )
TeamRole = discord.utils.get(ctx.message.server.roles , name = Role)
for player in players:
print(player)
Teams[teamName]["Players"].append(player.mention)
await client.add_roles(player , TeamRole)
await client.say("{} is Registered as Team Cheers!!!!".format(teamName))
else:
await client.say("you are already registered")
############################ Register Solo ###################################
@client.command(pass_context = True)
async def registersolo( ctx , name: discord.Member):
if ctx.message.channel.id == "549911021511245834":
with open('Solo.json' , 'r') as f:
Solo = json.load(f)
await update_data_solo(Solo , name , ctx)
with open('Solo.json' , 'w') as f:
json.dump(Solo , f , indent = 2)
async def update_data_solo( Solo , name , player):
if not player.message.author.mention in Solo:
author = player.message.author.mention
member = player.message.author
Solo[author] = {}
Solo[author]["name"] = name
Solo[author]["Elo"] = 0
nickname = str(Solo[author]["Elo"]) + "~" + Solo[author]["name"]
Role = discord.utils.get(player.message.server.roles , name = 'Registered')
member.nick = nickname
await client.add_roles(member , Role)
await client.say("{} is Registered as Solo Cheers Guys!!!!".format(author))
else:
await client.say("you are already registered")
############################### Win Team ################################
@client.command(pass_context = True)
@commands.has_role('Mod')
async def winT(ctx , T_Queueno , Team , Team2):
with open('Teams_Queue.json' , 'r') as f:
Teams_Queue = json.load(f)
with open('Teams.json' , 'r') as f:
Teams = json.load(f)
Teams[Team]["teamElo"] = Teams[Team]["teamElo"] + TeamCounter
Teams[Team2]["teamElo"] = Teams[Team2]["teamElo"] - TeamminCounter
await display_win_team(Team , Team2)
with open('Teams.json' , 'w') as f:
json.dump(Teams , f , indent = 2)
###############CReate Team Queue Channel###########################
@client.command(pass_context = True)
@commands.has_role('Mod')
async def CreateTQueueChannel(ctx):
with open('Teams_Queue.json' , 'r') as f:
Teams_Queue = json.load(f)
Teams_Queue["1"] = []
with open('Teams_Queue.json' , 'w') as f:
json.dump(Teams_Queue , f , indent = 2)
########################## Join Team Queue ###################
@client.command(pass_context = True)
@commands.has_role('Registered')
async def joinQT(ctx , TeamName):
if ctx.message.channel.id == "549910313995206687":
with open('Teams.json' , 'r') as f:
Teams = json.load(f)
if "{}".format(TeamName) in Teams:
with open('Teams_Queue.json' , 'r') as f:
Teams_Queue = json.load(f)
await update_data_Team_Queue(Teams_Queue , TeamName)
with open('Teams_Queue.json' , 'w') as f:
json.dump(Teams_Queue , f , indent = 2)
else:
await client.say("{} is not registerd".format(TeamName))
async def update_data_Team_Queue(Teams_Queue , TeamName):
global T_Queueiter
T_Queueno = T_Queueiter
if len(Teams_Queue["{}".format(T_Queueno)]) >= 1:
Teams_Queue[str(T_Queueno)].append(TeamName)
await display_Team_Queue(T_Queueno , Teams_Queue , TeamName)
await display_match(T_Queueno , Teams_Queue)
T_Queueiter += 1
T_Queueno = T_Queueiter
Teams_Queue[str(T_Queueno)] = []
else:
if not TeamName in Teams_Queue[str(T_Queueno)]:
Teams_Queue[str(T_Queueno)].append(TeamName)
await display_Team_Queue(T_Queueno , Teams_Queue , TeamName)
else:
await client.say("{} is already in queue" .format(TeamName))
async def display_Team_Queue(T_Queueno , Teams_Queue , TeamName):
embed = discord.Embed(
title = "Team Queue : {}".format(T_Queueno),
description = "5 v 5 Custom Games"
)
embed.add_field(name = 'Team:' , value = "\n".join(Teams_Queue[str(T_Queueno)]) , inline = False)
await client.say(embed = embed)
async def display_match(T_Queueno , Teams_Queue):
embed = discord.Embed(
title= "Team Matchup Queue : {}".format(T_Queueno),
description = "5 v 5 Custom Games"
)
embed.add_field(name = 'Teams:' , value = "\n".join(Teams_Queue[str(T_Queueno)]) , inline = False)
with open('Maps.json' , 'r') as f:
Maps = json.load(f)
embed.add_field(name = 'Map:' , value = random.choice(Maps["Maps"]))
await client.say(embed = embed)
################Show Queue#################
@client.command(pass_context = True)
@commands.has_role('Registered')
async def showQ(ctx , Queueno):
if ctx.message.channel.id == "549910313995206687":
with open('Queue.json' , 'r') as f:
Queue = json.load(f)
        if len(Queue[str(Queueno)]) == 0:
await client.say("Queue is empty")
else:
await DisplayQueue(Queue , Queueno)
###############Show Team Points##########
@client.command(pass_context = True)
@commands.has_role('Registered')
async def pointsT(ctx , TeamName):
if ctx.message.channel.id == "551095980251021323":
with open('Teams.json' , 'r') as f:
Teams = json.load(f)
if TeamName in Teams:
await client.say("{}".format(Teams[TeamName][teamElo]))
####################Show Points ###############
@client.command(pass_context = True)
@commands.has_role('Registered')
async def points(ctx):
if ctx.message.channel.id == "551095980251021323":
with open('Solo.json' , 'r') as f:
Solo = json.load(f)
if ctx.message.author.mention in Solo:
await client.say("{}".format(Solo[ctx.message.author.mention]["Elo"]) + " points{}".format(ctx.message.author.mention))
######################### Win Solo ##############################
@client.command(pass_context = True)
@commands.has_role('Mod' )
async def winS(ctx , Queueno , Teamno , Teamno2):
with open('Solo_Teams.json' , 'r') as f:
Solo_Teams = json.load(f)
with open('Solo.json' , 'r') as f:
Solo = json.load(f)
await update_winS(Solo_Teams , Solo , Queueno , Teamno , Teamno2)
with open('Solo.json' , 'w') as f:
json.dump(Solo , f , indent = 2)
async def update_winS(Solo_Teams , Solo , Queueno , Teamno , Teamno2):
for player in Solo_Teams[str(Queueno)][str(Teamno)]:
Solo[player]["Elo"] = Solo[player]["Elo"] + SoloCounter
await update_nick(player)
for players in Solo_Teams[str(Queueno)][str(Teamno2)]:
Solo[players]["Elo"] = Solo[players]["Elo"] - SolominCounter
        await update_nick(players)
await display_updates(Solo_Teams , Teamno , Teamno2 , Queueno)
async def update_nick(name):
with open('Solo.json' , 'r') as f:
Solo = json.load(f)
nickname = str(Solo[name]["Elo"]) + "~" + str(Solo[name]["name"])
server = client.get_server("549553345044545536")
member = server.get_member(name[2:len(name)-1])
    # As above, the nickname must be changed through the client in the old
    # async discord.py API; assigning member.nick alone does nothing.
    await client.change_nickname(member , nickname)
async def display_updates(Solo_Teams , Teamno , Teamno2 , Queueno):
embed = discord.Embed(
title = "Updates:"
)
embed.add_field(name = 'Winning Team + {}'.format(SoloCounter) , value = '\n'.join(Solo_Teams[str(Queueno)][str(Teamno)]))
embed.add_field(name = 'Losing Team - {}'.format(SolominCounter) , value = '\n'.join(Solo_Teams[str(Queueno)][str(Teamno2)]))
await client.say(embed = embed)
####Leave Queue #####
@client.command(pass_context = True)
@commands.has_role('Registered')
async def leaveQ(ctx):
with open('Queue.json' , 'r') as f:
Queue = json.load(f)
await update_data_lQueue(Queue , ctx.message.author)
with open('Queue.json' , 'w') as f:
json.dump(Queue , f , indent = 2)
async def update_data_lQueue( Queue , author):
print(Queueiter)
if author.mention in Queue[str(Queueiter)]:
Queue[str(Queueiter)].remove(author.mention)
await client.say("{} has left the queue".format(author.mention))
else:
await client.say("{} is not in the queue".format(author.mention))
###Create Queue Channel ####
@client.command(pass_context = True)
@commands.has_role('Mod')
async def CreateQueueChannel(ctx):
with open('Queue.json' , 'r') as f:
Queue = json.load(f)
    Queue[str(Queueiter)] = []
await client.say("Queue Channel is Created")
with open('Queue.json' , 'w') as f:
json.dump(Queue , f , indent = 2)
#############Join Queue#########
@client.command(pass_context = True)
@commands.has_role('Registered')
async def joinQ(ctx):
with open('Solo.json' , 'r') as f:
Solo = json.load(f)
if ctx.message.author.mention in Solo:
with open('Queue.json' , 'r') as f:
Queue = json.load(f)
await update_data_Queue( Queue , ctx.message.author)
with open('Queue.json' , 'w') as f:
json.dump(Queue , f , indent = 2)
else:
await client.say("{} is not registered".format(ctx.message.author))
async def update_data_Queue(Queue , author):
global Queueiter
Queueno = Queueiter
if len(Queue["{}".format(Queueno)]) >= 9:
Queue[str(Queueno)].append(author.mention)
await DisplayQueue(Queue , Queueno)
await Create_solo_teams(Queue , Queueno)
Queueiter = Queueiter + 1
Queueno = Queueiter
Queue[str(Queueno)] = []
else:
if not author.mention in Queue[str(Queueno)]:
Queue[str(Queueno)].append(author.mention)
await client.say("{} joined".format(author.mention))
await DisplayQueue( Queue , Queueno)
else:
await client.say("{} already in queue" .format(author.mention))
async def DisplayQueue( Queue , Queueno):
embed = discord.Embed(
title = 'Queue:{}'.format(Queueno),
description = "5 v 5 Custom Games:"
)
embed.add_field(name = "Lobby" , value = '\n'.join(Queue[str(Queueno)]), inline = True)
await client.say(embed = embed)
async def Create_solo_teams(Queue , Queueno):
with open('Solo_Teams.json' , 'r') as f:
Solo_Teams = json.load(f)
await update_Solo_teams(Solo_Teams , Queueno , Queue)
with open('Solo_Teams.json' , 'w') as f:
json.dump(Solo_Teams , f , indent = 2)
async def update_Solo_teams( Solo_Teams , Queueno , Queue):
if not Queueno in Solo_Teams:
Solo_Teams[str(Queueno)] = {}
Solo_Teams[str(Queueno)]["Team1"] = []
Solo_Teams[str(Queueno)]["Team2"] = []
for x in range(0 , 5):
Queuerand = random.choice(Queue[str(Queueno)])
Queue[str(Queueno)].remove(Queuerand)
Solo_Teams[str(Queueno)]["Team1"].append(Queuerand)
for x in range(0 , 5):
Queuerand = random.choice(Queue[str(Queueno)])
Queue[str(Queueno)].remove(Queuerand)
Solo_Teams[str(Queueno)]["Team2"].append(Queuerand)
await Display_solo_teams(Solo_Teams , Queueno)
async def Display_solo_teams( Solo_Teams , Queueno):
embed = discord.Embed(
title = 'Queueno.:{}'.format(Queueno),
description = '5 v 5 Custom Games'
)
embed.add_field(name = "Team1:", value = '\n'.join(Solo_Teams[str(Queueno)]["Team1"]) , inline = True)
embed.add_field(name = "Team2:", value = '\n'.join(Solo_Teams[str(Queueno)]["Team2"]) , inline = False)
with open('Maps.json' , 'r') as f:
Maps = json.load(f)
embed.add_field(name = "Map:", value = random.choice(Maps["Maps"]) , inline = False)
embed.add_field(name = "Host of The Match" , value = random.choice(Solo_Teams[str(Queueno)]["Team1"]) , inline = False)
await client.say(embed = embed)
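# ---------------------------------------------------------------------------
# Note on the JSON files read and written above (a sketch of the layout this
# script assumes, inferred from the json.load/json.dump calls; the files are
# created by the commands registered earlier in the bot, so the keys used
# there are authoritative):
#   Solo.json        -> {"<@user_id>": {"name": str, "Elo": int}, ...}
#   Queue.json       -> {"<queue_no>": ["<@user_id>", ...], ...}
#   Solo_Teams.json  -> {"<queue_no>": {"Team1": [...], "Team2": [...]}, ...}
#   Teams.json       -> {"<team_name>": {"teamElo": int, ...}, ...}
#   Teams_Queue.json -> {"<queue_no>": ["<team_name>", ...], ...}
#   Maps.json        -> {"Maps": ["<map_name>", ...]}
# ---------------------------------------------------------------------------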
if __name__ == '__main__':
for extension in extensions:
try:
client.load_extension(extension)
except Exception as e:
print('Failed to load extension {}\n{}: {}'.format(extension, type(e).__name__, e))
client.run(TOKEN)
``` |
{
"source": "JohnnyBro/tytus",
"score": 2
} |
#### File: fase2/team24/c3d.py
```python
from datetime import date
from variables import tabla as ts
from variables import NombreDB
from variables import cont as ncont
import tablaDGA as TAS
import sql as sql
import mathtrig as mt
from reportTable import *
cont = ncont
pila = []
for i in range(100):
pila.append(i)
def ejecutar():
global cont
sql.execute("CREATE DATABASE local;")
n_db = ts.buscarIDTB(NombreDB)
NuevoSimbolo = TAS.Simbolo(cont,'ValidaRegistros',TAS.TIPO.FUNCTION,n_db)
ts.agregar(NuevoSimbolo)
cont+=1
ambitoFuncion = ts.buscarIDF()
NuevoSimbolo = TAS.Simbolo(cont,'resultado',TAS.TIPO.INTEGER,ambitoFuncion,None, None, None, None, None, None, None ,None,None,None, None,False,False)
ts.agregar(NuevoSimbolo)
cont+=1
ambitoFuncion = ts.buscarIDF()
NuevoSimbolo = TAS.Simbolo(cont,'retorna',TAS.TIPO.INTEGER,ambitoFuncion,None, None, None, None, None, None, None ,None,None,None, None,False,False)
ts.agregar(NuevoSimbolo)
cont+=1
ts.modificar_valor('resultado', 7777.0)
ts.modificar_valor('retorna', 1.0)
ts.modificar_valor('retorna', 0.0)
ts.modificar_valor('resultado', 8888.0)
ts.modificar_valor('retorna', 1.0)
ts.modificar_valor('retorna', 0.0)
ts.modificar_valor('resultado', 9999.0)
ts.modificar_valor('retorna', 1.0)
ts.modificar_valor('retorna', 0.0)
sql.execute('3D')
graphTable(ts)
def ValidaRegistros():
resultado = 0
retorna = 0
tabla = pila[0]
cantidad = pila[1]
t0 = tabla
t1 = 'tbProducto'
t2 = t0 == t1
if t2:
t3 = 7777
resultado = t3
t4 = cantidad
t5 = resultado
t6 = t4 == t5
if t6:
t7 = 1
retorna = t7
else:
t8 = 0
retorna = t8
t9 = tabla
t10 = 'tbProductoUp'
t11 = t9 == t10
if t11:
t12 = 8888
resultado = t12
t13 = cantidad
t14 = resultado
t15 = t13 == t14
if t15:
t16 = 1
retorna = t16
else:
t17 = 0
retorna = t17
t18 = tabla
t19 = 'tbbodega'
t20 = t18 == t19
if t20:
t21 = 9999
resultado = t21
t22 = cantidad
t23 = resultado
t24 = t22 == t23
if t24:
t25 = 1
retorna = t25
else:
t26 = 0
retorna = t26
t27 = retorna
pila[10] = t27
ejecutar()
n_db = ts.buscarIDTB(NombreDB)
NuevoSimbolo = TAS.Simbolo(
cont, 'sp_validainsert', TAS.TIPO.FUNCTION, n_db)
ts.agregar(NuevoSimbolo)
cont += 1
sp_validainsert()
sql.execute('3D')
graphTable(ts)
def sp_validainsert():
sql.execute(
'''insert into tbbodega values ( 1.0,'BODEGA CENTRAL',1.0 ) ;''')
sql.execute(
'''insert into tbbodega (idbodega,bodega) values ( 2.0,'BODEGA ZONA 12' ) ;''')
sql.execute(
'''insert into tbbodega (idbodega,bodega,estado) values ( 3.0,'BODEGA ZONA 11',1.0 ) ;''')
sql.execute(
'''insert into tbbodega (idbodega,bodega,estado) values ( 4.0,'BODEGA ZONA 1',1.0 ) ;''')
sql.execute(
'''insert into tbbodega (idbodega,bodega,estado) values ( 5.0,'BODEGA ZONA 10',1.0 ) ;''')
ejecutar()
```
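The generated `ValidaRegistros` routine above passes data through the global `pila` list instead of ordinary arguments. A minimal driver sketch (the slot numbers and the `'tbProducto'`/`7777` values are taken from the generated code itself):

```python
# Hypothetical driver for the generated three-address code above.
pila[0] = 'tbProducto'   # parameter: table name
pila[1] = 7777           # parameter: expected row count
ValidaRegistros()
print(pila[10])          # -> 1 when the count matches, 0 otherwise
```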
#### File: C3D/operations/datatype.py
```python
from analizer_pl.abstract.expression import Expression
from analizer_pl.modules.expressions import C3D
from analizer_pl.C3D.operations import operation
from analizer_pl.reports.Nodo import Nodo
from analizer_pl.abstract.global_env import GlobalEnvironment
class Identifier(Expression):
def __init__(self, id, isBlock, row, column) -> None:
super().__init__(row, column)
self.id = id
self.isBlock = isBlock
def execute(self, environment):
if self.isBlock:
return C3D("", self.id, self.row, self.column)
if environment.isBlock:
return C3D("", self.id, self.row, self.column)
if not isinstance(environment, GlobalEnvironment):
if environment.getVar(self.id):
return C3D("", '"+str(' + self.id + ')+"', self.row, self.column)
return C3D("", self.id, self.row, self.column)
def dot(self):
nod = Nodo(self.id)
return nod
class TernaryExpression(Expression):
def __init__(self, temp, exp1, exp2, exp3, operator, isBlock, row, column):
super().__init__(row, column)
self.temp = temp
self.exp1 = exp1
self.exp2 = exp2
self.exp3 = exp3
self.operator = operator
self.isBlock = isBlock
def execute(self, environment):
if self.isBlock:
op = operation.Ternary(
self.temp, self.exp1, self.exp2, self.exp3, self.operator, self.row, self.column
)
return op.execute(environment)
c3d = ""
val1 = self.exp1.execute(environment)
c3d += val1.temp
c3d += " " + operators.get(self.operator, self.operator) + " "
val2 = self.exp2.execute(environment)
c3d += val2.temp
c3d += " AND "
val3 = self.exp3.execute(environment)
c3d += val3.temp
return C3D(val1.value + val2.value + val3.value, c3d, self.row, self.column)
class BinaryExpression(Expression):
def __init__(self, temp, exp1, exp2, operator, isBlock, row, column):
super().__init__(row, column)
self.temp = temp
self.exp1 = exp1
self.exp2 = exp2
self.operator = operator
self.isBlock = isBlock
def execute(self, environment):
if self.isBlock:
op = operation.Binary(
self.temp, self.exp1, self.exp2, self.operator, self.row, self.column
)
return op.execute(environment)
space = ""
if self.operator == 'AND' or self.operator == 'OR':
space = " "
c3d = ""
val1 = self.exp1.execute(environment)
c3d += val1.temp
c3d += space+self.operator+space
val2 = self.exp2.execute(environment)
c3d += val2.temp
return C3D(val1.value + val2.value, c3d, self.row, self.column)
def dot(self):
n1 = self.exp1.dot()
n2 = self.exp2.dot()
new = Nodo(self.operator)
new.addNode(n1)
new.addNode(n2)
return new
class UnaryExpression(Expression):
def __init__(self, temp, exp, operator, isBlock, row, column):
super().__init__(row, column)
self.temp = temp
self.exp = exp
self.operator = operator
self.isBlock = isBlock
def execute(self, environment):
if self.isBlock:
op = operation.Unary(
self.temp, self.exp, self.operator, self.row, self.column
)
return op.execute(environment)
val = self.exp.execute(environment)
if self.operator == '-' or self.operator == '+' or self.operator == 'NOT':
c3d = self.operator
c3d += val.temp
else:
c3d = val.temp
c3d += operators.get(self.operator, self.operator)
return C3D(val.value, c3d, self.row, self.column)
def dot(self):
n = self.exp.dot()
new = Nodo(self.operator)
new.addNode(n)
return new
operators = {
"ISNULL": " IS NULL ",
"ISTRUE": " IS TRUE ",
"ISFALSE": " IS FALSE ",
"ISUNKNOWN": " IS UNKNOWN ",
"ISNOTNULL": " IS NOT NULL ",
"ISNOTTRUE": " IS NOT TRUE ",
"ISNOTFALSE": " IS NOT FALSE ",
"ISNOTUNKNOWN": " IS NOT UNKNOWN ",
"NOTBETWEEN": "NOT BETWEEN",
"BETWEENSYMMETRIC": "BETWEEN SYMMETRIC"
}
```
#### File: C3D/operations/drop_func.py
```python
from analizer_pl.abstract.instruction import Instruction
from analizer_pl.abstract.environment import Environment
from analizer_pl.statement.expressions import code
from analizer_pl import grammar
from analizer_pl.reports.Nodo import Nodo
class DropFunction(Instruction):
def __init__(self, id, row, column) -> None:
super().__init__(row, column)
self.id = id
def execute(self, environment):
c3d = ""
tab = ""
tab1 = False
if isinstance(environment, Environment):
tab += "\t"
tab1 = True
func = environment.globalEnv.dropFunction(self.id)
else:
func = environment.dropFunction(self.id)
if func:
c3d += tab + "del " + self.id + "\n"
grammar.optimizer_.addIgnoreString(str("del " + self.id), self.row, tab1)
return code.C3D(c3d, "drop_func", self.row, self.column)
def dot(self):
new = Nodo("DROP_FUNCTION")
new.addNode(Nodo(str(self.id)))
return new
```
#### File: sql_statement/select/union.py
```python
from analizer_pl.abstract import instruction
from analizer_pl.statement.expressions import code
from analizer_pl.reports.Nodo import Nodo
from analizer_pl.abstract.environment import Environment
from analizer_pl import grammar
class Select(instruction.Instruction):
def __init__(self, type_, select1, select2, all, row, column):
instruction.Instruction.__init__(self, row, column)
self.type = type_
self.select1 = select1
self.select2 = select2
self.all = all
def execute(self, environment):
out = "fase1.execution(dbtemp + "
select1 = self.select1.execute(environment).value
select1 = select1[25:len(select1)-5]
select2 = self.select2.execute(environment).value
        select2 = select2[27:len(select2)-5]
out += select1 + " "
out += self.type + " "
out += self.all + " "
out += select2 + " ;"
out += '")\n'
if isinstance(environment, Environment):
grammar.optimizer_.addIgnoreString(out, self.row, True)
out = "\t" + out
else:
grammar.optimizer_.addIgnoreString(out, self.row, False)
return code.C3D(out, "select", self.row, self.column)
def dot(self):
return Nodo("SQL_INSTRUCTION:_SELECT")
``` |
{
"source": "johnnybus/aurora-tf-autoscaling",
"score": 3
} |
#### File: aurora-tf-autoscaling/libs/cloudwatch.py
```python
import datetime
class RDS(object):
def __init__(self, cloudwatch_con, window=240, period=60):
self.cloudwatch_con = cloudwatch_con
self.window = window
self.period = period
def get_rds_cluster_metric(self, cluster, role, metric):
now_datetime = datetime.datetime.utcnow()
metrics = self.cloudwatch_con.get_metric_statistics(
Namespace='AWS/RDS',
MetricName=metric,
Dimensions=[
{
'Name': 'DBClusterIdentifier',
'Value': cluster,
},
{
'Name': 'Role',
'Value': role
}
],
StartTime=now_datetime - datetime.timedelta(seconds=self.window),
EndTime=now_datetime,
Statistics=[
'Average'
],
Period=self.period,
)
metrics_dp = sorted(metrics['Datapoints'], key=lambda dp: dp['Timestamp'], reverse=True)
return metrics_dp[0]
```
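A short sketch of how the `RDS` helper above can be driven together with the `cloudwatch_connection` helper from `libs/utils.py` further below (the profile, region, cluster identifier and metric name are placeholders):

```python
from libs.cloudwatch import RDS
from libs.utils import cloudwatch_connection

# Build a boto3 CloudWatch client and wrap it.
con = cloudwatch_connection('my-profile', 'eu-west-1')
rds = RDS(con, window=240, period=60)

# Latest averaged datapoint for the reader instances of an Aurora cluster.
dp = rds.get_rds_cluster_metric('my-aurora-cluster', 'READER', 'CPUUtilization')
print(dp['Average'])
```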
#### File: aurora-tf-autoscaling/libs/terraform.py
```python
import re
class Terraform(object):
def __init__(self, project):
self.project = project
def change_n_nodes(self, vars_file, count):
for l in open(self.project + vars_file, 'rw'):
l = re.sub(r"(?i)^.*AURORA_NODES_COUNT.*$", "AURORA_NODES_COUNT = %s" % count, l)
print l
def get_n_nodes(self, vars_file):
print vars_file
return 2
def plan(self):
pass
def apply(self):
pass
```
#### File: aurora-tf-autoscaling/libs/utils.py
```python
import boto3
def cloudwatch_connection(profile, region):
    # Fall back to the default credential chain when no profile is given,
    # otherwise `session` would be unbound below.
    if profile:
        session = boto3.Session(profile_name=profile)
    else:
        session = boto3.Session()
    con = session.client('cloudwatch', region_name=region)
return con
``` |
{
"source": "johnnyc2/FOS_View",
"score": 3
} |
#### File: johnnyc2/FOS_View/familyplotter.py
```python
import argparse
import cStringIO
from fosfile import Vault
from codecs import open
def getColorFromId(Id):
Id = Id % 10
if Id == 0:
return 'red'
elif Id == 1:
return 'green'
elif Id == 2:
return 'blue'
elif Id == 3:
return 'cyan'
elif Id == 4:
return 'magenta'
elif Id == 5:
return 'yellow'
elif Id == 6:
return 'pink'
elif Id == 7:
return 'palegreen'
elif Id == 8:
return 'navy'
elif Id == 9:
return 'sienna'
def incCoupleCounter(dweller):
try:
dweller.couplesCount = dweller.couplesCount + 1
except AttributeError:
dweller.couplesCount = 1
def getCoupleCounter(dweller):
try:
return dweller.couplesCount
except AttributeError:
return 0
def setDwellerFirstCoupleRole(dweller, role):
if role == 'couple0':
raise Exception("pills here")
try:
return dweller.firstCouple
except AttributeError:
dweller.firstCouple = role
def getDwellerFirstCouple(dweller):
return dweller.firstCouple
def setAsChild(dweller):
dweller.isAChild = True
def isAChild(dweller):
try:
return dweller.isAChild
except AttributeError:
return False
def addOutputNodeToDweller(dweller, name):
try:
if name not in dweller.roles:
dweller.roles.append(name)
except AttributeError:
dweller.roles = [name]
def getRoles(dweller):
try:
return dweller.roles
except AttributeError:
return ['dweller_%s_solonely' % dweller.serializeId]
class Couple(object):
counter = 0
def __init__(self, father, mother):
self.father = father
self.mother = mother
[incCoupleCounter(x) for x in (father,mother)]
self.index = Couple.counter
Couple.counter = Couple.counter + 1
def isParentOf(self, dweller):
try:
rel = dweller.relations
l = rel.ascendants[0]
r = rel.ascendants[1]
return l == self.father and r == self.mother
except AttributeError:
return False
def coupleId(self):
return 'couple%s' % self.index
def mergedDotName(self):
fatherDot = dwellerDotName(self.father, self.coupleId())
motherDot = dwellerDotName(self.mother, self.coupleId())
return '%sAnd%s' % (fatherDot, motherDot)
def dotOutput(self, output):
fatherDot = dwellerDotName(self.father, self.coupleId())
motherDot = dwellerDotName(self.mother, self.coupleId())
mergeNode = self.mergedDotName()
output.write('subgraph couple_%s_graph {\n' % self.index)
output.write('rankdir=LR\n')
output.write('style=invis\n')
output.write('rank=same\n')
output.write('%s\n' % fatherDot)
output.write('%s\n' % motherDot)
output.write('%s [shape=point]\n' % mergeNode)
output.write('%s -> %s [dir=none,weight=1000,penwidth=2,color=%s]\n' % (fatherDot, mergeNode, getColorFromId(self.index)))
output.write('%s -> %s [dir=none,weight=1000,penwidth=2,color=%s]\n' % (mergeNode, motherDot, getColorFromId(self.index)))
output.write('}\n')
@staticmethod
def create(dwellers):
couplesDwellers = []
for dweller in dwellers:
try:
rel = dweller.relations
father = rel.ascendants[0]
mother = rel.ascendants[1]
except AttributeError:
father = None
mother = None
if father:
couple = {'father': father, 'mother': mother}
if couple not in couplesDwellers:
couplesDwellers.append(couple)
result = []
for coupleDwellers in couplesDwellers:
result.append(Couple(**coupleDwellers))
return result
class Brotherhoods(object):
counter = 0
def __init__(self, brothers, couple):
self.brothers = brothers[:]
self.parents = couple
[setAsChild(x) for x in brothers]
self.index = Brotherhoods.counter
Brotherhoods.counter = Brotherhoods.counter + 1
def dotOutput(self, output):
lvl1Node = '%sSons' % self.parents.mergedDotName()
output.write('subgraph brotherhood_lvl1_%s_graph {\n' % self.index)
output.write('rankdir=LR\n')
output.write('style=invis\n')
output.write('rank=same\n')
output.write('%s [shape=point]\n' % lvl1Node)
index = 1
count = len(self.brothers)
needMiddle = count % 2 == 1
if needMiddle:
middle = count / 2 + 1
else:
middle = 0
leftLink = count / 2
rightLink = count / 2 + 1
right = None
for brother in self.brothers:
if index != middle:
name = dwellerDotName(brother, 'topnode')
output.write('%s [shape=point]\n' % name)
else:
name = lvl1Node
if not needMiddle:
if index == leftLink:
output.write('%s->%s [dir=none,color=gray]\n' % (name, lvl1Node))
if index == rightLink:
output.write('%s->%s [dir=none,color=gray]\n' % (lvl1Node, name))
left = right
right = name
if index > 1:
if needMiddle or index != rightLink:
output.write('%s->%s [dir=none,color=gray]\n' %(left, right))
index = index + 1
output.write('}\n')
if False:
output.write('subgraph brotherhood_%s_graph {\n' % self.index)
output.write('rankdir=LR\n')
output.write('style=invis\n')
output.write('rank=same\n')
for brother in self.brothers:
output.write('%s\n' % dwellerDotName(brother, 'child'))
output.write('}\n')
index = 1
for brother in self.brothers:
if index == middle:
topName = lvl1Node
else:
topName = dwellerDotName(brother, 'topnode')
output.write('%(top)s->%(id)s [dir=none,color="gray"]\n' % {'top': topName, 'id': dwellerDotName(brother, 'child')})
index = index + 1
output.write('%s->%s [dir=none]\n' % (self.parents.mergedDotName(), lvl1Node))
@staticmethod
def create(dwellers, couples):
result = []
for couple in couples:
brothers = []
for dweller in dwellers:
if couple.isParentOf(dweller):
brothers.append(dweller)
if brothers:
result.append(Brotherhoods(brothers, couple))
return result
def dwellerDotName(dweller, role):
# Here's how I'll do it:
# If we want a "topnode" node, always give it. It is used for structure.
# If we want a "child" node, always give id. Child nodes can only happen once.
# If we want a coupleX node...
# - if the dweller is only in ONE couple, return either the "child" node, or the "unique" node, whichever exist.
# - if the dweller is part of multiple couples, produce multiple "coupleX" nodes. Later, if:
# - the dweller is also a child, link them to the child node
# - the dweller is not a child, link all "secondary" coupleX nodes to the couple0 node
if role == 'topnode':
name = 'dweller_%s_topnode' % dweller.serializeId
elif role == 'child':
name = 'dweller_%s_child' % dweller.serializeId
addOutputNodeToDweller(dweller, name)
else:
if getCoupleCounter(dweller) == 1:
if isAChild(dweller):
name = dwellerDotName(dweller, 'child')
else:
name = 'dweller_%s_%s' % (dweller.serializeId, role)
addOutputNodeToDweller(dweller, name)
setDwellerFirstCoupleRole(dweller, name)
else:
name = 'dweller_%s_%s' % (dweller.serializeId, role)
addOutputNodeToDweller(dweller, name)
setDwellerFirstCoupleRole(dweller, name)
return name
def specialString(dweller):
try:
stats = dweller.stats
except AttributeError:
return ''
merged = {}
for statName in 'SPECIAL':
merged[statName] = stats.get(statName).getFullValue()
    return 'S:%(S)s P:%(P)s E:%(E)s\\nC:%(C)s I:%(I)s A:%(A)s\\nL:%(L)s' % merged
def dotOutputDweller(dweller, output):
roles = getRoles(dweller)
for role in roles:
try:
if dweller.gender == 1:
outlineColor = 'pink'
else:
outlineColor = 'blue'
if dweller.health.healthValue <= 0:
backgroundColor = 'gray'
else:
backgroundColor = 'white'
except AttributeError:
outlineColor = 'black'
backgroundColor = 'red'
label = '%s\\n%s' % (dweller.getFullName(), specialString(dweller))
output.write('%(id)s [shape=box,label="%(label)s",color="%(outline)s",bgcolor="%(bg)s"]\n' % {'id': role, 'label': label, 'outline': outlineColor, 'bg': backgroundColor})
if getCoupleCounter(dweller) > 10000:
for role in roles:
if role[-5:] == 'child':
continue
if isAChild(dweller):
output.write('%s -> %s [weight=-1000,style=dotted]\n' % (role, dwellerDotName(dweller, 'child')))
def main(config):
vault = Vault(config['input'])
couples = Couple.create(vault.dwellers.dwellers)
brotherhoods = Brotherhoods.create(vault.dwellers.dwellers, couples)
sio = cStringIO.StringIO()
outputDot(vault, couples, brotherhoods, sio)
if config['type'] == 'dot':
with open(config['output'], 'w') as output:
output.write(sio.getvalue())
elif config['type'] == 'png':
import pydot
graph = pydot.graph_from_dot_data(sio.getvalue())
graph.write_png(config['output'])
def outputDot(vault, couples, brotherhoods, output):
output.write('digraph A {\n')
output.write('rankdir=TB\n')
for couple in couples:
couple.dotOutput(output)
for brotherhood in brotherhoods:
brotherhood.dotOutput(output)
for dweller in vault.dwellers.dwellers:
dotOutputDweller(dweller, output)
output.write('}\n')
def parseCli():
parser = argparse.ArgumentParser(description = 'Produce a family tree from a Fallout Shelter vault save')
parser.add_argument('--input', '-i', type=argparse.FileType('rb'), required=True, help='Path to the vault file')
parser.add_argument('--output', '-o', help='Path for the output file')
parser.add_argument('--type', '-t', choices=['png', 'dot'], default='png', help='Change the type of output (default to png)')
result = vars(parser.parse_args())
if result['output'] is None:
result['output'] = 'family.%s' % result['type']
return result
if __name__ == '__main__':
cliConfig = parseCli()
main(cliConfig)
```
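A minimal way to exercise the plotter above without going through the command line (a sketch: `Vault1.sav` is a placeholder for a decrypted vault save that `fosfile.Vault` can parse, and since the `'png'` output type needs the optional `pydot` dependency, `'dot'` output is used here):

```python
# Equivalent to: python familyplotter.py --input Vault1.sav --output family.dot --type dot
config = {
    'input': open('Vault1.sav', 'rb'),   # parseCli() normally supplies an open file here
    'output': 'family.dot',
    'type': 'dot',
}
main(config)
```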
#### File: johnnyc2/FOS_View/imager.py
```python
import argparse
from fosfile import Vault
from PIL import Image, ImageDraw
def getCoordinates(col, row, width, config):
x1 = col * (config['roomWidth'] + config['roomSpaceX']) + config['roomOffsetX']
x2 = x1 + width * (config['roomWidth'] + config['roomSpaceX']) - config['roomSpaceX']
y1 = row * (config['roomHeight'] + config['roomSpaceY']) + config['roomOffsetY']
y2 = y1 + config['roomHeight']
return ((x1, y1), (x2, y2))
def enlargeRect(rect, point):
if point[0] < rect[0]:
rect[0] = point[0]
if point[0] > rect[2]:
rect[2] = point[0]
if point[1] < rect[1]:
rect[1] = point[1]
if point[1] > rect[3]:
rect[3] = point[1]
def main(config):
vault = Vault(config['input'])
rect = [0, 0, 0, 0]
for room in vault.vault.rooms:
pos = getCoordinates(room.col, room.row, room.getRoomWidth(), config)
enlargeRect(rect, pos[0])
enlargeRect(rect, pos[1])
for rock in vault.vault.rocks:
pos = getCoordinates(rock.c, rock.r, 2, config)
enlargeRect(rect, pos[0])
enlargeRect(rect, pos[1])
img = Image.new('RGB', (rect[2], rect[3]))
drawer = ImageDraw.Draw(img)
for room in vault.vault.rooms:
pos = getCoordinates(room.col, room.row, room.getRoomWidth(), config)
drawer.rectangle(pos, fill='red', outline='white')
for rock in vault.vault.rocks:
pos = getCoordinates(rock.c, rock.r, 2, config)
drawer.ellipse(pos, outline = 'white', fill='gray')
img.save(config['output'], 'PNG')
def parseCli():
parser = argparse.ArgumentParser(description = 'Produce a picture showing the rooms layout')
parser.add_argument('--input', type=argparse.FileType('rb'), required=True, help='Path to the vault file')
parser.add_argument('--output', type=argparse.FileType('wb'), default='output.png', help='Path for the output PNG file')
parser.add_argument('--roomWidth', type=int, default=30, help='Width of an elevator, in pixel (=1/3 or a room)')
parser.add_argument('--roomHeight', type=int, default=60, help='Height of a room, in pixel')
parser.add_argument('--roomSpaceX', type=int, default=3, help='Horizontal spacing between rooms')
parser.add_argument('--roomSpaceY', type=int, default=3, help='Vertical spacing between rooms')
parser.add_argument('--roomOffsetX', type=int, default=0, help='X Offset to start putting the rooms on the output')
parser.add_argument('--roomOffsetY', type=int, default=0, help='Y Offset to start putting the rooms on the output')
return vars(parser.parse_args())
if __name__ == '__main__':
main(parseCli())
``` |
{
"source": "johnnycakes79/pyops",
"score": 3
} |
#### File: pyops/dashboard/upload.py
```python
from compute import timeline
from flask import Flask, request, redirect, url_for, send_from_directory
from flask import render_template
from model import InputForm
from werkzeug import secure_filename
import os
import sys
app = Flask(__name__)
TEMPLATE_NAME = 'view_highcharts'
UPLOAD_FOLDER = '/tmp'
ALLOWED_EXTENSIONS = set(['txt', 'out'])
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS
@app.route('/', methods=['GET', 'POST'])
def upload_file():
if request.method == 'POST':
file = request.files['file']
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
return redirect(url_for('uploaded_file',
filename=filename))
return '''
<!doctype html>
<title>Upload new File</title>
<h1>Upload new File</h1>
<form action="" method=post enctype=multipart/form-data>
<p><input type=file name=file>
<input type=submit value=Upload>
</form>
'''
# @app.route('/uploads/#')
# def uploaded_file(filename):
# print(filename)
# return send_from_directory(app.config['UPLOAD_FOLDER'],
# filename)
# @app.route('/uploads/#')
def index(filename):
filepath = os.path.join(UPLOAD_FOLDER, filename)
result = timeline(filepath)
return render_template(TEMPLATE_NAME + '.html',
form=form, result=result)
if __name__ == '__main__':
app.run(debug=True)
```
#### File: pyops/pyops/edf.py
```python
import pandas as pd
import os
# from prettytable import PrettyTable
class EDF:
"""Experiment Description File Parser
Attributes:
ACTIONS (Object): Contains all the actions data in the file
AREAS (Object): Contains all the Areas data in the file
CONSTRAINTS (Object): Contains all the constraints found in the file
DATA_BUSES (Object): Contains all the data buses information
DATA_STORES (Object): Contains all the data stores data in the file
experiment (str): Contains the name of the experiment
fname (str): Contains the name of the EDF file
FOVS (Object): Contains all the Fields of View information
FTS (Object): Contains all the FTS data in the file
GLOBAL_PROPERTIES (dict): Contains all the possible global properties
header (list): Contains all the header information in the file
include_files (list): Contains all the include files in the file
        keywords (dict): Contains the keywords that indicate that a new Object
            has to be created, mapped to their reading functions.
meta (dict): Contains all the metadata information contained in header
MODES (Object): Contains all the modes data from the file
MODULES (Object): Contains all the modules data from the file
PARAMETERS (Object): Contains all the parameters data from the file
        PIDS (Object): Contains all the PIDs information from the file
        variables (dict): Contains all other variables found that don't belong
            to an Object
        WTF (list): Things found in the file that don't belong to any other
            field of this class
"""
def __init__(self, fname=None):
"""Constructor
Args:
fname (str, optional): Path name of the EDF file
"""
# Variable initialization
self.WTF = list()
self.meta = dict()
self.header = list()
self.variables = dict()
self.experiment = "Not said or not in the proper format."
self.include_files = list()
# Tables to fill in order as they appear in the file
self.DATA_BUSES = DataBuses()
self._global_properties = dict.fromkeys(
["Local_memory", "Dataflow", "Dataflow_PID", "Dataflow_aux_PID",
"Data_volume_data_rate", "HK_data_volume", "TM_frame_overhead",
"Power_profile_check", "Data_rate_profile_check",
"Exclusive_subsystems", "Global_actions", "Global_constraints"])
# For now we just represent the dictionary...
self.GLOBAL_PROPERTIES = self._global_properties
self.DATA_STORES = DataStores()
self.PIDS = PIDs()
self.FTS = FTS()
self.FOVS = FOVs()
self.AREAS = Areas()
self.MODULES = Modules()
self.MODES = Modes()
self.PARAMETERS = Parameters()
self.ACTIONS = Actions()
self.CONSTRAINTS = Constraints()
# Keywords to detect in the file linked to their reading functions
self.keywords = {'DATA_BUS': self.DATA_BUSES._read,
'DATA_STORE': self.DATA_STORES._read,
'PID': self.PIDS._read,
'FTS': self.FTS._read,
'AREA': self.AREAS._read,
'FOV': self.FOVS._read,
'MODULE': self.MODULES._read,
'MODE': self.MODES._read,
'PARAMETER': self.PARAMETERS._read,
'ACTION': self.ACTIONS._read,
'CONSTRAINT': self.CONSTRAINTS._read}
# Loading the given file
if fname is not None:
self._load(fname)
def _load(self, fname):
""" Reading the file and extracting the data.
Args:
fname (str): Path name of the file
"""
# Storing the name of the file for editting purposes
self.fname = fname
with open(fname) as f:
content = f.readlines()
content = self._concatenate_lines(content)
lines_to_remove = 0
# Read Header
for line in content:
if len(line) > 1:
if '\n' in line[0]:
pass
elif '#' in line.split()[0][0]:
self.header.append(line)
                    self._read_metadata(line)
else:
break
# Removing the line from content
lines_to_remove += 1
content = content[lines_to_remove:]
pos = 0
while pos < len(content):
l = content[pos].split()
if len(l) > 0:
if '\n' in content[pos][0]:
pos += 1
else:
# We have found a variable
if ':' in l[0][-1]:
# Checking if we have found a global property
if l[0][:-1] in self._global_properties:
self._global_properties[l[0][:-1]] = \
' '.join(l[1:])
pos += 1
# Checking if we have found a keyword
elif l[0][:-1].upper() not in self.keywords:
pos += self._read_variables(l)
elif len(l) > 1 and l[0][:-1].upper() in self.keywords:
pos += self.keywords[l[0][:-1].upper()](
content[pos:])
else:
pos += 1
# We have found a comment
elif '#' in l[0][0]:
self.WTF.append(line)
pos += 1
else:
pos += 1
# Removing the content from memory
content = None
# Creating the pandas tables
self._convert_dictionaries_into_dataframes()
def _check_consistency(self):
"""Checks consistency of the file
"""
if self._check_if_included_files_exist_in_directory():
print ("Everything seems to be ok, congratulations! :)")
def _check_if_included_files_exist_in_directory(self):
"""Checks whether included files exist in the same directory as
fname or not
Returns:
bool: True if all of them exist, False otherwise
"""
files_exist = True
# Getting the path of the directory where we are working
path = os.path.dirname(os.path.abspath(self.fname))
for fname in self.include_files:
# Removing possible problematic characters
fname = fname[0].strip('"')
if not os.path.isfile(os.path.join(path, fname)):
files_exist = False
output = "***[WARNING]***: "
output += "It seems as if " + fname + " is not in the same "
output += "directory as " + os.path.basename(self.fname)
print (output)
# Perhaps raising an exception here in the future...
return files_exist
def _concatenate_lines(self, content):
"""Concatenate all the lines that have a '\\' element.
Args:
content (list): list of lines to concatenate
Returns:
list: list of lines already concatenated
"""
out = list()
line = ""
for l in content:
# Concatening lines if '\' found
if '\\' in l and '#' not in l[0] and \
'\\' not in l[l.index('\\') + 1]:
line += ' ' + l[:l.index('\\')]
# Continues with the next iteration of the loop
continue
# If there was no concatenation of lines
if len(line) == 0:
line = l
# If we were concatenating, we concatenate the last one
else:
if '#' in l:
line += ' ' + l[:l.index('#')]
else:
line += ' ' + l
if line[0] == '\n':
out.append(line)
else:
out.append(' '.join(line.split()))
line = ""
return out
    def _read_metadata(self, line):
"""Function to read the metadata of the file
Args:
line (str): line to analyze
"""
if ': ' in line:
self.meta[line[1:line.index(': ')].strip()] = \
line[line.index(': ') + 1:-1].strip()
def _read_variables(self, line):
"""Function that read other variables that are not included in the
Objects.
Args:
line (str): line to be analyzed
Returns:
int: Number of lines read
"""
if 'Include_file:' in line or 'Include:' in line:
self.include_files.append(line[1:])
elif 'Experiment:' in line:
self.experiment = ' '.join(line[1:])
else:
self.variables[line[0][:-1]] = ' '.join(line[1:])
return 1
def _how_many_brackets_following(self, line):
"""Counts how many words starting with '[' and ending with ']' are
in a row from the beginning of the given line.
Args:
line (list): line to be analyzed
Returns:
int: Number of words found
"""
count = 0
for words in line:
if words[0] == '[' and words[-1] == ']':
count += 1
else:
break
return count
def _add_none_to_empty_fields(self, dictionary):
"""Adds None to the list which length is one unit less than the others.
Args:
dictionary (dict): Dictionary containing lists for every key
Returns:
dict: Dictionary modified
"""
# Adding None value to the empty fields
maximum = max(
[len(dictionary[x]) for x in dictionary])
for x in dictionary:
if len(dictionary[x]) < maximum:
dictionary[x].append(None)
return dictionary
def _convert_dictionaries_into_dataframes(self):
"""Convert the created dictionaries into pandas DataFrames
"""
self.DATA_BUSES._create_pandas()
self.DATA_STORES._create_pandas()
self.PIDS._create_pandas()
self.FTS._create_pandas()
self.FOVS._create_pandas()
self.AREAS._create_pandas()
self.MODES._create_pandas()
self.MODULES._create_pandas()
self.PARAMETERS._create_pandas()
self.ACTIONS._create_pandas()
self.CONSTRAINTS._create_pandas()
class DataBuses(EDF):
"""Data Buses class
Attributes:
Table (DataFrame): Pandas DataFrame containing the information
"""
def __init__(self):
"""Constructor"""
self.Table = None
self._data_buses = {"Data_bus": [], "Data_bus_rate_warning": [],
"Data_bus_rate_limit": []}
def _read(self, content):
"""Function that converts the input content into a dictionary
Args:
content (list): Lines where a object of this type was detected at
the beginning.
Returns:
int: number of lines used from the content
"""
counter = 0
for line in content:
line = line.split()
if len(line) > 1:
if line[0][:-1] in self._data_buses:
# If another Data Bus detected we ensure to keep same
# length of all the elements in the dictionary
if line[0] == 'Data_bus:':
self._data_buses = \
self._add_none_to_empty_fields(self._data_buses)
self._data_buses[line[0][:-1]].append(
' '.join(line[1:]))
elif '#' in line[0][0]:
pass
else:
self._data_buses = \
self._add_none_to_empty_fields(self._data_buses)
break
counter += 1
self._data_buses = \
self._add_none_to_empty_fields(self._data_buses)
return counter
def _create_pandas(self):
"""Transforms the dictionary into a pandas DataFrame"""
self.Table = pd.DataFrame(self._data_buses)
class DataStores(EDF):
"""Data Stores class
Attributes:
Table (DataFrame): Pandas DataFrame containing the information
"""
def __init__(self):
self.Table = None
self._data_stores = {"Label": [], "Memory size": [],
"Packet size": [], "Priority": [],
"Identifier": [], "Comment": []}
def _read(self, content):
"""Function that converts the input content into a dictionary
Args:
content (list): Lines where a object of this type was detected at
the beginning.
Returns:
int: number of lines used from the content
"""
counter = 0
for line in content:
line = line.split()
if len(line) > 1:
if line[0] == 'Data_store:':
# If another Data Store detected we ensure to keep same
# length of all the elements in the dictionary
self._data_stores = \
self._add_none_to_empty_fields(self._data_stores)
pos = self._how_many_brackets_following(line[2:]) + 2
if line[pos].upper() == 'SELECTIVE':
pos += 1
self._data_stores['Label'].append(' '.join(line[1:pos]))
prev_pos, pos = pos, \
self._how_many_brackets_following(
line[pos + 1:]) + pos + 1
self._data_stores['Memory size'].append(
' '.join(line[prev_pos:pos]))
prev_pos, pos = pos, \
self._how_many_brackets_following(
line[pos + 1:]) + pos + 1
self._data_stores['Packet size'].append(
' '.join(line[prev_pos:pos]))
if len(line) > pos:
if '#' in line[pos]:
self._data_stores['Comment'].append(
' '.join(line[pos:]))
continue
else:
self._data_stores['Priority'].append(line[pos])
if len(line) > pos + 1:
if '#' in line[pos + 1]:
self._data_stores['Comment'].append(
' '.join(line[pos + 1:]))
continue
else:
self._data_stores['Identifier'].append(
line[pos + 1])
if len(line) > pos + 2:
self._data_stores['Comment'].append(
' '.join(line[pos + 2:]))
elif '#' in line[0][0]:
pass
else:
self._data_stores = \
self._add_none_to_empty_fields(self._data_stores)
break
counter += 1
self._data_stores = \
self._add_none_to_empty_fields(self._data_stores)
return counter
def _create_pandas(self):
"""Transforms the dictionary into a pandas DataFrame"""
cols = ['Label', 'Memory size', 'Packet size', 'Priority',
'Identifier', 'Comment']
self.Table = pd.DataFrame(self._data_stores, columns=cols)
class PIDs(EDF):
"""PIDs class
Attributes:
Table (DataFrame): Pandas DataFrame containing the information
"""
def __init__(self):
self.Table = None
self._pids = {"PID number": [], "Status": [], "Data Store ID": [],
"Comment": []}
def _read(self, content):
"""Function that converts the input content into a dictionary
Args:
content (list): Lines where a object of this type was detected at
the beginning.
Returns:
int: number of lines used from the content
"""
counter = 0
for line in content:
line = line.split()
if len(line) > 1:
if line[0] == 'PID:':
# If another PID detected we ensure to keep same length
# of all the elements in the dictionary
self._pids = \
self._add_none_to_empty_fields(self._pids)
self._pids['PID number'].append(line[1])
self._pids['Status'].append(line[2])
self._pids['Data Store ID'].append(line[3])
if len(line) > 4:
self._pids['Comment'].append(' '.join(line[4:]))
elif '#' in line[0][0]:
pass
else:
self._pids = \
self._add_none_to_empty_fields(self._pids)
break
counter += 1
self._pids = \
self._add_none_to_empty_fields(self._pids)
return counter
def _create_pandas(self):
"""Transforms the dictionary into a pandas DataFrame"""
cols = ['PID number', 'Status', 'Data Store ID', 'Comment']
self.Table = pd.DataFrame(self._pids, columns=cols)
class FTS(EDF):
"""FTS class
Attributes:
Table (DataFrame): Pandas DataFrame containing the information
"""
def __init__(self):
self.Table = None
self._fts = {"Data Store ID": [], "Status": [], "Data Volume": [],
"Comment": []}
def _read(self, content):
"""Function that converts the input content into a dictionary
Args:
content (list): Lines where a object of this type was detected at
the beginning.
Returns:
int: number of lines used from the content
"""
counter = 0
for line in content:
line = line.split()
if len(line) > 1:
if line[0] == 'FTS:':
# If another FTS detected we ensure to keep same length
# of all the elements in the dictionary
self._fts = \
self._add_none_to_empty_fields(self._fts)
self._fts['Data Store ID'].append(line[1])
self._fts['Status'].append(line[2])
if len(line) > 4:
self._fts['Data Volume'].append(' '.join(line[3:4]))
else:
self._fts['Data Volume'].append(line[3])
if len(line) > 5:
self._fts['Comment'].append(' '.join(line[5:]))
elif '#' in line[0][0]:
pass
else:
self._fts = \
self._add_none_to_empty_fields(self._fts)
break
counter += 1
self._fts = \
self._add_none_to_empty_fields(self._fts)
return counter
def _create_pandas(self):
"""Transforms the dictionary into a pandas DataFrame"""
cols = ['Data Store ID', 'Status', 'Data Volume', 'Comment']
self.Table = pd.DataFrame(self._fts, columns=cols)
class FOVs(EDF):
"""Field of Views class
Attributes:
Table (DataFrame): Pandas DataFrame containing the information
"""
def __init__(self):
self.Table = None
self._fov = {"FOV": [], "FOV_lookat": [], "FOV_upvector": [],
"FOV_type": [], "FOV_algorithm": [],
"FOV_geometric_angles": [], "FOV_geometric_pixels": [],
"FOV_sub_view": [], "FOV_straylight_angles": [],
"FOV_straylight_duration": [], "FOV_active": [],
"FOV_image_timing": [], "FOV_imaging": [],
"FOV_pitch": [], "FOV_yaw": []}
def _read(self, content):
"""Function that converts the input content into a dictionary
Args:
content (list): Lines where a object of this type was detected at
the beginning.
Returns:
int: number of lines used from the content
"""
counter = 0
for line in content:
line = line.split()
if len(line) > 1:
if line[0][:-1] in self._fov:
# If another FOV detected we ensure to keep same length
# of all the elements in the dictionary
if line[0] == 'FOV:':
self._fov = \
self._add_none_to_empty_fields(self._fov)
self._fov[line[0][:-1]].append(' '.join(line[1:]))
elif '#' in line[0][0]:
pass
else:
self._fov = \
self._add_none_to_empty_fields(self._fov)
break
counter += 1
self._fov = \
self._add_none_to_empty_fields(self._fov)
return counter
def _create_pandas(self):
"""Transforms the dictionary into a pandas DataFrame"""
cols = ["FOV", "FOV_lookat", "FOV_upvector", "FOV_type",
"FOV_algorithm", "FOV_geometric_angles",
"FOV_geometric_pixels", "FOV_sub_view",
"FOV_straylight_angles", "FOV_straylight_duration",
"FOV_active", "FOV_image_timing", "FOV_imaging",
"FOV_pitch", "FOV_yaw"]
self.Table = pd.DataFrame(self._fov, columns=cols)
class Areas(EDF):
"""Areas class
Attributes:
Table (DataFrame): Pandas DataFrame containing the information
"""
def __init__(self):
self.Table = None
self._areas = {"Area": [], "Area_orientation": [],
"Area_lighting_angle": [], "Area_lighting_duration": []}
def _read(self, content):
"""Function that converts the input content into a dictionary
Args:
content (list): Lines where a object of this type was detected at
the beginning.
Returns:
int: number of lines used from the content
"""
counter = 0
for line in content:
line = line.split()
if len(line) > 1:
if line[0][:-1] in self._areas:
# If another AREA detected we ensure to keep same length
# of all the elements in the dictionary
if line[0] == 'Area:':
self._areas = \
self._add_none_to_empty_fields(self._areas)
self._areas[line[0][:-1]].append(' '.join(line[1:]))
elif '#' in line[0][0]:
pass
else:
self._areas = \
self._add_none_to_empty_fields(self._areas)
break
counter += 1
self._areas = \
self._add_none_to_empty_fields(self._areas)
return counter
def _create_pandas(self):
"""Transforms the dictionary into a pandas DataFrame"""
cols = ["Area", "Area_orientation", "Area_lighting_angle",
"Area_lighting_duration"]
self.Table = pd.DataFrame(self._areas, columns=cols)
class Modes(EDF):
"""Modes class
Attributes:
Table (DataFrame): Pandas DataFrame containing the information
"""
def __init__(self):
self.Table = None
self._modes = {"Mode": [], "Mode_class": [], "Module_states": [],
"Internal_clock": [], "PID_enable_flags": [],
"Nominal_power": [], "Power_parameter": [],
"Nominal_data_rate": [], "Data_rate_parameter": [],
"Mode_aux_data_rate": [], "Equivalent_power": [],
"Equivalent_data_rate": [], "Mode_transitions": [],
"Mode_actions": [], "Mode_constraints": []}
def _read(self, content):
"""Function that converts the input content into a dictionary
Args:
content (list): Lines where a object of this type was detected at
the beginning.
Returns:
int: number of lines used from the content
"""
counter = 0
for line in content:
line = line.split()
if len(line) > 1:
if line[0][:-1] in self._modes:
# If another MODE detected we ensure to keep same
# length of all the elements in the dictionary
if line[0][:-1].upper() == 'MODE':
self._modes = \
self._add_none_to_empty_fields(self._modes)
self._modes[line[0][:-1]].append(' '.join(line[1:]))
elif '#' in line[0][0]:
pass
else:
self._modes = \
self._add_none_to_empty_fields(self._modes)
break
counter += 1
self._modes = \
self._add_none_to_empty_fields(self._modes)
return counter
def _create_pandas(self):
"""Transforms the dictionary into a pandas DataFrame"""
cols = ["Mode", "Mode_class", "Module_states", "Internal_clock",
"PID_enable_flags", "Nominal_power", "Power_parameter",
"Nominal_data_rate", "Data_rate_parameter",
"Mode_aux_data_rate", "Equivalent_power",
"Equivalent_data_rate", "Mode_transitions", "Mode_actions",
"Mode_constraints"]
self.Table = pd.DataFrame(self._modes, columns=cols)
class Modules(EDF):
"""Modules Class
Attributes:
Module_states_Table (DataFrame): Pandas DataFrame containing the
information of the Module States
Table (DataFrame): Pandas DataFrame containing the information of the
Modules
"""
def __init__(self):
self.Table = None
self._modules = {"Module": [], "Module_level": [],
"Module_dataflow": [], "Module_PID": [],
"Module_aux_PID": [], "Sub_modules": [],
"Nr_of_module_states": []}
self.Module_states_Table = None
self._module_states = {"Module_state": [], "MS_PID": [],
"MS_aux_PID": [], "MS_power": [],
"MS_power_parameter": [], "MS_data_rate": [],
"MS_data_rate_parameter": [],
"MS_aux_data_rate": [], "MS_constraints": [],
"Repeat_action": [], "MS_pitch": [],
"MS_yaw": []}
def _read(self, content):
"""Function that converts the input content into a dictionary
Args:
content (list): Lines where a object of this type was detected at
the beginning.
Returns:
int: number of lines used from the content
"""
counter = 0
for line in content:
line = line.split()
if len(line) > 1:
if line[0][:-1] in self._modules:
# If another MODULE detected we ensure to keep same
# length of all the elements in the dictionary
if line[0][:-1].upper() == 'MODULE':
self._modules = \
self._add_none_to_empty_fields(self._modules)
self._modules[line[0][:-1]].append(' '.join(line[1:]))
elif line[0][:-1] in self._module_states:
# If another MODULE_STATE detected we ensure to keep
# same length of all the elements in the dictionary
if line[0][:-1].upper() == 'MODULE_STATE':
# Adding module name for every module state
if isinstance(self._modules['Module'][-1], list):
line[1] = self._modules['Module'][-1][0] \
+ " - " + line[1]
else:
line[1] = self._modules['Module'][-1] \
+ " - " + line[1]
self._module_states = \
self._add_none_to_empty_fields(self._module_states)
self._module_states[line[0][:-1]].append(
' '.join(line[1:]))
elif '#' in line[0][0]:
pass
else:
self._modules = \
self._add_none_to_empty_fields(self._modules)
self._module_states = \
self._add_none_to_empty_fields(self._module_states)
break
counter += 1
self._modules = \
self._add_none_to_empty_fields(self._modules)
self._module_states = \
self._add_none_to_empty_fields(self._module_states)
return counter
def _create_pandas(self):
"""Transforms the dictionary into a pandas DataFrame"""
cols = ["Module", "Module_level", "Module_dataflow", "Module_PID",
"Module_aux_PID", "Sub_modules", "Nr_of_module_states"]
self.Table = pd.DataFrame(self._modules, columns=cols)
cols = ["Module_state", "MS_PID", "MS_aux_PID", "MS_power",
"MS_power_parameter", "MS_data_rate",
"MS_data_rate_parameter", "MS_aux_data_rate",
"MS_constraints", "Repeat_action", "MS_pitch", "MS_yaw"]
self.Module_states_Table = pd.DataFrame(
self._module_states, columns=cols)
class Parameters(EDF):
"""Parameters Class
Attributes:
Parameter_values_Table (DataFrame): Pandas DataFrame containing the
information of the parameter values
Table (DataFrame): Pandas DataFrame containing the information of the
parameters
"""
def __init__(self):
self.Table = None
self._parameters = {"Parameter": [], "Parameter_alias": [],
"State_parameter": [], "Parameter_action": [],
"Raw_type": [], "Eng_type": [],
"Default_value": [], "Unit": [], "Raw_limits": [],
"Eng_limits": [], "Resource": [],
"Value_alias": [], "Nr_of_parameter_values": []}
self.Parameter_values_Table = None
self._parameter_values = {"Parameter_value": [], "Parameter_uas": [],
"Parameter_uwr": [], "Parameter_run": []}
def _read(self, content):
"""Function that converts the input content into a dictionary
Args:
content (list): Lines where a object of this type was detected at
the beginning.
Returns:
int: number of lines used from the content
"""
counter = 0
for line in content:
line = line.split()
if len(line) > 1:
if line[0][:-1] in self._parameters:
# If another PARAMETER detected we ensure to keep same
# length of all the elements in the dictionary
if line[0][:-1].upper() == 'PARAMETER':
self._parameters = \
self._add_none_to_empty_fields(self._parameters)
self._parameters[line[0][:-1]].append(' '.join(line[1:]))
elif line[0][:-1] in self._parameter_values:
# If another PARAMETER VALUE detected we ensure to keep
# same length of all the elements in the dictionary
if line[0][:-1].upper() == 'PARAMETER_VALUE':
# Adding module name for every module state
if isinstance(self._parameters['Parameter'][-1], list):
line[1] = self._parameters['Parameter'][-1][0] \
+ " - " + line[1]
else:
line[1] = self._parameters['Parameter'][-1] \
+ " - " + line[1]
self._parameter_values = \
self._add_none_to_empty_fields(
self._parameter_values)
self._parameter_values[line[0][:-1]].append(
' '.join(line[1:]))
elif '#' in line[0][0]:
pass
else:
self._parameters = \
self._add_none_to_empty_fields(self._parameters)
self._parameter_values = \
self._add_none_to_empty_fields(self._parameter_values)
break
counter += 1
self._parameters = \
self._add_none_to_empty_fields(self._parameters)
self._parameter_values = \
self._add_none_to_empty_fields(self._parameter_values)
return counter
def _create_pandas(self):
"""Transforms the dictionary into a pandas DataFrame"""
cols = ["Parameter", "Parameter_alias", "State_parameter",
"Parameter_action", "Raw_type", "Eng_type", "Default_value",
"Unit", "Raw_limits", "Eng_limits", "Resource",
"Value_alias", "Nr_of_parameter_values"]
self.Table = pd.DataFrame(self._parameters, columns=cols)
cols = ["Parameter_value", "Parameter_uas", "Parameter_uwr",
"Parameter_run"]
self.Parameter_values_Table = pd.DataFrame(
self._parameter_values, columns=cols)
class Actions(EDF):
"""Actions class
Attributes:
Table (DataFrame): Pandas DataFrame containing the information
"""
def __init__(self):
self.Table = None
self._actions = {"Action": [], "Action_alias": [], "Action_level": [],
"Action_type": [], "Action_subsystem": [],
"Action_parameters": [], "Internal_variables": [],
"Computed_parameters": [], "Duration": [],
"Minimum_duration": [], "Compression": [],
"Separation": [], "Action_dataflow": [],
"Action_PID": [], "Power_increase": [],
"Data_rate_increase": [], "Data_volume": [],
"Power_profile": [], "Data_rate_profile": [],
"Write_to_Z_record": [], "Action_power_check": [],
"Action_data_rate_check": [], "Obs_ID": [],
"Update_at_start": [], "Update_when_ready": [],
"Action_constraints": [], "Run_type": [],
"Run_start_time": [], "Run_actions": []}
def _read(self, content):
"""Function that converts the input content into a dictionary
Args:
content (list): Lines where a object of this type was detected at
the beginning.
Returns:
int: number of lines used from the content
"""
counter = 0
for line in content:
line = line.split()
if len(line) > 1:
if line[0][:-1] in self._actions:
# If another ACTION detected we ensure to keep same
# length of all the elements in the dictionary
if line[0][:-1].upper() == 'ACTION':
self._actions = \
self._add_none_to_empty_fields(self._actions)
self._actions[line[0][:-1]].append(' '.join(line[1:]))
elif '#' in line[0][0]:
pass
else:
self._actions = \
self._add_none_to_empty_fields(self._actions)
break
counter += 1
self._actions = \
self._add_none_to_empty_fields(self._actions)
return counter
def _create_pandas(self):
"""Transforms the dictionary into a pandas DataFrame"""
cols = ["Action", "Action_alias", "Action_level", "Action_type",
"Action_subsystem", "Action_parameters", "Internal_variables",
"Computed_parameters", "Duration", "Minimum_duration",
"Compression", "Separation", "Action_dataflow", "Action_PID",
"Power_increase", "Data_rate_increase", "Data_volume",
"Power_profile", "Data_rate_profile", "Write_to_Z_record",
"Action_power_check", "Action_data_rate_check", "Obs_ID",
"Update_at_start", "Update_when_ready", "Action_constraints",
"Run_type", "Run_start_time", "Run_actions"]
self.Table = pd.DataFrame(self._actions, columns=cols)
class Constraints(EDF):
"""Constraints class
Attributes:
Table (DataFrame): Pandas DataFrame containing the information
"""
def __init__(self):
self.Table = None
self._constraints = {"Constraint": [], "Constraint_type": [],
"Severity": [], "Constraint_group": [],
"Condition": [], "Resource_constraint": [],
"Resource_mass_memory": [],
"Parameter_constraint": [],
"Condition_experiment": [], "Expression": []}
def _read(self, content):
"""Function that converts the input content into a dictionary
Args:
content (list): Lines where a object of this type was detected at
the beginning.
Returns:
int: number of lines used from the content
"""
counter = 0
for line in content:
line = line.split()
if len(line) > 1:
if line[0][:-1] in self._constraints:
# If another CONSTRAINT detected we ensure to keep same
# length of all the elements in the dictionary
if line[0][:-1].upper() == 'CONSTRAINT':
self._constraints = \
self._add_none_to_empty_fields(self._constraints)
self._constraints[line[0][:-1]].append(' '.join(line[1:]))
elif '#' in line[0][0]:
pass
else:
self._constraints = \
self._add_none_to_empty_fields(self._constraints)
break
counter += 1
self._constraints = \
self._add_none_to_empty_fields(self._constraints)
return counter
def _create_pandas(self):
"""Transforms the dictionary into a pandas DataFrame"""
cols = ["Constraint", "Constraint_type", "Severity",
"Constraint_group", "Condition", "Resource_constraint",
"Resource_mass_memory", "Parameter_constraint",
"Condition_experiment", "Expression"]
self.Table = pd.DataFrame(self._constraints, columns=cols)
```
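A minimal usage sketch for the parser above (assuming the module is importable as `pyops.edf`, matching the file path, and that `MY_EXPERIMENT.edf` is a placeholder for a real EDF file):

```python
from pyops.edf import EDF

edf = EDF('MY_EXPERIMENT.edf')
print(edf.experiment)                   # experiment name from the header
print(edf.MODES.Table.head())           # modes as a pandas DataFrame
print(edf.MODULES.Module_states_Table)  # module states, prefixed with their module name
edf._check_consistency()                # warns about include files missing from the directory
```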
#### File: pyops/pyops/maps.py
```python
from __future__ import print_function
from wand.image import Image
from lxml import etree
from datetime import datetime
from events import getMonth
import sys
import os
import re
import zipfile
import shutil
import utils
import binascii
try:
from pykml.factory import KML_ElementMaker as KML
NOKMLLIB = False
except ImportError:
NOKMLLIB = True
def _makeTime(time):
"""
    extracts time from an input line and returns a datetime object.
"""
date, time = time.split('_')
day, month, year = date.split('-')
month = getMonth(month)
hour, minute, second = time.split(':')
return datetime(int(year), int(month), int(day),
int(hour), int(minute), int(second))
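# Hedged alternative (not part of pyops): the same parse can be done in one call
# with strptime, assuming the input always uses a three-letter month
# abbreviation such as '01-Feb-1995_01:02:03'; getMonth() above is more lenient.
def _makeTimeStrptime(time):
    """Sketch: parse 'dd-Mon-yyyy_hh:mm:ss' into a datetime object."""
    return datetime.strptime(time, '%d-%b-%Y_%H:%M:%S')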
def _swathCenter(line):
"""
calculate center of swath
"""
lats = [float(line[3]), float(line[5]), float(line[7]), float(line[9])]
lngs = [float(line[4]), float(line[6]), float(line[8]), float(line[10])]
clat = min(lats) + (max(lats) - min(lats)) / 2.
clng = min(lngs) + (max(lngs) - min(lngs)) / 2.
return '{}'.format(clat), '{}'.format(clng)
def _buildSwath(line, data, polyalt=5000):
"""
So far, an ugly hack on building the KML elements
"""
# split input line
line = line.split()
# add polygon altitude to end of line tuple
line.append(polyalt)
# parse the time element of the line
dt = _makeTime(line[1])
date = dt.date()
time = dt.time()
# define time format string
format = "%Y-%m-%dT%H:%M:%SZ"
# ensure longitude is between -/+ 180 degrees
for i in [4, 6, 8, 10]:
if float(line[i]) > 180.0:
val = float(line[i]) - 360.0
line[i] = str(val)
# build the vertices of the swath (remember the first vertex has to
# repeat at the end.
vertices = []
for c in [3, 5, 7, 9, 3, 5]:
vertices.append(",".join([line[i] for i in [c + 1, c, -1]]))
# get center of swath
clat, clng = _swathCenter(line)
# create an image placemark for the kml
image = KML.Placemark(
# define name based on experiment and filter/channel
KML.name('{}: {}'.format(data['experiment'], data['filter'])),
# define description
# TODO: come up with a more flexible way of doing this...
KML.description(
'Orbit no.: {}\n'.format(
data['orbit']),
'Pericenter time (UTC): {}\n'.format(
data['pericenter time'].replace('_', ' ')),
'First image time (UTC): {}\n'.format(
data['first image time'].replace('_', ' ')),
'First image time (from pericenter): {}\n'.format(
data['first image time from pericenter'].replace('_',
' ')),
'Last image time (UTC): {}\n'.format(
data['last image time'].replace('_', ' ')),
'Last image time (from pericenter): {}\n\n'.format(
data['last image time from pericenter'].replace('_', ' ')),
'Image sequence: {}\n'.format(line[0]),
'Image date: {}\n'.format(date),
'Image time: {}\n'.format(time),
'Orbit no.: {}\n\n'.format(
data['orbit']),
'Pericentre relative time: {}\n'.format(
line[2].replace('_', ' ')),
'Duration: {}\n\n'.format(line[20]),
'S/C altitude: {}\n'.format(line[21]),
'S/C latitude: {}\n'.format(line[22]),
'S/C longitude: {}\n'.format(line[23]),
'S/C target elevation: {}\n'.format(line[24]),
'S/C target azimuth: {}\n\n'.format(line[25]),
'Reflection Angle: {}\n'.format(line[27]),
'Sun target elevation: {}\n'.format(line[28]),
'Sun target azimuth: {}\n'.format(line[29]),
'Target phase: {}\n'.format(line[30]),
'Target elongation: {}\n'.format(line[31]),
'Local Time: {}\n'.format(line[32]),
'Image smear: {}\n'.format(line[33]),
'Mercury True Anomaly: {}\n'.format(line[35])
),
# specify appearance time
KML.TimeSpan(
#KML.begin(str(tempTime))
KML.begin(dt.strftime(format))
),
# the style for this swath has been mapped in <swath>stylemap
KML.styleUrl('#{}stylemap'.format(data['filter'])),
#KML.styleUrl('#{}1'.format(data['filter'])),
# define where the 'eye' looks when this swath is double clicked
KML.LookAt(
KML.longitude(clng),
KML.latitude(clat),
KML.altitude('5000'),
KML.heading('0'),
KML.tilt('30'),
KML.roll('0'),
KML.altitudeMode('relativeToGround'),
KML.range('1500000')
),
# defined the geometry object that will hold the swath polygon
KML.MultiGeometry(
# defined the swath polygon
KML.Polygon(
#KML.tessellate('1'),
KML.altitudeMode('relativeToGround'),
#KML.altitudeMode('clampedToGround'),
KML.outerBoundaryIs(
KML.LinearRing(
KML.coordinates(
" ".join(vertices)
)
)
)
)
)
)
return image
def _docSkel(name):
name = re.sub('[^a-zA-Z0-9\n\.]', ' ', name.split('.')[0]).title()
doc = KML.kml(
KML.Document(
KML.name(name),
KML.open('1'),
KML.visibility('1'),
# uncomment the following if you want to hide the children
# KML.Style(
# KML.ListStyle(
# KML.listItemType('checkHideChildren')
# ),
# id='check-hide-children',
# ),
# KML.styleUrl('#check-hide-children'),
)
)
return doc
def _makeStyle(name, color):
"""
Build swath pairs and map...
"""
# style for the normal state of a swath
stylen = KML.Style(
KML.IconStyle(
KML.scale('0.4'),
KML.Icon(
KML.href('http://maps.google.com/mapfiles/kml/shapes/star.png')
)
),
KML.LineStyle(
KML.color('ff{}'.format(color)),
KML.width(2.0)
),
KML.LabelStyle(
KML.color('990000ff'),
KML.width('2')
),
KML.PolyStyle(
KML.color('00{}'.format(color)),
KML.fill('1'),
KML.outline('1')
),
id='{}n'.format(name)
)
# style for the 'mouse-over' state of a swath
styleh = KML.Style(
KML.IconStyle(
KML.scale('0.8'),
KML.Icon(
KML.href('http://maps.google.com/mapfiles/kml/shapes/star.png')
)
),
KML.LineStyle(
KML.color('ffff4158'),
KML.width(1.5)
),
KML.LabelStyle(
KML.color('990000ff'),
KML.width('2')
),
KML.PolyStyle(
KML.color('fff7fff'),
KML.fill('1'),
KML.outline('1')
),
id='{}h'.format(name)
)
# mapping of above styles
stylem = KML.StyleMap(
KML.Pair(
KML.key('normal'),
KML.styleUrl('#{}n'.format(name))
),
KML.Pair(
KML.key('highlight'),
KML.styleUrl('#{}h'.format(name))
),
id='{}stylemap'.format(name)
)
# Expand to make the style simpler...
# kurl = 'http://maps.google.com/mapfiles/kml/shapes/star.png'
# style1 = KML.Style(
# KML.IconStyle(
# KML.scale('0.4'),
# KML.Icon(
# KML.href(kurl)
# )
# ),
# KML.LabelStyle(
# KML.color('990000ff'),
# KML.width('2')
# ),
# KML.LineStyle(
# KML.color('ff0000ff'),
# KML.width(2.0)
# ),
# KML.PolyStyle(
# KML.color('997f7fff'),
# KML.fill('1'),
# KML.outline('1')
# ),
# id='{}1'.format(name),
# )
return stylen, styleh, stylem
def _buildKML(input, styles, experiments):
"""
Put all the pieces together...
"""
# create a KML file skeleton
doc = _docSkel(input)
# add the styles
for style in styles:
for part in style:
doc.Document.append(part)
# add the experiments
for experiment in experiments.keys():
doc.Document.append(experiments[experiment])
return doc
def _writeKML(input, doc):
"""
create and write to a KML file with the same name as
the input file.
"""
kmlName = "{}.kml".format(input.rsplit(".", 1)[0])
outfile = open(kmlName, 'w')
print(etree.tostring(doc, pretty_print=True), file=outfile)
print("\nXML Doc written to {}.\n".format(kmlName))
outfile.close()
def _gmapsusage(filename):
"""
Construct a usage string.
"""
usage = "{} <mapps_image_dump_file> [path_to_mapps_config_file]".format(
filename)
# Print usage string.
print("\n[Usage]: python {}\n".format(usage))
def gmaps(input, configFile):
"""
    Check and deal with command line arguments.
"""
if NOKMLLIB:
print("\nOoops! 'gmaps' needs KML_ElementMaker from pykml.factory")
print(" Try: pip install pykml\n")
else:
# # Check input arguments
# if len(sys.argv) < 2 or len(sys.argv) > 3:
# # ... show usage hint...
# _gmapsusage(sys.argv[0])
# # ... exit!
# raise SystemExit(1)
# input = sys.argv[1]
# if sys.argv[2]:
# configFile = sys.argv[2]
# else:
# configFile = False
# create containers
experiments = {}
filters = {}
currents = {}
styles = []
# Open input file for reading.
infile = open(input, 'r')
# Scan through the file line-by-line.
# TODO: look into moving this for loop into a function
for line in infile.readlines():
if line.startswith('Trail'):
break
# TODO: Replace the crude pattern matching below with RegEx...
if line.startswith('Experiment:'):
expr = line.split(': ')[1].strip()
if expr not in experiments:
experiments[expr] = KML.Folder(KML.name(expr.replace('_',
' ')),
KML.open('1'),
id='expr_{}'.format(expr))
currents['experiment'] = expr
if line.startswith('Swath:'):
fltr = line.split(': ')[1].strip()
if fltr not in filters:
filters[fltr] = KML.Folder(KML.name(fltr.replace('_', ' ')),
KML.open('0'),
KML.visibility('1'),
KML.Style(KML.ListStyle(KML.listItemType('checkHideChildren')), id='check-hide-children'),
KML.styleUrl('#check-hide-children'),
id='fltr_{}'.format(fltr))
experiments[currents['experiment']].append(filters[fltr])
currents['filter'] = fltr
if line.startswith('Orbit:'):
orbit = line.split()[1].strip()
currents['orbit'] = orbit
if line.startswith('Pericenter time (UTC):'):
peric_time = line.split(': ')[1].strip()
currents['pericenter time'] = peric_time
if line.startswith('First image time (UTC):'):
first_image_t = line.split(': ')[1].strip()
currents['first image time'] = first_image_t
if line.startswith('First image time (from pericenter):'):
first_image_t_frm_peric = line.split(': ')[1].strip()
currents['first image time from pericenter'] = first_image_t_frm_peric
if line.startswith('Last image time (UTC):'):
last_image_t = line.split(': ')[1].strip()
currents['last image time'] = last_image_t
if line.startswith('Last image time (from pericenter):'):
last_image_t_frm_peric = line.split(': ')[1].strip()
currents['last image time from pericenter'] = last_image_t_frm_peric
# build an 'image' placemark element
if line.startswith(' '):
image = _buildSwath(line, currents)
filters[currents['filter']].append(image)
infile.close()
# the styles for the different swaths
colors = {}
# if the MAPPS ini has been provided get colours from it.
if configFile:
inifile = open(configFile, 'r')
for line in inifile.readlines():
if '\swathColorName=' in line:
cHTML = line.rsplit("=#", 1)[1].strip()
cKML = '{}{}{}'.format(cHTML[4:6], cHTML[2:4], cHTML[0:2])
#print(cHTML, cKML)
bits = line.split('\\')[4]
#print(bits)
colors[bits] = cKML
else:
            for fltr in filters.keys():
cKML = binascii.b2a_hex(os.urandom(4))
colors[fltr] = cKML
# colors = ['641400E6', '6414F03C', '647828F0',
# '647828F0', '64F0FF14', '6478FFF0']
for fltr in filters.keys():
styles.append(_makeStyle(fltr, colors[fltr]))
# build the KML file
doc = _buildKML(input, styles, experiments)
# TODO: fix schema checking...
# schema_gx = Schema("kml22gx.xsd")
# print(schema_gx.assertValid(doc))
# write xml structure to kml file
_writeKML(input, doc)
def splitter(originalFile, no_levels=3, zip=False, inichunk=False,
demo=False):
if demo:
        print('\nMAPPS Map Splitter **DEMO**, v0.1, 2014.')
else:
        print('\nMAPPS Map Splitter, v0.1, 2014.')
sys.stdout.write("\n Importing original image...")
sys.stdout.flush()
img = Image(filename=originalFile)
sys.stdout.write(" complete.\n")
sys.stdout.flush()
imgwidth = img.width
imgheight = img.height
if imgwidth / imgheight != 2:
print('\n Ooops!!! The Image Width to Height ratio should be 2!!!')
return
else:
stem = originalFile.split('.')[0]
if not os.path.exists(stem):
os.makedirs(stem)
else:
print('\n Uh-oh! The directory {} already exists.'.format(stem))
if utils.yesno(' Do you want to replace it?'):
shutil.rmtree(stem)
os.makedirs(stem)
else:
return
levels = range(1, no_levels + 1)
for level in levels:
print('\n Processing Level {}'.format(level))
split = 2 ** level
segs = range(split)
div = 1. / split
for h in segs:
for w in segs:
w1 = int(imgwidth * div * w)
w2 = int(imgwidth * div * (w + 1))
h1 = int(imgheight * div * h)
h2 = int(imgheight * div * (h + 1))
imgtmp = img[w1:w2, h1:h2]
# print(w1, w2, h1, h2)
imgtmp.transform(resize='1440x720!')
imgtmp.format = 'jpeg'
hlevel = '{0:03d}'.format(h + 1)
wlevel = '{0:03d}'.format(w + 1)
saveas = os.path.join(stem, '{}_{}_{}_{}.jpg'.format(
stem, level, hlevel, wlevel))
print(' Writing: {}'.format(saveas))
imgtmp.save(filename=saveas)
if imgtmp.width != 1440:
print('ERROR: image width = {}\n'.format(imgtmp.width))
if imgtmp.height != 720:
print('ERROR: image height = {}\n'.format(imgtmp.height))
# process input image
img.transform(resize='1440x720')
img.format = 'jpeg'
img.save(filename=os.path.join(stem, '{}_0_001_001.jpg'.format(
stem)))
# create ini file segment
if inichunk:
utils.inifix(stem, no_levels)
# zip output
if zip:
print('\n Zipping output to {}.zip'.format(stem))
zipf = zipfile.ZipFile('{}.zip'.format(stem), 'w')
utils.zipdir('{}/'.format(stem), zipf)
zipf.close()
shutil.rmtree(stem)
print('\nFinished!\n')
return
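# Hedged note (added comment): for no_levels=2 the splitter above writes one
# 1440x720 overview tile plus 2x2 and 4x4 grids of tiles, named
# <stem>_<level>_<row>_<col>.jpg with 1-based, zero-padded row/col indices
# counted from the top-left (e.g. demo_1_001_002.jpg is the top-right tile of
# level 1); every tile is resized to 1440x720.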
def tabular(input, configFile):
"""
    Check and deal with command line arguments.
"""
# create containers
# experiments = {}
filters = {}
currents = {}
styles = []
first = True
# Open input file for reading.
infile = open(input, 'r')
header = ['seqnr','image time (UTC)','peri reltime','p1 lat','p1 long',
'p2 lat','p2 long','p3 lat','p3 long','p4 lat','p4 long',
'type','p5 lat','p5 long','p6 lat','p6 long','p7 lat','p7 long',
'p8 lat','p8 long','duration','altitude','SC latitude',
'SC longitude','SC target elev','SC target azimuth',
'distance','reflection','Sun tgt elev','Sun tgt azimuth',
'tgt phase','tgt elongation','local time','img smear',
'tg phase','perihelion','attdata']
# Scan through the file line-by-line.
# TODO: look into moving this for loop into a function
for line in infile.readlines():
if line.startswith('Trail'):
break
# TODO: Replace the crude pattern matching below with RegEx...
if line.startswith('Experiment:'):
expr = line.split(': ')[1].strip()
currents['experiment'] = expr
if line.startswith('Swath:'):
fltr = line.split(': ')[1].strip()
currents['filter'] = fltr
if line.startswith('Orbit:'):
orbit = line.split()[1].strip()
currents['orbit'] = orbit
if line.startswith('Pericenter time (UTC):'):
peric_time = line.split(': ')[1].strip()
currents['pericenter time'] = peric_time
if line.startswith('First image time (UTC):'):
first_image_t = line.split(': ')[1].strip()
currents['first image time'] = first_image_t
if line.startswith('First image time (from pericenter):'):
first_image_t_frm_peric = line.split(': ')[1].strip()
currents['first image time from pericenter'] = first_image_t_frm_peric
if line.startswith('Last image time (UTC):'):
last_image_t = line.split(': ')[1].strip()
currents['last image time'] = last_image_t
if line.startswith('Last image time (from pericenter):'):
last_image_t_frm_peric = line.split(': ')[1].strip()
currents['last image time from pericenter'] = last_image_t_frm_peric
if line.startswith('seqnr') and first:
            print(','.join(list(currents.keys()) + header))
first = False
# build an 'image' placemark element
if line.startswith(' '):
# image = _buildSwath(line, currents)
# filters[currents['filter']].append(image)
            print(','.join(list(currents.values()) + line.split()))
infile.close()
# the styles for the different swaths
colors = {}
# if the MAPPS ini has been provided get colours from it.
if configFile:
inifile = open(configFile, 'r')
for line in inifile.readlines():
if '\swathColorName=' in line:
cHTML = line.rsplit("=#", 1)[1].strip()
cKML = '{}{}{}'.format(cHTML[4:6], cHTML[2:4], cHTML[0:2])
#print(cHTML, cKML)
bits = line.split('\\')[4]
#print(bits)
colors[bits] = cKML
else:
        for fltr in filters.keys():
cKML = binascii.b2a_hex(os.urandom(4))
colors[fltr] = cKML
# colors = ['641400E6', '6414F03C', '647828F0',
# '647828F0', '64F0FF14', '6478FFF0']
for fltr in filters.keys():
styles.append(_makeStyle(fltr, colors[fltr]))
# build the KML file
# doc = _buildKML(input, styles, experiments)
# TODO: fix schema checking...
# schema_gx = Schema("kml22gx.xsd")
# print(schema_gx.assertValid(doc))
# write xml structure to kml file
# _writeKML(input, doc)
# demo wrappers for testing the above...
def gmapsdemo():
gmaps('imagedatadump.dat', '/Users/jmcaulif/Code/bepic/ESA/bepi_mapps_v65.ini')
def splitterdemo():
shutil.rmtree("demo")
splitter("demo.png", demo=True, no_levels=2)
def tabulardemo():
tabular('../sample_data/imagedatadump.dat', '/Users/jmcaulif/Code/bepic/ESA/bepi_mapps_v65.ini')
if __name__ == '__main__':
# gmapsdemo()
# splitterdemo()
tabulardemo()
```
#### File: pyops/pyops/plots.py
```python
import pandas as pd
from bokeh.plotting import figure, show, output_notebook, gridplot
from bokeh.palettes import brewer
from collections import OrderedDict
from bokeh.models import HoverTool
import numpy as np
from bokeh.models import ColumnDataSource, Range1d, FactorRange
from datetime import datetime
# BREWER_PLOT
def brewer_plot(data, instruments_all, instruments=None):
"""
This function shows two bokeh brewer plots into the ipython notebook using
the data given as a parameter. In the second one only the instruments
given in the third parameter are plotted. In the first one all of them
are plotted.
:param data: power_avg table
:type data: pandas DataFrame
:param instruments_all: All the instruments in the power_avg file
:type instruments_all: List of strings
:param instruments: Instruments to be plotted in the second plot
:type instruments: List of strings
:returns: Nothing
"""
    # Hiding annoying warnings on the top of the plot
output_notebook(hide_banner=True)
# Creating both figures
big_figure = create_plot(data, instruments_all)
small_figure = create_plot(data, instruments, big_figure.x_range)
# Plotting them together
p = gridplot([[big_figure], [small_figure]])
show(p)
def create_plot(data, instruments, x_range=None):
"""
This function creates a plot given a power_avg table and the instruments
to be plotted. Optionally an x_range to be linked to another plot can be
passed as a parameter.
:param data: module_states or modes table
:type data: pandas DataFrame
:param instruments: Instruments to be plotted
:type instruments: List of strings
:param x_range: x_range to be linked with
:type x_range: figure x_range
:returns: bokeh figure
"""
# Create a set of tools to use
tools = "resize,hover,save,pan,box_zoom,wheel_zoom,reset"
# Creating the areas to be plotted
areas = stacked(data, instruments)
# Selecting the colors for the calculated areas
colors = palette(len(areas))
# Stacking the values of each instrument
x2 = np.hstack((data.index.values[::-1], data.index.values))
# Creating the figure
if x_range is None:
f = figure(x_axis_label=data.index.name, y_axis_label='Watts',
x_axis_type="datetime", tools=tools, logo=None,
x_range=Range1d(min(data.index.values),
max(data.index.values)))
else:
f = figure(x_axis_label=data.index.name, y_axis_label='Watts',
x_axis_type="datetime", x_range=x_range, tools=tools,
logo=None)
for pos in range(len(colors)):
f.patch(x2, list(areas.values())[pos], color=colors[pos],
legend=instruments[pos], line_color=None, alpha=0.8)
# Setting the color of the line of the background
f.grid.minor_grid_line_color = '#eeeeee'
return f
def palette(number):
"""
This function returns a palette of hex colors of size number.
:param number: Amount of different colors needed
:type number: integer
:returns: list of strings
"""
if number > 40:
print ("Ooops, too many parameters, not enough colors...")
# Selecting the colors from different bokeh palettes
palette = brewer["Spectral"][11]
palette += list(reversed(brewer["RdBu"][11]))
palette += brewer["YlGnBu"][9]
palette += list(reversed(brewer["YlGn"][9]))
palette += brewer["PiYG"][11]
return palette[:number]
def stacked(df, categories):
"""
This function stacks all the power information for each instrument.
:param df: power_avg pandas DataFrame
:type df: pandas DataFrame
:param categories: categories in which the plot is going to be divided
:type categories: list of values
:returns: pandas DataFrame
"""
areas = OrderedDict()
last = np.zeros(len(df[categories[0]]))
for cat in categories:
next = last + df[cat]
areas[cat] = np.hstack((last[::-1], next))
last = next
return areas
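# Hedged sketch (not part of pyops): a minimal, self-contained illustration of
# what stacked() returns for a two-instrument power table.
def _stacked_example():
    df = pd.DataFrame({'A': [1.0, 2.0], 'B': [3.0, 4.0]})
    areas = stacked(df, ['A', 'B'])
    # areas['A'] -> [0., 0., 1., 2.]  (reversed baseline, then cumulative top)
    # areas['B'] -> [2., 1., 4., 6.]
    return areas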
# MODES_SCHEDULE
def modes_schedule(data):
    # Hiding annoying warnings on the top of the plot
output_notebook(hide_banner=True)
show(get_modes_schedule(data))
def get_modes_schedule(data, x_range=None):
"""
This function create a time line plot based on the data form modes or
module_states files.
:param data: module_states or modes table
:type data: pandas DataFrame
:returns: Nothing
"""
# Adding new column to see which instruments are changing in each entry
data = add_difference_column(data)
# Building a new table to make the data plotable by bokeh
start_end_table = build_start_end_table(data)
source = ColumnDataSource(start_end_table)
# Selecting the instruments detected in the data
instruments = [colum for colum in data if colum.upper() == colum]
instruments.sort(reverse=True)
# Creating the figure
if x_range is None:
p = figure(x_axis_type="datetime", logo=None,
x_range=Range1d(min(start_end_table['Start_time']),
max(start_end_table['End_time'])),
y_range=FactorRange(factors=instruments),
tools="resize,hover,save,pan,box_zoom,wheel_zoom,reset")
else:
p = figure(x_axis_type="datetime", logo=None,
x_range=x_range,
y_range=FactorRange(factors=instruments),
tools="resize,hover,save,pan,box_zoom,wheel_zoom,reset")
p.quad(left='Start_time', right='End_time', top='Instrument_top',
bottom='Instrument_bottom', color='Color', source=source)
# Adding the hover tool to see info when putting the mouse over the plot
hover = p.select(dict(type=HoverTool))
hover.tooltips = OrderedDict([
('Mode', '@Mode'),
('Time', '@Time')
])
return p
def add_difference_column(data):
"""
This function returns the same pandas DataFrame that it receives as a
parameter but with a new column, which contains which instrument has
changed since the last recorded state in the table.
:param data: module_states or modes table
:type data: pandas DataFrame
:returns: pandas DataFrame
"""
    # We create a list of lists, which will be the new column to add to the data
difference = [[]]
# We take the first row of the table to have the starting values
data_aux = data.transpose()
prev_row = data_aux[data_aux.columns.values[0]]
difference[0] = [element for element in prev_row.index
if prev_row[element] is not None]
    # For each entry in the table we detect which instruments are changing
# since the previous row
pos = 0
for row in data_aux:
for element in data_aux.index:
if not prev_row[element] == data_aux[row][element]:
if not len(difference) == pos + 1:
difference.append([element])
else:
difference[pos].append(element)
if not len(difference) == pos + 1:
difference.append([])
prev_row = data_aux[row]
pos += 1
# Finally we add the calculated column
data["Change"] = difference
return data
def build_start_end_table(data):
"""
    This function returns a pandas DataFrame which will be used to build a Bokeh
    plot directly from it. This DataFrame will be created from the data received as
a parameter.
:param data: module_states or modes table
:type data: pandas DataFrame
:returns: pandas DataFrame
"""
# Creating the DataFrame manually
di = {"End_time": [], "Instrument": [],
"Mode": [], "Start_time": [], "Time": []}
# Filling the new DataFrame with the instrument, mode and start time
data_aux = data.transpose()
for row in data_aux:
row_t = data_aux[row].transpose()
for instrument in row_t["Change"]:
di["End_time"].append(None)
di["Instrument"].append(instrument)
di["Mode"].append(row_t[instrument])
di["Start_time"].append(row)
di["Time"] = [str(x) for x in di["Start_time"]]
df = pd.DataFrame(di)
df = df.sort(["Start_time"], ascending=True)
instruments = [colum for colum in data if colum.upper() == colum]
# Calculating and adding the end time for each task
for ins in instruments:
shift = df.loc[df["Instrument"] == ins].shift(-1)
if len(shift) > 1:
for i in range(len(shift.index.values)):
di["End_time"][shift.index.values[i]] = \
shift["Start_time"][shift.index.values[i]]
df = pd.DataFrame(di)
    # Calculating and adding the end time for tasks with an unspecified end
for pos in range(len(df["End_time"])):
if not type(df["End_time"][pos]) is pd.tslib.Timestamp:
df.loc[pos, "End_time"] = df["Start_time"].max()
# Deleting OFF states, we don't want to plot it
df = df[df.Mode != "OFF"]
df[["End_time", "Start_time"]] = \
df[["End_time", "Start_time"]].astype(datetime)
# Creating new rows needed for making the bars wider in the plot
df["Instrument_bottom"] = [row + ":0.25" if " " in row else row + ":0.1"
for row in df["Instrument"].values]
df["Instrument_top"] = [row + ":0.75" if " " in row else row + ":0.9" for
row in df["Instrument"].values]
# Setting different colors for each different mode in the DataFrame
modes = df["Mode"].unique()
colors = dict(zip(modes, palette(len(modes))))
df["Color"] = [colors[row] for row in df["Mode"].values]
return df
# DATA_PLOT
def data_plot(data, instruments):
"""
This function shows a data plot in the ipython notebook using the given
data for the given instruments.
:param data: data_rate pandas DataFrame
:type data: pandas DataFrame
:param instruments: list of the instruments to plot
:type instruments: list of strings
:returns: nothing
"""
    # Hiding annoying warnings on the top of the plot
output_notebook(hide_banner=True)
show(get_data_plot(data, instruments))
def get_data_plot(data, instruments, x_range=None):
"""
This function returns a data rate plot bokeh figure using the given
data for the given instruments.
:param data: data_rate pandas DataFrame
:type data: pandas DataFrame
:param instruments: list of the instruments to plot
:type instruments: list of strings
:param x_range: x_range from another figure to link with
:type x_range: x_range bokeh format
:returns: bokeh figure
"""
# Creating the figure depending if we want to link it to another figure
if x_range is None:
r = figure(x_axis_type="datetime", logo=None,
x_range=Range1d(min(data.index.values),
max(data.index.values)),
tools="resize,hover,save,pan,box_zoom,wheel_zoom,reset")
else:
r = figure(x_axis_type="datetime", x_range=x_range, logo=None,
tools="resize,hover,save,pan,box_zoom,wheel_zoom,reset")
    # Getting the appropriate list of colors
colors = palette(len(instruments))
i = 0
# Transforming the multiindex dataframe into a normal one to use hover tool
d = transform_multiindex_df(data, instruments)
# Inserting the lines in the plot
for ins in instruments:
r.line(d['index'], d[ins[0] + "_" + ins[1]], color=colors[i],
legend=ins[0] + " - " + ins[1], line_width=3)
# I don't know why, but if this source is not rebuilt every single
# time, it doesn't plot correctly
source = ColumnDataSource(d)
# WARNING: THIS IS A HACK
        # Hover tool doesn't work over lines, so I have created points
r.scatter(d['index'], d[ins[0] + "_" + ins[1]], color=colors[i],
source=source, fill_color=None, size=8)
i += 1
r.title = "Data Rate"
r.grid.grid_line_alpha = 0.3
# Adding the hover tool to see info when putting the mouse over the plot
hover = r.select(dict(type=HoverTool))
hover.tooltips = OrderedDict([("Time", "@Time")] +
[(ins[0] + " - " + ins[1], "@" + ins[0] + "_"
+ ins[1]) for ins in instruments])
return r
def transform_multiindex_df(data, instruments):
"""
This function returns a pandas DataFrame without a multiindex and prepared
to be plotted and used by the hover tool when converted to the proper
format.
:param data: power usage pandas DataFrame
:type data: pandas DataFrame
:param instruments: list of the instruments to plot
:type instruments: list of strings
:returns: pandas DataFrame
"""
d = {}
d['Time'] = [str(x) for x in pd.to_datetime(data.index.values)]
d['index'] = data.index.values
for ins in instruments:
d[ins[0] + "_" + ins[1]] = \
[x[0] for x in data[ins[0], ins[1]].values.tolist()]
df = pd.DataFrame(d)
return df
# POWER_PLOT
def power_plot(data, instruments):
"""
This function shows a power plot in the ipython notebook using the given
data for the given instruments.
:param data: power usage pandas DataFrame
:type data: pandas DataFrame
:param instruments: list of the instruments to plot
:type instruments: list of strings
:returns: nothing
"""
    # Hiding annoying warnings on the top of the plot
output_notebook(hide_banner=True)
show(get_power_plot(data, instruments))
def get_power_plot(data, instruments, x_range=None):
"""
This function returns a power plot bokeh figure using the given
data for the given instruments.
:param data: data_rate pandas DataFrame
:type data: pandas DataFrame
:param instruments: list of the instruments to plot
:type instruments: list of strings
:param x_range: x_range from another figure to link with
:type x_range: x_range bokeh format
:returns: bokeh figure
"""
# Creating the figure depending if we want to link it to another figure
if x_range is None:
r = figure(x_axis_type="datetime", logo=None,
x_range=Range1d(min(data.index.values),
max(data.index.values)),
tools="resize,hover,save,pan,box_zoom,wheel_zoom,reset")
else:
r = figure(x_axis_type="datetime", x_range=x_range, logo=None,
tools="resize,hover,save,pan,box_zoom,wheel_zoom,reset")
    # Getting the appropriate list of colors
colors = palette(len(instruments))
i = 0
# Preparing a set of data to convert into a source for the hover tool
d = data.copy(deep=True)
d['Time'] = [str(x) for x in pd.to_datetime(data.index.values)]
# Inserting the lines in the plot
for ins in instruments:
r.line(data.index.values, data[ins], color=colors[i],
legend=ins, line_width=3)
# I don't know why, but if this source is not rebuilt every single
# time, it doesn't plot correctly
source = ColumnDataSource(d)
# WARNING: THIS IS A HACK
        # Hover tool doesn't work over lines, so I have created points
r.scatter(data.index.values, data[ins], color=colors[i], source=source,
fill_color=None, size=8)
i += 1
r.title = "Power"
r.grid.grid_line_alpha = 0.3
# Adding the hover tool to see info when putting the mouse over the plot
hover = r.select(dict(type=HoverTool))
hover.tooltips = OrderedDict([("Time", "@Time")] +
[(ins, "@" + ins) for ins in instruments])
return r
```
#### File: pyops/pyops/time.py
```python
from __future__ import print_function
import spiceypy as spice
from datetime import datetime, timedelta
def oem_to_datetime(oem_time_string):
"""
converts oem datetime record to python datetime object
Args:
oem_time (string): datetime string
"""
date, time = oem_time_string.split('T')
year, month, day = date.split('-')
hour, minute, second_fraction = time.split(':')
second, fraction = second_fraction.split('.')
return datetime(int(year), int(month), int(day),
int(hour), int(minute), int(second),
int(fraction[0:3])*1000)
def datetime_to_et(dtime, scale='UTC'):
"""
    convert a python datetime to SPICE ephemerides seconds (TDB)
Args:
dtime (datetime): python datetime
scale (str, optional): time scale of input time (default: UTC)
Returns:
        float: SPICE ephemerides seconds (TDB)
"""
return spice.str2et(dtime.strftime(
'%m/%d/%y %H:%M:%S.%f ({})'.format(scale)))
def et_to_datetime(et, scale='TDB'):
"""
    convert a SPICE ephemerides epoch (TDB seconds) to a python datetime
object. The default time scale returned will be TDB but can be set
to any of the accepted SPICE time scales.
Args:
        et (float): SPICE ephemerides seconds (TDB)
scale (str, optional): time scale of output time (default: TDB)
Returns:
datetime: python datetime
"""
t = spice.timout(et, 'YYYY-MON-DD HR:MN:SC.### ::{}'.format(scale), 41)
return datetime.strptime(t, '%Y-%b-%d %H:%M:%S.%f')
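# Hedged usage note (added comment): the SPICE-based conversions above only
# work after a leapseconds kernel has been furnished, e.g.
#   spice.furnsh('naif0012.tls')   # kernel file name is just an example
#   et = datetime_to_et(datetime(2020, 1, 1))
#   et_to_datetime(et, 'UTC')      # -> datetime close to 2020-01-01 00:00:00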
def et_to_utc(et):
"""Summary
convert SPICE epoch in Ephemerides seconds (TDB) to a
UTC time string.
Args:
et (float): SPICE epoch in Ephemerides seconds (TDB)
Returns:
string: UTC time
"""
return spice.et2utc(et, 'ISOC', 3, 30)
def itl_to_datetime(itltime):
"""
convert EPS ITL time format to python datetime object
Args:
        itltime (string): EPS ITL time string format
Returns:
datetime: python datetime
"""
return datetime.strptime(itltime, '%d-%b-%Y_%H:%M:%S')
def xldate_to_datetime(xldate):
"""
convert an Excel format time to python datetime object
Args:
xldate (float): days in Excel format
Returns:
datetime: python datetime
"""
temp = datetime(1900, 1, 1)
delta = timedelta(days=xldate)
return temp+delta
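# Hedged note (added comment): xldate_to_datetime assumes `xldate` counts days
# from 1900-01-01; workbooks using the classic Excel 1900 date system are
# usually converted with an epoch of 1899-12-30 (to absorb the fictitious
# 1900-02-29), so verify which convention your source data follows.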
# def mjd20002datetime(mjd2000):
# y, m, d, fd = jdcal.jd2gcal(
# jdcal.MJD_0,jdcal.MJD_JD2000+float(mjd2000)-0.5)
# hour = 24* fd
# mins = 60*(hour - int(hour))
# sec = 60*(mins - int(mins))
# usec = 1000000*(sec-int(sec))
# return dt(y, m, d, int(hour), int(mins), int(sec), int(usec))
# def datetime2et(dtime):
# return spice.str2et(dtime.strftime("%m/%d/%y %H:%M:%S.%f"))
# def mjd20002et(mjd2000):
# return datetime2et(mjd20002datetime(mjd2000))
# def et2mjd2000(et):
# return float(spice.et2utc(et, 'J', 7, 30).split(' ')[1]) - \
# jdcal.MJD_0 - jdcal.MJD_JD2000 + 0.5
# def mjd20002datetime(mjd2000):
# y, m, d, fd = jdcal.jd2gcal(jdcal.MJD_0,jdcal.MJD_JD2000+float(mjd2000)-0.5)
# hour = 24* fd
# mins = 60*(hour - int(hour))
# sec = 60*(mins - int(mins))
# usec = 1000000*(sec-int(sec))
# return dt(y, m, d, int(hour), int(mins), int(sec), int(usec))
# def datetime2et(dtime):
# return spice.str2et(dtime.strftime("%m/%d/%y %H:%M:%S.%f"))
# def mjd20002et(mjd2000):
# return datetime2et(mjd20002datetime(mjd2000))
# def et2utc(et):
# return spice.et2utc(et, 'ISOC', 3, 30)
# def et2datetime(et):
# utc = et2utc(et)
# utc_date, utc_time = utc.split('T')
# y, m, d = utc_date.split('-')
# hour, mins, sec = utc_time.split(':')
# sec, usec = sec.split('.')
# return dt(int(y), int(m), int(d), int(hour), int(mins), int(sec), int(usec))
# def et2mjd2000(et):
# return float(spice.et2utc(et, 'J', 7, 30).split(' ')[1]) - jdcal.MJD_0 - jdcal.MJD_JD2000 + 0.5
# Main function
def main():
"""
does nothing for now...
"""
print('This is a random collection of functions... TBS - to be sorted.')
if __name__ == "__main__":
main()
```
#### File: pyops/test/test_events.py
```python
from pyops import events
# Time functions
# =================================
# 1. getMonth(month) tests
def test_getMonthFromStringToInt():
"""
It should return the number of the month if given a string
    and returns the name of the month if given an int
"""
# months = ['January', 'February', 'March', 'April', 'May', 'June',
# 'July', 'August', 'September', 'October' 'November',
# 'December']
mons = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug',
'Sep', 'Oct', 'Nov', 'Dec']
number = 1
for month in mons:
assert events.getMonth(month) == number
number += 1
def test_getMonthFromIntToString():
"""
It should return the number of the month if given a string
    and returns the name of the month if given an int
"""
# months = ['January', 'February', 'March', 'April', 'May', 'June',
# 'July', 'August', 'September', 'October' 'November',
# 'December']
mons = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug',
'Sep', 'Oct', 'Nov', 'Dec']
number = 1
for month in mons:
assert events.getMonth(number) == month
number += 1
def test_getMonthUnexpectedInputs():
# TODO
assert 1 == 1
# 1. extractTime(line) tests
"""
From data.events.notes.evf
Time values
===========
- a time value may be given in the POR or EPS relative time format
- an EPS <time> field is in the format [[sign][ddd_]hh:mm:ss]
- [sign] is an optional sign ('-' or '+')
- [ddd] is an optional day number
- [ddd] may consist of one, two or three digits, and may be zero
- [hh] is the number of hours (00..23)
- [mm] is the number of minutes (00..59)
- [ss] is the number of seconds (00..59)
- [hh], [mm], [ss] must be specified and must have two characters each
- a POR relative time value is in the format [[-][ddd.]hh:mm:ss[.mmm]]
- [ddd] is the optional number of days
- [hh], [mm], [ss] is defined similarly as above
- [.mmm] is optional and specifies the number of milliseconds
- the EPS software will always ignore the [.mmm] value
"""
# def test_extract_time_eps_format():
"""
extracts time from an event file input line
and returns a datetime object.
"""
"""
eps_line = '01:02:03 SUN_IN_FOV (EXP = OSIRIS ITEM = NAC)'
time = events.extractTime(eps_line)
assert time.day == 0
assert time.hour == 1
assert time.minute == 2
assert time.second == 3
eps_line = '0_01:02:03 SUN_IN_FOV (EXP = OSIRIS ITEM = NAC)'
time = events.extractTime(eps_line)
assert time.day == 0
    assert time.hour == 1
    assert time.minute == 2
assert time.second == 3
eps_line = '23_01:02:03 SUN_IN_FOV (EXP = OSIRIS ITEM = NAC)'
time = events.extractTime(eps_line)
assert time.day == 23
assert time.hour == 1
assert time.minute == 2
assert time.second == 3
eps_line = '223_01:02:03 SUN_IN_FOV (EXP = OSIRIS ITEM = NAC)'
time = events.extractTime(eps_line)
assert time.day == 223
assert time.hour == 1
assert time.minute == 2
assert time.second == 3
eps_line = '+23_01:02:03 SUN_IN_FOV (EXP = OSIRIS ITEM = NAC)'
time = events.extractTime(eps_line)
assert time.day == 23
assert time.hour == 1
assert time.minute == 2
assert time.second == 3
eps_line = '-23_01:02:03 SUN_IN_FOV (EXP = OSIRIS ITEM = NAC)'
time = events.extractTime(eps_line)
assert time.day == -23
assert time.hour == 1
assert time.minute == 2
assert time.second == 3
def test_extract_time_por_format():
"""
"""
extracts time from an event file input line
and returns a datetime object.
"""
"""
por_line = '01:02:03 SUN_IN_FOV (EXP = OSIRIS ITEM = NAC)'
time = events.extractTime(por_line)
assert time.day == 0
assert time.hour == 1
assert time.minute == 2
assert time.second == 3
por_line = '0.01:02:03 SUN_IN_FOV (EXP = OSIRIS ITEM = NAC)'
time = events.extractTime(por_line)
assert time.day == 0
assert time.hour == 1
assert time.minute == 2
assert time.second == 3
por_line = '23.01:02:03 SUN_IN_FOV (EXP = OSIRIS ITEM = NAC)'
time = events.extractTime(por_line)
assert time.day == 23
assert time.hour == 1
assert time.minute == 2
assert time.second == 3
por_line = '223.01:02:03 SUN_IN_FOV (EXP = OSIRIS ITEM = NAC)'
time = events.extractTime(por_line)
assert time.day == 223
assert time.hour == 1
assert time.minute == 2
assert time.second == 3
por_line = '-23.01:02:03 SUN_IN_FOV (EXP = OSIRIS ITEM = NAC)'
time = events.extractTime(por_line)
assert time.day == -23
assert time.hour == 1
assert time.minute == 2
assert time.second == 3
por_line = '-23.01:02:03.385 SUN_IN_FOV (EXP = OSIRIS ITEM = NAC)'
time = events.extractTime(por_line)
assert time.day == -23
assert time.hour == 1
assert time.minute == 2
assert time.second == 3
assert time.millisecond == 385
"""
# 1. extractDate(line) tests
"""
From data.events.notes.evf
Date values
===========
- a date value may be given in the POR or EPS absolute time format
- an EPS <date> value is in the format [dd-month-yyyy[_hh:mm:ss]]
- [dd] is the day number
- [dd] may consist of one or two digits, and may start with zero
- [month] is the full (spelled out) month or any abbreviation
with a minimum of 3 characters
- [yyyy] is the full year number
- [_hh:mm:ss] is optional and is defined similarly as in the time format
- the '_' character is mandatory here if the time of the day is given
- the time of the day defaults to _00:00:00
- a POR absolute time value is in the format [yy-dddThh:mm:ss[.mmm]Z]
- [yy] is the year in the 21st century and must have two characters
- [ddd] is the day number within the year, counting from 1
- [hh:mm:ss] is defined similarly as in the time format
- [.mmm] is optional and specifies the number of milliseconds
- the EPS software will always ignore the [.mmm] value
"""
# def test_extract_date_eps_format():
"""
extracts date from an event file input line
and returns a datetime object.
"""
"""
eps_line = '1-Feb-1995 SUN_IN_FOV (EXP = OSIRIS ITEM = NAC)'
time = events.extractTime(eps_line)
assert time.day == 1
assert time.month == 2
assert time.year == 1995
eps_line = '01-Feb-1995_1:02:03 SUN_IN_FOV (EXP = OSIRIS ITEM = NAC)'
time = events.extractTime(eps_line)
assert time.day == 1
assert time.month == 2
assert time.year == 1995
assert time.hour == 1
assert time.minute == 2
assert time.second == 3
eps_line = '1-February-1995_01:02:3 SUN_IN_FOV (EXP = OSIRIS ITEM = NAC)'
time = events.extractTime(eps_line)
assert time.day == 1
assert time.month == 3
assert time.year == 1995
assert time.hour == 1
assert time.minute == 2
assert time.second == 3
"""
# def test_extract_date_por_format():
"""
extracts date from an event file input line
and returns a datetime object.
"""
"""
por_line = '02-23T01:02:03Z SUN_IN_FOV (EXP = OSIRIS ITEM = NAC)'
time = events.extractTime(por_line)
assert time.year == 2002
assert time.day == 23
assert time.month == 1
assert time.hours == 1
assert time.minutes == 2
assert time.seconds == 3
assert time.microsecond == 0
por_line = '02-223T01:02:03Z SUN_IN_FOV (EXP = OSIRIS ITEM = NAC)'
time = events.extractTime(por_line)
assert time.year == 2002
assert time.day == 11
assert time.month == 8
assert time.hours == 1
assert time.minutes == 2
assert time.seconds == 3
assert time.microsecond == 0
por_line = '02-223T01:02:03.125Z SUN_IN_FOV (EXP = OSIRIS ITEM = NAC)'
time = events.extractTime(por_line)
assert time.year == 2002
assert time.day == 11
assert time.month == 8
assert time.hours == 1
assert time.minutes == 2
assert time.seconds == 3
assert time.microsecond == 125000
"""
``` |
{
"source": "johnnycakes79/SpiceyPy",
"score": 2
} |
#### File: johnnycakes79/SpiceyPy/setup.py
```python
__author__ = 'AndrewAnnex'
from setuptools import setup
from setuptools.command.test import test as TestCommand
import sys
import getspice
import test.gettestkernels as getTestKernels
import os
import subprocess
# Get current working directory
root_dir = os.path.dirname(os.path.realpath(__file__))
# Make the directory path for cspice
cspice_dir = os.path.join(root_dir, 'cspice')
# Make the directory path for cspice/lib
lib_dir = os.path.join(cspice_dir, 'lib')
data_files = []
# py.test integration from pytest.org
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
getTestKernels.downloadKernels()
import pytest
errcode = pytest.main(self.test_args)
sys.exit(errcode)
def check_for_spice():
if not os.path.exists(cspice_dir):
message = 'Unable to find CSPICE at {0}. Attempting to Download CSPICE For you:'.format(cspice_dir)
print(message)
# Download cspice using getspice.py
getspice.getSpice()
if not os.path.exists(cspice_dir):
message = 'Unable to find CSPICE at {0}. Exiting'.format(cspice_dir)
sys.exit(message)
def unpack_cspicelib():
libfile_path = os.path.join(cspice_dir, 'lib', 'cspice.a')
if not os.path.exists(libfile_path):
messageerr = 'Error, cannot find %s/lib/cspice.a , exiting' % cspice_dir
sys.exit(messageerr)
currentDir = os.getcwd()
try:
os.chdir(lib_dir)
unpackCspice = subprocess.Popen('ar -x cspice.a', shell=True)
status = os.waitpid(unpackCspice.pid, 0)[1]
if status != 0:
raise BaseException('%d' % status)
except BaseException as errorInst:
status = errorInst.args
sys.exit('Error: cspice .o file extraction failed with exit status: %d' % status)
finally:
os.chdir(currentDir)
def unpack_csupportlib():
libfile_path = os.path.join(cspice_dir, 'lib', 'csupport.a')
if not os.path.exists(libfile_path):
messageerr = 'Error, cannot find %s/lib/csupport.a , exiting' % cspice_dir
sys.exit(messageerr)
currentDir = os.getcwd()
try:
os.chdir(lib_dir)
unpackCsupport = subprocess.Popen('ar -x csupport.a', shell=True)
status = os.waitpid(unpackCsupport.pid, 0)[1]
if status != 0:
raise BaseException('%d' % status)
except BaseException as errorInst:
status = errorInst.args
sys.exit('Error: csupport .o file extraction failed with exit status: %d' % status)
finally:
os.chdir(currentDir)
def build_library():
currentDir = os.getcwd()
try:
os.chdir(lib_dir)
#find a way to make this work via Extension and setuptools, not using popen.
build_lib = subprocess.Popen('gcc -shared -fPIC -lm *.o -o spice.so', shell=True)
status = os.waitpid(build_lib.pid, 0)[1]
if status != 0:
raise BaseException('%d' % status)
except BaseException as errorInst:
status = errorInst.args
sys.exit('Error: compilation of shared spice.so build exit status: %d' % status)
finally:
os.chdir(currentDir)
def move_to_root_directory():
try:
os.rename(os.path.join(cspice_dir, 'lib', 'spice.so'), os.path.join(root_dir, 'SpiceyPy', 'spice.so'))
except FileNotFoundError:
        sys.exit('spice.so file not found, what happened?')
def cleanup():
# Delete the extra files created by this install script
os.chdir(lib_dir)
currentDir = os.getcwd()
cleanupList = [file for file in os.listdir(currentDir) if file.endswith('.o') or file.endswith('.so')]
for file in cleanupList:
os.remove(file)
pass
try:
#First check for spice
check_for_spice()
#Next unpack cspice.a
unpack_cspicelib()
#Next unpack csupport.a
unpack_csupportlib()
#Build the shared Library
build_library()
#Move to correct location (root of the distribution)
move_to_root_directory()
setup(
name='SpiceyPy',
version='0.5.3',
description='A Python Wrapper for the NAIF CSPICE Toolkit made using ctypes',
url='https://github.com/AndrewAnnex/SpiceyPy',
author='<NAME>',
packages=['SpiceyPy'],
tests_require=['pytest', 'numpy', 'six'],
cmdclass={'test': PyTest},
test_suite='test.test_wrapper.py',
requires=['numpy', 'pytest', 'coveralls', 'coverage', 'six'],
package_data={'SpiceyPy': ['*.so']},
include_package_data=True,
zip_safe=False,
classifiers=[
"Development Status :: 4 - Beta",
"Natural Language :: English",
"Topic :: Scientific/Engineering",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Operating System :: MacOS :: MacOS X",
"Operating System :: POSIX :: Linux"
],
extras_require={
'testing': ['pytest'],
}
)
finally:
cleanup()
``` |
{
"source": "johnnychiuchiu/Machine-Learning",
"score": 3
} |
#### File: Machine-Learning/RecommenderSystem/collaborativeFiltering.py
```python
import pandas as pd
from sklearn.cross_validation import train_test_split
import numpy as np
from sklearn.metrics import mean_squared_error
import os
class collaborativeFiltering():
def __init__(self):
pass
def readSongData(self, top):
"""
Read song data from targeted url
"""
if 'song.pkl' in os.listdir('_data/'):
song_df = pd.read_pickle('_data/song.pkl')
else:
# Read userid-songid-listen_count triplets
# This step might take time to download data from external sources
triplets_file = 'https://static.turi.com/datasets/millionsong/10000.txt'
songs_metadata_file = 'https://static.turi.com/datasets/millionsong/song_data.csv'
song_df_1 = pd.read_table(triplets_file, header=None)
song_df_1.columns = ['user_id', 'song_id', 'listen_count']
# Read song metadata
song_df_2 = pd.read_csv(songs_metadata_file)
# Merge the two dataframes above to create input dataframe for recommender systems
song_df = pd.merge(song_df_1, song_df_2.drop_duplicates(['song_id']), on="song_id", how="left")
# Merge song title and artist_name columns to make a merged column
song_df['song'] = song_df['title'].map(str) + " - " + song_df['artist_name']
n_users = song_df.user_id.unique().shape[0]
n_items = song_df.song_id.unique().shape[0]
print(str(n_users) + ' users')
print(str(n_items) + ' items')
song_df.to_pickle('_data/song.pkl')
# keep top_n rows of the data
song_df = song_df.head(top)
song_df = self.drop_freq_low(song_df)
return(song_df)
def drop_freq_low(self, song_df):
freq_df = song_df.groupby(['user_id']).agg({'song_id': 'count'}).reset_index(level=['user_id'])
below_userid = freq_df[freq_df.song_id <= 5]['user_id']
new_song_df = song_df[~song_df.user_id.isin(below_userid)]
return(new_song_df)
def utilityMatrix(self, song_df):
"""
Transform dataframe into utility matrix, return both dataframe and matrix format
:param song_df: a dataframe that contains user_id, song_id, and listen_count
:return: dataframe, matrix
"""
song_reshape = song_df.pivot(index='user_id', columns='song_id', values='listen_count')
song_reshape = song_reshape.fillna(0)
ratings = song_reshape.as_matrix()
return(song_reshape, ratings)
def fast_similarity(self, ratings, kind='user', epsilon=1e-9):
"""
Calculate the similarity of the rating matrix
:param ratings: utility matrix
:param kind: user-user sim or item-item sim
        :param epsilon: small number for handling divide-by-zero errors
        :returns: similarity matrix
"""
if kind == 'user':
sim = ratings.dot(ratings.T) + epsilon
elif kind == 'item':
sim = ratings.T.dot(ratings) + epsilon
norms = np.array([np.sqrt(np.diagonal(sim))])
return (sim / norms / norms.T)
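    # Hedged aside (added comment): for kind='user' this is essentially cosine
    # similarity between the rows of `ratings` (plus the epsilon smoothing);
    # sklearn.metrics.pairwise.cosine_similarity(ratings) gives an equivalent
    # matrix, and cosine_similarity(ratings.T) the item-item counterpart.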
def predict_fast_simple(self, ratings, kind='user'):
"""
Calculate the predicted score of every song for every user.
:param ratings: utility matrix
:param kind: user-user sim or item-item sim
:return: matrix contains the predicted scores
"""
similarity = self.fast_similarity(ratings, kind)
if kind == 'user':
return similarity.dot(ratings) / np.array([np.abs(similarity).sum(axis=1)]).T
elif kind == 'item':
return ratings.dot(similarity) / np.array([np.abs(similarity).sum(axis=1)])
def get_overall_recommend(self, ratings, song_reshape, user_prediction, top_n=10):
"""
        get the top_n predicted results for every user. Note that the recommended
        items should be songs that the user has not listened to before.
:param ratings: utility matrix
:param song_reshape: utility matrix in dataframe format
:param user_prediction: matrix with predicted score
:param top_n: the number of recommended song
:return: a dict contains recommended songs for every user_id
"""
result = dict({})
for i, row in enumerate(ratings):
user_id = song_reshape.index[i]
result[user_id] = {}
zero_item_list = np.where(row == 0)[0]
prob_list = user_prediction[i][np.where(row == 0)[0]]
song_id_list = np.array(song_reshape.columns)[zero_item_list]
result[user_id]['recommend'] = sorted(zip(song_id_list, prob_list), key=lambda item: item[1], reverse=True)[
0:top_n]
return (result)
def get_user_recommend(self, user_id, overall_recommend, song_df):
"""
Get the recommended songs for a particular user using the song information from the song_df
:param user_id:
:param overall_recommend:
:return:
"""
user_score = pd.DataFrame(overall_recommend[user_id]['recommend']).rename(columns={0: 'song_id', 1: 'score'})
user_recommend = pd.merge(user_score,
song_df[['song_id', 'title', 'release', 'artist_name', 'song']].drop_duplicates(),
on='song_id', how='left')
return (user_recommend)
def createNewObs(self, artistName, song_reshape, index_name):
"""
        Append a new row, indexed by ``index_name``, for a user interested in the
        given artists. Note: this relies on the module-level ``song_df`` created
        when the file is run as a script.
        :param artistName: a list of artist names
        :param song_reshape: utility matrix in dataframe format
        :param index_name: index label for the new user row
        :return: dataframe, matrix
"""
interest = []
for i in song_reshape.columns:
if i in song_df[song_df.artist_name.isin(artistName)]['song_id'].unique():
interest.append(1)
else:
interest.append(0)
print(pd.Series(interest).value_counts())
newobs = pd.DataFrame([interest],
columns=song_reshape.columns)
newobs.index = [index_name]
new_song_reshape = pd.concat([song_reshape, newobs])
new_ratings = new_song_reshape.as_matrix()
return (new_song_reshape, new_ratings)
class ExplicitMF:
"""
Train a matrix factorization model using Alternating Least Squares
to predict empty entries in a matrix
Parameters
----------
n_iters : int
number of iterations to train the algorithm
n_factors : int
number of latent factors to use in matrix
factorization model, some machine-learning libraries
denote this as rank
reg : float
regularization term for item/user latent factors,
since lambda is a keyword in python we use reg instead
"""
def __init__(self, n_iters, n_factors, reg):
self.reg = reg
self.n_iters = n_iters
self.n_factors = n_factors
def fit(self, train):#, test
"""
        Fit the model on the training matrix, assumed to be a User x Item
        matrix with cells as ratings; the test-set convergence tracking
        below is currently commented out.
"""
self.n_user, self.n_item = train.shape
self.user_factors = np.random.random((self.n_user, self.n_factors))
self.item_factors = np.random.random((self.n_item, self.n_factors))
# record the training and testing mse for every iteration
# to show convergence later (usually, not worth it for production)
# self.test_mse_record = []
# self.train_mse_record = []
for _ in range(self.n_iters):
self.user_factors = self._als_step(train, self.user_factors, self.item_factors)
self.item_factors = self._als_step(train.T, self.item_factors, self.user_factors)
predictions = self.predict()
print(predictions)
# test_mse = self.compute_mse(test, predictions)
# train_mse = self.compute_mse(train, predictions)
# self.test_mse_record.append(test_mse)
# self.train_mse_record.append(train_mse)
return self
def _als_step(self, ratings, solve_vecs, fixed_vecs):
"""
when updating the user matrix,
the item matrix is the fixed vector and vice versa
"""
A = fixed_vecs.T.dot(fixed_vecs) + np.eye(self.n_factors) * self.reg
b = ratings.dot(fixed_vecs)
A_inv = np.linalg.inv(A)
solve_vecs = b.dot(A_inv)
return solve_vecs
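    # Hedged aside (added comment): each half-step solves the regularized normal
    # equations (F^T F + reg * I) x_u = F^T r_u for every row r_u of `ratings`
    # at once, with F the fixed factor matrix; fit() alternates which side is
    # held fixed, which is the "alternating" in Alternating Least Squares.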
def predict(self):
"""predict ratings for every user and item"""
pred = self.user_factors.dot(self.item_factors.T)
return pred
def compute_mse(y_true, y_pred):
"""ignore zero terms prior to comparing the mse"""
mask = np.nonzero(y_true)
mse = mean_squared_error(y_true[mask], y_pred[mask])
return mse
def create_train_test(ratings):
"""
split into training and test sets,
remove 3 ratings from each user
and assign them to the test set
"""
test = np.zeros(ratings.shape)
train = ratings.copy()
for user in range(ratings.shape[0]):
test_index = np.random.choice(
np.flatnonzero(ratings[user]), size=3, replace=False)
train[user, test_index] = 0.0
test[user, test_index] = ratings[user, test_index]
# assert that training and testing set are truly disjoint
assert np.all(train * test == 0)
return (train, test)
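# Hedged note (added comment): np.random.choice(..., size=3, replace=False)
# requires every user to have at least 3 nonzero ratings, which is one reason
# collaborativeFiltering.drop_freq_low() removes users with 5 or fewer plays
# before the utility matrix is built.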
if __name__=="__main__":
cf = collaborativeFiltering()
song_df = cf.readSongData(top=10000)
# Get the utility matrix
song_reshape, ratings = cf.utilityMatrix(song_df)
    # Append new rows to simulate users with specific artist preferences
song_reshape, ratings = cf.createNewObs(['Beyoncé', '<NAME>', 'Alicia Keys'], song_reshape, 'GirlFan')
song_reshape, ratings = cf.createNewObs(['Metallica', 'Guns N\' Roses', 'Linkin Park', 'Red Hot Chili Peppers'],
song_reshape, 'HeavyFan')
song_reshape, ratings = cf.createNewObs(['Daft Punk','<NAME>','Hot Chip','Coldplay','Alicia Keys'],
song_reshape, 'Johnny')
song_reshape = song_reshape.copy()
train, test = create_train_test(ratings)
# Calculate user-user collaborative filtering
user_prediction = cf.predict_fast_simple(train, kind='user')
user_overall_recommend = cf.get_overall_recommend(train, song_reshape, user_prediction, top_n=10)
user_recommend_girl = cf.get_user_recommend('GirlFan', user_overall_recommend, song_df)
user_recommend_heavy = cf.get_user_recommend('HeavyFan', user_overall_recommend, song_df)
user_recommend_johnny = cf.get_user_recommend('Johnny', user_overall_recommend, song_df)
user_mse = compute_mse(test, user_prediction)
# Calculate item-item collaborative filtering
item_prediction = cf.predict_fast_simple(train, kind='item')
item_overall_recommend = cf.get_overall_recommend(train, song_reshape, item_prediction, top_n=10)
item_recommend_girl = cf.get_user_recommend('GirlFan', item_overall_recommend, song_df)
item_recommend_heavy = cf.get_user_recommend('HeavyFan', item_overall_recommend, song_df)
item_recommend_johnny = cf.get_user_recommend('Johnny', item_overall_recommend, song_df)
item_mse = compute_mse(test, item_prediction)
# Recommend using Latent Factor Model
als = ExplicitMF(n_iters=50, n_factors=3, reg=0.01)
als.fit(train)
latent_prediction = als.predict()
latent_overall_recommend = cf.get_overall_recommend(train, song_reshape, latent_prediction, top_n=10)
latent_recommend_girl = cf.get_user_recommend('GirlFan', latent_overall_recommend, song_df)
latent_recommend_heavy = cf.get_user_recommend('HeavyFan', latent_overall_recommend, song_df)
latent_recommendjohnny = cf.get_user_recommend('Johnny', latent_overall_recommend, song_df)
latent_mse = compute_mse(test, latent_prediction)
pass
``` |
{
"source": "johnnychiuchiu/Music-Recommender",
"score": 4
} |
#### File: src/database/insert_data.py
```python
import pandas as pd
import os
from schema import db
import random
class ReadData():
"""
Acquire song data from the url provided by Turi and save it into local database.
"""
def __init__(self):
self.SEED = 12345
def readSongData(self):
"""
read song data from the url provided by Turi. If the data has already exist, then read data from pickle file.
Returns:
pd.DataFrame: a dataframe contain the data needed for building the recommender system
"""
if 'song.pkl' in os.listdir('../../data'):
song_df = pd.read_pickle('../../data/song.pkl')
else:
# Read userid-songid-listen_count triplets
# This step might take time to download data from external sources
triplets_file = 'https://static.turi.com/datasets/millionsong/10000.txt'
songs_metadata_file = 'https://static.turi.com/datasets/millionsong/song_data.csv'
song_df_1 = pd.read_table(triplets_file, header=None)
song_df_1.columns = ['user_id', 'song_id', 'listen_count']
# Read song metadata
song_df_2 = pd.read_csv(songs_metadata_file)
# Merge the two dataframes above to create input dataframe for recommender systems
song_df = pd.merge(song_df_1, song_df_2.drop_duplicates(['song_id']), on="song_id", how="left")
# Merge song title and artist_name columns to make a merged column
song_df['song'] = song_df['title'].map(str) + " - " + song_df['artist_name']
n_users = song_df.user_id.unique().shape[0]
n_items = song_df.song_id.unique().shape[0]
print(str(n_users) + ' users')
print(str(n_items) + ' items')
            song_df.to_pickle('../../data/song.pkl')
# # keep top_n rows of the data
# song_df = song_df.head(top)
song_df = self.drop_freq_low(song_df)
return(song_df)
def drop_freq_low(self, song_df):
"""
Remove users who listened to 5 or fewer songs (matching the ``<= 5`` filter below).
Args:
song_df (pd.DataFrame): a dataframe containing song data
Returns:
pd.DataFrame: a dataframe without users who listened to 5 or fewer songs
"""
freq_df = song_df.groupby(['user_id']).agg({'song_id': 'count'}).reset_index(level=['user_id'])
below_userid = freq_df[freq_df.song_id <= 5]['user_id']
new_song_df = song_df[~song_df.user_id.isin(below_userid)]
return(new_song_df)
def random_select_user(self, song_df, n):
"""
randomly select n users from the song dataframe
Args:
song_df (pd.DataFrame): a dataframe containing song data
n (int): number of users
Returns:
pd.DataFrame: a dataframe containing song data from n number of users
"""
# random sample n users from song_df
user_list = list(song_df.user_id.unique())
random.seed(self.SEED)
random.shuffle(user_list)
song_df = song_df[song_df.user_id.isin(user_list[0:n])]
return song_df
if __name__=='__main__':
# read song data as dataframe
song_df = ReadData().readSongData()
# random sample n users
randomsong_df = ReadData().random_select_user(song_df, 10)
# # connect to sqlite database
# conn = dbConn('../../data/song2.sqlite')
#
# # insert the dataframe into local database
# song_df.to_sql(name='Song', con=conn, if_exists='replace', index=True)
# insert the dataframe into RDS database
song_df.to_sql("Song", db.engine, if_exists='replace', index=False)
print("Song Data Inserted")
```
#### File: Music-Recommender/test/test_svd.py
```python
import pandas as pd
import sys
sys.path.append('../')
from src.models.svd import mySVD
import surprise
# set testing cases
targetSongidList = ['SOAKIMP12A8C130995','SOBBMDR12A8C13253B','SOBXHDL12A81C204C0','SOBYHAJ12A6701BF1D','SODACBL12A8C13C273']
# create svd object
svd = mySVD()
# create testdata and transform into the required format from the recommender package
newObs = svd.createNewObs(targetSongidList)
testset = svd.testGenerator(newObs)
# transform into the required format from the recommender package
trainset = svd.trainGenerator(svd.song_df, newObs)
# fit model
algo_svd = svd.fitModel(trainset)
# make final recommendation
user_recommend = svd.predictTopSong(algo_svd, testset, targetSongidList)
def test_readSongData():
"""Test method readSongData from class mySVD"""
# make sure the number of columns pull out from the database is correct
assert svd.song_df.shape[1] == 8
def test_createNewObs():
"""Test method createNewObs from class mySVD"""
# check type
assert isinstance(newObs, pd.DataFrame)
# check that exactly 5 songs have a listen count and that the generated df has 3 columns
assert newObs.query("listen_count!=0").shape == (5,3)
# check that the number of songs in song_df matches the number in the generated new df
assert len(svd.song_df.song_id.unique()) == len(newObs.song_id.unique())
def test_testGenerator():
"""Test method testGenerator from class mySVD"""
# check type
assert isinstance(testset, list)
# check the shape
assert len(testset)==newObs.shape[0]
def test_trainGenerator():
"""Test method trainGenerator from class mySVD"""
# check type
assert isinstance(trainset, surprise.trainset.Trainset)
# the number of users in trainset should equal the number of users from the database plus 1
assert len(trainset.all_users()) == len(svd.song_df.user_id.unique())+1
def test_fitModel():
"""Test method fitModel from class mySVD"""
# check type
assert isinstance(algo_svd, surprise.prediction_algorithms.matrix_factorization.SVD)
def test_predictTopSong():
"""Test method predictTopSong from class mySVD"""
user_recommend = svd.predictTopSong(algo_svd, testset, targetSongidList)
# check type
assert isinstance(user_recommend, pd.DataFrame)
# check shape
assert user_recommend.shape == (10, 6)
# check sorted
assert user_recommend.loc[0]['score'] == max(user_recommend.score)
``` |
{
"source": "JohnnyConstantin/TB_01",
"score": 3
} |
#### File: TB_01/app/logger.py
```python
import logging
class CustomFormatter(logging.Formatter):
"""Logging Formatter to add colors and count warning / errors"""
green = "\x1b[32m;21m"
lblue = "\x1b[36m"
grey = "\x1b[38;21m"
yellow = "\x1b[33;21m"
red = "\x1b[31;21m"
bold_red = "\x1b[31;1m"
pink = "\x1b[35m;21m"
reset = "\x1b[0m"
format = "%(asctime)s %(levelname)s %(name)s %(message)s"
FORMATS = {
logging.DEBUG: grey + format + reset,
logging.INFO: lblue + format + reset,
logging.WARNING: yellow + format + reset,
logging.ERROR: red + format + reset,
logging.CRITICAL: bold_red + format + reset
}
def format(self, record):
log_fmt = self.FORMATS.get(record.levelno)
formatter = logging.Formatter(log_fmt)
return formatter.format(record)
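# Hedged usage sketch (not part of the original file): attach CustomFormatter to a
# stream handler. The logger name and the messages below are illustrative only.
if __name__ == "__main__":
    logger = logging.getLogger("demo")
    logger.setLevel(logging.DEBUG)
    handler = logging.StreamHandler()
    handler.setFormatter(CustomFormatter())
    logger.addHandler(handler)
    logger.info("info messages are printed in light blue")
    logger.error("error messages are printed in red")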
``` |
{
"source": "JohnnyCosta/mlexp",
"score": 3
} |
#### File: mlexp/mlexp/predict.py
```python
import argparse
import sys
from io import BytesIO
import matplotlib
import numpy as np
import requests
from PIL import Image
matplotlib.use('agg')
import matplotlib.pyplot as plt
from keras.preprocessing import image
from keras.models import load_model
from keras.applications.inception_v3 import preprocess_input
target_size = (299, 299) # fixed size for InceptionV3 architecture
def predict(model, img, target_size):
"""Run model prediction on image
Args:
model: keras model
img: PIL format image
target_size: (w,h) tuple
Returns:
list of predicted labels and their probabilities
"""
if img.size != target_size:
img = img.resize(target_size)
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
preds = model.predict(x)
return preds[0]
def plot_preds(image, preds):
"""Displays image and the top-n predicted probabilities in a bar graph
Args:
image: PIL image
preds: list of predicted labels and their probabilities
"""
"""# For Spyder
plt.imshow(image)
plt.axis('off')"""
plt.imshow(image)
plt.axis('off')
plt.figure()
labels = ("cat")
plt.barh([0, 1], preds, alpha=0.5)
plt.yticks([0, 1], labels)
plt.xlabel('Probability')
plt.xlim(0, 1.01)
plt.tight_layout()
plt.savefig('out.png')
if __name__ == "__main__":
a = argparse.ArgumentParser()
a.add_argument("--image", help="path to image")
a.add_argument("--image_url", help="url to image")
a.add_argument("--model")
args = a.parse_args()
if args.image is None and args.image_url is None:
a.print_help()
sys.exit(1)
model = load_model(args.model)
if args.image is not None:
img = Image.open(args.image)
preds = predict(model, img, target_size)
plot_preds(img, preds)
if args.image_url is not None:
response = requests.get(args.image_url)
img = Image.open(BytesIO(response.content))
preds = predict(model, img, target_size)
plot_preds(img, preds)
``` |
{
"source": "johnny-cy/URLShort-Tutorial-YT",
"score": 2
} |
#### File: URLShort-Tutorial-YT/urlshort/views.py
```python
from django.shortcuts import render
from .models import ShortURL
from .forms import CreateNewShortURL
from datetime import datetime
import random, string
# Create your views here.
def home(request):
return render(request, 'home.html')
def redirect(request, url):
current_obj = ShortURL.objects.filter(short_url=url)
if len(current_obj) == 0:
return render(request, 'pagenotfound.html')
context = {'obj':current_obj[0]}
return render(request, 'redirect.html', context)
def createShortURL(request):
if request.method == 'POST':
form = CreateNewShortURL(request.POST)
if form.is_valid():
original_website = form.cleaned_data['original_url']
random_chars_list = list(string.ascii_letters)
random_chars=''
for i in range(6):
random_chars += random.choice(random_chars_list)
while len(ShortURL.objects.filter(short_url=random_chars)) != 0:
# regenerate a fresh 6-character code on collision instead of appending to the old one
random_chars = ''.join(random.choice(random_chars_list) for _ in range(6))
d = datetime.now()
s = ShortURL(original_url=original_website, short_url=random_chars, time_date_created=d)
s.save()
return render(request, 'urlcreated.html', {'chars':random_chars})
else:
form=CreateNewShortURL()
context={'form': form}
return render(request, 'create.html', context)
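# Hedged sketch (assumed, not from the original repo): a URLconf that wires up these
# views; the route paths and names below are illustrative only.
# from django.urls import path
# from . import views
#
# urlpatterns = [
#     path('', views.home, name='home'),
#     path('create/', views.createShortURL, name='create'),
#     path('<str:url>/', views.redirect, name='redirect'),
# ]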
``` |
{
"source": "johnnyczhong/es-graph-conn-py",
"score": 3
} |
#### File: johnnyczhong/es-graph-conn-py/connectionsES.py
```python
from flask import Flask
import Connections
import time
from elasticsearch import client, Elasticsearch
app = Flask(__name__)
es = Elasticsearch(['http://localhost:9200'])
ic = client.IndicesClient(es)
host = 'localhost'
port = '9200'
@app.route('/')
def hello():
return "hello world!"
@app.route('/<index>/user/<int:uid>/connections/all', methods = ['GET'])
def getAllUserConnections(index, uid):
if not validateId(index, uid):
return 'Please check bounds of uid.'
iTime = time.time()
# init objects
queryUser = Connections.User(uid)
gc = Connections.GraphApi(host, port, index)
queryUser.set1and2dConn(gc)
queryUser.set3dConn(gc)
fTime = time.time()
str1st = 'Num 1st degree Connections: {}'.format(len(queryUser.conn1d))
str2nd = 'Num 2nd degree Connections: {}'.format(len(queryUser.conn2d))
str3rd = 'Num 3rd degree connections: {}'.format(len(queryUser.conn3d))
return '{} \n{} \n{} \nTook {}s'.format(str1st, str2nd, str3rd, (fTime - iTime))
@app.route('/<index>/user/<int:uid>/connections/<int:conn>', methods = ['GET'])
def getSingleDegreeConnections(index, uid, conn):
if not validateId(index, uid):
return 'Please check bounds of uid.'
iTime = time.time()
queryUser = Connections.User(uid)
gc = Connections.GraphApi(host, port, index)
if conn == 1:
queryUser.set1dConn(gc)
retString = 'Num 1st Degree Connections: {}'.format(len(queryUser.conn1d))
elif conn == 2:
queryUser.set1and2dConn(gc)
retString = 'Num 2nd Degree Connections: {}'.format(len(queryUser.conn2d))
elif conn == 3:
queryUser.set1and2dConn(gc)
queryUser.set3dConn(gc)
retString = 'Num 3rd Degree Connections: {}'.format(len(queryUser.conn3d))
else:
return 'Please provide a number between 1-3.'
fTime = time.time()
retString += ' Took: {}'.format(fTime - iTime)
return retString
def validateId(index, uid):
upperLimit = (ic.stats(index = index))['indices'][index]['primaries']['docs']['count']
return (uid <= upperLimit and uid >= 0)
if __name__ == '__main__':
app.run()
```
#### File: johnnyczhong/es-graph-conn-py/Connections.py
```python
import json
import urllib.request
import random
import time
random.seed()
esIndex = "prod"
esDoctype = "user"
host = 'localhost'
port = '9200'
timeout = 5000
sampleSize = 2000000 # population count
returnSize = 200000 # max data points to return
testId = random.randint(0, sampleSize)
class User:
def __init__(self, uid):
self.uid = uid
self.conn1d = None
self.conn2d = None
self.conn3d = None
def set1dConn(self, graphConn):
resp = graphConn.makeRequest((self.uid,), 1)
connections = graphConn.parseVertices(resp)
self.conn1d = connections['conn']
def set1and2dConn(self, graphConn):
resp = graphConn.makeRequest((self.uid,), 2)
connections = graphConn.parseVertices(resp)
self.conn1d = connections['uid']
self.conn2d = connections['conn']
# requires that conn2d and conn1d be set
def set3dConn(self, graphConn):
resp = graphConn.makeRequest(self.conn2d, 1, excludes = self.conn1d)
connections = graphConn.parseVertices(resp)
self.conn3d = connections['conn']
class GraphApi:
def __init__(self, host, port, index):
self.reqEndpoint = 'http://{0}:{1}/{2}/_graph/explore'.format(host, port, index)
# self.reqHeaders = {'Content-Type': 'application/json'}
def buildReqBody(self, uidList, degree, excludes = None):
strList = [str(x) for x in uidList]
# remove false positives (looping back to self)
if not excludes:
excludes = [-1]
# 1st degree = connections
if degree == 1:
spiderOrigin = 'uid'
# 2nd degree = connections of connections
elif degree == 2:
spiderOrigin = 'conn'
reqBody = {
"controls" : {
"use_significance" : False,
"sample_size" : returnSize,
"timeout" : timeout
},
"vertices" : [
{
"field" : spiderOrigin,
"include" : strList,
"shard_min_doc_count" : 1,
"min_doc_count" : 1,
"size" : returnSize
}
],
"connections" : {
"vertices" : [
{
"field" : "conn",
"exclude" : excludes,
"size" : returnSize,
"shard_min_doc_count" : 1,
"min_doc_count" : 1
},
{
"field": "uid",
"size": returnSize,
"shard_min_doc_count": 1,
"min_doc_count": 1
}
]
}
}
return reqBody
# purpose: interface to elasticsearch /_graph/explore endpoint
def makeRequest(self, uidList, degree, excludes = None):
reqBody = self.buildReqBody(uidList, degree, excludes)
jsonEncodedBody = json.dumps(reqBody)
bytesEncodedBody = jsonEncodedBody.encode('utf-8')
resp = urllib.request.urlopen(self.reqEndpoint, data=bytesEncodedBody)
respContent = json.loads(resp.read().decode())
return respContent
# purpose: returns connections that are not self
def parseVertices(self, resp):
vertices = resp['vertices']
connections = {
'uid' : [],
'conn' : []
}
# list of dicts
for i in vertices:
if i['depth'] != 0:
if i['field'] == 'uid':
connections['uid'].append(i['term'])
elif i['field'] == 'conn':
connections['conn'].append(i['term'])
return connections
def main():
# todo: pass query params as args via cmdline
# tests
initTime = time.time()
print('userId: {}'.format(testId))
newUser = User(testId)
gc = GraphApi(host, port, esIndex)
newUser.set1dConn(gc)
newUser.conn1d.sort()  # sort in place; list.sort() returns None
print('Just 1d connection: {}'.format(newUser.conn1d))
newUser.set1and2dConn(gc)
newUser.set3dConn(gc)
# print('Num 1st degree Connections: {}'.format(len(newUser.conn1d)))
newUser.conn1d.sort()
print('1st degree Connection: {}'.format(newUser.conn1d))
# print('Num 2nd degree Connections: {}'.format(len(newUser.conn2d)))
print('2nd degree Connection: {}'.format(newUser.conn2d[10]))
# print('Num 3rd degree connections: {}'.format(len(newUser.conn3d)))
print('3rd degree Connection: {}'.format(newUser.conn3d[10]))
finalTime = time.time()
print('Elapsed Time: {}'.format(finalTime - initTime))
if __name__ == '__main__':
main()
``` |
{
"source": "Johnnydaszhu/vector-python-sdk",
"score": 3
} |
#### File: vector-python-sdk/anki_vector/nav_map.py
```python
__all__ = ['EvtNavMapUpdate',
'NavMapComponent', 'NavMapGrid', 'NavMapGridNode',
'NavNodeContentTypes']
import asyncio
from concurrent.futures import CancelledError
from enum import Enum
from logging import Logger
from typing import List
from . import util
from .events import Events
from .messaging import protocol
class EvtNavMapUpdate(): # pylint: disable=too-few-public-methods
"""Dispatched when a new nav map is received.
:param nav_map: The current state of the robot's nav map.
"""
def __init__(self, nav_map):
self.nav_map = nav_map
class NavNodeContentTypes(Enum): # pylint: disable=too-few-public-methods
"""The content types for a :class:`NavMapGridNode`.
"""
#: The contents of the node is unknown.
Unknown = protocol.NavNodeContentType.Value("NAV_NODE_UNKNOWN")
#: The node is clear of obstacles, because Vector has seen objects on the
#: other side, but it might contain a cliff. The node will be marked as
#: either :attr:`Cliff` or :attr:`ClearOfCliff` once Vector has driven there.
ClearOfObstacle = protocol.NavNodeContentType.Value("NAV_NODE_CLEAR_OF_OBSTACLE")
#: The node is clear of any cliffs (a sharp drop) or obstacles.
ClearOfCliff = protocol.NavNodeContentType.Value("NAV_NODE_CLEAR_OF_CLIFF")
#: The node contains a :class:`~anki_vector.objects.LightCube`.
ObstacleCube = protocol.NavNodeContentType.Value("NAV_NODE_OBSTACLE_CUBE")
#: The node contains a proximity detected obstacle which has not been explored.
ObstacleProximity = protocol.NavNodeContentType.Value("NAV_NODE_OBSTACLE_PROXIMITY")
#: The node contains a proximity detected obstacle which has been explored.
ObstacleProximityExplored = protocol.NavNodeContentType.Value("NAV_NODE_OBSTACLE_PROXIMITY_EXPLORED")
#: The node contains an unrecognized obstacle.
ObstacleUnrecognized = protocol.NavNodeContentType.Value("NAV_NODE_OBSTACLE_UNRECOGNIZED")
#: The node contains a cliff (a sharp drop).
Cliff = protocol.NavNodeContentType.Value("NAV_NODE_CLIFF")
#: The node contains a visible edge (based on the camera feed).
InterestingEdge = protocol.NavNodeContentType.Value("NAV_NODE_INTERESTING_EDGE")
# This entry is undocumented and not currently used
NonInterestingEdge = protocol.NavNodeContentType.Value("NAV_NODE_NON_INTERESTING_EDGE")
class NavMapGridNode:
"""A node in a :class:`NavMapGrid`.
Leaf nodes contain content, all other nodes are split into 4 equally sized
children.
Child node indices are stored in the following X,Y orientation:
+---+----+---+
| ^ | 2 | 0 |
+---+----+---+
| Y | 3 | 1 |
+---+----+---+
| | X->| |
+---+----+---+
"""
def __init__(self, depth: int, size: float, center: util.Vector3, parent: 'NavMapGridNode', logger: Logger):
#: The depth of this node (i.e. how far down the quad-tree it is).
self.depth = depth
#: The size (width or length) of this square node.
self.size = size
#: The center of this node.
self.center = center
#: The parent of this node. Is ``None`` for the root node.
self.parent = parent
#: ``None`` for leaf nodes, a list of 4 child nodes otherwise.
self.children: List[NavMapGridNode] = None
#: The content type in this node. Only leaf nodes have content,
#: this is ``None`` for all other nodes.
self.content: protocol.NavNodeContentType = None
self._next_child = 0 # Used when building to track which branch to follow
self._logger = logger
def __repr__(self):
return '<%s center: %s size: %s content: %s>' % (
self.__class__.__name__, self.center, self.size, self.content)
def contains_point(self, x: float, y: float) -> bool:
"""Test if the node contains the given x,y coordinates.
:param x: x coordinate for the point.
:param y: y coordinate for the point.
Returns:
True if the node contains the point, False otherwise.
"""
half_size = self.size * 0.5
dist_x = abs(self.center.x - x)
dist_y = abs(self.center.y - y)
return (dist_x <= half_size) and (dist_y <= half_size)
def _get_node(self, x: float, y: float, assumed_in_bounds: bool) -> 'NavMapGridNode':
if not assumed_in_bounds and not self.contains_point(x, y):
# point is out of bounds
return None
if self.children is None:
return self
x_offset = 2 if x < self.center.x else 0
y_offset = 1 if y < self.center.y else 0
child_node = self.children[x_offset + y_offset]
# child node is by definition in bounds / on boundary
return child_node._get_node(x, y, True) # pylint: disable=protected-access
def get_node(self, x: float, y: float) -> 'NavMapGridNode':
"""Get the node at the given x,y coordinates.
:param x: x coordinate for the point.
:param y: y coordinate for the point.
Returns:
The smallest node that includes the point.
Will be ``None`` if the point is outside of the map.
"""
return self._get_node(x, y, assumed_in_bounds=False)
def get_content(self, x: float, y: float) -> protocol.NavNodeContentType:
"""Get the node's content at the given x,y coordinates.
:param x: x coordinate for the point.
:param y: y coordinate for the point.
Returns:
The content included at that point. Will be :attr:`NavNodeContentTypes.Unknown`
if the point is outside of the map.
"""
node = self.get_node(x, y)
if node:
return node.content
return NavNodeContentTypes.Unknown
def add_child(self, content: protocol.NavNodeContentType, depth: int) -> bool:
"""Add a child node to the quad tree.
The quad-tree is serialized to a flat list of nodes, we deserialize
back to a quad-tree structure here, with the depth of each node
indicating where it is placed.
:param content: The content to store in the leaf node.
:param depth: The depth that this leaf node is located at.
Returns:
True if parent should use the next child for future add_child
calls.
"""
if depth > self.depth:
self._logger.error("NavMapGridNode depth %s > %s", depth, self.depth)
if self._next_child > 3:
self._logger.error("NavMapGridNode _next_child %s (>3) at depth %s", self._next_child, self.depth)
if self.depth == depth:
if self.content is not None:
self._logger.error("NavMapGridNode: Clobbering %s at depth %s with %s",
self.content, self.depth, content)
self.content = content
# This node won't be further subdivided, and is now full
return True
if self.children is None:
# Create 4 child nodes for quad-tree structure
next_depth = self.depth - 1
next_size = self.size * 0.5
offset = next_size * 0.5
center1 = util.Vector3(self.center.x + offset, self.center.y + offset, self.center.z)
center2 = util.Vector3(self.center.x + offset, self.center.y - offset, self.center.z)
center3 = util.Vector3(self.center.x - offset, self.center.y + offset, self.center.z)
center4 = util.Vector3(self.center.x - offset, self.center.y - offset, self.center.z)
self.children = [NavMapGridNode(next_depth, next_size, center1, self, self._logger),
NavMapGridNode(next_depth, next_size, center2, self, self._logger),
NavMapGridNode(next_depth, next_size, center3, self, self._logger),
NavMapGridNode(next_depth, next_size, center4, self, self._logger)]
if self.children[self._next_child].add_child(content, depth):
# Child node is now full, start using the next child
self._next_child += 1
if self._next_child > 3:
# All children are now full - parent should start using the next child
return True
# Empty children remain - parent can keep using this child
return False
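# Hedged usage sketch (illustrative, not part of the SDK): build a tiny two-level
# quad-tree by hand and query it. The `logging` import and the content value used
# below are assumptions made for the example.
# import logging
# root = NavMapGridNode(depth=1, size=100.0,
#                       center=util.Vector3(0.0, 0.0, 0.0),
#                       parent=None, logger=logging.getLogger(__name__))
# for _ in range(4):  # four leaf children at depth 0, filled in child order 0..3
#     root.add_child(NavNodeContentTypes.ClearOfCliff.value, depth=0)
# print(root.get_content(10.0, 10.0))  # content of the +X,+Y child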
class NavMapGrid:
"""A navigation memory map, stored as a quad-tree."""
def __init__(self, msg: protocol.NavMapFeedResponse, logger: Logger):
#: The origin ID for the map. Only maps and :class:`~anki_vector.util.Pose`
#: objects of the same origin ID are in the same coordinate frame and
#: can therefore be compared.
self.origin_id = msg.origin_id
root_center = util.Vector3(msg.map_info.root_center_x, msg.map_info.root_center_y, msg.map_info.root_center_z)
self._root_node = NavMapGridNode(msg.map_info.root_depth, msg.map_info.root_size_mm, root_center, None, logger)
for quad in msg.quad_infos:
self.add_quad(quad.content, quad.depth)
self._logger = logger
def __repr__(self):
return '<%s center: %s size: %s>' % (
self.__class__.__name__, self.center, self.size)
@property
def root_node(self) -> NavMapGridNode:
"""The root node for the grid, contains all other nodes."""
return self._root_node
@property
def size(self) -> float:
"""The size (width or length) of the square grid."""
return self._root_node.size
@property
def center(self) -> util.Vector3:
"""The center of this map."""
return self._root_node.center
def contains_point(self, x: float, y: float) -> bool:
"""Test if the map contains the given x,y coordinates.
:param x: x coordinate for the point.
:param y: y coordinate for the point.
Returns:
True if the map contains the point, False otherwise.
"""
return self._root_node.contains_point(x, y)
def get_node(self, x: float, y: float) -> NavMapGridNode:
"""Get the node at the given x,y coordinates.
:param x: x coordinate for the point.
:param y: y coordinate for the point.
Returns:
The smallest node that includes the point.
Will be ``None`` if the point is outside of the map.
"""
return self._root_node.get_node(x, y)
def get_content(self, x: float, y: float) -> protocol.NavNodeContentType:
"""Get the map's content at the given x,y coordinates.
:param x: x coordinate for the point.
:param y: y coordinate for the point.
.. testcode::
import anki_vector
with anki_vector.Robot(enable_nav_map_feed=True) as robot:
# Make sure Vector drives around so the nav map will update
robot.behavior.drive_off_charger()
robot.motors.set_wheel_motors(-100, 100)
latest_nav_map = robot.nav_map.latest_nav_map
content = latest_nav_map.get_content(0.0, 100.0)
print(f"Sampling point at 0.0, 100.0 and found content: {content}")
Returns:
The content included at that point. Will be :attr:`NavNodeContentTypes.Unknown`
if the point is outside of the map.
"""
return self._root_node.get_content(x, y)
def add_quad(self, content: protocol.NavNodeContentType, depth: int):
"""Adds a new quad to the nav map.
:param content: What content this node contains.
:param depth: How deep in the navMap this node is.
"""
self._root_node.add_child(content, depth)
class NavMapComponent(util.Component):
"""Represents Vector's navigation memory map.
The NavMapComponent object subscribes for nav memory map updates from the robot to store and dispatch.
The :class:`anki_vector.robot.Robot` or :class:`anki_vector.robot.AsyncRobot` instance hosts this component.
.. testcode::
import anki_vector
with anki_vector.Robot(enable_nav_map_feed=True) as robot:
# Make sure Vector drives around so the nav map will update
robot.behavior.drive_off_charger()
robot.motors.set_wheel_motors(-100, 100)
latest_nav_map = robot.nav_map.latest_nav_map
:param robot: A reference to the owner Robot object.
"""
def __init__(self, robot):
super().__init__(robot)
self._latest_nav_map: NavMapGrid = None
self._nav_map_feed_task: asyncio.Task = None
@property
@util.block_while_none()
def latest_nav_map(self) -> NavMapGrid:
""":class:`NavMapGrid`: The most recently processed image received from the robot.
.. testcode::
import anki_vector
with anki_vector.Robot(enable_nav_map_feed=True) as robot:
# Make sure Vector drives around so the nav map will update
robot.behavior.drive_off_charger()
robot.motors.set_wheel_motors(-100, 100)
latest_nav_map = robot.nav_map.latest_nav_map
"""
if not self._nav_map_feed_task or self._nav_map_feed_task.done():
raise Exception("Nav map not initialized")
return self._latest_nav_map
def init_nav_map_feed(self, frequency: float = 0.5) -> None:
"""Begin nav map feed task.
:param frequency: How frequently to send nav map updates.
"""
if not self._nav_map_feed_task or self._nav_map_feed_task.done():
self._nav_map_feed_task = self.conn.loop.create_task(self._request_and_handle_nav_maps(frequency))
def close_nav_map_feed(self) -> None:
"""Cancel nav map feed task."""
if self._nav_map_feed_task:
self._nav_map_feed_task.cancel()
future = self.conn.run_coroutine(self._nav_map_feed_task)
future.result()
self._nav_map_feed_task = None
async def _request_and_handle_nav_maps(self, frequency: float) -> None:
"""Queries and listens for nav map feed events from the robot.
Received events are parsed by a helper function.
:param frequency: How frequently to send nav map updates.
"""
try:
req = protocol.NavMapFeedRequest(frequency=frequency)
async for evt in self.grpc_interface.NavMapFeed(req):
self._latest_nav_map = NavMapGrid(evt, self.logger)
await self._robot.events.dispatch_event(evt, Events.nav_map_update)
except CancelledError:
self.logger.debug('Nav Map feed task was cancelled. This is expected during disconnection.')
```
#### File: anki_vector/opengl/opengl_viewer.py
```python
__all__ = ['OpenGLViewer']
import math
import multiprocessing as mp
import sys
from typing import List
from anki_vector import nav_map, util
from . import opengl, opengl_vector
try:
from OpenGL.GL import (GL_FILL,
GL_FRONT_AND_BACK,
GL_LIGHTING, GL_NORMALIZE,
GL_TEXTURE_2D,
glBindTexture, glColor3f, glDisable, glEnable,
glMultMatrixf, glPolygonMode, glPopMatrix, glPushMatrix,
glScalef, glWindowPos2f)
from OpenGL.GLUT import (ctypes,
GLUT_ACTIVE_ALT, GLUT_ACTIVE_CTRL, GLUT_ACTIVE_SHIFT, GLUT_BITMAP_9_BY_15,
GLUT_DOWN, GLUT_LEFT_BUTTON, GLUT_RIGHT_BUTTON, GLUT_VISIBLE,
glutBitmapCharacter, glutCheckLoop, glutGetModifiers, glutIdleFunc,
glutKeyboardFunc, glutKeyboardUpFunc, glutMainLoop, glutMouseFunc, glutMotionFunc, glutPassiveMotionFunc,
glutPostRedisplay, glutSpecialFunc, glutSpecialUpFunc, glutVisibilityFunc)
from OpenGL.error import NullFunctionError
except ImportError as import_exc:
opengl.raise_opengl_or_pillow_import_error(import_exc)
# Constants
class _RobotControlIntents(): # pylint: disable=too-few-public-methods
"""Input intents for controlling the robot.
These are sent from the OpenGL process, and consumed by the main process for
issuing movement commands on Vector (to provide a remote-control interface).
"""
def __init__(self, left_wheel_speed=0.0, right_wheel_speed=0.0,
lift_speed=0.0, head_speed=0.0):
self.left_wheel_speed = left_wheel_speed
self.right_wheel_speed = right_wheel_speed
self.lift_speed = lift_speed
self.head_speed = head_speed
def _draw_text(font, input_str, x, y, line_height=16, r=1.0, g=1.0, b=1.0):
"""Render text based on window position. The origin is in the bottom-left."""
glColor3f(r, g, b)
glWindowPos2f(x, y)
input_list = input_str.split('\n')
y = y + (line_height * (len(input_list) - 1))
for line in input_list:
glWindowPos2f(x, y)
y -= line_height
for ch in line:
glutBitmapCharacter(font, ctypes.c_int(ord(ch)))
def _glut_install_instructions():
if sys.platform.startswith('linux'):
return "Install freeglut: `sudo apt-get install freeglut3`"
if sys.platform.startswith('darwin'):
return "GLUT should already be installed by default on macOS!"
if sys.platform in ('win32', 'cygwin'):
return "Install freeglut: You can download it from http://freeglut.sourceforge.net/ \n"\
"You just need the `freeglut.dll` file, from any of the 'Windows binaries' downloads. "\
"Place the DLL next to your Python script, or install it somewhere in your PATH "\
"to allow any script to use it."
return "(Instructions unknown for platform %s)" % sys.platform
class _OpenGLViewController():
"""Controller that registers for keyboard and mouse input through GLUT, and uses them to update
the camera and listen for a shutdown cue.
:param shutdown_delegate: Function to call when we want to exit the host OpenGLViewer.
:param camera: The camera object for the controller to mutate.
:param input_intent_queue: Sends key commands from the 3D viewer process to the main process.
:type input_intent_queue: multiprocessing.Queue
:param viewer: A reference to the owning OpenGLViewer.
:type viewer: OpenGLViewer
"""
def __init__(self, shutdown_delegate: callable, camera: opengl.Camera, input_intent_queue: mp.Queue, viewer):
self._logger = util.get_class_logger(__name__, self)
self._input_intent_queue = input_intent_queue
self._last_robot_control_intents = _RobotControlIntents()
self._is_keyboard_control_enabled = False
# Keyboard
self._is_key_pressed = {}
self._is_alt_down = False
self._is_ctrl_down = False
self._is_shift_down = False
# Mouse
self._is_mouse_down = {}
self._mouse_pos = None # type: util.Vector2
self._shutdown_delegate = shutdown_delegate
self._last_robot_position = None
self._camera = camera
self._opengl_viewer = viewer
#### Public Properties ####
@property
def last_robot_position(self):
return self._last_robot_position
@last_robot_position.setter
def last_robot_position(self, last_robot_position):
self._last_robot_position = last_robot_position
#### Public Methods ####
def initialize(self):
"""Sets up the OpenGL window and binds input callbacks to it
"""
glutKeyboardFunc(self._on_key_down)
glutSpecialFunc(self._on_special_key_down)
# [Keyboard/Special]Up methods aren't supported on some old GLUT implementations
has_keyboard_up = False
has_special_up = False
try:
if bool(glutKeyboardUpFunc):
glutKeyboardUpFunc(self._on_key_up)
has_keyboard_up = True
if bool(glutSpecialUpFunc):
glutSpecialUpFunc(self._on_special_key_up)
has_special_up = True
except NullFunctionError:
# Methods aren't available on this GLUT version
pass
if not has_keyboard_up or not has_special_up:
# Warn on old GLUT implementations that don't implement much of the interface.
self._logger.warning("Warning: Old GLUT implementation detected - keyboard remote control of Vector disabled."
"We recommend installing freeglut. %s", _glut_install_instructions())
self._is_keyboard_control_enabled = False
else:
self._is_keyboard_control_enabled = True
try:
GLUT_BITMAP_9_BY_15
except NameError:
self._logger.warning("Warning: GLUT font not detected. Help message will be unavailable.")
glutMouseFunc(self._on_mouse_button)
glutMotionFunc(self._on_mouse_move)
glutPassiveMotionFunc(self._on_mouse_move)
glutIdleFunc(self._idle)
glutVisibilityFunc(self._visible)
#### Private Methods ####
def _update_modifier_keys(self):
"""Updates alt, ctrl, and shift states.
"""
modifiers = glutGetModifiers()
self._is_alt_down = (modifiers & GLUT_ACTIVE_ALT != 0)
self._is_ctrl_down = (modifiers & GLUT_ACTIVE_CTRL != 0)
self._is_shift_down = (modifiers & GLUT_ACTIVE_SHIFT != 0)
def _key_byte_to_lower(self, key): # pylint: disable=no-self-use
"""Convert bytes-object (representing keyboard character) to lowercase equivalent.
"""
if b'A' <= key <= b'Z':
lowercase_key = ord(key) - ord(b'A') + ord(b'a')
lowercase_key = bytes([lowercase_key])
return lowercase_key
return key
def _on_key_up(self, key, x, y): # pylint: disable=unused-argument
"""Called by GLUT when a standard keyboard key is released.
:param key: which key was released.
:param x: the x coordinate of the mouse cursor.
:param y: the y coordinate of the mouse cursor.
"""
key = self._key_byte_to_lower(key)
self._update_modifier_keys()
self._is_key_pressed[key] = False
def _on_key_down(self, key, x, y): # pylint: disable=unused-argument
"""Called by GLUT when a standard keyboard key is pressed.
:param key: which key was pressed.
:param x: the x coordinate of the mouse cursor.
:param y: the y coordinate of the mouse cursor.
"""
key = self._key_byte_to_lower(key)
self._update_modifier_keys()
self._is_key_pressed[key] = True
if ord(key) == 9: # Tab
# Set Look-At point to current robot position
if self._last_robot_position is not None:
self._camera.look_at = self._last_robot_position
elif ord(key) == 27: # Escape key
self._shutdown_delegate()
elif ord(key) == 72 or ord(key) == 104: # H key
self._opengl_viewer.show_controls = not self._opengl_viewer.show_controls
def _on_special_key_up(self, key, x, y): # pylint: disable=unused-argument
"""Called by GLUT when a special key is released.
:param key: which key was released.
:param x: the x coordinate of the mouse cursor.
:param y: the y coordinate of the mouse cursor.
"""
self._update_modifier_keys()
def _on_special_key_down(self, key, x, y): # pylint: disable=unused-argument
"""Called by GLUT when a special key is pressed.
:param key: which key was pressed.
:param x: the x coordinate of the mouse cursor.
:param y: the y coordinate of the mouse cursor.
"""
self._update_modifier_keys()
def _on_mouse_button(self, button, state, x, y):
"""Called by GLUT when a mouse button is pressed.
:param button: which button was pressed.
:param state: the current state of the button.
:param x: the x coordinate of the mouse cursor.
:param y: the y coordinate of the mouse cursor.
"""
# Don't update modifier keys- reading modifier keys is unreliable
# from _on_mouse_button (for LMB down/up), only SHIFT key seems to read there
# self._update_modifier_keys()
is_down = (state == GLUT_DOWN)
self._is_mouse_down[button] = is_down
self._mouse_pos = util.Vector2(x, y)
def _on_mouse_move(self, x, y):
"""Handles mouse movement.
:param x: the x coordinate of the mouse cursor.
:param y: the y coordinate of the mouse cursor.
"""
# is_active is True if this is not passive (i.e. a mouse button was down)
last_mouse_pos = self._mouse_pos
self._mouse_pos = util.Vector2(x, y)
if last_mouse_pos is None:
# First mouse update - ignore (we need a delta of mouse positions)
return
left_button = self._is_mouse_down.get(GLUT_LEFT_BUTTON, False)
# For laptop and other 1-button mouse users, treat 'x' key as a right mouse button too
right_button = (self._is_mouse_down.get(GLUT_RIGHT_BUTTON, False) or
self._is_key_pressed.get(b'x', False))
MOUSE_SPEED_SCALAR = 1.0 # general scalar for all mouse movement sensitivity
MOUSE_ROTATE_SCALAR = 0.025 # additional scalar for rotation sensitivity
mouse_delta = (self._mouse_pos - last_mouse_pos) * MOUSE_SPEED_SCALAR
if left_button and right_button:
# Move up/down
self._camera.move(up_amount=-mouse_delta.y)
elif right_button:
# Move forward/back and left/right
self._camera.move(forward_amount=mouse_delta.y, right_amount=mouse_delta.x)
elif left_button:
if self._is_key_pressed.get(b'z', False):
# Zoom in/out
self._camera.zoom(mouse_delta.y)
else:
self._camera.turn(mouse_delta.x * MOUSE_ROTATE_SCALAR, mouse_delta.y * MOUSE_ROTATE_SCALAR)
def _update_intents_for_robot(self):
# Update driving intents based on current input, and pass to SDK thread
# so that it can pass the input onto the robot.
def get_intent_direction(key1, key2):
# Helper for keyboard inputs that have 1 positive and 1 negative input
pos_key = self._is_key_pressed.get(key1, False)
neg_key = self._is_key_pressed.get(key2, False)
return pos_key - neg_key
drive_dir = get_intent_direction(b'w', b's')
turn_dir = get_intent_direction(b'd', b'a')
lift_dir = get_intent_direction(b'r', b'f')
head_dir = get_intent_direction(b't', b'g')
if drive_dir < 0:
# It feels more natural to turn the opposite way when reversing
turn_dir = -turn_dir
# Scale drive speeds with SHIFT (faster) and ALT (slower)
if self._is_shift_down:
speed_scalar = 2.0
elif self._is_alt_down:
speed_scalar = 0.5
else:
speed_scalar = 1.0
drive_speed = 75.0 * speed_scalar
turn_speed = 100.0 * speed_scalar
left_wheel_speed = (drive_dir * drive_speed) + (turn_speed * turn_dir)
right_wheel_speed = (drive_dir * drive_speed) - (turn_speed * turn_dir)
lift_speed = 4.0 * lift_dir * speed_scalar
head_speed = head_dir * speed_scalar
control_intents = _RobotControlIntents(left_wheel_speed, right_wheel_speed,
lift_speed, head_speed)
self._input_intent_queue.put(control_intents, True)
def _idle(self):
if self._is_keyboard_control_enabled:
self._update_intents_for_robot()
glutPostRedisplay()
def _visible(self, vis):
# Called from OpenGL when visibility changes (windows are either visible
# or completely invisible/hidden)
if vis == GLUT_VISIBLE:
glutIdleFunc(self._idle)
else:
glutIdleFunc(None)
#: A default window resolution provided for OpenGL Vector programs
#: 800x600 is large enough to see detail, while fitting on the smaller
#: end of modern monitors.
default_resolution = [800, 600]
#: A default projector configuration provided for OpenGL Vector programs
#: A Field of View of 45 degrees is common for 3d applications,
#: and a viewable distance range of 1.0 to 1000.0 will provide a
#: visible space comparable with most physical Vector environments.
default_projector = opengl.Projector(
fov=45.0,
near_clip_plane=1.0,
far_clip_plane=1000.0)
#: A default camera object provided for OpenGL Vector programs.
#: Starts close to and looking at the charger.
default_camera = opengl.Camera(
look_at=util.Vector3(100.0, -25.0, 0.0),
up=util.Vector3(0.0, 0.0, 1.0),
distance=500.0,
pitch=math.radians(40),
yaw=math.radians(270))
#: A default light group provided for OpenGL Vector programs.
#: Contains one light near the origin.
default_lights = [opengl.Light(
ambient_color=[1.0, 1.0, 1.0, 1.0],
diffuse_color=[1.0, 1.0, 1.0, 1.0],
specular_color=[1.0, 1.0, 1.0, 1.0],
position=util.Vector3(0, 32, 20))]
# Global viewer instance. Stored to make sure multiple viewers are not
# instantiated simultaneously.
opengl_viewer = None # type: OpenGLViewer
class OpenGLViewer():
"""OpenGL-based 3D Viewer.
Handles rendering of a 3D world view including navigation map.
:param close_event: Used to notify each process when done rendering.
:type close_event: multiprocessing.Event
:param input_intent_queue: Sends key commands from the 3D viewer process to the main process.
:type input_intent_queue: multiprocessing.Queue
:param nav_map_queue: Updates the 3D viewer process with the latest navigation map.
:type nav_map_queue: multiprocessing.Queue
:param world_frame_queue: Provides the 3D viewer with details about the world.
:type world_frame_queue: multiprocessing.Queue
:param extra_render_function_queue: Functions to be executed in the 3D viewer process.
:type extra_render_function_queue: multiprocessing.Queue
:param user_data_queue: A queue that may be used outside the SDK to pass information to the viewer process.
May be used by ``extra_render_function_queue`` functions.
:type user_data_queue: multiprocessing.Queue
:param resolution: The resolution of the viewer window, as a [width, height] pair in pixels.
:param projector: The projector configuration used to render the 3D scene.
:param camera: The camera from which the 3D scene is viewed.
:param lights: The lights illuminating the 3D scene.
:param show_viewer_controls: Specifies whether to draw controls on the view.
"""
def __init__(self,
close_event: mp.Event,
input_intent_queue: mp.Queue,
nav_map_queue: mp.Queue,
world_frame_queue: mp.Queue,
extra_render_function_queue: mp.Queue,
user_data_queue: mp.Queue,
resolution: List[int] = None,
projector: opengl.Projector = None,
camera: opengl.Camera = None,
lights: List[opengl.Light] = None,
show_viewer_controls: bool = True):
if resolution is None:
resolution = default_resolution
if projector is None:
projector = default_projector
if camera is None:
camera = default_camera
if lights is None:
lights = default_lights
self._close_event = close_event
self._input_intent_queue = input_intent_queue
self._nav_map_queue = nav_map_queue
self._world_frame_queue = world_frame_queue
self._extra_render_function_queue = extra_render_function_queue
self._user_data_queue = user_data_queue
self._logger = util.get_class_logger(__name__, self)
self._extra_render_calls = []
self._internal_function_finished = False
# Controls
self.show_controls = show_viewer_controls
self._instructions = '\n'.join(['W, S: Move forward, backward',
'A, D: Turn left, right',
'R, F: Lift up, down',
'T, G: Head up, down',
'',
'LMB: Rotate camera',
'RMB: Move camera',
'LMB + RMB: Move camera up/down',
'LMB + Z: Zoom camera',
'X: same as RMB',
'TAB: center view on robot',
'',
'H: Toggle help'])
self._vector_view_manifest = opengl_vector.VectorViewManifest()
self._main_window = opengl.OpenGLWindow(0, 0, resolution[0], resolution[1], b"Vector 3D Visualizer")
# Create a 3d projector configuration class.
self._projector = projector
self._camera = camera
self._lights = lights
self._view_controller = _OpenGLViewController(self.close, self._camera, self._input_intent_queue, self)
self._latest_world_frame: opengl_vector.WorldRenderFrame = None
def _render_world_frame(self, world_frame: opengl_vector.WorldRenderFrame):
"""Render the world to the current OpenGL context
:param world_frame: frame to render
"""
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)
glEnable(GL_NORMALIZE) # to re-scale scaled normals
light_cube_view = self._vector_view_manifest.light_cube_view
unit_cube_view = self._vector_view_manifest.unit_cube_view
robot_view = self._vector_view_manifest.robot_view
nav_map_view = self._vector_view_manifest.nav_map_view
robot_frame = world_frame.robot_frame
robot_pose = robot_frame.pose
try:
glDisable(GL_LIGHTING)
nav_map_view.display()
glEnable(GL_LIGHTING)
# Render the cube
for obj in world_frame.cube_frames:
cube_pose = obj.pose
if cube_pose is not None and cube_pose.is_comparable(robot_pose):
light_cube_view.display(cube_pose)
# Render the custom objects
for obj in world_frame.custom_object_frames:
obj_pose = obj.pose
if obj_pose is not None and obj_pose.is_comparable(robot_pose):
glPushMatrix()
obj_matrix = obj_pose.to_matrix()
glMultMatrixf(obj_matrix.in_row_order)
glScalef(obj.x_size_mm * 0.5,
obj.y_size_mm * 0.5,
obj.z_size_mm * 0.5)
# Only draw solid object for observable custom objects
if obj.is_fixed:
# fixed objects are drawn as transparent outlined boxes to make
# it clearer that they have no effect on vision.
FIXED_OBJECT_COLOR = [1.0, 0.7, 0.0, 1.0]
unit_cube_view.display(FIXED_OBJECT_COLOR, False)
else:
CUSTOM_OBJECT_COLOR = [1.0, 0.3, 0.3, 1.0]
unit_cube_view.display(CUSTOM_OBJECT_COLOR, True)
glPopMatrix()
glBindTexture(GL_TEXTURE_2D, 0)
for face in world_frame.face_frames:
face_pose = face.pose
if face_pose is not None and face_pose.is_comparable(robot_pose):
glPushMatrix()
face_matrix = face_pose.to_matrix()
glMultMatrixf(face_matrix.in_row_order)
# Approximate size of a head
glScalef(100, 25, 100)
FACE_OBJECT_COLOR = [0.5, 0.5, 0.5, 1.0]
draw_solid = face.time_since_last_seen < 30
unit_cube_view.display(FACE_OBJECT_COLOR, draw_solid)
glPopMatrix()
except BaseException as e:
self._logger.error('rendering error: {0}'.format(e))
glDisable(GL_LIGHTING)
# Draw the Vector robot to the screen
robot_view.display(robot_frame.pose, robot_frame.head_angle, robot_frame.lift_position)
if self.show_controls:
self._draw_controls()
def _draw_controls(self):
try:
GLUT_BITMAP_9_BY_15
except NameError:
pass
else:
_draw_text(GLUT_BITMAP_9_BY_15, self._instructions, x=10, y=10)
def _render_3d_view(self, window: opengl.OpenGLWindow):
"""Renders 3d objects to an openGL window
:param window: OpenGL window to render to
"""
window.prepare_for_rendering(self._projector, self._camera, self._lights)
try:
extra_render_call = self._extra_render_function_queue.get(False)
self._extra_render_calls.append(extra_render_call)
except mp.queues.Empty:
pass
# Update the latest world frame if there is a new one available
try:
world_frame = self._world_frame_queue.get(False) # type: WorldRenderFrame
if world_frame is not None:
self._view_controller.last_robot_position = world_frame.robot_frame.pose.position
self._latest_world_frame = world_frame
except mp.queues.Empty:
world_frame = self._latest_world_frame
try:
new_nav_map = self._nav_map_queue.get(False)
if new_nav_map is not None:
new_nav_map = nav_map.NavMapGrid(new_nav_map, self._logger)
self._vector_view_manifest.nav_map_view.build_from_nav_map(new_nav_map)
except mp.queues.Empty:
# no new nav map - queue is empty
pass
if world_frame is not None:
self._render_world_frame(world_frame)
for render_call in self._extra_render_calls:
# Protecting the external calls with pushMatrix so internal transform
# state changes will not alter other calls
glPushMatrix()
try:
render_call.invoke(self._user_data_queue)
finally:
glPopMatrix()
window.display_rendered_content()
def _on_window_update(self):
"""Top level display call.
"""
try:
self._render_3d_view(self._main_window)
except KeyboardInterrupt:
self._logger.info("_display caught KeyboardInterrupt - exitting")
self._close_event.set()
def run(self):
"""Turns control of the current thread over to the OpenGL viewer
"""
self._main_window.initialize(self._on_window_update)
self._view_controller.initialize()
self._vector_view_manifest.load_assets()
# use a non-blocking update loop if possible to make exit conditions
# easier (not supported on all GLUT versions).
if bool(glutCheckLoop):
while not self._close_event.is_set():
glutCheckLoop()
else:
# This blocks until quit
glutMainLoop()
if not self._close_event.is_set():
# Pass the keyboard interrupt on to SDK so that it can close cleanly
raise KeyboardInterrupt
def close(self):
"""Called from the SDK when the program is complete and it's time to exit."""
if not self._close_event.is_set():
self._close_event.set()
```
#### File: vector-python-sdk/anki_vector/touch.py
```python
__all__ = ["TouchComponent", "TouchSensorData"]
from . import util
from .events import Events
from .messaging import protocol
class TouchSensorData:
"""A touch sample from the capacitive touch sensor, accompanied with the robot's
conclusion on whether this is considered a valid touch.
"""
def __init__(self, proto_data: protocol.TouchData):
self._raw_touch_value = proto_data.raw_touch_value
self._is_being_touched = proto_data.is_being_touched
@property
def raw_touch_value(self) -> int:
"""The detected sensitivity from the touch sensor.
This will not map to a constant raw value, as it may be impacted by various
environmental factors such as whether the robot is on its charger, being held, humidity, etc.
.. testcode::
import anki_vector
with anki_vector.Robot() as robot:
touch_data = robot.touch.last_sensor_reading
if touch_data is not None:
raw_touch_value = touch_data.raw_touch_value
"""
return self._raw_touch_value
@property
def is_being_touched(self) -> bool:
"""The robot's conclusion on whether the current value is considered
a valid touch.
.. testcode::
import anki_vector
with anki_vector.Robot() as robot:
touch_data = robot.touch.last_sensor_reading
if touch_data is not None:
is_being_touched = touch_data.is_being_touched
"""
return self._is_being_touched
class TouchComponent(util.Component):
"""Maintains the most recent touch sensor data
This will be updated with every broadcast RobotState, and can be queried at any time.
.. testcode::
import anki_vector
with anki_vector.Robot() as robot:
touch_data = robot.touch.last_sensor_reading
if touch_data is not None:
print('Touch sensor value: {0}, is being touched: {1}'.format(touch_data.raw_touch_value, touch_data.is_being_touched))
"""
def __init__(self, robot):
super().__init__(robot)
self._last_sensor_reading = None
# Subscribe to a callback that updates the robot's local properties - which includes touch data.
self._robot.events.subscribe(self._on_robot_state,
Events.robot_state,
on_connection_thread=True)
def close(self):
"""Closing the touch component will unsubscribe from robot state updates."""
self._robot.events.unsubscribe(self._on_robot_state,
Events.robot_state)
@property
def last_sensor_reading(self) -> TouchSensorData:
""":class:`anki_vector.touch.TouchSensorData`: The last reported sensor data.
.. testcode::
import anki_vector
with anki_vector.Robot() as robot:
touch_data = robot.touch.last_sensor_reading
"""
return self._last_sensor_reading
def _on_robot_state(self, _, msg):
self._last_sensor_reading = TouchSensorData(msg.touch_data)
``` |
{
"source": "JohnnyDeuss/DPPy",
"score": 2
} |
#### File: DPPy/dppy/beta_ensemble_polynomial_potential.py
```python
import numpy as np
from numpy.polynomial.chebyshev import poly2cheb # cf rescale_largest_eig_val
import scipy.linalg as la
import matplotlib.pyplot as plt
from dppy.beta_ensemble_polynomial_potential_core import (polynomial_in_negative_log_conditional_a_coef as P_a_cond,
polynomial_in_negative_log_conditional_b_coef as P_b_cond,
sampler_exact_convex_quartic,
sampler_mala,
equilibrium_x2m,
equilibrium_x2_x4,
equilibrium_ClItKr10,
check_random_state)
class BetaEnsemblePolynomialPotential(object):
"""docstring for BetaEnsemblePolynomialPotential"""
def __init__(self, beta, potential_V, name=None):
self.beta = beta
self.V = potential_V
if not (self.beta > 0):
raise ValueError('beta = {} <= 0'.format(beta))
if self.V.order > 7:
str_ = ['Polynomial potentials V are allowed up to degree 6',
'Given\n',
' '.join(['g_{}={}'.format(n, g_n)
for n, g_n in enumerate(self.V.coef[::-1])])]
raise ValueError(' '.join(str_))
if self.V[5]:
raise ValueError('Potentials V = ... + g_5 x^5 + ... not supported, given g_5={}'.format(self.V[5]))
if self.V.order % 2:
raise ValueError('V has odd degree deg(V)={}'.format(self.V.order))
if self.V[self.V.order] < 0:
raise ValueError('V = g_{} x^{} + ... with g_{} < 0'.format(*[self.V.order] * 3))
if self.V[0] != 0:
print('A constant term g_0={} in V has no effect; resetting g_0 = 0'.format(self.V[0]))
self.V[0] = 0
self._V_ClItKr10 = np.poly1d([1 / 20, -4 / 15, 1 / 5, 8 / 5, 0])
self.equilibrium_density, self.support = self.__compute_equilibrium()
# Sampling
self.N = None
self.nb_gibbs_passes = None
def __str__(self):
return '\n'.join(['beta={}'.format(self.beta),
'V(x) =\n{}'.format(self.V.__str__())])
def __compute_equilibrium(self):
""" Update attribute equilibrium_density and support if available
"""
equi_dens, supp = None, None
deg_V = self.V.order
deg_V_2, deg_V_odd = divmod(deg_V, 2)
set_non_zero_coefs = set(np.nonzero(self.V.coef[::-1])[0])
if not deg_V_odd:
if self.V == self._V_ClItKr10:
equi_dens, supp = equilibrium_ClItKr10()
elif set_non_zero_coefs == {deg_V}:
equi_dens, supp = equilibrium_x2m(deg_V_2,
deg_V * self.V[deg_V])
elif set_non_zero_coefs == {2, 4}:
equi_dens, supp = equilibrium_x2_x4(2 * self.V[2],
4 * self.V[4])
return equi_dens, supp
def sample_mcmc(self,
N=10,
nb_gibbs_passes=10,
sample_exact_cond=False,
nb_mala_steps=100,
return_chain_of_eig_vals=False,
return_chain_of_lambda_max=False,
random_state=None):
""" Gibbs sampler on Jacobi matrices to sample approximately from the corresponding :math:`\\beta`-ensemble.
:param N:
Number of points/size of the :math:`\\beta`-ensemble
:type N:
int
:param nb_gibbs_passes:
Number of passes/sweeps over the variables using the Gibbs sampler
:type nb_gibbs_passes:
int
:param sample_exact_cond:
Flag to force (``True``) exact sampling from the conditionals when it is possible.
Otherwise run MALA for ``nb_mala_steps`` steps to sample from the conditionals.
:type sample_exact_cond:
bool (default False)
:param nb_mala_steps:
Number of steps of the Metropolis Adjusted Langevin Algorithm (MALA) to perform when the conditionals are sampled approximately
:type nb_mala_steps:
int, default 100
:param return_chain_of_eig_vals:
Flag to return the chain of eigenvalues associated to the chain of Jacobi matrices.
If ``True`` the whole chain of eigenvalues is returned
If ``False`` only the last sequence of eigenvalues is returned
:type return_chain_of_eig_vals:
bool (default False)
:param return_chain_of_lambda_max:
Flag to return the chain of the **largest** eigenvalues associated to the chain of Jacobi matrices.
If ``True`` the whole chain of the **largest** eigenvalues is returned
If ``False`` only the **largest** eigenvalue of the last Jacobi matrix is returned
:type return_chain_of_lambda_max:
bool (default False)
"""
rng = check_random_state(random_state)
if sample_exact_cond:
if self.V[3]:
raise ValueError('Sampling exactly the conditionals a_i |... from V = ... + x^3 + ... is not supported, given g_3={}. Conditionals are not log-concave, cannot use Dev12 sampler'.format(self.V[3]))
if self.V.order >= 5:
raise ValueError('Sampling exactly the conditionals a_i |... from V = ... + x^5 + ... is not supported, deg(V)={}>=5. Conditionals are not log-concave, cannot use Dev12 sampler'.format(self.V.order))
even_coefs_V = self.V.coef[::-1][2::2]
if not all(even_coefs_V >= 0):
raise ValueError('\n'.join(
['even coefs of V are not all >=0',
', '.join(['g_{}={}'.format(2 * (n + 1), g_2n)
for n, g_2n in enumerate(even_coefs_V)]),
'Conditionals are not log-concave, cannot use Dev12 sampler',
'You may retry switching `sample_exact_cond` to False']))
self.N = N
self.nb_gibbs_passes = nb_gibbs_passes
a, b = np.zeros((2, N + 3))
if return_chain_of_eig_vals:
eig_vals = np.zeros((N, nb_gibbs_passes))
elif return_chain_of_lambda_max:
lambda_max = np.zeros(nb_gibbs_passes)
for p in range(nb_gibbs_passes):
if (p + 1) % 50 == 0:
print(p + 1)
for i in range(1, N + 1):
# a_i | ... propto exp - P_a_i
P_a_i = 0.5 * self.beta * N * P_a_cond(i, a, b, self.V)
if sample_exact_cond:
a[i], _ = sampler_exact_convex_quartic(
P=P_a_i,
random_state=rng)
else:
a[i] = sampler_mala(a[i],
V=P_a_i,
sigma=0.01,
nb_steps=nb_mala_steps,
random_state=rng)
# b_i | ... propto x^(shape-1) * exp - P_b_i
if i < N:
P_b_i = 0.5 * self.beta * N * P_b_cond(i, a, b, self.V)
b[i], _ = sampler_exact_convex_quartic(
P=P_b_i,
shape=0.5 * self.beta * (N - i),
random_state=rng)
if return_chain_of_eig_vals:
eig_vals[:, p] = la.eigvalsh_tridiagonal(a[1:N + 1],
np.sqrt(b[1:N]))
elif return_chain_of_lambda_max:
lambda_max[p] = la.eigvalsh_tridiagonal(
a[1:N + 1],
np.sqrt(b[1:N]),
select='i',
select_range=(N - 1, N - 1))[0]
if return_chain_of_eig_vals:
return eig_vals
if return_chain_of_lambda_max:
return lambda_max
return la.eigvalsh_tridiagonal(a[1:N + 1], np.sqrt(b[1:N]))
def hist(self, sampl, save_file_name=False):
""" Display the histogram of a ``sampl`` from the corresponding :math:`\\beta`-ensemble and the corresponding equilibrium distribution when available
:param sampl:
One or multiple samples from the corresponding :math:`\\beta`-ensemble.
**In any case ``sampl`` is flattened** as if the samples were concatenated
:type sampl:
array_like
:param save_file_name:
File name, e.g. ``figure.pdf``, to save the plot
:type save_file_name:
str
.. seealso::
:py:func:`__compute_equilibrium`
"""
fig, ax = plt.subplots(1, 1)
# Title
# V_x = ' '.join(['V(x) =',
# ' + '.join([r'$g_{} x^{}$'.format(n, n)
# for n, g_n in enumerate(self.V.coef[::-1],
# start=0)
# if g_n])])
# with_coefs = ', '.join([r'$g_{}={:0.2f}$'.format(n, g_n)
# for n, g_n in enumerate(self.V.coef[::-1])
# if g_n])
# beta_N_passes = r'$\beta={}, N={}$ #Gibbs_passes={}'.format(
# self.beta, self.N, self.nb_gibbs_passes)
# plt.title('\n'.join([V_x, with_coefs, beta_N_passes]))
# histogram
ax.hist(np.ravel(sampl),
density=True,
histtype='step',
lw=3,
bins=30,
label='histogram')
if self.equilibrium_density is not None and self.support is not None:
# equilibrium_measure
x = np.linspace(1.1 * self.support[0],
1.1 * self.support[1],
300)
ax.plot(x, self.equilibrium_density(x),
label=r'$\mu_{eq}$', lw=3, c='k')
# start, end = ax.get_xlim()
# ax.xaxis.set_ticks(np.arange(-1.5, 2.1, 1.5))
ax.xaxis.set_ticks(np.arange(-2, 2.1, 1))
plt.legend(loc='best',
fontsize='x-large',
frameon=False,
handlelength=1)
ax.tick_params(axis='both', which='major', labelsize=15)
plt.tight_layout()
if save_file_name:
plt.savefig(save_file_name)
def rescale_largest_eig_val(self, lambda_max):
""" Rescale the largest eigenvalue to see Tracy-Widom fluctuations
.. math::
N^{\\frac{2}{3}} c_v (\\lambda_{\\max} - b_v)
where
.. math::
c_v = (b_v - a_v)^{-\\frac{1}{3}} \\left(\\sum_{k=1}^{\\infty} k {V'}_k \\right)^{\\frac{2}{3}}
with :math:`{V'}_k` being the Chebychev coefficients of
.. math::
V'(\\frac{a_v + b_v}{2} + \\frac{b_v - a_v}{2} X)
.. seealso::
- Section 3.2 https://arxiv.org/pdf/1210.2199.pdf
- :cite:`OlNaTr14` p.5 Equation 2.3 `https://arxiv.org/pdf/1404.0071.pdf <https://arxiv.org/pdf/1404.0071.pdf>`_
- `poly2cheb <https://docs.scipy.org/doc/numpy/reference/generated/numpy.polynomial.chebyshev.poly2cheb.html?>`_
"""
a_v, b_v = self.support
shift = np.poly1d([0.5 * (b_v - a_v), 0.5 * (b_v + a_v)])
dV_shift = self.V.deriv(m=1)(shift)
dV_shift_cheb = poly2cheb(dV_shift.coeffs[::-1])
sum_k_dV_k = sum(k * dV_k
for k, dV_k in enumerate(dV_shift_cheb[1:], start=1))
c_v = np.cbrt(sum_k_dV_k**2 / (b_v - a_v))
return self.N**(2 / 3) * c_v * (lambda_max - b_v)
```
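The rescaling above reduces to the familiar GUE edge scaling for a quadratic potential. The following standalone sketch is an addition (not part of the DPPy sources); it assumes the quadratic potential V(x) = x^2/2 with semicircle equilibrium support [-2, 2], and reproduces the computation of the constant c_V with plain NumPy. It should return 1.0, recovering the usual N^{2/3}(lambda_max - 2) rescaling.

```python
import numpy as np
from numpy.polynomial.chebyshev import poly2cheb

def rescaling_constant_quadratic():
    # Quadratic potential V(x) = x^2 / 2, equilibrium support [-2, 2] (assumed).
    a_v, b_v = -2.0, 2.0
    V = np.poly1d([0.5, 0.0, 0.0])
    # Affine map of [-1, 1] onto [a_v, b_v], as in rescale_largest_eig_val.
    shift = np.poly1d([0.5 * (b_v - a_v), 0.5 * (b_v + a_v)])
    dV_shift = V.deriv(m=1)(shift)                    # V'(2x) = 2x
    dV_shift_cheb = poly2cheb(dV_shift.coeffs[::-1])  # Chebyshev coefficients
    sum_k_dV_k = sum(k * c for k, c in enumerate(dV_shift_cheb[1:], start=1))
    return np.cbrt(sum_k_dV_k ** 2 / (b_v - a_v))     # expected: 1.0

print(rescaling_constant_quadratic())
```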
#### File: DPPy/dppy/exact_sampling.py
```python
import numpy as np
import scipy.linalg as la
from dppy.utils import inner1d, check_random_state, get_progress_bar
from dppy.intermediate_sampling import (vfx_sampling_precompute_constants,
vfx_sampling_do_sampling_loop,
alpha_dpp_sampling_precompute_constants,
alpha_dpp_sampling_do_sampling_loop)
#####################
# Projection kernel #
#####################
# Sample projection DPP from kernel
def proj_dpp_sampler_kernel(kernel, mode='GS', size=None, random_state=None):
"""
.. seealso::
- :func:`proj_dpp_sampler_kernel_GS <proj_dpp_sampler_kernel_GS>`
- :func:`proj_dpp_sampler_kernel_Schur <proj_dpp_sampler_kernel_Schur>`
- :func:`proj_dpp_sampler_kernel_Chol <proj_dpp_sampler_kernel_Chol>`
"""
rng = check_random_state(random_state)
if size:
rank = np.rint(np.trace(kernel)).astype(int)
if size > rank:
raise ValueError('size k={} > rank={}'. format(size, rank))
# Sample from orthogonal projection kernel K = K^2 = K.H K
if mode == 'GS': # Gram-Schmidt equiv Cholesky
sampl = proj_dpp_sampler_kernel_GS(kernel, size, rng)
elif mode == 'Chol': # Cholesky updates of Pou19
sampl = proj_dpp_sampler_kernel_Chol(kernel, size, rng)[0]
elif mode == 'Schur': # Schur complement
sampl = proj_dpp_sampler_kernel_Schur(kernel, size, rng)
else:
str_list = ['Invalid sampling mode, choose among:',
'- "GS (default)',
'- "Chol"',
'- "Schur"',
'Given "{}"'.format(mode)]
raise ValueError('\n'.join(str_list))
return sampl
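# --- Illustrative usage sketch (added comment; not part of the original DPPy
# source). It builds a small orthogonal projection kernel K = U U^T and draws a
# sample from DPP(K) with the dispatcher above. The helper name and parameter
# values are assumptions for the example; call it manually, e.g. from a REPL.
def _demo_proj_dpp_sampler_kernel(N=10, rank=4, seed=0):
    rng = np.random.RandomState(seed)
    U, _ = np.linalg.qr(rng.randn(N, rank))  # N x rank matrix with orthonormal columns
    K = U.dot(U.T)                           # projection kernel, trace(K) = rank
    return proj_dpp_sampler_kernel(K, mode='GS', random_state=rng)  # list of `rank` indices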
def proj_dpp_sampler_kernel_Chol(K, size=None, random_state=None):
""" Sample from:
- :math:`\\operatorname{DPP}(K)` with orthogonal projection **correlation** kernel :math:`K` if ``size`` is not provided
- :math:`\\operatorname{k-DPP}` with orthogonal projection **likelihood** kernel :math:`K` with :math:`k=` ``size``
Chain rule is applied by performing Cholesky updates of :math:`K`.
:param K:
Orthogonal projection kernel.
:type K:
array_like
:param k:
Size of the sample.
Default is :math:`k=\\operatorname{trace}(K)=\\operatorname{rank}(K)`.
:type k:
int
:return:
If ``size`` is not provided (None),
A sample :math:`\\mathcal{X}` from :math:`\\operatorname{DPP}(K)`.
If ``size`` is provided,
A sample :math:`\\mathcal{X}` from :math:`\\operatorname{k-DPP}(K)`.
along with in-place Cholesky factorization of :math:`\\mathbf{K}_{\\mathcal{X} }`
:rtype:
list and array_like
.. caution::
The current implementation is an attempt of @guilgautier to reproduce the original C implementation of `catamari <https://gitlab.com/hodge_star/catamari>`_
.. seealso::
- :cite:`Pou19` Algorithm 3 and the `catamari code <https://gitlab.com/hodge_star/catamari/blob/38718a1ea34872fb6567e019ece91fbeb5af5be1/include/catamari/dense_dpp/elementary_hermitian_dpp-impl.hpp#L37>`_ for the Hermitian swap routine.
- :func:`proj_dpp_sampler_kernel_GS <proj_dpp_sampler_kernel_GS>`
- :func:`proj_dpp_sampler_kernel_Schur <proj_dpp_sampler_kernel_Schur>`
"""
rng = check_random_state(random_state)
hermitian = True if K.dtype.kind == 'c' else False
N, rank = len(K), np.rint(np.trace(K)).astype(int)
if size is None: # full projection DPP
size = rank
# else: k-DPP with k = size
A = K.copy()
d = np.diagonal(A).astype(float)
orig_indices = np.arange(N)
for j in range(size):
# Sample from pivot index and permute
t = rng.choice(range(j, N), p=np.abs(d[j:]) / (rank - j))
# Hermitian swap of indices j and t of A (may be written in a function)
# bottom swap
A[t + 1:, [j, t]] = A[t + 1:, [t, j]]
# inner swap
tmp = A[j + 1:t, j].copy()
np.conj(A[t, j + 1:t], out=A[j + 1:t, j])
np.conj(tmp, out=A[t, j + 1:t])
# corner swap
A[t, j] = A[t, j].conj()
# diagonal swap
A[[j, t], [j, t]] = A[[t, j], [t, j]].real
# left swap
A[[j, t], :j] = A[[t, j], :j]
# Swap positions j and t of orig_indices and d
orig_indices[[j, t]] = orig_indices[[t, j]]
d[[j, t]] = d[[t, j]]
A[j, j] = np.sqrt(d[j])
if j == size - 1:
break
# Form new column and update diagonal
A[j + 1:, j] -= A[j + 1:, :j].dot(A[j, :j].conj())
A[j + 1:, j] /= A[j, j]
if hermitian:
d[j + 1:] -= A[j + 1:, j].real**2 + A[j + 1:, j].imag**2
else:
d[j + 1:] -= A[j + 1:, j]**2
return orig_indices[:size].tolist(), A[:size, :size]
def proj_dpp_sampler_kernel_GS(K, size=None, random_state=None):
""" Sample from:
- :math:`\\operatorname{DPP}(K)` with orthogonal projection **correlation** kernel :math:`K` if ``size`` is not provided
- :math:`\\operatorname{k-DPP}` with orthogonal projection **likelihood** kernel :math:`K` with :math:`k=` ``size``
Chain rule is applied by performing sequential Gram-Schmidt orthogonalization or equivalently Cholesky decomposition updates of :math:`K`.
:param K:
Orthogonal projection kernel.
:type K:
array_like
:param k:
Size of the sample.
Default is :math:`k=\\operatorname{trace}(K)=\\operatorname{rank}(K)`.
:type k:
int
:return:
If ``size`` is not provided (None),
A sample from :math:`\\operatorname{DPP}(K)`.
If ``size`` is provided,
A sample from :math:`\\operatorname{k-DPP}(K)`.
:rtype:
array_like
.. seealso::
- :cite:`TrBaAm18` Algorithm 3, :cite:`Gil14` Algorithm 2
- :func:`proj_dpp_sampler_kernel_Schur <proj_dpp_sampler_kernel_Schur>`
- :func:`proj_dpp_sampler_kernel_Chol <proj_dpp_sampler_kernel_Chol>`
"""
rng = check_random_state(random_state)
# Initialization
# ground set size / rank(K) = Tr(K)
N, rank = len(K), np.rint(np.trace(K)).astype(int)
if size is None: # full projection DPP
size = rank
# else: k-DPP with k = size
ground_set = np.arange(N)
sampl = np.zeros(size, dtype=int) # sample list
avail = np.ones(N, dtype=bool) # available items
c = np.zeros((N, size))
norm_2 = K.diagonal().copy() # residual norm^2
for it in range(size):
j = rng.choice(ground_set[avail],
p=np.abs(norm_2[avail]) / (rank - it))
sampl[it] = j
if it == size - 1:
break
# Update the Cholesky factor
avail[j] = False
c[avail, it] = (K[avail, j] - c[avail, :it].dot(c[j, :it]))\
/ np.sqrt(norm_2[j])
norm_2[avail] -= c[avail, it]**2
return sampl.tolist() # , np.prod(norm_2[sampl])
def proj_dpp_sampler_kernel_Schur(K, size=None, random_state=None):
""" Sample from:
- :math:`\\operatorname{DPP}(K)` with orthogonal projection **correlation** kernel :math:`K` if ``size`` is not provided
- :math:`\\operatorname{k-DPP}` with orthogonal projection **likelihood** kernel :math:`K` with :math:`k=` ``size``
Chain rule is applied by computing the Schur complements.
:param K:
Orthogonal projection kernel.
:type K:
array_like
:param size:
Size of the sample.
Default is :math:`k=\\operatorname{trace}(K)=\\operatorname{rank}(K)`.
:type size:
int
:return:
If ``size`` is not provided (None),
A sample from :math:`\\operatorname{DPP}(K)`.
If ``size`` is provided,
A sample from :math:`\\operatorname{k-DPP}(K)`.
:rtype:
array_like
.. seealso::
- :func:`proj_dpp_sampler_kernel_GS <proj_dpp_sampler_kernel_GS>`
- :func:`proj_dpp_sampler_kernel_Chol <proj_dpp_sampler_kernel_Chol>`
"""
rng = check_random_state(random_state)
# Initialization
# ground set size / rank(K) = Tr(K)
N, rank = len(K), np.rint(np.trace(K)).astype(int)
if size is None: # full projection DPP
size = rank
# else: k-DPP with k = size
ground_set = np.arange(N)
sampl = np.zeros(size, dtype=int) # sample list
avail = np.ones(N, dtype=bool) # available items
# Schur complement list i.e. residual norm^2
schur_comp = K.diagonal().copy()
K_inv = np.zeros((size, size))
for it in range(size):
# Pick a new item proportionally to residual norm^2
j = rng.choice(ground_set[avail],
p=np.abs(schur_comp[avail]) / (rank - it))
# store the item and make it unavailable
sampl[it], avail[j] = j, False
# Update Schur complements K_ii - K_iY (K_Y)^-1 K_Yi
#
# 1) use Woodbury identity to update K[Y,Y]^-1 to K[Y+j,Y+j]^-1
# K[Y+j,Y+j]^-1 =
# [ K[Y,Y]^-1 + (K[Y,Y]^-1 K[Y,j] K[j,Y] K[Y,Y]^-1)/schur_j,
# -K[Y,Y]^-1 K[Y,j]/schur_j]
# [ -K[j,Y] K[Y,Y]^-1/schur_j,
# 1/schur_j]
if it == 0:
K_inv[0, 0] = 1.0 / K[j, j]
elif it == 1:
i = sampl[0]
K_inv[:2, :2] = np.array([[K[j, j], -K[j, i]],
[-K[j, i], K[i, i]]])\
/ (K[i, i] * K[j, j] - K[j, i]**2)
elif it < size - 1:
temp = K_inv[:it, :it].dot(K[sampl[:it], j]) # K_Y^-1 K_Yj
# K_jj - K_jY K_Y^-1 K_Yj
schur_j = K[j, j] - K[j, sampl[:it]].dot(temp)
K_inv[:it, :it] += np.outer(temp, temp / schur_j)
K_inv[:it, it] = - temp / schur_j
K_inv[it, :it] = K_inv[:it, it]
K_inv[it, it] = 1.0 / schur_j
else: # it == size-1
break # no need to update for nothing
# 2) update Schur complements
# K_ii - K_iY (K_Y)^-1 K_Yi for Y <- Y+j
K_iY = K[np.ix_(avail, sampl[:it + 1])]
schur_comp[avail] = K[avail, avail]\
- inner1d(K_iY.dot(K_inv[:it+1, :it+1]), K_iY, axis=1)
return sampl.tolist()
##################
# Generic kernel #
##################
# Directly from correlation kernel, without spectral decomposition
##################################################################
def dpp_sampler_generic_kernel(K, random_state=None):
""" Sample from generic :math:`\\operatorname{DPP}(\\mathbf{K})` with potentially non hermitian correlation kernel :math:`\\operatorname{DPP}(\\mathbf{K})` based on :math:`LU` factorization procedure.
:param K:
Correlation kernel (potentially non hermitian).
:type K:
array_like
:return:
A sample :math:`\\mathcal{X}` from :math:`\\operatorname{DPP}(K)` and
the in-place :math:`LU` factorization of :math:`K - I_{\\mathcal{X}^{c}}` where :math:`I_{\\mathcal{X}^{c}}` is the diagonal indicator matrix for the entries not in the sample :math:`\\mathcal{X}`.
:rtype:
list and array_like
.. seealso::
- :cite:`Pou19` Algorithm 1
"""
rng = check_random_state(random_state)
A = K.copy()
sample = []
for j in range(len(A)):
if rng.rand() < A[j, j]:
sample.append(j)
else:
A[j, j] -= 1
A[j + 1:, j] /= A[j, j]
A[j + 1:, j + 1:] -= np.outer(A[j + 1:, j], A[j, j + 1:])
# A[j+1:, j+1:] -= np.einsum('i,j', A[j+1:, j], A[j, j+1:])
return sample, A
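# --- Illustrative usage sketch (added comment; not part of the original DPPy
# source). The LU-based routine above only requires a valid correlation kernel,
# i.e. a Hermitian K with eigenvalues in [0, 1]; the sample size is then random.
# Helper name and parameters are assumptions for the example; call it manually.
def _demo_dpp_sampler_generic_kernel(N=8, seed=0):
    rng = np.random.RandomState(seed)
    U, _ = np.linalg.qr(rng.randn(N, N))       # random orthonormal basis
    eig_vals = rng.uniform(0.0, 1.0, size=N)   # valid correlation eigenvalues
    K = (U * eig_vals).dot(U.T)                # K = U diag(eig_vals) U^T
    sample, _ = dpp_sampler_generic_kernel(K, random_state=rng)
    return sample                              # random-size subset of {0, ..., N-1}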
# From spectral decomposition
#############################
# Phase 1: subsample eigenvectors by drawing independent Bernoulli variables with parameter the eigenvalues of the correlation kernel K.
def dpp_eig_vecs_selector(ber_params, eig_vecs,
random_state=None):
""" Phase 1 of exact sampling procedure. Subsample eigenvectors :math:`V` of the initial kernel (correlation :math:`K`, resp. likelihood :math:`L`) to build a projection DPP with kernel :math:`U U^{\\top}` from which sampling is easy.
The selection is made based on a realization of Bernoulli variables with parameters given by the eigenvalues of :math:`K`.
:param ber_params:
Parameters of the Bernoulli variables
:math:`\\lambda^K=\\lambda^L/(1+\\lambda^L)`
:type ber_params:
list, array_like
:param eig_vecs:
Collection of eigenvectors of the kernel :math:`K`, resp. :math:`L`
:type eig_vecs:
array_like
:return:
selected eigenvectors
:rtype:
array_like
.. seealso::
- :func:`dpp_sampler_eig <dpp_sampler_eig>`
"""
rng = check_random_state(random_state)
# Realisation of Bernoulli random variables with params ber_params
ind_sel = rng.rand(ber_params.size) < ber_params
return eig_vecs[:, ind_sel]
# Phase 2:
# Sample projection kernel VV.T where V are the eigvecs selected in Phase 1.
def proj_dpp_sampler_eig(eig_vecs, mode='GS', size=None,
random_state=None):
""" Sample from projection :math:`\\operatorname{DPP}(K)` using the eigendecomposition of the projection kernel :math:`K=VV^{\\top}` where :math:`V^{\\top}V = I_r` and :math:`r=\\operatorname{rank}(\\mathbf{K})`.
.. seealso::
Phase 1:
- :func:`dpp_eig_vecs_selector <dpp_eig_vecs_selector>`
Phase 2:
- :func:`proj_dpp_sampler_eig_GS <proj_dpp_sampler_eig_GS>`
- :func:`proj_dpp_sampler_eig_GS_bis <proj_dpp_sampler_eig_GS_bis>`
- :func:`proj_dpp_sampler_eig_KuTa12 <proj_dpp_sampler_eig_KuTa12>`
"""
rng = check_random_state(random_state)
if eig_vecs.shape[1]:
# Phase 2: Sample from projection kernel VV.T
# Chain rule, conditionals are updated using:
if mode == 'GS': # Gram-Schmidt
sampl = proj_dpp_sampler_eig_GS(eig_vecs, size, rng)
elif mode == 'GS_bis': # Slight modif of 'GS'
sampl = proj_dpp_sampler_eig_GS_bis(eig_vecs, size, rng)
elif mode == 'KuTa12': # cf Kulesza-Taskar
sampl = proj_dpp_sampler_eig_KuTa12(eig_vecs, size, rng)
else:
str_list = ['Invalid sampling mode, choose among:',
'- "GS" (default)',
'- "GS_bis"',
'- "KuTa12"',
'Given "{}"'.format(mode)]
raise ValueError('\n'.join(str_list))
else:
sampl = []
return sampl
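# --- Illustrative two-phase sketch (added comment; not part of the original
# DPPy source): Phase 1 subsamples eigenvectors with Bernoulli(eigenvalue)
# draws, Phase 2 samples the resulting projection DPP. Helper name and
# parameters are assumptions for the example; call it manually.
def _demo_spectral_dpp_sampler(N=10, seed=0):
    rng = np.random.RandomState(seed)
    U, _ = np.linalg.qr(rng.randn(N, N))      # eigenvectors of a correlation kernel K
    eig_vals = rng.uniform(0.0, 1.0, size=N)  # eigenvalues of K, all in [0, 1]
    V = dpp_eig_vecs_selector(eig_vals, U, random_state=rng)      # Phase 1
    return proj_dpp_sampler_eig(V, mode='GS', random_state=rng)   # Phase 2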
# Using Gram-Schmidt orthogonalization
def proj_dpp_sampler_eig_GS(eig_vecs, size=None,
random_state=None):
""" Sample from projection :math:`\\operatorname{DPP}(K)` using the eigendecomposition of the projection kernel :math:`K=VV^{\\top}` where :math:`V^{\\top}V = I_r` and :math:`r=\\operatorname{rank}(\\mathbf{K})`.
It performs sequential update of Cholesky decomposition, which is equivalent to Gram-Schmidt orthogonalization of the rows of the eigenvectors.
:param eig_vecs:
Eigenvectors used to form projection kernel :math:`K=VV^{\\top}`.
:type eig_vecs:
array_like
:return:
A sample from projection :math:`\\operatorname{DPP}(K)`.
:rtype:
list, array_like
.. seealso::
- :cite:`TrBaAm18` Algorithm 3, :cite:`Gil14` Algorithm 2
- :func:`proj_dpp_sampler_eig_GS_bis <proj_dpp_sampler_eig_GS_bis>`
- :func:`proj_dpp_sampler_eig_KuTa12 <proj_dpp_sampler_eig_KuTa12>`
"""
rng = check_random_state(random_state)
# Initialization
V = eig_vecs
N, rank = V.shape # ground set size / rank(K)
if size is None: # full projection DPP
size = rank
# else: k-DPP with k = size
ground_set = np.arange(N)
sampl = np.zeros(size, dtype=int) # sample list
avail = np.ones(N, dtype=bool) # available items
# Phase 1: Already performed!
# Select eigvecs with Bernoulli variables with parameter = eigvals of K.
# Phase 2: Chain rule
# Use Gram-Schmidt recursion to compute the Vol^2 of the parallelepiped spanned by the feature vectors associated to the sample
c = np.zeros((N, size))
norms_2 = inner1d(V, axis=1) # ||V_i:||^2
for it in range(size):
# Pick an item \propto this squared distance
j = rng.choice(ground_set[avail],
p=np.abs(norms_2[avail]) / (rank - it))
sampl[it] = j
if it == size - 1:
break
# Cancel the contribution of V_j to the remaining feature vectors
avail[j] = False
c[avail, it] =\
(V[avail, :].dot(V[j, :]) - c[avail, :it].dot(c[j, :it]))\
/ np.sqrt(norms_2[j])
norms_2[avail] -= c[avail, it]**2 # update residual norm^2
return sampl.tolist()
# Slight modif of Gram-Schmidt above
def proj_dpp_sampler_eig_GS_bis(eig_vecs, size=None, random_state=None):
""" Sample from projection :math:`\\operatorname{DPP}(K)` using the eigendecomposition of the projection kernel :math:`K=VV^{\\top}` where :math:`V^{\\top}V = I_r` and :math:`r=\\operatorname{rank}(\\mathbf{K})`.
It performs sequential Gram-Schmidt orthogonalization of the rows of the eigenvectors.
:param eig_vecs:
Eigenvectors used to form projection kernel :math:`K=VV^{\\top}`.
:type eig_vecs:
array_like
:return:
A sample from projection :math:`\\operatorname{DPP}(K)`.
:rtype:
list, array_like
.. seealso::
- This is a slight modification of :func:`proj_dpp_sampler_eig_GS <proj_dpp_sampler_eig_GS>`
- :func:`proj_dpp_sampler_eig_KuTa12 <proj_dpp_sampler_eig_KuTa12>`
"""
rng = check_random_state(random_state)
# Initialization
V = eig_vecs.copy()
N, rank = V.shape # ground set size / rank(K)
if size is None: # full projection DPP
size = rank
# else: k-DPP with k = size
ground_set = np.arange(N)
sampl = np.zeros(size, dtype=int) # sample list
avail = np.ones(N, dtype=bool) # available items
# Phase 1: Already performed!
# Select eigvecs with Bernoulli variables with parameter = eigvals of K.
# Phase 2: Chain rule
# Use Gram-Schmidt recursion to compute the Vol^2 of the parallelepiped spanned by the feature vectors associated to the sample
# Matrix of the contribution of remaining vectors
# <V_i, P_{V_Y}^{orthog} V_j>
contrib = np.zeros((N, size))
norms_2 = inner1d(V, axis=1) # ||V_i:||^2
for it in range(size):
# Pick an item proportionally to the residual norm^2
# ||P_{V_Y}^{orthog} V_j||^2
j = rng.choice(ground_set[avail],
p=np.abs(norms_2[avail]) / (rank - it))
sampl[it] = j
if it == size - 1:
break
# Update the residual norm^2
#
# |P_{V_Y+j}^{orthog} V_i|^2
# <V_i,P_{V_Y}^{orthog} V_j>^2
# = |P_{V_Y}^{orthog} V_i|^2 - ----------------------------
# |P_{V_Y}^{orthog} V_j|^2
#
# 1) Orthogonalize V_j w.r.t. orthonormal basis of Span(V_Y)
# V'_j = P_{V_Y}^{orthog} V_j
# = V_j - <V_j,sum_Y V'_k>V'_k
# = V_j - sum_Y <V_j, V'_k> V'_k
# Note V'_j is not normalized
avail[j] = False
V[j, :] -= contrib[j, :it].dot(V[sampl[:it], :])
# 2) Compute <V_i, V'_j> = <V_i, P_{V_Y}^{orthog} V_j>
contrib[avail, it] = V[avail, :].dot(V[j, :])
# 3) Normalize V'_j with norm^2 and not norm
# V'_j P_{V_Y}^{orthog} V_j
# V'_j = ------- = --------------------------
# |V'j|^2 |P_{V_Y}^{orthog} V_j|^2
#
# in preparation for next orthogonalization in 1)
V[j, :] /= norms_2[j]
# 4) Update the residual norm^2: cancel contrib of V_i onto V_j
#
# |P_{V_Y+j}^{orthog} V_i|^2
# = |P_{V_Y}^{orthog} V_i|^2 - <V_i,V'_j>^2 / |V'j|^2
# <V_i,P_{V_Y}^{orthog} V_j>^2
# = |P_{V_Y}^{orthog} V_i|^2 - ----------------------------
# |P_{V_Y}^{orthog} V_j|^2
norms_2[avail] -= contrib[avail, it]**2 / norms_2[j]
return sampl.tolist()
def proj_dpp_sampler_eig_KuTa12(eig_vecs, size=None, random_state=None):
""" Sample from :math:`\\operatorname{DPP}(K)` using the eigendecomposition of the similarity kernel :math:`K`.
It is based on the orthogonalization of the selected eigenvectors.
:param eig_vals:
Collection of eigen values of the similarity kernel :math:`K`.
:type eig_vals:
list
:param eig_vecs:
Eigenvectors of the similarity kernel :math:`K`.
:type eig_vecs:
array_like
:return:
A sample from :math:`\\operatorname{DPP}(K)`.
:rtype:
list
.. seealso::
- :cite:`KuTa12` Algorithm 1
- :func:`proj_dpp_sampler_eig_GS <proj_dpp_sampler_eig_GS>`
- :func:`proj_dpp_sampler_eig_GS_bis <proj_dpp_sampler_eig_GS_bis>`
"""
rng = check_random_state(random_state)
# Initialization
V = eig_vecs.copy()
N, rank = V.shape # ground set size / rank(K)
if size is None: # full projection DPP
size = rank
# else: k-DPP with k = size
sampl = np.zeros(size, dtype=int) # sample list
# Phase 1: Already performed!
# Select eigvecs with Bernoulli variables with parameter the eigvals
# Phase 2: Chain rule
norms_2 = inner1d(V, axis=1) # ||V_i:||^2
# Following [Algo 1, KuTa12], the aim is to compute the orthogonal complement of the subspace spanned by the selected eigenvectors to the canonical vectors \{e_i ; i \in Y\}. We proceed recursively.
for it in range(size):
j = rng.choice(N, p=np.abs(norms_2) / (rank - it))
sampl[it] = j
if it == size - 1:
break
# Cancel the contribution of e_i to the remaining vectors that is, find the subspace of V that is orthogonal to \{e_i ; i \in Y\}
# Take the index of a vector that has a non null contribution on e_j
k = np.where(V[j, :] != 0)[0][0]
# Cancel the contribution of the remaining vectors on e_j, but stay in the subspace spanned by V i.e. get the subspace of V orthogonal to \{e_i ; i \in Y\}
V -= np.outer(V[:, k] / V[j, k], V[j, :])
# V_:j is set to 0 so we delete it and we can derive an orthonormal basis of the subspace under consideration
V, _ = la.qr(np.delete(V, k, axis=1), mode='economic')
norms_2 = inner1d(V, axis=1) # ||V_i:||^2
return sampl.tolist()
def dpp_vfx_sampler(intermediate_sample_info,
X_data,
eval_L,
random_state=None,
**params):
""" First pre-compute quantities necessary for the vfx rejection sampling loop, such as the inner Nystrom approximation, and the RLS of all elements in :math:`\\mathbf{L}`.
Then, given the pre-computed information, run a rejection sampling loop to generate DPP samples.
:param intermediate_sample_info:
If available, the pre-computed information necessary for the vfx rejection sampling loop.
If ``None``, this function will compute and return an ``_IntermediateSampleInfo`` with fields
- ``.alpha_star``: appropriate rescaling such that the expected sample size of :math:`\\operatorname{DPP}(\\alpha^* \\mathbf{L})` is equal to a user-indicated constant ``params['desired_expected_size']``, or 1.0 if no such constant was specified by the user.
- ``.logdet_I_A``: :math:`\\log \\det` of the Nystrom approximation of :math:`\\mathbf{L} + I`
- ``.q``: placeholder q constant used for vfx sampling, to be replaced by the user before the sampling loop
- ``.s`` and ``.z``: approximations of the expected sample size of :math:`\\operatorname{DPP}(\\alpha^* \\mathbf{L})` to be used in the sampling loop. For more details see :cite:`DeCaVa19`
- ``.rls_estimate``: approximations of the RLS of all elements in X (i.e. in :math:`\\mathbf{L}`)
:type intermediate_sample_info:
``_IntermediateSampleInfo`` or ``None``, default ``None``
:param array_like X_data:
dataset such that :math:`\\mathbf{L}=` ``eval_L(X_data)``, out of which we are sampling objects according to a DPP
:param callable eval_L:
Likelihood function.
Given two sets of n points X and m points Y, ``eval_L(X, Y)`` should compute the :math:`n x m` matrix containing the likelihood between points.
The function should also accept a single argument X and return ``eval_L(X) = eval_L(X, X)``.
As an example, see the implementation of any of the kernels provided by scikit-learn (e.g. `PairwiseKernel <https://scikit-learn.org/stable/modules/generated/sklearn.gaussian_process.kernels.PairwiseKernel.html>`_).
:param random_state:
random source used for sampling, if None a RandomState is automatically generated
:type random_state:
RandomState or None, default None
:param dict params:
Dictionary including optional parameters:
- ``'desired_expected_size'`` (float or None, default None)
Desired expected sample size for the DPP.
If None, use the natural DPP expected sample size.
The vfx sampling algorithm can approximately adjust the expected sample size of the DPP by rescaling the :math:`\\mathbf{L}` matrix with a scalar :math:`\\alpha^*\\leq 1` .
Adjusting the expected sample size can be useful to control downstream complexity, and it is necessary to improve the probability of drawing a sample with exactly :math:`k` elements when using vfx for k-DPP sampling.
Currently only reducing the sample size is supported, and the sampler will raise an exception if the DPP already has a natural expected sample size smaller than ``params['desired_expected_size']``.
- ``'rls_oversample_dppvfx'`` (float, default 4.0)
Oversampling parameter used to construct dppvfx's internal Nystrom approximation.
The ``rls_oversample_dppvfx``:math:`\\geq 1` parameter is used to increase the rank of the approximation by a ``rls_oversample_dppvfx`` factor.
This makes each rejection round slower and more memory intensive, but reduces variance and the number of rounds of rejections, so the actual runtime might increase or decrease.
Empirically, a small factor ``rls_oversample_dppvfx``:math:`\\in [2,10]` seems to work.
It is suggested to start with a small number and increase if the algorithm fails to terminate.
- ``'rls_oversample_bless'`` (float, default 4.0)
Oversampling parameter used during bless's internal Nystrom approximation.
Note that this is a different Nystrom approximation than the one related to :func:`rls_oversample_dppvfx`, and can be tuned separately.
The ``rls_oversample_bless``:math:`\\geq 1` parameter is used to increase the rank of the approximation by a ``rls_oversample_bless`` factor.
This makes the one-time pre-processing slower and more memory intensive, but reduces variance and the number of rounds of rejections, so the actual runtime might increase or decrease.
Empirically, a small factor ``rls_oversample_bless``:math:`\\in [2,10]` seems to work.
It is suggested to start with a small number and increase if the algorithm fails to terminate or is not accurate.
- ``'q_func'`` (function, default x: x*x)
Mapping from the estimated expected size of the DPP to the Poisson intensity used to choose the size of the intermediate sample.
Larger intermediate samples cause less efficient iterations but higher acceptance probability.
- ``'nb_iter_bless'`` (int or None, default None)
Iterations for inner BLESS execution, if None it is set to log(n)
- ``'verbose'`` (bool, default True)
Controls verbosity of debug output, including progress bars.
If intermediate_sample_info is not provided, the first progress bar reports the inner execution of
the bless algorithm, showing:
- lam: lambda value of the current iteration
- m: current size of the dictionary (number of centers contained)
- m_expected: expected size of the dictionary before sampling
- probs_dist: (mean, max, min) of the approximate rlss at the current iteration
Subsequent progress bars show the execution of each rejection sampling loops (i.e. once per sample generated)
- acc_thresh: latest computed probability of acceptance
- rej_iter: iteration of the rejection sampling loop (i.e. rejections so far)
- ``'max_iter'`` (int, default 1000)
Maximum number of intermediate sample rejections before giving up.
:return:
Sample from a DPP (as a list) and updated intermediate_sample_info
:rtype:
tuple(list, _IntermediateSampleInfo)
"""
rng = check_random_state(random_state)
if intermediate_sample_info is None:
intermediate_sample_info = vfx_sampling_precompute_constants(
X_data=X_data,
eval_L=eval_L,
rng=rng,
**params)
q_func = params.get('q_func', lambda s: s * s)
intermediate_sample_info = intermediate_sample_info._replace(q=q_func(intermediate_sample_info.s))
sampl, rej_count = vfx_sampling_do_sampling_loop(X_data, eval_L, intermediate_sample_info, rng, **params)
return sampl, intermediate_sample_info
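# --- Illustrative sketch of the documented call pattern (added comment; not
# part of the original DPPy source). The Gaussian eval_L below follows the
# contract stated in the docstring, i.e. eval_L(X) == eval_L(X, X); the helper
# name, kernel choice and parameter values are assumptions for illustration.
def _demo_dpp_vfx_sampler(n_points=100, seed=0):
    rng = np.random.RandomState(seed)
    X_data = rng.randn(n_points, 2)

    def eval_L(X, Y=None):
        # Gaussian likelihood kernel between two sets of points.
        Y = X if Y is None else Y
        sq_dist = ((X[:, None, :] - Y[None, :, :]) ** 2).sum(axis=-1)
        return np.exp(-sq_dist)

    return dpp_vfx_sampler(None, X_data, eval_L,
                           random_state=rng,
                           desired_expected_size=5.0,
                           verbose=False)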
def alpha_dpp_sampler(intermediate_sample_info,
X_data,
eval_L,
random_state=None,
**params):
""" First pre-compute quantities necessary for the alpha-dpp rejection sampling loop, such as the inner Nystrom
approximation, and the and the initial rescaling alpha_hat for the binary search.
Then, given the pre-computed information,run a rejection sampling loop to generate samples from DPP(alpha * L).
:param intermediate_sample_info:
If available, the pre-computed information necessary for the alpha-dpp rejection sampling loop.
If ``None``, this function will compute and return an ``_IntermediateSampleInfoAlphaRescale`` (see :func:`alpha_dpp_sampling_precompute_constants`)
:type intermediate_sample_info:
``_IntermediateSampleInfoAlphaRescale`` or ``None``, default ``None``
:param array_like X_data:
dataset such that :math:`\\mathbf{L}=` ``eval_L(X_data)``, out of which we are sampling objects according to a DPP
:param callable eval_L:
Likelihood function.
Given two sets of n points X and m points Y, ``eval_L(X, Y)`` should compute the :math:`n x m` matrix containing the likelihood between points.
The function should also accept a single argument X and return ``eval_L(X) = eval_L(X, X)``.
As an example, see the implementation of any of the kernels provided by scikit-learn (e.g. `PairwiseKernel <https://scikit-learn.org/stable/modules/generated/sklearn.gaussian_process.kernels.PairwiseKernel.html>`_).
:param random_state:
random source used for sampling, if None a RandomState is automatically generated
:type random_state:
RandomState or None, default None
:param dict params:
Dictionary including optional parameters:
- ``'desired_expected_size'`` (float or None, default None)
Desired expected sample size for the rescaled DPP.
If None, use the natural DPP expected sample size.
The alpha-dpp sampling algorithm can approximately adjust the expected sample size of the DPP by rescaling the :math:`\\mathbf{L}` matrix with a scalar :math:`\\alpha^*\\leq 1` .
Adjusting the expected sample size can be useful to control downstream complexity, and it is necessary to improve the probability of drawing a sample with exactly :math:`k` elements when using alpha-dpp for k-DPP sampling.
Currently only reducing the sample size is supported, and the sampler will raise an exception if the DPP already has a natural expected sample size smaller than ``params['desired_expected_size']``.
- ``'rls_oversample_alphadpp'`` (float, default 4.0)
Oversampling parameter used to construct alphadpp's internal Nystrom approximation.
The ``rls_oversample_alphadpp``:math:`\\geq 1` parameter is used to increase the rank of the approximation by a ``rls_oversample_alphadpp`` factor.
This makes each rejection round slower and more memory intensive, but reduces variance and the number of rounds of rejections, so the actual runtime might increase or decrease.
Empirically, a small factor ``rls_oversample_alphadpp``:math:`\\in [2,10]` seems to work.
It is suggested to start with a small number and increase if the algorithm fails to terminate.
- ``'rls_oversample_bless'`` (float, default 4.0)
Oversampling parameter used during bless's internal Nystrom approximation.
Note that this is a different Nystrom approximation than the one related to :func:`rls_oversample_alphadpp`, and can be tuned separately.
The ``rls_oversample_bless``:math:`\\geq 1` parameter is used to increase the rank of the approximation by a ``rls_oversample_bless`` factor.
This makes the one-time pre-processing slower and more memory intensive, but reduces variance and the number of rounds of rejections, so the actual runtime might increase or decrease.
Empirically, a small factor ``rls_oversample_bless``:math:`\\in [2,10]` seems to work.
It is suggested to start with a small number and increase if the algorithm fails to terminate or is not accurate.
- ``'r_func'`` (function, default x: x)
Mapping from the estimated expected size of the rescaled alpha-DPP to the Poisson intensity used to choose the size
of the intermediate sample. Larger intermediate samples cause less efficient iterations but higher
acceptance probability.
- ``'nb_iter_bless'`` (int or None, default None)
Iterations for inner BLESS execution, if None it is set to log(n)
- ``'verbose'`` (bool, default True)
Controls verbosity of debug output, including progress bars.
If intermediate_sample_info is not provided, the first progress bar reports the inner execution of
the bless algorithm, showing:
- lam: lambda value of the current iteration
- m: current size of the dictionary (number of centers contained)
- m_expected: expected size of the dictionary before sampling
- probs_dist: (mean, max, min) of the approximate rlss at the current iteration
Subsequent progress bars show the execution of each rejection sampling loops (i.e. once per sample generated)
- acc_thresh: latest computed probability of acceptance
- rej_iter: iteration of the rejection sampling loop (i.e. rejections so far)
- ``'max_iter'`` (int, default 1000)
Maximum number of intermediate sample rejections before giving up.
:return:
Sample from a DPP (as a list) and updated intermediate_sample_info
:rtype:
tuple(list, _IntermediateSampleInfoAlphaRescale)
"""
rng = check_random_state(random_state)
if intermediate_sample_info is None:
intermediate_sample_info = alpha_dpp_sampling_precompute_constants(
X_data=X_data,
eval_L=eval_L,
rng=rng,
**params)
r_func = params.get('r_func', lambda r: r)
intermediate_sample_info = intermediate_sample_info._replace(r=r_func(intermediate_sample_info.deff_alpha_L_hat))
sampl, rej_count, intermediate_sample_info = alpha_dpp_sampling_do_sampling_loop(X_data,
eval_L,
intermediate_sample_info,
rng,
**params)
return sampl, intermediate_sample_info
##########
# k-DPPs #
##########
def k_dpp_vfx_sampler(size,
intermediate_sample_info,
X_data,
eval_L,
random_state=None,
**params):
""" First pre-compute quantities necessary for the vfx rejection sampling loop, such as the inner Nystrom approximation, and the RLS of all elements in :math:`\\mathbf{L}`.
Then, given the pre-computed information, run a rejection sampling loop to generate DPP samples.
To guarantee that the returned sample has size ``size``, we internally set desired_expected_size=size and
then repeatedly invoke dpp_vfx_sampler until a sample of the correct size is returned,
or exit with an error after a chosen number of rejections is reached.
:param int size: The size of the sample (i.e. the k of k-DPPs)
:param intermediate_sample_info:
If available, the pre-computed information necessary for the vfx rejection sampling loop.
If ``None``, this function will compute and return an ``_IntermediateSampleInfo`` with fields
- ``.alpha_star``: appropriate rescaling such that the expected sample size of :math:`\\operatorname{DPP}(\\alpha^* \\mathbf{L})` is equal to a user-indicated constant ``params['desired_expected_size']``, or 1.0 if no such constant was specified by the user.
- ``.logdet_I_A``: :math:`\\log \\det` of the Nystrom approximation of :math:`\\mathbf{L} + I`
- ``.q``: placeholder q constant used for vfx sampling, to be replaced by the user before the sampling loop
- ``.s`` and ``.z``: approximations of the expected sample size of :math:`\\operatorname{DPP}(\\alpha^* \\mathbf{L})` to be used in the sampling loop. For more details see :cite:`DeCaVa19`
- ``.rls_estimate``: approximations of the RLS of all elements in X (i.e. in :math:`\\mathbf{L}`)
:type intermediate_sample_info:
``_IntermediateSampleInfo`` or ``None``, default ``None``
:param array_like X_data:
dataset such that :math:`\\mathbf{L}=` ``eval_L(X_data)``, out of which we are sampling objects according to a DPP
:param callable eval_L:
Likelihood function.
Given two sets of n points X and m points Y, ``eval_L(X, Y)`` should compute the :math:`n x m` matrix containing the likelihood between points.
The function should also accept a single argument X and return ``eval_L(X) = eval_L(X, X)``.
As an example, see the implementation of any of the kernels provided by scikit-learn (e.g. `PairwiseKernel <https://scikit-learn.org/stable/modules/generated/sklearn.gaussian_process.kernels.PairwiseKernel.html>`_).
:param random_state:
random source used for sampling, if None a RandomState is automatically generated
:type random_state:
RandomState or None, default None
:param dict params:
Dictionary including optional parameters:
- ``'rls_oversample_dppvfx'`` (float, default 4.0)
Oversampling parameter used to construct dppvfx's internal Nystrom approximation.
The ``rls_oversample_dppvfx``:math:`\\geq 1` parameter is used to increase the rank of the approximation by a ``rls_oversample_dppvfx`` factor.
This makes each rejection round slower and more memory intensive, but reduces variance and the number of rounds of rejections, so the actual runtime might increase or decrease.
Empirically, a small factor ``rls_oversample_dppvfx``:math:`\\in [2,10]` seems to work.
It is suggested to start with a small number and increase if the algorithm fails to terminate.
- ``'rls_oversample_bless'`` (float, default 4.0)
Oversampling parameter used during bless's internal Nystrom approximation.
Note that this is a different Nystrom approximation than the one related to :func:`rls_oversample_dppvfx`, and can be tuned separately.
The ``rls_oversample_bless``:math:`\\geq 1` parameter is used to increase the rank of the approximation by a ``rls_oversample_bless`` factor.
This makes the one-time pre-processing slower and more memory intensive, but reduces variance and the number of rounds of rejections, so the actual runtime might increase or decrease.
Empirically, a small factor ``rls_oversample_bless``:math:`\\in [2,10]` seems to work.
It is suggested to start with a small number and increase if the algorithm fails to terminate or is not accurate.
- ``'q_func'`` (function, default x: x*x)
Mapping from the estimated expected size of the DPP to the Poisson intensity used to choose the size of the intermediate sample.
Larger intermediate samples cause less efficient iterations but higher acceptance probability.
- ``'nb_iter_bless'`` (int or None, default None)
Iterations for inner BLESS execution, if None it is set to log(n)
- ``'verbose'`` (bool, default True)
Controls verbosity of debug output, including progress bars.
If intermediate_sample_info is not provided, the first progress bar reports the inner execution of
the bless algorithm, showing:
- lam: lambda value of the current iteration
- m: current size of the dictionary (number of centers contained)
- m_expected: expected size of the dictionary before sampling
- probs_dist: (mean, max, min) of the approximate rlss at the current iteration
Subsequent progress bars show the execution of each rejection sampling loops (i.e. once per sample generated)
- acc_thresh: latest computed probability of acceptance
- rej_iter: iteration of the rejection sampling loop (i.e. rejections so far)
- ``'max_iter'`` (int, default 1000)
Maximum number of intermediate sample rejections before giving up.
- ``'max_iter_size_rejection'`` (int, default 100)
Maximum number of size-based rejections before giving up.
:return:
Sample from a DPP (as a list) and updated intermediate_sample_info
:rtype:
tuple(list, _IntermediateSampleInfo)
"""
rng = check_random_state(random_state)
if (intermediate_sample_info is None
or not np.isclose(intermediate_sample_info.s, size).item()):
intermediate_sample_info = vfx_sampling_precompute_constants(
X_data=X_data,
eval_L=eval_L,
desired_expected_size=size,
rng=rng,
**params)
q_func = params.get('q_func', lambda s: s * s)
intermediate_sample_info = intermediate_sample_info._replace(q=q_func(intermediate_sample_info.s))
max_iter_size_rejection = params.get('max_iter_size_rejection', 100)
for size_rejection_iter in range(max_iter_size_rejection):
sampl, rej_count = vfx_sampling_do_sampling_loop(
X_data,
eval_L,
intermediate_sample_info,
rng,
**params)
intermediate_sample_info = intermediate_sample_info._replace(rej_to_first_sample=intermediate_sample_info.rej_to_first_sample + rej_count)
if len(sampl) == size:
break
else:
raise ValueError('The vfx sampler reached the maximum number of rejections allowed '
'for the k-DPP size rejection ({}), try to increase the q factor '
'(see q_func parameter) or the Nystrom approximation accuracy '
'(see rls_oversample_* parameters).'.format(max_iter_size_rejection))
return sampl, intermediate_sample_info
def alpha_k_dpp_sampler(size,
intermediate_sample_info,
X_data,
eval_L,
random_state=None,
**params):
""" First pre-compute quantities necessary for the alpha-dpp rejection sampling loop, such as the inner Nystrom
approximation, the and the initial rescaling alpha_hat for the binary search.
Then, given the pre-computed information,run a rejection sampling loop to generate k-DPP samples.
To guarantee that the returned sample has size ``size``, we internally set desired_expected_size=size and
then repeatedly invoke alpha_dpp_sampler until a sample of the correct size is returned,
or exit with an error after a chosen number of rejections is reached.
:param int size: The size of the sample (i.e. the k of k-DPPs)
:param intermediate_sample_info:
If available, the pre-computed information necessary for the alpha-dpp rejection sampling loop.
If ``None``, this function will compute and return an ``_IntermediateSampleInfoAlphaRescale`` (see :func:`alpha_dpp_sampling_precompute_constants`)
:type intermediate_sample_info:
``_IntermediateSampleInfoAlphaRescale`` or ``None``, default ``None``
:param array_like X_data:
dataset such that :math:`\\mathbf{L}=` ``eval_L(X_data)``, out of which we are sampling objects according to a DPP
:param callable eval_L:
Likelihood function.
Given two sets of n points X and m points Y, ``eval_L(X, Y)`` should compute the :math:`n x m` matrix containing the likelihood between points.
The function should also accept a single argument X and return ``eval_L(X) = eval_L(X, X)``.
As an example, see the implementation of any of the kernels provided by scikit-learn (e.g. `PairwiseKernel <https://scikit-learn.org/stable/modules/generated/sklearn.gaussian_process.kernels.PairwiseKernel.html>`_).
:param random_state:
random source used for sampling, if None a RandomState is automatically generated
:type random_state:
RandomState or None, default None
:param dict params:
Dictionary including optional parameters:
- ``'rls_oversample_alphadpp'`` (float, default 4.0)
Oversampling parameter used to construct alphadpp's internal Nystrom approximation.
The ``rls_oversample_alphadpp``:math:`\\geq 1` parameter is used to increase the rank of the approximation by a ``rls_oversample_alphadpp`` factor.
This makes each rejection round slower and more memory intensive, but reduces variance and the number of rounds of rejections, so the actual runtime might increase or decrease.
Empirically, a small factor ``rls_oversample_alphadpp``:math:`\\in [2,10]` seems to work.
It is suggested to start with a small number and increase if the algorithm fails to terminate.
- ``'rls_oversample_bless'`` (float, default 4.0)
Oversampling parameter used during bless's internal Nystrom approximation.
Note that this is a different Nystrom approximation than the one related to :func:`rls_oversample_alphadpp`, and can be tuned separately.
The ``rls_oversample_bless``:math:`\\geq 1` parameter is used to increase the rank of the approximation by a ``rls_oversample_bless`` factor.
This makes the one-time pre-processing slower and more memory intensive, but reduces variance and the number of rounds of rejections, so the actual runtime might increase or decrease.
Empirically, a small factor ``rls_oversample_bless``:math:`\\in [2,10]` seems to work.
It is suggested to start with a small number and increase if the algorithm fails to terminate or is not accurate.
- ``'r_func'`` (function, default x: x)
Mapping from the estimated expected size of the rescaled alpha-DPP to the Poisson intensity used to choose the size
of the intermediate sample. Larger intermediate samples cause less efficient iterations but higher
acceptance probability.
- ``'nb_iter_bless'`` (int or None, default None)
Iterations for inner BLESS execution, if None it is set to log(n)
- ``'verbose'`` (bool, default True)
Controls verbosity of debug output, including progress bars.
If intermediate_sample_info is not provided, the first progress bar reports the inner execution of
the bless algorithm, showing:
- lam: lambda value of the current iteration
- m: current size of the dictionary (number of centers contained)
- m_expected: expected size of the dictionary before sampling
- probs_dist: (mean, max, min) of the approximate rlss at the current iteration
Subsequent progress bars show the execution of each rejection sampling loops (i.e. once per sample generated)
- acc_thresh: latest computed probability of acceptance
- rej_iter: iteration of the rejection sampling loop (i.e. rejections so far)
- ``'early_stop'`` (bool, default False)
Whether to return as soon as a first sample is accepted. If True, the sampling loop is interrupted
as soon as a k-DPP sample is generated. If False, the algorithm continues the binary search until
a sufficiently good rescaling alpha is found. While this makes subsequent sampling faster, it is wasteful
in the case where a single k-DPP sample is desired.
- ``'max_iter_size_rejection'`` (int, default 100)
Maximum number of size-based rejections before giving up.
:return:
Sample from a DPP (as a list) and updated intermediate_sample_info
:rtype:
tuple(list, _IntermediateSampleInfoAlphaRescale)
"""
rng = check_random_state(random_state)
if intermediate_sample_info is None or intermediate_sample_info.k != size:
intermediate_sample_info = alpha_dpp_sampling_precompute_constants(
X_data=X_data,
eval_L=eval_L,
desired_expected_size=size,
rng=rng,
**params)
r_func = params.get('r_func', lambda r: r)
intermediate_sample_info = intermediate_sample_info._replace(r=r_func(intermediate_sample_info.deff_alpha_L_hat))
max_iter_size_rejection = params.get('max_iter_size_rejection', 100)
number_trial_search = np.ceil(np.sqrt(size)).astype('int')
stopping_ratio = (1 + 1 / (size + 3) ** 2)
sample_count = 0
trial_count = 0
under_k_count = 0
over_k_count = 0
ratio_alpha = intermediate_sample_info.alpha_max / intermediate_sample_info.alpha_min
found_good_alpha = ratio_alpha <= stopping_ratio
prog_bar = get_progress_bar(disable=not params.get('verbose', False))
verbose_outer = None
if 'verbose' in params:
verbose_outer = params.pop('verbose')
params['verbose'] = False
early_stop = params.get('early_stop', False)
trial_count_overall = 0
for size_rejection_iter in range(max_iter_size_rejection):
sampl, rej_count, intermediate_sample_info = alpha_dpp_sampling_do_sampling_loop(X_data,
eval_L,
intermediate_sample_info,
rng,
**params)
trial_count += 1
trial_count_overall += 1
prog_bar.set_postfix(trial_count=trial_count,
alpha="{:.4}".format(intermediate_sample_info.alpha_hat),
alpha_switch=intermediate_sample_info.alpha_switches,
k=size,
k_emp=len(sampl),
rej_count=rej_count)
prog_bar.update()
if len(sampl) == size:
sampl_out = sampl
if intermediate_sample_info.trial_to_first_sample == 0:
intermediate_sample_info = intermediate_sample_info._replace(trial_to_first_sample=trial_count_overall)
sample_count += 1
if early_stop:
break
if len(sampl) < size:
under_k_count += 1
if len(sampl) > size:
over_k_count += 1
if intermediate_sample_info.trial_to_first_sample == 0:
intermediate_sample_info = intermediate_sample_info._replace(rej_to_first_sample=intermediate_sample_info.rej_to_first_sample + rej_count)
if sample_count == 2:
found_good_alpha = True
break
if trial_count == number_trial_search:
if under_k_count > over_k_count:
intermediate_sample_info = intermediate_sample_info._replace(alpha_min=intermediate_sample_info.alpha_hat)
else:
intermediate_sample_info = intermediate_sample_info._replace(alpha_max=intermediate_sample_info.alpha_hat)
geom_mean_alpha = np.sqrt(intermediate_sample_info.alpha_min * intermediate_sample_info.alpha_max)
diag_L = intermediate_sample_info.diag_L
intermediate_sample_info = intermediate_sample_info._replace(alpha_hat=geom_mean_alpha)
intermediate_sample_info = intermediate_sample_info._replace(rls_upper_bound=geom_mean_alpha * diag_L)
intermediate_sample_info = intermediate_sample_info._replace(rls_upper_bound_valid=np.full((diag_L.shape[0],), False))
ratio_alpha = intermediate_sample_info.alpha_max/intermediate_sample_info.alpha_min
if ratio_alpha <= stopping_ratio and sample_count > 0:
found_good_alpha = True
break
intermediate_sample_info = intermediate_sample_info._replace(alpha_switches=intermediate_sample_info.alpha_switches + 1)
trial_count = 0
under_k_count = 0
over_k_count = 0
else:
raise ValueError('The alpha sampler reached the maximum number of rejections allowed '
'for the k-DPP size rejection ({}), try to increase the r factor '
'(see r_func parameter) or the Nystrom approximation accuracy '
'(see rls_oversample_* parameters).'.format(max_iter_size_rejection))
if found_good_alpha:
intermediate_sample_info = intermediate_sample_info._replace(alpha_min=intermediate_sample_info.alpha_hat)
intermediate_sample_info = intermediate_sample_info._replace(alpha_max=intermediate_sample_info.alpha_hat)
intermediate_sample_info = intermediate_sample_info._replace(alpha_switches=intermediate_sample_info.alpha_switches + 1)
if verbose_outer:
params['verbose'] = verbose_outer
else:
params.pop('verbose')
return sampl_out, intermediate_sample_info
def k_dpp_eig_vecs_selector(eig_vals, eig_vecs, size,
E_poly=None, random_state=None):
""" Subsample eigenvectors V of the 'L' kernel to build a projection DPP with kernel V V.T from which sampling is easy. The selection is made based a realization of Bernoulli variables with parameters the eigenvalues of 'L' and evalutations of the elementary symmetric polynomials.
:param eig_vals:
Collection of eigen values of 'L' (likelihood) kernel.
:type eig_vals:
list, array_like
:param eig_vecs:
Collection of eigenvectors of 'L' kernel.
:type eig_vecs:
array_like
:param size:
Size :math:`k` of :math:`k`-DPP
:type size:
int
:param E_poly:
Evaluation of symmetric polynomials in the eigenvalues
:type E_poly:
array_like
:return:
Selected eigenvectors
:rtype:
array_like
.. seealso::
- :cite:`KuTa12` Algorithm 8
- :func:`elementary_symmetric_polynomials <elementary_symmetric_polynomials>`
"""
rng = check_random_state(random_state)
# Size of: ground set / sample
N, k = eig_vecs.shape[0], size
# as in np.linalg.matrix_rank
tol = np.max(eig_vals) * N * np.finfo(float).eps
rank = np.count_nonzero(eig_vals > tol)
if k > rank:
raise ValueError('size k={} > rank={}'.format(k, rank))
if E_poly is None:
E_poly = elementary_symmetric_polynomials(eig_vals, k)
ind_selected = np.zeros(k, dtype=int)
for n in range(eig_vals.size, 0, -1):
if rng.rand() < eig_vals[n - 1] * E_poly[k - 1, n - 1] / E_poly[k, n]:
k -= 1
ind_selected[k] = n - 1
if k == 0:
break
return eig_vecs[:, ind_selected]
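# --- Illustrative usage sketch (added comment; not part of the original DPPy
# source): select k eigenvectors of a likelihood kernel L for a k-DPP. Helper
# name and parameters are assumptions for the example; call it manually.
def _demo_k_dpp_eig_vecs_selector(N=8, k=3, seed=0):
    rng = np.random.RandomState(seed)
    U, _ = np.linalg.qr(rng.randn(N, N))  # eigenvectors of L
    eig_vals = 5.0 * rng.rand(N)          # nonnegative eigenvalues of L
    V = k_dpp_eig_vecs_selector(eig_vals, U, size=k, random_state=rng)
    return V.shape                        # expected: (N, k)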
# Evaluate the elementary symmetric polynomials
def elementary_symmetric_polynomials(eig_vals, size):
""" Evaluate the elementary symmetric polynomials :math:`e_k` in the eigenvalues :math:`(\\lambda_1, \\cdots, \\lambda_N)`.
:param eig_vals:
Collection of eigenvalues :math:`(\\lambda_1, \\cdots, \\lambda_N)` of the similarity kernel :math:`L`.
:type eig_vals:
list
:param size:
Maximum degree of elementary symmetric polynomial.
:type size:
int
:return:
:math:`[E_{kn}]_{k=0, n=0}^{\\text{size}, N}`
:math:`E_{kn} = e_k(\\lambda_1, \\cdots, \\lambda_n)`
:rtype:
array_like
.. seealso::
- :cite:`KuTa12` Algorithm 7
- `Wikipedia <https://en.wikipedia.org/wiki/Elementary_symmetric_polynomial>`_
"""
# Initialize output array
N = eig_vals.size
E = np.zeros((size + 1, N + 1))
E[0, :] = 1.0
# Recursive evaluation
for k in range(1, size + 1):
for n in range(1, N + 1):
E[k, n] = E[k, n - 1] + eig_vals[n - 1] * E[k - 1, n - 1]
return E
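# --- Illustrative check (added comment; not part of the original DPPy source):
# compare the recursive evaluation above with a brute-force sum over subsets.
# Helper name and parameters are assumptions for the example; call it manually.
def _demo_elementary_symmetric_polynomials(seed=0):
    from itertools import combinations
    rng = np.random.RandomState(seed)
    lambdas = rng.rand(6)
    E = elementary_symmetric_polynomials(lambdas, size=3)
    brute_e2 = sum(np.prod(pair) for pair in combinations(lambdas, 2))
    return np.isclose(E[2, lambdas.size], brute_e2)  # expected: True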
```
#### File: DPPy/tests/test_multivariate_jacobi_ope.py
```python
import unittest
import numpy as np
from scipy.integrate import quad
from scipy.special import eval_jacobi
import sys
sys.path.append('..')
from dppy.multivariate_jacobi_ope import (MultivariateJacobiOPE,
compute_ordering,
compute_rejection_bounds)
from dppy.utils import is_symmetric
class TestMultivariateJacobiOPE(unittest.TestCase):
"""
"""
seed = 0
def test_ordering(self):
"""Make sure the ordering of multi-indices respects the one prescirbed by :cite:`BaHa16` Section 2.1.3
"""
ord_d2_N16 = [(0, 0),
(0, 1), (1, 0), (1, 1),
(0, 2), (1, 2), (2, 0), (2, 1), (2, 2),
(0, 3), (1, 3), (2, 3), (3, 0), (3, 1), (3, 2), (3, 3)]
ord_d3_N27 = [(0, 0, 0),
(0, 0, 1), (0, 1, 0), (0, 1, 1), (1, 0, 0), (1, 0, 1), (1, 1, 0), (1, 1, 1),
(0, 0, 2), (0, 1, 2), (0, 2, 0), (0, 2, 1), (0, 2, 2), (1, 0, 2), (1, 1, 2), (1, 2, 0), (1, 2, 1), (1, 2, 2), (2, 0, 0), (2, 0, 1), (2, 0, 2), (2, 1, 0), (2, 1, 1), (2, 1, 2), (2, 2, 0), (2, 2, 1), (2, 2, 2)]
orderings = [ord_d2_N16, ord_d3_N27]
for idx, ord_to_check in enumerate(orderings):
with self.subTest(idx=idx):
N, d = len(ord_to_check), len(ord_to_check[0])
self.assertTrue(compute_ordering(N, d), ord_to_check)
def test_norms_of_multiD_polynomials(self):
N = 100
dims = np.arange(2, 5)
max_deg = 50 # to avoid quad warning in dimension 1
for d in dims:
with self.subTest(dimension=d):
jacobi_params = 0.5 - np.random.rand(d, 2)
jacobi_params[0, :] = -0.5
dpp = MultivariateJacobiOPE(N, jacobi_params)
pol_2_eval = dpp.degrees_1D_polynomials[:max_deg]
quad_square_norms =\
[[quad(lambda x:
(1-x)**a * (1+x)**b * eval_jacobi(n, a, b, x)**2,
-1, 1)[0]
for n, a, b in zip(deg,
dpp.jacobi_params[:, 0],
dpp.jacobi_params[:, 1])]
for deg in pol_2_eval]
self.assertTrue(
np.allclose(
dpp.norms_1D_polynomials[pol_2_eval, range(dpp.dim)],
np.sqrt(quad_square_norms)))
def test_Gautschi_bounds(self):
"""Test if bounds computed w/wo log scale coincide"""
N = 100
dims = np.arange(2, 5)
for d in dims:
with self.subTest(dimension=d):
jacobi_params = 0.5 - np.random.rand(d, 2)
jacobi_params[0, :] = -0.5
dpp = MultivariateJacobiOPE(N, jacobi_params)
with_log_scale = compute_rejection_bounds(dpp.jacobi_params,
dpp.ordering,
log_scale=True)
without_log_scale = compute_rejection_bounds(dpp.jacobi_params,
dpp.ordering,
log_scale=False)
self.assertTrue(np.allclose(with_log_scale, without_log_scale))
def test_kernel_evaluations(self):
N = 100
dims = np.arange(2, 5)
for d in dims:
with self.subTest(dimension=d):
jacobi_params = 0.5 - np.random.rand(d, 2)
jacobi_params[0, :] = -0.5
dpp = MultivariateJacobiOPE(N, jacobi_params)
X = np.random.rand(20, d)
Y = np.random.rand(20, d)
K_XX = is_symmetric(dpp.K(X, X))
K_xx = np.diag(K_XX)
K_xy = np.ravel([dpp.K(x, y) for x, y in zip(X, Y)])
checks = ((dpp.K(X), K_XX),
(dpp.K(X, X, eval_pointwise=True), K_xx),
(dpp.K(X, Y, eval_pointwise=True), K_xy))
for idx, (a, b) in enumerate(checks):
with self.subTest(idx=idx):
self.assertTrue(np.allclose(a, b),
'a={}, b={}'.format(a, b))
def test_sample_1D(self):
N, d = 20, 1
jacobi_params = - 0.5 * np.ones((d, 2))
dpp = MultivariateJacobiOPE(N, jacobi_params)
sampl = dpp.sample(random_state=self.seed) # seed = 0
expected_sample = np.array([[0.9995946],
[0.98944808],
[0.97485733],
[0.86576265],
[0.7958162],
[0.64406931],
[0.53459294],
[0.4259159],
[0.1784497],
[0.12319757],
[-0.13340743],
[-0.28758726],
[-0.40275405],
[-0.68282936],
[-0.76523971],
[-0.82355336],
[-0.88258742],
[-0.94587727],
[-0.96426474],
[-0.99658163]])
self.assertTrue(np.allclose(sampl, expected_sample))
def test_sample_2D(self):
N, d = 20, 2
jacobi_params = - 0.5 * np.ones((d, 2))
dpp = MultivariateJacobiOPE(N, jacobi_params)
sampl = dpp.sample(random_state=self.seed) # seed = 0
expected_sample = np.array([[-0.44929357, -0.92988338],
[0.07128896, -0.98828901],
[-0.43895328, -0.64850438],
[-0.56491996, 0.43632636],
[0.33859341, 0.6642957],
[-0.89437538, -0.98384996],
[0.93451148, -0.42788073],
[-0.81846092, 0.57000777],
[-0.42084694, 0.98065145],
[0.97651548, 0.94243444],
[0.11753084, 0.96240585],
[-0.12183308, -0.14093164],
[-0.9940169, 0.16811198],
[-0.76730512, -0.05402772],
[0.99984566, -0.95942833],
[0.99996511, -0.01959666],
[0.05053165, -0.40778628],
[0.82158181, 0.58501064],
[-0.97396649, 0.90805501],
[-0.99808676, -0.49690354]])
self.assertTrue(np.allclose(sampl, expected_sample))
def main():
unittest.main()
if __name__ == '__main__':
main()
```
#### File: DPPy/tests/test_tracy_widom.py
```python
import unittest
import numpy as np
import sys
sys.path.append('..')
from dppy.beta_ensemble_polynomial_potential_core import TracyWidom
class TestTracyWidom(unittest.TestCase):
""" Based on the work of Bornemann 2010 `https://arxiv.org/pdf/0804.2543.pdf <https://arxiv.org/pdf/0804.2543.pdf>`_
"""
TW = TracyWidom()
def test_kernel_example_bornemann_fredholm_determinant_should_equal_sin1(self):
""" Equation 5.8 Bornemann
"""
def K_Green(x, y):
Y, X = np.meshgrid(x, y)
return np.where(X <= Y, X * (1 - Y), Y * (1 - X))
quad_order = 50
x_quad, w_quad = self.TW.compute_quadrature(quad_order)
fred_det_K_approx = self.TW.fredholm_determinant(K_Green,
x_quad,
w_quad)
fred_det_K_theo = np.sin(1)
self.assertAlmostEqual(fred_det_K_approx, fred_det_K_theo,
msg=(fred_det_K_approx, fred_det_K_theo),
delta=1e-5)
def test_change_of_variables_from_0_1_to_s_oo_should_be_increasing(self):
"""
.. todo::
Add a reference for the choice of an increasing change of variables
"""
points = np.linspace(0, 1, 10)
s = -1
phi, d_phi = self.TW.change_of_variable(s)
for x, y in zip(points[:-1], points[1:]):
with self.subTest(x=x, y=y):
self.assertLessEqual(phi(x), phi(y))
def test_change_of_variables_from_0_1_to_s_oo_derivative_is_correct(self):
points = np.linspace(0, 1, 10, endpoint=False)
s = -1
phi, d_phi = self.TW.change_of_variable(s)
eps = 1e-7
for x in points:
with self.subTest(x=x):
d_phi_x_approx = (phi(x + eps) - phi(x)) / eps
d_phi_x = d_phi(x)
self.assertAlmostEqual(d_phi_x_approx, d_phi_x,
msg=(x, d_phi_x_approx, d_phi_x),
delta=1e-2)
def test_evaluation_Tracy_Widom_cdf(self):
""" evalution points obtained from Table 5. in *LARGEST EIGENVALUES AND SAMPLE COVARIANCE MATRICES*, <NAME>. BEJAN
https://pdfs.semanticscholar.org/ca19/3484415f374d8fb02e7fbdad72b99727b41f.pdf?_ga=2.251544262.1964171041.1570206947-237360766.1567514713
"""
points = np.array([[-3.0, 0.080361],
[-2.5, 0.212392],
[-2.0, 0.413256],
[-1.5, 0.631401],
[-1.0, 0.807225],
[-0.5, 0.916070],
[0.0, 0.969375],
[0.5, 0.990545],
[1.0, 0.997506],
[1.5, 0.999432],
[2.0, 0.999888]])
quad_order = 50
tol = 1e-4
cdf_s_approx = self.TW.cdf(points[:, 0], quad_order)
self.assertTrue(np.allclose(cdf_s_approx, points[:, 1], atol=tol))
def main():
unittest.main()
if __name__ == '__main__':
main()
``` |
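The Fredholm determinant tested above has a closed form: for the Green kernel K(x, y) = min(x, y)(1 - max(x, y)) on [0, 1], the operator eigenvalues are 1/(n^2 pi^2), so det(I - K) = sin(1). The sketch below is an addition (not part of the DPPy tests); it assumes a plain Gauss-Legendre Nystrom discretization rather than the library's compute_quadrature helper and reproduces the same number with NumPy only.

```python
import numpy as np

def fredholm_det_green(quad_order=50):
    # Gauss-Legendre nodes/weights on [-1, 1], mapped to [0, 1].
    nodes, weights = np.polynomial.legendre.leggauss(quad_order)
    x = 0.5 * (nodes + 1.0)
    w = 0.5 * weights
    # Green kernel of -d^2/dx^2 with Dirichlet boundary conditions on [0, 1].
    X, Y = np.meshgrid(x, x, indexing='ij')
    K = np.where(X <= Y, X * (1.0 - Y), Y * (1.0 - X))
    # Nystrom approximation of the Fredholm determinant det(I - K).
    sw = np.sqrt(w)
    return np.linalg.det(np.eye(quad_order) - sw[:, None] * K * sw[None, :])

print(fredholm_det_green(), np.sin(1))  # both close to 0.841470...
```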
{
"source": "JohnnyDev2001/fast_zipcode",
"score": 3
} |
#### File: fast_zipcode/fzip/viacep.py
```python
import requests
import json
def search_adress_viacep(cep):
req = requests.get(f'https://viacep.com.br/ws/{cep}/json/')
if req.status_code == 200:
address = json.loads(req.text)
return{
'bairro': address.get('bairro', ''),
'cep': address.get('cep', ''),
'cidade': address.get('localidade', ''),
'logradouro': address.get('logradouro', ''),
'uf': address.get('uf', ''),
'complemento': address.get('complemento', ''),
}
else:
    # request failed; make the implicit falsy result explicit
    return None
if __name__ == '__main__':
print(search_adress_viacep(1166835))
``` |
{
"source": "JohnnyDev2001/gblue",
"score": 3
} |
#### File: JohnnyDev2001/gblue/gb.py
```python
from tkinter import *
from Behavior import Behavior_render
tk = Tk()
class window:
def __init__(self, start=True, height='800', width='600', * , bg='#44475A', name='', icon=''):
self.name = name
self.icon = icon if icon != '' else './icon/icone.ico'
self.height = height
self.width = width
self.start = start
self.bg = bg
self.args = any
self.styleArgs = any
#---------start--------------
tk.geometry(f'{str(self.height)}x{str(self.width)}')
tk.configure(background=self.bg)
tk.title(str(self.name))
tk.iconbitmap(str(self.icon))
def render(self, args, *, styleArgs=None, styleMain=None):
self.args = args
self.styleArgs = styleArgs
self.styleMain = styleMain
if type(self.args) == dict:
res = Behavior_render.renderIt(tk, self.args, styleMain=self.styleMain)
for x in res:
return f'{x}'
else:
return f'Valor não corresponde ao esperado'
def App(self):
if self.start == True:
tk.mainloop()
if __name__ == '__main__':
pass
``` |
{
"source": "johnnydevriese/python_fun",
"score": 4
} |
#### File: python_fun/data_structures/bubble_sort.py
```python
def bubbleSort(alist):
for passnum in range(len(alist) - 1, 0, -1):
for i in range(passnum):
if alist[i] > alist[i + 1]:
temp = alist[i]
alist[i] = alist[i + 1]
alist[i + 1] = temp
alist = [54, 26, 93, 17, 77, 31, 44, 55, 20]
bubbleSort(alist)
print(alist)
# short bubble sort stops early if no exchanges happen in a pass, i.e. the list is already sorted.
def shortBubbleSort(alist):
exchanges = True
passnum = len(alist) - 1
while passnum > 0 and exchanges:
exchanges = False
for i in range(passnum):
if alist[i] > alist[i + 1]:
exchanges = True
temp = alist[i]
alist[i] = alist[i + 1]
alist[i + 1] = temp
passnum = passnum - 1
alist = [20, 30, 40, 90, 50, 60, 70, 80, 100, 110]
shortBubbleSort(alist)
print(alist)
```
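A quick property check for the two sorts above; the `is_sorted` helper and the random data are mine, and the snippet assumes `bubbleSort` and `shortBubbleSort` from the file above are in scope:
```python
import random

def is_sorted(xs):
    return all(a <= b for a, b in zip(xs, xs[1:]))

data = [random.randint(0, 99) for _ in range(50)]
copy_a, copy_b = list(data), list(data)
bubbleSort(copy_a)
shortBubbleSort(copy_b)
assert is_sorted(copy_a) and copy_a == copy_b == sorted(data)
```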
#### File: python_fun/google/google_interview.py
```python
from math import ceil
def solution(number, first_index, second_index):
a = int(number[first_index])
b = int(number[second_index])
# compute the average of the two numbers and round.
average = (a + b) / 2.0
average_rounded = int(ceil(average))
# print average_rounded
# then replace the two digits with the new computed average.
# new_x = x[2:]
text = number[:first_index] + str(average_rounded) + number[second_index + 1:]
return text
x = '623315'
first_index = 0
second_index = 1
altered_number = solution(x, first_index, second_index)
print(altered_number)
# print text
# could just concatenate strings but this wouldn't hold up to the other cases.
# final_x = str(average_rounded) + new_x
#
# print final_x
``` |
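Two hand-computed sanity checks for `solution` above (the expected strings are mine, assuming the function from the file is in scope):
```python
assert solution('623315', 0, 1) == '43315'   # avg(6, 2) rounds up to 4 and replaces both digits
assert solution('623315', 2, 3) == '62315'   # avg(3, 3) stays 3
```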
{
"source": "johnnydevriese/wsu_courses",
"score": 2
} |
#### File: wsu_courses/astro345_fall2015/bsc.py
```python
import matplotlib.pyplot as plot
import numpy as np
import math
from pylab import *
import re # "regular expressions"
from planetary_orbit_nov1 import *
# set plot limits here (whole sky would be 0,365, -90,90)
x_low = 370
x_hi = -5
y_low = -91
y_hi = 91
# read from the file
hr,rah,ram,ras,dd,dm,ds,v = loadtxt('bsc5.trim',unpack=True, usecols=[0,1,2,3,4,5,6,8])
# convert sexagesimal R.A. and dec to decimal degrees
ra = 15.0*(rah + ram/60.0 + ras/3600.)
sign = [1] * len(dd)  # one sign flag per star; a plain range() is not assignable in Python 3
regex = re.compile('-')
for d1 in range(len(dd)):
sign[d1] = 1
foo = str(dd[d1]) #subtlety: only the degrees column has a negative
# sign, but the neg needs to apply to min and sec too
    if (regex.match(foo)): # --> hence, this weird regex workaround,
sign[d1] = -1 # since -0 is different in meaning than 0
dec = abs(dd) + dm/60.0 + ds/3600.0
dec = dec*sign
# note to user: uncomment the 'plot' line and comment the 'scatter' line, if
# desired.
#plot(ra,dec,'ro',color='r',ms=0.5) # uniform marker size
#******************* beginning of my code ***********************
#surprisingly regular polynomial doesn't fit the data very well at all.
#~ coefficients = np.polyfit(ra, dec, 15)
#~ polynomial = np.poly1d(coefficients)
x = arange(0.0, 360, 0.1)
#~ ys = polynomial(x)
#print coefficients
#print polynomial
#arange is (start, stop, step)
#try to get some extra credit points for this stuff!
def f(x):
f = 65.0*cos((1.0/57.2958) * x)
return f
y = f(x)
plot(x, y)
colors = ['g', 'b','c','m', 'y', 'Aqua', 'Crimson', 'ForestGreen','Chartreuse' ]
mercury = scatter(alpha_hours[0], degrees_delta_planets[0], c=colors[0], s=100)
venus = scatter(alpha_hours[1], degrees_delta_planets[1], c=colors[1], s=100)
earth = scatter(alpha_hours[2], degrees_delta_planets[2], c=colors[2], s=100)
mars = scatter(alpha_hours[3], degrees_delta_planets[3], c=colors[3], s=100)
jupiter = scatter(alpha_hours[4], degrees_delta_planets[4], c=colors[4], s=100)
saturn = scatter(alpha_hours[5], degrees_delta_planets[5], c=colors[5], s=100)
uranus = scatter(alpha_hours[6], degrees_delta_planets[6], c=colors[6], s=100)
neptune = scatter(alpha_hours[7], degrees_delta_planets[7], c=colors[7], s=100)
sun = scatter(alpha_hours[8], degrees_delta_planets[8], c=colors[8], s=100)
legend((mercury, venus, earth, mars, jupiter, saturn, uranus, neptune,sun), ('Mercury', 'Venus', 'Earth', 'Mars', 'Jupiter', 'Saturn', 'Uranus', 'Neptune','Sun'),
scatterpoints=1, loc='lower left', ncol=3, fontsize=8)
#*********************** end of my code ***********************
# fancy, nonlinear marker size trick
ssize = (100.0 - 14.0*v)*(100.0-14.0*v)/100.0
scatter(ra,dec,c='r',marker='s',s=ssize) #variable marker size
xlim(x_low,x_hi)
ylim(y_low, y_hi)
xlabel('Right Ascension (degrees)')
ylabel('Declination (degrees)')
title('JDE = {}' .format(JDE))
legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
# the next line is crucial
show()
```
#### File: wsu_courses/astro345_fall2015/kepler_cleanedup.py
```python
import math
import numpy
import scipy
import pylab
import scipy.optimize
#function definitions.
#the 0.2 is the t-Tau moved to the other side so we can solve for x when y is 0.
def f(x):
y = x - 0.2 * numpy.sin(x) - 0.8
return y
def f_prime(x):
y = 1.0 - 0.2 * numpy.cos(x)
return y
#could also use this simple newton method.
def newt(x,n):
for i in range(n):
if f_prime(x) == 0:
return x
x = x - f(x)/f_prime(x)
return x
#using a scipy function for computing the zero.
# Where 0.2 is the intial guess.
root = scipy.optimize.newton(f, .2, fprime = f_prime)
#could uncomment the line below to use our own newton method. (The answer is still the same.)
#0.2 is the initial guess and 10 is the number of iterations.
#root = newt(0.2, 10)
print('this is the root for the given mean anomaly (radians):', root)
#converting from radians to degrees.
root_degrees = root * ( 180.0 / numpy.pi )
print('This is the root in degrees (E):', root_degrees)
#now we want r and theta.
# r = a(1 - eccentricity * cos(E) )
# tan(theta/2) = sqrt((1+eccentricity) / (1-eccentricity)) * tan(E/2)
#We want to solve for theta
#where e = 0.2 and a = 1.1 A.U. (given)
eccentricity = 0.2
semi_major_axis = 1.1
#calculating r and theta.
r = semi_major_axis * (1.0 - eccentricity * numpy.cos(root))
theta = 2.0 * numpy.arctan(numpy.sqrt((1.0 + eccentricity) / (1.0 - eccentricity)) * numpy.tan(root / 2.0))  # true anomaly in radians, from tan(theta/2) = sqrt((1+e)/(1-e)) * tan(E/2)
print('this is r:', r)
print('this is theta:', theta)
```
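A small residual check on the root found above: Kepler's equation E - e*sin(E) = M should hold to machine precision (the tolerance and variable names here are illustrative):
```python
import numpy as np
import scipy.optimize

e, M = 0.2, 0.8
E = scipy.optimize.newton(lambda E: E - e * np.sin(E) - M, M,
                          fprime=lambda E: 1.0 - e * np.cos(E))
assert abs(E - e * np.sin(E) - M) < 1e-12
print(np.degrees(E))  # eccentric anomaly in degrees
```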
#### File: wsu_courses/astro345_fall2015/physics330_Newton_problem.py
```python
import math
import numpy
import scipy
import pylab
import scipy.optimize
def f(x):
y = ( ( 0.965 + 1.0 ) / ( 0.2588 ) ) * x + numpy.log( 1 - ( x / 0.2588 ) )
return y
def y(x):
y = ( ( 0.9659258263 + 1.0 ) / ( 0.2588190451 ) ) + ( 1 / ( 1 - (x / 0.2588190451)) ) * (- 1.0 / 0.2588190451 )
return y
#x = numpy.linspace(0.0,0.3,100) # 100 linearly spaced numbers
#y = f(x) # computing the values of f(x)
#pylab.plot(x,y)
#pylab.axis([0, 0.5, -0.5, 0.5]) #axis() command in the example above takes a list of [xmin, xmax, ymin, ymax]
#pylab.show() # show the plot
x = scipy.optimize.newton(f, .2, fprime = y )
print(x)
```
#### File: wsu_courses/astro345_fall2015/test_october.py
```python
import math
import numpy
import scipy
import pylab
import scipy.optimize
def f(x):
f = numpy.power(x,3)
return f
def g(x):
y = x - 0.2 * numpy.sin(x) - 0.2
return y
x = scipy.optimize.newton(f, 0.0 )
print(x)
x = scipy.optimize.newton(g, 0.0 )
print(x)
``` |
{
"source": "JohnnyDHo/Brand-Clout",
"score": 3
} |
#### File: JohnnyDHo/Brand-Clout/googlesheetstest.py
```python
from __future__ import print_function
from googleapiclient.discovery import build
from httplib2 import Http
from oauth2client import file, client, tools
from pprint import pprint
# If modifying these scopes, delete the file token.json.
SCOPES = 'https://www.googleapis.com/auth/spreadsheets'
spreadsheet_id = '17RNBDCwmETuDRPGxHskMzXC3tiZ-shc7xmva7QwzbB4' # TODO: Update placeholder value.
store = file.Storage('token.json')
creds = store.get()
if not creds or creds.invalid:
flow = client.flow_from_clientsecrets('credentials.json', SCOPES)
creds = tools.run_flow(flow, store)
service = build('sheets', 'v4', http=creds.authorize(Http()))
# The A1 notation of a range to search for a logical table of data.
# Values will be appended after the last row of the table.
range_ = 'A1:A500' # TODO: Update placeholder value.
# How the input data should be interpreted.
value_input_option = 'USER_ENTERED' # TODO: Update placeholder value.
# How the input data should be inserted.
insert_data_option = 'INSERT_ROWS' # TODO: Update placeholder value.
value_range_body = {
# TODO: Add desired entries to the request body.
}
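# Editor's note (illustrative, not from the original file): the Sheets append API
# expects the rows to add under a 'values' key, e.g.
# value_range_body = {'values': [['alice', 'alice@example.com', 42],
#                                ['bob', 'bob@example.com', 7]]}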
request = service.spreadsheets().values().append(spreadsheetId=spreadsheet_id, range=range_, valueInputOption=value_input_option, insertDataOption=insert_data_option, body=value_range_body)
response = request.execute()
# TODO: Change code below to process the `response` dict:
pprint(response)
# # The ID and range of a sample spreadsheet.
# SAMPLE_SPREADSHEET_ID = '1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms'
# SAMPLE_RANGE_NAME = 'Class Data!A2:E'
#
# def main():
# """Shows basic usage of the Sheets API.
# Prints values from a sample spreadsheet.
# """
# store = file.Storage('token.json')
# creds = store.get()
# if not creds or creds.invalid:
# flow = client.flow_from_clientsecrets('credentials.json', SCOPES)
# creds = tools.run_flow(flow, store)
# service = build('sheets', 'v4', http=creds.authorize(Http()))
#
# # Call the Sheets API
# SPREADSHEET_ID = '1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms'
# RANGE_NAME = 'Class Data!A2:E'
# result = service.spreadsheets().values().get(spreadsheetId=SPREADSHEET_ID,
# range=RANGE_NAME).execute()
# values = result.get('values', [])
#
# if not values:
# print('No data found.')
# else:
# print('Name, Major:')
# for row in values:
# # Print columns A and E, which correspond to indices 0 and 4.
# print('%s, %s' % (row[0], row[4]))
#
# if __name__ == '__main__':
# main()
# # [END sheets_quickstart]
``` |
{
"source": "Johnnyevans32/chatbot",
"score": 2
} |
#### File: chatbot/engine/views.py
```python
import json
import urllib.parse
import uuid
import requests
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import authenticate, get_user_model, login, logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User, auth
from django.contrib.auth.views import logout_then_login
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.core.mail import send_mail
from django.http import HttpResponse, HttpResponseRedirect, JsonResponse
from django.shortcuts import redirect, render, reverse
# Home page view
# Create your views here.
from .models import Bio
def base_layout(request):
template='home/base.html'
return render(request, template)
def signup(request):
if request.method == 'POST':
username = request.POST.get('username')
email = request.POST.get('email')
password = request.POST.get('password')
try:
user = User.objects.create_user(username=username, email=email, password=password)
user.set_password(password)
user.save()
messages.success(request, 'Registration successful')
return render(request, 'login.html')
except Exception as err:
print(err)
messages.error(request, 'Something went sideways!')
return render(request, 'signup.html')
return render(request,'signup.html')
def signin(request):
if request.method == 'POST':
username = request.POST.get('username')
password = request.POST.get('password')
try:
user = authenticate(request, username=username, password=password)
if user is not None:
login(request, user)
return redirect(reverse('home'))
else:
messages.info(request, 'Invalid credentials')
return render(request, 'login.html')
except Exception as err:
print(err)
messages.error(request, 'Something went sideways!')
return render(request, 'login.html')
return render(request,'login.html')
def logout_user(request):
logout(request)
return redirect(reverse('signin'))
@login_required(login_url='signin')
def index(request):
return render(request, 'chat.html')
``` |
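A sketch of the URL wiring these views appear to expect; the module path and route strings are guesses based on `reverse('home')` and `login_url='signin'` above, not taken from the repository:
```python
# urls.py (hypothetical)
from django.urls import path
from engine import views

urlpatterns = [
    path('', views.index, name='home'),
    path('signup/', views.signup, name='signup'),
    path('login/', views.signin, name='signin'),
    path('logout/', views.logout_user, name='logout'),
]
```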
{
"source": "johnnyfco3/ProjectsAssignment",
"score": 2
} |
#### File: site-packages/pikepdf/_methods.py
```python
import datetime
import inspect
import mimetypes
import platform
import shutil
from collections.abc import KeysView, MutableMapping
from decimal import Decimal
from io import BytesIO
from pathlib import Path
from subprocess import PIPE, run
from tempfile import NamedTemporaryFile
from typing import (
Any,
BinaryIO,
Callable,
ItemsView,
Iterator,
List,
Optional,
Tuple,
Type,
TypeVar,
Union,
ValuesView,
)
from warnings import warn
from . import Array, Dictionary, Name, Object, Page, Pdf, Stream
from ._qpdf import (
AccessMode,
AttachedFile,
AttachedFileSpec,
Attachments,
NameTree,
ObjectStreamMode,
Rectangle,
StreamDecodeLevel,
StreamParser,
Token,
_ObjectMapping,
)
from .models import Encryption, EncryptionInfo, Outline, PdfMetadata, Permissions
from .models.metadata import decode_pdf_date, encode_pdf_date
# pylint: disable=no-member,unsupported-membership-test,unsubscriptable-object
# mypy: ignore-errors
__all__ = []
Numeric = TypeVar('Numeric', int, float, Decimal)
def augment_override_cpp(fn):
fn._augment_override_cpp = True
return fn
def _is_inherited_method(meth):
# Augmenting a C++ with a method that cls inherits from the Python
# object is never what we want.
return meth.__qualname__.startswith('object.')
def _is_augmentable(m):
return (
inspect.isfunction(m) and not _is_inherited_method(m)
) or inspect.isdatadescriptor(m)
def augments(cls_cpp: Type[Any]):
"""Attach methods of a Python support class to an existing class
This monkeypatches all methods defined in the support class onto an
existing class. Example:
.. code-block:: python
@augments(ClassDefinedInCpp)
class SupportClass:
def foo(self):
pass
The Python method 'foo' will be monkeypatched on ClassDefinedInCpp. SupportClass
has no meaning on its own and should not be used, but gets returned from
this function so IDE code inspection doesn't get too confused.
We don't subclass because it's much more convenient to monkeypatch Python
methods onto the existing Python binding of the C++ class. For one thing,
this allows the implementation to be moved from Python to C++ or vice
versa. It saves having to implement an intermediate Python subclass and then
ensures that the C++ superclass never 'leaks' to pikepdf users. Finally,
wrapper classes and subclasses can become problematic if the call stack
crosses the C++/Python boundary multiple times.
Any existing methods may be used, regardless of whether they are defined
elsewhere in the support class or in the target class.
For data fields to work, the target class must be
tagged ``py::dynamic_attr`` in pybind11.
Strictly, the target class does not have to be C++ or derived from pybind11.
This works on pure Python classes too.
THIS DOES NOT work for class methods.
(Alternative ideas: https://github.com/pybind/pybind11/issues/1074)
"""
OVERRIDE_WHITELIST = {'__eq__', '__hash__', '__repr__'}
if platform.python_implementation() == 'PyPy':
# Either PyPy or pybind11's interface to PyPy automatically adds a __getattr__
OVERRIDE_WHITELIST |= {'__getattr__'} # pragma: no cover
def class_augment(cls, cls_cpp=cls_cpp):
# inspect.getmembers has different behavior on PyPy - in particular it seems
# that a typical PyPy class like cls will have more methods that it considers
# methods than CPython does. Our predicate should take care of this.
for name, member in inspect.getmembers(cls, predicate=_is_augmentable):
if name == '__weakref__':
continue
if (
hasattr(cls_cpp, name)
and hasattr(cls, name)
and name not in getattr(cls, '__abstractmethods__', set())
and name not in OVERRIDE_WHITELIST
and not getattr(getattr(cls, name), '_augment_override_cpp', False)
):
# If the original C++ class and Python support class both define the
# same name, we generally have a conflict, because this is augmentation
# not inheritance. However, if the method provided by the support class
# is an abstract method, then we can consider the C++ version the
# implementation. Also, pybind11 provides defaults for __eq__,
# __hash__ and __repr__ that we often do want to override directly.
raise RuntimeError( # pragma: no cover
f"C++ {cls_cpp} and Python {cls} both define the same "
f"non-abstract method {name}: "
f"{getattr(cls_cpp, name, '')!r}, "
f"{getattr(cls, name, '')!r}"
)
if inspect.isfunction(member):
setattr(cls_cpp, name, member)
installed_member = getattr(cls_cpp, name)
installed_member.__qualname__ = member.__qualname__.replace(
cls.__name__, cls_cpp.__name__
)
elif inspect.isdatadescriptor(member):
setattr(cls_cpp, name, member)
def disable_init(self):
# Prevent initialization of the support class
raise NotImplementedError(self.__class__.__name__ + '.__init__')
cls.__init__ = disable_init
return cls
return class_augment
def _single_page_pdf(page) -> bytes:
"""Construct a single page PDF from the provided page in memory"""
pdf = Pdf.new()
pdf.pages.append(page)
bio = BytesIO()
pdf.save(bio)
bio.seek(0)
return bio.read()
def _mudraw(buffer, fmt) -> bytes:
"""Use mupdf draw to rasterize the PDF in the memory buffer"""
# mudraw cannot read from stdin so NamedTemporaryFile is required
with NamedTemporaryFile(suffix='.pdf') as tmp_in:
tmp_in.write(buffer)
tmp_in.seek(0)
tmp_in.flush()
proc = run(
['mudraw', '-F', fmt, '-o', '-', tmp_in.name],
stdout=PIPE,
stderr=PIPE,
check=True,
)
return proc.stdout
@augments(Object)
class Extend_Object:
def _ipython_key_completions_(self):
if isinstance(self, (Dictionary, Stream)):
return self.keys()
return None
def emplace(self, other: Object, retain=(Name.Parent,)):
"""Copy all items from other without making a new object.
Particularly when working with pages, it may be desirable to remove all
of the existing page's contents and emplace (insert) a new page on top
of it, in a way that preserves all links and references to the original
page. (Or similarly, for other Dictionary objects in a PDF.)
Any Dictionary keys in the iterable *retain* are preserved. By default,
/Parent is retained.
When a page is assigned (``pdf.pages[0] = new_page``), only the
        application knows if references to the original page are
still valid. For example, a PDF optimizer might restructure a page
object into another visually similar one, and references would be valid;
but for a program that reorganizes page contents such as a N-up
compositor, references may not be valid anymore.
This method takes precautions to ensure that child objects in common
with ``self`` and ``other`` are not inadvertently deleted.
Example:
>>> pdf.pages[0].objgen
(16, 0)
>>> pdf.pages[0].emplace(pdf.pages[1])
>>> pdf.pages[0].objgen
(16, 0) # Same object
.. versionchanged:: 2.11.1
Added the *retain* argument.
"""
if not self.same_owner_as(other):
raise TypeError("Objects must have the same owner for emplace()")
# .keys() returns strings, so make all strings
retain = {str(k) for k in retain}
self_keys = set(self.keys())
other_keys = set(other.keys())
assert all(isinstance(k, str) for k in (retain | self_keys | other_keys))
del_keys = self_keys - other_keys - retain
for k in (k for k in other_keys if k not in retain):
self[k] = other[k] # pylint: disable=unsupported-assignment-operation
for k in del_keys:
del self[k] # pylint: disable=unsupported-delete-operation
def _type_check_write(self, filter, decode_parms):
if isinstance(filter, list):
filter = Array(filter)
filter = filter.wrap_in_array()
if isinstance(decode_parms, list):
decode_parms = Array(decode_parms)
elif decode_parms is None:
decode_parms = Array([])
else:
decode_parms = decode_parms.wrap_in_array()
if not all(isinstance(item, Name) for item in filter):
raise TypeError(
"filter must be: pikepdf.Name or pikepdf.Array([pikepdf.Name])"
)
if not all(
(isinstance(item, Dictionary) or item is None) for item in decode_parms
):
raise TypeError(
"decode_parms must be: pikepdf.Dictionary or "
"pikepdf.Array([pikepdf.Dictionary])"
)
if len(decode_parms) != 0 and len(filter) != len(decode_parms):
raise ValueError(
f"filter ({repr(filter)}) and decode_parms "
f"({repr(decode_parms)}) must be arrays of same length"
)
if len(filter) == 1:
filter = filter[0]
if len(decode_parms) == 0:
decode_parms = None
elif len(decode_parms) == 1:
decode_parms = decode_parms[0]
return filter, decode_parms
def write(
self,
data: bytes,
*,
filter: Union[Name, Array, None] = None,
decode_parms: Union[Dictionary, Array, None] = None,
type_check: bool = True,
): # pylint: disable=redefined-builtin
"""
Replace stream object's data with new (possibly compressed) `data`.
`filter` and `decode_parms` specify that compression that is present on
the input `data`.
When writing the PDF in :meth:`pikepdf.Pdf.save`,
pikepdf may change the compression or apply compression to data that was
not compressed, depending on the parameters given to that function. It
will never change lossless to lossy encoding.
PNG and TIFF images, even if compressed, cannot be directly inserted
into a PDF and displayed as images.
Args:
data: the new data to use for replacement
filter: The filter(s) with which the
data is (already) encoded
decode_parms: Parameters for the
                filters with which the object is encoded
type_check: Check arguments; use False only if you want to
intentionally create malformed PDFs.
If only one `filter` is specified, it may be a name such as
`Name('/FlateDecode')`. If there are multiple filters, then array
of names should be given.
If there is only one filter, `decode_parms` is a Dictionary of
parameters for that filter. If there are multiple filters, then
`decode_parms` is an Array of Dictionary, where each array index
        corresponds to the filter.
"""
if type_check and filter is not None:
filter, decode_parms = self._type_check_write(filter, decode_parms)
self._write(data, filter=filter, decode_parms=decode_parms)
@augments(Pdf)
class Extend_Pdf:
def _repr_mimebundle_(
self, include=None, exclude=None
): # pylint: disable=unused-argument
"""
Present options to IPython or Jupyter for rich display of this object
See https://ipython.readthedocs.io/en/stable/config/integrating.html#rich-display
"""
bio = BytesIO()
self.save(bio)
bio.seek(0)
data = {'application/pdf': bio.read()}
return data
@property
def docinfo(self) -> Dictionary:
"""
Access the (deprecated) document information dictionary.
The document information dictionary is a brief metadata record that can
store some information about the origin of a PDF. It is deprecated and
removed in the PDF 2.0 specification (not deprecated from the
perspective of pikepdf). Use the ``.open_metadata()`` API instead, which
will edit the modern (and unfortunately, more complicated) XMP metadata
object and synchronize changes to the document information dictionary.
This property simplifies access to the actual document information
dictionary and ensures that it is created correctly if it needs to be
created.
A new, empty dictionary will be created if this property is accessed
and dictionary does not exist. (This is to ensure that convenient code
like ``pdf.docinfo[Name.Title] = "Title"`` will work when the dictionary
does not exist at all.)
You can delete the document information dictionary by deleting this property,
``del pdf.docinfo``. Note that accessing the property after deleting it
will re-create with a new, empty dictionary.
.. versionchanged: 2.4
Added support for ``del pdf.docinfo``.
"""
if Name.Info not in self.trailer:
self.trailer.Info = self.make_indirect(Dictionary())
return self.trailer.Info
@docinfo.setter
def docinfo(self, new_docinfo: Dictionary):
if not new_docinfo.is_indirect:
raise ValueError(
"docinfo must be an indirect object - use Pdf.make_indirect"
)
self.trailer.Info = new_docinfo
@docinfo.deleter
def docinfo(self):
if Name.Info in self.trailer:
del self.trailer.Info
def open_metadata(
self,
set_pikepdf_as_editor: bool = True,
update_docinfo: bool = True,
strict: bool = False,
) -> PdfMetadata:
"""
Open the PDF's XMP metadata for editing.
There is no ``.close()`` function on the metadata object, since this is
intended to be used inside a ``with`` block only.
For historical reasons, certain parts of PDF metadata are stored in
two different locations and formats. This feature coordinates edits so
that both types of metadata are updated consistently and "atomically"
(assuming single threaded access). It operates on the ``Pdf`` in memory,
not any file on disk. To persist metadata changes, you must still use
``Pdf.save()``.
Example:
>>> with pdf.open_metadata() as meta:
                meta['dc:title'] = 'Set the Dublin Core Title'
meta['dc:description'] = 'Put the Abstract here'
Args:
set_pikepdf_as_editor: Automatically update the metadata ``pdf:Producer``
to show that this version of pikepdf is the most recent software to
modify the metadata, and ``xmp:MetadataDate`` to timestamp the update.
Recommended, except for testing.
update_docinfo: Update the standard fields of DocumentInfo
(the old PDF metadata dictionary) to match the corresponding
XMP fields. The mapping is described in
:attr:`PdfMetadata.DOCINFO_MAPPING`. Nonstandard DocumentInfo
fields and XMP metadata fields with no DocumentInfo equivalent
are ignored.
strict: If ``False`` (the default), we aggressively attempt
to recover from any parse errors in XMP, and if that fails we
overwrite the XMP with an empty XMP record. If ``True``, raise
                errors when the metadata bytes are not valid and well-formed
XMP (and thus, XML). Some trivial cases that are equivalent to
empty or incomplete "XMP skeletons" are never treated as errors,
and always replaced with a proper empty XMP block. Certain
errors may be logged.
"""
return PdfMetadata(
self,
pikepdf_mark=set_pikepdf_as_editor,
sync_docinfo=update_docinfo,
overwrite_invalid_xml=not strict,
)
def open_outline(self, max_depth: int = 15, strict: bool = False) -> Outline:
"""
Open the PDF outline ("bookmarks") for editing.
Recommend for use in a ``with`` block. Changes are committed to the
PDF when the block exits. (The ``Pdf`` must still be opened.)
Example:
>>> with pdf.open_outline() as outline:
outline.root.insert(0, OutlineItem('Intro', 0))
Args:
max_depth: Maximum recursion depth of the outline to be
imported and re-written to the document. ``0`` means only
considering the root level, ``1`` the first-level
sub-outline of each root element, and so on. Items beyond
this depth will be silently ignored. Default is ``15``.
strict: With the default behavior (set to ``False``),
structural errors (e.g. reference loops) in the PDF document
will only cancel processing further nodes on that particular
level, recovering the valid parts of the document outline
without raising an exception. When set to ``True``, any such
error will raise an ``OutlineStructureError``, leaving the
invalid parts in place.
Similarly, outline objects that have been accidentally
duplicated in the ``Outline`` container will be silently
fixed (i.e. reproduced as new objects) or raise an
``OutlineStructureError``.
"""
return Outline(self, max_depth=max_depth, strict=strict)
def make_stream(self, data: bytes, d=None, **kwargs) -> Stream:
"""
Create a new pikepdf.Stream object that is attached to this PDF.
See:
:meth:`pikepdf.Stream.__new__`
"""
return Stream(self, data, d, **kwargs)
def add_blank_page(
self, *, page_size: Tuple[Numeric, Numeric] = (612.0, 792.0)
) -> Page:
"""
Add a blank page to this PDF. If pages already exist, the page will be added to
the end. Pages may be reordered using ``Pdf.pages``.
The caller may add content to the page by modifying its objects after creating
it.
Args:
page_size (tuple): The size of the page in PDF units (1/72 inch or 0.35mm).
Default size is set to a US Letter 8.5" x 11" page.
"""
for dim in page_size:
if not (3 <= dim <= 14400):
raise ValueError('Page size must be between 3 and 14400 PDF units')
page_dict = Dictionary(
Type=Name.Page,
MediaBox=Array([0, 0, page_size[0], page_size[1]]),
Contents=self.make_stream(b''),
Resources=Dictionary(),
)
page_obj = self.make_indirect(page_dict)
self._add_page(page_obj, first=False)
return Page(page_obj)
def close(self) -> None:
"""
Close a ``Pdf`` object and release resources acquired by pikepdf.
If pikepdf opened the file handle it will close it (e.g. when opened with a file
        path). If the caller opened the file for pikepdf, the caller must close the file.
        ``with`` blocks will call close when they exit.
pikepdf lazily loads data from PDFs, so some :class:`pikepdf.Object` may
implicitly depend on the :class:`pikepdf.Pdf` being open. This is always the
case for :class:`pikepdf.Stream` but can be true for any object. Do not close
the `Pdf` object if you might still be accessing content from it.
When an ``Object`` is copied from one ``Pdf`` to another, the ``Object`` is copied into
the destination ``Pdf`` immediately, so after accessing all desired information
from the source ``Pdf`` it may be closed.
.. versionchanged:: 3.0
In pikepdf 2.x, this function actually worked by resetting to a very short
empty PDF. Code that relied on this quirk may not function correctly.
"""
self._close()
if getattr(self, '_tmp_stream', None):
self._tmp_stream.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
@property
def allow(self) -> Permissions:
"""
Report permissions associated with this PDF.
By default these permissions will be replicated when the PDF is
saved. Permissions may also only be changed when a PDF is being saved,
and are only available for encrypted PDFs. If a PDF is not encrypted,
all operations are reported as allowed.
pikepdf has no way of enforcing permissions.
"""
results = {}
for field in Permissions._fields:
results[field] = getattr(self, '_allow_' + field)
return Permissions(**results)
@property
def encryption(self) -> EncryptionInfo:
"""
Report encryption information for this PDF.
Encryption settings may only be changed when a PDF is saved.
"""
return EncryptionInfo(self._encryption_data)
def check(self) -> List[str]:
"""
Check if PDF is well-formed. Similar to ``qpdf --check``.
"""
class DiscardingParser(StreamParser):
def __init__(self): # pylint: disable=useless-super-delegation
super().__init__() # required for C++
def handle_object(self, *_args):
pass
def handle_eof(self):
pass
problems: List[str] = []
self._decode_all_streams_and_discard()
discarding_parser = DiscardingParser()
for page in self.pages:
page.parse_contents(discarding_parser)
for warning in self.get_warnings():
problems.append("WARNING: " + warning)
return problems
def save(
self,
filename_or_stream: Union[Path, str, BinaryIO, None] = None,
*,
static_id: bool = False,
preserve_pdfa: bool = True,
min_version: Union[str, Tuple[str, int]] = "",
force_version: Union[str, Tuple[str, int]] = "",
fix_metadata_version: bool = True,
compress_streams: bool = True,
stream_decode_level: Optional[StreamDecodeLevel] = None,
object_stream_mode: ObjectStreamMode = ObjectStreamMode.preserve,
normalize_content: bool = False,
linearize: bool = False,
qdf: bool = False,
progress: Callable[[int], None] = None,
encryption: Optional[Union[Encryption, bool]] = None,
recompress_flate: bool = False,
) -> None:
"""
Save all modifications to this :class:`pikepdf.Pdf`.
Args:
filename_or_stream: Where to write the output. If a file
exists in this location it will be overwritten.
If the file was opened with ``allow_overwriting_input=True``,
then it is permitted to overwrite the original file, and
this parameter may be omitted to implicitly use the original
filename. Otherwise, the filename may not be the same as the
input file, as overwriting the input file would corrupt data
                since pikepdf uses lazy loading.
static_id: Indicates that the ``/ID`` metadata, normally
calculated as a hash of certain PDF contents and metadata
including the current time, should instead be generated
deterministically. Normally for debugging.
preserve_pdfa: Ensures that the file is generated in a
manner compliant with PDF/A and other stricter variants.
This should be True, the default, in most cases.
min_version: Sets the minimum version of PDF
specification that should be required. If left alone QPDF
will decide. If a tuple, the second element is an integer, the
extension level. If the version number is not a valid format,
QPDF will decide what to do.
force_version: Override the version recommend by QPDF,
potentially creating an invalid file that does not display
in old versions. See QPDF manual for details. If a tuple, the
second element is an integer, the extension level.
fix_metadata_version: If ``True`` (default) and the XMP metadata
contains the optional PDF version field, ensure the version in
metadata is correct. If the XMP metadata does not contain a PDF
version field, none will be added. To ensure that the field is
added, edit the metadata and insert a placeholder value in
``pdf:PDFVersion``. If XMP metadata does not exist, it will
not be created regardless of the value of this argument.
object_stream_mode:
``disable`` prevents the use of object streams.
``preserve`` keeps object streams from the input file.
``generate`` uses object streams wherever possible,
creating the smallest files but requiring PDF 1.5+.
compress_streams: Enables or disables the compression of
stream objects in the PDF that are created without specifying
any compression setting. Metadata is never compressed.
By default this is set to ``True``, and should be except
                for debugging. Existing streams in the PDF will not
                be modified. To decompress existing streams, you must set
both ``compress_streams=False`` and ``stream_decode_level``
to the desired decode level (e.g. ``.generalized`` will
decompress most non-image content).
stream_decode_level: Specifies how
to encode stream objects. See documentation for
:class:`pikepdf.StreamDecodeLevel`.
recompress_flate: When disabled (the default), qpdf does not
uncompress and recompress streams compressed with the Flate
compression algorithm. If True, pikepdf will instruct qpdf to
do this, which may be useful if recompressing streams to a
higher compression level.
normalize_content: Enables parsing and reformatting the
                content stream within PDFs. This may make debugging PDFs easier.
linearize: Enables creating linear or "fast web view",
where the file's contents are organized sequentially so that
a viewer can begin rendering before it has the whole file.
As a drawback, it tends to make files larger.
            qdf: Save output in QDF mode. QDF mode is a special output
                mode in QPDF to allow editing of PDFs in a text editor. Use
                the program ``fix-qdf`` to convert back to a standard
                PDF.
progress: Specify a callback function that is called
as the PDF is written. The function will be called with an
integer between 0-100 as the sole parameter, the progress
percentage. This function may not access or modify the PDF
while it is being written, or data corruption will almost
certainly occur.
encryption: If ``False``
or omitted, existing encryption will be removed. If ``True``
encryption settings are copied from the originating PDF.
Alternately, an ``Encryption`` object may be provided that
sets the parameters for new encryption.
Raises:
PdfError
ForeignObjectError
ValueError
You may call ``.save()`` multiple times with different parameters
to generate different versions of a file, and you *may* continue
to modify the file after saving it. ``.save()`` does not modify
the ``Pdf`` object in memory, except possibly by updating the XMP
metadata version with ``fix_metadata_version``.
.. note::
:meth:`pikepdf.Pdf.remove_unreferenced_resources` before saving
may eliminate unnecessary resources from the output file if there
are any objects (such as images) that are referenced in a page's
Resources dictionary but never called in the page's content stream.
.. note::
pikepdf can read PDFs with incremental updates, but always
coalesces any incremental updates into a single non-incremental
PDF file when saving.
.. versionchanged:: 2.7
Added *recompress_flate*.
.. versionchanged:: 3.0
Keyword arguments now mandatory for everything except the first
argument.
"""
if not filename_or_stream and getattr(self, '_original_filename', None):
filename_or_stream = self._original_filename
if not filename_or_stream:
raise ValueError(
"Cannot save to original filename because the original file was "
"not opening using Pdf.open(..., allow_overwriting_input=True). "
"Either specify a new destination filename/file stream or open "
"with allow_overwriting_input=True. If this Pdf was created using "
"Pdf.new(), you must specify a destination object since there is "
"no original filename to save to."
)
self._save(
filename_or_stream,
static_id=static_id,
preserve_pdfa=preserve_pdfa,
min_version=min_version,
force_version=force_version,
fix_metadata_version=fix_metadata_version,
compress_streams=compress_streams,
stream_decode_level=stream_decode_level,
object_stream_mode=object_stream_mode,
normalize_content=normalize_content,
linearize=linearize,
qdf=qdf,
progress=progress,
encryption=encryption,
samefile_check=getattr(self, '_tmp_stream', None) is None,
recompress_flate=recompress_flate,
)
@staticmethod
def open(
filename_or_stream: Union[Path, str, BinaryIO],
*,
password: Union[str, bytes] = "",
hex_password: bool = False,
ignore_xref_streams: bool = False,
suppress_warnings: bool = True,
attempt_recovery: bool = True,
inherit_page_attributes: bool = True,
access_mode: AccessMode = AccessMode.default,
allow_overwriting_input: bool = False,
) -> Pdf:
"""
Open an existing file at *filename_or_stream*.
If *filename_or_stream* is path-like, the file will be opened for reading.
The file should not be modified by another process while it is open in
pikepdf, or undefined behavior may occur. This is because the file may be
lazily loaded. Despite this restriction, pikepdf does not try to use any OS
services to obtain an exclusive lock on the file. Some applications may
want to attempt this or copy the file to a temporary location before
editing. This behaviour changes if *allow_overwriting_input* is set: the whole
file is then read and copied to memory, so that pikepdf can overwrite it
when calling ``.save()``.
When this function is called with a stream-like object, you must ensure
that the data it returns cannot be modified, or undefined behavior will
occur.
Any changes to the file must be persisted by using ``.save()``.
If *filename_or_stream* has ``.read()`` and ``.seek()`` methods, the file
will be accessed as a readable binary stream. pikepdf will read the
entire stream into a private buffer.
``.open()`` may be used in a ``with``-block; ``.close()`` will be called when
the block exits, if applicable.
Whenever pikepdf opens a file, it will close it. If you open the file
for pikepdf or give it a stream-like object to read from, you must
release that object when appropriate.
Examples:
>>> with Pdf.open("test.pdf") as pdf:
...
>>> pdf = Pdf.open("test.pdf", password="<PASSWORD>")
Args:
filename_or_stream: Filename or Python readable and seekable file
stream of PDF to open.
password: User or owner password to open an
encrypted PDF. If the type of this parameter is ``str``
it will be encoded as UTF-8. If the type is ``bytes`` it will
be saved verbatim. Passwords are always padded or
truncated to 32 bytes internally. Use ASCII passwords for
maximum compatibility.
hex_password: If True, interpret the password as a
hex-encoded version of the exact encryption key to use, without
performing the normal key computation. Useful in forensics.
ignore_xref_streams: If True, ignore cross-reference
streams. See qpdf documentation.
suppress_warnings: If True (default), warnings are not
printed to stderr. Use :meth:`pikepdf.Pdf.get_warnings()` to
retrieve warnings.
attempt_recovery: If True (default), attempt to recover
from PDF parsing errors.
inherit_page_attributes: If True (default), push attributes
set on a group of pages to individual pages
access_mode: If ``.default``, pikepdf will
decide how to access the file. Currently, it will always
selected stream access. To attempt memory mapping and fallback
to stream if memory mapping failed, use ``.mmap``. Use
``.mmap_only`` to require memory mapping or fail
(this is expected to only be useful for testing). Applications
should be prepared to handle the SIGBUS signal on POSIX in
the event that the file is successfully mapped but later goes
away.
allow_overwriting_input: If True, allows calling ``.save()``
to overwrite the input file. This is performed by loading the
entire input file into memory at open time; this will use more
                memory and may reduce performance, especially when the opened
file will not be modified.
Raises:
pikepdf.PasswordError: If the password failed to open the
file.
pikepdf.PdfError: If for other reasons we could not open
the file.
TypeError: If the type of ``filename_or_stream`` is not
usable.
FileNotFoundError: If the file was not found.
Note:
When *filename_or_stream* is a stream and the stream is located on a
            network, pikepdf assumes that the stream uses buffering and read caches
to achieve reasonable performance. Streams that fetch data over a network
in response to every read or seek request, no matter how small, will
perform poorly. It may be easier to download a PDF from network to
temporary local storage (such as ``io.BytesIO``), manipulate it, and
then re-upload it.
.. versionchanged:: 3.0
Keyword arguments now mandatory for everything except the first
argument.
"""
if isinstance(filename_or_stream, bytes) and filename_or_stream.startswith(
b'%PDF-'
):
warn(
"It looks like you called with Pdf.open(data) with a bytes-like object "
"containing a PDF. This will probably fail because this function "
"expects a filename or opened file-like object. Instead, please use "
"Pdf.open(BytesIO(data))."
)
tmp_stream, original_filename = None, False
if allow_overwriting_input:
try:
Path(filename_or_stream)
except TypeError as error:
raise ValueError(
'"allow_overwriting_input=True" requires "open" first argument '
'to be a file path'
) from error
original_filename = Path(filename_or_stream)
with open(original_filename, 'rb') as pdf_file:
tmp_stream = BytesIO()
shutil.copyfileobj(pdf_file, tmp_stream)
pdf = Pdf._open(
tmp_stream or filename_or_stream,
password=password,
            hex_password=hex_password,
ignore_xref_streams=ignore_xref_streams,
suppress_warnings=suppress_warnings,
attempt_recovery=attempt_recovery,
inherit_page_attributes=inherit_page_attributes,
access_mode=access_mode,
)
pdf._tmp_stream = tmp_stream
pdf._original_filename = original_filename
return pdf
@augments(_ObjectMapping)
class Extend_ObjectMapping:
def get(self, key, default=None) -> Object:
try:
return self[key]
except KeyError:
return default
def keys(self):
return KeysView(self)
def values(self):
return (v for _k, v in self.items())
def check_is_box(obj) -> None:
try:
if obj.is_rectangle:
return
except AttributeError:
pass
try:
pdfobj = Array(obj)
if pdfobj.is_rectangle:
return
except Exception as e:
raise ValueError("object is not a rectangle") from e
raise ValueError("object is not a rectangle")
@augments(Page)
class Extend_Page:
@property
def mediabox(self):
"This page's /MediaBox, in PDF units."
return self._get_mediabox(True)
@mediabox.setter
def mediabox(self, value):
check_is_box(value)
self.obj['/MediaBox'] = value
@property
def cropbox(self):
"""This page's effective /CropBox, in PDF units.
If the /CropBox is not defined, the /MediaBox is returned.
"""
return self._get_cropbox(True)
@cropbox.setter
def cropbox(self, value):
check_is_box(value)
self.obj['/CropBox'] = value
@property
def trimbox(self):
"""This page's effective /TrimBox, in PDF units.
If the /TrimBox is not defined, the /CropBox is returned (and if
/CropBox is not defined, /MediaBox is returned).
"""
return self._get_trimbox(True)
@trimbox.setter
def trimbox(self, value):
check_is_box(value)
self.obj['/TrimBox'] = value
@property
def images(self) -> _ObjectMapping:
"""Return all images associated with this page."""
return self._images
@property
def resources(self) -> Dictionary:
"""Return this page's resources dictionary."""
return self.obj['/Resources']
def add_resource(
self,
res: Object,
res_type: Name,
name: Optional[Name] = None,
*,
prefix: str = '',
replace_existing: bool = True,
) -> Name:
"""Adds a new resource to the page's Resources dictionary.
If the Resources dictionaries do not exist, they will be created.
Args:
self: The object to add to the resources dictionary.
res: The dictionary object to insert into the resources
dictionary.
res_type: Should be one of the following Resource dictionary types:
ExtGState, ColorSpace, Pattern, Shading, XObject, Font, Properties.
name: The name of the object. If omitted, a random name will be
generated with enough randomness to be globally unique.
prefix: A prefix for the name of the object. Allows conveniently
namespacing when using random names, e.g. prefix="Im" for images.
Mutually exclusive with name parameter.
replace_existing: If the name already exists in one of the resource
dictionaries, remove it.
Returns:
The name of the object.
Example:
>>> resource_name = pdf.pages[0].add_resource(formxobj, Name.XObject)
.. versionadded:: 2.3
.. versionchanged:: 2.14
If *res* does not belong to the same `Pdf` that owns this page,
a copy of *res* is automatically created and added instead. In previous
            versions, it was necessary to handle this case manually.
"""
if Name.Resources not in self.obj:
self.obj.Resources = Dictionary()
elif not isinstance(self.obj.Resources, Dictionary):
raise TypeError("Page /Resources exists but is not a dictionary")
resources = self.obj.Resources
if res_type not in resources:
resources[res_type] = Dictionary()
if name is not None and prefix:
raise ValueError("Must specify one of name= or prefix=")
if name is None:
name = Name.random(prefix=prefix)
for res_dict in resources.as_dict().values():
if not isinstance(res_dict, Dictionary):
continue
if name in res_dict:
if replace_existing:
del res_dict[name]
else:
raise ValueError(f"Name {name} already exists in page /Resources")
resources[res_type][name] = res.with_same_owner_as(self.obj)
return name
def _over_underlay(
self, other, rect: Optional[Rectangle], under: bool = True
) -> None:
formx = None
if isinstance(other, Page):
page = other
formx = other.as_form_xobject()
elif isinstance(other, Dictionary) and other.get(Name.Type) == Name.Page:
page = Page(other)
formx = page.as_form_xobject()
elif (
isinstance(other, Stream)
and other.get(Name.Type) == Name.XObject
and other.get(Name.Subtype) == Name.Form
):
formx = other
if formx is None:
raise TypeError("other object is not something we can convert to FormX")
if rect is None:
rect = Rectangle(page.trimbox)
formx_placed_name = self.add_resource(formx, Name.XObject)
cs = self.calc_form_xobject_placement(formx, formx_placed_name, rect)
self.contents_add(cs, prepend=under)
def add_overlay(self, other: Union[Object, Page], rect: Optional[Rectangle] = None):
"""Overlay another object on this page.
Overlays will be drawn after all previous content, potentially drawing on top
of existing content.
Args:
other: A Page or Form XObject to render as an overlay on top of this
page.
rect: The PDF rectangle (in PDF units) in which to draw the overlay.
If omitted, this page's trimbox, cropbox or mediabox will be used.
.. versionadded:: 2.14
"""
return self._over_underlay(other, rect, under=False)
def add_underlay(
self, other: Union[Object, Page], rect: Optional[Rectangle] = None
):
"""Underlay another object beneath this page.
Underlays will be drawn before all other content, so they may be overdrawn
partially or completely.
Args:
other: A Page or Form XObject to render as an underlay underneath this
page.
rect: The PDF rectangle (in PDF units) in which to draw the underlay.
If omitted, this page's MediaBox will be used.
.. versionadded:: 2.14
"""
return self._over_underlay(other, rect, under=True)
def contents_add(self, contents: Union[Stream, bytes], *, prepend: bool = False):
"""Append or prepend to an existing page's content stream.
Args:
contents: An existing content stream to append or prepend.
prepend: Prepend if true, append if false (default).
.. versionadded:: 2.14
"""
return self._contents_add(contents, prepend=prepend)
def __getattr__(self, name):
return getattr(self.obj, name)
@augment_override_cpp
def __setattr__(self, name, value):
if hasattr(self.__class__, name):
return object.__setattr__(self, name, value)
setattr(self.obj, name, value)
@augment_override_cpp
def __delattr__(self, name):
if hasattr(self.__class__, name):
return object.__delattr__(self, name)
delattr(self.obj, name)
def __getitem__(self, key):
return self.obj[key]
def __setitem__(self, key, value):
self.obj[key] = value
def __delitem__(self, key):
del self.obj[key]
def __contains__(self, key):
return key in self.obj
def __eq__(self, other):
return self.obj == other.obj
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def emplace(self, other: Page, retain=(Name.Parent,)):
return self.obj.emplace(other.obj, retain=retain)
def __repr__(self):
return (
repr(self.obj)
.replace('Dictionary', 'Page', 1)
.replace('(Type="/Page")', '', 1)
)
def _repr_mimebundle_(self, include=None, exclude=None):
data = {}
bundle = {'application/pdf', 'image/png'}
if include:
bundle = {k for k in bundle if k in include}
if exclude:
bundle = {k for k in bundle if k not in exclude}
pagedata = _single_page_pdf(self.obj)
if 'application/pdf' in bundle:
data['application/pdf'] = pagedata
if 'image/png' in bundle:
try:
data['image/png'] = _mudraw(pagedata, 'png')
except (FileNotFoundError, RuntimeError):
pass
return data
@augments(Token)
class Extend_Token:
def __repr__(self):
return f'pikepdf.Token({self.type_}, {self.raw_value})'
@augments(Rectangle)
class Extend_Rectangle:
def __repr__(self):
return f'pikepdf.Rectangle({self.llx}, {self.lly}, {self.urx}, {self.ury})'
def __hash__(self):
return hash((self.llx, self.lly, self.urx, self.ury))
@augments(Attachments)
class Extend_Attachments(MutableMapping):
def __getitem__(self, k: str) -> AttachedFileSpec:
filespec = self._get_filespec(k)
if filespec is None:
raise KeyError(k)
return filespec
def __setitem__(self, k: str, v: AttachedFileSpec) -> None:
if not v.filename:
v.filename = k
return self._add_replace_filespec(k, v)
def __delitem__(self, k: str) -> None:
return self._remove_filespec(k)
def __len__(self):
return len(self._get_all_filespecs())
def __iter__(self) -> Iterator[str]:
yield from self._get_all_filespecs()
def __repr__(self):
return f"<pikepdf._qpdf.Attachments with {len(self)} attached files>"
@augments(AttachedFileSpec)
class Extend_AttachedFileSpec:
@staticmethod
def from_filepath(pdf: Pdf, path: Union[Path, str], *, description: str = ''):
"""Construct a file specification from a file path.
This function will automatically add a creation and modified date
using the file system, and a MIME type inferred from the file's extension.
Args:
pdf: The Pdf to attach this file specification to.
path: A file path for the file to attach to this Pdf.
description: An optional description. May be shown to the user in
PDF viewers.
"""
mime, _ = mimetypes.guess_type(str(path))
if mime is None:
mime = ''
if not isinstance(path, Path):
path = Path(path)
stat = path.stat()
return AttachedFileSpec(
pdf,
path.read_bytes(),
description=description,
filename=str(path),
mime_type=mime,
creation_date=encode_pdf_date(
datetime.datetime.fromtimestamp(stat.st_ctime)
),
mod_date=encode_pdf_date(datetime.datetime.fromtimestamp(stat.st_mtime)),
)
def __repr__(self):
if self.filename:
return (
f"<pikepdf._qpdf.AttachedFileSpec for {self.filename!r}, "
f"description {self.description!r}>"
)
else:
return f"<pikepdf._qpdf.AttachedFileSpec description {self.description!r}>"
@augments(AttachedFile)
class Extend_AttachedFile:
@property
def creation_date(self) -> Optional[datetime.datetime]:
if not self._creation_date:
return None
return decode_pdf_date(self._creation_date)
@creation_date.setter
def creation_date(self, value: datetime.datetime):
self._creation_date = encode_pdf_date(value)
@property
def mod_date(self) -> Optional[datetime.datetime]:
if not self._mod_date:
return None
return decode_pdf_date(self._mod_date)
@mod_date.setter
def mod_date(self, value: datetime.datetime):
self._mod_date = encode_pdf_date(value)
def read_bytes(self) -> bytes:
return self.obj.read_bytes()
def __repr__(self):
return (
f'<pikepdf._qpdf.AttachedFile objid={self.obj.objgen} size={self.size} '
f'mime_type={self.mime_type} creation_date={self.creation_date} '
f'mod_date={self.mod_date}>'
)
@augments(NameTree)
class Extend_NameTree(MutableMapping):
def __len__(self):
return len(self._as_map())
def __iter__(self):
for name, _value in self._nameval_iter():
yield name
def keys(self):
return KeysView(self._as_map())
def values(self):
return ValuesView(self._as_map())
def items(self):
return ItemsView(self._as_map())
def __eq__(self, other):
return self.obj.objgen == other.obj.objgen
def __contains__(self, name: Union[str, bytes]) -> bool:
"""
Returns True if the name tree contains the specified name.
Args:
name (str or bytes): The name to search for in the name tree.
This is not a PDF /Name object, but an arbitrary key.
If name is a *str*, we search the name tree for the UTF-8
encoded form of name. If *bytes*, we search for a key
equal to those bytes.
"""
return self._contains(name)
def __getitem__(self, name: Union[str, bytes]) -> Object:
return self._getitem(name)
def __setitem__(self, name: Union[str, bytes], o: Object):
self._setitem(name, o)
def __delitem__(self, name: Union[str, bytes]):
self._delitem(name)
```
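A short usage sketch of the augmented `Pdf` API defined above; the file name is a placeholder and this is not taken from the pikepdf documentation:
```python
import pikepdf

with pikepdf.Pdf.open('input.pdf', allow_overwriting_input=True) as pdf:
    with pdf.open_metadata() as meta:
        meta['dc:title'] = 'Edited with pikepdf'
    pdf.add_blank_page(page_size=(612, 792))
    pdf.save()  # no filename needed because of allow_overwriting_input=True
```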
#### File: pikepdf/models/__init__.py
```python
from typing import Collection, List, Tuple, Union, cast
from pikepdf import Object, ObjectType, Operator, Page, PdfError, _qpdf
from .encryption import Encryption, EncryptionInfo, Permissions
from .image import PdfImage, PdfInlineImage, UnsupportedImageTypeError
from .matrix import PdfMatrix
from .metadata import PdfMetadata
from .outlines import (
Outline,
OutlineItem,
OutlineStructureError,
PageLocation,
make_page_destination,
)
# Operands, Operator
_OldContentStreamOperands = Collection[Union[Object, PdfInlineImage]]
_OldContentStreamInstructions = Tuple[_OldContentStreamOperands, Operator]
ContentStreamInstructions = Union[
_qpdf.ContentStreamInstruction, _qpdf.ContentStreamInlineImage
]
UnparseableContentStreamInstructions = Union[
ContentStreamInstructions, _OldContentStreamInstructions
]
class PdfParsingError(Exception):
def __init__(self, message=None, line=None):
if not message:
message = f"Error encoding content stream at line {line}"
super().__init__(message)
self.line = line
def parse_content_stream(
page_or_stream: Union[Object, Page], operators: str = ''
) -> List[ContentStreamInstructions]:
"""
Parse a PDF content stream into a sequence of instructions.
A PDF content stream is list of instructions that describe where to render
the text and graphics in a PDF. This is the starting point for analyzing
PDFs.
If the input is a page and page.Contents is an array, then the content
stream is automatically treated as one coalesced stream.
Each instruction contains at least one operator and zero or more operands.
This function does not have anything to do with opening a PDF file itself or
processing data from a whole PDF. It is for processing a specific object inside
a PDF that is already opened.
Args:
page_or_stream: A page object, or the content
stream attached to another object such as a Form XObject.
operators: A space-separated string of operators to whitelist.
For example 'q Q cm Do' will return only operators
that pertain to drawing images. Use 'BI ID EI' for inline images.
All other operators and associated tokens are ignored. If blank,
all tokens are accepted.
Example:
>>> with pikepdf.Pdf.open(input_pdf) as pdf:
>>> page = pdf.pages[0]
>>> for operands, command in parse_content_stream(page):
>>> print(command)
.. versionchanged:: 3.0
Returns a list of ``ContentStreamInstructions`` instead of a list
of (operand, operator) tuples. The returned items are duck-type compatible
with the previous returned items.
"""
if not isinstance(page_or_stream, (Object, Page)):
raise TypeError("stream must be a pikepdf.Object or pikepdf.Page")
if (
isinstance(page_or_stream, Object)
and page_or_stream._type_code != ObjectType.stream
and page_or_stream.get('/Type') != '/Page'
):
raise TypeError("parse_content_stream called on page or stream object")
if isinstance(page_or_stream, Page):
page_or_stream = page_or_stream.obj
try:
if page_or_stream.get('/Type') == '/Page':
page = page_or_stream
instructions = cast(
List[ContentStreamInstructions],
page._parse_page_contents_grouped(operators),
)
else:
stream = page_or_stream
instructions = cast(
List[ContentStreamInstructions],
Object._parse_stream_grouped(stream, operators),
)
except PdfError as e:
if 'supposed to be a stream or an array' in str(e):
raise TypeError("parse_content_stream called on non-stream Object") from e
else:
raise e from e
return instructions
def unparse_content_stream(
instructions: Collection[UnparseableContentStreamInstructions],
) -> bytes:
"""
Given a parsed list of instructions/operand-operators, convert to bytes suitable
for embedding in a PDF. In PDF the operator always follows the operands.
Args:
instructions: collection of instructions such as is returned
by :func:`parse_content_stream()`
Returns:
A binary content stream, suitable for attaching to a Pdf.
        To attach to a Pdf, use :meth:`Pdf.make_stream()`.
.. versionchanged:: 3.0
Now accept collections that contain any mixture of
``ContentStreamInstruction``, ``ContentStreamInlineImage``, and the older
operand-operator tuples from pikepdf 2.x.
"""
try:
return _qpdf._unparse_content_stream(instructions)
except (ValueError, TypeError, RuntimeError) as e:
raise PdfParsingError(
"While unparsing a content stream, an error occurred"
) from e
```
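For quick reference, a minimal round-trip sketch using the two helpers above; the file names `input.pdf` and `output.pdf` are placeholders and error handling is omitted:
```python
import pikepdf
from pikepdf import parse_content_stream, unparse_content_stream

# Sketch: parse the first page's content stream, inspect it, and serialize the
# instructions back into a new stream attached to the page.
with pikepdf.Pdf.open('input.pdf') as pdf:        # 'input.pdf' is a placeholder
    page = pdf.pages[0]
    instructions = parse_content_stream(page)     # all operators
    draw_ops = parse_content_stream(page, operators='q Q cm Do')
    print(len(instructions), 'instructions,', len(draw_ops), 'drawing-related')
    page.obj.Contents = pdf.make_stream(unparse_content_stream(instructions))
    pdf.save('output.pdf')
```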
#### File: ProjectsAssignment/KnowledgeBaseReader/dataSplitter.py
```python
import re
def extractData(text_data, column_dict):
    # dict that contains the column name and all the possible ways it shows up in the text
data_dict = dict()
# split pdf checking for the last possible column first
for i in column_dict:
if column_dict[i][1] == "split":
if re.search(column_dict[i][0],text_data) != None:
tempSplit = re.split(column_dict[i][0],text_data)
data_dict[i]=tempSplit[1].lstrip("s: ")
text_data = tempSplit[0]
else:
data_dict[i] = None
elif column_dict[i][1] == "search":
tempSearch = re.search(column_dict[i][0], text_data)
if tempSearch != None:
data_dict[i]= tempSearch.group(0)
else:
data_dict[i]= None
else:
print("Columns section of format file given in wrong form please adhere to the following: \n 'column':['regex','split OR search']")
data_dict[i]=None
#error handling would go here
return data_dict
``` |
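A small usage sketch for `extractData`; the column names and regexes below are hypothetical stand-ins for whatever the real format file defines:
```python
# Hypothetical column_dict following the documented 'column': ['regex', 'split OR search'] form.
column_dict = {
    "Notes":  [r"Notes?", "split"],        # 'split': take the text after the match
    "Author": [r"Author: \w+", "search"],  # 'search': take the match itself
}
text = "Author: Smith ... body of the record ... Notes: remember to update"
print(extractData(text, column_dict))
# -> {'Notes': 'remember to update', 'Author': 'Author: Smith'}
```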
{
"source": "johnnyfireball/pythonmusic",
"score": 2
} |
#### File: pythonmusic/cli/curses_demo.py
```python
import sys
import os
import curses
import json
from mpvcontrol import MPVControl
class PythonPlayer:
def __init__(self):
pass
mpv = None
gstdscr = None
width = None
height = None
def start_screen(stdscr):
global gstdscr
global width
global height
gstdscr = stdscr
# Clear and refresh the screen for a blank canvas
gstdscr.clear()
gstdscr.refresh()
# Start colors in curses
curses.start_color()
curses.init_pair(1, curses.COLOR_CYAN, curses.COLOR_BLACK)
curses.init_pair(2, curses.COLOR_RED, curses.COLOR_BLACK)
curses.init_pair(3, curses.COLOR_BLACK, curses.COLOR_WHITE)
draw_screen()
def draw_center_content(key_pressed):
global gstdscr
global width
global height
gstdscr.addstr(height - 3, 0, "Lyrics will go here")
def ask_user(msg):
curses.echo()
nstdscr = curses.initscr()
nstdscr.clear()
choice = my_raw_input(nstdscr, 2, 3, msg).decode("utf-8")
gstdscr.clear()
return choice.strip()
def get_paused():
paused = mpv.execute_cmd(['get_property', 'pause'])
# TODO I cant get json to read this. I did simple paused[0].decode('utf-8')
# json.loads wont work. Silly because this should work, something going on.
# this paused needs to be corrected. hacked together for now
# statusbar += f"{paused[0]}"
paused = str(paused[0].strip()).strip("'<>() ").replace('\'', '\"').replace('b"', '')
if paused == '{"data":false,"error":"success"}':
return False
else:
        return True
def my_raw_input(stdscr, r, c, prompt_string):
curses.echo()
stdscr.addstr(r, c, prompt_string)
stdscr.refresh()
inp = stdscr.getstr(r + 1, c, 200)
return inp
def draw_screen():
global gstdscr
global width
global height
global mpv
mpv = MPVControl(False)
# mpv.start_mpv()
key_pressed = None
mpv_launched = False
# TODO clean up this menu its a disaster
user_commands = {
"1) Launch MPV": ["set_property", "pause", False],
}
playing = ""
# Initialization
while key_pressed != ord("q"):
stdscr = curses.initscr()
gstdscr.clear()
height, width = gstdscr.getmaxyx()
gstdscr.clear()
if key_pressed == ord("1") and not mpv_launched:
# TODO fix what a bad way, not even using this as a dict.
user_commands = {
"2) Play/Pause": ["set_property", "pause", False],
"3) Pause": ["set_property", "pause", True],
"4) Play File": ["loadfile"],
"5) Load List": ["loadlist"],
"n) Next": ["playlist-next", "weak"],
"p) Prev": ["playlist-prev", "weak"],
"s) Shuffle Playlist": ["quit"],
"c) Launch Cava": ["quit"],
"q) Quit": ["quit"],
}
mpv.start_mpv()
mpv_launched = True
statusbar = " "
media_title = None
if mpv_launched:
statusbar = "MPV Started"
paused = mpv.execute_cmd(['get_property', 'pause'])
# TODO I cant get json to read this. I did simple paused[0].decode('utf-8')
# json.loads wont work. Silly because this should work, something going on.
# this paused needs to be corrected. hacked together for now
if key_pressed == ord("1"):
pass
if key_pressed in [ord("3"), ord("2")]:
if get_paused():
mpv.execute_cmd(["set_property", "pause", False])
else:
mpv.execute_cmd(["set_property", "pause", True])
paused = get_paused()
if paused:
statusbar += " - Paused"
else:
statusbar += " - Playing"
if key_pressed == ord("4"):
# TODO error checking
choice = ask_user("Enter File or Youtube URL")
if choice:
mpv.execute_cmd(['loadfile', choice, 'append-play'])
if key_pressed == ord("5"):
# TODO error checking
choice = ask_user("Enter File")
if choice:
mpv.execute_cmd(['loadlist', choice])
if key_pressed == ord("n"):
# TODO show response
mpv.execute_cmd(["playlist-next", "weak"])
if key_pressed == ord("p"):
# TODO show response
mpv.execute_cmd(["playlist-prev", "weak"])
if key_pressed == ord("s"):
# TODO show response
mpv.execute_cmd(["playlist-shuffle"])
# TODO hacked up example
# TODO Handle media_title properly.
media_title = mpv.execute_cmd(['get_property', 'media-title'])
media_title = str(media_title[0].decode("utf-8")).strip("'<>() ").replace('\'', '\"').replace('b"', '').replace('","error":"success"}', '')
# media_title = str(media_title[0].strip())
media_title = media_title.replace('{"data":"', '')
# TODO Handle statusbar properly.
gstdscr.addstr(height - 1, 0, statusbar)
try:
gstdscr.addstr(height - 2, 0, media_title)
except:
pass
# Rendering some text
# whstr = "Width: {}, Height: {}".format(width, height)
# gstdscr.addstr(0, 0, whstr, curses.color_pair(1))
for idx, i in enumerate(user_commands):
gstdscr.addstr(idx, 0, i)
# Turning on attributes for title
# Center window content
# TODO fix same json.loads issue
draw_center_content(key_pressed)
# Refresh the screen
gstdscr.refresh()
# Wait for next input
key_pressed = gstdscr.getch()
mpv.execute_cmd(['quit'])
def main():
curses.wrapper(start_screen)
if __name__ == "__main__":
main()
``` |
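One possible cleanup for the JSON parsing TODO in `get_paused()` above; this is only a sketch, and it assumes `mpv.execute_cmd` returns a list whose first element is a bytes JSON payload such as `b'{"data":false,"error":"success"}'`:
```python
import json

def get_paused_via_json(mpv):
    # Decode the raw bytes reply and let json do the parsing instead of the
    # string surgery above; fall back to False if the reply is malformed.
    raw = mpv.execute_cmd(['get_property', 'pause'])[0]
    try:
        reply = json.loads(raw.decode('utf-8').strip())
    except (ValueError, AttributeError):
        return False
    return bool(reply.get('data', False))
```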
{
"source": "johnnygerard/pwdgen",
"score": 2
} |
#### File: johnnygerard/pwdgen/pwdgen.py
```python
from string import *
from secrets import choice, randbelow
import argparse
import sys
PROG_NAME = 'pwdgen'
parser = argparse.ArgumentParser(
prog=PROG_NAME,
formatter_class=argparse.RawTextHelpFormatter,
description='''Generate offline secure passwords using a CSPRNG\
(Cryptographically Strong Pseudo Random Number Generator).
By default, each character is randomly selected from the ASCII character set\
(excluding space and control characters).
The user-defined character set is built in two phases:
1. Form a base character set using one or more flags (--all\
when no flags are passed).
These flags combined define a character superset (equal to their set\
union).
2. Add or remove specific characters from base set\
using the options --include or --exclude.
These two options may require surrounding quotes and default to the empty\
string.''',
epilog="""EXAMPLES
4-digit PIN: %(prog)s -d 4
no symbols: %(prog)s -a
no slashes: %(prog)s -e '\\/'
8-bit key: %(prog)s -b 8
base64 key: %(prog)s -ai '+/'""")
parser.add_argument(
'length', nargs='?', default=16, type=int,
help='number of password characters (default: %(default)s)')
parser.add_argument(
'-v', '--version', action='version', version='%(prog)s 2.2',
help="show program's version number and exit\n\n")
def add_flag(short_option, long_option, help_string):
parser.add_argument(f'-{short_option}', f'--{long_option}',
action='store_true', help=help_string)
add_flag('l', 'lowercase', 'latin small letters (a-z)')
add_flag('u', 'uppercase', 'latin capital letters (A-Z)')
add_flag('d', 'digit', 'decimal digits (0-9)')
add_flag('s', 'symbol', 'punctuation and symbols')
add_flag('L', 'letter', 'same as --lowercase --uppercase')
add_flag('a', 'alphanumeric', 'same as --letter --digit')
add_flag('A', 'all', 'same as --alphanumeric --symbol (default)')
add_flag('0', 'empty', 'empty character set (use with --include)')
add_flag('b', 'binary', 'bits (0-1)')
add_flag('o', 'octal', 'octal digits (0-7)')
add_flag('x', 'hex-lower', 'lowercase hexadecimal digits (0-9, a-f)')
add_flag('X', 'hex-upper', 'same as --hex-lower converted to uppercase\n\n')
parser.add_argument(
'-e', '--exclude', default='', metavar='EXCLUDED',
help='remove EXCLUDED characters from base set')
parser.add_argument(
'-i', '--include', default='', metavar='INCLUDED',
help='add INCLUDED characters to base set\n\n')
parser.add_argument('--pure', action='store_true', help='''Disable the minimum\
of 1 character applied to digits, symbols, lowercase and uppercase.
Only applies to passwords of length >= 4.
For example: '%(prog)s 4' always contains exactly 1 character of each category.
'%(prog)s 4 --pure' could produce 0000 or $$$$.''')
namespace = parser.parse_args()
# validate length argument
if namespace.length <= 0:
sys.exit(f'{PROG_NAME}: error: length must be positive')
# define character sets
LOWERCASE = set(ascii_lowercase)
UPPERCASE = set(ascii_uppercase)
LETTER = LOWERCASE | UPPERCASE
DIGIT = set(digits)
ALPHANUMERIC = LETTER | DIGIT
SYMBOL = set(punctuation)
ALL = ALPHANUMERIC | SYMBOL
BINARY = set('01')
OCTAL = set(octdigits)
HEX_LOWER = DIGIT | set('abcdef')
HEX_UPPER = DIGIT | set('ABCDEF')
excluded_set = set(namespace.exclude)
included_set = set(namespace.include)
# sanitize --exclude and --include arguments
for char in (excluded_set | included_set):
if char not in ALL:
sys.exit(f'{PROG_NAME}: error: found unauthorized character\
(U+{ord(char):04X})')
# check --exclude and --include for conflicts
if excluded_set & included_set:
sys.exit(f'{PROG_NAME}: error: options --exclude and --include conflict\
(common characters disallowed)')
# phase 1: combine flags to build the base character set
character_set = set()
if namespace.all:
character_set = ALL
else:
if namespace.lowercase:
character_set |= LOWERCASE
if namespace.uppercase:
character_set |= UPPERCASE
if namespace.letter:
character_set |= LETTER
if namespace.digit:
character_set |= DIGIT
if namespace.alphanumeric:
character_set |= ALPHANUMERIC
if namespace.symbol:
character_set |= SYMBOL
if namespace.binary:
character_set |= BINARY
if namespace.octal:
character_set |= OCTAL
if namespace.hex_lower:
character_set |= HEX_LOWER
if namespace.hex_upper:
character_set |= HEX_UPPER
# default flag (--all) or --empty
if not character_set and not namespace.empty:
character_set = ALL
# phase 2: add or remove using --include and --exclude strings
character_set |= included_set
character_set -= excluded_set
character_list = list(character_set)
if not character_list:
sys.exit(f'{PROG_NAME}: error: character set empty')
password = []
if namespace.length >= 4 and not namespace.pure:
# classify characters into 4 categories
DIGITS = []
PUNCTUATION = []
ASCII_LOWERCASE = []
ASCII_UPPERCASE = []
for char in character_list:
if char in digits:
DIGITS.append(char)
elif char in ascii_lowercase:
ASCII_LOWERCASE.append(char)
elif char in ascii_uppercase:
ASCII_UPPERCASE.append(char)
else:
PUNCTUATION.append(char)
count = sum([bool(DIGITS),
bool(PUNCTUATION),
bool(ASCII_LOWERCASE),
bool(ASCII_UPPERCASE)])
for i in range(namespace.length - count):
password.append(choice(character_list))
# make sure we have at least one character for each non-empty category
for chars in [DIGITS, PUNCTUATION, ASCII_LOWERCASE, ASCII_UPPERCASE]:
if chars:
# insert instead of append to maintain randomness
password.insert(randbelow(len(password) + 1), choice(chars))
else:
# remaining characters
for i in range(namespace.length):
password.append(choice(character_list))
print(''.join(password))
``` |
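As an illustration of the two-phase character-set construction described in the help text, here is a standalone sketch that mirrors the logic above rather than importing the script:
```python
from string import digits

base = set(digits)             # phase 1: union of the selected flags (here just --digit)
base |= set('abc')             # phase 2: --include 'abc'
base -= set('0')               # phase 2: --exclude '0'
print(''.join(sorted(base)))   # -> 123456789abc
```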
{
"source": "johnnygreco/asas-sn-hd",
"score": 2
} |
#### File: asas-sn-hd/ashd/butler.py
```python
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
import re
import numpy as np
import pandas as pd
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.io import fits
from .image import ASHDImage
class Butler(object):
"""
Fetch ASAS-SN stacked images.
"""
def __init__(self, data_dir):
self.data_dir = data_dir
self.files = [file for file in os.listdir(self.data_dir) if file[-4:]=='fits']
ra_vals = []
dec_vals = []
for f in self.files:
_ra = int(f[1:5])
if _ra > 2400:
_ra = _ra - 2400
_ra = str(_ra).zfill(4)
ra_vals.append(_ra[:2]+':'+_ra[2:]+':00')
dec_vals.append(f[5:8])
self.fn_coords = SkyCoord(ra_vals, dec_vals, unit=(u.hourangle, u.deg))
ra_vals = self.fn_coords.ra.value
dec_vals = self.fn_coords.dec.value
df = pd.DataFrame(dict(ra=ra_vals, dec=dec_vals))
df.drop_duplicates(inplace=True)
self.unique_coords = SkyCoord(
df.ra.values, df.dec.values, unit=(u.hourangle, u.deg))
def get_image_fn(self, ra, dec=None, unit='deg'):
        # ra can also be a SkyCoord object, in which case dec is omitted
        coord = SkyCoord(ra, dec, unit=unit) if dec is not None else ra
seps = self.fn_coords.separation(coord)
fn_coord = self.fn_coords[seps.argmin()].to_string('hmsdms').split()
fn_ra = ''.join(re.split('[a-z]', fn_coord[0])[:2])
fn_dec = re.split('[a-z]', fn_coord[1])[0]
prefix = f'F{fn_ra}{fn_dec}_'
fn = [f for f in self.files if prefix in f]
if len(fn)>1:
sig = []
for f in fn:
sig.append(self.get_sb_sig(image_fn=f))
fn = fn[np.argmax(sig)]
else:
fn = fn[0]
return os.path.join(self.data_dir, fn)
def get_image(self, ra, dec=None, unit='deg'):
return ASHDImage(self, ra=ra, dec=dec)
def get_hdulist(self, ra=None, dec=None, unit='deg', image_fn=None):
fn = image_fn if image_fn else self.get_image_fn(ra, dec, unit=unit)
return fits.open(fn)
def get_data(self, ra=None, dec=None, unit='deg', image_fn=None):
fn = image_fn if image_fn else self.get_image_fn(ra, dec, unit=unit)
return fits.getdata(fn)
def get_header(self, ra=None, dec=None, unit='deg', image_fn=None):
fn = image_fn if image_fn else self.get_image_fn(ra, dec, unit=unit)
return fits.getheader(fn)
#Returns np.nan if the object has no SB_SIG
def get_sb_sig(self, ra=None, dec=None, unit='deg', image_fn=None):
fn = image_fn if image_fn else self.get_image_fn(ra, dec, unit=unit)
head = fits.getheader(os.path.join(self.data_dir, fn))
return head.get('SB_SIG', np.nan)
```
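A usage sketch for `Butler`; the data directory below is a placeholder for wherever the stacked FITS images live:
```python
butler = Butler(data_dir='/path/to/asas-sn-images')   # placeholder path
fn = butler.get_image_fn('02h40m00s', '-34d27m00s', unit=('hourangle', 'deg'))
data = butler.get_data(image_fn=fn)
print(fn, data.shape)
```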
#### File: asas-sn-hd/ashd/params.py
```python
from astropy.stats import gaussian_fwhm_to_sigma
from astropy.convolution import Gaussian2DKernel
class PipeParams(object):
"""
Class to hold all the pipline parameters.
"""
def __init__(self):
self.log_level = 'info'
self.data_dir = '/Users/protostar/Dropbox/projects/data/asas-sn-images'
# sep.Background parameters
self.bw = 64
self.bh = 64
self.fw = 3
self.fh = 3
self.fthresh = 0.0
# sep.extract parameters
self.thresh = 1.5
self.minarea = 80
self.deblend_nthresh = 32
self.deblend_cont = 0.005
self.clean = True
self.clean_param = 1.0
self.segmentation_map = False
self.filter_type = 'conv'
# smoothing kernel
size = 31
self.gauss_fwhm = 5.0
sigma = gaussian_fwhm_to_sigma*self.gauss_fwhm
self.kernel = Gaussian2DKernel(sigma, x_size=size, y_size=size)
# ring filter parameters
self.do_ring_filter = True
self.r_inner = 5.0
self.r_outer = 8.0
@property
def sep_back_kws(self):
kws = dict(
bw=self.bw, bh=self.bh, fw=self.fw, fh=self.fh,
fthresh=self.fthresh)
return kws
@property
def sep_extract_kws(self):
if self.kernel is not None:
self.kernel.normalize()
kern_arr = self.kernel.array
else:
kern_arr = None
kws = dict(
thresh=self.thresh, minarea=self.minarea, filter_kernel=kern_arr,
filter_type=self.filter_type, deblend_nthresh=self.deblend_nthresh,
deblend_cont=self.deblend_cont, clean=self.clean,
clean_param=self.clean_param,
segmentation_map=self.segmentation_map)
return kws
```
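A sketch of how the two keyword-argument properties above are meant to be consumed by `sep`; the image here is a random stand-in array:
```python
import numpy as np
import sep

p = PipeParams()
data = np.random.normal(size=(512, 512)).astype(np.float64)   # stand-in image
bkg = sep.Background(data, **p.sep_back_kws)
objects = sep.extract(data - bkg.back(), err=bkg.globalrms, **p.sep_extract_kws)
print(len(objects), 'sources detected')
```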
#### File: ashd/pipelinev2/algo2.py
```python
from global_vals import *
from common import cut_corners
import numpy as np
from scipy import signal
# this algo revolves around finding objects wherein some form of gradient/Sersic profile can be found
# basically: how do you avoid star clusters?
def find_lbg(objects, data, **kwargs):
maxtries = kwargs.get('maxtries', objects.size)
if not kwargs.get('corners', False): objects = np.array(list(cut_corners(objects, thresh=500)))
largest = sorted(objects, key = lambda x: x['npix'], reverse=True)[0:maxtries]
found = 0; maxfindings = kwargs.get('maxfindings', MAX_FINDINGS)
for obj in largest:
if is_lbg(obj, data):
found += 1
yield obj
if found > maxfindings: break
#return None
def is_lbg(obj, data, default=[30, 2030], extend=30, sigma=1000):
_, smoothed = datavals(obj, data, default, extend, sigma)
m = np.mean(smoothed)
maxval = m + np.std(smoothed)
mid = smoothed[smoothed.size // 2]
p25 = smoothed[smoothed.size // 4] - smoothed[(smoothed.size // 4) - 1]
p75 = smoothed[smoothed.size * 3 // 4] - smoothed[(smoothed.size * 3 // 4) - 1]
return mid > maxval and p25 > 0 and p75 < m
def datavals(obj, data, default, extend, sigma):
xmin = int(obj['xmin']) - extend
xmin = xmin if xmin > default[0] else default[0]
xmax = int(obj['xmax']) + extend
xmax = xmax if xmax < default[1] else default[1]
subset = data[int(obj['y']), xmin:xmax]
#ash = np.arcsinh(subset)
smoothed = signal.cspline1d(subset, sigma)
return (subset, smoothed)
```
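A usage sketch for the generator above; `objects` is assumed to be a `sep.extract` catalog (which carries the `npix`, `xmin`, `xmax`, `x`, `y` fields used here) and `data` the corresponding image array:
```python
# Iterate over LBG candidates found among the largest detected objects.
for obj in find_lbg(objects, data, maxtries=50, maxfindings=5):
    print('candidate at x=%.1f y=%.1f, npix=%d' % (obj['x'], obj['y'], obj['npix']))
```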
#### File: ashd/tests/test_butler.py
```python
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from ashd.butler import Butler
def test_butler():
ra = '02h39m59.3s'
dec = '-34d26m57s'
    b = Butler(data_dir='/path/to/asas-sn-images')  # placeholder data directory
fn = b.get_image_fn(ra, dec, unit=('hourangle', 'degree'))
assert fn
    data = b.get_data(image_fn=fn)
assert data.shape == (2048, 2048)
``` |
{
"source": "johnnygreco/astrostamps",
"score": 3
} |
#### File: astrostamps/astrostamps/tools.py
```python
import os
try:
from urllib.request import urlopen
except ImportError:
from urllib import urlopen
from io import BytesIO
import requests
from getpass import getpass
import xml.etree.ElementTree as ET
from PIL import Image
import numpy as np
from matplotlib import image
import astropy.io.fits as fits
from astropy.wcs import WCS
from astropy.visualization import make_lupton_rgb
__all__ = ["project_dir", "fetch_sdss_cutout", "HSCSession", "fetch_galex_cutout"]
project_dir = os.path.dirname(os.path.dirname(__file__))
def fetch_sdss_cutout(ra, dec, scale=0.4, width=512, height=512, opt=''):
"""Fetch SDSS color cutout image at the given ra, dec
ra, dec : float
in degrees
    scale : float
pixel scale in arcseconds
width, height : integer
image width and height in number of pixels
should be between 64 and 2048
opt : str
a set of uppercase characters for options
Returns numpy array of (width, height, 3).
The array can be input to matplotlib imshow for an RGB image.
The following are available opt:
G Grid Draw a N-S E-W grid through the center
L Label Draw the name, scale, ra, and dec on image
    P PhotoObj Draw a small circle around each primary photoObj
S SpecObj Draw a small square around each specObj
O Outline Draw the outline of each photoObj
B Bounding Box Draw the bounding box of each photoObj
F Fields Draw the outline of each field
M Masks Draw the outline of each mask considered to be important
Q Plates Draw the outline of each plate
I Invert Invert the image (B on W)
This will raise HTTPError if outside SDSS footprint.
Reference
- http://skyserver.sdss.org/dr13/en/tools/chart/chartinfo.aspx
"""
url = ("http://skyserver.sdss.org/dr13/SkyServerWS/ImgCutout"
"/getjpeg?ra=%.8f&dec=%.8f&scale=%.5f"
"&width=%i&height=%i&opt=%s" % (
ra, dec, scale, width, height, opt))
return image.imread(urlopen(url), format='jpeg')
class HSCSession(object):
    def __init__(self, user, password=None,
base_url='https://hsc-release.mtk.nao.ac.jp/'):
self.session = requests.Session()
self.base_url = base_url
if password is None:
password = getpass('Enter password: ')
self.session.auth = (user, password)
def fetch_hsc_cutout(self, ra, dec, width=2.0, height=2.0, band='R',
imageonly=True, dr=2.1):
"""Fetch HSC cutout image at the given ra, dec
ra, dec : float
in degrees
width, height : float
in arcseconds
band : string of characters
HSC band names, GRIZY
imageonly : bool
return images only not the entire fits hdus
"""
band = band.upper()
images = []
for oneband in band:
url = (os.path.join(self.base_url, 'das_quarry/')+\
"dr%s/cgi-bin/quarryImage?"
"ra=%.6f&dec=%.6f&sw=%.6fasec&sh=%.6fasec"
"&type=coadd&image=on&mask=on&variance=on&"
"filter=HSC-%s&tract=&rerun=" %\
(str(dr), ra, dec, width/2.0, height/2.0, oneband))
resp = self.session.get(url)
if resp.ok:
images.append(fits.open(BytesIO(resp.content)))
if imageonly:
images = np.dstack([hdu[1].data for hdu in images])
return images
def make_rgb_image(self, ra=None, dec=None, width=2.0, height=2.0, band='irg',
stretch=5, Q=8, images=None, **kwargs):
"""
Make RGB image.
Parameters
----------
ra, dec : float
in degrees
width, height : float
in arcseconds
band : string of characters
HSC band names for in RGB order
stretch : float
Linear stretch of HSC RGB image
Q : float
The asinh softening parameter for HSC RGB image
images : ndarray
If not None, will make rgb image using these images
Returns
-------
rgb : ndarry
The RGB image
"""
if images is None:
images = self.fetch_hsc_cutout(ra, dec, width, height, band, **kwargs)
rgb = make_lupton_rgb(images[:, :, 0], images[:, :, 1],
images[:, :, 2], stretch=stretch, Q=Q)
return rgb
def fetch_psf(self, ra, dec, band='i', rerun='s18a_wide'):
"""
Fetch psf at give ra & dec
"""
num = {'s17a_wide':'5', 's18a_wide':'6'}[rerun]
url = self.base_url+'psf/'+num+'/cgi/getpsf?ra={:.6f}&'
url += 'dec={:.6f}&filter={}&rerun={}&tract='
url += 'auto&patch=auto&type=coadd'
url = url.format(ra, dec, band, rerun)
resp = self.session.get(url)
return fits.getdata(BytesIO(resp.content))
def fetch_galex_cutout(ra, dec, size=50, survey='AIS'):
"""
Fetch Galex NUV+FUV cutout image.
Parameters
----------
ra, dec : float
Center of cutout in degress
size : float
Size of cutout in arcsec
survey : str
Galex survey (AIS, MIS, DIS, NGS, GII)
Returns
-------
cutout : PIL.Image.Image
The cutout image
Notes
-----
- adapted from script by https://github.com/wschoenell
(https://gist.github.com/wschoenell/ea27e28f271da9b472e51e890b9477ba)
"""
pixscale = 1.5 # arcsec/pixel
url = 'http://galex.stsci.edu/gxWS/SIAP/gxSIAP.aspx?POS={},{}&SIZE=0'.format(ra, dec)
req = requests.request('GET', url)
data = ET.XML(req.content)
VOTab = '{http://www.ivoa.net/xml/VOTable/v1.1}'
resource = data.find(VOTab+'RESOURCE')
table = resource.find(VOTab+'TABLE').find(VOTab+'DATA').find(VOTab+'TABLEDATA')
survey_idx = np.argwhere(np.array([t[0].text for t in table])==survey)
if len(survey_idx)==0:
print('**** No {} image found at {} {} ****'.format(survey, ra, dec))
return None
fits_url = table[survey_idx[-2][0]][20].text
wcs = WCS(fits.getheader(fits_url))
x, y = wcs.wcs_world2pix(ra, dec, 0)
jpg_url = table[survey_idx[-1][0]][20].text
crop_pix = np.floor(size/pixscale/2.0)
crop = (x - crop_pix, y - crop_pix, x + crop_pix, y + crop_pix)
jpg_img = Image.open(BytesIO(requests.request('GET', jpg_url).content))
return jpg_img.transpose(Image.FLIP_TOP_BOTTOM).crop(crop)
``` |
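A quick usage sketch for the SDSS helper above; the coordinates are illustrative, and as the docstring notes the call raises HTTPError outside the SDSS footprint:
```python
import matplotlib.pyplot as plt

img = fetch_sdss_cutout(180.0, 0.5, scale=0.4, width=256, height=256, opt='L')
plt.imshow(img)
plt.axis('off')
plt.show()
```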
{
"source": "johnnygreco/hugs-pipe",
"score": 3
} |
#### File: hugs/imtools/kernels.py
```python
from __future__ import division, print_function
import numpy as np
from astropy.convolution import discretize_model
__all__ = ['exp_kern']
def exp_kern(alpha, size, norm=True, mode='center', factor=10):
"""
Generate 2D, radially symmetric exponential kernel The kernels are
discretized using astropy.convolution.discretize_model.
Parameters
----------
alpha : float
The scale length of the exponential.
size : odd int
Number of pixel in x & y directions.
    norm : bool, optional
If True, normalize the kern array.
mode : str, optional
One of the following discretization modes:
'center', 'oversample', 'linear_interp',
or 'integrate'. See astropy docs for details.
factor : float or int
Factor of oversampling.
Returns
-------
kern : 2D ndarray
The convolution kernel.
"""
assert size%2!=0, 'ERROR: size must be odd'
x_range = (-(int(size) - 1) // 2, (int(size) - 1) // 2 + 1)
model = lambda x, y: np.exp(-np.sqrt(x**2 + y**2)/alpha)
kern = discretize_model(model, x_range, x_range, mode=mode, factor=factor)
if norm:
kern /= kern.sum()
return kern
```
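For example, a 51-by-51 kernel with a 5-pixel scale length can be built and used to smooth an image (scipy is used here only for the convolution; the image is a random stand-in):
```python
import numpy as np
from scipy.signal import fftconvolve

kern = exp_kern(alpha=5.0, size=51)
image = np.random.normal(size=(200, 200))      # stand-in image
smoothed = fftconvolve(image, kern, mode='same')
print(kern.shape, round(kern.sum(), 6))        # (51, 51), ~1.0 since norm=True
```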
#### File: hugs/pipeline/find_lsbgs.py
```python
from __future__ import division, print_function
import os
import numpy as np
from astropy.table import Table, hstack
from lsst.pipe.base import Struct
from .. import utils
from .. import imtools
from .. import primitives as prim
from ..cattools import xmatch
__all__ = ['run']
def run(cfg):
"""
Run hugs pipeline using SExtractor for the final detection
and photometry.
Parameters
----------
cfg : hugs_pipe.Config
Configuration object which stores all params
as well as the exposure object.
Returns
-------
results : lsst.pipe.base.Struct
Object containing results:
results.all_detections : catalog of all detections
results.sources : catalog of sources we are keeping
results.exp : exposure object for this run
results.exp_clean : cleaned exposure object for this run
results.success : boolean flag of run status
"""
assert cfg.tract and cfg.patch, 'No patch id given!'
cfg.timer # start timer
############################################################
# Get masked image and check if we have enough good data
############################################################
try:
mi = cfg.exp[cfg.band_detect].getMaskedImage()
mask = mi.getMask()
if cfg.exp.patch_meta.good_data_frac < cfg.min_good_data_frac:
cfg.logger.warning('***** not enough data!!! ****')
results = _null_return(cfg)
return results
############################################################
# Image thesholding at low and high thresholds. In both
# cases, the image is smoothed at the psf scale.
############################################################
mi_smooth = imtools.smooth_gauss(mi, cfg.psf_sigma)
cfg.logger.info('performing low threshold at '
'{} sigma'.format(cfg.thresh_low['thresh']))
fpset_low = prim.image_threshold(
mi_smooth, mask=mask, plane_name='THRESH_LOW', **cfg.thresh_low)
cfg.logger.info('performing high threshold at '
'{} sigma'.format(cfg.thresh_high['thresh']))
fpset_high = prim.image_threshold(
mi_smooth, mask=mask, plane_name='THRESH_HIGH', **cfg.thresh_high)
############################################################
# Get "cleaned" image, with noise replacement
############################################################
cfg.logger.info('generating cleaned exposure')
exp_clean = prim.clean(cfg.exp[cfg.band_detect], fpset_low, **cfg.clean)
mi_clean = exp_clean.getMaskedImage()
mask_clean = mi_clean.getMask()
############################################################
# Detect sources and measure props with SExtractor
############################################################
cfg.logger.info('detecting in {}-band'.format(cfg.band_detect))
label = '{}-{}-{}'.format(cfg.tract, cfg.patch[0], cfg.patch[-1])
cfg.logger.info('cleaning non-detection bands')
replace = cfg.exp.get_mask_array(cfg.band_detect)
for band in cfg.bands:
if band!=cfg.band_detect:
mi_band = cfg.exp[band].getMaskedImage()
noise_array = utils.make_noise_image(mi_band, cfg.rng)
mi_band.getImage().getArray()[replace] = noise_array[replace]
sources = Table()
for band in cfg.bands:
cfg.logger.info('measuring in {}-band'.format(band))
dual_exp = None if band==cfg.band_detect else cfg.exp[band]
sources_band = prim.detect_sources(
exp_clean, cfg.sex_config, cfg.sex_io_dir, label=label,
dual_exp=dual_exp, delete_created_files=cfg.delete_created_files,
original_fn=cfg.exp.fn[cfg.band_detect])
if len(sources_band)>0:
sources = hstack([sources, sources_band])
else:
cfg.logger.warn('**** no sources found by sextractor ****')
results = _null_return(cfg, exp_clean)
return results
############################################################
# Verify detections in other bands using SExtractor
############################################################
all_detections = sources.copy()
for band in cfg.band_verify:
            cfg.logger.info('verifying detection in {}-band'.format(band))
sources_verify = prim.detect_sources(
cfg.exp[band], cfg.sex_config, cfg.sex_io_dir,
label=label, delete_created_files=cfg.delete_created_files,
original_fn=cfg.exp.fn[cfg.band_detect])
if len(sources_verify)>0:
match_masks, _ = xmatch(
sources, sources_verify, max_sep=cfg.verify_max_sep)
txt = 'cuts: {} out of {} objects detected in {}-band'.format(
len(match_masks[0]), len(sources), band)
cfg.logger.info(txt)
if len(match_masks[0])==0:
cfg.logger.warn('**** no matched sources with '+band+' ****')
results = _null_return(cfg, exp_clean)
return results
sources = sources[match_masks[0]]
else:
cfg.logger.warn('**** no sources detected in '+band+' ****')
results = _null_return(cfg, exp_clean)
return results
mask_fracs = utils.calc_mask_bit_fracs(exp_clean)
cfg.exp.patch_meta.cleaned_frac = mask_fracs['cleaned_frac']
cfg.exp.patch_meta.bright_obj_frac = mask_fracs['bright_object_frac']
cfg.logger.info('task completed in {:.2f} min'.format(cfg.timer))
results = Struct(all_detections=all_detections,
sources=sources,
hugs_exp=cfg.exp,
exp_clean=exp_clean,
success=True,
synths=cfg.exp.synths)
cfg.reset_mask_planes()
return results
except Exception as e:
cfg.logger.critical(
'tract - patch {} - {} failed: {}'.format(cfg.tract, cfg.patch, e))
results = _null_return(cfg)
return results
def _null_return(config, exp_clean=None):
config.reset_mask_planes()
return Struct(all_detections=None,
sources=None,
hugs_exp=config.exp,
exp_clean=exp_clean,
success=False,
synths=None)
```
#### File: hugs-pipe/hugs/plot.py
```python
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
__all__ = ['plot_sep_sources']
def plot_sep_sources(image, catalog, ec='lime', scale_ell=6, subplots=None,
mark_centers=False, subplot_kw=dict(figsize=(10, 10)),
ell_type='ab', per_lo=1.0, per_hi=99.0, mask=None,
mask_kws=dict(cmap='Blues_r', alpha=0.5)):
fig, ax = subplots if subplots is not None else plt.subplots(**subplot_kw)
if image is not None:
if len(image.shape)==2:
vmin, vmax = np.percentile(image, [per_lo, per_hi])
ax.imshow(image, vmin=vmin, vmax=vmax,
origin='lower', cmap='gray_r')
else:
ax.imshow(image, origin='lower')
ax.set_xlim(0, image.shape[1])
ax.set_ylim(0, image.shape[0])
ax.set(xticks=[], yticks=[])
for src in catalog:
if ell_type == 'ab':
a = src['a']
b = src['b']
elif 'kronrad' in ell_type:
kronrad = src[ell_type]
a = kronrad * src['a']
b = kronrad * src['b']
e = Ellipse((src['x'], src['y']),
width=scale_ell*a,
height=scale_ell*b,
angle=src['theta']*180/np.pi, fc='none', ec=ec)
ax.add_patch(e)
if mark_centers:
            ax.plot(src['x'], src['y'], 'r+')
if mask is not None:
mask = mask.astype(float)
mask[mask==0.0] = np.nan
ax.imshow(mask, **mask_kws)
return fig, ax
```
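A usage sketch for `plot_sep_sources` with a `sep` catalog; the image is a random stand-in, so few (if any) sources will be detected:
```python
import numpy as np
import sep

image = np.random.normal(size=(256, 256)).astype(np.float64)   # stand-in image
bkg = sep.Background(image)
catalog = sep.extract(image - bkg.back(), thresh=1.5, err=bkg.globalrms)
fig, ax = plot_sep_sources(image, catalog, ec='cyan', scale_ell=6)
```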
#### File: hugs-pipe/hugs/randoms.py
```python
from __future__ import division, print_function
import numpy as np
import lsst.geom
__all__ = ['get_mask_array', 'find_randoms_in_footprint']
DEFAULT_PLANES = ['CLEANED', 'BRIGHT_OBJECT', 'NO_DATA']
def get_mask_array(exp, planes=DEFAULT_PLANES):
mask = exp.getMaskedImage().getMask()
arr = np.zeros(mask.getArray().shape, dtype=bool)
for p in planes:
if p in mask.getMaskPlaneDict().keys():
arr |= mask.getArray() & mask.getPlaneBitMask(p) != 0
return arr
def find_randoms_in_footprint(db_fn, exp, return_db=True):
"""
Find random points that fall within patch footprint and
are not masked by hugs-pipe.
Parameters
----------
db_fn : string
Database file name.
exp : lsst.afw.ExposureF
Exposure with WCS and mask.
    return_db : bool
        If True, also return the database manager along with the dataframe.
Returns
-------
df : pandas.DataFrame
SkyRandoms table with updated detected column.
    db : SkyRandomsDatabase, if return_db=True
Database manager.
"""
import lsstutils
from skyrandoms import SkyRandomsDatabase
# load randoms database
db = SkyRandomsDatabase(db_fn)
# query database
corners = lsstutils.bbox_to_radec(exp)
ra_lim = [corners[:, 0].min(), corners[:, 0].max()]
dec_lim = [corners[:, 1].min(), corners[:, 1].max()]
df = db.query_region(ra_lim, dec_lim)
afwcoords = lsstutils.make_afw_coords(df[['ra', 'dec']].values)
# get mask array and find detected randoms
xy0 = exp.getXY0()
wcs = exp.getWcs()
bbox = exp.getBBox()
mask_arr = get_mask_array(exp)
detected = []
for coord in afwcoords:
pixel = wcs.skyToPixel(coord)
if bbox.contains(lsst.geom.Point2I(pixel)):
j, i = pixel - xy0
detected.append(int(not mask_arr[int(i), int(j)]))
else:
detected.append(0)
df['detected'] = detected
return (df, db) if return_db else df
```
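A short sketch of `get_mask_array` on its own; it assumes an LSST `ExposureF` loaded from disk, and the file name is a placeholder:
```python
import numpy as np
import lsst.afw.image

exp = lsst.afw.image.ExposureF('calexp.fits')   # placeholder file name
masked = get_mask_array(exp)                    # True where CLEANED/BRIGHT_OBJECT/NO_DATA
print('masked fraction: %.3f' % np.mean(masked))
```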
#### File: hugs/synths/factory.py
```python
from __future__ import division, print_function
import numpy as np
import pandas as pd
from scipy.signal import fftconvolve
from scipy.special import gammaincinv
import lsst.afw.image
import lsst.geom
from .sersic import Sersic
from ..utils import pixscale, zpt, check_random_state
from ..utils import embed_slices
try:
import galsim
HAS_GALSIM = True
except:
print('galsim not installed --> no inclined disk synths')
HAS_GALSIM = False
__all__ = ['inject_synths']
def _make_galaxy(pset, bbox_num_reff=10, band='i'):
"""
Make synthetic Sersic galaxy.
Parameters
----------
pset : dict, astropy.table.Row
Sersic parameters. Uses imfit's convention:
mu_0, r_e, n, X0, Y0, ell, and PA (except for mu_0)
bbox_num_reff : int, optional
Number of r_eff to extend the bounding box.
band : string, optional
Photometric band (need for central surface brightness).
Returns
-------
galaxy : ndarray
Image with synthetic galaxy.
"""
# convert mu_0 to I_e and r_e to pixels
mu_0 = pset['mu_0_' + band.lower()]
b_n = gammaincinv(2.*pset['n'], 0.5)
mu_e = mu_0 + 2.5*b_n/np.log(10)
I_e = (pixscale**2)*10**((zpt-mu_e)/2.5)
r_e = pset['r_e'] / pixscale
# calculate image shape
side = 2*int(bbox_num_reff*r_e) + 1
img_shape = (side, side)
params = dict(X0=img_shape[1]//2,
Y0=img_shape[0]//2,
I_e=I_e,
r_e=r_e,
n=pset['n'],
ell=pset['ell'],
PA=pset['PA'])
# generate image with synth
model = Sersic(params)
galaxy = model.array(img_shape)
return galaxy
def _make_inclined_disk(src, bbox_num_reff=10, band='i'):
assert HAS_GALSIM, 'you need galsim to make inclined disks'
r_pix = src['r_e'] / pixscale
side = 2*int(bbox_num_reff * r_pix) + 1
nx, ny = (side, side)
incl = src['incl'] * galsim.degrees
model = galsim.InclinedExponential(incl, half_light_radius=src['r_e'],
scale_h_over_r=src['q0'])
model = model.rotate(src['PA'] * galsim.degrees)
flux = 10**(0.4 * (zpt - src[f'm_{band}']))
galaxy = model.drawImage(nx=nx, ny=ny, scale=pixscale).array * flux
return galaxy
def inject_synths(cat, exp, bbox_num_reff=10, band='i', psf_convolve=True,
set_mask=True, return_synths=False, synth_model='sersic'):
image_shape = exp.getDimensions().getY(), exp.getDimensions().getX()
synth_image = np.zeros(image_shape)
# make synthetic image
for src in cat:
if synth_model == 'sersic':
galaxy = _make_galaxy(
src, band=band.lower(), bbox_num_reff=bbox_num_reff)
elif synth_model == 'disk' or synth_model == 'inclined disk':
galaxy = _make_inclined_disk(
src, band=band.lower(), bbox_num_reff=bbox_num_reff)
gal_pos = np.array([int(src['y']), int(src['x'])])
img_slice, gal_slice = embed_slices(gal_pos,
galaxy.shape,
synth_image.shape)
synth_image[img_slice] += galaxy[gal_slice]
if psf_convolve:
psf = exp.getPsf().computeKernelImage().getArray()
synth_image = fftconvolve(synth_image, psf, 'same')
if set_mask:
mask = exp.getMask()
mask.addMaskPlane('SYNTH')
for src in cat:
center = lsst.geom.Point2I(int(src['x']),
int(src['y']))
bbox = lsst.geom.Box2I(center, center)
bbox.grow(20)
bbox.clip(exp.getBBox(lsst.afw.image.LOCAL))
cutout = mask.Factory(mask, bbox, lsst.afw.image.LOCAL)
cutout.getArray()[:] += mask.getPlaneBitMask('SYNTH')
exp.getImage().getArray()[:] += synth_image
if return_synths:
return synth_image
```
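A worked sketch of the surface-brightness conversion used in `_make_galaxy` above; the parameter values are illustrative, and the `pixscale` and `zpt` values below are stand-ins for the ones the module imports from `..utils`:
```python
import numpy as np
from scipy.special import gammaincinv

pixscale, zpt = 0.2, 27.0                         # stand-ins for the ..utils values
mu_0, n, r_e_arcsec = 24.0, 1.0, 5.0              # central SB [mag/arcsec^2], Sersic n, r_e [arcsec]
b_n = gammaincinv(2.0 * n, 0.5)
mu_e = mu_0 + 2.5 * b_n / np.log(10)              # surface brightness at the effective radius
I_e = (pixscale**2) * 10**((zpt - mu_e) / 2.5)    # counts per pixel at r_e
r_e_pix = r_e_arcsec / pixscale                   # effective radius in pixels
print(mu_e, I_e, r_e_pix)
```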
#### File: hugs-pipe/scripts/random-patch-completeness.py
```python
from __future__ import division, print_function
import os
from time import time
import mpi4py.MPI as MPI
import schwimmbad
from hugs.pipeline import next_gen_search
from hugs.utils import PatchMeta, project_dir
from astropy.table import vstack
import hugs
def ingest_data(args):
"""
Write data to database with the master process.
"""
timer = time()
success, cats, meta_data = args
sources, recovered, injected, synth_cat = cats
run_name, tract, patch, patch_meta = meta_data
db_ingest = hugs.database.HugsIngest(session, run_name)
if success and (len(sources) > 0):
db_ingest.add_all(tract, patch, patch_meta, sources.to_pandas())
all_recovered.append(recovered)
all_injected.append(injected)
all_synth_cat.append(synth_cat)
else:
failed_patches['tract'].append(tract)
failed_patches['patch'].append(patch)
failed_patches['good_data_frac'].append(patch_meta.good_data_frac)
failed_patches['success'].append(success)
delta_time = time() - timer
print('time to ingest =', delta_time)
def worker(p):
"""
Workers initialize pipe configuration and run pipeline.
"""
rank = MPI.COMM_WORLD.Get_rank()
if p['seed'] is None:
tract, p1, p2 = p['tract'], int(p['patch'][0]), int(p['patch'][-1])
seed = [int(time()), tract, p1, p2, rank]
else:
seed = p['seed']
config = hugs.PipeConfig(run_name=p['run_name'],
config_fn=p['config_fn'],
random_state=seed,
rerun_path=p['rerun_path'])
config.set_patch_id(p['tract'], p['patch'])
config.logger.info('random seed set to {}'.format(seed))
results = next_gen_search.run(config)
pm = results.hugs_exp.patch_meta
patch_meta = PatchMeta(
x0 = pm.x0,
y0 = pm.y0,
small_frac = pm.small_frac,
cleaned_frac = pm.cleaned_frac,
bright_obj_frac = pm.bright_obj_frac,
good_data_frac = pm.good_data_frac
)
meta_data = [
config.run_name,
config.tract,
config.patch,
patch_meta,
]
if results.success:
sources = results.sources
sources['flags'] = sources['flags'].astype(int)
synth_cat = config.synth_cat
synth_cat['tract'] = config.tract
synth_cat['patch'] = config.patch
synth_cat.rename_column('x', 'x_image')
synth_cat.rename_column('y', 'y_image')
(match, match_synth), _ = hugs.cattools.xmatch(
sources, synth_cat, max_sep=config.synth_max_match_sep)
recovered = results.sources[match]
injected = synth_cat[match_synth]
injected['tract'] = config.tract
injected['patch'] = config.patch
txt = '{} injected, {} recovered'.format(len(synth_cat),
len(injected))
config.logger.info(txt)
else:
sources = None
recovered = None
injected = None
synth_cat = None
config.logger.info('passing results to master process')
cats = [sources, recovered, injected, synth_cat]
return results.success, cats, meta_data
if __name__=='__main__':
from argparse import ArgumentParser
from astropy.table import Table
rank = MPI.COMM_WORLD.Get_rank()
config_dir = os.path.join(project_dir, 'pipe-configs')
# parse command-line arguments
    parser = ArgumentParser(description='Run hugs pipeline')
parser.add_argument('-t', '--tract', type=int, help='HSC tract')
parser.add_argument('-p', '--patch', type=str, help='HSC patch')
parser.add_argument('-c', '--config_fn', help='hugs config file',
default=os.path.join(config_dir, 'hugs-run-dev.yml'))
parser.add_argument('--patches_fn', help='patches file')
parser.add_argument('-r', '--run_name', type=str, default='synth-run')
parser.add_argument('--seed', help='rng seed', default=None)
parser.add_argument('--rerun_path', help='full rerun path', default=None)
group = parser.add_mutually_exclusive_group()
group.add_argument('--ncores', default=1, type=int,
help='Number of processes (uses multiprocessing).')
group.add_argument('--mpi', default=False, action="store_true",
help="Run with MPI.")
args = parser.parse_args()
config_params = hugs.utils.read_config(args.config_fn)
outdir = config_params['hugs_io']
#######################################################################
# run on a single patch
#######################################################################
if args.tract is not None:
assert args.patch is not None
tract, patch = args.tract, args.patch
patches = Table([[tract], [patch]], names=['tract', 'patch'])
run_dir_name = '{}-{}-{}'.format(args.run_name, tract, patch)
outdir = os.path.join(outdir, run_dir_name)
hugs.utils.mkdir_if_needed(outdir)
log_fn = os.path.join(outdir, 'hugs-pipe.log')
patches['outdir'] = outdir
patches['log_fn'] = log_fn
#######################################################################
# OR run on all patches in file
#######################################################################
elif args.patches_fn is not None:
patches = Table.read(args.patches_fn)
if rank==0:
time_label = hugs.utils.get_time_label()
outdir = os.path.join(
outdir, '{}-{}'.format(args.run_name, time_label))
hugs.utils.mkdir_if_needed(outdir)
log_dir = os.path.join(outdir, 'log')
hugs.utils.mkdir_if_needed(log_dir)
log_fn = []
for tract, patch in patches['tract', 'patch']:
fn = os.path.join(log_dir, '{}-{}.log'.format(tract, patch))
log_fn.append(fn)
patches['outdir'] = outdir
patches['log_fn'] = log_fn
else:
print('\n**** must give tract and patch --or-- a patch file ****\n')
parser.print_help()
exit()
patches['rerun_path'] = args.rerun_path
patches['seed'] = args.seed
patches['config_fn'] = args.config_fn
patches['run_name'] = args.run_name
if rank==0:
# master process lists for results
db_fn = os.path.join(outdir, args.run_name+'.db')
engine = hugs.database.connect(db_fn, True)
session = hugs.database.Session()
all_recovered = []
all_injected = []
all_synth_cat = []
failed_patches = {'tract': [],
'patch': [],
'good_data_frac': [],
'success': []}
pool = schwimmbad.choose_pool(mpi=args.mpi, processes=args.ncores)
list(pool.map(worker, patches, callback=ingest_data))
pool.close()
if rank==0:
fn = lambda lab: os.path.join(outdir, args.run_name + lab + '.csv')
if len(all_recovered) > 0:
all_recovered = vstack(all_recovered)
all_injected = vstack(all_injected)
all_synth_cat = vstack(all_synth_cat)
all_recovered.write(fn('-recovered'), overwrite=True)
all_injected.write(fn('-injected'), overwrite=True)
all_synth_cat.write(fn('-synth-cat'), overwrite=True)
failed_patches = Table(failed_patches)
failed_patches.write(fn('-failed-patches'), overwrite=True)
``` |
{
"source": "johnnygreco/nerb",
"score": 3
} |
#### File: src/nerb/named_entities.py
```python
from __future__ import annotations
# Standard library
import re
from copy import deepcopy
from dataclasses import dataclass
from typing import Callable, Optional
__all__ = ['NamedEntity', 'NamedEntityList']
@dataclass(frozen=True)
class NamedEntity:
name: str
entity: str
string: str
span: tuple[int, int]
class NamedEntityList:
"""Named entity list class."""
def __init__(self, init_list: Optional[list] = None):
init_list = [] if init_list is None else init_list
self._list = init_list
def append(self, entity: NamedEntity):
"""Append entity to this list, where the element must be of type NamedEntity."""
if not isinstance(entity, NamedEntity):
raise TypeError(
f'{self.__class__.__name__} holds {NamedEntity} objects. You gave {type(entity)}.')
self._list.append(entity)
def copy(self):
return deepcopy(self)
def extend(self, entity_list: NamedEntityList | list[NamedEntity]):
"""Extend list. Similar to the standard python list object, extend takes an iterable as an argument."""
if not isinstance(entity_list, (NamedEntityList, list)):
raise TypeError(
f'Expected object of type {self.__class__.__name__} or list. You gave {type(entity_list)}.'
)
for elem in entity_list:
self.append(elem)
def get_unique_names(self) -> set[str]:
"""Return set of the unique names in this NamedEntityList."""
return set([entity.name for entity in self])
def sort(self, key: Callable, *, reverse: bool = False) -> None:
"""
Sort the list according to the given key. The sort is executed in-place.
Parameters
----------
key : callable (e.g., a lambda function)
Function that defines how the list should be sorted.
reverse : bool, optional
If True, sort in descending order.
"""
self._list.sort(key=key, reverse=reverse)
def __add__(self, other: NamedEntityList):
"""Define what it means to add two list objects together."""
concatenated_list = list(self) + list(other)
return self.__class__(concatenated_list)
def __getitem__(self, item):
if isinstance(item, list):
return self.__class__([self._list[i] for i in item])
elif isinstance(item, slice):
return self.__class__(self._list[item])
else:
return self._list[item]
def __iter__(self):
return iter(self._list)
def __len__(self):
return len(self._list)
def __repr__(self):
repr = '\n'.join([f'[{i}] {p.__repr__()}' for i, p in enumerate(self)])
repr = re.sub(r'^', ' ' * 4, repr, flags=re.M)
repr = f'(\n{repr}\n)' if len(self) > 0 else f'([])'
return f'{self.__class__.__name__}{repr}'
```
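A brief usage sketch for the containers above:
```python
entities = NamedEntityList()
entities.append(NamedEntity(name='Coheed', entity='ARTIST', string='Coheed', span=(0, 6)))
entities.append(NamedEntity(name='Rock', entity='GENRE', string='rock', span=(20, 24)))
entities.sort(key=lambda e: e.span[0])               # sort by position in the text
print(len(entities), entities.get_unique_names())    # 2 {'Coheed', 'Rock'}
```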
#### File: src/nerb/regex_builder.py
```python
from __future__ import annotations
# Standard library
import re
from copy import deepcopy
from pathlib import Path
from typing import Optional
# Project
from . import utils
from .named_entities import NamedEntity, NamedEntityList
__all__ = ['NERB']
class NERB:
"""
    Named Entity Regex Builder (NERB): Streamlined named capture groups.
Parameters
----------
pattern_config : Path or str or dict
Configuration with the named entities and regex patterns. If
Path or str, must be the full path to a yaml config file.
        Regex flags can be set per entity by adding an optional '_flags' key
        to that entity's pattern dict, giving a flag name understood by the
        re module (e.g. 'IGNORECASE') or a list of such names.
add_word_boundaries : bool, optional
If True, add word boundaries to all terms in the regex
patterns (default: True).
Examples
--------
A pattern_config dict for a music document might look like this:
pattern_config = dict(
ARTIST = {
'Coheed': r'coheed(?:\sand\scambria)?',
'Thelonious Monk': r'thelonious\smonk',
},
GENRE = {
'Rock': r'(?:(?:progressive|alternative|punk)\s)?rock|rock\s(?:and\s)roll',
'Jazz': r'(?:smooth\s)?jazz',
'Hip Hop': r'rap|hip\shop',
'Pop': r'pop(?:ular)?'
}
)
This pattern config will create a NERB instance with ARTIST and GENRE entities, which
are accessible via compiled regex attributes composed of named capture groups.
"""
def __init__(
self,
pattern_config: Path | str | dict[str, dict[str, str]],
add_word_boundaries: bool = True
):
self.add_word_boundaries = add_word_boundaries
if isinstance(pattern_config, (Path, str)):
self.pattern_config = utils.load_yaml_config(pattern_config)
elif isinstance(pattern_config, dict):
self.pattern_config = deepcopy(pattern_config)
else:
raise TypeError(
f'{type(pattern_config)} is not a valid type for pattern_config. '
'Must be of type Path, str, or dict.'
)
self._build_regex()
@staticmethod
def _add_word_boundaries(pattern: str) -> str:
"""
Add word boundaries to every term within the given pattern.
Parameters
----------
pattern : str
Regex pattern with terms that need word boundaries.
Returns
-------
pattern : str
Modified regex pattern with word boundaries around every term.
"""
pattern = re.sub(r'\|(?![^(]*\))', r'\\b|\\b', pattern)
pattern = r'{b}{r}{b}'.format(b=r'\b', r=pattern)
return pattern
def _build_regex(self):
"""Build and compile vocab regex patterns."""
for entity in self.pattern_config.keys():
# Get flags. Pop '_flags' keyword if it exists.
flags = self._generate_regex_flags(entity)
term_dict = {}
setattr(self, f'{entity}_names', list(self.pattern_config[entity].keys()))
for name, pattern in self.pattern_config[entity].items():
# Add word boundaries to all terms.
pattern = self._add_word_boundaries(pattern) if self.add_word_boundaries else fr'{pattern}'
term_dict[name.replace(' ', '_')] = pattern
# Build final pattern and compile regex.
pattern = '|'.join([fr'(?P<{k}>{v})' for k, v in term_dict.items()])
setattr(self, entity, re.compile(pattern, flags=flags))
def _generate_regex_flags(self, entity: str) -> re.RegexFlag:
"""Generate regex flags from input config if the '_flags' parameter is given."""
flags = self.pattern_config[entity].pop('_flags', 0)
if not isinstance(flags, int):
flags = flags if isinstance(flags, list) else [flags]
combined_flags = getattr(re, flags[0].upper())
for flag in flags[1:]:
combined_flags |= getattr(re, flag.upper())
flags = combined_flags
return flags
@property
def entity_list(self):
return list(self.pattern_config.keys())
def extract_named_entity(self, entity: str, text: str) -> NamedEntityList:
"""
Extract names of the given entity group from the given text.
Parameters
----------
entity : str
Entity to extract from text.
text : str
Text from which to extract the given entity.
Returns
-------
named_entity_list: NamedEntityList
List of extracted named entities.
"""
if not hasattr(self, entity):
raise AttributeError(f'This NERB instance does not have a compiled regex called {entity}.')
regex = getattr(self, entity)
named_entity_list = NamedEntityList()
for match in regex.finditer(text):
name = match.lastgroup.replace('_', ' ')
named_entity_list.append(
NamedEntity(entity=entity, name=name, string=match.group(), span=match.span())
)
return named_entity_list
def isolate_named_capture_group(
self,
entity: str,
name: str,
text: str,
method: str = 'search'
) -> Optional[re.Match | list[re.Match] | list[tuple[str]]]:
"""
Apply regex method to the given compiled regex attribute, isolating the results for the
given named capture group.
Parameters
----------
entity : str
Entity compiled regex attribute that contains the named capture group.
name : str
Named capture group to be isolated.
text : str
The regex method will be applied to this text.
method : str, optional
Regex method to be applied to the given text (search, finditer, or findall).
Returns
-------
result : match object, list of match objects, list of tuples, or None
Result from applying the given regex method. If no match is found,
None will be returned.
Note
----
Normally, `finditer` returns an iterator. However, if you select this method here,
we need to loop over all the matches, so the results will be returned as a list.
"""
result = None
regex = getattr(self, entity)
named_groups = list(regex.groupindex.keys())
name = name.replace(' ', '_')
if name not in named_groups:
raise KeyError(f"'{name}' is not a valid group name for '{entity}'. "
f'Allowed values: {named_groups}.')
if method == 'search':
# The search method returns the first occurrence of the pattern.
for m in regex.finditer(text):
if m.lastgroup == name:
result = m
break
elif method == 'finditer':
matches = [m for m in regex.finditer(text) if m.lastgroup == name]
if len(matches) > 0:
result = matches
elif method == 'findall':
group_idx = regex.groupindex[name] - 1
matches = [m for m in regex.findall(text) if m[group_idx] != '']
if len(matches) > 0:
result = matches
else:
raise NameError(
f"'{method}' is not a valid regex method. Allowed values: search, finditer, or findall."
)
return result
def __repr__(self):
return f'{self.__class__.__name__}(entities: {self.entity_list.__repr__()})'
```
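A usage sketch for `NERB`, reusing a trimmed version of the music-themed config from the class docstring; the sample text is illustrative:
```python
pattern_config = dict(
    ARTIST={'_flags': 'IGNORECASE', 'Coheed': r'coheed(?:\sand\scambria)?'},
    GENRE={'Jazz': r'(?:smooth\s)?jazz', 'Hip Hop': r'rap|hip\shop'},
)
nerb = NERB(pattern_config)
text = 'Coheed and Cambria fans also tend to like smooth jazz and hip hop.'
print(nerb.extract_named_entity('ARTIST', text))
print(nerb.isolate_named_capture_group('GENRE', 'Hip Hop', text, method='search'))
```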
#### File: src/nerb/utils.py
```python
from __future__ import annotations
# Standard library
from pathlib import Path
# Third-party
import yaml
__all__ = ['load_yaml_config']
def load_yaml_config(file_path: str | Path) -> dict:
"""
Parameters
----------
file_path : str or Path
Yaml config file name. The file is assumed to be in
the repo's config directory.
Returns
-------
config : dict
Configuration parameters stored in a dictionary.
"""
file_path = Path(file_path)
with open(file_path) as file:
config = yaml.load(file, Loader=yaml.CLoader)
return config
``` |
{
"source": "johnnygreco/pcmdpy",
"score": 2
} |
#### File: pcmdpy/aws/create-batch-entities.py
```python
import boto3
import argparse
import time
import sys
batch = boto3.client(
service_name='batch',
region_name='us-east-2',
endpoint_url='https://batch.us-east-2.amazonaws.com')
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--compute-environment", help="name of the compute environment", type=str, required=True)
parser.add_argument("--jobdef-name", help="name of the job definition", type=str, required=True)
parser.add_argument("--subnets", help="comma delimited list of subnets", type=str, default='subnet-3e880473,subnet-823978f9,subnet-126e6f7b')
parser.add_argument("--security-groups", help="comma delimited list of security group ids", type=str, default='sg-2e00be46')
parser.add_argument("--instance-role", help="instance role", type=str, default='arn:aws:iam::845819056159:instance-profile/ecsInstanceRole')
parser.add_argument("--service-role", help="service role", type=str, default='arn:aws:iam::845819056159:role/AWSBatchServiceRole')
parser.add_argument("--image-id", help="image id", type=str, default='ami-a62f07c3')
parser.add_argument("--key-pair", help="ec2 key pair", type=str, default='Cook_Anaconda')
parser.add_argument("--max-nodes", help='max number of nodes', type=int, default=4)
args = parser.parse_args()
spin = ['-', '/', '|', '\\', '-', '/', '|', '\\']
def create_compute_environment(computeEnvironmentName, instanceType, maxVCpus, imageId, serviceRole, instanceRole,
subnets, securityGroups, keyPair):
response = batch.create_compute_environment(
computeEnvironmentName=computeEnvironmentName,
type='MANAGED',
serviceRole=serviceRole,
computeResources={
'type': 'EC2',
'imageId': imageId,
'minvCpus': 0,
'maxvCpus': maxVCpus,
'desiredvCpus': 4,
'instanceTypes': [instanceType],
'subnets': subnets,
'securityGroupIds': securityGroups,
'ec2KeyPair': keyPair,
'instanceRole': instanceRole
}
)
spinner = 0
while True:
describe = batch.describe_compute_environments(computeEnvironments=[computeEnvironmentName])
computeEnvironment = describe['computeEnvironments'][0]
status = computeEnvironment['status']
if status == 'VALID':
print('\rSuccessfully created compute environment %s' % (computeEnvironmentName))
break
elif status == 'INVALID':
reason = computeEnvironment['statusReason']
raise Exception('Failed to create compute environment: %s' % (reason))
        print('\rCreating compute environment... %s' % (spin[spinner % len(spin)]), end='')
sys.stdout.flush()
spinner += 1
time.sleep(1)
return response
def create_job_queue(computeEnvironmentName):
jobQueueName = computeEnvironmentName + '_queue'
response = batch.create_job_queue(jobQueueName=jobQueueName,
priority=0,
computeEnvironmentOrder=[
{
'order': 0,
'computeEnvironment': computeEnvironmentName
}
])
spinner = 0
while True:
describe = batch.describe_job_queues(jobQueues=[jobQueueName])
jobQueue = describe['jobQueues'][0]
status = jobQueue['status']
if status == 'VALID':
print('\rSuccessfully created job queue %s' % (jobQueueName))
break
elif status == 'INVALID':
reason = jobQueue['statusReason']
raise Exception('Failed to create job queue: %s' % reason)
        print('\rCreating job queue... %s' % (spin[spinner % len(spin)]), end='')
sys.stdout.flush()
spinner += 1
time.sleep(1)
return response
def register_job_definition(jobDefName, image, unitVCpus, unitMemory):
response = batch.register_job_definition(jobDefinitionName=jobDefName,
type='container',
containerProperties={
'image': image,
'vcpus': 1,
'memory': 2048,
'privileged': True,
'volumes': [
{
'host': {
'sourcePath': '/var/lib/nvidia-docker/volumes/nvidia_driver/latest'
},
'name': 'nvidia'
}
],
'mountPoints': [
{
'containerPath': '/usr/local/nvidia',
'readOnly': False,
'sourceVolume': 'nvidia'
}
],
"command": ["./pcmdpy_run.sh",
"Ref::config_file",
"Ref::data_file",
"Ref::results_file",
"Ref::stdout_file",
"Ref::stderr_file"
]
})
    print('Created job definition %s' % response['jobDefinitionName'])
return response
def main():
computeEnvironmentName = args.compute_environment
jobDefName = args.jobdef_name
imageId = args.image_id
serviceRole = args.service_role
instanceRole = args.instance_role
subnets = args.subnets.split(",")
securityGroups = args.security_groups.split(",")
keyPair = args.key_pair
# vcpus and memory in a p2.xlarge
unitMemory = 61000
unitVCpus = 4
maxMemory = args.max_nodes * unitMemory
maxVCpus = args.max_nodes * unitVCpus
create_compute_environment(computeEnvironmentName=computeEnvironmentName,
instanceType='p2.xlarge',
maxVCpus=maxVCpus,
imageId=imageId,
serviceRole=serviceRole,
instanceRole=instanceRole,
subnets=subnets,
securityGroups=securityGroups,
keyPair=keyPair)
create_job_queue(computeEnvironmentName)
register_job_definition(jobDefName=jobDefName, image='bacook17/pcmdpy_gpu', unitVCpus=unitVCpus, unitMemory=unitMemory)
    print('Successfully created batch entities for %s (compute environment, job queue, job definition)' % computeEnvironmentName)
if __name__ == "__main__":
main()
```
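For context, the create-then-poll pattern used above can be factored into a small helper. This is a minimal sketch, assuming boto3 credentials are already configured; the environment name is a placeholder.

```python
import time
import boto3

batch = boto3.client('batch', region_name='us-east-1')

def wait_until_valid(env_name, poll_seconds=5, timeout=600):
    """Poll a compute environment until AWS Batch reports it VALID (or fails)."""
    waited = 0
    while waited < timeout:
        desc = batch.describe_compute_environments(computeEnvironments=[env_name])
        env = desc['computeEnvironments'][0]
        if env['status'] == 'VALID':
            return env
        if env['status'] == 'INVALID':
            raise RuntimeError(env.get('statusReason', 'unknown failure'))
        time.sleep(poll_seconds)
        waited += poll_seconds
    raise TimeoutError('%s never became VALID' % env_name)

# wait_until_valid('pcmdpy_compute')   # hypothetical environment name
```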
#### File: pcmdpy/aws/submit_mock.py
```python
import argparse
from batch_utils import submitJob
import sys
def submit_mock(run_name, job_queue, job_definition, config_file,
region='us-east-1', verbose=True):
parameters = {'config_file': config_file,
'run_name': run_name}
kwargs = {'verbose': verbose, 'region': region, 'parameters': parameters}
return submitJob(run_name, job_queue, job_definition, **kwargs)
if __name__ == '__main__':
formatter = argparse.ArgumentDefaultsHelpFormatter
parser = argparse.ArgumentParser(formatter_class=formatter)
parser.add_argument("--array-job",
help="submit an array of jobs, defined by files",
action='store_true')
parser.add_argument("--run-name", help="name of the run", type=str,
required=True)
parser.add_argument("--config-file", help="the configuration file",
type=str, required=True)
parser.add_argument("--job-queue",
help="name of the job queue to submit this job",
type=str, default="pcmdpy_queue_p2")
parser.add_argument("--job-definition", help="name of the job definition",
type=str, default="pcmdpy_data")
parser.add_argument("--region", help="AWS region to submit job to",
type=str, default='us-east-1')
parser.add_argument("--quiet",
help="silence printing of job metadata to STDOUT",
action='store_true')
args = parser.parse_args()
    verbose = not args.quiet
if args.array_job:
if verbose:
print('Submitting array job')
with open(args.run_name, 'r') as f:
run_names = f.readlines()
with open(args.config_file, 'r') as f:
config_files = f.readlines()
if len(run_names) != len(config_files):
            print(('number of run_names in {:s} does not match number of '
'config_files in {:s}. Exiting').format(args.run_name,
args.config_file))
sys.exit(1)
for rn, cf in zip(run_names, config_files):
run_name = rn.strip('\n')
config_file = cf.strip('\n')
submit_mock(run_name, args.job_queue, args.job_definition,
config_file, region=args.region, verbose=verbose)
else:
if verbose:
print('Submitting single job')
submit_mock(args.run_name, args.job_queue, args.job_definition,
args.config_file,
region=args.region, verbose=verbose)
```
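For reference, a hypothetical direct call to `submit_mock` (the run name and config file are placeholders, and `batch_utils.submitJob` must be importable):

```python
from submit_mock import submit_mock   # assumes the script's directory is on sys.path

submit_mock('mock_run_001', 'pcmdpy_queue_p2', 'pcmdpy_data',
            'mock_run_001_config.yaml', region='us-east-1', verbose=True)
```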
#### File: pcmdpy/data/sources.py
```python
import numpy as np
from astropy.io import fits
import sep
import pyregion
from .utils import ALL_FLAGS, regions_to_mask, _add_masked
from .alignment import _add_flag
def estimate_background(image_file, bkg_kwargs={}):
"""
Estimate the smooth background of an image
"""
hdulist = fits.open(image_file, mode='update')
image = hdulist['SCI'].data
kwargs = {}
kwargs['bw'] = kwargs['bh'] = 8 # size of background boxes
kwargs['fw'] = kwargs['fh'] = 3 # size of the filters
kwargs['fthresh'] = 0 # filter threshold
kwargs.update(bkg_kwargs)
    bkg = sep.Background(image.astype(np.float64),
                         **kwargs)  # merged defaults plus any user-supplied overrides
bkg_hdu = fits.ImageHDU(data=bkg.back())
bkg_hdu.header['EXTNAME'] = 'BKGD'
rms_hdu = fits.ImageHDU(data=bkg.rms())
rms_hdu.header['EXTNAME'] = 'BKGDRMS'
sub_hdu = fits.ImageHDU(data=(image / bkg.back()))
sub_hdu.header['EXTNAME'] = 'BKGDSUB'
for h in [bkg_hdu.header, rms_hdu.header, sub_hdu.header, hdulist['FLAGS'].header]:
h.add_history('SExtractor Background Params:')
h.add_history(' Background Box Size (pixels): {:d}'.format(kwargs['bw']))
h.add_history(' Background Filter Size (pixels): {:d}'.format(kwargs['fw']))
h.add_history(' Background Filter Threshold (pixels): {:.2f}'.format(kwargs['fthresh']))
h['GLOBBKG'] = bkg.globalback
h['GLOBRMS'] = bkg.globalrms
for k, v in kwargs.items():
if k not in ['bw', 'bh', 'fw', 'fh', 'fthresh']:
h.add_history(' {:}: {:}'.format(k, v))
if 'BKGD' in hdulist:
hdulist.pop('BKGD')
if 'BKGDRMS' in hdulist:
hdulist.pop('BKGDRMS')
if 'BKGDSUB' in hdulist:
hdulist.pop('BKGDSUB')
hdulist.insert(-1, sub_hdu)
hdulist.insert(-1, rms_hdu)
hdulist.insert(-1, bkg_hdu)
hdulist[0].header['BKGDCOMP'] = "COMPLETE"
hdulist.close()
return bkg_hdu
def mask_sources_manual(image_file, region_file):
hdulist = fits.open(image_file, mode='update')
if 'FLAGS' not in hdulist:
hdulist.close()
_add_flag(image_file)
hdulist = fits.open(image_file, mode='update')
mask = regions_to_mask(region_file, image_file)
# Unset pixels already flagged
old_mask = (hdulist['FLAGS'].data & ALL_FLAGS['MANUAL']).astype(np.bool)
hdulist['FLAGS'].data[old_mask] -= ALL_FLAGS['MANUAL']
hdulist['FLAGS'].data[mask] += ALL_FLAGS['MANUAL']
h = hdulist['FLAGS'].header
h.add_history('Manual Regions Masked from file:')
h.add_history(' {:s}'.format(region_file))
hdulist[0].header['MANUAL'] = "COMPLETE"
hdulist.close()
_add_masked(image_file)
def mask_sources_auto(image_file, threshold=10.0, r_scale=10.0,
global_rms=False, max_npix_object=500,
obj_kwargs={}, **kwargs):
hdulist = fits.open(image_file, mode='update')
if 'FLAGS' not in hdulist:
hdulist.close()
_add_flag(image_file)
hdulist = fits.open(image_file, mode='update')
if 'BKGD' not in hdulist:
hdulist.close()
estimate_background(image_file, kwargs.get('bkg_kwargs', {}))
hdulist = fits.open(image_file, mode='update')
kwargs = {
'thresh': threshold,
'minarea': 9,
}
kwargs.update(obj_kwargs)
image = np.copy(hdulist['SCI'].data)
sub_im = image - hdulist['BKGD'].data
# Undo previous SEXTRACTOR source masks
old_mask = (hdulist['FLAGS'].data & ALL_FLAGS['SEXTRACTOR']).astype(np.bool)
hdulist['FLAGS'].data[old_mask] -= ALL_FLAGS['SEXTRACTOR']
mask = np.zeros_like(image, dtype=np.bool)
if global_rms:
err = hdulist['BKGD'].header['GLOBRMS']
else:
err = hdulist['BKGDRMS'].data.byteswap().newbyteorder()
objects = sep.extract(sub_im, err=err,
mask=mask, segmentation_map=False,
**kwargs)
to_use = (objects['npix'] < max_npix_object)
mask = np.zeros_like(image, dtype=np.bool)
sep.mask_ellipse(mask, objects['x'][to_use], objects['y'][to_use],
objects['a'][to_use], objects['b'][to_use],
objects['theta'][to_use], r=r_scale)
    # flag newly detected source pixels with the SEXTRACTOR bit
hdulist['FLAGS'].data[mask] += ALL_FLAGS['SEXTRACTOR']
h = hdulist['FLAGS'].header
h.add_history('SExtractor Regions Masked')
h.add_history(' Detection Threshold (sigma): {:.2f}'.format(kwargs['thresh']))
h.add_history(' Min Source Size (pixels): {:d}'.format(kwargs['minarea']))
hdulist[0].header['SEXTRACT'] = "COMPLETE"
hdulist.close()
_add_masked(image_file)
```
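A minimal sketch of how these routines are meant to chain together on one FITS image (paths are placeholders; each step updates the file in place by adding extensions and FLAGS bits):

```python
from pcmdpy.data.sources import (estimate_background, mask_sources_auto,
                                 mask_sources_manual)

image_file = 'm31_f475w_drc.fits'   # hypothetical image with a 'SCI' extension

# 1. Fit and store the smooth background (adds BKGD / BKGDRMS / BKGDSUB).
estimate_background(image_file, bkg_kwargs={'bw': 16, 'bh': 16})

# 2. Detect bright sources with SEP and set the SEXTRACTOR flag bit.
mask_sources_auto(image_file, threshold=10.0, r_scale=10.0)

# 3. Optionally flag hand-drawn DS9 regions with the MANUAL flag bit.
mask_sources_manual(image_file, 'manual_mask.reg')
```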
#### File: pcmdpy/data/utils.py
```python
import numpy as np
from astropy.io import fits
from ..instrument.filter import AVAILABLE_FILTERS
import pyregion
import pysynphot as pyS
ALL_FLAGS = {
'CTX': 2**0,
'EXPOSURE': 2**1,
'DARK': 2**2,
'SEXTRACTOR': 2**3,
'MANUAL': 2**4,
}
def regions_to_mask(region_file, image_file):
hdulist = fits.open(image_file)
mask = pyregion.open(region_file).as_imagecoord(
header=hdulist[0].header).get_mask(shape=hdulist['SCI'].shape)
return mask
def _add_masked(image_file, mask_val=np.nan,
mask_flags=ALL_FLAGS.values()):
hdulist = fits.open(image_file, mode='update')
if 'MASKED' in hdulist:
hdulist.pop('MASKED')
clean_hdu = hdulist['SCI'].copy()
clean_hdu.header['EXTNAME'] = 'MASKED'
mask = np.zeros_like(hdulist['FLAGS'].data, dtype=bool)
for f in mask_flags:
mask |= (hdulist['FLAGS'].data & f).astype(np.bool)
clean_hdu.data[mask] = mask_val
if 'BKGDSUB' in hdulist:
if 'MASKEDSUB' in hdulist:
hdulist.pop('MASKEDSUB')
sub_hdu = hdulist['BKGDSUB'].copy()
sub_hdu.header['EXTNAME'] = 'MASKEDSUB'
sub_hdu.data[hdulist['FLAGS'].data > 0] = mask_val
hdulist.insert(2, sub_hdu)
hdulist.insert(2, clean_hdu)
hdulist.close()
def combine_flags(file_dict):
all_filters = list(file_dict.keys())
h1 = fits.open(file_dict[all_filters[0]])
flags = np.zeros_like(h1['FLAGS'].data, dtype=np.int32)
h1.close()
for i, filt in enumerate(all_filters):
with fits.open(file_dict[filt]) as h:
flags += 2**i * h['FLAGS'].data
for filt in all_filters:
with fits.open(file_dict[filt], mode='update') as h:
h['FLAGS'].data = flags
_add_masked(file_dict[filt])
def compute_zpts(instrument, detector, band, mjd):
bandpass = pyS.ObsBandpass('{:s},{:s},{:s},mjd#{:d}'.format(instrument, detector, band, mjd))
spec_bb = pyS.BlackBody(50000)
spec_bb_norm = spec_bb.renorm(1, 'counts', bandpass)
obs = pyS.Observation(spec_bb_norm, bandpass)
zps = {}
zps['vega'] = obs.effstim('vegamag')
zps['st'] = obs.effstim('stmag')
zps['ab'] = obs.effstim('abmag')
return zps
def filter_from_fits(file_name):
with fits.open(file_name) as hdu:
header = hdu[0].header
instrument = header['INSTRUME'].lower().strip(' ')
detector = header['DETECTOR'].lower().strip(' ')
if detector == 'wfc':
detector = 'wfc1'
band = None
for k in ['FILTER', 'FILTER1', 'FILTER2']:
if k in header:
b_temp = header[k]
if 'CLEAR' in b_temp:
continue
else:
band = b_temp
break
if band is None:
raise KeyError('Unable to identify filter from FITS file')
mjd = int(header['EXPSTART'])
exposure = header['EXPTIME']
zpts = compute_zpts(instrument, detector, band, mjd)
if band in AVAILABLE_FILTERS:
filt = AVAILABLE_FILTERS[band](
exposure=exposure,
zpt_vega=zpts['vega'],
zpt_ab=zpts['ab'],
zpt_st=zpts['st'])
else:
filt = None
print('Filter: {:s}'.format(band))
print('Observation Date: {:d} (MJD)'.format(mjd))
print('Vega ZeroPoint: {:.4f}'.format(zpts['vega']))
print('AB ZeroPoint: {:.4f}'.format(zpts['ab']))
print('ST ZeroPoint: {:.4f}'.format(zpts['st']))
print('Exposure Time: {:.1f}'.format(exposure))
if filt is not None:
print('A pre-made filter is available')
else:
print("A custom filter must be made (no matching filter found)")
return filt
```
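The FLAGS extension stores one bit per mask type, so several masks can coexist in a single integer image. A self-contained numpy illustration of setting and testing those bits (mirroring the `&` and `+=` usage above):

```python
import numpy as np

ALL_FLAGS = {'CTX': 1, 'EXPOSURE': 2, 'DARK': 4, 'SEXTRACTOR': 8, 'MANUAL': 16}

flags = np.zeros((3, 3), dtype=np.int32)
flags[0, 0] |= ALL_FLAGS['SEXTRACTOR']   # pixel flagged as a detected source
flags[0, 0] |= ALL_FLAGS['MANUAL']       # the same pixel can carry several bits

is_manual = (flags & ALL_FLAGS['MANUAL']).astype(bool)
masked_at_all = flags > 0                # any bit set means the pixel is masked
print(flags[0, 0], is_manual[0, 0], masked_at_all.sum())   # 24 True 1
```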
#### File: pcmdpy/galaxy/galaxy.py
```python
__all__ = ['CustomGalaxy', 'SSPSimple', 'TauSimple',
'NonParamSimple', 'TauFull', 'NonParamFull']
import numpy as np
from ..sampling import priors
from .metalmodels import (BaseMetalModel, get_metal_model)
from .sfhmodels import (BaseSFHModel, get_sfh_model)
from .distancemodels import (BaseDistanceModel, get_distance_model)
from .dustmodels import (BaseDustModel, get_dust_model)
from .imf import (salpeter_IMF, kroupa_IMF, salpeter_meanmass,
kroupa_meanmass)
class CustomGalaxy:
def __init__(self, metal_model, dust_model, sfh_model, distance_model,
mdf_sig=None,
dust_sig=None,
dmod=None,
imf='salpeter',
imf_kwargs={},
initial_params=None):
# set the metallicity model
if not isinstance(metal_model, BaseMetalModel):
kwargs = {}
if mdf_sig is not None:
kwargs['sig'] = mdf_sig
metal_model = get_metal_model(metal_model, **kwargs) # parse a passed string
self.metal_model = metal_model
# set the dust model
if not isinstance(dust_model, BaseDustModel):
kwargs = {}
if dust_sig is not None:
kwargs['sig'] = dust_sig
dust_model = get_dust_model(dust_model, **kwargs)
self.dust_model = dust_model
# set the SFH model
if not isinstance(sfh_model, BaseSFHModel):
sfh_model = get_sfh_model(sfh_model)
self.sfh_model = sfh_model
# set the distance modulus
if not isinstance(distance_model, BaseDistanceModel):
kwargs = {}
if dmod is not None:
kwargs['dmod'] = dmod
distance_model = get_distance_model(distance_model, **kwargs)
self.distance_model = distance_model
# set the IMF model
self._imf = imf
self._imf_kwargs = imf_kwargs
if imf.lower() == 'salpeter':
self.imf_func = salpeter_IMF
kws = imf_kwargs.copy()
kws.pop('norm_by_mass', False)
self.meanmass = salpeter_meanmass(**kws)
self.imf_kwargs = imf_kwargs
elif imf.lower() == 'kroupa':
self.imf_func = kroupa_IMF
kws = imf_kwargs.copy()
kws.pop('norm_by_mass', False)
self.meanmass = kroupa_meanmass(**kws)
self.imf_kwargs = imf_kwargs
else:
raise NotImplementedError('Only salpeter and kroupa '
'IMFs are currently implemented')
if initial_params is not None:
self.set_params(initial_params)
else:
if None not in self._params:
self.set_params(self._params)
def set_params(self, gal_params):
# make sure is array, with right length
assert(len(gal_params) == self.p_total)
gal_params = np.array(gal_params, dtype=float)
# set metal parameters
feh_params = gal_params[:self.p_feh]
self.metal_model.set_params(feh_params)
# set dust parameters
dust_params = gal_params[self.p_feh:self.p_feh+self.p_dust]
self.dust_model.set_params(dust_params)
# set sfh parameters
sfh_params = gal_params[self.p_feh+self.p_dust:
self.p_feh+self.p_dust+self.p_sfh]
self.sfh_model.set_params(sfh_params)
# set distance parameters
if self.p_distance > 0:
dist_mod = gal_params[-self.p_distance:]
self.distance_model.set_params(dist_mod)
def get_flat_prior(self, feh_bounds=None, dust_bounds=None,
sfh_bounds=None, dmod_bounds=None, **kwargs):
if feh_bounds is None:
bounds = self.metal_model._default_prior_bounds
else:
assert(len(feh_bounds) == self.p_feh)
bounds = feh_bounds
if dust_bounds is None:
bounds += self.dust_model._default_prior_bounds
else:
assert(len(dust_bounds) == self.p_dust)
bounds += dust_bounds
        # for backwards compatibility, allow "age_bounds" in place of "sfh_bounds"
sfh_bounds = sfh_bounds or kwargs.get('age_bounds', None)
if sfh_bounds is None:
bounds += self.sfh_model._default_prior_bounds
else:
assert(len(sfh_bounds) == self.p_sfh)
bounds += sfh_bounds
if dmod_bounds is None:
bounds += self.distance_model._default_prior_bounds
else:
assert len(dmod_bounds) == self.p_distance
bounds += dmod_bounds
return priors.FlatPrior(bounds)
def copy(self):
new_gal = CustomGalaxy(
self.metal_model.copy(),
self.dust_model.copy(),
self.sfh_model.copy(),
self.distance_model.copy(),
imf=self._imf,
imf_kwargs=self._imf_kwargs,
initial_params=self._params)
return new_gal
def describe(self):
pass
@property
def ages(self):
return np.tile(self.sfh_model.ages, self.metal_model._num_fehs)
@property
def delta_ts(self):
return np.tile(self.sfh_model.delta_ts,
self.metal_model._num_feh_bins)
@property
def fehs(self):
return np.repeat(self.metal_model.fehs, self.sfh_model._num_isochrones)
@property
def SFH(self):
_, feh_weights = self.metal_model.get_vals()
_, sfh_weights = self.sfh_model.get_vals()
return np.outer(feh_weights, sfh_weights).flatten()
@property
def dmod(self):
return self.distance_model.dmod
@property
def d_mpc(self):
"""
Distance to galaxy (in Mpc)
"""
return 10.**(0.2 * (self.dmod - 25.))
@property
def Npix(self):
"""
Number of stars formed per pixel
"""
return np.sum(self.SFH)
@property
def Mpix(self):
"""
Mass of stars formed (in solar masses) per pixel
"""
return self.Npix * self.meanmass
@property
def logSFH(self):
"""
Log10 number of stars formed in each age bin, per pixel
"""
return np.log10(self.SFH)
@property
def logNpix(self):
"""
Log10 number of stars formed per pixel
"""
return np.log10(self.Npix)
@property
def logMpix(self):
"""
Log10 mass of stars (in stellar masses) formed per pixel
"""
return np.log10(self.Mpix)
@property
def SFR(self):
"""
Star-formation rate (in solar masses per Gyr) per pixel
"""
return self.SFH * self.meanmass / self.delta_ts
@property
def logSFR(self):
"""
Log10 star-formation rate (in solar masses per Gyr) per pixel
"""
return np.log10(self.SFR)
@property
def num_SSPs(self):
"""
Number of individual SSPs making up the galaxy model
"""
return len(self.ages)
def iter_SSPs(self):
"""
Iterate through all SSPs making up the galaxy model
Yields
------
age :
feh :
SFH :
dmod :
"""
for i in range(self.num_SSPs):
yield self.ages[i], self.fehs[i], self.SFH[i], self.dmod
@property
def p_feh(self):
return self.metal_model._num_params
@property
def p_dust(self):
return self.dust_model._num_params
@property
def p_sfh(self):
return self.sfh_model._num_params
@property
def p_distance(self):
return self.distance_model._num_params
@property
def p_total(self):
return self.p_feh + self.p_dust + self.p_sfh + self.p_distance
@property
def _params(self):
all_params = []
for mod in [self.metal_model, self.dust_model, self.sfh_model,
self.distance_model]:
all_params += list(mod._params)
return all_params
@property
def _num_params(self):
return self.p_total
@property
def _param_names(self):
all_names = []
for mod in [self.metal_model, self.dust_model, self.sfh_model,
self.distance_model]:
all_names += list(mod._param_names)
return all_names
@property
def _fancy_names(self):
all_names = []
for mod in [self.metal_model, self.dust_model, self.sfh_model,
self.distance_model]:
all_names += list(mod._fancy_names)
return all_names
class TauSimple(CustomGalaxy):
def __init__(self, initial_params=None, dmod=30.):
super().__init__(
metal_model='single',
dust_model='single',
sfh_model='tau',
distance_model='fixed',
dmod=dmod,
initial_params=initial_params)
class SSPSimple(CustomGalaxy):
def __init__(self, initial_params=None, dmod=30.):
super().__init__(
metal_model='single',
dust_model='single',
sfh_model='ssp',
distance_model='fixed',
dmod=dmod,
initial_params=initial_params)
class NonParamSimple(CustomGalaxy):
def __init__(self, initial_params=None, dmod=30.):
super().__init__(
metal_model='single',
dust_model='single',
sfh_model='nonparam',
distance_model='fixed',
dmod=dmod,
initial_params=initial_params)
class TauFull(CustomGalaxy):
def __init__(self, initial_params=None):
super().__init__(
metal_model='fixedwidth',
dust_model='fixedwidth',
sfh_model='tau',
distance_model='variable',
mdf_sig=0.3,
dust_sig=0.2,
initial_params=initial_params)
class NonParamFull(CustomGalaxy):
def __init__(self, initial_params=None):
super().__init__(
metal_model='fixedwidth',
dust_model='fixedwidth',
sfh_model='nonparam',
distance_model='variable',
mdf_sig=0.3,
dust_sig=0.2,
initial_params=initial_params)
```
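A minimal usage sketch, assuming pcmdpy is installed; the exact free-parameter layout depends on the component models, so it is safer to query `p_total` and `_param_names` than to hard-code them:

```python
from pcmdpy.galaxy.galaxy import CustomGalaxy, TauSimple

gal = TauSimple(dmod=30.)             # single metallicity, single dust, tau SFH, fixed distance
print(gal.p_total, gal._param_names)  # number and names of the free parameters
print(gal.d_mpc)                      # 10.0 Mpc for dmod = 30

# The same composite can be assembled explicitly from its building blocks.
gal2 = CustomGalaxy(metal_model='single', dust_model='single',
                    sfh_model='tau', distance_model='fixed', dmod=30.)
```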
#### File: pcmdpy/sampling/priors.py
```python
__all__ = ['FlatPrior', 'SSPFlatPrior', 'ConstFlatPrior', 'TauFlatPrior',
'FullFlatPrior']
import numpy as np
# from scipy.stats import dirichlet # , gamma
class FlatPrior(object):
"""
Encapsulates an N-dimensional flat prior as an object
Methods
---------
lnprior(params)
Compute log of prior for given parameters
prior_transform(normed_params)
Convert normalized parameters [0,1] to physical parameters
using the inverse of the prior
"""
def __init__(self, bounds):
"""
Yield a `FlatPrior` object for given bounds
Parameters
----------
bounds : array_like with shape (Ndim, 2)
upper and lower bounds for each dimension
Yields
------
`FlatPrior`
Object representing a `FlatPrior` with given bounds
Raises
------
`ValueError`
If input bounds are not array_like with dimension Nx2
            OR if upper bounds are less than lower bounds in any dimension
"""
if type(bounds) is not np.ndarray:
bounds = np.array(bounds).astype(float)
if bounds.ndim != 2:
raise ValueError('The input bounds must be Ndim x 2')
if bounds.shape[1] != 2:
raise ValueError('The input bounds must be Ndim x 2')
self.ndim = bounds.shape[0]
self.lower_bounds = bounds[:, 0]
self.upper_bounds = bounds[:, 1]
self.widths = self.upper_bounds - self.lower_bounds
if np.any(self.lower_bounds > self.upper_bounds):
raise ValueError('The upper bounds must be greater than'
'the lower bounds in all dimensions')
def update_bound(self, index, bounds):
self.lower_bounds[index] = bounds[0]
self.upper_bounds[index] = bounds[1]
self.widths = self.upper_bounds - self.lower_bounds
if np.any(self.lower_bounds > self.upper_bounds):
raise ValueError('The upper bounds must be greater than'
'the lower bounds in all dimensions')
def add_bound(self, bounds):
self.lower_bounds = np.append(self.lower_bounds, 0.)
self.upper_bounds = np.append(self.upper_bounds, 0.)
self.update_bound(-1, bounds)
def lnprior(self, params):
"""
Return ln of prior for given parameters. Typically either 0
if inside range or -`~numpy.inf` if outside range.
Parameters
----------
params : array_like
Physical parameters in the space of the prior
Returns
-------
float
Natural log of the computed prior (0 if inside range,
-`numpy.inf` if outside range)
Raises
------
`ValueError`
If length of params doesn't match prior object's dimension
"""
if len(params) != self.ndim:
raise ValueError('len(params) must '
'equal {:d}'.format(self.ndim))
if (np.any(params < self.lower_bounds)
or np.any(params > self.upper_bounds)):
return -np.inf
else:
return 0.
def prior_transform(self, normed_params):
"""
Return physical params corresponding to the normed [0,1] params.
Parameters
----------
normed_params : array_like
Array of unit-scaled [0,1] parameters
Returns
-------
array of floats
Physical parameters represented by normalized parameters
Raises
------
`ValueError`
If length of normed_params doesn't match prior object's dimension
OR if any of normed_params are outside [0,1]
"""
if len(normed_params) != self.ndim:
raise ValueError('len(normed_params) must '
'equal {0:d}. Instead is '
'{1:d}'.format(self.ndim, len(normed_params)))
if np.any(normed_params < 0.) or np.any(normed_params > 1.):
raise ValueError('All normalized parameters must be within [0,1]')
return self.lower_bounds + self.widths*normed_params
def inverse_prior_transform(self, params):
if len(params) != self.ndim:
raise ValueError('len(params) must '
'equal {0:d}. Instead is '
'{1:d}'.format(self.ndim, len(params)))
return (params - self.lower_bounds) / self.widths
class SSPFlatPrior(FlatPrior):
"""
A `FlatPrior` object representing an SSP (Simple Stellar Population)
with 4 free-parameters: metallicity (logzh), dust (log E(B-V)), log_Npix,
and age (in log years).
Corresponds to the `~pcmdpy.galaxy.Galaxy_SSP` model
Methods
---------
lnprior(params)
Compute log of prior for given parameters
prior_transform(normed_params)
Convert normalized parameters [0,1] to physical parameters
using the inverse of the prior
"""
def __init__(self, z_bound=[-2., 0.5], dust_bound=[-3., 0.5],
npix_bound=[-1., 8.], age_bound=[6., 10.3],
dmod_bound=[23.5, 30.]):
"""
Yields an `SSPFlatPrior` object with specified bounds in each
dimension.
Parameters
----------
z_bound : array-like with shape (2,), optional
lower (default -2.) and upper (default 0.5) bounds of metallicity,
in units log_10(z/z_solar).
dust_bound : array-like with shape (2,), optional
lower (default -3.) and upper (default 0.5) bounds of dust
extinction, in units log_10 E(B-V).
npix_bound : array-like with shape (2,), optional
lower (default -1.) and upper (default 8.) bounds of
star-per-pixel, in units log_10 N_pix
age_bound : array-like with shape (2,), optional
lower (default 6.) and upper (default 10.3) bounds of age,
in units log_10 years
dmod_bound : array-like with shape (2,), optional
lower (default 23.5 = 0.5 Mpc) and upper (default 30.0 = 10 Mpc)
bounds of distance modulus
Yields
------
`SSPFlatPrior`
Object representing a flat SSP prior with given bounds
Raises
------
`ValueError`
            If any keyword arguments are not array-like with length 2
            OR if upper bounds are less than lower bounds in any dimension
"""
bounds = np.array([z_bound, dust_bound, npix_bound, age_bound,
dmod_bound])
FlatPrior.__init__(self, bounds)
class ConstFlatPrior(FlatPrior):
"""
A `FlatPrior` object representing a 7-part SFH (Star Formation
History) that assumes constant star-formation, and has 3
free-parameters: metallicity (logzh), dust (log E(B-V)), and log_Npix.
Corresponds to the `~pcmdpy.galaxy.Const_SFR` model
Methods
---------
lnprior(params)
Compute log of prior for given parameters
prior_transform(normed_params)
Convert normalized parameters [0,1] to physical parameters
using the inverse of the prior
"""
def __init__(self, z_bound=[-2., 0.5], dust_bound=[-3., 0.5],
npix_bound=[-1., 8.], dmod_bound=[23.5, 30.]):
"""
Yields a `ConstFlatPrior` object with specified bounds in each
dimension.
Parameters
----------
z_bound : array-like (length 2), optional
lower (default -2.) and upper (default 0.5) bounds of metallicity,
in units log_10(z/z_solar).
dust_bound : array-like (length 2), optional
lower (default -3.) and upper (default 0.5) bounds of dust
extinction, in units log_10 E(B-V).
npix_bound : array-like (length 2), optional
lower (default -1.) and upper (default 8.) bounds of
star-per-pixel, in units log_10 N_pix
dmod_bound : array-like with shape (2,), optional
lower (default 23.5 = 0.5 Mpc) and upper (default 30.0 = 10 Mpc)
bounds of distance modulus
Yields
------
`ConstFlatPrior`
Object representing a flat prior with given bounds
Raises
------
`ValueError`
            If any keyword arguments are not array-like with length 2
            OR if upper bounds are less than lower bounds in any dimension
"""
bounds = np.array([z_bound, dust_bound, npix_bound, dmod_bound])
FlatPrior.__init__(self, bounds)
class TauFlatPrior(FlatPrior):
"""
A `FlatPrior` object representing a 7-part SFH (Star Formation
History) that assumes a tau-model star-formation history, and has 4
free-parameters: metallicity (logzh), dust (log E(B-V)), log_Npix,
and tau (in Gyrs).
Corresponds to the `~pcmdpy.galaxy.Tau_Model` model
Methods
---------
lnprior(params)
Compute log of prior for given parameters
prior_transform(normed_params)
Convert normalized parameters [0,1] to physical parameters
using the inverse of the prior
"""
def __init__(self, z_bound=[-2., 0.5], dust_bound=[-3., 0.5],
npix_bound=[-1., 8.], tau_bound=[.1, 20.],
dmod_bound=[23.5, 30.]):
"""
Yields a `TauFlatPrior` object with specified bounds in each dimension.
Parameters
----------
z_bound : array-like with shape (2,), optional
lower (default -2.) and upper (default 0.5) bounds of metallicity,
in units log_10(z/z_solar).
dust_bound : array-like with shape (2,), optional
lower (default -3.) and upper (default 0.5) bounds of dust
extinction, in units log_10 E(B-V).
npix_bound : array-like with shape (2,), optional
lower (default -1.) and upper (default 8.) bounds of
star-per-pixel, in units log_10 N_pix
tau_bound : array-like with shape (2,), optional
lower (default 0.1) and upper (default 20.) bounds of tau, the
star-formation timescale, in units of Gyrs
dmod_bound : array-like with shape (2,), optional
lower (default 23.5 = 0.5 Mpc) and upper (default 30.0 = 10 Mpc)
bounds of distance modulus
Yields
------
`TauFlatPrior`
Object representing a flat prior with given bounds
Raises
------
        `ValueError`
            If any keyword arguments are not array-like with length 2
            OR if upper bounds are less than lower bounds in any dimension
"""
bounds = np.array([z_bound, dust_bound, npix_bound, tau_bound,
dmod_bound])
FlatPrior.__init__(self, bounds)
class TauMDFFlatPrior(FlatPrior):
"""
    A `FlatPrior` object representing a 7-part SFH (Star Formation
    History) that assumes a tau-model star-formation history with a
    metallicity spread, and has 5 free-parameters: metallicity (logzh),
    metallicity spread (sigz), dust (log E(B-V)), log_Npix, and tau (in Gyrs).
Corresponds to the `~pcmdpy.galaxy.Tau_Model` model
Methods
---------
lnprior(params)
Compute log of prior for given parameters
prior_transform(normed_params)
Convert normalized parameters [0,1] to physical parameters
using the inverse of the prior
"""
def __init__(self, z_bound=[-2., 0.5], sigz_bound=[0., 1.],
dust_bound=[-3., 0.5], npix_bound=[-1., 8.],
tau_bound=[.1, 20.], dmod_bound=[23.5, 30.]):
"""
Yields a `TauMDFFlatPrior` object with specified bounds in each dimension.
Parameters
----------
z_bound : array-like with shape (2,), optional
lower (default -2.) and upper (default 0.5) bounds of metallicity,
in units log_10(z/z_solar).
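        sigz_bound : array-like with shape (2,), optional
            lower (default 0.) and upper (default 1.) bounds of the
            metallicity spread (sigz) of the MDF.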
dust_bound : array-like with shape (2,), optional
lower (default -3.) and upper (default 0.5) bounds of dust
extinction, in units log_10 E(B-V).
npix_bound : array-like with shape (2,), optional
lower (default -1.) and upper (default 8.) bounds of
star-per-pixel, in units log_10 N_pix
tau_bound : array-like with shape (2,), optional
lower (default 0.1) and upper (default 20.) bounds of tau, the
star-formation timescale, in units of Gyrs
dmod_bound : array-like with shape (2,), optional
lower (default 23.5 = 0.5 Mpc) and upper (default 30.0 = 10 Mpc)
bounds of distance modulus
Yields
------
        `TauMDFFlatPrior`
Object representing a flat prior with given bounds
Raises
------
        `ValueError`
            If any keyword arguments are not array-like with length 2
            OR if upper bounds are less than lower bounds in any dimension
"""
bounds = np.array([z_bound, sigz_bound, dust_bound, npix_bound,
tau_bound, dmod_bound])
FlatPrior.__init__(self, bounds)
class FullFlatPrior(FlatPrior):
"""A `FlatPrior` object representing a fully non-parametric, 7-part SFH
(Star Formation History), which has 9 free-parameters: metallicity (logzh),
dust (log E(B-V)), and 7 star-formation history bins (log sfhX).
Corresponds to the `~pcmdpy.galaxy.Galaxy_Model` model
Methods
---------
lnprior(params)
Compute log of prior for given parameters
prior_transform(normed_params)
Convert normalized parameters [0,1] to physical parameters
using the inverse of the prior
"""
def __init__(self, z_bound=[-2., 0.5], dust_bound=[-3., 0.5],
sfh0_bound=[-3.1, -1.1], sfh1_bound=[-2.1, -0.1],
sfh2_bound=[-1.7, 0.3], sfh3_bound=[-1.2, 0.8],
sfh4_bound=[-0.5, 1.5], sfh5_bound=[0.4, 2.4],
sfh6_bound=[0.9, 2.9], dmod_bound=[23.5, 30.]):
"""
Yields a `FullFlatPrior` object with specified bounds in each
dimension.
Parameters
----------
z_bound : array-like with shape (2,), optional
lower (default -2.) and upper (default 0.5) bounds of metallicity,
in units log_10(z/z_solar).
dust_bound : array-like with shape (2,), optional
lower (default -3.) and upper (default 0.5) bounds of dust
extinction, in units log_10 E(B-V).
sfh0_bound, ... , sfh6_bound : array-like with shape (2,), optional
lower and upper bounds of star-formation in each age bin, in units
log_10 M_star.
default is set for Npix=1e2, tau=5 SFH.
dmod_bound : array-like with shape (2,), optional
lower (default 23.5 = 0.5 Mpc) and upper (default 30.0 = 10 Mpc)
bounds of distance modulus
Yields
------
`FullFlatPrior`
Object representing a flat prior with given bounds
Raises
------
`ValueError`
            If any keyword arguments are not array-like with length 2
            OR if upper bounds are less than lower bounds in any dimension
"""
bounds = np.array([z_bound, dust_bound, sfh0_bound, sfh1_bound,
sfh2_bound, sfh3_bound, sfh4_bound, sfh5_bound,
sfh6_bound, dmod_bound])
FlatPrior.__init__(self, bounds)
```
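A short worked example of the flat prior: `prior_transform` maps the unit cube to the bounded box via lower + width * u, and `lnprior` returns 0 inside the box and -inf outside (the import path follows the file layout above):

```python
import numpy as np
from pcmdpy.sampling.priors import FlatPrior

prior = FlatPrior([[0., 1.], [-2., 2.]])                     # 2-d box [0,1] x [-2,2]

print(prior.prior_transform(np.array([0.5, 0.75])))          # [0.5, 1.0]
print(prior.lnprior(np.array([0.5, 1.0])))                   # 0.0   (inside)
print(prior.lnprior(np.array([0.5, 3.0])))                   # -inf  (outside)
print(prior.inverse_prior_transform(np.array([0.5, 1.0])))   # [0.5, 0.75]
```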
#### File: pcmdpy/sampling/results.py
```python
import numpy as np
import matplotlib.pyplot as plt
from corner import corner
import pandas as pd
from scipy.special import logsumexp
import sys
from datetime import datetime
import time
from ..plotting.plotting import step_plot, step_fill
from ..galaxy.sfhmodels import all_sfh_models, NonParam, SSPModel
from ..galaxy.dustmodels import all_dust_models
from ..galaxy.distancemodels import all_distance_models
from ..galaxy.metalmodels import all_metal_models
from ..galaxy.galaxy import CustomGalaxy
from dynesty import utils as dyfunc
from dynesty import plotting as dyplot
from dynesty.results import Results
class ResultsLogger(object):
def __init__(self, sampler, out_file, verbose=True, print_loc=sys.stdout,
out_df=None, live_file=None,
save_every=10, param_names=None):
ndim = sampler.npdim
self.sampler = sampler
self.print_loc = print_loc
self.last_time = time.time()
self.start_time = time.time()
self.out_file = out_file
if out_df is None:
self.colnames = ['niter', 'nc', 'eff', 'logl', 'logwt',
'logvol', 'logz', 'logzerr', 'h', 'delta_logz',
'time_elapsed']
if param_names is not None:
self.colnames += list(param_names)
else:
self.colnames += ['param{:d}'.format(i) for i in range(ndim)]
self.out_df = pd.DataFrame(columns=self.colnames)
else:
assert np.all(np.in1d(param_names, out_df.columns)), (
"provided parameter names {} not in output Dataframe".format(param_names))
self.out_df = out_df
self.live_file = live_file
self.verbose = verbose
self.save_every = save_every
self.param_names = param_names
def collect(self, results, niter, ncall, nbatch=None, dlogz=None,
logl_max=None, add_live_it=None, stop_val=None,
logl_min=-np.inf):
(worst, ustar, vstar, loglstar, logvol,
logwt, logz, logzvar, h, nc, worst_it,
boundidx, bounditer, eff, delta_logz) = results
if delta_logz > 1e6:
delta_logz = np.inf
if logzvar >= 0. and logzvar <= 1e6:
logzerr = np.sqrt(logzvar)
else:
logzerr = np.nan
if logz <= -1e6:
logz = -np.inf
last = self.last_time
self.last_time = time.time()
dt = self.last_time - last
total_time = self.last_time - self.start_time
ave_t = dt/nc
if self.verbose:
# constructing output
print_str = 'iter: {:d}'.format(niter)
if add_live_it is not None:
print_str += "+{:d}".format(add_live_it)
print_str += " | "
if nbatch is not None:
print_str += "batch: {:d} | ".format(nbatch)
print_str += "nc: {:d} | ".format(nc)
print_str += "ncalls: {:d} | ".format(ncall)
print_str += "bounds: {:d} | ".format(bounditer)
print_str += "eff(%): {:6.3f} | ".format(eff)
print_str += "logz: {:.1e} +/- {:.1e} | ".format(logz, logzerr)
if dlogz is not None:
print_str += "dlogz: {:6.3f} > {:6.3f}".format(delta_logz,
dlogz)
else:
print_str += "stop: {:6.3f}".format(stop_val)
print_str += "\n loglike: {:.1e} | ".format(loglstar)
print_str += "params: {:s}".format(str(vstar))
print_str += "\n Average call time: {:.2f} sec | ".format(ave_t)
print_str += "Current time: {:s}".format(str(datetime.now()))
print_str += '\n --------------------------'
print(print_str, file=self.print_loc)
sys.stdout.flush()
# Saving results to df
row = {'niter': niter}
row['time_elapsed'] = total_time
row['logl'] = loglstar
row['logvol'] = logvol
row['logwt'] = logwt
row['logz'] = logz
row['h'] = h
row['eff'] = eff
row['nc'] = nc
row['delta_logz'] = delta_logz
row['logzerr'] = logzerr
if self.param_names is not None:
for i, pname in enumerate(self.param_names):
row[pname] = vstar[i]
else:
for i, v in enumerate(vstar):
row['param{0:d}'.format(i)] = v
self.out_df = self.out_df.append(row, ignore_index=True)
# Save current live points
if ((niter+1) % self.save_every == 0):
self.flush_to_csv()
def flush_to_csv(self):
self.out_df.to_csv(self.out_file, mode='a', index=False,
header=False, float_format='%.4e')
self.out_df.drop(self.out_df.index, inplace=True)
# track current live points
if self.live_file is not None:
live_df = pd.DataFrame(columns=self.param_names,
data=self.sampler.live_v)
live_df['logl'] = self.sampler.live_logl
live_df.to_csv(self.live_file, mode='w', index=False,
header=True, float_format='%.4e')
class ResultsPlotter(object):
def __init__(self, df_file, live_file=None, gal_model=None,
model_is_truth=False, run_name=None):
try:
self.df = pd.read_csv(df_file)
except UnicodeDecodeError:
self.df = pd.read_csv(df_file, compression='gzip')
self.df['live'] = False
if live_file is not None:
try:
live_df = pd.read_csv(live_file)
except FileNotFoundError:
print('Unable to find live_file: {}. Continuing without '
'live points'.format(live_file))
else:
# Check if live points have already been added
n_live = len(live_df)
if (self.df.nc.tail(n_live) == 1.0).mean() < 0.9:
live_df['live'] = True
live_df['nc'] = 1
live_df['eff'] = self.df['eff'].values[-1]
logvols = self.df['logvol'].values[-1]
logvols += np.log(1. - (np.arange(n_live)+1.) / (n_live+1.))
logvols_pad = np.concatenate(([self.df['logvol'].values[-1]], logvols))
logdvols = logsumexp(a=np.c_[logvols_pad[:-1], logvols_pad[1:]],
axis=1, b=np.c_[np.ones(n_live),
-np.ones(n_live)])
logdvols += np.log(0.5)
dlvs = logvols_pad[:-1] - logvols_pad[1:]
logz = self.df['logz'].values[-1]
logzvar = self.df['logzerr'].values[-1]**2
h = self.df['h'].values[-1]
loglstar = self.df['logl'].values[-1]
lsort_idx = np.argsort(live_df.logl.values)
loglmax = live_df.logl.max()
logwts = []
hs = []
logzs = []
logzerrs = []
delta_logzs = []
for i in range(n_live):
idx = lsort_idx[i]
logvol, logdvol, dlv = logvols[i], logdvols[i], dlvs[i]
loglstar_new = live_df.logl.values[idx]
logwt = np.logaddexp(loglstar_new, loglstar) + logdvol
logz_new = np.logaddexp(logz, logwt)
lzterm = (np.exp(loglstar - logz_new) * loglstar +
np.exp(loglstar_new - logz_new) * loglstar_new)
h_new = (np.exp(logdvol) * lzterm +
np.exp(logz - logz_new) * (h + logz) -
logz_new)
dh = h_new - h
h = h_new
logz = logz_new
logzvar += dh * dlv
loglstar = loglstar_new
logz_remain = loglmax + logvol
delta_logz = np.logaddexp(logz, logz_remain) - logz
logwts.append(logwt)
hs.append(h)
logzs.append(logz)
logzerrs.append(np.sqrt(logzvar))
delta_logzs.append(delta_logz)
live_df.sort_values('logl', inplace=True)
live_df['logwt'] = logwts
live_df['logvol'] = logvols
live_df['h'] = hs
live_df['logz'] = logzs
live_df['logzerr'] = logzerrs
live_df['delta_logz'] = delta_logzs
live_df['niter'] = np.arange(n_live) + self.df['niter'].max() + 1
self.df = self.df.append(live_df, ignore_index=True,
sort=False)
self.gal_model = gal_model
self.true_model = None
self.run_name = run_name
self.n_iter = len(self.df)
self.n_live = self.df['live'].sum()
self.n_dead = self.n_iter - self.n_live
self.true_params = None
if gal_model is not None:
if model_is_truth:
self.true_model = self.gal_model
self.true_params = list(self.true_model._params)
else: # If no model provided, must guess the model used
cols = self.df.columns
# Identify the metal model from parameters found
metal_model = None
for mm in all_metal_models:
if np.all(np.in1d(mm._param_names, cols)):
metal_model = mm()
break
if metal_model is None:
raise ValueError(
'params found to not match a known metal model:\n'
'{}'.format(cols))
# Identify the dust model from parameters found
dust_model = None
for dm in all_dust_models:
if np.all(np.in1d(dm._param_names, cols)):
dust_model = dm()
break
if dust_model is None:
raise ValueError(
'params found to not match a known dust model:\n'
'{}'.format(cols))
# Identify the SFH model from parameters found
sfh_model = None
for sfhm in all_sfh_models:
params = sfhm._param_names
if isinstance(params, property):
params = sfhm()._param_names
if np.all(np.in1d(params, cols)):
sfh_model = sfhm()
break
if sfh_model is None:
raise ValueError(
'params found to not match a known sfh model:\n'
'{}'.format(cols))
# Identify the distance model from parameters found
distance_model = None
for dm in all_distance_models:
if np.all(np.in1d(dm._param_names, cols)):
distance_model = dm()
break
if distance_model is None:
raise ValueError(
'params found to not match a known distance model:\n'
'{}'.format(cols))
self.gal_model = CustomGalaxy(
metal_model, dust_model, sfh_model, distance_model)
self.params, self.labels = [], []
for m in [self.metal_model, self.dust_model, self.sfh_model,
self.distance_model]:
self.params.extend(m._param_names)
self.labels.extend(m._fancy_names)
if isinstance(self.sfh_model, NonParam):
nbins = self.sfh_model._num_params
sfhs = 10.**self.df[['logSFH{:d}'.format(i) for i in range(nbins)]]
self.df['logNpix'] = np.log10(np.sum(sfhs.values, axis=1))
self.params.append('logNpix')
self.labels.append(r'$\log_{10} N_\mathrm{pix}$')
if self.true_params is not None:
self.true_params += [np.log10(self.true_model.sfh_model.Npix)]
self.n_params = len(self.params)
# weights defined by Dynesty
self.df['log_weights'] = (self.df.logwt.values -
logsumexp(self.df.logwt.values))
self.df['dynesty_weights'] = np.exp(self.df['log_weights'])
# weights purely from log likelihoods
logl_ws = (self.df.logl.values -
logsumexp(self.df.logl.values))
self.df['likelihood_weights'] = np.exp(logl_ws)
self.df['weights'] = self.df['dynesty_weights']
self.df['time_elapsed'] /= 3600.
try:
self.df['logfeh'] = self.df.logzh
except AttributeError:
pass
@property
def metal_model(self):
return self.gal_model.metal_model
@property
def dust_model(self):
return self.gal_model.dust_model
@property
def sfh_model(self):
return self.gal_model.sfh_model
@property
def distance_model(self):
return self.gal_model.distance_model
def as_dynesty(self, burn=0, trim=0, max_logl=None):
if trim > 0:
sub_df = self.df.iloc[burn:-trim]
samples = self.get_chains().values[burn:-trim]
else:
sub_df = self.df.iloc[burn:]
samples = self.get_chains().values[burn:]
logwt = sub_df.logwt.values
logwt -= logsumexp(logwt)
logwt += sub_df.logz.values[-1]
results = [
('nlive', 0),
('niter', len(sub_df)),
('ncall', sub_df.nc.values.astype(int)),
('eff', sub_df.eff.values[-1]),
('samples', samples),
('logwt', logwt),
('logl', sub_df.logl.values),
('logvol', sub_df.logvol.values),
('logz', sub_df.logz.values),
('logzerr', sub_df.logzerr.values),
('information', sub_df.h.values)]
results = Results(results)
if max_logl is not None:
new_logl = np.array(sub_df.logl.values)
new_logl[new_logl >= max_logl] = max_logl
results = dyfunc.reweight_run(results,
logp_new=new_logl)
return results
def get_samples(self, burn=0, trim=0, max_logl=None):
results = self.as_dynesty(burn=burn, trim=trim)
return results['samples']
def get_weights(self, burn=0, trim=0, max_logl=None):
results = self.as_dynesty(burn=burn, trim=trim,
max_logl=max_logl)
return np.exp(results['logwt'] - logsumexp(results['logwt']))
# @property
# def samples(self):
# return self.get_chains().values
# @property
# def weights(self):
# return self.df['weights'].values
def get_chains(self):
return self.df[self.params]
def means(self, burn=0, trim=0, max_logl=None):
kwargs = {'burn': burn,
'trim': trim,
'max_logl': max_logl}
samples = self.get_samples(**kwargs)
weights = self.get_weights(**kwargs)
means, _ = dyfunc.mean_and_cov(samples, weights)
return means
def cov(self, burn=0, trim=0, max_logl=None):
kwargs = {'burn': burn,
'trim': trim,
'max_logl': max_logl}
samples = self.get_samples(**kwargs)
weights = self.get_weights(**kwargs)
_, cov = dyfunc.mean_and_cov(samples, weights)
return cov
def stds(self, burn=0, trim=0, max_logl=None):
cov = self.cov(burn=burn, trim=trim, max_logl=max_logl)
return np.sqrt([cov[i, i] for i in range(self.n_params)])
@property
def best_params(self):
if isinstance(self.sfh_model, NonParam):
return self.df.tail(1)[self.params[:-1]].values[0]
else:
return self.df.tail(1)[self.params].values[0]
@property
def best_model(self):
from ..galaxy.galaxy import CustomGalaxy
gal = CustomGalaxy(self.metal_model, self.dust_model, self.sfh_model,
self.distance_model)
gal.set_params(self.best_params)
return gal
def plot_trace(self, axes=None, burn=0, trim=0, max_logl=None, smooth=0.02,
show_truth=True, full_range=False, **traceplot_kwargs):
"""
Returns
-------
fig, axes
"""
dynesty_kwargs = {'burn': burn,
'trim': trim,
'max_logl': max_logl}
results = self.as_dynesty(**dynesty_kwargs)
kwargs = {'labels': self.labels,
'smooth': smooth,
'truths': self.true_params if show_truth else None,
'fig': None,
'span': None,
'show_titles': True}
weights = np.exp(results['logwt'] - logsumexp(results['logwt']))
if full_range:
kwargs['span'] = [[results['samples'][:, i].min(),
results['samples'][:, i].max()] for i in range(self.n_params)]
else:
means = self.means(**dynesty_kwargs)
stds = self.stds(**dynesty_kwargs)
kwargs['span'] = [[means[i] - max(5*stds[i], 1e-3),
means[i] + max(5*stds[i], 1e-3)] for i in range(self.n_params)]
kwargs.update(traceplot_kwargs)
if (axes is not None) and (axes.shape == (self.n_params, 2)):
kwargs['fig'] = (axes.flatten()[0].get_figure(), axes)
fig, axes = dyplot.traceplot(results, **kwargs)
return fig, axes
def plot_chains(self, axes=None, burn=0, title=None, dlogz=0.5,
include_live=True, show_prior=True, chains_only=False,
plot_kwargs=None):
nr = self.n_params + 3
if chains_only:
nr = self.n_params
if axes is None:
fig, axes = plt.subplots(nrows=nr, figsize=(8, 2+nr), sharex=True)
else:
assert(len(axes) == nr)
if title is None:
title = self.run_name
if plot_kwargs is None:
plot_kwargs = {}
else:
plot_kwargs = dict(plot_kwargs)
is_live = self.df['live'].values
is_dead = ~is_live
xs_live = np.arange(self.n_live) + self.n_dead
live_plot_kwargs = plot_kwargs.copy()
live_plot_kwargs.update({'color': 'c',
'ls': ':'})
for i, p in enumerate(self.params):
axes[i].plot(self.df[p].values[is_dead],
**plot_kwargs)
if include_live:
axes[i].plot(xs_live, self.df[p].values[is_live],
**live_plot_kwargs)
axes[i].set_ylabel(self.labels[i])
if not chains_only:
axes[-3].plot(np.log10(self.df['delta_logz'].values[is_dead]),
**plot_kwargs)
axes[-3].axhline(y=np.log10(dlogz), ls='--', color='r')
axes[-3].set_ylabel(r'log $\Delta$ln Z')
axes[-2].plot(self.df['eff'].values[is_dead],
**plot_kwargs)
axes[-2].set_ylabel('eff (%)')
axes[-1].plot(self.df['time_elapsed'].values[is_dead],
**plot_kwargs)
axes[-1].set_ylabel('run time (hrs)')
axes[-1].set_xlabel('Iteration')
if self.true_model is not None:
for i in range(self.n_params):
axes[i].axhline(y=self.true_params[i], color='r', ls='--')
if show_prior and (self.prior is not None):
for i in range(self.n_params):
axes[i].axhline(y=self.prior.lower_bounds[i], color='k',
ls=':')
axes[i].axhline(y=self.prior.upper_bounds[i], color='k',
ls=':')
if burn > 0:
for ax in axes:
ax.axvline(x=burn, ls=':', color='k')
if title is not None:
axes[0].set_title(title)
return axes
def plot_corner(self, burn=0, trim=0, max_logl=None, axes=None, title=None,
smooth=0.02, show_truth=True, full_range=False,
sig_levels=[1,2,3], **corner_kwargs):
"""
Returns
-------
fig, axes
"""
dynesty_kwargs = {'burn': burn,
'trim': trim,
'max_logl': max_logl}
results = self.as_dynesty(**dynesty_kwargs)
kwargs = {'labels': self.labels,
'smooth': smooth,
'truths': self.true_params if show_truth else None,
'fig': None,
'span': None,
'show_titles': True}
levels = 1.0 - np.exp(-0.5 * np.array(sig_levels)**2)
kwargs['hist2d_kwargs'] = kwargs.get('hist2d_kwargs', {})
kwargs['hist2d_kwargs']['levels'] = levels
weights = np.exp(results['logwt'] - logsumexp(results['logwt']))
if full_range:
kwargs['span'] = [[results['samples'][:, i].min(),
results['samples'][:, i].max()] for i in range(self.n_params)]
else:
means = self.means(**dynesty_kwargs)
stds = self.stds(**dynesty_kwargs)
kwargs['span'] = [[means[i] - max(5*stds[i], 1e-1),
means[i] + max(5*stds[i], 1e-1)] for i in range(self.n_params)]
kwargs.update(corner_kwargs)
if (axes is not None) and (axes.shape == (self.n_params,
self.n_params)):
kwargs['fig'] = (axes.flatten()[0].get_figure(), axes)
fig, axes = dyplot.cornerplot(results, **kwargs)
return fig, axes
# def plot_corner(self, fig=None, title=None, burn=0, trim=0, bins=30,
# include_live=True, smooth_frac=.01, smooth1d=0.,
# weight=False, full_range=False,
# show_prior=False, plot_density=False, fill_contours=True,
# sig_levels=None, plot_datapoints=True, show_truth=True,
# **corner_kwargs):
# if trim > 0:
# df_temp = self.df.iloc[burn:-trim]
# else:
# df_temp = self.df.iloc[burn:]
# if not include_live:
# df_temp = df_temp[~df_temp['live']]
# vals = df_temp[self.params].values
# smooth = smooth_frac * bins
# if sig_levels is None:
# sig_levels = np.arange(1, 4)
# # convert from sigma to 2d CDF (equivalent of 68-95-99.7 rule)
# levels = 1. - np.exp(-0.5 * sig_levels**2.)
# if full_range:
# lims = []
# for p in self.params:
# lims += [[self.df[p].min(), self.df[p].max()]]
# else:
# lims = None
# if corner_kwargs is None:
# corner_kwargs = {}
# else:
# corner_kwargs = dict(corner_kwargs)
# if weight:
# corner_kwargs['weights'] = df_temp['weights'].values
# else:
# corner_kwargs['weights'] = None
# truths = self.true_params if show_truth else None
# corner_kwargs.update({'labels': self.labels,
# 'truths': truths, 'fig': fig,
# 'bins': bins, 'smooth': smooth,
# 'plot_density': plot_density,
# 'fill_contours': fill_contours,
# 'levels': levels,
# 'range': lims,
# 'smooth1d': smooth1d,
# 'plot_datapoints': plot_datapoints})
# fig = corner(vals, **corner_kwargs)
# axes = np.array(fig.get_axes()).reshape(self.n_params, self.n_params)
# if show_prior:
# for i in range(self.n_params):
# a = axes[i, i]
# lower, upper = a.get_ylim()
# y = len(df_temp) / bins
# if weight:
# y *= np.mean(corner_kwargs['weights'])
# a.axhline(y=y, ls=':')
# if title is None:
# fig.suptitle(self.run_name)
# else:
# fig.suptitle(title)
# return (fig, axes)
def plot_sfr(self, width=68., ax=None, title=None,
burn=0, stop_after=None, show_prior=False, **plot_kwargs):
assert (0. <= width <= 100.), "width must be between 0 and 100"
if isinstance(self.sfh_model, SSPModel):
print('Cannot plot cumulative SFH for SSP')
return
cols = self.sfh_model._param_names
take = slice(burn, stop_after)
vals = self.df[cols].values[take]
edges = self.sfh_model.default_edges
lookback_gyr = 10.**(edges - 9.)
logdt = np.log10(np.diff(lookback_gyr * 1e9))
sfr = np.array([self.sfh_model.set_params(v).logSFH - logdt
for v in vals])
if self.true_model is not None:
p_sfh = self.sfh_model._num_params
p_dist = self.distance_model._num_params
if p_dist > 0:
vals_true = self.true_model._params[-p_sfh-p_dist:-p_dist]
else:
vals_true = self.true_model._params[-p_sfh:]
true_sfr = self.sfh_model.set_params(vals_true).logSFH - logdt
if ax is None:
fig, ax = plt.subplots()
med = np.percentile(sfr, 50., axis=0)
upper = np.percentile(sfr, 50. + 0.5*width, axis=0)
lower = np.percentile(sfr, 50. - 0.5*width, axis=0)
color = plot_kwargs.pop('color', 'k')
alpha = plot_kwargs.pop('alpha', 0.3)
step_plot(lookback_gyr, med, ax=ax, ls='-', color=color, **plot_kwargs)
step_fill(lookback_gyr, y1=lower, y2=upper, ax=ax,
alpha=alpha, color=color,
**plot_kwargs)
if self.true_model is not None:
step_plot(lookback_gyr, true_sfr, ax=ax, ls='--',
color='r', **plot_kwargs)
ax.set_yscale('linear')
if title is None:
ax.set_title(self.run_name)
else:
ax.set_title(title)
ax.set_xlabel('Lookback Time (Gyr)')
ax.set_ylabel('Log Instantaneous SFR')
if show_prior:
if self.prior is None:
self.plot_sfr(burn=0, stop_after=500, ax=ax, width=width,
color='b', alpha=0.1, zorder=-1,
show_prior=False, title=title, **plot_kwargs)
else:
if p_dist > 0:
lower_p = self.prior.lower_bounds[-p_sfh-p_dist:-p_dist]
upper_p = self.prior.upper_bounds[-p_sfh-p_dist:-p_dist]
else:
lower_p = self.prior.lower_bounds[-p_sfh:]
upper_p = self.prior.upper_bounds[-p_sfh:]
lower = self.sfh_model.set_params(lower_p).logSFH - logdt
upper = self.sfh_model.set_params(upper_p).logSFH - logdt
step_fill(lookback_gyr, y1=lower, y2=upper, ax=ax, alpha=0.1,
color='b', zorder=-1, **plot_kwargs)
return ax
def plot_cum_sfh(self, width=68., ax=None, title=None,
burn=0, stop_after=None, show_prior=False, **plot_kwargs):
assert (0. <= width <= 100.), "width must be between 0 and 100"
if isinstance(self.sfh_model, SSPModel):
print('Cannot plot cumulative SFH for SSP')
return
cols = self.sfh_model._param_names
take = slice(burn, stop_after)
vals = self.df[cols].values[take]
cum_sfh = np.array([self.sfh_model.set_params(v).get_cum_sfh()
for v in vals])
if self.true_model is not None:
p_sfh = self.sfh_model._num_params
p_dist = self.distance_model._num_params
if p_dist > 0:
vals_true = self.true_model._params[-p_sfh-p_dist:-p_dist]
else:
vals_true = self.true_model._params[-p_sfh:]
true_cum_sfh = self.sfh_model.set_params(vals_true).get_cum_sfh()
edges = self.sfh_model.default_edges
# time_gyr = 10.**(edges[-1] - 9.) - 10.**(edges - 9.)
time_gyr = 10.**(edges - 9.)
if ax is None:
fig, ax = plt.subplots()
med = np.percentile(cum_sfh, 50., axis=0)
upper = np.percentile(cum_sfh, 50. + 0.5*width, axis=0)
lower = np.percentile(cum_sfh, 50. - 0.5*width, axis=0)
color = plot_kwargs.pop('color', 'k')
alpha = plot_kwargs.pop('alpha', 0.3)
ax.plot(time_gyr, med, ls='-', color=color, **plot_kwargs)
ax.fill_between(time_gyr, y1=lower, y2=upper, alpha=alpha,
color=color, **plot_kwargs)
if self.true_model is not None:
ax.plot(time_gyr, true_cum_sfh, ls='--', color='r',
**plot_kwargs)
ax.set_yscale('linear')
if title is None:
ax.set_title(self.run_name)
else:
ax.set_title(title)
ax.set_xlabel('Lookback Time (Gyr)')
ax.set_ylabel('cumulative SFH')
if show_prior:
if self.prior is None:
self.plot_cum_sfh(burn=0, stop_after=500, ax=ax, width=width,
color='b', alpha=0.1, zorder=-1,
show_prior=False, title=title, **plot_kwargs)
else:
if p_dist > 0:
lower_p = self.prior.lower_bounds[-p_sfh-p_dist:-p_dist]
upper_p = self.prior.upper_bounds[-p_sfh-p_dist:-p_dist]
else:
lower_p = self.prior.lower_bounds[-p_sfh:]
upper_p = self.prior.upper_bounds[-p_sfh:]
lower = self.sfh_model.set_params(lower_p).get_cum_sfh()
upper = self.sfh_model.set_params(upper_p).get_cum_sfh()
ax.fill_between(time_gyr, y1=lower, y2=upper, alpha=0.1,
color='b', zorder=-1, **plot_kwargs)
return ax
def plot_everything(self, chain_kwargs=None, cum_sfh_kwargs=None,
corner_kwargs=None, **all_kwargs):
if chain_kwargs is None:
chain_kwargs = {}
chain_kwargs.update(all_kwargs)
if cum_sfh_kwargs is None:
cum_sfh_kwargs = {}
cum_sfh_kwargs.update(all_kwargs)
if corner_kwargs is None:
corner_kwargs = {}
corner_kwargs.update(all_kwargs)
chain_axes = self.plot_chains(**chain_kwargs)
fig, axes = plt.subplots(ncols=2, nrows=1, figsize=(10, 5))
axes = axes.flatten()
self.plot_sfr(ax=axes[0], **cum_sfh_kwargs)
self.plot_cum_sfh(ax=axes[1], **cum_sfh_kwargs)
corner_fig, corner_axes = self.plot_corner(**corner_kwargs)
return (chain_axes, axes, (corner_fig, corner_axes))
```
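A hypothetical post-processing session; the CSV paths stand in for files written by `ResultsLogger` during a dynesty run:

```python
from pcmdpy.sampling.results import ResultsPlotter

plotter = ResultsPlotter('my_run.csv', live_file='my_run_live.csv',
                         run_name='my_run')
fig, axes = plotter.plot_corner(burn=500)   # weighted posterior corner plot
ax = plotter.plot_sfr(burn=500)             # instantaneous SFR vs lookback time
print(plotter.means(burn=500))              # posterior means of the free parameters
```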
#### File: pcmdpy/simulation/driver.py
```python
import numpy as np
from ..utils import utils
from . import gpu_utils
import warnings
from scipy.stats import multivariate_normal, poisson, norm
from sys import stderr
class Driver:
def __init__(self, iso_model, max_Nim=1024, gpu=True, **kwargs):
self.iso_model = iso_model
self.fixed_states = None
self.random_states = None
self.max_Nim = max_Nim
if self.iso_model is None:
self.filters = None
self.n_filters = 2
else:
self.filters = iso_model.filters
self.n_filters = len(self.filters)
if gpu:
if gpu_utils._GPU_AVAIL:
self.gpu_on = True
self.update_states()
else:
stderr.write('GPU acceleration not available. Continuing without.')
self.gpu_on = False
else:
# No GPU acceleration
self.gpu_on = False
# No data has been initialized
self._data_init = False
def update_states(self):
if not self.gpu_on:
return
stderr.write('Please wait while the GPU states are initialized')
self.fixed_states = gpu_utils.XORWOWStatesArray(self.max_Nim*self.max_Nim,
fixed_seed=True)
self.random_states = gpu_utils.XORWOWStatesArray(self.max_Nim*self.max_Nim,
fixed_seed=False)
def __del__(self):
del self.fixed_states
del self.random_states
def initialize_data(self, pcmd, bins=None, **kwargs):
if bins is None:
magbins = np.arange(-12, 45, 0.05)
colorbins = np.arange(-1.5, 5.6, 0.05)
bins = [magbins]
for _ in range(1, self.n_filters):
bins.append(colorbins)
bins = np.array(bins)
self.hess_bins = bins
self.n_data = pcmd.shape[1]
# fit a 2D gaussian to the points
means = np.mean(pcmd, axis=1)
cov = np.cov(pcmd)
self.gaussian_data = multivariate_normal(mean=means, cov=cov)
# compute the mean magnitudes
self.mean_mags_data = utils.mean_mags(pcmd)
self.mean_pcmd_data = utils.make_pcmd(self.mean_mags_data)
counts, hess, err = utils.make_hess(pcmd, self.hess_bins)
self.counts_data = counts
self.hess_data = hess
self.err_data = err
self._data_init = True
self.pcmd_data = pcmd
def loglike_map(self, pcmd, like_mode=2, signed=True):
counts_model, hess_model, err_model = utils.make_hess(
pcmd, self.hess_bins)
combined_var = (self.err_data**2. + err_model**2.)
hess_diff = (hess_model - self.hess_data)
if like_mode == 1: # Poisson model
hess_model[(counts_model == 0) & (self.counts_data > 0)] = 0.1 / pcmd.shape[1] # add 0.1 fake counts in each empty model bin
loglike = poisson.logpmf(self.counts_data,
mu=(hess_model * self.n_data))
elif like_mode == 2: # Gaussian model (no variance term included)
loglike = -1. * hess_diff**2 / (2.*combined_var)
elif like_mode == 3: # Gaussian model (variance included, downweights high-count bins)
loglike = norm.logpdf(hess_model,
loc=self.hess_data,
scale=np.sqrt(combined_var))
else:
raise NotImplementedError('like_mode only defined for [1,2,3]')
if signed:
return loglike * np.sign(hess_diff)
else:
return loglike
def loglike(self, pcmd, like_mode=2, **kwargs):
assert self._data_init, ('Cannot evaluate, as data has not been '
'initialized (use driver.initialize_data)')
# compute the mean magnitudes
mean_mags_model = utils.mean_mags(pcmd)
mean_pcmd_model = utils.make_pcmd(mean_mags_model)
# add terms relating to mean magnitude and colors
var_mag = 0.05**2
var_color = 0.05**2
var_pcmd = np.append([var_mag],
[var_color for _ in range(1, self.n_filters)])
mean_term = -1. * np.sum((mean_pcmd_model - self.mean_pcmd_data)**2 /
(2*var_pcmd))
# ONLY use the normal approximation
if like_mode == 0:
# fit a multi-D gaussian to the points
means = np.mean(pcmd, axis=1)
cov = np.cov(pcmd)
normal_model = multivariate_normal(mean=means, cov=cov)
normal_term = np.sum(normal_model.logpdf(self.pcmd_data.T))
log_like = normal_term
elif like_mode in [1, 2, 3]:
llmap = self.loglike_map(pcmd, like_mode=like_mode, signed=False)
log_like = mean_term + np.sum(llmap)
else:
raise NotImplementedError('like_mode only defined for [0,1,2,3]')
return log_like
def simulate(self, gal_model, Nim, psf=True, fixed_seed=False,
shot_noise=True, sky_noise=None, downsample=5,
fudge_mag=0.0, gain=1.0, dark_per_sec=0.0,
mag_system='vega', lum_cut=np.inf, **kwargs):
if self.gpu_on:
if Nim > self.max_Nim:
self.max_Nim = Nim
self.update_states()
if fixed_seed:
temp_states = self.fixed_states.copy()
states = temp_states.gpudata
else:
states = self.random_states.gpudata
else:
states = None
IMF, mags = self.iso_model.model_galaxy(
gal_model, downsample=downsample, mag_system=mag_system,
return_mass=False, lum_cut=lum_cut)
fluxes = np.array([f.mag_to_counts(m) for f, m in zip(self.filters,
mags)])
dust_frac, dust_mean, dust_std = gal_model.dust_model.get_props()
images = gpu_utils.draw_image(IMF, fluxes, Nim, self.filters,
dust_frac, dust_mean, dust_std,
fudge_mag=fudge_mag,
d_states=states, gpu=self.gpu_on,
fixed_seed=fixed_seed,
**kwargs)
if psf:
            images = np.array([f.psf_convolve(im, **kwargs)
                               for f, im in zip(self.filters, images)])
if sky_noise is not None:
# add sky level (in counts) to each image
for im, sky in zip(images, sky_noise):
im += sky
if shot_noise:
if fixed_seed:
np.random.seed(0)
else:
np.random.seed()
            dark_images = np.array(
                [np.ones_like(im) * f._exposure * dark_per_sec
                 for f, im in zip(self.filters, images)])
images += dark_images
images = np.random.poisson(images).astype(np.float32)
images -= dark_images
images[images <= 0.] = 1e-3 # avoid nan issues by adding 0.001 counts
        mags = np.array([f.counts_to_mag(im.flatten(), **kwargs)
                         for f, im in zip(self.filters, images)])
pcmd = utils.make_pcmd(mags)
return pcmd, images
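# Hedged usage outline (the names below are assumptions, not part of this
# module): a driver instance built elsewhere in the package would typically
# be used as
#   driver.initialize_data(observed_pcmd)                   # bin the observed PCMD
#   model_pcmd, images = driver.simulate(gal_model, Nim=512, fixed_seed=True)
#   lnlike = driver.loglike(model_pcmd, like_mode=2)         # compare model to data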
``` |
{
"source": "johnnygreco/pymfit",
"score": 2
} |
#### File: pymfit/pymfit/configs.py
```python
from __future__ import division, print_function
from astropy.io import fits
__all__ = ['DEFAULT_SERSIC', 'sersic_config']
DEFAULT_SERSIC = {'X0': None,
'Y0': None,
'PA': [20., 0, 360],
'ell': [0.2, 0, 0.99],
'n': [1.0, 0.01, 5],
'I_e': [0.05, 0.0, 1000],
'r_e': [20., 0.0, 5000]}
def sersic_config(init_params={}, gal_pos='center', img_shape=None, delta_pos=50.0):
"""
Create a Sersic model config dictionary for pymfit.run.
Parameters
----------
init_params: dict, optional
Initial parameters that are different from DEFAULT_SERSIC.
See pymfit.write_config doc string for syntax.
gal_pos: tuple, optional
(X0, Y0) position of galaxy. If 'center', will use center of image.
        In this case, you must give the image shape or file name.
img_shape: tuple or str, optional
Image shape or the image file name.
delta_pos: float, optional
The +/- limits for the position.
"""
imfit_config = DEFAULT_SERSIC.copy()
for k, v in list(init_params.items()):
imfit_config[k] = v
if 'X0' not in list(init_params.keys()):
if gal_pos == 'center':
assert img_shape is not None, 'must give shape!'
if type(img_shape) == str:
img_shape = fits.getdata(img_shape).shape
gal_pos = img_shape[1]/2, img_shape[0]/2
imfit_config['X0'] = [gal_pos[0], gal_pos[0]-delta_pos,
gal_pos[0]+delta_pos]
imfit_config['Y0'] = [gal_pos[1], gal_pos[1]-delta_pos,
gal_pos[1]+delta_pos]
return imfit_config
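# A minimal usage sketch (the 512x512 image shape and the tweaked Sersic index
# below are illustrative assumptions, not values required by this module):
if __name__ == '__main__':
    cfg = sersic_config(init_params={'n': [0.8, 0.01, 5]},
                        gal_pos='center',
                        img_shape=(512, 512),
                        delta_pos=25.0)
    for param, value in cfg.items():
        print(param, value)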
```
#### File: pymfit/erwin_utils/imfit.py
```python
from __future__ import division, print_function
# Code for reading in and analyzing output of imfit
import glob
import numpy as np
from . import imfit_funcs as imfuncs
# dictionary mapping imfit function short names (as found in the config/parameter file) to
# corresponding 1-D Python functions in imfit_funcs.py, along with some useful information:
# "function" = corresponding imfit_funcs.py function, if one exists
# "nSkip" = the number of 2D-related parameters to skip (e.g., PA, ellipticity),
# "ell" = index for ellipticity parameter, if it exists,
# "a" = index or indices for semi-major-axis parameters (r_e, h, sigma, etc.)
imfitFunctionMap = {"Exponential": {"function": imfuncs.Exponential, "nSkip": 2, "ell": 1, "a": [3]},
"Exponential_GenEllipse": {"function": imfuncs.Exponential, "nSkip": 3, "ell": 1, "a": [4]},
"Sersic": {"function": imfuncs.Sersic, "nSkip": 2, "ell": 1, "a": [4]},
"Sersic_GenEllipse": {"function": imfuncs.Sersic, "nSkip": 3, "ell": 1, "a": [5]},
"Gaussian": {"function": imfuncs.Gauss, "nSkip": 2, "ell": 1, "a": [3]},
"GaussianRing": {"function": imfuncs.GaussRing, "nSkip": 2, "ell": 1, "a": [3,4]},
"GaussianRing2Side": {"function": imfuncs.GaussRing2Side, "nSkip": 2, "ell": 1, "a": [3,4,5]},
"Moffat": {"function": imfuncs.Moffat, "nSkip": 2, "ell": 1, "a": [3]},
"BrokenExponential": {"function": imfuncs.BrokenExp, "nSkip": 2, "ell": 1, "a": [3,4,5]}}
def ChopComments( theLine ):
return theLine.split("#")[0]
def GetFunctionImageNames( baseName, funcNameList ):
"""Generate a list of FITS filenames as would be created by makeimage in "--output-functions"
mode.
"""
nImages = len(funcNameList)
imageNameList = [ "%s%d_%s.fits" % (baseName, i + 1, funcNameList[i]) for i in range(nImages) ]
return imageNameList
def ReadImfitConfigFile( fileName, minorAxis=False, pix=0.168, getNames=False, X0=0.0 ):
"""Function to read and parse an imfit-generated parameter file
(or input config file) and return a tuple consisting of:
(list of 1-D imfit_funcs functions, list of lists of parameters).
pix = scale in arcsec/pixel, if desired for plotting vs radii in arcsec.
We assume that all functions have a center at x = 0; this can be changed by setting
X0.
Returns tuple of (functionList, trimmedParameterList)
If getNames == True:
Returns tuple of (functionNameList, functionList, trimmedParameterList)
"""
dlines = [ line for line in open(fileName) if len(line.strip()) > 0 and line[0] != "#" ]
funcNameList = []
paramMetaList = []
currentParamList = []
nLines = len(dlines)
for line in dlines:
trimmedLine = ChopComments(line)
#print(trimmedLine)
if trimmedLine.find("X0") == 0:
continue
if trimmedLine.find("Y0") == 0:
continue
if trimmedLine.find("FUNCTION") == 0:
# if this isn't the first function, store the previous set of parameters
if len(currentParamList) > 0:
paramMetaList.append(currentParamList)
# make a new parameter list for the new function
currentParamList = [X0]
pp = trimmedLine.split()
fname = pp[1].strip()
funcNameList.append(fname)
continue
else:
pp = trimmedLine.split()
newValue = float(pp[1])
currentParamList.append(newValue)
# ensure that final set of parameters get stored:
paramMetaList.append(currentParamList)
# process function list to remove unneeded parameters (and convert size measures
# from major-axis to minor-axis, if requested)
funcList = [ imfitFunctionMap[fname]["function"] for fname in funcNameList ]
trimmedParamList = []
nFuncs = len(funcList)
for i in range(nFuncs):
fname = funcNameList[i]
nSkipParams = imfitFunctionMap[fname]["nSkip"]
fullParams = paramMetaList[i]
# calculate scaling factor for minor-axis values, if needed
if minorAxis is True:
print(fname)
ellIndex = imfitFunctionMap[fname]["ell"]
print(ellIndex)
ell = fullParams[ellIndex+1]
q = 1.0 - ell
else:
q = 1.0
print(i, fname)
smaIndices = imfitFunctionMap[fname]["a"]
# convert length values to arcsec and/or minor-axis, if needed,
for smaIndex in smaIndices:
# +1 to account for X0 value at beginning of parameter list
fullParams[smaIndex+1] = pix*q*fullParams[smaIndex+1]
# construct the final 1-D parameter set for this function: X0 value, followed
# by post-2D-shape parameters
trimmedParams = [fullParams[0]]
trimmedParams.extend(fullParams[nSkipParams+1:])
trimmedParamList.append(trimmedParams)
if getNames is True:
return (funcNameList, funcList, trimmedParamList)
else:
return (funcList, trimmedParamList)
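# Hedged usage note (the file name below is a placeholder):
#   funcs, params = ReadImfitConfigFile('bestfit_parameters_imfit.dat',
#                                       pix=0.168, minorAxis=True)
# This yields one 1-D imfit_funcs function plus its trimmed parameter list
# per component found in the file.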
# Code for reading output of bootstrap resampling and MCMC chains
def GetBootstrapOutput( filename ):
"""Reads imfit's bootstrap-resampling output when saved using the
--save-bootstrap command-line option.
Parameters
----------
filename : str
name of file with bootstrap-resampling output
Returns
-------
(column_names, data_array) : tuple of (list, np.ndarray)
column_names = list of column names (strings)
data_array = numpy array of parameter values
with shape = (n_iterations, n_parameters)
"""
# get first 100 lines
# FIXME: file *could* be shorter than 100 lines; really complicated
# model could have > 100 lines of header...
with open(filename) as theFile:
firstLines = [next(theFile) for x in range(100)]
# find header line with column names and extract column names
for i in range(len(firstLines)):
if firstLines[i].find("# Bootstrap resampling output") >= 0:
columnNamesIndex = i + 1
break
columnNames = firstLines[columnNamesIndex][1:].split()
for i in range(len(columnNames)):
if columnNames[i] == "likelihood":
nParamColumns = i
break
# get the data
d = np.loadtxt(filename)
return (columnNames, d)
def GetSingleChain( filename, getAllColumns=False ):
"""Reads a single MCMC chain output file and returns a tuple of column names
and a numpy array with the data.
Parameters
----------
filename : str
name of file with MCMC output chain
getAllColumns: bool, optional
if False [default], only model parameter-value columns are retrieved;
if True, all output columns (including MCMC diagnostics) are retrieved
Returns
-------
(column_names, data_array) : tuple of (list, np.ndarray)
column_names = list of column names (strings)
data_array = numpy array of parameter values
with shape = (n_iterations, n_parameters)
"""
# get first 100 lines
# FIXME: file *could* be shorter than 100 lines; really complicated
# model could have > 100 lines of header...
with open(filename) as theFile:
firstLines = [next(theFile) for x in range(100)]
# find header line with column names and extract column names
for i in range(len(firstLines)):
if firstLines[i].find("# Column Headers") >= 0:
columnNamesIndex = i + 1
break
columnNames = firstLines[columnNamesIndex][1:].split()
for i in range(len(columnNames)):
if columnNames[i] == "likelihood":
nParamColumns = i
break
# get data for all columns, or just the model parameters?
whichCols = None
if not getAllColumns:
whichCols = list(range(nParamColumns))
outputColumnNames = columnNames[:nParamColumns]
else:
whichCols = None
outputColumnNames = columnNames
# get the data
d = np.loadtxt(filename, usecols=whichCols)
return (outputColumnNames, d)
def MergeChains( fname_root, maxChains=None, getAllColumns=False, start=10000, last=None,
secondHalf=False ):
"""
    Reads and concatenates all MCMC output chains with filenames = fname_root.*.txt.
    By default, samples from t = start (start = 10000) onward are extracted from
    each chain; this can be modified with the start, last, or secondHalf keywords.
Parameters
----------
fname_root : str
root name of output chain files (e.g., "mcmc_out")
maxChains : int or None, optional
maximum number of chain files to read [default = None = read all files]
getAllColumns : bool, optional
if False [default], only model parameter-value columns are retrieved;
if True, all output columns (including MCMC diagnostics) are retrieved
start : int, optional
extract samples from each chain beginning with time = start
ignored if "secondHalf" is True or if "last" is not None
last : int or None, optional
extract last N samples from each chain
ignored if "secondHalf" is True
secondHalf : bool, optional
if True, only the second half of each chain is extracted
        if False [default], the start and last keywords control which
        samples are extracted
Returns
-------
(column_names, data_array) : tuple of (list, np.ndarray)
column_names = list of column names (strings)
data_array = numpy array of parameter values
with shape = (n_samples, n_parameters)
"""
# construct list of filenames
if maxChains is None:
globPattern = "{0}.*.txt".format(fname_root)
filenames = glob.glob(globPattern)
else:
filenames = ["{0}.{1}.txt".format(fname_root, n) for n in range(maxChains)]
nFiles = len(filenames)
# get the first chain so we can tell how long the chains are
(colNames, dd) = GetSingleChain(filenames[0], getAllColumns=getAllColumns)
nGenerations = dd.shape[0]
# figure out what part of full chain to extract
if secondHalf is True:
startTime = int(np.floor(nGenerations / 2))
elif last is not None:
startTime = -last
else:
startTime = start
# get first chain and column names; figure out if we get all columns or just
# model parameters
if (startTime >= nGenerations):
txt = "WARNING: # generations in MCMC chain file {0} ({1:d}) is <= ".format(filenames[0],
nGenerations)
txt += "requested start time ({0:d})!\n".format(startTime)
print(txt)
return None
dd_final = dd[startTime:,:]
if getAllColumns is False:
nParamColumns = len(colNames)
whichCols = list(range(nParamColumns))
else:
whichCols = None
# get and append rest of chains if more than 1 chain-file was requested
if nFiles > 1:
for i in range(1, nFiles):
dd_next = np.loadtxt(filenames[i], usecols=whichCols)
dd_final = np.concatenate((dd_final, dd_next[startTime:,:]))
return (colNames, dd_final)
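# Hedged usage sketch: "mcmc_out" below is a placeholder chain-file root, so
# this expects files named mcmc_out.0.txt, mcmc_out.1.txt, and so on.
if __name__ == '__main__':
    result = MergeChains('mcmc_out', secondHalf=True)
    if result is not None:
        col_names, samples = result
        print(col_names)
        print('merged sample array shape:', samples.shape)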
```
#### File: pymfit/pymfit/pymfitter.py
```python
from __future__ import division, print_function
import os
from collections import OrderedDict
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import fits
from .core import run
__all__ = ['PymFitter']
class PymFitter(object):
def __init__(self, model, save_files=False):
self.model = model
self.save_files = save_files
self.results = OrderedDict()
self.img_fn = None
self.res_fn = None
self.model_fn = None
self.out_fn = None
self.mask_fn = None
self.model_arr = None
self.res_arr = None
def write_config(self, fn):
with open(fn, 'w') as file:
for i in range(self.model.ncomp):
comp = getattr(self.model, 'comp_'+str(i+1))
if comp.X0 is not None:
print('\n' + comp.X0.config_line, file=file)
print(comp.Y0.config_line, file=file)
print('FUNCTION '+ comp.name, file=file)
for par in comp.param_names:
line = getattr(comp, par).config_line
print(line, file=file)
def print_config(self):
for i in range(self.model.ncomp):
comp = getattr(self.model, 'comp_' + str(i+1))
if comp.X0 is not None:
print('\n' + comp.X0.config_line)
print(comp.Y0.config_line)
print('FUNCTION ' + comp.name)
for par in comp.param_names:
line = getattr(comp, par).config_line
print(line)
def read_results(self):
file = open(self.out_fn, 'r')
lines = file.readlines()
file.close()
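        # The imfit best-fit file mixes comment lines ('# ...'), blank lines,
        # 'FUNCTION ...' headers, X0/Y0 center lines, and one line per fitted
        # parameter; the filters below separate those groups before parsing.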
comments = [l for l in lines if l[0]=='#']
params = [l for l in lines if l[0] != '#' if l[:2] != '\n'\
if l[0] != 'F' if l[:2] != 'X0'\
if l[:2] != 'Y0']
cen_text = [l for l in lines if l[0] != '#'\
if (l[:2] == 'X0' or l[:2] == 'Y0')]
centers = []
for i in range(len(cen_text)//2):
_, x0, _, _, xerr = cen_text[2*i].split()
_, y0, _, _, yerr = cen_text[2*i+1].split()
pos_list = [float(x0), float(y0), float(xerr), float(yerr)]
centers.append(pos_list)
par_num = 0
cen_num = -1
for i in range(self.model.ncomp):
comp = getattr(self.model, 'comp_'+str(i+1))
self.results['comp_'+str(i+1)] = {}
self.results['comp_'+str(i+1)]['function'] = comp.name
if comp.X0 is not None:
cen_num += 1
self.results['comp_'+str(i+1)]['X0'] = centers[cen_num][0]
self.results['comp_'+str(i+1)]['Y0'] = centers[cen_num][1]
self.results['comp_'+str(i+1)]['X0_err'] = centers[cen_num][2]
self.results['comp_'+str(i+1)]['Y0_err'] = centers[cen_num][3]
for par in comp.param_names:
name, val = params[par_num].split()[:2]
err = params[par_num].split()[-1]
assert name == par
self.results['comp_'+str(i+1)].update({par: float(val)})
self.results['comp_'+str(i+1)].update({par+'_err': float(err)})
par_num += 1
reduced_chisq = [c for c in comments if
c.split()[1] == 'Reduced'][0].split()[-1]
if reduced_chisq != 'none':
self.results['reduced_chisq'] = float(reduced_chisq)
def print_results(self):
for i in range(self.model.ncomp):
comp = self.results['comp_'+str(i+1)]
params = getattr(self.model, 'comp_'+str(i+1)).param_names
print('\nComponent {}'.format(i+1))
print('---------------------')
print('Function {}'.format(comp['function']))
print('X0 {}'.format(comp['X0']))
print('Y0 {}'.format(comp['Y0']))
for p in params:
val = comp[p]
print('{:9} {:.4f}'.format(p, val))
def run(self, img_fn, mask_fn=None, var_fn=None, psf_fn=None,
config_fn='config.txt', out_fn='best-fit.txt', will_viz=False,
outdir='.', save_model=False, save_residual=False, **run_kws):
config_fn = os.path.join(outdir, config_fn)
out_fn = os.path.join(outdir, out_fn)
self.write_config(fn=config_fn)
self.out_fn = out_fn
self.mask_fn = mask_fn
if will_viz or save_model:
run_kws['save_model'] = True
if will_viz or save_residual:
run_kws['save_res'] = True
run(img_fn, config_fn, mask_fn=mask_fn, var_fn=var_fn,
out_fn=out_fn, psf_fn=psf_fn, pymfitter=True, **run_kws)
self.read_results()
self.img_fn = img_fn[:-3] if img_fn[-1] == ']' else img_fn
self.res_fn = img_fn[:-8] if img_fn[-1] == ']' else img_fn[:-5]
self.res_fn += '_res.fits'
self.model_fn = img_fn[:-8] if img_fn[-1] == ']' else img_fn[:-5]
self.model_fn += '_model.fits'
if will_viz:
self.model_arr = fits.getdata(self.model_fn)
self.res_arr = fits.getdata(self.res_fn)
if not self.save_files:
os.remove(out_fn)
os.remove(config_fn)
if will_viz and not save_model:
os.remove(self.model_fn)
if will_viz and not save_residual:
os.remove(self.res_fn)
def viz_results(self, subplots=None, show=True, save_fn=None,
titles=True, dpi=200, **kwargs):
from astropy.visualization import ZScaleInterval
zscale = ZScaleInterval()
if subplots:
fig, axes = subplots
else:
subplot_kw = dict(xticks=[], yticks=[])
if 'figsize' not in kwargs.keys():
kwargs['figsize'] = (16, 6)
fig, axes = plt.subplots(1, 3, subplot_kw=subplot_kw, **kwargs)
fig.subplots_adjust(wspace=0.08)
img = fits.getdata(self.img_fn)
model = self.model_arr
res = self.res_arr
vmin, vmax = zscale.get_limits(img)
if titles:
titles = ['Original Image', 'Model', 'Residual']
else:
titles = ['']*3
for i, data in enumerate([img, model, res]):
axes[i].imshow(data, vmin=vmin, vmax=vmax, origin='lower',
cmap='gray_r', aspect='equal', rasterized=True)
axes[i].set_title(titles[i], fontsize=20, y=1.01)
if self.mask_fn is not None:
mask = fits.getdata(self.mask_fn)
mask = mask.astype(float)
mask[mask == 0.0] = np.nan
axes[0].imshow(mask, origin='lower', alpha=0.4,
vmin=0, vmax=1, cmap='rainbow_r')
if show:
plt.show()
if save_fn is not None:
fig.savefig(save_fn, bbox_inches='tight', dpi=dpi)
return fig, axes
``` |
{
"source": "johnnygreco/sbf",
"score": 2
} |
#### File: johnnygreco/sbf/setup.py
```python
import os, sys
import builtins
from setuptools import setup, find_packages
sys.path.append('src')
def readme():
with open("README.rst") as f:
return f.read()
# HACK: fetch version
builtins.__SBF_SETUP__ = True
import sbf
version = sbf.__version__
# Publish the library to PyPI.
if "publish" in sys.argv[-1]:
os.system("python setup.py sdist bdist_wheel")
os.system(f"python3 -m twine upload dist/*{version}*")
sys.exit()
# Push a new tag to GitHub.
if "tag" in sys.argv:
os.system("git tag -a {0} -m 'version {0}'".format(version))
os.system("git push --tags")
sys.exit()
setup(
name='sbf',
version=version,
description='SBF made simple.',
long_description=readme(),
author='<NAME>',
author_email='<EMAIL>',
packages=find_packages(where="src"),
package_dir={"": "src"},
include_package_data=True,
url='https://github.com/johnnygreco/sbf',
install_requires=[
'numpy>=1.17',
'scipy>=1',
'matplotlib>=3',
'astropy>=4',
'sep>=1'
],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Intended Audience :: Science/Research",
"Operating System :: OS Independent",
"Topic :: Scientific/Engineering :: Astronomy",
],
python_requires='>=3.6',
)
```
#### File: src/sbf/masking.py
```python
import numpy as np
import scipy.ndimage as ndimage
from astropy.io import fits
from astropy.convolution import Gaussian2DKernel
from astropy.utils.misc import isiterable
import sep
__all__ = [
'meas_back',
'detect_sources',
'make_seg_mask',
'make_obj_mask',
'elliptical_mask',
'make_mask'
]
def _byteswap(arr):
"""
If array is in big-endian byte order (as astropy.io.fits
always returns), swap to little-endian for SEP.
"""
    if arr.dtype.byteorder == '>':
arr = arr.byteswap().newbyteorder()
return arr
def _outside_circle(cat, xc, yc, r):
"""
    Returns a mask of all objects that fall outside a
    circle centered at (xc, yc) of radius r.
"""
return np.sqrt((cat['x']-xc)**2 + (cat['y']-yc)**2) > r
def make_seg_mask(seg, grow_sig=6.0, mask_thresh=0.01, mask_max=1000.0):
"""
Make mask from segmentation image. The mask is convolved with
a Gaussian to "grow the mask".
Parameters
----------
seg : `~numpy.ndarray`
Segmentation map from SEP.
grow_sig : float, optional
Sigma of Gaussian kernel in pixels.
mask_thresh : float, optional
All pixels above this value will be masked.
mask_max : float, optional
All pixels >0 will be set to this value
prior to the convolution.
Returns
-------
mask : `~numpy.ndarray`
Mask with same shape as seg.
"""
mask = seg.copy()
mask[mask>0] = mask_max
mask = ndimage.gaussian_filter(mask, sigma=grow_sig)
mask = mask > (mask_max*mask_thresh)
return mask.astype(int)
def make_obj_mask(cat, img_shape, grow_r=1.0):
"""
Use SEP to build a mask based on objects in input catalog.
Parameters
----------
cat : astropy.table.Table
Source catalog form SEP.
img_shape : array-like
The shape of the image to be masked.
grow_r : float, optional
Fraction to grow the objects sizes.
Returns
-------
mask : `~numpy.ndarray`
Mask with same shape as img_shape.
"""
mask = np.zeros(img_shape, dtype='uint8')
sep.mask_ellipse(mask, cat['x'], cat['y'], cat['a'],
cat['b'], cat['theta'], grow_r)
return mask
def meas_back(img, backsize, backffrac=0.5, mask=None, sub_from_img=True):
"""
Measure the sky background of image.
Parameters
----------
img : `~numpy.ndarray`
2D numpy array of image.
backsize : int
Size of background boxes in pixels.
backffrac : float, optional
The fraction of background box size for the
filter size for smoothing the background.
mask : `~numpy.ndarray`, optional
Mask array for pixels to exclude from background
estimation.
sub_from_img : bool, optional
If True, also return background subtracted image.
Returns
-------
bkg : sep.Background object
See SEP documentation for methods & attributes.
img_bsub : `~numpy.ndarray`, if sub_from_img is True
"""
img = _byteswap(img)
mask = mask if mask is None else mask.astype(bool)
bw = bh = backsize
fw = fh = int(backffrac*backsize)
bkg = sep.Background(img, mask=mask, bw=bw, bh=bh, fw=fw, fh=fh)
if sub_from_img:
bkg.subfrom(img)
return bkg, img
else:
return bkg
def detect_sources(img, thresh, backsize, backffrac=0.5,
mask=None, return_all=False, kern_sig=5.0, **kwargs):
"""
Detect sources to construct a mask for photometry.
Parameters
----------
img : `~numpy.ndarray`
Image to be masked.
thresh : float
Detection threshold with respect to background
for source extraction.
backsize : int
Size of background boxes in pixels.
backffrac : float, optional
The fraction of background box size for the
filter size for smoothing the background.
mask : `~numpy.ndarray`, optional
Mask to apply before background estimation.
Must have same shape as img.
return_all : bool, optional
If True, return the catalog objects, seg map,
background image, and the background subtracted
image.
kern_sig : float, optional
Sigma of smoothing Gaussian in pixels.
kwargs : dict, optional
Keyword args for sep.extract.
Returns
-------
obj : astropy.table.Table
Source catalog from SEP.
seg : `~numpy.ndarray`
Segmentation map from the source extraction.
Same shape as input image.
bck : `~numpy.ndarray`, if return_all=True
Background image measured by SEP.
img : `~numpy.ndarray`, if return_all=True
Background subtracted image.
"""
img = _byteswap(img)
if kern_sig:
kern = Gaussian2DKernel(kern_sig)
kern.normalize()
kern = kern.array
else:
kern = None
bkg, img = meas_back(img, backsize, backffrac, mask)
thresh *= bkg.globalrms
obj, seg = sep.extract(
img, thresh, segmentation_map=True, filter_kernel=kern, **kwargs)
return (obj, seg, bkg, img) if return_all else (obj, seg)
def elliptical_mask(shape, a, ellip=0., theta=0., center=None):
"""
Generate an elliptical mask, where the masked pixels equal 1 and
the unmasked pixels equal 0.
    Parameters
---------
shape : list-like of int
Shape of the mask.
a : float
Semi-major axis of the ellipse.
ellip : float, optional
Ellipticity of the ellipse.
theta : float, optional
Rotation angle in degrees, counterclockwise from the positive x-axis.
    center : list-like of float, optional
Center of the ellipse in image coordinates. If None, the center will be
assumed to be the center of `shape`.
Returns
-------
mask : `~numpy.ndarray`
Elliptical mask.
"""
mask = np.zeros(shape, dtype='uint8')
if not isiterable(a):
a = [a]
if not isiterable(ellip):
ellip = [ellip]
if not isiterable(theta):
theta = [theta]
    b = [aa * (1 - ee) for aa, ee in zip(a, ellip)]
    if center is not None:
        x = [center[0]]
        y = [center[1]]
    else:
        x = [shape[1] / 2]
        y = [shape[0] / 2]
sep.mask_ellipse(mask, x, y, a, b, np.deg2rad(theta))
return mask
def make_mask(image, thresh=1.5, backsize=110, backffrac=0.5,
out_fn=None, gal_pos='center', seg_rmin=100.0, obj_rmin=15.0,
grow_sig=6.0, mask_thresh=0.02, grow_obj=3.0, kern_sig=4.0,
sep_extract_kws={}):
"""
Generate a mask for galaxy photometry using SEP. Many of these
parameters are those of SEP, so see its documentation for
more info.
Parameters
----------
image : str or `~numpy.ndarray`
Image file name or image array.
thresh : float, optional
Detection threshold for source extraction.
backsize : int
Size of box for background estimation.
backffrac : float, optional
Fraction of backsize to make the background median filter.
gal_pos : array-like, optional
(x,y) position of galaxy in pixels. If 'center', the
center of the image is assumed.
seg_rmin : float, optional
Minimum radius with respect to gal_pos for the
segmentation mask.
obj_rmin : float, optional
Minimum radius with respect to gal_pos for the
object mask.
grow_sig : float, optional
Sigma of the Gaussian that the segmentation mask
is convolved with to 'grow' the mask.
mask_thresh : float, optional
All pixels above this threshold will be masked
in the seg mask.
grow_obj : float, optional
Fraction to grow the objects of the obj mask.
out_fn : string, optional
If not None, save the mask with this file name.
kern_sig: float, optional
Sigma (in pixels) of Gaussian for pre-source detection smoothing.
sep_extract_kws: dict, optional
Keywords from sep.extract.
Returns
-------
final_mask : `~numpy.ndarray`
        Final mask to apply to img, where 0 represents good pixels
        and 1 represents masked pixels. The final mask is the union of
        the segmentation and object masks.
"""
if type(image) == str:
img = fits.getdata(image)
else:
assert type(image) == np.ndarray, 'image must be str or ndarray'
img = image.copy()
if gal_pos=='center':
gal_x, gal_y = (img.shape[1]/2, img.shape[0]/2)
gal_pos = (gal_x, gal_y)
else:
gal_x, gal_y = gal_pos
#################################################################
# Detect sources in image to mask before we do photometry.
#################################################################
obj, seg, bkg, img = detect_sources(
img, thresh, backsize, backffrac,
None, True, kern_sig, **sep_extract_kws)
#################################################################
# Exclude objects inside seg_rmin and obj_rmin. Note that the
# segmentation label of the object at index i is i+1.
#################################################################
exclude_labels = np.where(~_outside_circle(obj, gal_x, gal_y, seg_rmin))[0]
exclude_labels += 1
for label in exclude_labels:
seg[seg==label] = 0
keepers = _outside_circle(obj, gal_x, gal_y, obj_rmin)
obj = obj[keepers]
#################################################################
# Generate segmentation and object masks and combine
#################################################################
seg_mask = make_seg_mask(seg, grow_sig, mask_thresh)
obj_mask = make_obj_mask(obj, img.shape, grow_obj)
final_mask = (seg_mask | obj_mask).astype(int)
if out_fn is not None:
fits.writeto(out_fn, final_mask, overwrite=True)
return final_mask
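# Hedged usage sketch: elliptical_mask is shown directly since it needs no
# input file; make_mask follows the same pattern but expects a FITS image,
# e.g. make_mask('cutout.fits', thresh=2.0, out_fn='cutout_mask.fits'),
# where 'cutout.fits' is a placeholder name.
if __name__ == '__main__':
    m = elliptical_mask((200, 200), a=40.0, ellip=0.5, theta=30.0)
    print('masked pixels:', int(m.sum()))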
``` |
{
"source": "johnnygreco/viz-inspect",
"score": 2
} |
#### File: vizinspect/backend/images.py
```python
import logging
from vizinspect import log_sub, log_fmt, log_date_fmt
DEBUG = False
if DEBUG:
level = logging.DEBUG
else:
level = logging.INFO
LOGGER = logging.getLogger(__name__)
logging.basicConfig(
level=level,
style=log_sub,
format=log_fmt,
datefmt=log_date_fmt,
)
LOGDEBUG = LOGGER.debug
LOGINFO = LOGGER.info
LOGWARNING = LOGGER.warning
LOGERROR = LOGGER.error
LOGEXCEPTION = LOGGER.exception
#############
## IMPORTS ##
#############
import os.path
import pathlib
import pickle
import numpy as np
import numpy.random as npr
import matplotlib
matplotlib.use('Agg')
matplotlib.rcParams['font.family'] = 'serif'
matplotlib.rcParams['mathtext.fontset'] = 'cm'
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
from vizinspect import bucketstorage
from .catalogs import get_object, get_objects
###########################
## LOADING GALAXY IMAGES ##
###########################
def load_galaxy_image(image_file,
local_imgdir,
bucket_client=None):
'''This loads a Galaxy image from a PNG into an array
readable by matplotlib.
If `image_file` starts with dos:// or s3://, this function will assume you
mean to download images from a remote bucket. It will then do the following:
- check for the image_file's existence in local_imgdir
- if found, will load it from there
- if not found, will download it from the specified bucket URL (this should
be in the image_file) and write it to the local_imgdir. This requires a
valid `bucketstorage.client` in `bucket_client`.
Parameters
----------
image_file : str
The name of the file to load.
local_imgdir : str
The local image directory to check for images in and to write them to.
bucket_client : bucketstorage.client instance
This is a client to connect to S3/DOS buckets and download files.
Returns
-------
image : np.array
This returns an image that's loadable directly using
`matplotlib.pyplot.imshow`.
'''
# check if the image is remote
if image_file.startswith('dos://') or image_file.startswith('s3://'):
bucket_imagepath = image_file.replace('dos://','').replace('s3://','')
bucket_name = os.path.dirname(bucket_imagepath)
file_name = os.path.basename(bucket_imagepath)
download_to = os.path.abspath(os.path.join(local_imgdir, file_name))
if os.path.exists(download_to):
use_image_file = download_to
LOGINFO('Using local cached copy of %s.' % image_file)
else:
use_image_file = bucketstorage.get_file(
bucket_name,
file_name,
download_to,
client=bucket_client
)
LOGINFO('Downloaded %s from remote.' % image_file)
else:
use_image_file = image_file
# finally, load the image
try:
image = mpimg.imread(use_image_file)
# touch this file so we know it was recently accessed and won't get
# evicted from the cache if it's accessed often
pathlib.Path(use_image_file).touch()
return image
except Exception as e:
LOGEXCEPTION('could not load the requested image: %s' % image_file)
return None
##################
## MAKING PLOTS ##
##################
def make_main_plot(
objectid,
dbinfo,
outdir,
plot_fontsize=15,
color_plot_xlim=None,
color_plot_ylim=None,
reff_plot_xlim=None,
reff_plot_ylim=None,
random_sample=None,
random_sample_percent=2.0,
save_random_sample=True,
bucket_client=None,
):
'''This generates the main plot.
Parameters
----------
objectid : int
The objectid of the object to make the plot for.
dbinfo : tuple
This is a tuple of two items:
- the database URL or the connection instance to use
- the database metadata object
If the database URL is provided, a new engine will be used. If the
connection itself is provided, it will be re-used.
outdir : str
The directory where the plot file will be written. This is also the
directory where images will be downloaded from a remote bucket.
plot_fontsize: int
The font-size of the plot to make in points.
color_plot_xlim : tuple of two ints or None
This sets the xlim of the color-color plot.
color_plot_ylim : tuple of two ints or None
This sets the ylim of the color-color plot.
reff_plot_xlim : tuple of two ints or None
This sets the xlim of the reff-mu plot.
reff_plot_ylim : tuple of two ints or None
This sets the ylim of the reff-mu plot.
random_sample : int
The number of objects to sample randomly from the database to make the
plot.
random_sample_percent: float or None
If this is provided, will be used preferentially over `random_sample` to
push the random sampling into the Postgres database itself. This must be
a float between 0.0 and 100.0 indicating the percentage of rows to
sample.
save_random_sample : bool
This saves the random sample to a pickle file. If
`random_sample_percent` is not None on a subsequent call to this
function, this function will attempt to load the random sample from the
saved pickle and use that instead of doing another resample. This should
save time when plotting.
bucket_client : bucketstorage.client instance
This is a client used to download files from S3/DOS.
Returns
-------
str
The path of the file where the plot was written to.
'''
# get this object's info
this_object = get_object(objectid, dbinfo)
if not this_object or len(this_object) == 0:
LOGERROR("No information found for objectid: %s" % objectid)
return None
this_gi_color = this_object[0]['extra_columns']['g-i']
this_gr_color = this_object[0]['extra_columns']['g-r']
this_r_e = this_object[0]['extra_columns']['flux_radius_ave_g']
this_mu_e_ave_forced_g = (
this_object[0]['extra_columns']['mu_ave_g']
)
this_object_image = this_object[0]['filepath']
fig = plt.figure(figsize=(10, 6))
adjust = dict(wspace=0.13,
hspace=0.25,
bottom=0.1,
top=0.97,
right=0.96,
left=-0.02)
grid = plt.GridSpec(2, 3, **adjust)
ax_img = fig.add_subplot(
grid[0:2, 0:2], xticks=[], yticks=[])
ax_top = fig.add_subplot(grid[0,2])
ax_bot = fig.add_subplot(grid[1,2])
# add in the image of the object
img = load_galaxy_image(this_object_image,
outdir,
bucket_client=bucket_client)
if img is not None:
# FIXME: check if the image's origin really is 0,0 in the
# bottom-left. If not, can remove origin kwarg below.
ax_img.imshow(img)
if random_sample_percent is not None:
random_sample = None
# check for the existence of the sample pickle
sample_picklef = os.path.join(
outdir,
'random-sample-percent-%.1f.pkl' % random_sample_percent
)
if os.path.exists(sample_picklef):
LOGINFO("Using cached random sample from %s" % sample_picklef)
with open(sample_picklef,'rb') as infd:
gi_color, gr_color, r_e, mu_e_ave_forced_g = pickle.load(
infd
)
else:
# get the info from the database
full_catalog, start_keyid, end_keyid = (
get_objects(
dbinfo,
getinfo='plotcols',
end_keyid=None,
random_sample_percent=random_sample_percent
)
)
gi_color = np.array([x[1] for x in full_catalog])
gr_color = np.array([x[2] for x in full_catalog])
r_e = np.array([x[3] for x in full_catalog])
mu_e_ave_forced_g = np.array([x[4] for x in full_catalog])
# write the random sampled arrays to the pickle file
with open(sample_picklef,'wb') as outfd:
pickle.dump((gi_color, gr_color, r_e, mu_e_ave_forced_g),
outfd,
pickle.HIGHEST_PROTOCOL)
# this is using numpy sampling
if random_sample is not None:
sample_index = npr.choice(
gi_color.size,
random_sample,
replace=False
)
sampled_gi_color = gi_color[sample_index]
sampled_gr_color = gr_color[sample_index]
sampled_re = r_e[sample_index]
sampled_mue = mu_e_ave_forced_g[sample_index]
else:
sampled_gi_color = gi_color
sampled_gr_color = gr_color
sampled_re = r_e
sampled_mue = mu_e_ave_forced_g
# make the color plot for all of the objects
ax_top.plot(
sampled_gi_color,
sampled_gr_color,
alpha=0.3,
rasterized=True,
linestyle='None',
marker='.',
ms=1,
)
ax_top.set_xlabel('$g-i$', fontsize=plot_fontsize)
ax_top.set_ylabel('$g-r$', fontsize=plot_fontsize)
if color_plot_xlim is not None:
ax_top.set_xlim(color_plot_xlim)
else:
ax_top.set_xlim(gi_color.min()-0.1,
gi_color.max()+0.1)
if color_plot_ylim is not None:
ax_top.set_ylim(color_plot_ylim)
else:
ax_top.set_ylim(gr_color.min()-0.1,
gr_color.max()+0.1)
# overplot the current object as a star
ax_top.plot(
this_gi_color,
this_gr_color,
linestyle='None',
ms=20,
markeredgecolor='k',
markerfacecolor='k',
marker='*'
)
# make the half-light radius and surface-brightness plot
ax_bot.plot(
sampled_re,
sampled_mue,
alpha=0.3,
rasterized=True,
linestyle='None',
marker='.',
ms=1,
)
ax_bot.set_xlabel(
r'$r_\mathrm{eff}\ \mathrm{[arcsec]}$',
fontsize=plot_fontsize)
ax_bot.set_ylabel(
r'$\langle\mu_e(g)\rangle\ \mathrm{[mag/arcsec^2]}$',
fontsize=plot_fontsize)
if reff_plot_xlim is not None:
ax_bot.set_xlim(reff_plot_xlim)
else:
ax_bot.set_xlim(0, 20)
if reff_plot_ylim is not None:
ax_bot.set_ylim(reff_plot_ylim)
else:
ax_bot.set_ylim(22, 28.5)
# overplot this object as a star
ax_bot.plot(
this_r_e,
this_mu_e_ave_forced_g,
linestyle='None',
ms=20,
markeredgecolor='k',
markerfacecolor='k',
marker='*'
)
outfile = os.path.join(
outdir,
'plot-objectid-{objectid}.png'.format(
objectid=objectid
)
)
fig.savefig(outfile,dpi=100)
plt.close('all')
return outfile
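# Hedged usage sketch for the image loader (the file name and cache directory
# below are placeholders): a plain local path is read directly, while a
# 'dos://' or 's3://' path would first be downloaded into local_imgdir using
# the supplied bucket client.
if __name__ == '__main__':
    arr = load_galaxy_image('object-12345.png', '/tmp/vizinspect-images')
    if arr is not None:
        print('loaded image with shape:', arr.shape)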
```
#### File: vizinspect/frontend/indexhandlers.py
```python
import logging
import numpy as np
from datetime import datetime
import copy
# for generating encrypted token information
from cryptography.fernet import Fernet
######################################
## CUSTOM JSON ENCODER FOR FRONTEND ##
######################################
# we need this to send objects with the following types to the frontend:
# - bytes
# - ndarray
import json
class FrontendEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.ndarray):
return obj.tolist()
elif isinstance(obj, datetime):
return obj.isoformat()
elif isinstance(obj, bytes):
return obj.decode()
elif isinstance(obj, complex):
return (obj.real, obj.imag)
elif (isinstance(obj, (float, np.float64, np.float_)) and
not np.isfinite(obj)):
return None
elif isinstance(obj, (np.int8, np.int16, np.int32, np.int64)):
return int(obj)
else:
return json.JSONEncoder.default(self, obj)
# this replaces the default encoder and makes it so Tornado will do the right
# thing when it converts dicts to JSON when a
# tornado.web.RequestHandler.write(dict) is called.
json._default_encoder = FrontendEncoder()
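# Quick sanity-check sketch of the patched encoder: with the default encoder
# replaced above, plain json.dumps now handles numpy arrays and datetimes.
if __name__ == '__main__':
    print(json.dumps({'arr': np.arange(3), 'when': datetime.utcnow()}))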
#############
## LOGGING ##
#############
# get a logger
LOGGER = logging.getLogger(__name__)
#####################
## TORNADO IMPORTS ##
#####################
from tornado import gen
from tornado.httpclient import AsyncHTTPClient
###################
## LOCAL IMPORTS ##
###################
from .basehandler import BaseHandler
#####################
## MAIN INDEX PAGE ##
#####################
class IndexHandler(BaseHandler):
'''This handles the index page.
This page shows the current project.
'''
def initialize(self,
currentdir,
templatepath,
assetpath,
executor,
basedir,
siteinfo,
authnzerver,
session_expiry,
fernetkey,
ratelimit,
cachedir):
'''
handles initial setup.
'''
self.currentdir = currentdir
self.templatepath = templatepath
self.assetpath = assetpath
self.executor = executor
self.basedir = basedir
self.siteinfo = siteinfo
self.authnzerver = authnzerver
self.session_expiry = session_expiry
self.fernetkey = fernetkey
self.ferneter = Fernet(fernetkey)
self.httpclient = AsyncHTTPClient(force_instance=True)
self.ratelimit = ratelimit
self.cachedir = cachedir
@gen.coroutine
def get(self):
'''This handles GET requests to the index page.
'''
redacted_siteinfo = copy.deepcopy(self.siteinfo)
del redacted_siteinfo['access_token']
del redacted_siteinfo['secret_key']
del redacted_siteinfo['region']
del redacted_siteinfo['endpoint']
self.render(
'index.html',
flash_messages=self.render_flash_messages(),
user_account_box=self.render_user_account_box(),
page_title='viz-inspect',
siteinfo=redacted_siteinfo,
current_user=self.current_user,
)
```
#### File: vizinspect/frontend/vizserver.py
```python
import logging
#############
## IMPORTS ##
#############
import os
import os.path
import signal
import time
import sys
import socket
import json
import multiprocessing as mp
from datetime import datetime
import subprocess
from functools import partial
import numpy as np
# setup signal trapping on SIGINT
def recv_sigint(signum, stack):
'''
handler function to receive and process a SIGINT
'''
raise KeyboardInterrupt
class FrontendEncoder(json.JSONEncoder):
'''
This handles encoding weird things.
'''
def default(self, obj):
if isinstance(obj, np.ndarray):
return obj.tolist()
elif isinstance(obj, set):
return list(obj)
elif isinstance(obj, datetime):
return obj.isoformat()
elif isinstance(obj, bytes):
return obj.decode()
elif isinstance(obj, complex):
return (obj.real, obj.imag)
elif (isinstance(obj, (float, np.float64, np.float_)) and
not np.isfinite(obj)):
return None
elif isinstance(obj, (np.int8, np.int16, np.int32, np.int64)):
return int(obj)
else:
return json.JSONEncoder.default(self, obj)
# this replaces the default encoder and makes it so Tornado will do the right
# thing when it converts dicts to JSON when a
# tornado.web.RequestHandler.write(dict) is called.
json._default_encoder = FrontendEncoder()
#####################
## TORNADO IMPORTS ##
#####################
# experimental, probably will remove at some point
try:
    import asyncio
    import uvloop
    asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
    HAVE_UVLOOP = True
    IOLOOP_SPEC = 'uvloop'
except Exception:
HAVE_UVLOOP = False
IOLOOP_SPEC = 'asyncio'
import tornado.ioloop
import tornado.httpserver
import tornado.web
import tornado.options
from tornado.options import define, options
###############################
### APPLICATION SETUP BELOW ###
###############################
modpath = os.path.abspath(os.path.dirname(__file__))
# define our commandline options
# the port to serve on
# indexserver will serve on 14005 by default
define('port',
default=14005,
help='Run on the given port.',
type=int)
# the address to listen on
define('serve',
default='0.0.0.0',
help='Bind to given address and serve content.',
type=str)
# whether to run in debugmode or not
define('debugmode',
default=0,
help='start up in debug mode if set to 1.',
type=int)
# number of background threads in the pool executor
define('backgroundworkers',
default=4,
help=('number of background workers to use '),
type=int)
# the template path
define('templatepath',
default=os.path.abspath(os.path.join(modpath,'templates')),
help=('Sets the tornado template path.'),
type=str)
# the assetpath
define('assetpath',
default=os.path.abspath(os.path.join(modpath,'static')),
help=('Sets the asset (server images, css, JS) path.'),
type=str)
# basedir is the directory where the server will work.
define('basedir',
default=os.getcwd(),
help=('The base work directory of server.'),
type=str)
## this tells the testserver about the backend authnzerver
define('authnzerver',
default='http://127.0.0.1:12690',
help=('This tells the server the address of '
'the local authentication and authorization server.'),
type=str)
## this tells the testserver about the default session expiry time in days
define('sessionexpiry',
default=30,
help=('This tells the server the session-expiry time in days.'),
type=int)
###########################
## DATABASE AND CATALOGS ##
###########################
define('catalogcsv',
default=None,
help=("This tells the server to load the provided catalog into the DB."),
type=str)
define('imagedir',
default=None,
help=("This tells the server where the HUGS images are."),
type=str)
define('flagkeys',
default='galaxy, candy, junk, tidal, outskirts, cirrus',
help=("This tells the server what object flags to use for the catalog."),
type=str)
define('firststart',
default=False,
help=("This tells the server to assume "
"that we're starting over from scratch "
"and to recreate all the DBs, etc.."),
type=bool)
###########
## UTILS ##
###########
def setup_worker(siteinfo):
'''This sets up the workers to ignore the INT signal, which is handled by
the main process.
Sets up the backend database instance. Also sets up the bucket client if
required.
'''
from ..backend import database
# unregister interrupt signals so they don't get to the worker
# and the executor can kill them cleanly (hopefully)
signal.signal(signal.SIGINT, signal.SIG_IGN)
# set up the database
currproc = mp.current_process()
# sets up the engine, connection, and metadata objects as process-local
# variables
currproc.engine, currproc.connection, currproc.metadata = (
database.get_vizinspect_db(
siteinfo['database_url'],
database.VIZINSPECT,
echo=True
)
)
if siteinfo['images_are_remote']:
from vizinspect import bucketstorage
currproc.bucket_client = bucketstorage.client(
(siteinfo['access_token'], siteinfo['secret_key']),
region=siteinfo['region'],
endpoint=siteinfo['endpoint']
)
else:
currproc.bucket_client = None
def close_database():
'''This is used to close the database when the worker loop
exits.
'''
currproc = mp.current_process()
if getattr(currproc, 'metadata', None):
del currproc.metadata
if getattr(currproc, 'connection', None):
currproc.connection.close()
del currproc.connection
if getattr(currproc, 'engine', None):
currproc.engine.dispose()
del currproc.engine
print('Shutting down database engine in process: %s' % currproc.name,
file=sys.stdout)
############
### MAIN ###
############
def main():
# parse the command line
tornado.options.parse_command_line()
DEBUG = True if options.debugmode == 1 else False
# get a logger
LOGGER = logging.getLogger(__name__)
if DEBUG:
LOGGER.setLevel(logging.DEBUG)
else:
LOGGER.setLevel(logging.INFO)
###################
## LOCAL IMPORTS ##
###################
from ..utils import ProcExecutor
###########################
## DEFINING URL HANDLERS ##
###########################
from ..authnzerver import authdb
from . import auth_handlers as ah
from . import indexhandlers as ih
from . import actionhandlers as actions
from . import admin_handlers as admin
###################
## SET UP CONFIG ##
###################
def periodic_cleanup_worker(imagedir=None, retention_days=7):
'''
This is a periodic worker to remove older images from imagedir.
'''
cmd = (
r"find {imagedir} -type f -name '*.png' "
r"-mtime +{mtime} -exec rm -v '{{}}' \;"
).format(imagedir=imagedir,
mtime=retention_days)
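        # e.g. with imagedir='/srv/viz-inspect-data' and retention_days=7
        # (placeholder values), the command expands to:
        #   find /srv/viz-inspect-data -type f -name '*.png' -mtime +7 \
        #       -exec rm -v '{}' \;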
try:
LOGGER.info("Deleting images older than %s days in %s." %
(retention_days, imagedir))
proc = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
ndeleted = len(proc.stdout.decode().split('\n'))
LOGGER.warning('%s files older than %s days deleted.' %
(ndeleted, retention_days))
except Exception:
LOGGER.exception('Could not delete old files.')
MAXWORKERS = options.backgroundworkers
# various directories we need
BASEDIR = os.path.abspath(options.basedir)
TEMPLATEPATH = os.path.abspath(options.templatepath)
ASSETPATH = os.path.abspath(options.assetpath)
CURRENTDIR = os.path.abspath(os.getcwd())
# get our secret keys
SESSIONSECRET = authdb.get_secret_token(
'SESSIONSECRET',
os.path.join(
BASEDIR,
'.server.secret-session'
),
LOGGER
)
FERNETSECRET = authdb.get_secret_token(
'FERNETSECRET',
os.path.join(
BASEDIR,
'.server.secret-fernet'
),
LOGGER
)
# check if there's a first_start_done file to see if we need to copy over a
# site-info.json and email-server.json file to the basedir. also copy over
# the example bits
first_start_done_file = os.path.join(options.basedir,
'.first_start_done')
first_start_done = os.path.exists(first_start_done_file)
# on the first start, the server should ask for a catalog CSV and the flag
# values
if not first_start_done or options.firststart:
import shutil
# copy over the site-info.json file
try:
shutil.copy(os.path.abspath(os.path.join(os.path.dirname(__file__),
'..',
'data',
'site-info.json')),
os.path.abspath(options.basedir))
except FileExistsError:
LOGGER.warning("site-info.json already exists "
"in the basedir. Not overwriting.")
# copy over the email-server.json file
try:
shutil.copy(os.path.abspath(os.path.join(os.path.dirname(__file__),
'..',
'data',
'email-server.json')),
os.path.abspath(options.basedir))
except FileExistsError:
LOGGER.warning("email-server.json already exists "
"in the basedir. Not overwriting.")
# make a default data directory
try:
os.makedirs(os.path.join(options.basedir,'viz-inspect-data'))
except FileExistsError:
LOGGER.warning("The output plot PNG directory already "
"exists in the basedir. Not overwriting.")
#
# now, get site specific info
#
siteinfojson = os.path.join(BASEDIR, 'site-info.json')
with open(siteinfojson,'r') as infd:
SITEINFO = json.load(infd)
# 0a. confirm the flags to be used for this project
LOGGER.info('Please confirm the object flags '
'that will be used for this project.')
flag_keys = input("Object flags [default: %s]: " %
options.flagkeys)
if not flag_keys or len(flag_keys.strip()) == 0:
set_flag_keys = options.flagkeys
else:
set_flag_keys = flag_keys
SITEINFO['flag_keys'] = set_flag_keys
        # 0b. confirm the good flags
LOGGER.info("Which object flags are associated with 'good' objects?")
good_flag_keys = input("Good object flags [default: galaxy, candy]: ")
        if not good_flag_keys or len(good_flag_keys.strip()) == 0:
set_good_flag_keys = 'galaxy, candy'
else:
set_good_flag_keys = good_flag_keys
SITEINFO['good_flag_keys'] = set_good_flag_keys
        # 0c. confirm the bad flags
LOGGER.info("Which object flags are associated with 'bad' objects?")
bad_flag_keys = input("Bad object flags [default: cirrus, junk, outskirts, tidal]: ")
        if not bad_flag_keys or len(bad_flag_keys.strip()) == 0:
set_bad_flag_keys = 'cirrus, junk, outskirts, tidal'
else:
set_bad_flag_keys = bad_flag_keys
SITEINFO['bad_flag_keys'] = set_bad_flag_keys
        # 0d. confirm how many good flags are needed for object completion
LOGGER.info("How many votes for 'good' flags are required to mark an "
"object as complete?")
max_good_votes = input("Maximum good flag votes [default: 2]: ")
        if not max_good_votes or len(max_good_votes.strip()) == 0:
set_max_good_votes = 2
else:
set_max_good_votes = int(max_good_votes)
if set_max_good_votes <= 0:
set_max_good_votes = 2
SITEINFO['max_good_votes'] = set_max_good_votes
        # 0e. confirm how many bad flags are needed for object completion
LOGGER.info("How many votes for 'bad' flags are required to mark an "
"object as complete?")
max_bad_votes = input("Maximum bad flag votes [default: 2]: ")
        if not max_bad_votes or len(max_bad_votes.strip()) == 0:
set_max_bad_votes = 2
else:
set_max_bad_votes = int(max_bad_votes)
if set_max_bad_votes <= 0:
set_max_bad_votes = 2
SITEINFO['max_bad_votes'] = set_max_bad_votes
        # 0f. confirm how many flags in total are allowed per object
LOGGER.info("How many total votes are required to mark an "
"object as complete?")
max_all_votes = input("Maximum flag votes [default: 3]: ")
        if not max_all_votes or len(max_all_votes.strip()) == 0:
set_max_all_votes = 3
else:
set_max_all_votes = int(max_all_votes)
if set_max_all_votes <= 0:
set_max_all_votes = 3
SITEINFO['max_all_votes'] = set_max_all_votes
# 1. check if the --catalogcsv arg is present
if (options.catalogcsv is not None and
os.path.exists(options.catalogcsv)):
LOGGER.info(
"Doing first time setup. "
"Loading provided catalog: %s into DB at %s" %
(options.catalogcsv,
SITEINFO['database_url'])
)
catalog_path = options.catalogcsv
else:
LOGGER.info("First time setup requires a catalog CSV to load.")
catalog_path = input("Catalog CSV location: ")
# 2. check if the --imagedir arg is present
if (options.imagedir is not None and
os.path.exists(options.imagedir)):
LOGGER.info(
"Using dir: %s as the location of the HUGS images." %
(options.imagedir,)
)
image_dir = options.imagedir
else:
LOGGER.info(
"First time setup requires an "
"image directory to load HUGS images from. "
"If your images are in a Digital Ocean Spaces bucket, "
"use 'dos://<bucket-name>' here. "
"If your images are on AWS S3, use 's3://<bucket-name>' here."
)
image_dir = input("HUGS image directory location: ")
# 3. confirm the database_url in the site-info.json file
LOGGER.info('Please confirm the database URL '
'used to connect to the PostgreSQL DB server.')
database_url = input("Database URL [default: %s]: " %
SITEINFO['database_url'])
if not database_url or len(database_url.strip()) == 0:
set_database_url = SITEINFO['database_url']
else:
set_database_url = database_url
SITEINFO['database_url'] = set_database_url
# 4. if the image directory indicates it's dos:// or s3://, ask for
# credentials for the service, the
if image_dir.startswith('s3://') or image_dir.startswith('dos://'):
LOGGER.info(
"Image directory is '%s'. "
"An access token and secret key pair is required." % image_dir
)
access_token = input("Access Token for '%s': " % image_dir)
secret_key = input("Secret Key for '%s': " % image_dir)
LOGGER.info("We also need a region and endpoint URL.")
default_dos_region = 'sfo2'
default_dos_endpoint = 'https://sfo2.digitaloceanspaces.com'
default_s3_region = 'us-east-1'
default_s3_endpoint = 'https://s3.amazonaws.com'
if image_dir.startswith('dos://'):
region = input(
"Bucket region [default: %s]: " % default_dos_region
)
if not region or len(region.strip()) == 0:
region = default_dos_region
endpoint = input(
"Bucket endpoint [default: %s]: " % default_dos_endpoint
)
if not endpoint or len(endpoint.strip()) == 0:
endpoint = default_dos_endpoint
elif image_dir.startswith('s3://'):
region = input("Region [default: %s]: " % default_s3_region)
if not region or len(region.strip()) == 0:
region = default_s3_region
endpoint = input(
"Endpoint [default: %s]: " % default_s3_endpoint
)
if not endpoint or len(endpoint.strip()) == 0:
endpoint = default_s3_endpoint
# update the site-info.json file with these values
SITEINFO['access_token'] = access_token
SITEINFO['secret_key'] = secret_key
SITEINFO['region'] = region
SITEINFO['endpoint'] = endpoint
SITEINFO['images_are_remote'] = True
else:
SITEINFO['access_token'] = None
SITEINFO['secret_key'] = None
SITEINFO['region'] = None
SITEINFO['endpoint'] = None
SITEINFO['images_are_remote'] = False
# ask for the length of time in days that downloaded images
# and generated plots will be left around
LOGGER.info("To save local disk space, "
"older generated plots and downloaded "
"remote images will be periodically deleted.")
default_retention_days = 15
retention_days = input(
"How long should these be kept on disk? [in days, default: %s]: " %
default_retention_days
)
if not retention_days or len(retention_days.strip()) == 0:
retention_days = default_retention_days
else:
retention_days = int(retention_days)
SITEINFO['retention_days'] = retention_days
# ask for the sampling percentage of the object rows to use for the
# plots
LOGGER.info("To make the server more responsive, "
"only a certain percentage of objects in the "
"database will be used to make plots.")
default_random_sample_percent = 2.0
random_sample_percent = input(
"Percentage of rows to randomly sample for plots "
"[1.0-100.0, default: %.1f]: " %
default_random_sample_percent
)
if not random_sample_percent or len(random_sample_percent.strip()) == 0:
random_sample_percent = default_random_sample_percent
else:
random_sample_percent = float(random_sample_percent)
SITEINFO['random_sample_percent'] = random_sample_percent
# ask for the rows per page
LOGGER.info("To make the server more responsive, "
"object lists will be paginated.")
default_rows_per_page = 100
rows_per_page = input(
"Number of objects per page to use "
"[integer, default: %i]: " %
default_rows_per_page
)
if not rows_per_page or len(rows_per_page.strip()) == 0:
rows_per_page = default_rows_per_page
else:
            rows_per_page = int(rows_per_page)
SITEINFO['rows_per_page'] = rows_per_page
#
# done with config
#
# update the site-info.json file
with open(siteinfojson,'w') as outfd:
json.dump(SITEINFO, outfd, indent=2)
# make it readable/writeable by this user only
os.chmod(siteinfojson, 0o100600)
# now we have the catalog CSV and image dir
# load the objects into the DB
from ..backend import database, catalogs
try:
database.new_vizinspect_db(set_database_url,
database.VIZINSPECT)
except Exception:
LOGGER.warning("The required tables already exist. "
"Will add this catalog to them.")
LOGGER.info("Loading objects. Using provided flag keys: %s" %
options.flagkeys)
# ask if existing objects should be overwritten
overwrite_ask = input(
"Should existing objects be overwritten? [Y/n]: "
)
if not overwrite_ask or len(overwrite_ask.strip()) == 0:
overwrite = True
elif overwrite_ask.strip().lower() == 'n':
overwrite = False
else:
overwrite = True
loaded = catalogs.load_catalog(
catalog_path,
image_dir,
(set_database_url,
database.VIZINSPECT),
overwrite=overwrite,
flags_to_use=[
x.strip() for x in SITEINFO['flag_keys'].split(',')
]
)
if loaded:
LOGGER.info("Objects loaded into catalog successfully.")
#
# end of first time setup
#
# set the first start done flag
with open(first_start_done_file,'w') as outfd:
outfd.write('server set up in this directory on %s UTC\n' %
datetime.utcnow().isoformat())
LOGGER.info("First run setup for vizserver complete.")
#
# now, get site specific info
#
siteinfojson = os.path.join(BASEDIR, 'site-info.json')
with open(siteinfojson,'r') as infd:
SITEINFO = json.load(infd)
# get the email info file if it exists
if ('email_settings_file' in SITEINFO and
os.path.exists(os.path.abspath(SITEINFO['email_settings_file']))):
with open(SITEINFO['email_settings_file'],'r') as infd:
email_settings = json.load(infd)
if email_settings['email_server'] != "smtp.emailserver.org":
SITEINFO.update(email_settings)
LOGGER.info('Site info: email server to use: %s:%s.' %
(email_settings['email_server'],
email_settings['email_port']))
LOGGER.info('Site info: email server sender to use: %s.' %
email_settings['email_sender'])
else:
LOGGER.warning('Site info: no email server is set up.')
SITEINFO['email_server'] = None
else:
LOGGER.warning('Site info: no email server is set up.')
SITEINFO['email_server'] = None
# get the user login settings
if SITEINFO['email_server'] is None:
LOGGER.warning('Site info: '
'no email server set up, '
'user logins cannot be enabled.')
SITEINFO['logins_allowed'] = False
elif ('logins_allowed' in SITEINFO and
SITEINFO['logins_allowed'] and
SITEINFO['email_server'] is not None):
LOGGER.info('Site info: user logins are allowed.')
elif ('logins_allowed' in SITEINFO and (not SITEINFO['logins_allowed'])):
LOGGER.warning('Site info: user logins are disabled.')
else:
SITEINFO['logins_allowed'] = False
LOGGER.warning('Site info: '
'settings key "logins_allowed" not found, '
'disabling user logins.')
# get the user signup and signin settings
if SITEINFO['email_server'] is None:
LOGGER.warning('Site info: '
'no email server set up, '
'user signups cannot be enabled.')
SITEINFO['signups_allowed'] = False
elif ('signups_allowed' in SITEINFO and
SITEINFO['signups_allowed'] and
SITEINFO['email_server'] is not None):
LOGGER.info('Site info: user signups are allowed.')
elif 'signups_allowed' in SITEINFO and not SITEINFO['signups_allowed']:
LOGGER.warning('Site info: user signups are disabled.')
else:
SITEINFO['signups_allowed'] = False
LOGGER.warning('Site info: '
'settings key "signups_allowed" not found, '
'disabling user signups.')
#
# authentication server options
#
AUTHNZERVER = options.authnzerver
SESSION_EXPIRY = options.sessionexpiry
#
# rate limit options
#
RATELIMIT = SITEINFO['rate_limit_active']
CACHEDIR = SITEINFO['cache_location']
###########################
## WORK AROUND APPLE BUG ##
###########################
# here, we have to initialize networking in the main thread
# before forking for MacOS. see:
# https://bugs.python.org/issue30385#msg293958
# if this doesn't work, Python will segfault.
# the workaround noted in the report is to launch
# lcc-server like so:
# env no_proxy='*' indexserver
if sys.platform == 'darwin':
import requests
requests.get('http://captive.apple.com/hotspot-detect.html')
####################################
## PERSISTENT BACKGROUND EXECUTOR ##
####################################
#
# this is the background executor we'll pass over to the handler
#
EXECUTOR = ProcExecutor(max_workers=MAXWORKERS,
initializer=setup_worker,
initargs=(SITEINFO,),
finalizer=close_database)
##################
## URL HANDLERS ##
##################
HANDLERS = [
#################
## BASIC STUFF ##
#################
# index page
(r'/',
ih.IndexHandler,
{'currentdir':CURRENTDIR,
'templatepath':TEMPLATEPATH,
'assetpath':ASSETPATH,
'executor':EXECUTOR,
'basedir':BASEDIR,
'siteinfo':SITEINFO,
'authnzerver':AUTHNZERVER,
'session_expiry':SESSION_EXPIRY,
'fernetkey':FERNETSECRET,
'ratelimit':RATELIMIT,
'cachedir':CACHEDIR}),
###################################
## STATIC FILE DOWNLOAD HANDLERS ##
###################################
# this handles static file downloads for collection info
(r'/viz-inspect-data/(.*)',
tornado.web.StaticFileHandler,
{'path':SITEINFO['data_path']}),
##########################
## ACTUAL WORK HANDLERS ##
##########################
(r'/api/list-objects',
actions.ObjectListHandler,
{'currentdir':CURRENTDIR,
'templatepath':TEMPLATEPATH,
'assetpath':ASSETPATH,
'executor':EXECUTOR,
'basedir':BASEDIR,
'siteinfo':SITEINFO,
'authnzerver':AUTHNZERVER,
'session_expiry':SESSION_EXPIRY,
'fernetkey':FERNETSECRET,
'ratelimit':RATELIMIT,
'cachedir':CACHEDIR}),
(r'/api/load-object/(\d{1,10})',
actions.LoadObjectHandler,
{'currentdir':CURRENTDIR,
'templatepath':TEMPLATEPATH,
'assetpath':ASSETPATH,
'executor':EXECUTOR,
'basedir':BASEDIR,
'siteinfo':SITEINFO,
'authnzerver':AUTHNZERVER,
'session_expiry':SESSION_EXPIRY,
'fernetkey':FERNETSECRET,
'ratelimit':RATELIMIT,
'cachedir':CACHEDIR}),
(r'/api/save-object/(\d{1,10})',
actions.SaveObjectHandler,
{'currentdir':CURRENTDIR,
'templatepath':TEMPLATEPATH,
'assetpath':ASSETPATH,
'executor':EXECUTOR,
'basedir':BASEDIR,
'siteinfo':SITEINFO,
'authnzerver':AUTHNZERVER,
'session_expiry':SESSION_EXPIRY,
'fernetkey':FERNETSECRET,
'ratelimit':RATELIMIT,
'cachedir':CACHEDIR}),
########################
## AUTH RELATED PAGES ##
########################
# this is the login page
(r'/users/login',
ah.LoginHandler,
{'fernetkey':FERNETSECRET,
'executor':EXECUTOR,
'authnzerver':AUTHNZERVER,
'session_expiry':SESSION_EXPIRY,
'siteinfo':SITEINFO,
'ratelimit':RATELIMIT,
'cachedir':CACHEDIR}),
# this is the logout page
(r'/users/logout',
ah.LogoutHandler,
{'fernetkey':FERNETSECRET,
'executor':EXECUTOR,
'authnzerver':AUTHNZERVER,
'session_expiry':SESSION_EXPIRY,
'siteinfo':SITEINFO,
'ratelimit':RATELIMIT,
'cachedir':CACHEDIR}),
# this is the new user page
(r'/users/new',
ah.NewUserHandler,
{'fernetkey':FERNETSECRET,
'executor':EXECUTOR,
'authnzerver':AUTHNZERVER,
'session_expiry':SESSION_EXPIRY,
'siteinfo':SITEINFO,
'ratelimit':RATELIMIT,
'cachedir':CACHEDIR}),
# this is the verification page for verifying email addresses
(r'/users/verify',
ah.VerifyUserHandler,
{'fernetkey':FERNETSECRET,
'executor':EXECUTOR,
'authnzerver':AUTHNZERVER,
'session_expiry':SESSION_EXPIRY,
'siteinfo':SITEINFO,
'ratelimit':RATELIMIT,
'cachedir':CACHEDIR}),
# this is step 1 page for forgotten passwords
(r'/users/forgot-password-step1',
ah.ForgotPassStep1Handler,
{'fernetkey':FERNETSECRET,
'executor':EXECUTOR,
'authnzerver':AUTHNZERVER,
'session_expiry':SESSION_EXPIRY,
'siteinfo':SITEINFO,
'ratelimit':RATELIMIT,
'cachedir':CACHEDIR}),
# this is the verification page for verifying email addresses
(r'/users/forgot-password-step2',
ah.ForgotPassStep2Handler,
{'fernetkey':FERNETSECRET,
'executor':EXECUTOR,
'authnzerver':AUTHNZERVER,
'session_expiry':SESSION_EXPIRY,
'siteinfo':SITEINFO,
'ratelimit':RATELIMIT,
'cachedir':CACHEDIR}),
# this is the password change page
(r'/users/password-change',
ah.ChangePassHandler,
{'fernetkey':FERNETSECRET,
'executor':EXECUTOR,
'authnzerver':AUTHNZERVER,
'session_expiry':SESSION_EXPIRY,
'siteinfo':SITEINFO,
'ratelimit':RATELIMIT,
'cachedir':CACHEDIR}),
# this is the user-prefs page
(r'/users/home',
ah.UserHomeHandler,
{'fernetkey':FERNETSECRET,
'executor':EXECUTOR,
'authnzerver':AUTHNZERVER,
'session_expiry':SESSION_EXPIRY,
'siteinfo':SITEINFO,
'ratelimit':RATELIMIT,
'cachedir':CACHEDIR}),
# this is the user-delete page
(r'/users/delete',
ah.DeleteUserHandler,
{'fernetkey':FERNETSECRET,
'executor':EXECUTOR,
'authnzerver':AUTHNZERVER,
'session_expiry':SESSION_EXPIRY,
'siteinfo':SITEINFO,
'ratelimit':RATELIMIT,
'cachedir':CACHEDIR}),
####################
## ADMIN HANDLERS ##
####################
# this is the admin index page
(r'/admin',
admin.AdminIndexHandler,
{'fernetkey':FERNETSECRET,
'executor':EXECUTOR,
'authnzerver':AUTHNZERVER,
'basedir':BASEDIR,
'session_expiry':SESSION_EXPIRY,
'siteinfo':SITEINFO,
'ratelimit':RATELIMIT,
'cachedir':CACHEDIR}),
# this handles email settings updates
(r'/admin/email',
admin.EmailSettingsHandler,
{'fernetkey':FERNETSECRET,
'executor':EXECUTOR,
'authnzerver':AUTHNZERVER,
'basedir':BASEDIR,
'session_expiry':SESSION_EXPIRY,
'siteinfo':SITEINFO,
'ratelimit':RATELIMIT,
'cachedir':CACHEDIR}),
# this handles user updates
(r'/admin/users',
admin.UserAdminHandler,
{'fernetkey':FERNETSECRET,
'executor':EXECUTOR,
'authnzerver':AUTHNZERVER,
'basedir':BASEDIR,
'session_expiry':SESSION_EXPIRY,
'siteinfo':SITEINFO,
'ratelimit':RATELIMIT,
'cachedir':CACHEDIR}),
]
########################
## APPLICATION SET UP ##
########################
app = tornado.web.Application(
static_path=ASSETPATH,
handlers=HANDLERS,
template_path=TEMPLATEPATH,
static_url_prefix='/static/',
compress_response=True,
cookie_secret=SESSIONSECRET,
xsrf_cookies=True,
xsrf_cookie_kwargs={'samesite':'Lax'},
debug=DEBUG,
)
# FIXME: consider using this instead of handlers=HANDLERS above.
# http://www.tornadoweb.org/en/stable/guide/security.html#dns-rebinding
# FIXME: how does this work for X-Real-Ip and X-Forwarded-Host?
# if options.serve == '127.0.0.1':
# app.add_handlers(r'(localhost|127\.0\.0\.1)', HANDLERS)
# else:
# fqdn = socket.getfqdn()
# ip = options.serve.replace('.','\.')
# app.add_handlers(r'({fqdn}|{ip})'.format(fqdn=fqdn,ip=ip), HANDLERS)
# start up the HTTP server and our application. xheaders = True turns on
# X-Forwarded-For support so we can see the remote IP in the logs
http_server = tornado.httpserver.HTTPServer(app, xheaders=True)
######################
## start the server ##
######################
# make sure the port we're going to listen on is ok
# inspired by how Jupyter notebook does this
portok = False
serverport = options.port
maxtries = 10
thistry = 0
while not portok and thistry < maxtries:
try:
http_server.listen(serverport, options.serve)
portok = True
        except socket.error:
            LOGGER.warning('%s:%s is already in use, trying port %s' %
                           (options.serve, serverport, serverport + 1))
            serverport = serverport + 1
            thistry = thistry + 1
if not portok:
LOGGER.error('could not find a free port after %s tries, giving up' %
maxtries)
sys.exit(1)
LOGGER.info('Started vizserver. listening on http://%s:%s' %
(options.serve, serverport))
LOGGER.info('Background worker processes: %s, IOLoop in use: %s' %
(MAXWORKERS, IOLOOP_SPEC))
LOGGER.info('The current base directory is: %s' % os.path.abspath(BASEDIR))
# register the signal callbacks
signal.signal(signal.SIGINT,recv_sigint)
signal.signal(signal.SIGTERM,recv_sigint)
# start the IOLoop and begin serving requests
try:
loop = tornado.ioloop.IOLoop.current()
periodic_clean = partial(
periodic_cleanup_worker,
imagedir=SITEINFO['data_path'],
retention_days=SITEINFO['retention_days']
)
# run once at start
periodic_clean()
# add our periodic callback for the imagedir cleanup
# runs every 24 hours
periodic_imagedir_clean = tornado.ioloop.PeriodicCallback(
periodic_clean,
86400000.0,
jitter=0.1,
)
periodic_imagedir_clean.start()
# start the IOLoop
loop.start()
except KeyboardInterrupt:
LOGGER.info('received Ctrl-C: shutting down...')
loop.stop()
# close down the processpool
EXECUTOR.shutdown()
time.sleep(2)
# run the server
if __name__ == '__main__':
main()
``` |
{
"source": "JohnnyHao/Py-Spider",
"score": 3
} |
#### File: JohnnyHao/Py-Spider/SpiderDouban.py
```python
import urllib2
import urllib
from BeautifulSoup import BeautifulSoup
def getAllImageLink():
html = urllib2.urlopen('http://www.dbmeizi.com').read()
soup = BeautifulSoup(html)
liResult = soup.findAll('li',attrs={"class":"span3"})
for li in liResult:
imageEntityArray = li.findAll('img')
for image in imageEntityArray:
link = image.get('data-src')
imageName = image.get('data-id')
filesavepath = '/Users/mll-001/Documents/Study_Space/Python-Git/Py-Spider/DoubanImage/%s.jpg' % imageName
urllib.urlretrieve(link,filesavepath)
print filesavepath
getAllImageLink()
```
#### File: JohnnyHao/Py-Spider/Spider.py
```python
# Program: Baidu Tieba crawler
# Version: 0.1
# Author: why
# Date: 2013-05-14
# Language: Python 2.7
# Usage: enter a paginated thread URL with the trailing page number removed, then set the start and end page numbers.
# Function: downloads every page in the given range and saves each one as an HTML file.
#---------------------------------------
import string, urllib2
# define the Baidu Tieba download function
def baidu_tieba(url,begin_page,end_page):
for i in range(begin_page, end_page+1):
        sName = string.zfill(i,5) + '.html'  # zero-pad the page number to five digits for the file name
        print 'Downloading page ' + str(i) + ' and saving it as ' + sName + '......'
f = open(sName,'w+')
m = urllib2.urlopen(url + str(i)).read()
f.write(m)
f.close()
#-------- enter the parameters here ------------------
# this is the URL of a thread from the Shandong University Baidu Tieba
#bdurl = 'http://tieba.baidu.com/p/2296017831?pn='
#iPostBegin = 1
#iPostEnd = 10
bdurl = str(raw_input('Enter the Tieba thread URL, without the number after pn=:\n'))
begin_page = int(raw_input('Enter the start page number:\n'))
end_page = int(raw_input('Enter the end page number:\n'))
#-------- enter the parameters here ------------------
#调用
baidu_tieba(bdurl,begin_page,end_page)
``` |
{
"source": "johnnyhchen/EPEtutorials",
"score": 2
} |
#### File: notebooks/OtherFiles/test_stats.py
```python
from nose.tools import assert_equal, assert_almost_equal, assert_true, \
assert_false, assert_raises, assert_is_instance
from stats import mean, median, mode, std, var
# mean tests
def test_mean1():
obs = mean([0, 0, 0, 0])
exp = 0
assert_equal(obs, exp)
obs = mean([0, 200])
exp = 100
assert_equal(obs, exp)
obs = mean([0, -200])
exp = -100
assert_equal(obs, exp)
obs = mean([0])
exp = 0
assert_equal(obs, exp)
def test_floating_mean1():
obs = mean([1, 2])
exp = 1.5
assert_equal(obs, exp)
# median tests
def test_median1():
obs = median([0, 0, 0, 0])
exp = 0
assert_equal(obs, exp)
obs = median([0, 0, 0, 1])
exp = 0
assert_equal(obs, exp)
obs = median([0, 0, 1, 0, 0])
exp = 0
assert_equal(obs, exp)
obs = median([0, 1, 2, 3, 4])
exp = 2
assert_equal(obs, exp)
obs = median([0, 1, -1, 2, 3])
exp = 1
assert_equal(obs, exp)
obs = median([0, 200])
exp = 100
assert_equal(obs, exp)
obs = median([0, -200])
exp = -100
assert_equal(obs, exp)
obs = median([0])
exp = 0
assert_equal(obs, exp)
def test_floating_median1():
    obs = median([1, 2])
exp = 1.5
assert_equal(obs, exp)
# FIXME Put Mode tests here
def test_std1():
obs = std([0.0, 2.0])
exp = 1.0
assert_equal(obs, exp)
def test_std2():
obs = std([])
exp = 0.0
assert_equal(obs, exp)
def test_std3():
obs = std([0.0, 4.0])
exp = 2.0
assert_equal(obs, exp)
def test_std4():
obs = std([1.0, 3.0])
exp = 1.0
assert_equal(obs, exp)
def test_std5():
obs = std([1.0, 1.0, 1.0])
exp = 0.0
assert_equal(obs, exp)
def test_std6():
obs = std([1e500])
exp = NotImplemented
assert_equal(obs, exp)
def test_std7():
obs = std([0.0, 1e4242])
exp = NotImplemented
assert_equal(obs, exp)
# FIXME Put Variance tests here
``` |
{
"source": "johnnyheineken/prague_air_quality",
"score": 3
} |
#### File: prague_air_quality/bot/main.py
```python
import json
import random
from datetime import datetime
import pandas as pd
import requests
from requests_oauthlib import OAuth1Session
from bot.helpers import create_logger
from bot.messages import AQI_MESSAGES, AQI_VALUES
REFERENCE_VALUES = {
'co': 4000,
'no2': 25,
'o3': 100,
'so2': 40,
'pm2_5': 15,
'pm10': 45
}
NAME_MAPPING = {
'co': 'CO',
'no': 'NO',
'no2': 'NO₂',
'o3': 'O₃',
'so2': 'SO₂',
'pm2_5': 'PM₂.₅',
'pm10': 'PM₁₀'
}
class AirQualityBot:
def __init__(self, credentials, lat='50.042', lon='14.411', mock=False, logger=None):
self.credentials = credentials
self.lat = lat
self.lon = lon
if logger is None:
logger = create_logger('AirQuality', keboola=False)
self.logger = logger
self.mock = mock
def get_aqi_data(self):
token = self.credentials.AirQualityCredentials.TOKEN
address = f'http://api.airvisual.com/v2/nearest_city' \
f'?lat={self.lat}&lon={self.lon}' \
f'&key={token}'
self.logger.info(f'Sending GET to {address}')
r = requests.get(address)
result = {"aqi": r.json()['data']['current']['pollution']['aqius']}
return result
def get_ow_data(self):
token = self.credentials.OpenWeatherCredentials.TOKEN
address = f"http://api.openweathermap.org/data/2.5/air_pollution" \
f"?lat={self.lat}&lon={self.lon}" \
f"&appid={token}"
self.logger.info(f'Sending GET to {address}')
r = requests.get(address)
# {'co': 507.36,
# 'no': 14.08,
# 'no2': 20.22,
# 'o3': 4.2,
# 'so2': 5.25,
# 'pm2_5': 26.43,
# 'pm10': 31.59,
# 'nh3': 1.58}
result = r.json()['list'][0]['components']
return result
def send_tweet(self, message):
if self.mock:
print(message)
return
oauth_tokens = self.credentials.TwitterCredentials.OAUTH_TOKENS
consumer_key = self.credentials.TwitterCredentials.CONSUMER_KEY
consumer_secret = self.credentials.TwitterCredentials.CONSUMER_SECRET
payload = {"text": message}
access_token = oauth_tokens["oauth_token"]
access_token_secret = oauth_tokens["oauth_token_secret"]
address = "https://api.twitter.com/2/tweets"
# Make the request
oauth = OAuth1Session(
consumer_key,
client_secret=consumer_secret,
resource_owner_key=access_token,
resource_owner_secret=access_token_secret,
)
# Making the request
self.logger.info(f'Sending POST to {address} with {payload}')
response = oauth.post(
address,
json=payload,
)
if response.status_code != 201:
raise Exception(
"Request returned an error: {} {}".format(response.status_code, response.text)
)
print("Response code: {}".format(response.status_code))
# Saving the response as JSON
json_response = response.json()
print(json.dumps(json_response, indent=4, sort_keys=True))
def get_reference_value_message(self, ow_data):
references = []
for name, reference_value in REFERENCE_VALUES.items():
value = ow_data[name]
if (multiple := value / reference_value) > 1.5:
references += [f'{NAME_MAPPING[name]} ({multiple:.1f}x)']
if references:
final_message = '\nPřekračující hodnoty jsou: ' + ', '.join(references)
else:
final_message = ''
return final_message
def create_message(self, aq_data, ow_data):
aqi = aq_data['aqi']
message = random.choice(AQI_MESSAGES).format(aqi=aqi)
message += self.get_reference_value_message(ow_data)
for aqi_value, aqi_message in AQI_VALUES.items():
if aqi_value <= aqi:
message += '\n\n' + random.choice(aqi_message)
break
return message
@staticmethod
def save_data(ow_data, aq_data):
data = ow_data
data['aqius'] = aq_data['aqi']
data['timestamp'] = datetime.now()
data = pd.Series(data)
data.index.name = 'stat'
data = pd.DataFrame(data).T
data.to_csv('out/tables/current_data.csv', index=False)
def run(self):
try:
aq_data = self.get_aqi_data()
ow_data = self.get_ow_data()
message = self.create_message(aq_data, ow_data)
self.send_tweet(message=message)
self.save_data(ow_data, aq_data)
except:
self.send_tweet(message='Man, something broke. @janhynek should do something about that')
raise
``` |
{
"source": "JohnnyHendrix/RSA",
"score": 3
} |
#### File: JohnnyHendrix/RSA/LicenseKeyGenerator.py
```python
from Crypto.PublicKey import RSA
from Crypto.Signature import PKCS1_v1_5
from Crypto.Hash import SHA512
from base64 import b64encode, b64decode
class LicenseKeyGenerator:
key = "YOUR_PRIVATE_KEY_HERE"
def __init__(self, private_key):
self.key = private_key
    def sign_message(self, message):
        # sign the SHA-512 digest of the message with this instance's private key
        rsa_key = RSA.importKey(self.key)
        signer = PKCS1_v1_5.new(rsa_key)
        digest = SHA512.new(message)
        sign = signer.sign(digest)
        return b64encode(sign)
    def verify_signature(self, signature, message_to_compare):
        # hash the message the same way before checking it against the signature
        rsa_key = RSA.importKey(self.key)
        signer = PKCS1_v1_5.new(rsa_key)
        digest = SHA512.new(message_to_compare)
        if signer.verify(digest, b64decode(signature)):
            return True
        return False
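# Hypothetical usage sketch (added for illustration, not part of the original
# file): generate a throwaway RSA key pair and round-trip an invented license
# payload through sign/verify.
if __name__ == '__main__':
    throwaway_key = RSA.generate(2048)
    generator = LicenseKeyGenerator(throwaway_key.exportKey())
    license_key = generator.sign_message(b'user@example.com|expires=2030-01-01')
    print(generator.verify_signature(license_key, b'user@example.com|expires=2030-01-01'))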
``` |
{
"source": "JohnnyHobo/NeuralDecisions",
"score": 4
} |
#### File: JohnnyHobo/NeuralDecisions/neuralDecisions.py
```python
import numpy as np
####################################################################################
# This program uses a basic method of evaluating a decision to determine whether a #
# choice is good or not. It is based off of a standard pros/cons list. In general, #
# there are five 'pros' and five 'cons' to all decisions- whether a choice will or #
# will not make you happier, healthier, wealthier- or if it is ethical or legal. #
# Each of these values are a simple boolean, although more complicated models can #
# be built on top of this structure to include more or less logical thought forms. #
####################################################################################
# input: x = (happier,healthier,wealthier,ethical,legal)
x = np.array([[0,0,0,0,0],
[0,0,0,0,1],
[0,0,0,1,1],
[0,0,1,1,1],
[0,1,1,1,1],
[1,1,1,1,1],
[1,1,1,1,0],
[1,1,1,0,0],
[1,1,0,0,0],
[1,0,0,0,0]])
# output: (Yes/No)
y = np.array([[0],[0],[0],[1],[1],
[1],[1],[1],[0],[0]])
class Neural_Network(object):
def __init__(self):
#parameters
self.inputSize = 5
self.outputSize = 1
self.hiddenSize = 6 #=inputSize+outputSize unless you want to rewrite the ENTIRE algorithm.
#weights
self.W1 = np.random.randn(self.inputSize, self.hiddenSize)
self.W2 = np.random.randn(self.hiddenSize, self.outputSize)
def forward(self, x):
#forward propagation through our network
self.z = np.dot(x, self.W1) # dot product of x input and first weights
self.z2 = self.sigmoid(self.z) #activation function
self.z3 = np.dot(self.z2, self.W2) # dot product of hidden and second weights
o = self.sigmoid(self.z3) #final activation function
return o
def sigmoid (self, s):
#activation function
return 1/(1+np.exp(-s))
def sigmoidPrime(self, s):
#derivative of sigmoid
return(s * (1 - s))
def backward(self, x, y, o):
#backward propogate through network
self.o_error = y - o # error in output
self.o_delta = self.o_error*self.sigmoidPrime(o) # applying derivative of sigmoid to error
self.z2_error = self.o_delta.dot(self.W2.T) # z2 error: how much our hidden layer weights contributed to output error
self.z2_delta = self.z2_error*self.sigmoidPrime(self.z2) # applying derivative of sigmoid to z2 error
self.W1 += x.T.dot(self.z2_delta) # adjusting first set (input --> hidden) weights
self.W2 = self.W2 + self.z2.T.dot(self.o_delta) # adjusting second set (hidden --> output) weights
def train(self, x, y,i):
i+=1
o = self.forward(x)
self.backward(x, y, o)
def saveWeights(self):
np.save("w1.npy",self.W1)
np.save("w2.npy",self.W2)
def predict(self,inputs):
print "Predicted data based on trained weights: ";
print "Input (scaled): \n" + str(inputs);
print "Output: \n" + str(int(np.around(self.forward(inputs))));
print "How Sure: " +str(int(np.around(self.forward(inputs)*100)))+"%"
NN = Neural_Network()
def initialTraining():
for i in xrange(500): # trains the NN 500 times
print "Run: "+str(i)
print "Input: \n" + str(x)
print "Actual Output: \n" + str(y)
print "Predicted Output: \n" + str(np.around(NN.forward(x)))
print "Loss: \n" + str(np.mean(np.square(y - NN.forward(x)))) # mean sum squared loss
print "\n"
NN.train(x, y,0)
print("Training Complete.")
while True:
try:
a = sanitize(raw_input("Save Weight File? Y/n\n>"))
if a == 1:
NN.saveWeights()
else:
return
except ValueError as e:
print(e)
continue
def sanitize(i_string):
o_string = ''
if i_string in ['1','Y','YE','YES','YEs','Yes','yes','ye','y','T','TRUE','True','t','true']:
o_string = int(1)
elif i_string in ['0','N','NO','No','no','n','F','FALSE','False','false','f','']:
o_string = int(0)
else:
raise ValueError(i_string + " is not a valid answer.\nPlease Use Boolean Logic (e.g. Yes/No;1/0)")
return o_string
# x = (happier,healthier,wealthier,ethical,legal)
def run():
#weights file?
print("Welcome To Noah's Neural Network Decision Maker!")
title = raw_input("What Is Your Question? \n>")
while True:
print(title)
try:
#Get inputs, sanitize, and organize in array
happy = sanitize(raw_input("Will This Make You Happier? \n>"))
healthy = sanitize(raw_input("Will This Make You Healthier? \n>"))
wealthy = sanitize(raw_input("Will This Make You Wealthier? \n>"))
ethic = sanitize(raw_input("Is This Ethical? \n>"))
legal = sanitize(raw_input("Is This Legal? \n>"))
inputs = np.array([happy,healthy,wealthy,ethic,legal])
except ValueError as e:
print(e)
continue
try:
#Open weights file, if exists, create if not
NN.W1 = np.load("w1.npy")
NN.W2 = np.load("w2.npy")
print("Loaded Okay")
except Exception, e:
print(e)
if e.errno == 2:
initialTraining()
break
print("Now Processing Question: " + title)
NN.predict(inputs)
while True:
run()
#initialTraining()
``` |
{
"source": "JohnnyHopp/Part-Affinity-Field-Mobilenetv2",
"score": 2
} |
#### File: 20200307PAF/data_process/coco.py
```python
import os
import cv2
import torch.utils.data as data
from pycocotools.coco import COCO
import numpy as np
from .coco_process_utils import clean_annot, get_ignore_mask, get_heatmap, get_paf, get_keypoints, FLIP_INDICES
from .process_utils import flip, resize, color_augment, resize_hm_paf, normalize, affine_augment
class CocoDataSet(data.Dataset):
def __init__(self, data_path, opt, split='train'):
self.coco_year = 2017
self.coco = COCO(
os.path.join(data_path, 'annotations/person_keypoints_{}{}.json'.format(split, self.coco_year)))
self.split = split
self.data_path = data_path
self.do_augment = split == 'train'
# load annotations that meet specific standards
self.indices = clean_annot(self.coco, data_path, split)
# self.indices = np.array([1407,5608])
self.img_dir = os.path.join(data_path, split + str(self.coco_year))
self.opt = opt
print('Loaded {} images for {}'.format(len(self.indices), split))
def get_item_raw(self, index, to_resize = True):
index = self.indices[index]
anno_ids = self.coco.getAnnIds(index)
annots = self.coco.loadAnns(anno_ids)
img_path = os.path.join(self.img_dir, self.coco.loadImgs([index])[0]['file_name'])
img = self.load_image(img_path)
ignore_mask = get_ignore_mask(self.coco, img, annots)
keypoints = get_keypoints(self.coco, img, annots) #add neck
if self.do_augment:
img, ignore_mask, keypoints = self.augment(img, ignore_mask, keypoints, self.opt)
if to_resize:
img, ignore_mask, keypoints = resize(img, ignore_mask, keypoints, self.opt.imgSize)
heat_map = get_heatmap(self.coco, img, keypoints, self.opt.sigmaHM)
paf = get_paf(self.coco, img, keypoints, self.opt.sigmaPAF, self.opt.variableWidthPAF)
return img, heat_map, paf, ignore_mask, keypoints
def augment(self, img, ignore_mask, keypoints, opts):
if np.random.random() < opts.flipAugProb:
img, ignore_mask, keypoints = flip(img, ignore_mask, keypoints, FLIP_INDICES)
img, ignore_mask, keypoints = color_augment(img, ignore_mask, keypoints, opts.colorAugFactor)
rot_angle = 0
if np.random.random() < opts.rotAugProb:
rot_angle = np.clip(np.random.randn(),-2.0,2.0) * opts.rotAugFactor
img, ignore_mask, keypoints = affine_augment(img, ignore_mask, keypoints, rot_angle, opts.scaleAugFactor)
return img, ignore_mask, keypoints
def __getitem__(self, index):
img, heat_map, paf, ignore_mask, _ = self.get_item_raw(index)
img = normalize(img)
heat_map, paf, ignore_mask = resize_hm_paf(heat_map, paf, ignore_mask, self.opt.hmSize)
return img, heat_map, paf, ignore_mask, index
def load_image(self, img_path):
img = cv2.imread(img_path)
img = img.astype('float32') / 255.
return img
def get_imgs_multiscale(self, index, scales, flip = False):
img, heat_map, paf, ignore_mask, keypoints = self.get_item_raw(index, False)
imgs = []
for scale in scales:
width, height = img.shape[1], img.shape[0]
new_width, new_height = int(scale* width), int(scale*height)
scaled_img = cv2.resize(img.copy(), (new_width, new_height))
flip_img = cv2.flip(scaled_img, 1)
scaled_img = normalize(scaled_img)
imgs.append(scaled_img)
if flip:
imgs.append(normalize(flip_img))
paf = paf.transpose(2, 3, 0, 1)
paf = paf.reshape(paf.shape[0], paf.shape[1], paf.shape[2] * paf.shape[3])
paf = paf.transpose(2, 0, 1)
return imgs, heat_map, paf, ignore_mask, keypoints
def __len__(self):
return len(self.indices)
```
#### File: 20200307PAF/data_process/data_loader_provider.py
```python
import torch
from .coco import CocoDataSet
from .coco_test import CocoTestDataSet
def create_data_loaders(opt):
tr_dataset, te_dataset = create_data_sets(opt)
train_loader = torch.utils.data.DataLoader(
tr_dataset,
batch_size=opt.batchSize,
shuffle=True if opt.DEBUG == 0 else False,
drop_last=True,
num_workers=opt.nThreads
)
test_loader = torch.utils.data.DataLoader(
te_dataset,
batch_size=opt.batchSize,
shuffle=False,
num_workers=opt.nThreads
)
return train_loader, test_loader
def create_data_sets(opt):
if opt.dataset == 'coco':
tr_dataset = CocoDataSet(opt.data, opt, 'train')
te_dataset = CocoDataSet(opt.data, opt, 'val')
else:
raise ValueError('Data set ' + opt.dataset + ' not available.')
return tr_dataset, te_dataset
def create_testdata_loaders(opt):
te_dataset = create_testdata_sets(opt)
test_loader = torch.utils.data.DataLoader(
te_dataset,
batch_size=opt.batchSize,
shuffle=False,
num_workers=opt.nThreads
)
return test_loader
def create_testdata_sets(opt):
if opt.dataset == 'coco':
te_dataset = CocoTestDataSet(opt.data, opt, 'test')
else:
raise ValueError('Data set ' + opt.dataset + ' not available.')
return te_dataset
```
#### File: 20200307PAF/data_process/process_utils.py
```python
import numpy as np
import random
import cv2
sigma_inp = 7
n = sigma_inp * 6 + 1
g_inp = np.zeros((n, n))
for i in range(n):
for j in range(n):
g_inp[i, j] = np.exp(-((i - n / 2) ** 2 + (j - n / 2) ** 2) / (2. * sigma_inp * sigma_inp))
# IMAGE NET CONSTANTS
MEAN = [0.485, 0.456, 0.406]
STD = [0.229, 0.224, 0.225]
# https://github.com/xingyizhou/pytorch-pose-hg-3d/blob/master/src/utils/img.py
def Gaussian(sigma):
if sigma == 7:
return np.array([0.0529, 0.1197, 0.1954, 0.2301, 0.1954, 0.1197, 0.0529,
0.1197, 0.2709, 0.4421, 0.5205, 0.4421, 0.2709, 0.1197,
0.1954, 0.4421, 0.7214, 0.8494, 0.7214, 0.4421, 0.1954,
0.2301, 0.5205, 0.8494, 1.0000, 0.8494, 0.5205, 0.2301,
0.1954, 0.4421, 0.7214, 0.8494, 0.7214, 0.4421, 0.1954,
0.1197, 0.2709, 0.4421, 0.5205, 0.4421, 0.2709, 0.1197,
0.0529, 0.1197, 0.1954, 0.2301, 0.1954, 0.1197, 0.0529]).reshape(7, 7)
elif sigma == n:
return g_inp
else:
raise Exception('Gaussian {} Not Implement'.format(sigma))
# https://github.com/xingyizhou/pytorch-pose-hg-3d/blob/master/src/utils/img.py
def DrawGaussian(img, pt, sigma):
tmpSize = int(np.math.ceil(3 * sigma))
ul = [int(np.math.floor(pt[0] - tmpSize)), int(np.math.floor(pt[1] - tmpSize))]
br = [int(np.math.floor(pt[0] + tmpSize)), int(np.math.floor(pt[1] + tmpSize))]
if ul[0] > img.shape[1] or ul[1] > img.shape[0] or br[0] < 1 or br[1] < 1:
return img
size = 2 * tmpSize + 1
g = Gaussian(size)
g_x = [max(0, -ul[0]), min(br[0], img.shape[1]) - max(0, ul[0]) + max(0, -ul[0])]
g_y = [max(0, -ul[1]), min(br[1], img.shape[0]) - max(0, ul[1]) + max(0, -ul[1])]
img_x = [max(0, ul[0]), min(br[0], img.shape[1])]
img_y = [max(0, ul[1]), min(br[1], img.shape[0])]
img[img_y[0]:img_y[1], img_x[0]:img_x[1]] = g[g_y[0]:g_y[1], g_x[0]:g_x[1]]
return img
def flip(img, ignore_mask, keypoints, flip_indices):
width = img.shape[1]
img = cv2.flip(img, 1)
ignore_mask = cv2.flip(ignore_mask, 1)
keypoints[:,:,0] = width - 1 - keypoints[:,:,0]
for flip_id in flip_indices:
temp = keypoints[:, flip_id[0], :].copy()
keypoints[:, flip_id[0], :] = keypoints[:, flip_id[1], :]
keypoints[:, flip_id[1], :] = temp
return img, ignore_mask, keypoints
def resize(img, ignore_mask, keypoints, imgSize):
    # img.shape is (rows, cols) == (height, width); scale x by width and y by height
    height, width = img.shape[0], img.shape[1]
    img = cv2.resize(img, (imgSize, imgSize))
    ignore_mask = cv2.resize(ignore_mask, (imgSize, imgSize))
    keypoints[:, :, 0] = keypoints[:, :, 0] * imgSize / width
    keypoints[:, :, 1] = keypoints[:, :, 1] * imgSize / height
    return img, ignore_mask, keypoints
def resize_hm(heatmap, hm_size):
if np.isscalar(hm_size):
hm_size = (hm_size, hm_size)
heatmap = cv2.resize(heatmap.transpose(1, 2, 0), hm_size,interpolation=cv2.INTER_CUBIC)
return heatmap.transpose(2, 0, 1)
def resize_hm_paf(heatmap, paf, ignore_mask, hm_size):
ignore_mask = cv2.resize(ignore_mask, (hm_size, hm_size))
heatmap = resize_hm(heatmap, hm_size)
paf = paf.transpose(2,3,0,1)
paf = paf.reshape(paf.shape[0], paf.shape[1], paf.shape[2] * paf.shape[3])
paf = cv2.resize(paf, (hm_size, hm_size),interpolation=cv2.INTER_CUBIC)
paf = paf.transpose(2, 0, 1)
return heatmap, paf, ignore_mask
def color_augment(img, ignore_mask, keypoints, color_aug):
for channel in range(img.shape[2]):
img[:, :, channel] = np.clip(img[:, :, channel] * (np.random.random()*color_aug*2 + 1 - color_aug) , 0, 1)
return img, ignore_mask, keypoints
def normalize(img):
img = img[:, :, ::-1]
img = (img - MEAN) / STD
img = img.transpose(2, 0, 1)
return img
def denormalize(img):
img = img.transpose(1, 2, 0)
img = img * STD + MEAN
img = img[:, :, ::-1]
return img
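# Hypothetical usage sketch (added for illustration, not part of the original
# module): normalize expects a float32 BGR image in [0, 1] with shape (H, W, 3)
# (as produced by cv2.imread followed by /255.) and returns a CHW array with the
# ImageNet statistics applied; denormalize inverts it. The 64x64 size is arbitrary.
def _example_normalize_roundtrip():
    dummy = np.random.rand(64, 64, 3).astype('float32')
    restored = denormalize(normalize(dummy))
    assert np.allclose(dummy, restored, atol=1e-5)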
def rotate_2d(pt_2d, rot_rad):
x = pt_2d[0]
y = pt_2d[1]
sn, cs = np.sin(rot_rad), np.cos(rot_rad)
xx = x * cs - y * sn
yy = x * sn + y * cs
return np.array([xx, yy], dtype=np.float32)
# https://github.com/JimmySuen/integral-human-pose/ - Integral pose estimation,
# This paper has very good results on single person pose
def gen_trans_from_patch_cv(c_x, c_y, src_width, src_height, t_x, t_y, dst_width, dst_height, scale, rot):
# augment size with scale
src_w = src_width * scale
src_h = src_height * scale
src_center = np.array([c_x, c_y], dtype=np.float32)
# augment rotation
rot_rad = np.pi * rot / 180
src_downdir = rotate_2d(np.array([0, src_h * 0.5], dtype=np.float32), rot_rad)
src_rightdir = rotate_2d(np.array([src_w * 0.5, 0], dtype=np.float32), rot_rad)
dst_w = dst_width
dst_h = dst_height
# dst_center = np.array([dst_w * 0.5, dst_h * 0.5], dtype=np.float32)
dst_center = np.array([t_x, t_y], dtype=np.float32)
dst_downdir = np.array([0, dst_h * 0.5], dtype=np.float32)
dst_rightdir = np.array([dst_w * 0.5, 0], dtype=np.float32)
src = np.zeros((3, 2), dtype=np.float32)
src[0, :] = src_center
src[1, :] = src_center + src_downdir
src[2, :] = src_center + src_rightdir
dst = np.zeros((3, 2), dtype=np.float32)
dst[0, :] = dst_center
dst[1, :] = dst_center + dst_downdir
dst[2, :] = dst_center + dst_rightdir
trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))
return trans
def affine_augment(img, ignore_mask, keypoints, rot_angle, scale_aug_factor):
keypoints_useful = keypoints[keypoints[:,:,2]>0]
if len(keypoints_useful) == 0:
return img, ignore_mask, keypoints
if keypoints_useful.ndim == 2:
keypoints_useful = keypoints_useful.reshape(1, keypoints_useful.shape[0], keypoints_useful.shape[1])
left_lim = keypoints_useful[:,:,0].min() - 32
right_lim = keypoints_useful[:,:,0].max() + 32
top_lim = keypoints_useful[:,:,1].min() - 32
bot_lim = keypoints_useful[:,:,1].max() + 32
c_y = img.shape[0]/2
c_x = img.shape[1]/2
scale_min = max(max(right_lim-c_x, c_x-left_lim)/c_x, max(c_y - top_lim, bot_lim - c_y)/c_y, 1 - scale_aug_factor)
scale_max = min(2 - scale_min, 1 + scale_aug_factor)
scale = (1 + np.clip(np.random.randn(), -1, 1))*(scale_max - scale_min)*0.5 + scale_min
trans = gen_trans_from_patch_cv(c_x, c_y, img.shape[1], img.shape[0], c_x, c_y, img.shape[1], img.shape[0], scale, rot_angle)
img = cv2.warpAffine(img, trans, (img.shape[1], img.shape[0]), flags=cv2.INTER_LINEAR)
ignore_mask = cv2.warpAffine(ignore_mask, trans, (img.shape[1], img.shape[0]), flags=cv2.INTER_LINEAR)
affine_trans_keypoints = np.matmul(trans[:,:2], keypoints[:,:,:2].copy().transpose(0,2,1)).transpose(0,2,1)
affine_trans_keypoints = affine_trans_keypoints + trans[:,2]
keypoints[:,:,:2] = affine_trans_keypoints
return img, ignore_mask, keypoints
def rotation(img, keypoints, rot_angle):
if not len(keypoints) == 0:
keypoints = keypoints.reshape(1, keypoints.shape[0], keypoints.shape[1])
if rot_angle in (90, -90, 270, -270):
c_y = img.shape[0]/2
c_x = img.shape[1]/2
t_y = c_x
t_x = c_y
# trans = cv2.getRotationMatrix2D((c_x,c_y),rot_angle,1)
# img = cv2.warpAffine(img,trans,(c_x,c_y))
trans = gen_trans_from_patch_cv(c_x, c_y, img.shape[1], img.shape[0], t_x, t_y, img.shape[1], img.shape[0], 1, rot_angle)
img = cv2.warpAffine(img, trans, (img.shape[0], img.shape[1]), flags=cv2.INTER_LINEAR)
else:
c_y = img.shape[0]/2
c_x = img.shape[1]/2
t_y = c_y
t_x = c_x
trans = gen_trans_from_patch_cv(c_x, c_y, img.shape[1], img.shape[0], t_x, t_y, img.shape[1], img.shape[0], 1, rot_angle)
img = cv2.warpAffine(img, trans, (img.shape[1], img.shape[0]), flags=cv2.INTER_LINEAR)
if not len(keypoints) == 0:
affine_trans_keypoints = np.matmul(trans[:,:2], keypoints[:,:,:2].copy().transpose(0,2,1)).transpose(0,2,1)
affine_trans_keypoints = affine_trans_keypoints + trans[:,2]
keypoints[:,:,:2] = affine_trans_keypoints
keypoints = keypoints.reshape(keypoints.shape[1], keypoints.shape[2])
return img, keypoints
def image_cropped(img, keypoints):
if len(keypoints) == 0:
return -1,-1
x_points = keypoints[::2]
y_points = keypoints[1::2]
left_lim = int(np.min(x_points[x_points>=0])) - 32
right_lim = int(np.max(x_points[x_points>=0])) + 32
bot_lim = int(np.min(y_points[y_points>=0])) - 32
top_lim = int(np.max(y_points[y_points>=0])) + 32
crop_location = [left_lim, right_lim, bot_lim, top_lim]
cropped_img = img[bot_lim:top_lim, left_lim:right_lim].round(decimals=5)
return crop_location, cropped_img
```
#### File: 20200307PAF/training/train_net.py
```python
import torch
import os
from tqdm import tqdm
from visualization.visualize import visualize_output
def step(data_loader, model, criterion_hm, criterion_paf, to_train=False, optimizer=None, viz_output=False):
if to_train:
model.train()
else:
model.eval()
nIters = len(data_loader)
hm_loss_meter, paf_loss_meter = AverageMeter(), AverageMeter()
with tqdm(total=nIters) as t:
for i, (input_, heatmap, paf, ignore_mask, indices) in enumerate(data_loader):
input_cuda = input_.float().cuda() if torch.cuda.is_available() else input_.float().cpu()
heatmap_t_cuda = heatmap.float().cuda() if torch.cuda.is_available() else heatmap.float().cpu()
paf_t_cuda = paf.float().cuda() if torch.cuda.is_available() else paf.float().cpu()
ignore_mask_cuda = ignore_mask.reshape(ignore_mask.shape[0], 1,
ignore_mask.shape[1], ignore_mask.shape[2])
ignore_mask_cuda = ignore_mask_cuda.float().cuda() if torch.cuda.is_available() else ignore_mask_cuda.float().cpu()
allow_mask = 1 - ignore_mask_cuda
paf_outputs, heatmap_out= model(input_cuda)
loss_hm_total = 0
loss_paf_total = 0
for i in range(len(paf_outputs)):
paf_out = paf_outputs[i]
loss_paf_total += criterion_paf(paf_out * allow_mask, paf_t_cuda * allow_mask)/allow_mask.sum().detach()/heatmap.shape[0]/paf.shape[1]
loss_hm_total += criterion_hm(heatmap_out * allow_mask, heatmap_t_cuda * allow_mask)/allow_mask.sum().detach()/heatmap.shape[0]/heatmap.shape[1]
output = heatmap_out.data.cpu().numpy(), paf_outputs[-1].data.cpu().numpy(), indices.numpy()
if to_train:
loss = loss_hm_total + loss_paf_total
# print('heatmap.shape{}, paf.shape{}, allow_mask.shape{}'.format(heatmap.shape, paf.shape, allow_mask.shape))
# print('data{}, loss_hm{}, loss_paf{}'.format(i, loss_hm_total.item(), loss_paf_total))
optimizer.zero_grad()
loss.backward()
optimizer.step()
if viz_output:
visualize_output(input_.numpy(), heatmap.numpy(), paf.numpy(), ignore_mask.numpy(), output)
hm_loss_meter.update(loss_hm_total.data.cpu().numpy())
paf_loss_meter.update(loss_paf_total.data.cpu().numpy())
t.set_postfix(loss_hm='{:10.8f}'.format(hm_loss_meter.avg), loss_paf='{:10.8f}'.format(paf_loss_meter.avg))
t.update()
return hm_loss_meter.avg, paf_loss_meter.avg
def train_net(train_loader, test_loader, model, criterion_hm, criterion_paf, optimizer,
n_epochs, val_interval, learn_rate, drop_lr, save_dir, viz_output=False):
heatmap_loss_avg, paf_loss_avg = 0.0, 0.0
for epoch in range(1, n_epochs + 1):
        heatmap_loss_avg, paf_loss_avg = step(train_loader, model, criterion_hm, criterion_paf, True, optimizer, viz_output=viz_output)
if epoch % val_interval == 0:
with torch.no_grad():
validate_net(test_loader, model, criterion_hm, criterion_paf, save_dir, epoch, viz_output=viz_output)
# adjust_learning_rate(optimizer, epoch, drop_lr, learn_rate)
return heatmap_loss_avg, paf_loss_avg
def validate_net(test_loader, model, criterion_hm, criterion_paf, save_dir=None, epoch=0, viz_output=False):
heatmap_loss_avg, paf_loss_avg = step(test_loader, model, criterion_hm, criterion_paf, viz_output=viz_output)
if not save_dir is None:
torch.save(model.state_dict(), os.path.join(save_dir, 'model_{}.pth'.format(epoch)))
return heatmap_loss_avg, paf_loss_avg
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def adjust_learning_rate(optimizer, epoch, dropLR, LR):
lr = LR * (0.1 ** (epoch // dropLR))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
``` |
{
"source": "JohnnyHowe/Slow-Engine-Python",
"score": 4
} |
#### File: draw/text/text.py
```python
import pygame
from slowEngine.display import Display
from slowEngine.camera import Camera
def draw_world_text(text, color, position, font_size=1, font_name="arial"):
""" Draw some text in the world (world coordinates).
Args:
text: string to show
color: color to show text as
position: where to center text on
font_size: font size of text
font_name: name of font to use
"""
new_position = Camera.world_to_display_position(position)
draw_screen_text(text, color, new_position, int(font_size * Camera.get_display_size()), font_name)
def draw_screen_text(text, color, position, font_size=20, font_name="arial"):
""" Draw some text on the screen (screen coordinates).
Args:
text: string to show
color: color to show text as
position: where to center text on
font_size: font size of text
font_name: name of font to use
"""
font = pygame.font.SysFont(font_name, font_size)
text_surface = font.render(text, True, color)
text_rect = text_surface.get_rect()
display_position = (position.x - text_rect.w / 2, position.y - text_rect.h / 2)
Display.surface.blit(text_surface, display_position)
```
#### File: Slow-Engine-Python/sample_programs/player_movement.py
```python
import slowEngine
import pygame
class Game:
player = None
def __init__(self):
self.player = Player()
def run(self):
while True:
self.run_frame()
def run_frame(self):
slowEngine.EventHandler.run()
slowEngine.Display.fill((255, 255, 255))
self.player.show()
slowEngine.Display.update_display()
class Player:
position = None
def __init__(self):
self.position = slowEngine.Vector2(0, 0)
def show(self):
slowEngine.draw.draw_world_circle((0, 255, 0), self.position, 0.5)
slowEngine.draw.draw_world_circle((0, 0, 0), self.position, 0.5, 0.05)
slowEngine.draw.draw_world_text("Hoes", (0, 0, 0), self.position + slowEngine.Vector2(0, 1), 0.5)
if __name__ == "__main__":
Game().run()
``` |
{
"source": "johnnyiller/cluster_funk",
"score": 3
} |
#### File: core/environments/stack_collection.py
```python
class StackCollection:
def __init__(self, client=None, data=None):
super(StackCollection, self).__init__()
if data is None:
paginator = client.get_paginator('describe_stacks')
results = paginator.paginate()
self.list = list()
for result in results:
stacks = result['Stacks']
for stack in stacks:
self.list.append(stack)
else:
self.list = list(data)
def __len__(self):
return len(self.list)
def __getitem__(self, ii):
return self.list[ii]
def __delitem__(self, ii):
del self.list[ii]
def __setitem__(self, ii, val):
self.list[ii] = val
def __str__(self):
return str(self.list)
def insert(self, ii, val):
self.list.insert(ii, val)
def reverse(self):
return self[::-1]
def filter_by(self, func):
filtered = [stack for stack in self.list if func(stack)]
return StackCollection(data=filtered)
@staticmethod
def has_prefix(stack_prefix, stack):
for tag in stack.get('Tags', []):
if tag['Key'] == 'Name' and tag.get(
'Value', "").startswith(stack_prefix):
return True
return False
@staticmethod
def is_cf_stack(stack):
for tag in stack.get('Tags', []):
if tag['Key'] == 'tool' and tag['Value'] == 'cluster_funk':
return True
return False
@staticmethod
def has_env(env, stack):
for tag in stack.get('Tags', []):
if tag['Key'] == 'environment' and tag['Value'] == env:
return True
return False
def output_dict(self):
result = {}
for stack in self:
for output in stack.get("Outputs", []):
result[output.get("OutputKey", "")] = output["OutputValue"]
return result
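# Hypothetical usage sketch (added for illustration, not part of the original
# module): the static predicates above are written so they can be curried with
# functools.partial and chained through filter_by. The environment name and
# stack-name prefix below are assumptions, not project defaults.
def _example_filter_usage(client, env='prod', prefix='myapp-'):
    from functools import partial
    stacks = StackCollection(client=client)
    return (stacks
            .filter_by(StackCollection.is_cf_stack)
            .filter_by(partial(StackCollection.has_env, env))
            .filter_by(partial(StackCollection.has_prefix, prefix)))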
```
#### File: cluster_funk/tests/conftest.py
```python
import boto3
from moto import mock_emr, mock_s3, mock_cloudformation, mock_ec2
import pytest
import contextlib
import os
import shutil
import tempfile
from mock import patch, MagicMock, create_autospec
from cement.ext.ext_logging import LoggingLogHandler
@contextlib.contextmanager
def cd(newdir, cleanup=lambda: True):
prevdir = os.getcwd()
os.chdir(os.path.expanduser(newdir))
try:
yield
finally:
os.chdir(prevdir)
cleanup()
@contextlib.contextmanager
def tempdir():
dirpath = tempfile.mkdtemp()
def cleanup():
shutil.rmtree(dirpath)
with cd(dirpath, cleanup):
yield dirpath
@pytest.fixture(scope="function")
def tmp(request):
with tempdir() as dirpath:
yield dirpath
@pytest.fixture(scope="function")
def job_collection_data(request):
return [
{
'Id': 'j-djijsllkdk'
},
{
'Id': 'j-djlkjldsjf'
}
]
@pytest.fixture(scope="function")
def cluster_collection_data():
return [
{
'Id': 'i-id1',
'PublicDnsName': 'www.example.com',
'PublicIpAddress': '172.16.58.3',
'Status': {
'State': 'RUNNING'
}
},
{
'Id': 'i-id2',
'PublicDnsName': 'www.example2.com',
'PublicIpAddress': '192.168.127.12',
'Status': {
'State': 'RUNNING'
}
}
]
@pytest.fixture(scope="function")
def stack_collection_data(request):
return [
{
'Id': 's-id1',
'Outputs': [
{
'OutputKey': 'akey',
'OutputValue': 'avalue'
}
]
},
{
'Id': 's-id2',
'Outputs': [
{
'OutputKey': 'akeytwo',
'OutputValue': 'avaluetwo'
}
]
}
]
@pytest.fixture(scope='function')
def cloudformation_client(request):
with mock_cloudformation():
yield boto3.client('cloudformation', region_name='us-east-1')
@pytest.fixture(scope='function')
def s3_client(request):
with mock_s3():
yield boto3.client('s3', region_name='us-east-1')
@pytest.fixture(scope='function')
def emr_client(request):
with mock_emr():
yield boto3.client('emr', region_name='us-east-1')
@pytest.fixture(scope="function")
def emr_cluster(request):
with mock_emr():
emr_client = boto3.client('emr', region_name='us-east-1')
emr_boot_cluster = {
'Name': 'example_cluster',
'LogUri': 's3://somes3bucket',
'Instances': {
'MasterInstanceType': 'c4.large',
'SlaveInstanceType': 'c4.large',
'InstanceCount': 3
},
'Tags': [
{
'Key': 'user_id',
'Value': '123-456'
}
]
}
cluster = emr_client.run_job_flow(**emr_boot_cluster)
step_ids = emr_client.add_job_flow_steps(
JobFlowId=cluster['JobFlowId'],
Steps=[
{
'Name': "example",
'ActionOnFailure': 'CONTINUE',
'HadoopJarStep': {
'Jar': 's3://runner.jar'
}
},
{
'Name': "example 2",
'ActionOnFailure': 'CONTINUE',
'HadoopJarStep': {
'Jar': 's3://runner.jar'
}
}
]
)
yield (emr_client, cluster, step_ids)
@pytest.fixture(scope='function')
def ssh_connection(request):
return MagicMock()
@pytest.fixture(scope='function')
def cluster_instance_params(request):
return {
'Id': 'c-testinstance',
'PublicDnsName': 'my.dns.name',
'PublicIpAddress': '234.543.22.123',
'Status': {
'State': 'RUNNING'
}
}
@pytest.fixture(scope='function')
def mock_uuid(request):
mock = MagicMock(return_value='uuid-thing')
with patch('uuid.uuid4', mock):
yield {'mock': mock, 'uuid': 'uuid-thing'}
@pytest.fixture(scope='function')
def paginated_emr_client(emr_cluster):
client = emr_cluster[0]
m = MagicMock(return_value=client)
page_mock = MagicMock()
page_mock.paginate = MagicMock(return_value=[client.list_clusters()])
m.get_paginator = page_mock
with patch('cluster_funk.controllers.clusters.Clusters._emr_client', m):
yield m
@pytest.fixture(scope='function')
def cluster_instance_mock():
run_cmd_mock = MagicMock(return_value='run_cmd_called')
syncfiles_mock = MagicMock(return_value='syncfiles_cmd_called')
with patch('cluster_funk.core.clusters.cluster_instance.ClusterInstance.run_cmd', run_cmd_mock):
with patch('cluster_funk.core.clusters.cluster_instance.ClusterInstance.syncfiles', syncfiles_mock):
yield {
'run_cmd': run_cmd_mock,
'syncfiles': syncfiles_mock
}
@pytest.fixture(scope='function')
def cluster_list_instances_mock(cluster_collection_data):
mock = MagicMock()
page_mock = MagicMock()
page_mock.paginate.return_value = [{'Instances': cluster_collection_data}]
mock.get_paginator.return_value = page_mock
mock.describe_cluster.return_value = {'Cluster': {'MasterPublicDnsName': 'www.example.com'}}
return MagicMock(return_value=mock)
@pytest.fixture(scope='function')
def ec2_client_mock():
with mock_ec2():
yield boto3.client('ec2', region_name='us-east-1')
``` |
{
"source": "JohnnyIrvin/CommunicateProject",
"score": 2
} |
#### File: connections/interfaces/abstract_socket.py
```python
from abc import ABC, abstractmethod
from starlette.types import Message
class AbstractSocket(ABC):
"""Abstract class for a socket."""
@abstractmethod
async def send(self, message: Message) -> None:
"""
Send a message to the socket.
Args:
message (Message): The message to send.
"""
pass
@abstractmethod
async def receive(self) -> Message:
"""
Receive data from the socket.
Returns:
Message: The message received.
"""
pass
@abstractmethod
async def accept(self) -> None:
"""
Accept a connection.
Args:
subprotocol (str): The subprotocol to use.
"""
pass
@abstractmethod
async def close(self):
"""Close the socket."""
pass
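# Hypothetical sketch (added for illustration, not part of the original module):
# a thin adapter showing how the interface can be satisfied by delegating to a
# starlette WebSocket; the wrapped object and its construction are assumptions.
class StarletteSocketAdapter(AbstractSocket):
    """Delegate every AbstractSocket call to a wrapped starlette WebSocket."""
    def __init__(self, websocket) -> None:
        self._websocket = websocket
    async def send(self, message: Message) -> None:
        await self._websocket.send(message)
    async def receive(self) -> Message:
        return await self._websocket.receive()
    async def accept(self) -> None:
        await self._websocket.accept()
    async def close(self) -> None:
        await self._websocket.close()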
``` |
{
"source": "JohnnyIrvin/hungry-task",
"score": 2
} |
#### File: cli/integration/test_cli.py
```python
from typing import List, Tuple
import pytest
from cli import cli
task_names: List[str] = ['Read a book', 'Eat Popcorn', 'Go to sleep']
repo_names: List[str] = ['csv']
repo_tasks_tuples: List[Tuple[str, str]] = [(repo, name) for repo in repo_names for name in task_names]
def test_invoke_cli(runner):
result = runner.invoke(cli)
assert result.exit_code == 0
assert 'Usage' in result.output
@pytest.mark.parametrize(('repo', 'name'), repo_tasks_tuples)
def test_add_task_with_name(repo: str, name: str, runner):
result = runner.invoke(cli, [repo, 'add', name])
assert 'Task added' in result.output
assert name in result.output
@pytest.mark.parametrize(('repo', 'name'), repo_tasks_tuples)
def test_remove_task_with_name(repo: str, name: str, runner):
runner.invoke(cli, [repo, 'add', name])
result = runner.invoke(cli, [repo, 'remove', name])
assert 'Task removed' in result.output
assert name in result.output
@pytest.mark.parametrize(('repo', 'name'), repo_tasks_tuples)
def test_list_added_tasks(repo: str, name: str, runner):
runner.invoke(cli, [repo, 'add', name])
result = runner.invoke(cli, [repo, 'list'])
assert name in result.output
runner.invoke(cli, [repo, 'remove', name])
@pytest.mark.parametrize(('repo', 'name'), repo_tasks_tuples)
def test_get_task_with_name(repo: str, name: str, runner):
runner.invoke(cli, [repo, 'add', name])
result = runner.invoke(cli, [repo, 'get', name])
assert f'[ ] - {name}' in result.output
runner.invoke(cli, [repo, 'remove', name])
@pytest.mark.parametrize(('repo', 'name'), repo_tasks_tuples)
def test_task_not_found(repo: str, name: str, runner):
result = runner.invoke(cli, [repo, 'get', name])
assert f"Task '{name}' not found" in result.output
@pytest.mark.parametrize(('repo', 'name'), repo_tasks_tuples)
def test_complete_task(repo: str, name: str, runner):
runner.invoke(cli, [repo, 'add', name])
result = runner.invoke(cli, [repo, 'complete', name])
assert f"Task '{name}' completed" in result.output
assert name in result.output
runner.invoke(cli, [repo, 'remove', name])
```
#### File: domain/seedwork/abstract_repository.py
```python
from abc import ABC, abstractmethod
from typing import List
from viking.domain.seedwork.entity import Entity
class AbstractRepository(ABC):
@abstractmethod
def add(self, entity: Entity):
"""
Add an entity to the repository.
Args:
entity (Entity): The entity to add.
"""
@abstractmethod
def get(self, reference) -> Entity:
"""
Get an entity from the repository.
Args:
reference: The reference of the entity to get.
Returns:
Entity: The entity.
"""
@abstractmethod
def remove(self, entity: Entity) -> Entity:
"""
Remove an entity from the repository.
Args:
entity (Entity): The entity to remove.
Returns:
Entity: The entity.
"""
@abstractmethod
def list(self) -> List[Entity]:
"""
List all entities in the repository.
Returns:
list: The list of entities.
"""
```
#### File: viking/fakes/fake_repository.py
```python
from typing import Dict, List
from uuid import UUID
from viking.domain.seedwork.abstract_repository import AbstractRepository
from viking.domain.seedwork.entity import Entity
class FakeRepository(AbstractRepository):
"""
Fake repository for testing purposes.
"""
_entities: Dict[UUID, Entity]
def __init__(self, entities: Dict[UUID, Entity] = None):
"""
Constructor.
Args:
entities (Dict[UUID, Entity]): The entities to add to the repository.
"""
if entities is None:
entities = dict()
self._entities = entities
def add(self, entity: Entity):
"""
Add an entity to the repository.
Args:
entity (Entity): The entity to add.
"""
self._entities[entity.id] = entity
def get(self, reference: UUID) -> Entity:
"""
Get an entity from the repository.
Args:
reference (UUID): The reference of the entity to get.
Returns:
Entity: The entity.
"""
return self._entities.get(reference)
def remove(self, entity: Entity) -> Entity:
"""
Remove an entity from the repository.
Args:
entity (Entity): The entity to remove.
Returns:
Entity: The entity.
"""
return self._entities.pop(entity.id)
def list(self) -> List[Entity]:
"""
List all entities in the repository.
Returns:
list: The list of entities.
"""
return list(self._entities.values())
@property
def count(self) -> int:
"""
Get the number of entities in the repository.
Returns:
int: The number of entities.
"""
return len(self._entities)
``` |
{
"source": "johnnyjacob/neomodel",
"score": 2
} |
#### File: neomodel/neomodel/util.py
```python
import logging
import os
import sys
import time
import warnings
from . import core
from threading import local
from neo4j import GraphDatabase, basic_auth, CypherError, SessionError
from neo4j.types.graph import Node
from . import config
from .exceptions import UniqueProperty, ConstraintValidationFailed, ModelDefinitionMismatch
if sys.version_info >= (3, 0):
from urllib.parse import urlparse
else:
from urlparse import urlparse # noqa
logger = logging.getLogger(__name__)
# make sure the connection url has been set prior to executing the wrapped function
def ensure_connection(func):
def wrapper(self, *args, **kwargs):
# Sort out where to find url
if hasattr(self, 'db'):
_db = self.db
else:
_db = self
if not _db.url:
_db.set_connection(config.DATABASE_URL)
return func(self, *args, **kwargs)
return wrapper
def change_neo4j_password(db, new_password):
db.cypher_query("CALL dbms.changePassword($password)", {'password': <PASSWORD>})
def clear_neo4j_database(db, clear_constraints=False, clear_indexes=False):
db.cypher_query("MATCH (a) DETACH DELETE a")
if clear_constraints:
core.drop_constraints()
if clear_indexes:
core.drop_indexes()
class NodeClassRegistry:
"""
A singleton class via which all instances share the same Node Class Registry.
"""
# Maintains a lookup directory that is used by cypher_query
# to infer which class to instantiate by examining the labels of the
# node in the resultset.
# _NODE_CLASS_REGISTRY is populated automatically by the constructor
# of the NodeMeta type.
_NODE_CLASS_REGISTRY = {}
def __init__(self):
self.__dict__['_NODE_CLASS_REGISTRY'] = self._NODE_CLASS_REGISTRY
def __str__(self):
ncr_items = list(map(lambda x: "{} --> {}".format(",".join(x[0]), x[1]),
self._NODE_CLASS_REGISTRY.items()))
return "\n".join(ncr_items)
class Database(local, NodeClassRegistry):
"""
A singleton object via which all operations from neomodel to the Neo4j backend are handled with.
"""
def __init__(self):
"""
"""
self._active_transaction = None
self.url = None
self.driver = None
self._pid = None
def set_connection(self, url):
"""
Sets the connection URL to the address a Neo4j server is set up at
"""
u = urlparse(url)
if u.netloc.find('@') > -1 and (u.scheme == 'bolt' or u.scheme == 'bolt+routing' or u.scheme == 'neo4j'):
credentials, hostname = u.netloc.rsplit('@', 1)
username, password, = credentials.split(':')
else:
raise ValueError("Expecting url format: bolt://user:password@localhost:7687"
" got {0}".format(url))
self.driver = GraphDatabase.driver(u.scheme + '://' + hostname,
auth=basic_auth(username, password),
encrypted=config.ENCRYPTED_CONNECTION,
max_pool_size=config.MAX_POOL_SIZE)
self.url = url
self._pid = os.getpid()
self._active_transaction = None
@property
def transaction(self):
"""
Returns the current transaction object
"""
return TransactionProxy(self)
@property
def write_transaction(self):
return TransactionProxy(self, access_mode="WRITE")
@property
def read_transaction(self):
return TransactionProxy(self, access_mode="READ")
@ensure_connection
def begin(self, access_mode=None):
"""
Begins a new transaction, raises SystemError exception if a transaction is in progress
"""
if self._active_transaction:
raise SystemError("Transaction in progress")
self._active_transaction = self.driver.session(access_mode=access_mode).begin_transaction()
@ensure_connection
def commit(self):
"""
Commits the current transaction
"""
r = self._active_transaction.commit()
self._active_transaction = None
return r
@ensure_connection
def rollback(self):
"""
Rolls back the current transaction
"""
self._active_transaction.rollback()
self._active_transaction = None
def _object_resolution(self, result_list):
"""
Performs in place automatic object resolution on a set of results
returned by cypher_query.
The function operates recursively in order to be able to resolve Nodes
within nested list structures. Not meant to be called directly,
used primarily by cypher_query.
:param result_list: A list of results as returned by cypher_query.
:type list:
:return: A list of instantiated objects.
"""
        # Object resolution occurs in-place
        for index, row in enumerate(result_list):
            for attr_index, attribute in enumerate(row):
                try:
                    # Primitive types should remain primitive types,
                    # Nodes to be resolved to native objects
                    resolved_object = attribute
                    if type(attribute) is Node:
                        resolved_object = self._NODE_CLASS_REGISTRY[
                            frozenset(attribute.labels)].inflate(attribute)
                    if type(attribute) is list:
                        resolved_object = self._object_resolution([attribute])
                    result_list[index][attr_index] = resolved_object
                except KeyError:
                    # Not being able to match the label set of a node with a known object results
                    # in a KeyError in the internal dictionary used for resolution. If it is impossible
                    # to match, then raise an exception with more details about the error.
                    raise ModelDefinitionMismatch(attribute, self._NODE_CLASS_REGISTRY)
        return result_list
@ensure_connection
def cypher_query(self, query, params=None, handle_unique=True, retry_on_session_expire=False, resolve_objects=False):
"""
Runs a query on the database and returns a list of results and their headers.
:param query: A CYPHER query
:type: str
:param params: Dictionary of parameters
:type: dict
:param handle_unique: Whether or not to raise UniqueProperty exception on Cypher's ConstraintValidation errors
:type: bool
:param retry_on_session_expire: Whether or not to attempt the same query again if the transaction has expired
:type: bool
:param resolve_objects: Whether to attempt to resolve the returned nodes to data model objects automatically
:type: bool
"""
if self._pid != os.getpid():
self.set_connection(self.url)
if self._active_transaction:
session = self._active_transaction
else:
session = self.driver.session()
try:
# Retrieve the data
start = time.time()
response = session.run(query, params)
results, meta = [list(r.values()) for r in response], response.keys()
end = time.time()
if resolve_objects:
# Do any automatic resolution required
results = self._object_resolution(results)
except CypherError as ce:
if ce.code == u'Neo.ClientError.Schema.ConstraintValidationFailed':
if 'already exists with label' in ce.message and handle_unique:
raise UniqueProperty(ce.message)
raise ConstraintValidationFailed(ce.message)
else:
exc_info = sys.exc_info()
if sys.version_info >= (3, 0):
raise exc_info[1].with_traceback(exc_info[2])
else:
raise exc_info[1]
except SessionError:
if retry_on_session_expire:
self.set_connection(self.url)
return self.cypher_query(query=query,
params=params,
handle_unique=handle_unique,
                                         retry_on_session_expire=False,
                                         resolve_objects=resolve_objects)
raise
if os.environ.get('NEOMODEL_CYPHER_DEBUG', False):
logger.debug("query: " + query + "\nparams: " + repr(params) + "\ntook: {:.2g}s\n".format(end - start))
return results, meta
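# Hypothetical usage sketch (not part of the original module): issuing a raw
# Cypher query through the singleton Database object and letting neomodel
# inflate returned nodes via the class registry. The `Person` label and the
# `db` argument are assumptions used purely for illustration.
def _example_cypher_query_usage(db):
    results, meta = db.cypher_query(
        "MATCH (p:Person) WHERE p.age > $age RETURN p",
        params={'age': 21},
        resolve_objects=True)
    # With resolve_objects=True each returned Node is inflated to the class
    # registered for its label set in _NODE_CLASS_REGISTRY.
    return results, meta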
class TransactionProxy(object):
def __init__(self, db, access_mode=None):
self.db = db
self.access_mode = access_mode
@ensure_connection
def __enter__(self):
self.db.begin(access_mode=self.access_mode)
return self
def __exit__(self, exc_type, exc_value, traceback):
if exc_value:
self.db.rollback()
if exc_type is CypherError:
if exc_value.code == u'Neo.ClientError.Schema.ConstraintValidationFailed':
raise UniqueProperty(exc_value.message)
if not exc_value:
self.db.commit()
def __call__(self, func):
def wrapper(*args, **kwargs):
with self:
return func(*args, **kwargs)
return wrapper
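# Hypothetical usage sketch (not part of the original module): TransactionProxy
# works both as a context manager and as a decorator around the database
# object. The `db` argument and the Cypher statements are illustration only.
def _example_transaction_usage(db):
    # Context-manager form: commit on success, rollback on exception.
    with db.transaction:
        db.cypher_query("CREATE (:Person {name: $name})", {'name': 'Alice'})

    # Decorator form: the wrapped call runs inside its own transaction.
    @db.transaction
    def create_person(name):
        db.cypher_query("CREATE (:Person {name: $name})", {'name': name})

    create_person('Bob')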
def deprecated(message):
def f__(f):
def f_(*args, **kwargs):
warnings.warn(message, category=DeprecationWarning, stacklevel=2)
return f(*args, **kwargs)
f_.__name__ = f.__name__
f_.__doc__ = f.__doc__
f_.__dict__.update(f.__dict__)
return f_
return f__
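# Hypothetical usage sketch (illustration only): wrapping an old helper so that
# every call emits a DeprecationWarning while keeping its name and docstring.
@deprecated("_example_old_helper() is deprecated; use a newer helper instead")
def _example_old_helper(*args, **kwargs):
    """Placeholder body used purely to illustrate the deprecated() decorator."""
    return None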
def classproperty(f):
class cpf(object):
def __init__(self, getter):
self.getter = getter
def __get__(self, obj, type=None):
return self.getter(type)
return cpf(f)
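# Hypothetical usage sketch (illustration only): exposing a value computed from
# the class itself, readable on the class without creating an instance.
class _ExampleLabeled(object):
    label = 'ExampleLabeled'

    @classproperty
    def qualified_label(cls):
        # Accessed as _ExampleLabeled.qualified_label or on any instance.
        return 'neomodel:' + cls.label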
# Just used for error messages
class _UnsavedNode(object):
def __repr__(self):
return '<unsaved node>'
def __str__(self):
return self.__repr__()
def _get_node_properties(node):
"""Get the properties from a neo4j.v1.types.graph.Node object."""
# 1.6.x and newer have it as `_properties`
if hasattr(node, '_properties'):
return node._properties
# 1.5.x and older have it as `properties`
else:
return node.properties
``` |
{
"source": "JohnnyJTH/Carberretta",
"score": 2
} |
#### File: bot/cogs/links.py
```python
import discord
from discord.ext import commands
class Links(commands.Cog):
def __init__(self, bot: commands.Bot) -> None:
self.bot = bot
@commands.Cog.listener()
async def on_ready(self) -> None:
if not self.bot.ready.booted:
self.bot.ready.up(self)
@commands.command(name="youtube")
async def command_youtube(self, ctx: commands.Context) -> None:
await ctx.send("<https://youtube.carberra.xyz>")
@commands.command(name="twitch")
async def command_twitch(self, ctx: commands.Context) -> None:
await ctx.send("<https://twitch.carberra.xyz>")
@commands.command(name="lbry")
async def command_lbry(self, ctx: commands.Context) -> None:
await ctx.send("<https://lbry.carberra.xyz>")
@commands.command(name="patreon")
async def command_patreon(self, ctx: commands.Context) -> None:
await ctx.send("<https://patreon.carberra.xyz>")
@commands.command(name="twitter")
async def command_twitter(self, ctx: commands.Context) -> None:
await ctx.send("<https://twitter.carberra.xyz>")
@commands.command(name="facebook")
async def command_facebook(self, ctx: commands.Context) -> None:
await ctx.send("<https://facebook.carberra.xyz>")
@commands.command(name="github")
async def command_github(self, ctx: commands.Context) -> None:
await ctx.send("<https://github.carberra.xyz>")
@commands.command(name="donate")
async def command_donate(self, ctx: commands.Context) -> None:
await ctx.send("<https://donate.carberra.xyz>")
@commands.command(name="plans")
async def command_plans(self, ctx: commands.Context) -> None:
await ctx.send("<https://plans.carberra.xyz>")
@commands.command(name="docs")
async def command_docs(self, ctx: commands.Context) -> None:
await ctx.send("<https://docs.carberra.xyz>")
@commands.command(name="pep")
async def command_pep(self, ctx: commands.Context, pep_number: int) -> None:
async with self.bot.session.get(f"https://python.org/dev/peps/pep-{pep_number:04}") as response:
if not 200 <= response.status <= 299:
await ctx.send(f"PEP {pep_number:04} could not be found.")
return
await ctx.send(f"PEP {pep_number:04}: <https://python.org/dev/peps/pep-{pep_number:04}>")
@commands.command(name="google", aliases=['lmgt', 'lmgtfy'])
async def command_google(self, ctx: commands.Context, *, query: str) -> None:
if len(query) > 500:
return await ctx.send("Your query should be no longer than 500 characters.")
await ctx.send(f"<https://letmegooglethat.com/?q={query.replace(' ', '+'}>")
@commands.command(name="google", aliases=["lmgt", "lmgtfy"])
async def command_google(self, ctx: commands.Context, *, query: str) -> None:
if len(query) > 500:
return await ctx.send("Your query should be no longer than 500 characters.")
await ctx.send(f"<https://letmegooglethat.com/?q={query.replace(' ', '+')}>")
def setup(bot: commands.Bot) -> None:
bot.add_cog(Links(bot))
``` |
{
"source": "JohnnyJTH/motor",
"score": 2
} |
#### File: test/tornado_tests/test_motor_web.py
```python
import datetime
import email
import hashlib
import time
import re
import unittest
import gridfs
from tornado.testing import AsyncHTTPTestCase
from tornado.web import Application
import motor
import motor.web
from motor.motor_gridfs import _hash_gridout
import test
from test.test_environment import env, CA_PEM, CLIENT_PEM
# We're using Tornado's AsyncHTTPTestCase instead of our own MotorTestCase for
# the convenience of self.fetch().
class GridFSHandlerTestBase(AsyncHTTPTestCase):
def setUp(self):
super().setUp()
self.fs = gridfs.GridFS(test.env.sync_cx.motor_test)
# Make a 500k file in GridFS with filename 'foo'
self.contents = b'Jesse' * 100 * 1024
# Record when we created the file, to check the Last-Modified header
self.put_start = datetime.datetime.utcnow().replace(microsecond=0)
file_id = 'id'
self.file_id = file_id
self.fs.delete(self.file_id)
self.fs.put(
self.contents, _id=file_id, filename='foo', content_type='my type')
item = self.fs.get(file_id)
self.contents_hash = _hash_gridout(item)
self.put_end = datetime.datetime.utcnow().replace(microsecond=0)
self.assertTrue(self.fs.get_last_version('foo'))
def motor_db(self, **kwargs):
if env.mongod_started_with_ssl:
kwargs.setdefault('tlsCAFile', CA_PEM)
kwargs.setdefault('tlsCertificateKeyFile', CLIENT_PEM)
kwargs.setdefault('tls', env.mongod_started_with_ssl)
client = motor.MotorClient(
test.env.uri,
io_loop=self.io_loop,
**kwargs)
return client.motor_test
def tearDown(self):
self.fs.delete(self.file_id)
super().tearDown()
def get_app(self):
return Application([
('/(.+)', motor.web.GridFSHandler, {'database': self.motor_db()})])
def stop(self, *args, **kwargs):
# A stop() method more permissive about the number of its positional
# arguments than AsyncHTTPTestCase.stop
if len(args) == 1:
AsyncHTTPTestCase.stop(self, args[0], **kwargs)
else:
AsyncHTTPTestCase.stop(self, args, **kwargs)
def parse_date(self, d):
date_tuple = email.utils.parsedate(d)
return datetime.datetime.fromtimestamp(time.mktime(date_tuple))
def last_mod(self, response):
"""Parse the 'Last-Modified' header from an HTTP response into a
datetime.
"""
return self.parse_date(response.headers['Last-Modified'])
def expires(self, response):
return self.parse_date(response.headers['Expires'])
class GridFSHandlerTest(GridFSHandlerTestBase):
def test_basic(self):
# First request
response = self.fetch('/foo')
self.assertEqual(200, response.code)
self.assertEqual(self.contents, response.body)
self.assertEqual(
len(self.contents), int(response.headers['Content-Length']))
self.assertEqual('my type', response.headers['Content-Type'])
self.assertEqual('public', response.headers['Cache-Control'])
self.assertTrue('Expires' not in response.headers)
etag = response.headers['Etag']
last_mod_dt = self.last_mod(response)
self.assertEqual(self.contents_hash, etag.strip('"'))
self.assertTrue(self.put_start <= last_mod_dt <= self.put_end)
# Now check we get 304 NOT MODIFIED responses as appropriate
for ims_value in (
last_mod_dt,
last_mod_dt + datetime.timedelta(seconds=1)
):
response = self.fetch('/foo', if_modified_since=ims_value)
self.assertEqual(304, response.code)
self.assertEqual(b'', response.body)
# If-Modified-Since in the past, get whole response back
response = self.fetch(
'/foo',
if_modified_since=last_mod_dt - datetime.timedelta(seconds=1))
self.assertEqual(200, response.code)
self.assertEqual(self.contents, response.body)
# Matching Etag
response = self.fetch('/foo', headers={'If-None-Match': etag})
self.assertEqual(304, response.code)
self.assertEqual(b'', response.body)
# Mismatched Etag
response = self.fetch('/foo', headers={'If-None-Match': etag + 'a'})
self.assertEqual(200, response.code)
self.assertEqual(self.contents, response.body)
def test_404(self):
response = self.fetch('/bar')
self.assertEqual(404, response.code)
def test_head(self):
response = self.fetch('/foo', method='HEAD')
# Get Etag and parse Last-Modified into a datetime
etag = response.headers['Etag']
last_mod_dt = self.last_mod(response)
# Test the result
self.assertEqual(200, response.code)
self.assertEqual(b'', response.body) # Empty body for HEAD request
self.assertEqual(
len(self.contents), int(response.headers['Content-Length']))
self.assertEqual('my type', response.headers['Content-Type'])
self.assertEqual(self.contents_hash, etag.strip('"'))
self.assertTrue(self.put_start <= last_mod_dt <= self.put_end)
self.assertEqual('public', response.headers['Cache-Control'])
def test_content_type(self):
# Check that GridFSHandler uses file extension to guess Content-Type
# if not provided
for filename, expected_type in [
('foo.jpg', 'jpeg'),
('foo.png', 'png'),
('ht.html', 'html'),
]:
# 'fs' is PyMongo's blocking GridFS
self.fs.put(b'', filename=filename)
for method in 'GET', 'HEAD':
response = self.fetch('/' + filename, method=method)
self.assertEqual(200, response.code)
# mimetypes are platform-defined, be fuzzy
self.assertIn(
expected_type,
response.headers['Content-Type'].lower())
class TZAwareGridFSHandlerTest(GridFSHandlerTestBase):
def motor_db(self):
return super().motor_db(tz_aware=True)
def test_tz_aware(self):
now = datetime.datetime.utcnow()
ago = now - datetime.timedelta(minutes=10)
hence = now + datetime.timedelta(minutes=10)
response = self.fetch('/foo', if_modified_since=ago)
self.assertEqual(200, response.code)
response = self.fetch('/foo', if_modified_since=hence)
self.assertEqual(304, response.code)
class CustomGridFSHandlerTest(GridFSHandlerTestBase):
def get_app(self):
class CustomGridFSHandler(motor.web.GridFSHandler):
def get_gridfs_file(self, bucket, filename, request):
# Test overriding the get_gridfs_file() method, path is
# interpreted as file_id instead of filename.
return bucket.open_download_stream(file_id=filename)
def get_cache_time(self, path, modified, mime_type):
return 10
def set_extra_headers(self, path, gridout):
self.set_header('quux', 'fizzledy')
return Application([
('/(.+)', CustomGridFSHandler, {'database': self.motor_db()})])
def test_get_gridfs_file(self):
# We overrode get_gridfs_file so we expect getting by filename *not* to
# work now; we'll get a 404. We have to get by file_id now.
response = self.fetch('/foo')
self.assertEqual(404, response.code)
response = self.fetch('/' + str(self.file_id))
self.assertEqual(200, response.code)
self.assertEqual(self.contents, response.body)
cache_control = response.headers['Cache-Control']
self.assertTrue(re.match(r'max-age=\d+', cache_control))
self.assertEqual(10, int(cache_control.split('=')[1]))
expires = self.expires(response)
# It should expire about 10 seconds from now
self.assertTrue(
datetime.timedelta(seconds=8)
< expires - datetime.datetime.utcnow()
< datetime.timedelta(seconds=12))
self.assertEqual('fizzledy', response.headers['quux'])
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "JohnnyKaime/CloudComputing",
"score": 3
} |
#### File: JohnnyKaime/CloudComputing/s1.py
```python
import pubnub
import time
from pubnub.pnconfiguration import PNConfiguration
from pubnub.pubnub import PubNub, SubscribeListener
messages = []
name = input("Greetings, enter your ID: ")
pnconfig = PNConfiguration()
pnconfig.subscribe_key = "sub-c-0f1e68a2-0464-11ea-b6a6-32c7c2eb6eff"
pnconfig.publish_key = "pub-c-a516a8bf-4e77-4cf5-a454-bc44137ade7f"
pnconfig.uuid = name
#pnconfig.ssl = False
pubnub = PubNub(pnconfig)
class DatabaseSync(SubscribeListener):
Data = None
count = 0
def goAhead():
#iteration = int(input("How many bet request do you want? "))
constraints = messages[-1].split()
iteration = 3
for i in range(iteration):
amount = int(input("Enter loan amount: "))
#for i in messages[-1]:
#print(i)
while( (amount > int(constraints[0])) or (amount < 1)):
print("Amount invalid")
amount = int(input("Enter loan amount: "))
interestRate = int(input("Enter desired interest rate: "))
while((interestRate > int(constraints[1])) or (interestRate < 1)):
print("Interest rate invalid")
interestRate = int(input("Enter desired interest rate: "))
year = int(input("Enter loan repay period in years: "))
while((year > int(constraints[2])) or (year < 1)):
print("Repay period in years invalid")
year = int(input("Enter loan repay period in years: "))
print("End of request\n")
requestLoans.append([pnconfig.uuid,amount,interestRate,year])
pubnub.publish().channel("Demo.2").message([requestLoans[i][0],requestLoans[i][1],requestLoans[i][2],requestLoans[i][3]]).pn_async(show)
#End of Subscriber action
pubnub.publish().channel("Demo.2").message("End").pn_async(show)
def checkGoAhead(message):
if "End" in message:
DatabaseSync.goAhead()
else:
print(message)
global messages
messages.append(message)
def message(self, pubnub, data):
self.Data = data
DatabaseSync.checkGoAhead(data.message)
#Shows message and status
def show(msg, stat):
    if msg and stat:
        pass  # print("\n", msg.timetoken, stat.status_code)
    else:
        print("Error", stat and stat.status_code)
sync = DatabaseSync()
pubnub.add_listener(sync)
pubnub.subscribe().channels("Demo.1").execute()
requestLoans = []
``` |
{
"source": "JohnnyKaime/Epic7_Shop_Refresh",
"score": 3
} |
#### File: JohnnyKaime/Epic7_Shop_Refresh/Test.py
```python
import win32gui, win32api, win32con, pyautogui, sys, time, keyboard
#def click(x,y):
#win32gui.SetCursorPos((x,y))
#win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN,0,0)
#time.sleep(0.4)
#win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP,0,0)
def checkBookmarks():
Mystic_pos=pyautogui.locateOnScreen('Pics\\mystic.png',grayscale=False,confidence=0.7)
Coven_pos=pyautogui.locateOnScreen('Pics\\covenant.png',grayscale=False,confidence=0.7)
if Mystic_pos:
Mystic_point=pyautogui.center(Mystic_pos)
x = pyautogui.locateOnScreen('Pics\\mystic.png')[0]
y = pyautogui.locateOnScreen('Pics\\Buy_button_Mystic.png')[1]
pyautogui.click(x+800,y+50)
pyautogui.click(pyautogui.locateOnScreen('Pics\\mystic.png'))
if Coven_pos:
Coven_point=pyautogui.center(Coven_pos)
x = pyautogui.locateOnScreen('Pics\\covenant.png')[0]
y = pyautogui.locateOnScreen('Pics\\covenant.png')[1]
pyautogui.click(x+800,y+50)
pyautogui.click(pyautogui.locateOnScreen('Pics\\Buy_button_Covenant.png'))
def scroll():
pyautogui.scroll(-5, x=1263, y=590)
time.sleep(3)
pyautogui.click(x=1263,y=590)
def Refresh():
    while not keyboard.is_pressed('q'):
RB_pos=pyautogui.locateOnScreen('Pics\\refresh_button.png', grayscale=False,confidence=0.7)
checkBookmarks()
scroll()
checkBookmarks()
pyautogui.click(RB_pos)
def FindEmulator():
hwnd = win32gui.FindWindow(None, "Epic 7")
try:
if not hwnd:
emuName = input("Enter name of the Epic 7 emulator. ")
hwnd = win32gui.FindWindow(None, emuName)
if not hwnd:
raise ValueError()
return hwnd
except ValueError:
print("%s application not found" % emuName)
sys.exit()
def FindSizeOfEmulator(hwnd):
rect = win32gui.GetWindowRect(hwnd)
x = rect[0]
y = rect[1]
w = rect[2] - x
h = rect[3] - y
print("Application Name: %s" % win32gui.GetWindowText(hwnd))
print("\tLocation: (%d, %d)" % (x, y))
print("\t Size: (%d, %d)" % (w, h))
def main():
bookmarks,mystics,refreshes = 0,0,0
targetWindow = FindEmulator()
FindSizeOfEmulator(targetWindow)
if __name__ == '__main__':
main()
``` |
{
"source": "Johnny-Ko/GunDetector",
"score": 3
} |
#### File: Data/Model_Weights/Wget_Download_Weights.py
```python
import requests
import os
import sys
import wget
#create this bar_progress method which is invoked automatically from wget
def bar_progress(current, total, width=80):
progress_message = "Downloading: %d%% [%d / %d] bytes" % (current / total * 100, current, total)
# Don't use print() as it will print in new line every time.
sys.stdout.write("\r" + progress_message)
sys.stdout.flush()
def wget_file(weight_file_name, destination):
fileURL = 'https://pjreddie.com/media/files/' + weight_file_name
filename = wget.download(fileURL, out=destination, bar=bar_progress)
if __name__ == "__main__":
import sys
    if len(sys.argv) != 3:
print("Usage: python wget_download_Weights.py weight_file destination_folder")
else:
# TAKE ID FROM SHAREABLE LINK
weight_file_name = sys.argv[1]
# DESTINATION FILE ON YOUR DISK
destination = sys.argv[2]
wget_file(weight_file_name, destination)
``` |
{
"source": "johnnykoo84/ds-bw",
"score": 4
} |
#### File: app/api/features.py
```python
from fastapi import APIRouter, HTTPException
import pandas as pd
import plotly.express as px
from .spotify import *
router = APIRouter()
@router.get('/features/{enter_song_here}')
async def features(enter_song_here: str):
"""
## How to use -
* First click on "try it out."
* Type in a Favorite Song Title of yours below to get back that song's ID and features.
* For instance "Piano Man" (minus the quotes).
* (Some songs have the same name so you might get back another artist who wrote a song with the same title).
* Then copy that song ID (without quotes) to enter into the predict input below.
* Predict will then use your ID to recommend 10 similar songs.
## Path Parameter -
* `enter_song_here`: Type in song by name here.
"""
keys = ['acousticness',
'danceability',
'duration_ms',
'energy',
'instrumentalness',
'key',
'liveness',
'loudness',
'mode',
'speechiness',
'tempo',
'time_signature',
'valence']
spotify = SpotifyAPI(client_id, client_secret)
track1 = spotify.search(enter_song_here, search_type="track")
songID = track1["tracks"]["items"][0]["id"]
features = spotify.get_features(songID)
name = track1["tracks"]["items"][0]["name"]
artist = spotify.artist_from_track_id(songID)
artistname = artist["album"]["artists"][0]["name"]
select_features = {x:features[x] for x in keys}
return {
'song_id': songID,
'song_name': name,
'artist_name': artistname,
'features': select_features
}
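# Hypothetical usage sketch (illustration only; host and mount path are
# assumptions): a request such as
#   GET /features/Piano%20Man
# returns JSON shaped like
#   {"song_id": "...", "song_name": "Piano Man", "artist_name": "...",
#    "features": {"acousticness": ..., "danceability": ..., ...}}
# and the returned song_id can then be supplied to the predict endpoint
# referenced in the docstring above.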
``` |
{
"source": "johnnykoo84/lambdata",
"score": 2
} |
#### File: my_lambdata/test/test_my_mode.py
```python
from .. import my_mode
class TestClass():
def test_one(self):
assert my_mode.enlarge(10) != 100
``` |
{
"source": "johnnykwan21/openrice_hk_crawler-",
"score": 3
} |
#### File: openrice_hk_crawler-/openrice_spider/pipelines.py
```python
import codecs
from datetime import datetime
from scrapy.exporters import JsonItemExporter
class OpenriceRestaurantPipeline(object):
def __init__(self):
super().__init__()
file_name_prefix = 'restaurant_data'
spider_date = datetime.now().strftime('%Y%m%d')
start_time = datetime.today().strftime("%H%M")
file_name = "_".join([file_name_prefix, str(spider_date), str(start_time)])
self.file = codecs.open(filename="{}.json".format(file_name), mode="wb")
self.exporter = JsonItemExporter(self.file, encoding="utf-8", ensure_ascii=False)
self.exporter.start_exporting()
self.shorten_url_list = set()
def process_item(self, item, spider):
if item['shorten_url'] in self.shorten_url_list:
pass
else:
self.shorten_url_list.add(item['shorten_url'])
self.exporter.export_item(item)
print("\n**** Scrapped: " + str(len(self.shorten_url_list)) + " restaurant ****")
print(item)
def close_spider(self, spider):
self.exporter.finish_exporting()
self.file.close()
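# Hypothetical usage note (illustration only): this pipeline is assumed to be
# enabled in the project's settings.py so Scrapy routes items through it, e.g.
#   ITEM_PIPELINES = {"openrice_spider.pipelines.OpenriceRestaurantPipeline": 300}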
``` |
{
"source": "johnnykwwang/Halite-III",
"score": 2
} |
#### File: alembic/versions/201807052103_5aaeafd07224_.py
```python
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '5aaeafd07224'
down_revision = None
branch_labels = None
depends_on = None
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('leagues')
op.drop_table('user_badge')
op.drop_table('badge')
op.drop_table('hackathon_snapshot')
op.drop_table('hackathon_participant')
op.drop_table('hackathon')
op.drop_table('game_participant')
op.drop_table('game_bot_stat')
op.drop_table('game_view_stat')
op.drop_table('game_stat')
op.drop_table('game')
op.drop_table('bot_history')
op.drop_table('bot')
op.drop_table('challenge_participant')
op.drop_table('challenge')
op.drop_table('user_tier_history')
op.drop_table('user_notification')
op.drop_table('user')
op.drop_table('organization_email_domain')
op.drop_table('organization')
op.drop_table('halite_1_user')
sa.Enum(name='halite_1_user_level').drop(op.get_bind())
sa.Enum(name='organization_kind').drop(op.get_bind())
sa.Enum(name='user_player_level').drop(op.get_bind())
sa.Enum(name='challenge_status').drop(op.get_bind())
sa.Enum(name='bot_compile_status').drop(op.get_bind())
sa.Enum(name='user_notification_mood').drop(op.get_bind())
# ### end Alembic commands ###
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('halite_1_user',
sa.Column('userID', sa.Integer(), nullable=False),
sa.Column('oauthID', sa.Integer(), autoincrement=False, nullable=False),
sa.Column('oauthProvider', sa.Integer(), autoincrement=False, nullable=False),
sa.Column('username', sa.String(length=32), nullable=False),
sa.Column('email', sa.String(length=64), nullable=True),
sa.Column('isRunning', sa.Boolean(), server_default=sa.text("'0'"), autoincrement=False, nullable=False),
sa.Column('compileStatus', sa.Boolean(), server_default=sa.text("'0'"), autoincrement=False, nullable=False),
sa.Column('organization', sa.String(length=64), nullable=False),
sa.Column('language', sa.String(length=16), nullable=True),
sa.Column('mu', sa.Float(), server_default=sa.text("'25'"), nullable=False),
sa.Column('sigma', sa.Float(), server_default=sa.text("'8.333'"), nullable=False),
sa.Column('rank', sa.Float(), autoincrement=False, nullable=True),
sa.Column('numSubmissions', sa.Integer(), server_default=sa.text("'0'"), autoincrement=False, nullable=False),
sa.Column('numGames', sa.Integer(), server_default=sa.text("'0'"), autoincrement=False, nullable=False),
sa.Column('creationTime', sa.TIMESTAMP(), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=True),
sa.Column('updateTime', sa.TIMESTAMP(), nullable=True),
sa.Column('onEmailList', sa.Boolean(), server_default=sa.text("'1'"), autoincrement=False, nullable=False),
sa.Column('githubEmail', sa.String(length=64), nullable=True),
sa.Column('verificationCode', sa.String(length=64), nullable=True),
sa.Column('isEmailGood', sa.Boolean(), server_default=sa.text("'0'"), autoincrement=False, nullable=False),
sa.Column('level', sa.Enum('High School', 'Undergraduate', 'Graduate', 'Professional', name='halite_1_user_level'), server_default=sa.text("'Professional'"), nullable=False),
sa.PrimaryKeyConstraint('userID'),
)
op.create_table('organization',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('organization_name', sa.Unicode(length=64), nullable=False),
sa.Column('kind', sa.Enum('High School', 'University', 'Professional School', 'Company', 'Other', name='organization_kind'), server_default=sa.text("'Other'"), nullable=False),
sa.Column('verification_code', sa.String(length=32), nullable=True),
sa.PrimaryKeyConstraint('id'),
)
op.create_table('organization_email_domain',
sa.Column('organization_id', sa.Integer(), autoincrement=False, nullable=False),
sa.Column('domain', sa.Unicode(length=255), nullable=False),
sa.ForeignKeyConstraint(['organization_id'], ['organization.id'], name='organization_email_domain_ibfk_1', ondelete="CASCADE"),
sa.PrimaryKeyConstraint('organization_id', 'domain'),
)
op.create_table('user',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('oauth_id', sa.Integer(), autoincrement=False, nullable=False),
sa.Column('oauth_provider', sa.Integer(), autoincrement=False, nullable=False),
sa.Column('username', sa.Unicode(length=40), nullable=False),
sa.Column('email', sa.Unicode(length=320), nullable=True),
sa.Column('github_email', sa.Unicode(length=320), nullable=True),
sa.Column('verification_code', sa.String(length=64), nullable=True),
sa.Column('is_active', sa.Boolean(), server_default=sa.text("'1'"), autoincrement=False, nullable=False),
sa.Column('on_email_list', sa.Boolean(), server_default=sa.text("'1'"), autoincrement=False, nullable=False),
sa.Column('is_email_good', sa.Boolean(), server_default=sa.text("'0'"), autoincrement=False, nullable=False),
sa.Column('player_level', sa.Enum('High School', 'University', 'Professional', name='user_player_level'), server_default=sa.text("'Professional'"), nullable=False),
sa.Column('organization_id', sa.Integer(), autoincrement=False, nullable=True),
sa.Column('country_code', sa.String(length=3), nullable=True),
sa.Column('country_subdivision_code', sa.String(length=10), nullable=True),
sa.Column('creation_time', sa.TIMESTAMP(timezone=True), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=True),
sa.Column('update_time', sa.TIMESTAMP(timezone=True), nullable=True),
sa.Column('api_key_hash', sa.String(length=255), nullable=True),
sa.Column('is_admin', sa.Boolean(), server_default=sa.text("'0'"), autoincrement=False, nullable=True),
sa.Column('is_gpu_enabled', sa.Boolean(), server_default=sa.text("'0'"), autoincrement=False, nullable=False),
sa.ForeignKeyConstraint(['organization_id'], ['organization.id'], name='user_ibfk_1'),
sa.PrimaryKeyConstraint('id'),
)
op.create_table('challenge',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('created', sa.TIMESTAMP(timezone=True), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False),
sa.Column('finished', sa.TIMESTAMP(timezone=True), nullable=True),
sa.Column('num_games', sa.Integer(), autoincrement=False, nullable=False),
sa.Column('status', sa.Enum('created', 'playing_game', 'finished', name='challenge_status'), server_default=sa.text("'created'"), nullable=False),
sa.Column('most_recent_game_task', sa.TIMESTAMP(timezone=True), nullable=True),
sa.Column('issuer', sa.Integer(), autoincrement=False, nullable=False),
sa.Column('winner', sa.Integer(), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['issuer'], ['user.id'], name='challenge_issuer_fk', ondelete='CASCADE'),
sa.ForeignKeyConstraint(['winner'], ['user.id'], name='challenge_winner_fk', ondelete='CASCADE'),
sa.PrimaryKeyConstraint('id'),
)
op.create_table('challenge_participant',
sa.Column('challenge_id', sa.Integer(), autoincrement=False, nullable=False),
sa.Column('user_id', sa.Integer(), autoincrement=False, nullable=False),
sa.Column('points', sa.Integer(), autoincrement=False, nullable=False),
sa.ForeignKeyConstraint(['challenge_id'], ['challenge.id'], name='challenge_participant_fk', ondelete='CASCADE'),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], name='challenge_participant_ibfk_2', ondelete='CASCADE'),
sa.PrimaryKeyConstraint('challenge_id', 'user_id'),
)
op.create_table('bot',
sa.Column('user_id', sa.Integer(), autoincrement=False, nullable=False),
sa.Column('id', sa.Integer(), autoincrement=False, nullable=False),
sa.Column('compile_status', sa.Enum('Uploaded', 'InProgress', 'Successful', 'Failed', 'Disabled', name='bot_compile_status'), nullable=False),
sa.Column('compile_start', sa.TIMESTAMP(timezone=True), nullable=True),
sa.Column('language', sa.Unicode(length=64), nullable=True),
sa.Column('version_number', sa.Integer(), server_default=sa.text("'0'"), autoincrement=False, nullable=False),
sa.Column('games_played', sa.Integer(), server_default=sa.text("'0'"), autoincrement=False, nullable=False),
sa.Column('mu', sa.Float(), server_default=sa.text("'25'"), nullable=False),
sa.Column('sigma', sa.Float(), server_default=sa.text("'8.333'"), nullable=False),
sa.Column('score', sa.Float(), server_default=sa.text("'0'"), nullable=False),
sa.Column('creation_time', sa.TIMESTAMP(timezone=True), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=True),
sa.Column('update_time', sa.TIMESTAMP(timezone=True), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=True),
sa.Column('timeout_sent', sa.Boolean(), server_default=sa.text("'0'"), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], name='bot_ibfk_2', ondelete='CASCADE'),
sa.PrimaryKeyConstraint('user_id', 'id'),
)
op.create_table('bot_history',
sa.Column('user_id', sa.Integer(), autoincrement=False, nullable=False),
sa.Column('bot_id', sa.Integer(), autoincrement=False, nullable=False),
sa.Column('version_number', sa.Integer(), autoincrement=False, nullable=False),
sa.Column('last_rank', sa.Integer(), autoincrement=False, nullable=False),
sa.Column('last_score', sa.Float(), nullable=False),
sa.Column('last_num_players', sa.Integer(), autoincrement=False, nullable=False),
sa.Column('last_games_played', sa.Integer(), autoincrement=False, nullable=True),
sa.Column('language', sa.Unicode(length=64), nullable=False),
sa.Column('when_retired', sa.TIMESTAMP(timezone=True), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=True),
sa.ForeignKeyConstraint(['user_id', 'bot_id'], ['bot.user_id', 'bot.id'], name='bot_history_ibfk_4', ondelete='CASCADE'),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], name='bot_history_ibfk_3', ondelete='CASCADE'),
sa.PrimaryKeyConstraint('user_id', 'bot_id', 'version_number'),
)
op.create_table('game',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('replay_name', sa.String(length=128), nullable=False),
sa.Column('map_width', sa.Integer(), autoincrement=False, nullable=False),
sa.Column('map_height', sa.Integer(), autoincrement=False, nullable=False),
sa.Column('map_seed', sa.Integer(), autoincrement=False, nullable=False),
sa.Column('map_generator', sa.String(length=128), nullable=False),
sa.Column('time_played', sa.TIMESTAMP(timezone=True), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=True),
sa.Column('replay_bucket', sa.Integer(), server_default=sa.text("'0'"), autoincrement=False, nullable=False),
sa.Column('challenge_id', sa.Integer(), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['challenge_id'], ['challenge.id'], name='game_challenge_fk', ondelete='CASCADE'),
sa.PrimaryKeyConstraint('id'),
)
op.create_table('game_stat',
sa.Column('game_id', sa.Integer(), autoincrement=False, nullable=False),
sa.Column('turns_total', sa.Integer(), autoincrement=False, nullable=False),
sa.ForeignKeyConstraint(['game_id'], ['game.id'], name='game_stat_ibfk_1', ondelete='CASCADE'),
sa.PrimaryKeyConstraint('game_id'),
)
op.create_table('game_view_stat',
sa.Column('game_id', sa.Integer(), autoincrement=False, nullable=False),
sa.Column('views_total', sa.Integer(), autoincrement=False, nullable=False),
sa.ForeignKeyConstraint(['game_id'], ['game.id'], name='game_view_stat_ibfk_1', ondelete='CASCADE'),
sa.PrimaryKeyConstraint('game_id'),
)
op.create_table('game_bot_stat',
sa.Column('game_id', sa.Integer(), autoincrement=False, nullable=False),
sa.Column('user_id', sa.Integer(), autoincrement=False, nullable=False),
sa.Column('bot_id', sa.Integer(), autoincrement=False, nullable=False),
sa.ForeignKeyConstraint(['game_id'], ['game.id'], name='game_bot_stat_ibfk_1', ondelete='CASCADE'),
sa.ForeignKeyConstraint(['user_id', 'bot_id'], ['bot.user_id', 'bot.id'], name='fkcompid', ondelete='CASCADE'),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], name='fkuserid', ondelete='CASCADE'),
sa.PrimaryKeyConstraint('game_id', 'user_id', 'bot_id'),
)
op.create_table('game_participant',
sa.Column('game_id', sa.Integer(), autoincrement=False, nullable=False),
sa.Column('user_id', sa.Integer(), autoincrement=False, nullable=False),
sa.Column('bot_id', sa.Integer(), autoincrement=False, nullable=False),
sa.Column('version_number', sa.Integer(), autoincrement=False, nullable=False),
sa.Column('log_name', sa.String(length=256), nullable=True),
sa.Column('rank', sa.Integer(), autoincrement=False, nullable=False),
sa.Column('player_index', sa.Integer(), autoincrement=False, nullable=False),
sa.Column('timed_out', sa.Boolean(), autoincrement=False, nullable=False),
sa.Column('mu', sa.Float(), nullable=True),
sa.Column('sigma', sa.Float(), nullable=True),
sa.Column('leaderboard_rank', sa.Integer(), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['game_id'], ['game.id'], name='game_participant_ibfk_4', ondelete='CASCADE'),
sa.ForeignKeyConstraint(['user_id', 'bot_id'], ['bot.user_id', 'bot.id'], name='game_participant_ibfk_3', ondelete='CASCADE'),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], name='game_participant_ibfk_2', ondelete='CASCADE'),
sa.PrimaryKeyConstraint('game_id', 'user_id', 'bot_id'),
)
op.create_table('hackathon',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('title', sa.Unicode(length=256), nullable=False),
sa.Column('description', sa.Unicode(length=4096), nullable=False),
sa.Column('start_date', sa.TIMESTAMP(timezone=True), nullable=False),
sa.Column('end_date', sa.TIMESTAMP(timezone=True), nullable=False),
sa.Column('verification_code', sa.String(length=32), nullable=False),
sa.Column('organization_id', sa.Integer(), autoincrement=False, nullable=True),
sa.Column('location', sa.Unicode(length=256), nullable=True),
sa.Column('thumbnail', sa.String(length=512), nullable=True),
sa.Column('is_open', sa.Boolean(), server_default=sa.text("'0'"), autoincrement=False, nullable=False),
sa.ForeignKeyConstraint(['organization_id'], ['organization.id'], name='hackathon_ibfk_1'),
sa.PrimaryKeyConstraint('id'),
)
op.create_table('user_tier_history',
sa.Column('user_id', sa.Integer(), autoincrement=False, nullable=False),
sa.Column('tier', sa.String(length=256), nullable=False),
sa.Column('last_in_tier', sa.TIMESTAMP(timezone=True), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=True),
sa.Column('total_time_in_tier', sa.Integer(), server_default=sa.text("'0'"), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], name='user_tier_history_ibfk_2', ondelete='CASCADE'),
sa.PrimaryKeyConstraint('user_id', 'tier'),
)
op.create_table('hackathon_snapshot',
sa.Column('hackathon_id', sa.Integer(), autoincrement=False, nullable=False),
sa.Column('user_id', sa.Integer(), autoincrement=False, nullable=False),
sa.Column('bot_id', sa.Integer(), autoincrement=False, nullable=False),
sa.Column('games_played', sa.Integer(), server_default=sa.text("'0'"), autoincrement=False, nullable=True),
sa.Column('score', sa.Float(), nullable=False),
sa.Column('mu', sa.Float(), nullable=False),
sa.Column('sigma', sa.Float(), nullable=False),
sa.Column('version_number', sa.Integer(), autoincrement=False, nullable=True),
sa.Column('language', sa.Unicode(length=64), nullable=True),
sa.ForeignKeyConstraint(['hackathon_id'], ['hackathon.id'], name='hackathon_snapshot_ibfk_6', ondelete='CASCADE'),
sa.ForeignKeyConstraint(['user_id', 'bot_id'], ['bot.user_id', 'bot.id'], name='hackathon_snapshot_ibfk_5', ondelete='CASCADE'),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], name='hackathon_snapshot_ibfk_4', ondelete='CASCADE'),
sa.PrimaryKeyConstraint('hackathon_id', 'user_id', 'bot_id'),
)
op.create_table('leagues',
sa.Column('id', sa.Integer(), autoincrement=False, nullable=False),
sa.Column('category', sa.Unicode(length=45), nullable=False),
sa.Column('name', sa.Unicode(length=45), nullable=False),
sa.Column('description', sa.Unicode(length=1024), nullable=False),
sa.Column('query', sa.Unicode(length=1024), nullable=False),
sa.PrimaryKeyConstraint('id'),
)
op.create_table('user_notification',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), autoincrement=False, nullable=False),
sa.Column('title', sa.Unicode(length=64), nullable=False),
sa.Column('body', sa.Unicode(length=2048), nullable=False),
sa.Column('mood', sa.Enum('error', 'neutral', 'success', name='user_notification_mood'), server_default=sa.text("'neutral'"), nullable=False),
sa.Column('creation_time', sa.TIMESTAMP(timezone=True), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], name='user_notification_ibfk_2', ondelete='CASCADE'),
sa.PrimaryKeyConstraint('id'),
)
op.create_table('hackathon_participant',
sa.Column('hackathon_id', sa.Integer(), autoincrement=False, nullable=False),
sa.Column('user_id', sa.Integer(), autoincrement=False, nullable=False),
sa.ForeignKeyConstraint(['hackathon_id'], ['hackathon.id'], name='hackathon_participant_ibfk_4', ondelete='CASCADE'),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], name='hackathon_participant_ibfk_3', ondelete='CASCADE'),
sa.PrimaryKeyConstraint('hackathon_id', 'user_id'),
)
op.create_table('badge',
sa.Column('id', sa.Integer(), autoincrement=False, nullable=False),
sa.Column('name', sa.Unicode(length=256), nullable=False),
sa.PrimaryKeyConstraint('id'),
)
op.create_table('user_badge',
sa.Column('user_id', sa.Integer(), autoincrement=False, nullable=False),
sa.Column('badge_id', sa.Integer(), autoincrement=False, nullable=False),
sa.Column('is_enabled', sa.Boolean(), server_default=sa.sql.True_(), autoincrement=False, nullable=False),
sa.Column('creation_time', sa.TIMESTAMP(timezone=True), server_default=sa.func.current_timestamp(), nullable=True),
sa.Column('update_time', sa.TIMESTAMP(timezone=True), nullable=True),
sa.ForeignKeyConstraint(['badge_id'], ['badge.id'], name='user_badge_ibfk_2', ondelete='CASCADE'),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], name='user_badge_ibfk_1', ondelete='CASCADE'),
sa.PrimaryKeyConstraint('user_id', 'badge_id'),
)
# ### end Alembic commands ###
```
#### File: alembic/versions/201807251445_b298f5f566fa_add_json_game_stats_field.py
```python
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = 'b298f5f566fa'
down_revision = '5aae<PASSWORD>4'
branch_labels = None
depends_on = None
def upgrade():
op.add_column('game',
sa.Column('stats', postgresql.JSON()))
op.drop_table('game_stat')
op.drop_table('game_bot_stat')
def downgrade():
op.create_table('game_stat',
sa.Column('game_id', sa.Integer(), autoincrement=False, nullable=False),
sa.Column('turns_total', sa.Integer(), autoincrement=False, nullable=False),
sa.ForeignKeyConstraint(['game_id'], ['game.id'], name='game_stat_ibfk_1', ondelete='CASCADE'),
sa.PrimaryKeyConstraint('game_id'),
)
op.create_table('game_bot_stat',
sa.Column('game_id', sa.Integer(), autoincrement=False, nullable=False),
sa.Column('user_id', sa.Integer(), autoincrement=False, nullable=False),
sa.Column('bot_id', sa.Integer(), autoincrement=False, nullable=False),
sa.ForeignKeyConstraint(['game_id'], ['game.id'], name='game_bot_stat_ibfk_1', ondelete='CASCADE'),
sa.ForeignKeyConstraint(['user_id', 'bot_id'], ['bot.user_id', 'bot.id'], name='fkcompid', ondelete='CASCADE'),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], name='fkuserid', ondelete='CASCADE'),
sa.PrimaryKeyConstraint('game_id', 'user_id', 'bot_id'),
)
op.drop_column('game', 'stats')
```
#### File: alembic/versions/201809251245_a37aed225eff_make_email_unique.py
```python
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = 'a<PASSWORD>'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
op.create_unique_constraint("uq_user_email", "user", ["email"])
def downgrade():
op.drop_constraint("uq_user_email", "user")
```
#### File: alembic/versions/201810101416_b56f74b8f3e5_make_usernames_team_names_actually_case_.py
```python
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
op.drop_index("uq_user_username", "user")
op.drop_index("uq_team_name", "team")
op.create_index("uq_user_username", "user", [sa.text("lower(username)")], unique=True)
op.create_index("uq_team_name", "team", [sa.text("lower(name)")], unique=True)
def downgrade():
op.drop_index("uq_user_username", "user")
op.drop_index("uq_team_name", "team")
op.create_index("uq_user_username", "user", [sa.text("lower(username)")])
op.create_index("uq_team_name", "team", [sa.text("lower(name)")])
```
#### File: apiserver/coordinator/storage.py
```python
import base64
import binascii
import datetime
import hashlib
import io
import os.path
import tempfile
import threading
import zipfile
import flask
import google.cloud.storage as gcloud_storage
import google.cloud.exceptions as gcloud_exceptions
from werkzeug.contrib.cache import FileSystemCache
from .. import config, model, util
from .blueprint import coordinator_api
# Cache the worker blob to avoid repeated requests to object storage
cache_dir = tempfile.TemporaryDirectory()
# Lock that prevents multiple threads from initializing the cache
cache_lock = threading.Lock()
# CV for other threads to block upon
cache_cv = threading.Condition()
cache = FileSystemCache(cache_dir.name, default_timeout=60*5)
@coordinator_api.route("/download/worker", methods=["GET"])
def download_source_blob():
"""Retrieve the worker blob from object storage."""
cached_blob = cache.get(config.WORKER_ARTIFACT_KEY)
if cached_blob is None:
if cache_lock.acquire(blocking=False):
print("Getting from GCloud", config.WORKER_ARTIFACT_KEY)
# Retrieve from GCloud
try:
gcloud_blob = gcloud_storage.Blob(
config.WORKER_ARTIFACT_KEY,
model.get_deployed_artifacts_bucket(),
chunk_size=10 * 262144)
cached_blob = gcloud_blob.download_as_string()
cache.set(config.WORKER_ARTIFACT_KEY, cached_blob)
except gcloud_exceptions.NotFound:
raise util.APIError(404, message="Worker blob not found.")
finally:
cache_lock.release()
with cache_cv:
cache_cv.notify_all()
else:
with cache_cv:
while not cache.has(config.WORKER_ARTIFACT_KEY):
cache_cv.wait()
cached_blob = cache.get(config.WORKER_ARTIFACT_KEY)
if cached_blob is None:
raise util.APIError(404, message="Worker blob not found.")
print("Building buffer")
buffer = io.BytesIO()
buffer.write(cached_blob)
buffer.seek(0)
return flask.send_file(buffer, mimetype="application/gzip",
as_attachment=True,
attachment_filename="Halite.tgz")
@coordinator_api.route("/botFile", methods=["POST"])
def upload_bot():
"""Save a compiled bot to object storage."""
user_id = flask.request.form.get("user_id", None)
bot_id = flask.request.form.get("bot_id", None)
if "bot.zip" not in flask.request.files:
raise util.APIError(400, message="Please provide the bot file.")
uploaded_file = flask.request.files["bot.zip"]
# Save to GCloud
blob = gcloud_storage.Blob("{}_{}".format(user_id, bot_id),
model.get_bot_bucket(),
chunk_size=262144)
blob.upload_from_file(uploaded_file)
return util.response_success()
@coordinator_api.route("/botFile", methods=["GET"])
def download_bot():
"""Retrieve a compiled or uncompiled bot from object storage."""
user_id = flask.request.values.get("user_id", None)
bot_id = flask.request.values.get("bot_id", None)
compile = flask.request.values.get("compile", False)
botname = "{}_{}".format(user_id, bot_id)
if user_id == "gym":
bucket = model.get_gym_bot_bucket()
botname = bot_id + ".zip"
elif bot_id == "editor":
return download_editor_bot(user_id)
elif compile:
bucket = model.get_compilation_bucket()
else:
bucket = model.get_bot_bucket()
# Retrieve from GCloud
try:
blob = bucket.get_blob(botname)
buffer = io.BytesIO()
blob.download_to_file(buffer)
buffer.seek(0)
blob_hash = binascii.hexlify(base64.b64decode(blob.md5_hash)).decode('utf-8')
response = flask.make_response(flask.send_file(
buffer,
mimetype="application/zip",
as_attachment=True,
attachment_filename=botname + ".zip"))
# Give hash in the header to avoid a separate request
response.headers["X-Hash"] = blob_hash
return response
except gcloud_exceptions.NotFound:
raise util.APIError(404, message="Bot not found.")
def download_editor_bot(user_id):
bucket = model.get_editor_bucket()
prefix = "{}/".format(user_id)
blobs = bucket.list_blobs(prefix=prefix)
zipblob = io.BytesIO()
with zipfile.ZipFile(zipblob, "w") as zipresult:
for blob in blobs:
path = os.path.relpath(blob.name, prefix)
contents = blob.download_as_string()
zipresult.writestr(path, contents)
zipblob.seek(0)
blob_hash = hashlib.md5(zipblob.getbuffer()).hexdigest()
response = flask.make_response(flask.send_file(
zipblob,
mimetype="application/zip",
as_attachment=True,
attachment_filename="{}_editor.zip".format(user_id)))
# Give hash in the header to avoid a separate request
response.headers["X-Hash"] = blob_hash
return response
@coordinator_api.route("/botHash")
def hash_bot():
"""Get the MD5 hash of a compiled bot."""
user_id = flask.request.args.get("user_id", None)
bot_id = flask.request.args.get("bot_id", None)
compile = flask.request.args.get("compile", False)
if not user_id or not bot_id:
raise util.APIError(400, message="Please provide user and bot ID.")
if user_id == "gym":
bucket = model.get_gym_bot_bucket()
elif compile:
bucket = model.get_compilation_bucket()
else:
bucket = model.get_bot_bucket()
blob = bucket.get_blob("{}_{}".format(user_id, bot_id))
if blob is None:
raise util.APIError(400, message="Bot does not exist.")
return util.response_success({
"hash": binascii.hexlify(base64.b64decode(blob.md5_hash)).decode('utf-8'),
})
@coordinator_api.route("/uploadLog", methods=["POST"])
def upload_worker_log():
"""Store a log file from a worker."""
worker = flask.request.form["worker"]
if "log" not in flask.request.files:
raise util.APIError(400, message="Please provide the log file.")
uploaded_file = flask.request.files["log"]
# Save to GCS
blob = gcloud_storage.Blob("{}_{}".format(datetime.datetime.now().isoformat(), worker),
model.get_worker_log_bucket(),
chunk_size=262144)
blob.upload_from_file(uploaded_file)
return util.response_success()
```
#### File: apiserver/apiserver/ondemand.py
```python
import datetime
import time
import google.cloud.datastore as gcloud_datastore
import google.cloud.storage as gcloud_storage
from . import config, model, util
ONDEMAND_KIND = "ondemand"
def key_from_user_id(user_id):
"""
Create a datastore key from a user ID.
Each user has at most 1 ondemand game task.
"""
return gcloud_datastore.Key(
ONDEMAND_KIND,
user_id,
project=config.GCLOUD_PROJECT,
)
def check_status(user_id):
client = model.get_datastore_client()
query = client.query(kind=ONDEMAND_KIND)
query.key_filter(key_from_user_id(user_id))
result = list(query.fetch(limit=1))
return result[0] if result else None
def launch(user_id, opponents, environment_parameters, metadata):
"""
Create a new ondemand game task.
"""
client = model.get_datastore_client()
entity = gcloud_datastore.Entity(key_from_user_id(user_id))
entity.update({
"status": "created",
"opponents": opponents,
"environment_parameters": environment_parameters,
"last_updated": datetime.datetime.now(datetime.timezone.utc),
"retries": 0,
# Used to associate a game with a tutorial or something like
# that
"metadata": metadata,
"objective": {
"completed": False,
},
})
client.put(entity)
def continue_game(user_id, num_turns, snapshot_index):
"""
Request that an ondemand game be resumed.
"""
client = model.get_datastore_client()
query = client.query(kind=ONDEMAND_KIND)
query.key_filter(key_from_user_id(user_id))
result = list(query.fetch(limit=1))
if not result:
raise util.APIError(
404,
message="Ondemand game not found for user {}.".format(user_id))
task = result[0]
if task["status"] == "failed":
raise util.APIError(
400,
message="Ondemand game failed for user {}. Please restart.".format(user_id))
elif task["status"] not in ("completed", "created"):
raise util.APIError(
400,
message="Ondemand game not ready for user {}. Please wait.".format(user_id))
# Resume game from snapshot of state if this is not the first time
# we're starting it
if task["status"] == "completed" and "game_output" in task:
task["environment_parameters"]["from-snapshot"] = \
task["snapshots"][snapshot_index]["snapshot"]
task.update({
"status": "pending",
"last_updated": datetime.datetime.now(datetime.timezone.utc),
"retries": 0,
"objective": {
"completed": False,
},
})
task["environment_parameters"]["turn-limit"] = num_turns
client.put(task)
# Base seconds to wait if task assignment conflicts.
TASK_CONFLICT_BACKOFF = 1
# Multiplicative increase for wait time
TASK_CONFLICT_FACTOR = 2
# Maximum wait time
TASK_CONFLICT_BACKOFF_MAX = 16
# Maximum minutes before task is stale and can be rescheduled.
TASK_MAX_AGE = 5
# Maximum number of times a task will be retried.
TASK_MAX_RETRIES = 3
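# With these defaults the polling loop in pending_task() sleeps 1s, 2s, 4s, 8s,
# then 16s between empty fetches (capped at TASK_CONFLICT_BACKOFF_MAX), while a
# task left "running" for more than TASK_MAX_AGE minutes is rescheduled and,
# after more than TASK_MAX_RETRIES retries, marked as failed.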
def pending_task():
"""
Look for a pending ondemand game task.
"""
client = model.get_datastore_client()
current_backoff = TASK_CONFLICT_BACKOFF
while True:
# Search first for games that are stuck
stale_cutoff = datetime.datetime.now(datetime.timezone.utc) - datetime.timedelta(minutes=TASK_MAX_AGE)
query = client.query(kind=ONDEMAND_KIND)
query.add_filter("status", "=", "running")
query.add_filter("last_updated", "<", stale_cutoff)
query.order = ["-last_updated"]
result = list(query.fetch(limit=1))
# Search for regular games
if not result:
query = client.query(kind=ONDEMAND_KIND)
query.add_filter("status", "=", "pending")
query.order = ["-last_updated"]
result = list(query.fetch(limit=1))
if result:
# Make sure tasks are only assigned once.
with client.transaction() as xact:
task = client.get(result[0].key)
if (task["status"] == "running" and
task["last_updated"] < stale_cutoff and
task["retries"] > TASK_MAX_RETRIES):
task["status"] = "failed"
task["last_updated"] = datetime.datetime.now(datetime.timezone.utc)
xact.put(task)
elif task["status"] == "pending" or (
task["status"] == "running" and
task["last_updated"] < stale_cutoff):
task["status"] = "running"
task["last_updated"] = datetime.datetime.now(datetime.timezone.utc)
task["retries"] += 1
xact.put(task)
return task
# Otherwise, wait and retry
time.sleep(current_backoff)
current_backoff = min(
TASK_CONFLICT_BACKOFF_MAX,
TASK_CONFLICT_FACTOR * current_backoff)
else:
# No task available
return None
def update_task(user_id, game_output, files):
"""
Update the status of an ondemand game after play.
"""
client = model.get_datastore_client()
query = client.query(kind=ONDEMAND_KIND)
query.key_filter(key_from_user_id(user_id))
result = list(query.fetch(limit=1))
if not result:
return
task = result[0]
task["status"] = "completed"
current_time = datetime.datetime.now(datetime.timezone.utc)
if game_output:
# Track history of game state snapshots, so that user can
# rewind. Use case: user takes next step in tutorial and fails;
# needs previous game state, not current game state, to continue.
if "snapshots" not in task:
task["snapshots"] = []
# task["snapshots"].append({
# "snapshot": game_output["final_snapshot"],
# "updated_at": current_time,
# })
del game_output["final_snapshot"]
task["game_output"] = game_output
task["last_updated"] = current_time
task["retries"] = 0
# TODO: once we track tutorials, have this reflect tutorial
# objective completion status instead of victory
# TODO: will the relevant player always be player 0?
task["objective"] = {
"completed": game_output and game_output["stats"]["0"]["rank"] == 1,
}
if "replay" in files:
bucket = model.get_ondemand_replay_bucket()
replay_key = "ondemand_{}".format(user_id)
blob = gcloud_storage.Blob(replay_key, bucket, chunk_size=262144)
blob.upload_from_file(files["replay"])
if "error_log" in files:
bucket = model.get_ondemand_replay_bucket()
log_key = "ondemand_log_{}".format(user_id)
blob = gcloud_storage.Blob(log_key, bucket, chunk_size=262144)
blob.upload_from_file(files["error_log"])
task["error_log"] = True
task["crashed"] = game_output and game_output["terminated"].get("0", False)
else:
task["error_log"] = None
task["crashed"] = False
if "bot_log" in files:
bucket = model.get_ondemand_replay_bucket()
log_key = "ondemand_bot_log_{}".format(user_id)
blob = gcloud_storage.Blob(log_key, bucket, chunk_size=262144)
blob.upload_from_file(files["bot_log"])
task["bot_log"] = True
else:
task["bot_log"] = None
if "compile_error" in files:
task["compile_error"] = files["compile_error"]
else:
task["compile_error"] = None
client.put(task)
```
#### File: apiserver/apiserver/server.py
```python
from . import app, setup_logging
from . import login
from . import web
from werkzeug.contrib.fixers import ProxyFix
app.wsgi_app = ProxyFix(app.wsgi_app)
import flask
import flask_oauthlib.client
from flask_compress import Compress
compress = Compress()
compress.init_app(app)
setup_logging("api_server.log", app.logger)
setup_logging("oauth.log", flask_oauthlib.client.log)
setup_logging("login.log", login.login_log)
app.register_blueprint(login.oauth_login, url_prefix="/v1/login")
app.register_blueprint(login.oauth_logout, url_prefix="/v1/logout")
app.register_blueprint(web.web_api, url_prefix="/v1/api")
@app.before_request
def force_https():
if not flask.request.is_secure and flask.request.endpoint and \
not flask.request.url.startswith('http://localhost') and \
            not app.config["DEBUG"] and \
flask.request.endpoint != 'health_check':
return flask.redirect(flask.request.url.replace('http://', 'https://'))
```
#### File: apiserver/web/organization.py
```python
import uuid
import flask
import sqlalchemy
from .. import model, util
from . import util as web_util
from .blueprint import web_api
@web_api.route("/organization")
@util.cross_origin(methods=["GET"])
def list_organizations():
result = []
offset, limit = web_util.get_offset_limit()
where_clause, order_clause, _ = web_util.get_sort_filter({
"organization_id": model.organizations.c.id,
"name": model.organizations.c.organization_name,
"type": model.organizations.c.kind,
})
with model.read_conn() as conn:
# Don't limit this query
query = model.organizations.select() \
.where(where_clause).order_by(*order_clause) \
.offset(offset)
orgs = conn.execute(query)
for org in orgs.fetchall():
result.append({
"organization_id": org["id"],
"name": org["organization_name"],
"type": org["kind"],
})
return flask.jsonify(result)
@web_api.route("/organization/<int:org_id>", methods=["GET"])
@util.cross_origin(methods=["GET"])
def get_organization(org_id):
with model.read_conn() as conn:
org = conn.execute(model.organizations.select().where(
model.organizations.c.id == org_id
)).first()
if not org:
raise util.APIError(404, message="Organization not found.")
return flask.jsonify({
"organization_id": org["id"],
"name": org["organization_name"],
"type": org["kind"],
})
@web_api.route("/organization", methods=["POST"])
@web_util.requires_login(accept_key=True, admin=True)
def create_organization(*, user_id):
org_body = flask.request.get_json()
if "name" not in org_body:
raise util.APIError(400, message="Organization must be named.")
if "type" not in org_body:
raise util.APIError(400, message="Organization must have a type.")
verification_code = None
if org_body.get("require_code", False):
verification_code = uuid.uuid4().hex
with model.engine.connect() as conn:
org_id = conn.execute(model.organizations.insert().values(
organization_name=org_body["name"],
kind=org_body["type"],
verification_code=verification_code,
)).inserted_primary_key[0]
response = {
"organization_id": org_id,
}
if verification_code:
response["verification_code"] = verification_code
return util.response_success(response, status_code=201)
@web_api.route("/organization/<int:org_id>", methods=["PUT"])
@web_util.requires_login(accept_key=True, admin=True)
def update_organization(org_id, *, user_id):
fields = flask.request.get_json()
columns = {
"name": "organization_name",
"type": "kind",
}
record = {}
for key in fields:
if key not in columns:
raise util.APIError(400, message="Cannot update '{}'".format(key))
record[columns[key]] = fields[key]
if record:
with model.engine.connect() as conn:
conn.execute(model.organizations.update().where(
model.organizations.c.id == org_id
).values(**record))
return util.response_success()
@web_api.route("/organization/<int:org_id>/email_domain", methods=["GET"])
@web_util.requires_login(accept_key=True, admin=True)
def list_organization_email_domains(org_id, *, user_id):
result = []
with model.read_conn() as conn:
domains = conn.execute(model.organization_email_domains.select(
model.organization_email_domains.c.organization_id == org_id
))
for domain in domains.fetchall():
result.append(domain["domain"])
return flask.jsonify(result)
@web_api.route("/organization/<int:org_id>/email_domain", methods=["POST"])
@web_util.requires_login(accept_key=True, admin=True)
def create_organization_email_domain(org_id, *, user_id):
domains = []
json_body = flask.request.get_json()
if json_body:
if "domain" in json_body:
domains.append(json_body["domain"])
elif "domains" in json_body:
domains.extend(json_body["domains"])
else:
if "domain" in flask.request.form:
domains.append(flask.request.form["domain"])
elif "domains" in flask.request.form:
domains.extend(flask.request.form.getlist("domains"))
with model.engine.connect() as conn:
org = conn.execute(model.organizations.select().where(
model.organizations.c.id == org_id
)).first()
if org is None:
raise util.APIError(404, message="Organization does not exist.")
conn.execute(
model.organization_email_domains.insert(),
[
{
"organization_id": org_id,
"domain": domain,
}
for domain in domains
]
)
return util.response_success(status_code=201)
@web_api.route("/organization/<int:org_id>", methods=["DELETE"])
@web_util.requires_login(accept_key=True, admin=True)
def delete_organization(org_id, *, user_id):
with model.engine.connect() as conn:
with conn.begin() as transaction:
count = conn.execute(sqlalchemy.sql.select([
sqlalchemy.sql.func.count()
]).select_from(model.users).where(
model.users.c.organization_id == org_id
)).first()[0]
if count > 0:
raise util.APIError(
400, message="Cannot delete organization with members.")
conn.execute(model.organizations.delete().where(
model.organizations.c.id == org_id
))
return util.response_success()
```
#### File: apiserver/worker/backend.py
```python
import logging
import urllib.request
import requests
from hashlib import md5
import json
import os
from time import gmtime, strftime, sleep
# Needs to match corresponding value in apiserver/config.py
# This is the default value, 100 MiB
MAX_BOT_UPLOAD_SIZE = 100 * 1024 * 1024
# Maximum wait time in between compiled bot archive upload attempts,
# in seconds
MAX_UPLOAD_BACKOFF = 32
with open("config.json") as configfile:
config = json.load(configfile)
MANAGER_URL = config["MANAGER_URL"]
SECRET_FOLDER = config["SECRET_FOLDER"]
CAPABILITIES = config.get("CAPABILITIES", [])
SERVER_ID = config.get("RANDOM_ID", "notset")
provided_size = config.get("MAX_BOT_UPLOAD_SIZE", MAX_BOT_UPLOAD_SIZE)
if provided_size:
MAX_BOT_UPLOAD_SIZE = provided_size
def getTask(kind="task"):
"""Gets either a run or a compile task from the API"""
params = {
"capability": CAPABILITIES,
}
content = requests.get(MANAGER_URL + kind, params=params).text
logging.info("Task call %s\n" % content)
if content == "null":
return None
else:
return json.loads(content)
def getBotHash(user_id, bot_id, is_compile=False):
"""Gets the checksum of a user's bot's zipped source code"""
params = {
"user_id": user_id,
"bot_id": bot_id
}
if is_compile:
params["compile"] = 1
result = requests.get(MANAGER_URL+"botHash", params=params)
logging.debug("Getting bot hash %s\n" % result.text)
return json.loads(result.text).get("hash")
def storeBotLocally(user_id, bot_id, storage_dir, is_compile=False):
"""
Download and store a bot's zip file locally
Checks the file's checksum to make sure the file was downloaded properly
"""
iterations = 0
while iterations < 100:
url = MANAGER_URL + "botFile?user_id={}&bot_id={}".format(user_id, bot_id)
if is_compile:
url += "&compile=1"
logging.debug("Bot file url %s\n" % url)
remote_zip = urllib.request.urlopen(url)
zip_filename = remote_zip.headers.get('Content-disposition').split("filename")[1]
zip_path = os.path.join(storage_dir, zip_filename)
if os.path.exists(zip_path):
os.remove(zip_path)
remote_zip_contents = remote_zip.read()
remote_zip.close()
local_zip = open(zip_path, "wb")
local_zip.write(remote_zip_contents)
local_zip.close()
content_hash = md5(remote_zip_contents).hexdigest()
remote_hash = remote_zip.headers.get("X-Hash")
if content_hash != remote_hash:
iterations += 1
continue
return zip_path
raise RuntimeError("Could not download bot with valid hash, aborting")
def storeBotRemotely(user_id, bot_id, zip_file_path):
"""Posts a bot file to the manager"""
zip_contents = open(zip_file_path, "rb").read()
if len(zip_contents) > MAX_BOT_UPLOAD_SIZE:
raise RuntimeError("Bot archive exceeds maximum size of 100 MiB.")
iterations = 0
local_hash = md5(zip_contents).hexdigest()
backoff = 1
while iterations < 10:
r = requests.post(MANAGER_URL+"botFile",
data={
"user_id": str(user_id),
"bot_id": str(bot_id),
},
files={"bot.zip": zip_contents})
logging.debug("Posting compiled bot archive %s\n" % r.text)
if r.status_code >= 400 and r.status_code <= 499:
logging.error("Got a 4xx status code %s", r.status_code)
r.raise_for_status()
# Try again if local and remote hashes differ
if local_hash != getBotHash(user_id, bot_id):
logging.debug("Hashes do not match! Redoing file upload...\n")
iterations += 1
sleep(backoff)
if backoff < MAX_UPLOAD_BACKOFF:
backoff *= 2
continue
return
raise RuntimeError("Could not upload bot with valid hash, aborting")
def compileResult(user_id, bot_id, did_compile, language, errors=None):
"""Posts the result of a compilation task"""
r = requests.post(MANAGER_URL+"compile", data={
"user_id": user_id,
"bot_id": bot_id,
"did_compile": int(did_compile),
"language": language,
"errors": errors,
"server_id": SERVER_ID,
"capability": CAPABILITIES,
})
logging.debug("Posted compile result %s\n" % r.text)
def gameResult(users, game_output, extra_metadata, url_path="game"):
"""
POST the results of a game to the game coordinator.
:param users: The participating users and their metadata.
:param game_output: The parsed JSON result the game gives in quiet mode.
:param extra_metadata: A dictionary of extra metadata to pass back to
the coordinator.
:param url_path: The coordinator endpoint to POST the result to.
:return:
"""
replay_path = game_output["replay"]
logging.debug("Posting game result %s (GMT)\n" % str(strftime("%Y-%m-%d %H:%M:%S", gmtime())))
files = {os.path.basename(replay_path): open(replay_path, "rb").read()}
for path in game_output["error_logs"].values():
files[os.path.basename(path)] = open(path, "rb").read()
for user in users:
if user.get('bot_logs'):
log_filename = '{}_{}_{}.log'.format(user['user_id'], user['bot_id'], user['username'])
files[log_filename] = user['bot_logs']
user['log_filename'] = log_filename
data = {
"users": json.dumps(users),
"game_output": json.dumps(game_output),
"server_id": SERVER_ID,
"capability": CAPABILITIES,
}
for key, value in extra_metadata.items():
data[key] = json.dumps(value)
logging.info("Uploading game result")
logging.info(json.dumps(users, indent=4))
logging.info(json.dumps(game_output, indent=4))
r = requests.post(MANAGER_URL + url_path, data=data, files=files)
logging.info("Got game result %s (GMT)\n" % str(strftime("%Y-%m-%d %H:%M:%S", gmtime())))
logging.debug("\n-------Game result:-----")
logging.info(r.text)
logging.debug("------------------------\n")
def ondemandResult(users, game_output, extra_metadata):
gameResult(users, game_output, extra_metadata, url_path="ondemand_result")
def ondemandError(users, extra_metadata, language, log):
"""
POST a compilation error that occurred during an ondemand game.
"""
data = {
"users": json.dumps(users),
"language": language,
"log": log,
}
for key, value in extra_metadata.items():
data[key] = json.dumps(value)
r = requests.post(MANAGER_URL + "ondemand_compile", data=data)
logging.debug("\n-------Game result:-----")
logging.debug(r.text)
logging.debug("------------------------\n")
```
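For orientation, a hypothetical sketch of how a worker could chain these helpers for a compile job; the task endpoint kind, its key names, and the compile step are assumptions — only the function signatures come from the module above.
```python
# Hypothetical worker flow built on the helpers above; endpoint kind, task key
# names and the compile step are assumptions, not taken from this repository.
import backend

task = backend.getTask("task")
if task and "user_id" in task and "bot_id" in task:
    user_id, bot_id = task["user_id"], task["bot_id"]
    # Hash-verified download of the bot's source archive.
    zip_path = backend.storeBotLocally(user_id, bot_id, "/tmp/bots", is_compile=True)
    # ... unpack, compile, re-zip into zip_path ...
    # Upload with retry/backoff until the remote hash matches.
    backend.storeBotRemotely(user_id, bot_id, zip_path)
    backend.compileResult(user_id, bot_id, did_compile=True, language="Python")
```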
#### File: hlt_client/hlt_client/download_game.py
```python
import os
import zstd
import re
import requests
import multiprocessing
from concurrent.futures.thread import ThreadPoolExecutor
from . import client
_ITEMS_KEY = 'items'
_SELFLINK_KEY = 'selfLink'
_REPLAY_KEY = 'replay'
_REPLAY_CLASS_KEY = 'replay_class'
_MEDIA_DOWNLOAD_OPTION = '?alt=media'
_PREFIX_OPTION = '?prefix='
_BUCKET_POSITION = -3
_OBJECT_POSITION = -1
_REPLAY_PREPEND = 'replay-'
_PATH_DELIMITER = '/'
class GameDownloader:
_GOLD_BUCKET_URI = 'https://www.googleapis.com/storage/v1/b/ts2018-halite-3-gold-replays/o'
_SALT_BUCKET_URI = 'https://www.googleapis.com/storage/v1/b/ts2018-halite-3-replays/o'
_BUCKET_URIS = [_SALT_BUCKET_URI, _GOLD_BUCKET_URI]
def __init__(self, destination, buckets, prefix, decompress):
"""
Download replays files
:param destination: Where to download
:param buckets: List of bucket(s) to fetch from
:param prefix: What prefix to fetch from
:param decompress: Whether to decompress replays
"""
if not os.path.isdir(destination):
raise FileNotFoundError("Directory path does not exist")
self.destination = destination
self.objects = []
for bucket in buckets:
self.objects += self._parse_objects(requests.get(bucket + _PREFIX_OPTION + prefix).json())
self.decompress = decompress
@staticmethod
def _parse_objects(bucket_json):
"""
Parse GCS response to get URIs for objects
:param bucket_json: The response from GCS
:return: parse URIs for objects
"""
response = []
if _ITEMS_KEY not in bucket_json:
raise ValueError("No games found. (When downloading by date, use YYYYMMDD format.)")
for bucket_object in bucket_json[_ITEMS_KEY]:
response.append(bucket_object[_SELFLINK_KEY])
return response
@staticmethod
def _unzip(game_id, game_binary):
"""
Takes a zstd file and unzips it
:param game_id: The unique id for the game object (name of resulting file)
:param game_binary: The zipped binary
:return: the file unzipped if possible
"""
try:
return zstd.loads(game_binary).decode()
except Exception:
raise ValueError("Could not unzip file at: {}!".format(game_id))
@staticmethod
def _build_object_uri(bucket_class, object_id):
"""
Creates a GCS URI from the bucket id and object id
:param bucket_class: The bucket id in GCS
:param object_id: The object id in GCS
:return: the constructed GCS URI
"""
return "{}/{}".format(GameDownloader._BUCKET_URIS[bucket_class], object_id)
@staticmethod
def _parse_id_from_url(url):
"""
Take a GCS URL and transform into a filename
:param url: the GCS URL
:return: the constructed filename
"""
split_url = url.split(_PATH_DELIMITER)
return "{}_{}".format(split_url[_BUCKET_POSITION], split_url[_OBJECT_POSITION])
def _get_object(self, url):
"""
Download a single object from GCS at the designated URL and save it to the destination
:param url: The url to download from
:return: Nothing
"""
game_id = self._parse_id_from_url(url)
try:
print("downloading {}".format(url))
if self.decompress:
with open(os.path.join(self.destination, game_id + '.json'), 'w') as fout:
fout.writelines(self._unzip(game_id, requests.get(url + _MEDIA_DOWNLOAD_OPTION).content))
else:
with open(os.path.join(self.destination, game_id + '.hlt'), 'wb') as fout:
fout.write(requests.get(url + _MEDIA_DOWNLOAD_OPTION).content)
except Exception:
raise IOError("Could not write file {} to {}".format(game_id, self.destination))
def get_objects(self):
"""
Download all desired replays in parallel threads (up to the number of cores the machines has)
:return: Nothing
"""
with ThreadPoolExecutor(max_workers=multiprocessing.cpu_count()) as executor:
for url in self.objects:
executor.submit(self._get_object, url)
class DatedGameDownloader(GameDownloader):
def __init__(self, destination, date, all_bots=False, decompress=False):
"""
Download games for a date
:param destination: Where to download
:param date: Which date to download
:param all_bots: True if you wish to download silver ranked bots as well. False for only gold.
"""
buckets = [self._GOLD_BUCKET_URI] + ([self._SALT_BUCKET_URI] if all_bots else [])
super(DatedGameDownloader, self).__init__(destination, buckets, _REPLAY_PREPEND + date, decompress)
class UserGameDownloader(GameDownloader):
_USER_BOT_URI = 'https://api.2018.halite.io/v1/api/user/{}/match?limit={}&offset={}'
_FETCH_THRESHOLD = 250
_BUCKETS = []
def __init__(self, destination, user_id, limit, decompress=False):
"""
Download games for a user
:param destination: Where to download
:param user_id: Which user's replays to fetch
:param limit: How many replays to fetch (max)
"""
super(UserGameDownloader, self).__init__(destination, [], None, decompress)
self.objects = self._parse_user_metadata(self._fetch_metadata(user_id, limit))
def _fetch_metadata(self, user_id, limit):
"""
Retrieves paginated game metadata from the halite servers for a specified user up to limit items
:param user_id: The id of the user to fetch
:param limit: The maximum number of items to fetch
:return: The full metadata of items
"""
print('Fetching Metadata')
current = 0
result_set = []
while current <= limit:
current_limit = self._FETCH_THRESHOLD if ((limit - current) >= self._FETCH_THRESHOLD) else (limit - current)
result_set += requests.get(self._USER_BOT_URI.format(user_id, current_limit, current)).json()
current += self._FETCH_THRESHOLD
print('Finished metadata fetch. Found {} game files.'.format(len(result_set)))
return result_set
@staticmethod
def _parse_user_metadata(user_json):
"""
Takes response from API server and parses to get all user replays
:param user_json: The response from the API server
:return: the paths to the bucket objects with the replays for the user
"""
response = []
for user_object in user_json:
response.append(GameDownloader._build_object_uri(user_object[_REPLAY_CLASS_KEY], user_object[_REPLAY_KEY]))
return response
def _valid_date(date):
"""
Whether the date requested matches the desired format (starts with 1 to 8 digits)
:param date: The date to check
:return: True if valid, false otherwise
"""
return re.compile(r'^\d{1,8}').search(date)
def download(mode, destination, date, all_bots, default_user_id, user_id,
limit, decompress):
"""
Downloads bot replay files matching the designated requirements
:param mode: Whether to download files matching a date or a user id
:param destination: Where to download the files to
:param date: Which date to download the files to, if there is a date
:param all_bots: If dated, whether to download all bots (silver/gold)
:param default_user_id: What is the user id of the user making the request
:param user_id: What is the user id desired if any
:param limit: How many replays to download (currently only in user mode)
:param decompress: Whether to decompress the replays.
:return: Nothing
"""
print('Downloading game files')
if decompress:
print('Decompressing replays before saving.')
if mode == client.REPLAY_MODE_DATE:
if not _valid_date(date):
raise ValueError("Date must match format YYYYMMDD")
DatedGameDownloader(destination, date, all_bots, decompress).get_objects()
elif mode == client.REPLAY_MODE_USER:
if not (default_user_id or user_id):
raise ValueError("Cannot run default mode without authenticating .Please run `client.py --auth` first.")
UserGameDownloader(destination, default_user_id if not user_id else user_id, limit, decompress).get_objects()
print('Finished writing files to desired location')
```
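A short usage sketch of the downloader classes above; the destination directory, date and user id are placeholders.
```python
# Assumes ./replays exists and the GCS buckets / API server are reachable.
from hlt_client.download_game import DatedGameDownloader, UserGameDownloader

# Gold-tier replays uploaded on a given day, decompressed to JSON files.
DatedGameDownloader("./replays", "20181015", all_bots=False, decompress=True).get_objects()

# Up to 10 replays for a specific user, kept as compressed .hlt files.
UserGameDownloader("./replays", user_id=1234, limit=10).get_objects()
```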
#### File: hlt_client/hlt_client/util.py
```python
from . import output
def confirm(prompt, json_confirm=False):
if output.mode() == output.JSON:
return json_confirm
while True:
print(prompt + " [yn]:", end=' ')
result = input().lower().strip()
if result and result in "yn":
return True if result == "y" else False
print("Please enter y/n.")
```
#### File: tools/manager/keyboard_detection.py
```python
import sys, termios
from select import select
class keyboard_detection:
'''
Use in a with statement to enable the appropriate terminal mode to detect keyboard presses
without blocking for input. Used this way, the with statement puts a boolean detection
function in the target variable. The resulting function can be called any number of times
until a keypress is detected. Sample code:
with keyboard_detection() as key_pressed:
while not key_pressed():
sys.stdout.write('.')
sys.stdout.flush()
sleep(0.5)
print('done')
Upon exiting the with code block, the terminal is reverted to its calling (normal) state.
The sys.stdout.flush() is important when in the keyboard detection mode; otherwise, text
output won't be seen.
'''
def __enter__(self):
# save the terminal settings
self.fd = sys.stdin.fileno()
self.new_term = termios.tcgetattr(self.fd)
self.old_term = termios.tcgetattr(self.fd)
# new terminal setting unbuffered
self.new_term[3] = (self.new_term[3] & ~termios.ICANON & ~termios.ECHO)
# switch to unbuffered terminal
termios.tcsetattr(self.fd, termios.TCSAFLUSH, self.new_term)
return self.query_keyboard
def __exit__(self, type, value, traceback):
# switch to normal terminal
termios.tcsetattr(self.fd, termios.TCSAFLUSH, self.old_term)
def query_keyboard(self):
dr, dw, de = select([sys.stdin], [], [], 0)
return dr != []
if __name__ == '__main__':
from time import sleep
with keyboard_detection() as key_pressed:
while not key_pressed():
sys.stdout.write('.')
sys.stdout.flush()
sleep(0.5)
print('done')
```
#### File: tools/manager/player.py
```python
class Player:
def __init__(self, name, path, last_seen = "", rank = 1000, skill = 0.0, mu = 25.0, sigma = (25.0 / 3.0), ngames = 0, active = 1):
self.name = name
self.path = path
self.last_seen = last_seen
self.rank = rank
self.skill = skill
self.mu = mu
self.sigma = sigma
self.ngames = ngames
self.active = active
def __repr__(self):
return "{:<25}{:<20}{:^6}{:10.4f}{:10.4f}{:10.4f} {:>5} {:>5} {:<30}".format(self.name, str(self.last_seen), self.rank, self.skill, self.mu, self.sigma, self.ngames, self.active, self.path)
def update_skill(self):
self.skill = self.mu - (self.sigma * 3)
```
#### File: tools/manager/util.py
```python
import player as pl
def parse_player_record (player):
(player_id, name, path, last_seen, rank, skill, mu, sigma, ngames, active) = player
return pl.Player(name, path, last_seen, rank, skill, mu, sigma, ngames, active)
```
#### File: Halite-III/website/make_starter_kits.py
```python
import argparse
import glob
import hashlib
import itertools
import json
import os
import shutil
import zipfile
ENVIRONMENT_DIR_HELP = "Directory containing precompiled Halite environment " \
"executables, each named after their platform. "
BOX_DIR_HELP = "Directory containing precompiled Halite-in-a-Box builds, " \
"each named after their platform."
VERSION_HELP = "The version string to embed in the downloads page."
INCLUDED_EXTENSIONS = {
".bat",
".clj",
".coffee",
".cpp",
".cs",
".csproj",
".d",
".dart",
".erl",
".ex",
".exs",
".fs",
".fsproj",
".go",
".groovy",
".h",
".hpp",
".hs",
".java",
".jl",
".js",
".kt",
".lisp",
".lock",
".lua",
".m",
".md",
".ml",
".pas",
".php",
".pl",
".properties",
".pxd",
".py",
".pyx",
".rb",
".rkt",
".rs",
".sbt",
".scala",
".sh",
".sln",
".svc",
".swift",
".toml",
".txt",
}
INCLUDED_FILES = {
"Makefile",
"README",
"REQUIRE",
"LANGUAGE",
"build.gradle",
}
STARTER_KIT_DIR = "../starter_kits"
DOWNLOAD_DATA = "_data/downloads.json"
PLATFORM_AGNOSTIC = "None"
# Kits that we support
OFFICIAL_KITS = {
"Python3",
"JavaScript",
"Java",
"C++",
"Rust",
"ml",
}
# Names of generated downloads
# Standard language + platform
OUTPUT_FILE_FORMAT = "assets/downloads/Halite3_{language}_{platform}.zip"
# Platform only
ENVIRONMENT_OUTPUT_FILE_FORMAT = "assets/downloads/Halite3_{platform}.zip"
BOX_OUTPUT_FILE_FORMAT = "assets/downloads/Halite3_Offline_{platform}{extension}"
# All languages + platform
ALL_LANGUAGES_OUTPUT_FILE_FORMAT = "assets/downloads/Halite3_all_{platform}.zip"
SOURCE_FILE = "assets/downloads/Halite3Source.zip"
BENCHMARK_FILE = "assets/downloads/Halite3Benchmark.zip"
BENCHMARK_MANIFEST = "assets/downloads/Halite3Benchmark.json"
TOOLS_FILE = "assets/downloads/Halite3Tools.zip"
REPLAY_README = """Replays and error logs will appear here if you use the run_game.sh or run_game.bat scripts.
"""
def detect_environments(directory):
"""Detect which platform binaries we have."""
environments = [(PLATFORM_AGNOSTIC, None, None)]
for filename in os.listdir(directory):
platform, platform_ext = os.path.splitext(filename)
if platform.startswith("."):
# .DS_Store, .gitignore, etc.
continue
print("Detected platform", platform)
environments.append((platform,
os.path.join(directory, filename),
"halite" + platform_ext))
return environments
def scan_directory(full_path):
"""Figure out what the starter kit files in a directory are."""
included_files = []
for containing_dir, _, possible_files in os.walk(full_path):
for filename in possible_files:
_, ext = os.path.splitext(filename)
if ext.lower() in INCLUDED_EXTENSIONS or filename in INCLUDED_FILES:
included_files.append(os.path.join(containing_dir, filename))
included_files.append(("README.md", open(os.path.join(STARTER_KIT_DIR, "README.md")).read()))
included_files.append((".gitignore", open(os.path.join(STARTER_KIT_DIR, ".gitignore")).read()))
included_files.append(("./docs/api-docs.md", open("./learn-programming-challenge/api-docs.md").read()))
included_files.append(("./docs/game-overview.md", open("./learn-programming-challenge/game-overview.md").read()))
return included_files
def make_archive(output, environment, base_path, included_files):
"""Create the output ZIP archive."""
platform, source, target = environment
# Get rid of duplicates
included_files = list(set(included_files))
with zipfile.ZipFile(output, "w", zipfile.ZIP_DEFLATED) as archive:
if source is not None:
# source is None <=> platform-agnostic archive
zinfo = zipfile.ZipInfo.from_file(source, target)
zinfo.compress_type = zipfile.ZIP_DEFLATED
zinfo.external_attr = 0o0100755 << 16
with open(source, 'rb') as source_file:
archive.writestr(zinfo, source_file.read())
for file in included_files:
if isinstance(file, tuple):
archive.writestr(file[0], file[1])
else:
target_path = os.path.relpath(file, base_path)
archive.write(file, target_path)
def make_source_download():
included_files = []
for directory, _, file_list in os.walk("../game_engine"):
target_dir = os.path.relpath(directory, "../game_engine")
for filename in file_list:
# Just include all files in directory, since we should be
# deploying from a clean repo.
source_path = os.path.join(directory, filename)
target_path = os.path.normpath(
os.path.join("Halite/", target_dir, filename))
included_files.append((source_path, target_path))
with zipfile.ZipFile(SOURCE_FILE, "w", zipfile.ZIP_DEFLATED) as archive:
for source_path, target_path in included_files:
archive.write(source_path, target_path)
def make_benchmark_download():
included_files = []
manifest = []
def add_directory(root):
for directory, _, file_list in os.walk(root):
for filename in file_list:
_, ext = os.path.splitext(filename)
if ext.lower() in {".py"}:
source_path = os.path.join(directory, filename)
target_path = os.path.normpath(
os.path.join("benchmark/", os.path.relpath(source_path, start=root)))
if filename == 'MyBot.py':
target_path = os.path.normpath(
os.path.join("benchmark/", os.path.relpath(os.path.join(directory, 'RandomBot.py'), start=root)))
included_files.append((source_path, target_path))
digest = hashlib.sha256()
with open(source_path, "rb") as source_file:
digest.update(source_file.read())
manifest.append((target_path, digest.hexdigest()))
add_directory("../starter_kits/benchmark")
add_directory("../starter_kits/Python3")
with zipfile.ZipFile(BENCHMARK_FILE, "w", zipfile.ZIP_DEFLATED) as archive:
for source_path, target_path in included_files:
archive.write(source_path, target_path)
with open(BENCHMARK_MANIFEST, "w") as manifest_file:
json.dump({
"digest_type": "SHA-256",
"manifest": manifest,
}, manifest_file)
def make_tools_download():
included_files = []
for directory, _, file_list in os.walk("../tools/hlt_client"):
target_dir = os.path.relpath(directory, "../tools/hlt_client")
for filename in file_list:
_, ext = os.path.splitext(filename)
if ext.lower() in {".py", ".md", ".txt"} or filename == "hlt":
source_path = os.path.join(directory, filename)
target_path = os.path.normpath(
os.path.join("hlt_client/", target_dir, filename))
included_files.append((source_path, target_path))
with zipfile.ZipFile(TOOLS_FILE, "w", zipfile.ZIP_DEFLATED) as archive:
for source_path, target_path in included_files:
archive.write(source_path, target_path)
def make_box_halite_download(box_dir):
# Result is [platform independent, Mac, Linux, Windows] path links
result = [None, None, None, None]
# Halite-in-a-Box
for filename in os.listdir(box_dir):
if filename.startswith('.'):
continue
platform, extension = os.path.splitext(os.path.basename(filename))
destination = BOX_OUTPUT_FILE_FORMAT.format(platform=platform, extension=extension)
shutil.copy(os.path.join(box_dir, filename), destination)
if platform == 'MacOS':
result[1] = destination
elif platform == 'Linux':
result[2] = destination
elif platform == 'Windows':
result[3] = destination
return result
def main():
parser = argparse.ArgumentParser()
parser.add_argument("version", help=VERSION_HELP)
parser.add_argument("environment_dir", help=ENVIRONMENT_DIR_HELP)
parser.add_argument("box_dir", help=BOX_DIR_HELP)
args = parser.parse_args()
environments = detect_environments(args.environment_dir)
generated_languages = []
# Ensure output directories exist
for location in [SOURCE_FILE, DOWNLOAD_DATA]:
try:
os.makedirs(os.path.dirname(location))
except FileExistsError:
pass
make_source_download()
make_benchmark_download()
make_tools_download()
# Keep paths of all source files around so we can make a single combined
# download at the end
all_files = []
for directory in os.listdir(STARTER_KIT_DIR):
full_path = os.path.join(STARTER_KIT_DIR, directory)
if not os.path.isdir(full_path):
continue
if directory in ("starterkitdocs", "benchmark"):
continue
if directory == 'Swift':
# Skip this for now (licensing)
continue
language = directory
generated_languages.append(language)
print("Language:", language)
included_files = scan_directory(full_path)
for file in included_files:
print("\tIncluding:", file[0] if isinstance(file, tuple) else file)
print()
all_files.extend(included_files)
for (platform, source, target) in environments:
output = "./" + OUTPUT_FILE_FORMAT.format(
language=language, platform=platform)
print("\tMaking:", output)
make_archive(output, (platform, source, target),
full_path, included_files + [("replays/README.md", REPLAY_README)])
panlanguage_kits = []
for (platform, source, target) in environments:
# Make downloads including all languages
filename = ALL_LANGUAGES_OUTPUT_FILE_FORMAT.format(platform=platform)
all_output = "./" + filename
print("\tMaking:", all_output)
make_archive(all_output, (platform, source, target), "../starter_kits", all_files)
panlanguage_kits.append(filename)
# Make downloads including no languages
if source is None:
continue
output = "./" + ENVIRONMENT_OUTPUT_FILE_FORMAT.format(platform=platform)
print("\tMaking:", output)
make_archive(output, (platform, source, target), "", [])
output = {
"platforms": [environment[0] for environment in environments],
"languages": [],
"environments": [],
"tools": [
# Don't allow downloading benchmark bots
# {
# "name": "Benchmark Bots",
# "files": [BENCHMARK_FILE, None, None, None],
# },
{
"name": "Halite Visualizer & Gym",
"files": make_box_halite_download(args.box_dir),
},
{
"name": "CLI Client Tools",
"files": [TOOLS_FILE, None, None, None],
},
],
"source": SOURCE_FILE,
"version": args.version,
}
generated_languages.sort()
for language in generated_languages:
language_kits = []
for (platform, _, _) in environments:
language_kits.append(
OUTPUT_FILE_FORMAT.format(
language=language, platform=platform))
output["languages"].append({
"language": language,
"files": language_kits,
"version": args.version if language in OFFICIAL_KITS else "Community-contributed",
})
output["languages"].append({
"language": "All Languages",
"files": panlanguage_kits,
})
for (platform, source, _) in environments:
if source is None:
continue
output["environments"].append(
ENVIRONMENT_OUTPUT_FILE_FORMAT.format(platform=platform))
with open(DOWNLOAD_DATA, "w") as output_file:
json.dump(output, output_file, indent=2)
if __name__ == "__main__":
main()
``` |
{
"source": "johnnyle24/SocialMediaInfluence",
"score": 3
} |
#### File: SocialMediaInfluence/heuristics/linear_threshold.py
```python
import numpy as np
import pdb as p
def deactivate_all_nodes(network, whitelist=None):
if whitelist is None:
whitelist = []
# Whitelist are nodes which should remain active
for node in network.Nodes():
if node.GetId() in whitelist:
network.AddIntAttrDatN(node, 1, 'active')
else:
network.AddIntAttrDatN(node, 0, 'active')
def node_influence(network, node):
network.AddIntAttrDatN(node, 1, 'active')
stack = [node.GetId()]
influence = 1
active = [node.GetId()]
while stack:
u = network.GetNI(stack.pop())
for out_edge in range(u.GetOutDeg()):
v = network.GetNI(u.GetOutNId(out_edge))
# See if inactive neighbor becomes active
if network.GetIntAttrDatN(v, 'active') == 0:
threshold = network.GetFltAttrDatN(v, 'threshold')
# Generate some edge weights (to potentially use later)
# edge_weights = np.random.dirichlet(np.ones(v.GetInDeg())) * np.random.uniform(0, 1)
# edge_weights = [1/v.GetInDeg()] * v.GetInDeg()
edge_weights = np.ones(v.GetInDeg())
for i in range(0, v.GetInDeg()):
edge_weights[i] = float(1) / v.GetInDeg()
# p.set_trace();
# print(edge_weights[i])
edge_weights = edge_weights.tolist()
# Compute the activation
activation = 0
for in_edge in range(v.GetInDeg()):
w = v.GetInNId(in_edge)
edge = network.GetEI(w, v.GetId())
is_active = network.GetIntAttrDatN(w, 'active')
weight = network.GetFltAttrDatE(edge, 'weight')
if weight == -1:
weight = edge_weights.pop()
network.AddFltAttrDatE(edge, weight, 'weight')
activation += is_active * weight
# Determine if this node becomes active
if activation > threshold:
network.AddIntAttrDatN(v, 1, 'active')
stack.append(v.GetId())
influence += 1
active.append(v.GetId())
return influence, active
def set_influence(network, max_set):
deactivate_all_nodes(network)
influence = 0
for node in max_set:
node = network.GetNI(node)
# Only measure influence if not already active
# (if already active, influence would have already been taken into account)
if network.GetIntAttrDatN(node, 'active') == 0:
influence += node_influence(network, node)[0]
return influence
``` |
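The heuristics above expect a SNAP attributed network carrying 'threshold' and 'weight' attributes (the 'active' flag is managed internally). A minimal sketch, assuming Stanford snap.py's TNEANet and this module's import path:
```python
import snap
from heuristics.linear_threshold import set_influence

network = snap.TNEANet.New()
for i in range(4):
    network.AddNode(i)
for src, dst in [(0, 1), (0, 2), (1, 3), (2, 3)]:
    network.AddEdge(src, dst)
for node in network.Nodes():
    network.AddFltAttrDatN(node, 0.3, 'threshold')  # activation threshold per node
for edge in network.Edges():
    network.AddFltAttrDatE(edge, -1.0, 'weight')    # -1 => weight assigned lazily above

print(set_influence(network, [0]))                  # spread obtained by seeding node 0
```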
{
"source": "johnnylee/scikits.pulsefit",
"score": 3
} |
#### File: scikits/pulsefit/ampfit_mle.py
```python
from __future__ import print_function
import numpy as np
import ampfit_mle_c
class AmpFitMLE(object):
"""AmpFitMLE fits pulse amplitudes using a maximum-likelihood
estimation.
"""
def __init__(self, debug=False):
self.debug = debug
def _compute_lambda_matrix(self, n_r, p, inds):
"""Compute the lambda matrix given the block size, n."""
lam = np.empty((inds.size, inds.size), dtype=np.float64)
ampfit_mle_c.compute_lambda_matrix(n_r, p, inds, lam.reshape(-1))
return lam
def _compute_phi_array(self, r, p, inds, b):
phi = np.empty(inds.size, dtype=np.float64)
ampfit_mle_c.compute_phi_array(r - b, p, inds, phi)
return phi
def fit(self, block):
"""Find the best-fit amplitude for pulses whose positions have been
identified.
"""
if self.debug:
print("\nFinding pulse amplitudes...")
# No pulses - nothing to do.
if len(block.inds) == 0:
block.amps = np.empty(0, dtype=np.float64)
return
r = block.r[block.i0:block.i1]
p = block.p
# Compute the lambda matrix and phi array.
lam = self._compute_lambda_matrix(r.size, p, block.inds)
phi = self._compute_phi_array(r, p, block.inds, block.b)
# Create a separate fast-path for single pulses.
if block.inds.size == 1:
if lam[0][0] == 0:
block.amps = np.zeros_like(block.inds)
else:
block.amps = np.array(
(phi[0] / lam[0][0],), dtype=np.float64)
# Otherwise we use linalg.solve for multiple pulses.
else:
try:
block.amps = np.linalg.solve(lam, phi)
except Exception as ex:
if self.debug:
print(" Error:", ex)
# This occurs when we have a singular matrix.
block.amps = np.zeros_like(block.inds)
if self.debug:
print(" Amps:", block.amps)
```
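The amplitude fit above amounts to solving the normal equations lam · amps = phi. A pure-NumPy sketch of that same solve, using an illustrative stand-in for the ampfit_mle_c helpers (none of the names below are part of the package):
```python
import numpy as np

def templates(n, p, inds):
    """Place a copy of pulse shape p at each index in inds (illustrative helper)."""
    t = np.zeros((len(inds), n))
    for k, i in enumerate(inds):
        m = min(p.size, n - i)
        t[k, i:i + m] = p[:m]
    return t

p = np.exp(-np.arange(30) / 5.0)      # example pulse shape
inds = np.array([10, 55])
true_amps = np.array([3.0, 1.5])
b = 0.2                               # baseline
r = np.full(120, b) + templates(120, p, inds).T @ true_amps

lam = templates(120, p, inds) @ templates(120, p, inds).T   # lambda matrix
phi = templates(120, p, inds) @ (r - b)                     # phi array
print(np.linalg.solve(lam, phi))                            # ~ [3.0, 1.5]
```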
#### File: scikits/pulsefit/correct_addpulses.py
```python
from __future__ import print_function
import numpy as np
class CorrectAddPulses(object):
def __init__(self, ampfit, optfit, flagger, pulse_add_len, th_min,
min_dist=0, debug=False):
"""Arguments:
ampfit -- Ampitude fitter.
optfit -- Fit optimizer.
flagger -- Block flagger.
pulse_add_len -- The max number of pulses to add is the block
length divided by pulse_add_len.
th_min -- Minimum allowed pulse height.
min_dist -- If pulses are closer than min_dist, they are merged
into a single pulse.
"""
self.ampfit = ampfit
self.optfit = optfit
self.flagger = flagger
self.pulse_add_len = pulse_add_len
self.th_min = th_min
self.min_dist = min_dist
self.debug = debug
def sanitize(self, block):
refit = False
# Remove low-amplitude pulses.
mask = block.amps > self.th_min
if mask.sum() != block.inds.size:
refit = True
block.inds = block.inds[mask]
block.amps = block.amps[mask]
if self.debug:
print("Correct: Removed low-amplitude pulses.")
# Merge pulses that are too close together.
if self.min_dist != 0 and len(block.inds) > 1:
dinds = block.inds[1:] - block.inds[:-1]
mask = dinds > self.min_dist
if mask.sum() != block.inds.size - 1:
refit = True
new_inds = np.empty(mask.sum() + 1, dtype=np.float64)
new_inds[0] = block.inds[0]
if mask.sum() != 0:
new_inds[1:] = block.inds[1:][mask]
block.inds = new_inds
if self.debug:
print("Correct: Merged pulses.")
if refit:
self.refit(block)
def refit(self, block):
self.ampfit.fit(block)
self.optfit.optimize(block)
self.flagger.flag(block)
def correct(self, block):
if self.debug:
print("\nCorrecting...")
add_max = max(1, int((block.i1 - block.i0) / self.pulse_add_len))
for i in range(add_max):
if np.all(block.flags == 0):
if self.debug:
print("Correct: All OK.")
return
# Add a new pulse.
idx_new = max(block.res.argmax() - block.p.argmax(), 0)
inds = np.concatenate((block.inds, (idx_new,)))
inds.sort()
block.inds = inds
if self.debug:
print(" Adding pulse at:", idx_new)
print(" Inds:", block.inds)
self.refit(block)
self.sanitize(block)
``` |
{
"source": "JohnnyLeibniz/kindling-bot",
"score": 3
} |
#### File: JohnnyLeibniz/kindling-bot/backandforth.py
```python
import discord
from discord.ext import commands
class Add_Remove(commands.Cog):
def __init__(self, client):
self.client = client
@commands.Cog.listener()
async def on_ready(self):
print('(Add & Remove) log is ready.')
@commands.command()
async def addbranch(self,ctx):
await ctx.send('A branch has been added to the fire.')
#----------------------------
# KICKING/BANNING/UNBANNING
#----------------------------
@commands.command()
async def kick(self,ctx, member : discord.Member, *,reason=None):
await member.kick(reason=reason)
await ctx.send(f'{member.mention} has been kicked.')
@commands.command()
async def ban(self,ctx, member : discord.Member, *,reason=None):
await member.ban(reason=reason)
await ctx.send(f'{member.mention} has been banned.')
@commands.command()
async def unban(self,ctx,*,member):
banned_users = await ctx.guild.bans()
member_name,member_discriminator = member.split('#')
for ban_entry in banned_users:
user = ban_entry.user
if (user.name,user.discriminator) == (member_name,member_discriminator):
await ctx.guild.unban(user)
await ctx.send(f'{user.name}#{user.discriminator} has been unbanned.')
#-------
# SETUP
#-------
def setup(client):
client.add_cog(Add_Remove(client))
``` |
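A hypothetical entry point for loading this cog, assuming discord.py 1.x and a token supplied through an environment variable (both are assumptions, not part of this file):
```python
import os
from discord.ext import commands

client = commands.Bot(command_prefix=".")
client.load_extension("backandforth")    # runs setup() above and registers Add_Remove
client.run(os.environ["DISCORD_TOKEN"])  # token variable name is a placeholder
```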
{
"source": "johnnylili/VideoSuperResolution",
"score": 4
} |
#### File: VSR/DataLoader/VirtualFile.py
```python
from pathlib import Path
from io import SEEK_END, BytesIO
from PIL import Image
import numpy as np
from ..Util.Utility import to_list
class File:
def __init__(self, path, rewind=False):
"""
If path is a file, File opens it and calculates its length.
If path is a folder, File organizes the files in the folder in alphabetical order
Args:
path: path to a node (can be a file or just a folder)
rewind: rewind the file when reaches EOF
"""
self.path = Path(path)
self.file = []
self.length = dict()
mode = 'rb' # mode must be 'rb'
if self.path.is_file():
self.name = self.path.stem
self.file = [self.path]
with self.path.open(mode) as fd:
fd.seek(0, SEEK_END)
self.length[self.path.name] = fd.tell()
elif self.path.is_dir():
self.name = self.path.stem # TODO: is this right?
for _file in self.path.glob('*'):
self.file.append(_file)
with _file.open(mode) as fd:
fd.seek(0, SEEK_END)
self.length[_file.name] = fd.tell()
self.read_file = []
self.read_pointer = 0
self.end_pointer = sum(self.length.values())
self.cur_fd = None
self.rewind = rewind
def __len__(self):
return self.end_pointer
def reopen(self):
self.file = self.read_file + self.file
self.read_file.clear()
self.read_pointer = 0
self.cur_fd = None
def split(self, depth):
pass
def read(self, count=None):
"""
Read `count` bytes
Args:
count: number of bytes to be read, if None, read all bytes of **1** file
Return:
bytes read
"""
if count == 0:
return b''
if not self.cur_fd and self.file:
self.cur_fd = self.file[0].open('rb')
self.read_file.append(self.file[0])
self.file.pop(0)
elif not self.cur_fd:
raise FileNotFoundError('No frames in File')
read_bytes = self.cur_fd.read(count)
if read_bytes:
self.read_pointer += len(read_bytes)
if count and count > len(read_bytes):
return read_bytes + self.read(count - len(read_bytes))
else:
return read_bytes
else:
if self.file:
self.cur_fd.close()
self.cur_fd = self.file[0].open('rb')
self.read_file.append(self.file[0])
self.file.pop(0)
return self.read(count)
elif self.rewind and self.read_file:
self.file = self.read_file.copy()
self.read_file.clear()
self.cur_fd = None
return self.read(count)
else:
raise EOFError('End of File!')
def read_frame(self, frames=1, *args):
pass
def seek(self, offset, where):
"""
Seek the position by `offset` relative to `where`
Args:
offset: move the read pointer by `offset` bytes
where: could be io.SEEK_END, io.SEEK_CUR, io.SEEK_SET
"""
pass
def tell(self):
"""
Tell the current position of the read pointer
"""
return self.read_pointer
def size(self, name):
"""
Get the length of the file named `name`
Return:
length in bytes
"""
path = Path(name)
name = path.stem if path.exists() else name
return self.length.get(name)
_ALLOWED_RAW_FORMAT = [
'YV12',
'YV21',
'NV12',
'NV21',
'RGB4',
'BGR4'
]
class RawFile(File):
def __init__(self, path, mode, size, rewind=False):
"""
Initiate Raw object. The file is lazy loaded, which means
the file is opened but not loaded into memory.
Arguments:
path: file path or handle
mode: since raw file has no headers, type must be explicitly given
size: a tuple of (width, height), must be explicitly given
rewind: rewind the file when reaches EOF
Raise:
TypeError
"""
if not mode.upper() in _ALLOWED_RAW_FORMAT:
raise TypeError('unknown mode: ' + mode)
self.mode = mode.upper()
self.size = to_list(size)
self.pitch, self.channel_pitch = self._get_frame_pitch()
super(RawFile, self).__init__(path, rewind)
def _get_frame_pitch(self):
"""Get bytes length of one frame.
For the detail of mode fourcc, please see https://www.fourcc.org/
RGB, BGR, and the UV channels of NV12/NV21 are packed, while YV12 and YV21 are planar, hence we have:
- **channel0** of YV12, YV21, NV12, NV21 is Y
- **channel1** of YV12 is U, of YV21 is V, of NV12 is UV, of NV21 is VU
- **channel2** of YV12 is V, of YV21 is U
"""
mode = self.mode
width, height = self.size
if mode in ('YV12', 'YV21'):
return height * width * 3 // 2, [height * width, height * width // 4, height * width // 4]
if mode in ('NV12', 'NV21'):
return height * width * 3 // 2, [height * width, height * width // 2]
if mode in ('RGB4', 'BGR4'):
return height * width * 3, [height * width * 3]
def _get_frame_channel_shape(self):
"""Get each channel's shape according to mode and frame length.
For the detail of mode fourcc, please see https://www.fourcc.org/
"""
mode = self.mode
width, height = self.size
if mode in ('YV12', 'YV21'):
return np.array([1, height, width]), np.array([1, height // 2, width // 2]), np.array(
[1, height // 2, width // 2])
if mode in ('NV12', 'NV21'):
return np.array([1, height, width]), np.array([2, height // 2, width // 2])
if mode in ('RGB4', 'BGR4'):
return np.array([height, width, 3])
def read_frame(self, frames=1, *args):
"""
Read `frames` frames of the file.
Arguments:
frames: number of frames to be loaded
id: specify frame format to store (default gray-scale)
Raise:
"""
if self.mode in ('YV12', 'YV21', 'NV12', 'NV21',):
# discard uv plain for acceleration
_image_mode = 'L'
else:
_image_mode = 'RGB'
return [Image.frombytes(_image_mode, self.size, self.read(self.pitch)) for _ in range(frames)]
@property
def shape(self):
return self.size
@property
def frames(self):
return (self.end_pointer - self.read_pointer) // self.pitch
class ImageFile(File):
def __init__(self, path, rewind):
"""Open image1 file or a sequence of image1 frames
Args:
path: file path or handle
rewind: rewind the file when reaches EOF
"""
super(ImageFile, self).__init__(path, rewind)
def read_frame(self, frames=1, *args):
"""read number `frames` of the file.
Args:
frames: number of frames to be loaded
"""
image_bytes = [BytesIO(self.read()) for _ in range(frames)]
return [Image.open(fp) for fp in image_bytes]
@property
def shape(self):
with Image.open(self.file[0]) as img:
return img.width, img.height
@property
def frames(self):
return len(self.file)
```
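A brief usage sketch of the two readers defined above; file names, frame size and frame counts are placeholders.
```python
from VSR.DataLoader.VirtualFile import RawFile, ImageFile

# Headerless YUV clip: mode and (width, height) must be given explicitly.
raw = RawFile('clip.yv12', mode='YV12', size=(1920, 1080), rewind=True)
print(raw.frames)              # frames remaining in the raw stream
luma = raw.read_frame(4)       # 4 PIL images; UV planes are skipped for YUV modes

# A folder of encoded images, decoded one file per frame.
seq = ImageFile('frames_dir', rewind=False)
imgs = seq.read_frame(seq.frames)
```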
#### File: VSR/Framework/Callbacks.py
```python
from pathlib import Path
from functools import partial
import numpy as np
from PIL.Image import Image
from ..Util.ImageProcess import array_to_img, img_to_array, imresize
def _sub_residual(**kwargs):
img = kwargs.get('input')
res = kwargs.get('output') or np.zeros_like(img)
res = res[0] if isinstance(res, list) else res
return img - res
def _save_model_predicted_images(output, index, mode='YCbCr', **kwargs):
save_dir = kwargs.get('save_dir') or '.'
name = kwargs.get('name')
if output is not None:
img = output[index] if isinstance(output, list) else output
img = _to_normalized_image(img, mode)
path = Path(f'{save_dir}/{name}_PR.png')
path.parent.mkdir(parents=True, exist_ok=True)
rep = 1
while path.exists():
path = Path(f'{save_dir}/{name}_PR_{rep}.png')
rep += 1
img.convert('RGB').save(str(path))
return output
def _colored_grayscale_image(outputs, input, **kwargs):
ret = []
for img in outputs:
assert img.shape[-1] == 1
scale = np.array(img.shape[1:3]) // np.array(input.shape[1:3])
uv = array_to_img(input[0], 'YCbCr')
uv = imresize(uv, scale)
uv = img_to_array(uv)[..., 1:]
img = np.concatenate([img[0], uv], axis=-1)
img = np.clip(img, 0, 255)
ret.append(array_to_img(img, 'YCbCr'))
return ret
def _to_normalized_image(img, mode):
img = np.asarray(img)
# squeeze to [H, W, C]
for i in range(np.ndim(img)):
try:
img = np.squeeze(img, i)
except ValueError:
pass
img = np.clip(img, 0, 255)
if img.ndim < 2 or img.ndim > 3:
raise ValueError('Invalid img data, must be a 2D image array with an optional channel dimension')
return array_to_img(img, mode)
def _add_noise(feature, stddev, mean, clip, **kwargs):
x = feature.astype('float') + np.random.normal(mean, stddev, feature.shape)
return np.clip(x, 0, 255) if clip else x
def _add_random_noise(feature, low, high, step, mean, clip, **kwargs):
n = list(range(low, high, step))
i = np.random.randint(len(n))
stddev = n[i]
return _add_noise(feature, stddev, mean, clip)
def _gaussian_blur(feature, width, size, **kwargs):
from scipy.ndimage.filters import gaussian_filter as gf
y = []
for img in np.split(feature, feature.shape[0]):
c = []
for channel in np.split(img, img.shape[-1]):
channel = np.squeeze(channel).astype('float')
c.append(gf(channel, width, mode='constant', truncate=(size // 2) / width))
y.append(np.stack(c, axis=-1))
return np.stack(y)
def _exponential_decay(lr, start_lr, epochs, steps, decay_step, decay_rate):
return start_lr * decay_rate ** (steps / decay_step)
def _poly_decay(lr, start_lr, end_lr, epochs, steps, decay_step, power):
return (start_lr - end_lr) * (1 - steps / decay_step) ** power + end_lr
def _stair_decay(lr, start_lr, epochs, steps, decay_step, decay_rate):
return start_lr * decay_rate ** (steps // decay_step)
def _eval_psnr(outputs, label, max_val, name, **kwargs):
if not isinstance(outputs, list):
outputs = [outputs]
if isinstance(label, Image):
label = img_to_array(label.convert('RGB'))
for outp in outputs:
if isinstance(outp, Image):
outp = img_to_array(outp.convert('RGB'))
label = np.squeeze(label)
outp = np.squeeze(outp)
mse = np.mean(np.square(outp - label))
psnr = 20 * np.log10(max_val / np.sqrt(mse))
print(f'{name}\'s PSNR = {psnr:.2f}dB')
def save_image(save_dir='.', output_index=0, **kwargs):
return partial(_save_model_predicted_images, save_dir=save_dir, index=output_index, **kwargs)
def print_psnr(max_val=255.0):
return partial(_eval_psnr, max_val=max_val)
def reduce_residual(**kwargs):
return partial(_sub_residual, **kwargs)
def to_rgb(**kwargs):
return partial(_colored_grayscale_image, **kwargs)
def to_gray():
def _gray_colored_image(inputs, **kwargs):
return inputs[..., 0:1]
return _gray_colored_image
def to_uv():
def _uv_colored_image(inputs, **kwargs):
return inputs[..., 1:]
return _uv_colored_image
def add_noise(sigma, mean=0, clip=False):
return partial(_add_noise, stddev=sigma, mean=mean, clip=clip)
def add_random_noise(low, high, step=1, mean=0, clip=False):
return partial(_add_random_noise, low=low, high=high, step=step, mean=mean, clip=clip)
def lr_decay(method, lr, **kwargs):
if method == 'exp':
return partial(_exponential_decay, start_lr=lr, **kwargs)
elif method == 'poly':
return partial(_poly_decay, start_lr=lr, **kwargs)
elif method == 'stair':
return partial(_stair_decay, start_lr=lr, **kwargs)
else:
raise ValueError('invalid decay method!')
def blur(kernel_width, kernel_size, method='gaussian'):
return partial(_gaussian_blur, width=kernel_width, size=kernel_size)
```
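A small sketch of how these callback factories compose; the arrays and hyper-parameters below are made up for illustration.
```python
import numpy as np
from VSR.Framework.Callbacks import add_noise, lr_decay, save_image

noisify = add_noise(sigma=10, clip=True)                 # feature-side degradation
schedule = lr_decay('exp', 1e-3, decay_step=1000, decay_rate=0.96)
saver = save_image(save_dir='./results', output_index=0)

batch = np.random.randint(0, 255, size=(4, 32, 32, 1)).astype('float32')
noisy = noisify(batch)                                   # Gaussian noise, clipped to [0, 255]
print(schedule(lr=None, epochs=1, steps=2000))           # 1e-3 * 0.96 ** 2 ~= 9.2e-4
```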
#### File: VSR/Framework/GAN.py
```python
from .SuperResolution import SuperResolution
import tensorflow as tf
import functools
import numpy as np
logging = tf.logging
tfgan_eval = tf.contrib.gan.eval
def Discriminator(net, input_shape=None, scope='Critic', use_bias=False):
"""A simple D-net, for image generation usage
Args:
net: your base class of the caller to this method
input_shape: identify the shape of the image if the dense layer is used.
if the input_shape is None, the dense layer is replaced by
global average pooling layer
scope: name of the scope
use_bias: use bias in convolution
Return:
the **callable** which returns the prediction and feature maps of each layer
"""
def critic(inputs):
assert isinstance(net, SuperResolution)
with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
if input_shape is not None:
x = tf.reshape(inputs, input_shape)
else:
x = inputs
fet = []
x = net.conv2d(x, 64, 3, activation='lrelu', use_batchnorm=False, use_bias=use_bias,
kernel_initializer='he_normal')
fet.append(x)
x = net.conv2d(x, 64, 3, strides=2, activation='lrelu', use_batchnorm=True, use_bias=use_bias,
kernel_initializer='he_normal')
fet.append(x)
x = net.conv2d(x, 128, 4, strides=1, activation='lrelu', use_batchnorm=True, use_bias=use_bias,
kernel_initializer='he_normal')
fet.append(x)
x = net.conv2d(x, 128, 4, strides=2, activation='lrelu', use_batchnorm=True, use_bias=use_bias,
kernel_initializer='he_normal')
fet.append(x)
x = net.conv2d(x, 256, 4, strides=1, activation='lrelu', use_batchnorm=True, use_bias=use_bias,
kernel_initializer='he_normal')
fet.append(x)
x = net.conv2d(x, 256, 4, strides=2, activation='lrelu', use_batchnorm=True, use_bias=use_bias,
kernel_initializer='he_normal')
fet.append(x)
x = net.conv2d(x, 512, 4, strides=1, activation='lrelu', use_batchnorm=True, use_bias=use_bias,
kernel_initializer='he_normal')
fet.append(x)
x = net.conv2d(x, 512, 4, strides=2, activation='lrelu', use_batchnorm=True, use_bias=use_bias,
kernel_initializer='he_normal')
fet.append(x)
if input_shape:
x = tf.layers.flatten(x)
x = tf.layers.dense(x, 1024, activation=tf.nn.leaky_relu)
x = tf.layers.dense(x, 1)
else:
x = net.conv2d(x, 1, 3)
x = tf.reduce_mean(x, [1, 2, 3])
return x, fet
return critic
def loss_gan(y_true, y_pred, discriminator):
"""Original GAN loss with BCE"""
if not callable(discriminator):
raise TypeError('Discriminator is not a callable!')
y_real = discriminator(y_true)
y_fake = discriminator(y_pred)
d_loss = tf.losses.sigmoid_cross_entropy(tf.ones_like(y_real), y_real) + \
tf.losses.sigmoid_cross_entropy(tf.zeros_like(y_fake), y_fake)
g_loss = tf.losses.sigmoid_cross_entropy(tf.ones_like(y_fake), y_fake)
return g_loss, d_loss
def loss_wgan(y_true, y_pred, discriminator):
"""W-GAN"""
if not callable(discriminator):
raise TypeError('Discriminator is not a callable!')
y_real = discriminator(y_true)
y_fake = discriminator(y_pred)
d_loss = tf.reduce_mean(y_fake - y_real)
g_loss = -tf.reduce_mean(y_fake)
return g_loss, d_loss
def loss_wgan_gp(y_true, y_pred, discriminator, lamb=10):
"""W-GAN Gradient penalty"""
g_loss, d_loss = loss_wgan(y_true, y_pred, discriminator)
diff = y_pred - y_true
alpha = tf.random_uniform(tf.shape(diff), minval=0., maxval=1.)
interp = y_true + alpha * diff
gradients = tf.gradients(discriminator(interp), [interp])
slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients[0]), reduction_indices=[1]))
gp = tf.reduce_mean((slopes - 1.) ** 2.)
return g_loss, d_loss + lamb * gp
def loss_lsgan(y_true, y_pred, discriminator):
"""LSGAN"""
if not callable(discriminator):
raise TypeError('Discriminator is not a callable!')
y_real = discriminator(y_true)
y_fake = discriminator(y_pred)
d_loss = tf.reduce_mean((y_real - 1) ** 2) + tf.reduce_mean(y_fake ** 2)
g_loss = tf.reduce_mean((y_fake - 1) ** 2)
return g_loss, d_loss
def loss_relative_lsgan(y_true, y_pred, discriminator, average=False):
"""R(A)LSGAN"""
if not callable(discriminator):
raise TypeError('Discriminator is not a callable!')
y_real = discriminator(y_true)
y_fake = discriminator(y_pred)
if average:
d_loss = tf.reduce_mean((y_real - tf.reduce_mean(y_fake) - 1) ** 2) + \
tf.reduce_mean((y_fake - tf.reduce_mean(y_real) + 1) ** 2)
g_loss = tf.reduce_mean((y_real - tf.reduce_mean(y_fake) + 1) ** 2) + \
tf.reduce_mean((y_fake - tf.reduce_mean(y_real) - 1) ** 2)
else:
d_loss = tf.reduce_mean((y_real - y_fake - 1) ** 2)
g_loss = tf.reduce_mean((y_fake - y_real - 1) ** 2)
return g_loss, d_loss
```
#### File: VSR/Models/Srcnn.py
```python
from ..Framework.SuperResolution import SuperResolution
from ..Util.Utility import bicubic_rescale, to_list, ConvolutionDeltaOrthogonal
import tensorflow as tf
SCALE = 1
SHIFT = 0
convolutional_delta_orthogonal = ConvolutionDeltaOrthogonal
class SRCNN(SuperResolution):
def __init__(self, scale, layers=3, filters=64, kernel=(9, 5, 5), name='srcnn', **kwargs):
self.name = name
self.layers = layers
self.filters = filters
self.kernel_size = to_list(kernel)
if len(self.kernel_size) < self.layers:
self.kernel_size += to_list(kernel[-1], self.layers - len(self.kernel_size))
super(SRCNN, self).__init__(scale=scale, **kwargs)
def build_graph(self):
with tf.variable_scope(self.name):
super(SRCNN, self).build_graph()
x = self.inputs_preproc[-1] * SCALE + SHIFT
x = bicubic_rescale(x, self.scale)
f = self.filters
ks = self.kernel_size
x = self.conv2d(x, f, ks[0], activation='relu', use_batchnorm=False, kernel_regularizer='l2',
kernel_initializer=convolutional_delta_orthogonal())
for i in range(1, self.layers - 1):
x = self.conv2d(x, f, ks[i], activation='relu', use_batchnorm=False, kernel_regularizer='l2',
kernel_initializer=convolutional_delta_orthogonal())
x = self.conv2d(x, 1, ks[-1], use_batchnorm=False, kernel_regularizer='l2',
kernel_initializer='he_normal')
self.outputs.append((x - SHIFT) / SCALE)
def build_loss(self):
with tf.name_scope('loss'):
y_pred = self.outputs[-1] * SCALE + SHIFT
y_true = self.label[-1] * SCALE + SHIFT
opt = tf.train.AdamOptimizer(self.learning_rate)
mse = tf.losses.mean_squared_error(y_true, y_pred)
loss = tf.losses.get_total_loss()
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
self.loss.append(opt.minimize(loss, self.global_steps))
self.exp_loss()
self.train_metric['loss'] = loss
self.metrics['mse'] = mse
self.metrics['psnr'] = tf.reduce_mean(tf.image.psnr(self.label[-1], self.outputs[-1], max_val=255))
self.metrics['ssim'] = tf.reduce_mean(tf.image.ssim(self.label[-1], self.outputs[-1], max_val=255))
def build_summary(self):
tf.summary.scalar('loss/training', self.train_metric['loss'])
tf.summary.scalar('loss/mse', self.metrics['mse'])
tf.summary.scalar('psnr', self.metrics['psnr'])
tf.summary.scalar('ssim', self.metrics['ssim'])
self.exp_summary()
def exp_loss(self):
pass
def exp_summary(self):
pass
``` |
{
"source": "JohnnyLin-a/ecom-inventory-stock-checker",
"score": 2
} |
#### File: internal/ecom/MetrohobbiesCa.py
```python
from pkg.api.ecomm.Ecomm import EcommInterface
from pkg.api.webengine import WebEngine
from pkg.api.ecomm.Item import Item
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
import requests
class MetrohobbiesCa(EcommInterface):
webhookFull: str = "DISCORD_WEBHOOK_METROHOBBIES_FULL"
webhookDiff: str = "DISCORD_WEBHOOK_METROHOBBIES_DIFF"
def __init__(self):
pass
def execute(self, webEngine: WebEngine) -> dict:
# Use rest api directly
inStockItems = {"*": []}
page = 1
totalPages = 1
while page <= totalPages:
print("Onto page " + str(page) + "/" + str(totalPages))
req = requests.get("https://cdn5.editmysite.com/app/store/api/v17/editor/users/131444256/sites/426079854127040612/products?page=" + str(page) + "&per_page=180&sort_by=created_date&sort_order=desc&in_stock=1&excluded_fulfillment=dine_in")
if not req.ok:
return {"error": "failed to get data for page " + str(page)}
try:
data = req.json()
except:
return {"error": "failed to convert to json for page " + str(page)}
# Set max page
totalPages = data['meta']['pagination']['total_pages']
# iterate over items
for item in data['data']:
if item['badges']['out_of_stock'] == False:
inStockItems["*"].append(Item(1, item['name'], '*'))
page += 1
return inStockItems
@staticmethod
def getUrl() -> str:
return "https://www.metrohobbies.ca"
``` |
{
"source": "Johnny-Liou/LightDance-RPi",
"score": 3
} |
#### File: LightDance-RPi/cli/cli.py
```python
import os
import sys
import cmd2
from cmd2 import Fg, argparse, style
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + "/..")
from method import *
from pysocket import ZMQSocket
INTRO = r"""
_ __ ______ __ __ ______ ______
/ | / //_ __// / / // ____// ____/
/ |/ / / / / / / // __/ / __/
/ /| / / / / /_/ // /___ / /___
/_/ |_/ /_/ \____//_____//_____/
__ _ __ __ ____
/ / (_)____ _ / /_ / /_ / __ \ ____ _ ____ _____ ___
/ / / // __ `// __ \ / __// / / // __ `// __ \ / ___// _ \
/ /___ / // /_/ // / / // /_ / /_/ // /_/ // / / // /__ / __/
/_____//_/ \__, //_/ /_/ \__//_____/ \__,_//_/ /_/ \___/ \___/
/____/
"""
def file_path(path):
if os.path.isfile(path):
return path
else:
raise argparse.ArgumentTypeError("File not found!")
class LightDanceCLI(cmd2.Cmd):
"""LightDanceCLI"""
def __init__(self):
# CMD2 init
self.intro = style(INTRO, fg=Fg.BLUE, bold=True)
self.prompt = "LightDance CLI> "
shortcuts = dict(cmd2.DEFAULT_SHORTCUTS)
super().__init__(shortcuts=shortcuts, startup_script="./cli/startup")
# ZMQ methods init
self.socket = ZMQSocket(port=8000)
self.METHODS = {
"shutDown": ShutDown(),
"reboot": Reboot(),
"boardinfo": BoardInfo(),
"uploadControl": UploadJsonFile(socket=self.socket),
"load": Load(socket=self.socket),
"play": Play(socket=self.socket),
"pause": Pause(socket=self.socket),
"stop": Stop(socket=self.socket),
"statuslight": StatusLight(socket=self.socket),
"eltest": ELTest(socket=self.socket),
"ledtest": LEDTest(socket=self.socket),
"list": List(socket=self.socket),
"quit": Quit(socket=self.socket),
"send": Send(socket=self.socket),
}
# vars init
self.load = False
def response_parser(self, response: str):
if "error" in response.lower():
self.perror(response)
elif "success" in response.lower():
self.poutput(response)
elif "warning" in response.lower():
self.pwarning(response)
def do_boardinfo(self, args):
"""boardinfo"""
info = self.METHODS["boardinfo"]()
self.poutput(info)
def do_reboot(self, args):
"""reboot"""
self.METHODS["reboot"]()
def do_shutdown(self, args):
"""shutdown"""
self.METHODS["shutdown"]()
# load [path]
load_parser = cmd2.Cmd2ArgumentParser()
load_parser.add_argument(
"control_path",
nargs="?",
default="data/control.json",
type=file_path,
help="Path to control JSON file.",
)
@cmd2.with_argparser(load_parser)
def do_load(self, args):
"""Load control JSON file"""
control_path = args.control_path
with open(control_path, "r") as f:
control = f.read()
if not control:
self.pwarning("Warning: control.json is empty")
payload = {"path": control_path}
response = self.METHODS["load"](payload)
self.response_parser(response)
self.load = True
complete_load = cmd2.Cmd.path_complete
# play [start_time] [delay_time]
play_parser = cmd2.Cmd2ArgumentParser()
play_parser.add_argument(
"start_time", nargs="?", default=0, type=int, help="start time"
)
play_parser.add_argument(
"delay_time", nargs="?", default=0, type=int, help="delay time"
)
@cmd2.with_argparser(play_parser)
def do_play(self, args):
"""play"""
start_time = args.start_time
delay_time = args.delay_time
if not self.load:
self.perror("Error: play failed, need to load first")
return
payload = {"start_time": str(start_time), "delay_time": str(delay_time)}
response = self.METHODS["play"](payload)
self.response_parser(response)
def do_pause(self, args):
"""pause"""
response = self.METHODS["pause"]()
self.response_parser(response)
def do_stop(self, args):
"""stop"""
response = self.METHODS["stop"]()
self.response_parser(response)
def do_statuslight(self, args): # TODO
"""statuslight"""
response = self.METHODS["statuslight"]()
self.response_parser(response)
def do_list(self, args): # TODO
"""list"""
response = self.METHODS["list"]()
self.response_parser(response)
def do_quit(self, args):
"""quit"""
response = self.METHODS["quit"]()
self.response_parser(response)
return 1
send_parser = cmd2.Cmd2ArgumentParser()
send_parser.add_argument(
"message", nargs="?", default="Hello", type=str, help="message"
)
@cmd2.with_argparser(send_parser)
def do_send(self, args):
"""send"""
payload = {"message": args.message}
response = self.METHODS["send"](payload)
self.response_parser(response)
# eltest [id] [brightness]
eltest_parser = cmd2.Cmd2ArgumentParser()
eltest_parser.add_argument("id", nargs="?", default=-1, type=int, help="id 0~31")
eltest_parser.add_argument(
"brightness", nargs="?", default=4095, type=int, help="brightness 0~4095"
)
@cmd2.with_argparser(eltest_parser)
def do_eltest(self, args):
"""test el"""
id = args.id
brightness = args.brightness
if brightness > 4095:
            self.pwarning(
                "Warning: brightness exceeds 4095, clamping to 4095"
            )
brightness = 4095
payload = {"id": str(id), "brightness": str(brightness)}
response = self.METHODS["eltest"](payload)
self.response_parser(response)
def do_ledtest(self, args): # TODO
"""test led"""
response = self.METHODS["ledtest"]()
self.response_parser(response)
# sendlight [id] [vector]
sendlight_parser = cmd2.Cmd2ArgumentParser()
sendlight_parser.add_argument("id", nargs="?", default=-1, type=int, help="id 0~31")
sendlight_parser.add_argument(
"vector", nargs="?", default=4095, type=int, help="brightness 0~4095"
)
@cmd2.with_argparser(sendlight_parser)
def do_sendlight(self, args):
"""send light"""
id = args.id
vector = args.vector
payload = {"id": str(id), "vector": str(vector)}
response = self.METHODS["sendlight"](payload)
self.response_parser(response)
if __name__ == "__main__":
app = LightDanceCLI()
app.debug = True
sys.exit(app.cmdloop())
``` |
{
"source": "Johnny-liqiang/AI-Research-Intern-In-Sinovation-Venture",
"score": 2
} |
#### File: AI-Research-Intern-In-Sinovation-Venture/project_gpt2_demo/generate.py
```python
import os
import argparse
import logging
import torch
import torch.nn.functional as F
from modeling_gpt2 import GPT2LMHeadModel, GPT2Config
from tokenization_bert import BertTokenizer
import math
logger = logging.getLogger(__name__)
def top_k_top_p_filtering(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')):
""" Filter a distribution of logits using top-k and/or nucleus (top-p) filtering
Args:
logits: logits distribution shape (vocabulary size)
top_k > 0: keep only top k tokens with highest probability (top-k filtering).
top_p > 0.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).
Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)
From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
"""
assert logits.dim() == 1 # batch size 1 for now - could be updated for more but the code would be less clear
top_k = min(top_k, logits.size(-1)) # Safety check
if top_k > 0:
# Remove all tokens with a probability less than the last token of the top-k
        # torch.topk() returns the top_k largest values along the last dimension
        # as a (values, indices) pair; the ellipsis lets the remaining dimensions
        # be inferred automatically
        indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
        logits[indices_to_remove] = filter_value  # set logits outside the top-k to negative infinity
if top_p > 0.0:
        sorted_logits, sorted_indices = torch.sort(logits, descending=True)  # sort logits in descending order
cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
# Remove tokens with cumulative probability above the threshold
sorted_indices_to_remove = cumulative_probs > top_p
# Shift the indices to the right to keep also the first token above the threshold
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
sorted_indices_to_remove[..., 0] = 0
indices_to_remove = sorted_indices[sorted_indices_to_remove]
logits[indices_to_remove] = filter_value
return logits
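# Minimal usage sketch (illustrative only; the values are made up): keep the two
# largest logits and sample from the renormalized distribution, mirroring how
# top_kp_search below uses this filter.
#   logits = torch.tensor([1.0, 3.0, 0.5, 2.0])
#   filtered = top_k_top_p_filtering(logits.clone(), top_k=2)
#   token = torch.multinomial(F.softmax(filtered, dim=-1), num_samples=1)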
def top_kp_search(args, model, tokenizer, curr_input_tensor):
sep_token_id = tokenizer.vocab["[SEP]"]
unk_token_id = tokenizer.vocab["[UNK]"]
generated = []
for _ in range(args.max_pred_len):
with torch.no_grad():
outputs = model(input_ids=curr_input_tensor)
next_token_logits = outputs[0][-1, :]
for id in set(generated):
next_token_logits[id] /= args.repetition_penalty
next_token_logits = next_token_logits / args.temperature
next_token_logits[unk_token_id] = -float('Inf')
filtered_logits = top_k_top_p_filtering(next_token_logits, top_k=args.topk, top_p=args.topp)
next_token = torch.multinomial(F.softmax(filtered_logits, dim=-1), num_samples=1)
if next_token == sep_token_id:
break
generated.append(next_token.item())
curr_input_tensor = torch.cat((curr_input_tensor, next_token), dim=0)
return "".join([token.replace("##","") for token in tokenizer.convert_ids_to_tokens(generated)])
def k_best_outputs(curr_input_tensor, outputs, log_scores, index, beam_size):
probs, ix = outputs[:, index-1].data.topk(beam_size)
log_probs = torch.Tensor([math.log(p) for p in probs.data.view(-1)]).view(beam_size, -1) + log_scores.transpose(0, 1)
k_probs, k_ix = log_probs.view(-1).topk(beam_size)
row = k_ix // beam_size
col = k_ix % beam_size
curr_input_tensor[:, :index] = curr_input_tensor[row, :index]
curr_input_tensor[:, index] = ix[row, col]
log_scores = k_probs.unsqueeze(0)
return curr_input_tensor, log_scores
def beam_search(args, model, tokenizer, input_tensor):
sep_token_id = tokenizer.vocab["[SEP]"]
unk_token_id = tokenizer.vocab["[UNK]"]
beam_size = args.beam_size
log_scores = torch.FloatTensor([0.0]*beam_size).unsqueeze(0)
input_tensor_len = input_tensor.size(-1)
ind = None
curr_input_tensor = torch.zeros(beam_size, input_tensor_len + args.max_pred_len).long().to(args.device)
curr_input_tensor[:, :input_tensor_len] = input_tensor
for index in range(args.max_pred_len):
with torch.no_grad():
outputs = model(input_ids=curr_input_tensor)
outputs = F.softmax(outputs[0], dim=-1)
curr_input_tensor, log_scores = k_best_outputs(curr_input_tensor, outputs, log_scores, input_tensor_len+index, beam_size)
ones = (curr_input_tensor == sep_token_id).nonzero() # Occurrences of end symbols for all input sentences.
sentence_lengths = torch.zeros(len(curr_input_tensor), dtype=torch.long)
for vec in ones:
sentence_lengths[vec[0]] += 1
num_finished_sentences = len([s for s in sentence_lengths if s == 2])
if num_finished_sentences == beam_size:
alpha = 0.7
div = 1 / (sentence_lengths.type_as(log_scores) ** alpha)
_, ind = torch.max(log_scores * div, 1)
ind = ind.data[0]
break
if ind is None:
ind = 0
best_output = curr_input_tensor[ind]
sep_indexs = [x[0].item() for x in (best_output == sep_token_id).nonzero()]
if len(sep_indexs) == 1:
sep_indexs.append(input_tensor_len+args.max_pred_len)
generated = best_output[input_tensor_len: sep_indexs[1]].detach().cpu().numpy()
return "".join([token.replace("##","") for token in tokenizer.convert_ids_to_tokens(generated)])
def get_args():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--model_path", default=None, type=str, help="model path")
parser.add_argument("--max_pred_len", default=300, type=int, help="max length of predicted text")
parser.add_argument('--temperature', default=1, type=float, required=False, help='temperature')
parser.add_argument('--topk', default=5, type=int, required=False, help='')
parser.add_argument('--topp', default=0, type=float, required=False, help='')
parser.add_argument('--repetition_penalty', default=1.0, type=float, required=False, help="")
parser.add_argument('--beam_size', default=5, type=int, required=False, help="")
    parser.add_argument('--search', default="beam", type=str, required=False, help="search strategy: beam or top_kp")
args = parser.parse_args()
args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
return args
def main():
args = get_args()
config = GPT2Config.from_pretrained(args.model_path)
tokenizer = BertTokenizer.from_pretrained(os.path.join(args.model_path, "vocab.txt"))
model = GPT2LMHeadModel.from_pretrained(args.model_path, config=config)
model.to(args.device)
#demo: Given a text
text = "深圳改革开放四十周年"
token_ids = tokenizer.convert_tokens_to_ids(["[CLS]"])
token_ids.extend(tokenizer.convert_tokens_to_ids(tokenizer.tokenize(text)))
token_ids.extend(tokenizer.convert_tokens_to_ids(["[SEP]"]))
input_features = torch.tensor(token_ids, dtype=torch.long).to(args.device)
if args.search == "top_kp":
gen_text = top_kp_search(args, model, tokenizer, input_features)
elif args.search == "beam":
gen_text = beam_search(args, model, tokenizer, input_features)
else:
raise Exception
print("Generate text: {}".format(gen_text))
if __name__ == "__main__":
main()
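# Example invocations (hedged; the model directory is a placeholder that must
# contain a GPT2 checkpoint plus vocab.txt):
#   python generate.py --model_path ./gpt2_model --search beam --beam_size 5
#   python generate.py --model_path ./gpt2_model --search top_kp --topk 5 --topp 0.9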
``` |
{
"source": "johnnyliu27/openmc",
"score": 2
} |
#### File: openmc/data/endf.py
```python
import io
import re
import os
from math import pi
from pathlib import PurePath
from collections import OrderedDict
from collections.abc import Iterable
import numpy as np
from numpy.polynomial.polynomial import Polynomial
from .data import ATOMIC_SYMBOL, gnd_name
from .function import Tabulated1D, INTERPOLATION_SCHEME
from openmc.stats.univariate import Uniform, Tabular, Legendre
_LIBRARY = {0: 'ENDF/B', 1: 'ENDF/A', 2: 'JEFF', 3: 'EFF',
4: 'ENDF/B High Energy', 5: 'CENDL', 6: 'JENDL',
17: 'TENDL', 18: 'ROSFOND', 21: 'SG-21', 31: 'INDL/V',
32: 'INDL/A', 33: 'FENDL', 34: 'IRDF', 35: 'BROND',
36: 'INGDB-90', 37: 'FENDL/A', 41: 'BROND'}
_SUBLIBRARY = {
0: 'Photo-nuclear data',
1: 'Photo-induced fission product yields',
3: 'Photo-atomic data',
4: 'Radioactive decay data',
5: 'Spontaneous fission product yields',
6: 'Atomic relaxation data',
10: 'Incident-neutron data',
11: 'Neutron-induced fission product yields',
12: 'Thermal neutron scattering data',
19: 'Neutron standards',
113: 'Electro-atomic data',
10010: 'Incident-proton data',
10011: 'Proton-induced fission product yields',
10020: 'Incident-deuteron data',
10030: 'Incident-triton data',
20030: 'Incident-helion (3He) data',
20040: 'Incident-alpha data'
}
SUM_RULES = {1: [2, 3],
3: [4, 5, 11, 16, 17, 22, 23, 24, 25, 27, 28, 29, 30, 32, 33, 34, 35,
36, 37, 41, 42, 44, 45, 152, 153, 154, 156, 157, 158, 159, 160,
161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172,
173, 174, 175, 176, 177, 178, 179, 180, 181, 183, 184, 185,
186, 187, 188, 189, 190, 194, 195, 196, 198, 199, 200],
4: list(range(50, 92)),
16: list(range(875, 892)),
18: [19, 20, 21, 38],
27: [18, 101],
101: [102, 103, 104, 105, 106, 107, 108, 109, 111, 112, 113, 114,
115, 116, 117, 155, 182, 191, 192, 193, 197],
103: list(range(600, 650)),
104: list(range(650, 700)),
105: list(range(700, 750)),
106: list(range(750, 800)),
107: list(range(800, 850))}
ENDF_FLOAT_RE = re.compile(r'([\s\-\+]?\d*\.\d+)([\+\-]\d+)')
def float_endf(s):
"""Convert string of floating point number in ENDF to float.
The ENDF-6 format uses an 'e-less' floating point number format,
e.g. -1.23481+10. Trying to convert using the float built-in won't work
because of the lack of an 'e'. This function allows such strings to be
converted while still allowing numbers that are not in exponential notation
to be converted as well.
Parameters
----------
s : str
Floating-point number from an ENDF file
Returns
-------
float
The number
"""
return float(ENDF_FLOAT_RE.sub(r'\1e\2', s))
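# Quick illustration of the e-less format described above (values taken from the
# docstring's own example):
#   float_endf(' 2.000000+3')  ->  2000.0
#   float_endf('-1.23481+10')  ->  -12348100000.0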
def _int_endf(s):
"""Convert string to int. Used for INTG records where blank entries
indicate a 0.
Parameters
----------
s : str
Integer or spaces
Returns
-------
integer
The number or 0
"""
s = s.strip()
return int(s) if s else 0
def get_text_record(file_obj):
"""Return data from a TEXT record in an ENDF-6 file.
Parameters
----------
file_obj : file-like object
ENDF-6 file to read from
Returns
-------
str
Text within the TEXT record
"""
return file_obj.readline()[:66]
def get_cont_record(file_obj, skipC=False):
"""Return data from a CONT record in an ENDF-6 file.
Parameters
----------
file_obj : file-like object
ENDF-6 file to read from
skipC : bool
Determine whether to skip the first two quantities (C1, C2) of the CONT
record.
Returns
-------
list
The six items within the CONT record
"""
line = file_obj.readline()
if skipC:
C1 = None
C2 = None
else:
C1 = float_endf(line[:11])
C2 = float_endf(line[11:22])
L1 = int(line[22:33])
L2 = int(line[33:44])
N1 = int(line[44:55])
N2 = int(line[55:66])
return [C1, C2, L1, L2, N1, N2]
def get_head_record(file_obj):
"""Return data from a HEAD record in an ENDF-6 file.
Parameters
----------
file_obj : file-like object
ENDF-6 file to read from
Returns
-------
list
The six items within the HEAD record
"""
line = file_obj.readline()
ZA = int(float_endf(line[:11]))
AWR = float_endf(line[11:22])
L1 = int(line[22:33])
L2 = int(line[33:44])
N1 = int(line[44:55])
N2 = int(line[55:66])
return [ZA, AWR, L1, L2, N1, N2]
def get_list_record(file_obj):
"""Return data from a LIST record in an ENDF-6 file.
Parameters
----------
file_obj : file-like object
ENDF-6 file to read from
Returns
-------
list
The six items within the header
list
The values within the list
"""
# determine how many items are in list
items = get_cont_record(file_obj)
NPL = items[4]
# read items
b = []
for i in range((NPL - 1)//6 + 1):
line = file_obj.readline()
n = min(6, NPL - 6*i)
for j in range(n):
b.append(float_endf(line[11*j:11*(j + 1)]))
return (items, b)
def get_tab1_record(file_obj):
"""Return data from a TAB1 record in an ENDF-6 file.
Parameters
----------
file_obj : file-like object
ENDF-6 file to read from
Returns
-------
list
The six items within the header
openmc.data.Tabulated1D
The tabulated function
"""
# Determine how many interpolation regions and total points there are
line = file_obj.readline()
C1 = float_endf(line[:11])
C2 = float_endf(line[11:22])
L1 = int(line[22:33])
L2 = int(line[33:44])
n_regions = int(line[44:55])
n_pairs = int(line[55:66])
params = [C1, C2, L1, L2]
# Read the interpolation region data, namely NBT and INT
breakpoints = np.zeros(n_regions, dtype=int)
interpolation = np.zeros(n_regions, dtype=int)
m = 0
for i in range((n_regions - 1)//3 + 1):
line = file_obj.readline()
to_read = min(3, n_regions - m)
for j in range(to_read):
breakpoints[m] = int(line[0:11])
interpolation[m] = int(line[11:22])
line = line[22:]
m += 1
# Read tabulated pairs x(n) and y(n)
x = np.zeros(n_pairs)
y = np.zeros(n_pairs)
m = 0
for i in range((n_pairs - 1)//3 + 1):
line = file_obj.readline()
to_read = min(3, n_pairs - m)
for j in range(to_read):
x[m] = float_endf(line[:11])
y[m] = float_endf(line[11:22])
line = line[22:]
m += 1
return params, Tabulated1D(x, y, breakpoints, interpolation)
def get_tab2_record(file_obj):
# Determine how many interpolation regions and total points there are
params = get_cont_record(file_obj)
n_regions = params[4]
# Read the interpolation region data, namely NBT and INT
breakpoints = np.zeros(n_regions, dtype=int)
interpolation = np.zeros(n_regions, dtype=int)
m = 0
for i in range((n_regions - 1)//3 + 1):
line = file_obj.readline()
to_read = min(3, n_regions - m)
for j in range(to_read):
breakpoints[m] = int(line[0:11])
interpolation[m] = int(line[11:22])
line = line[22:]
m += 1
return params, Tabulated2D(breakpoints, interpolation)
def get_intg_record(file_obj):
"""
Return data from an INTG record in an ENDF-6 file. Used to store the
covariance matrix in a compact format.
Parameters
----------
file_obj : file-like object
ENDF-6 file to read from
Returns
-------
numpy.ndarray
The correlation matrix described in the INTG record
"""
# determine how many items are in list and NDIGIT
items = get_cont_record(file_obj)
ndigit = int(items[2])
npar = int(items[3]) # Number of parameters
nlines = int(items[4]) # Lines to read
NROW_RULES = {2: 18, 3: 12, 4: 11, 5: 9, 6: 8}
nrow = NROW_RULES[ndigit]
# read lines and build correlation matrix
corr = np.identity(npar)
for i in range(nlines):
line = file_obj.readline()
ii = _int_endf(line[:5]) - 1 # -1 to account for 0 indexing
jj = _int_endf(line[5:10]) - 1
factor = 10**ndigit
for j in range(nrow):
if jj+j >= ii:
break
element = _int_endf(line[11+(ndigit+1)*j:11+(ndigit+1)*(j+1)])
            if element > 0:
                corr[ii, jj+j] = (element+0.5)/factor
            elif element < 0:
                corr[ii, jj+j] = (element-0.5)/factor
# Symmetrize the correlation matrix
corr = corr + corr.T - np.diag(corr.diagonal())
return corr
def get_evaluations(filename):
"""Return a list of all evaluations within an ENDF file.
Parameters
----------
filename : str
Path to ENDF-6 formatted file
Returns
-------
list
A list of :class:`openmc.data.endf.Evaluation` instances.
"""
evaluations = []
with open(str(filename), 'r') as fh:
while True:
pos = fh.tell()
line = fh.readline()
if line[66:70] == ' -1':
break
fh.seek(pos)
evaluations.append(Evaluation(fh))
return evaluations
class Evaluation(object):
"""ENDF material evaluation with multiple files/sections
Parameters
----------
filename_or_obj : str or file-like
Path to ENDF file to read or an open file positioned at the start of an
ENDF material
Attributes
----------
info : dict
Miscellaneous information about the evaluation.
target : dict
Information about the target material, such as its mass, isomeric state,
whether it's stable, and whether it's fissionable.
projectile : dict
Information about the projectile such as its mass.
reaction_list : list of 4-tuples
List of sections in the evaluation. The entries of the tuples are the
file (MF), section (MT), number of records (NC), and modification
indicator (MOD).
"""
def __init__(self, filename_or_obj):
if isinstance(filename_or_obj, (str, PurePath)):
fh = open(str(filename_or_obj), 'r')
else:
fh = filename_or_obj
self.section = {}
self.info = {}
self.target = {}
self.projectile = {}
self.reaction_list = []
# Determine MAT number for this evaluation
MF = 0
while MF == 0:
position = fh.tell()
line = fh.readline()
MF = int(line[70:72])
self.material = int(line[66:70])
fh.seek(position)
while True:
# Find next section
while True:
position = fh.tell()
line = fh.readline()
MAT = int(line[66:70])
MF = int(line[70:72])
MT = int(line[72:75])
if MT > 0 or MAT == 0:
fh.seek(position)
break
# If end of material reached, exit loop
if MAT == 0:
fh.readline()
break
section_data = ''
while True:
line = fh.readline()
if line[72:75] == ' 0':
break
else:
section_data += line
self.section[MF, MT] = section_data
self._read_header()
def __repr__(self):
if 'zsymam' in self.target:
name = self.target['zsymam'].replace(' ', '')
else:
name = 'Unknown'
return '<{} for {} {}>'.format(self.info['sublibrary'], name,
self.info['library'])
def _read_header(self):
file_obj = io.StringIO(self.section[1, 451])
# Information about target/projectile
items = get_head_record(file_obj)
Z, A = divmod(items[0], 1000)
self.target['atomic_number'] = Z
self.target['mass_number'] = A
self.target['mass'] = items[1]
self._LRP = items[2]
self.target['fissionable'] = (items[3] == 1)
try:
library = _LIBRARY[items[4]]
except KeyError:
library = 'Unknown'
self.info['modification'] = items[5]
# Control record 1
items = get_cont_record(file_obj)
self.target['excitation_energy'] = items[0]
self.target['stable'] = (int(items[1]) == 0)
self.target['state'] = items[2]
self.target['isomeric_state'] = m = items[3]
self.info['format'] = items[5]
assert self.info['format'] == 6
# Set correct excited state for Am242_m1, which is wrong in ENDF/B-VII.1
if Z == 95 and A == 242 and m == 1:
self.target['state'] = 2
# Control record 2
items = get_cont_record(file_obj)
self.projectile['mass'] = items[0]
self.info['energy_max'] = items[1]
library_release = items[2]
self.info['sublibrary'] = _SUBLIBRARY[items[4]]
library_version = items[5]
self.info['library'] = (library, library_version, library_release)
# Control record 3
items = get_cont_record(file_obj)
self.target['temperature'] = items[0]
self.info['derived'] = (items[2] > 0)
NWD = items[4]
NXC = items[5]
# Text records
text = [get_text_record(file_obj) for i in range(NWD)]
if len(text) >= 5:
self.target['zsymam'] = text[0][0:11]
self.info['laboratory'] = text[0][11:22]
self.info['date'] = text[0][22:32]
self.info['author'] = text[0][32:66]
self.info['reference'] = text[1][1:22]
self.info['date_distribution'] = text[1][22:32]
self.info['date_release'] = text[1][33:43]
self.info['date_entry'] = text[1][55:63]
self.info['identifier'] = text[2:5]
self.info['description'] = text[5:]
# File numbers, reaction designations, and number of records
for i in range(NXC):
line = file_obj.readline()
mf = int(line[22:33])
mt = int(line[33:44])
nc = int(line[44:55])
try:
mod = int(line[55:66])
except ValueError:
# In JEFF 3.2, a few isotopes of U have MOD values that are
# missing. This prevents failure on these isotopes.
mod = 0
self.reaction_list.append((mf, mt, nc, mod))
@property
def gnd_name(self):
return gnd_name(self.target['atomic_number'],
self.target['mass_number'],
self.target['isomeric_state'])
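# Usage sketch (hedged; the ENDF filename below is a placeholder):
#   ev = Evaluation('n-092_U_235.endf')
#   print(ev)                                   # e.g. <Incident-neutron data for U235 ...>
#   print(ev.info['library'], ev.target['mass_number'])
# get_evaluations('tape20') would instead return every evaluation contained in a
# multi-material tape as a list of Evaluation objects.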
class Tabulated2D(object):
"""Metadata for a two-dimensional function.
This is a dummy class that is not really used other than to store the
interpolation information for a two-dimensional function. Once we refactor
to adopt GND-like data containers, this will probably be removed or
extended.
Parameters
----------
breakpoints : Iterable of int
Breakpoints for interpolation regions
interpolation : Iterable of int
Interpolation scheme identification number, e.g., 3 means y is linear in
ln(x).
"""
def __init__(self, breakpoints, interpolation):
self.breakpoints = breakpoints
self.interpolation = interpolation
```
#### File: openmc/deplete/chain.py
```python
from collections import OrderedDict, defaultdict
from io import StringIO
from itertools import chain
import math
import re
import os
# Try to use lxml if it is available. It preserves the order of attributes and
# provides a pretty-printer by default. If not available, use OpenMC function to
# pretty print.
try:
import lxml.etree as ET
_have_lxml = True
except ImportError:
import xml.etree.ElementTree as ET
_have_lxml = False
import scipy.sparse as sp
import openmc.data
from openmc._xml import clean_indentation
from .nuclide import Nuclide, DecayTuple, ReactionTuple
# tuple of (reaction name, possible MT values, (dA, dZ)) where dA is the change
# in the mass number and dZ is the change in the atomic number
_REACTIONS = [
('(n,2n)', set(chain([16], range(875, 892))), (-1, 0)),
('(n,3n)', {17}, (-2, 0)),
('(n,4n)', {37}, (-3, 0)),
('(n,gamma)', {102}, (1, 0)),
('(n,p)', set(chain([103], range(600, 650))), (0, -1)),
('(n,a)', set(chain([107], range(800, 850))), (-3, -2))
]
def replace_missing(product, decay_data):
"""Replace missing product with suitable decay daughter.
Parameters
----------
product : str
Name of product in GND format, e.g. 'Y86_m1'.
decay_data : dict
Dictionary of decay data
Returns
-------
product : str
Replacement for missing product in GND format.
"""
# Determine atomic number, mass number, and metastable state
Z, A, state = openmc.data.zam(product)
symbol = openmc.data.ATOMIC_SYMBOL[Z]
# Replace neutron with proton
if Z == 0 and A == 1:
return 'H1'
# First check if ground state is available
if state:
product = '{}{}'.format(symbol, A)
# Find isotope with longest half-life
half_life = 0.0
for nuclide, data in decay_data.items():
m = re.match(r'{}(\d+)(?:_m\d+)?'.format(symbol), nuclide)
if m:
# If we find a stable nuclide, stop search
if data.nuclide['stable']:
mass_longest_lived = int(m.group(1))
break
if data.half_life.nominal_value > half_life:
mass_longest_lived = int(m.group(1))
half_life = data.half_life.nominal_value
# If mass number of longest-lived isotope is less than that of missing
# product, assume it undergoes beta-. Otherwise assume beta+.
beta_minus = (mass_longest_lived < A)
# Iterate until we find an existing nuclide
while product not in decay_data:
if Z > 98:
Z -= 2
A -= 4
else:
if beta_minus:
Z += 1
else:
Z -= 1
product = '{}{}'.format(openmc.data.ATOMIC_SYMBOL[Z], A)
return product
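# Illustrative call (hedged; 'Y86_m1' comes from the docstring example and the
# decay_data mapping would normally be built from ENDF decay sub-library files):
#   daughter = replace_missing('Y86_m1', decay_data)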
class Chain(object):
"""Full representation of a depletion chain.
A depletion chain can be created by using the :meth:`from_endf` method which
requires a list of ENDF incident neutron, decay, and neutron fission product
yield sublibrary files. The depletion chain used during a depletion
simulation is indicated by either an argument to
:class:`openmc.deplete.Operator` or through the
:envvar:`OPENMC_DEPLETE_CHAIN` environment variable.
Attributes
----------
nuclides : list of openmc.deplete.Nuclide
Nuclides present in the chain.
reactions : list of str
Reactions that are tracked in the depletion chain
nuclide_dict : OrderedDict of str to int
Maps a nuclide name to an index in nuclides.
"""
def __init__(self):
self.nuclides = []
self.reactions = []
self.nuclide_dict = OrderedDict()
def __contains__(self, nuclide):
return nuclide in self.nuclide_dict
def __getitem__(self, name):
"""Get a Nuclide by name."""
return self.nuclides[self.nuclide_dict[name]]
def __len__(self):
"""Number of nuclides in chain."""
return len(self.nuclides)
@classmethod
def from_endf(cls, decay_files, fpy_files, neutron_files):
"""Create a depletion chain from ENDF files.
Parameters
----------
decay_files : list of str
List of ENDF decay sub-library files
fpy_files : list of str
List of ENDF neutron-induced fission product yield sub-library files
neutron_files : list of str
List of ENDF neutron reaction sub-library files
"""
chain = cls()
# Create dictionary mapping target to filename
print('Processing neutron sub-library files...')
reactions = {}
for f in neutron_files:
evaluation = openmc.data.endf.Evaluation(f)
name = evaluation.gnd_name
reactions[name] = {}
for mf, mt, nc, mod in evaluation.reaction_list:
if mf == 3:
file_obj = StringIO(evaluation.section[3, mt])
openmc.data.endf.get_head_record(file_obj)
q_value = openmc.data.endf.get_cont_record(file_obj)[1]
reactions[name][mt] = q_value
# Determine what decay and FPY nuclides are available
print('Processing decay sub-library files...')
decay_data = {}
for f in decay_files:
data = openmc.data.Decay(f)
# Skip decay data for neutron itself
if data.nuclide['atomic_number'] == 0:
continue
decay_data[data.nuclide['name']] = data
print('Processing fission product yield sub-library files...')
fpy_data = {}
for f in fpy_files:
data = openmc.data.FissionProductYields(f)
fpy_data[data.nuclide['name']] = data
print('Creating depletion_chain...')
missing_daughter = []
missing_rx_product = []
missing_fpy = []
missing_fp = []
for idx, parent in enumerate(sorted(decay_data, key=openmc.data.zam)):
data = decay_data[parent]
nuclide = Nuclide()
nuclide.name = parent
chain.nuclides.append(nuclide)
chain.nuclide_dict[parent] = idx
if not data.nuclide['stable'] and data.half_life.nominal_value != 0.0:
nuclide.half_life = data.half_life.nominal_value
nuclide.decay_energy = sum(E.nominal_value for E in
data.average_energies.values())
sum_br = 0.0
for i, mode in enumerate(data.modes):
type_ = ','.join(mode.modes)
if mode.daughter in decay_data:
target = mode.daughter
else:
print('missing {} {} {}'.format(parent, ','.join(mode.modes), mode.daughter))
target = replace_missing(mode.daughter, decay_data)
# Write branching ratio, taking care to ensure sum is unity
br = mode.branching_ratio.nominal_value
sum_br += br
if i == len(data.modes) - 1 and sum_br != 1.0:
br = 1.0 - sum(m.branching_ratio.nominal_value
for m in data.modes[:-1])
# Append decay mode
nuclide.decay_modes.append(DecayTuple(type_, target, br))
if parent in reactions:
reactions_available = set(reactions[parent].keys())
for name, mts, changes in _REACTIONS:
if mts & reactions_available:
delta_A, delta_Z = changes
A = data.nuclide['mass_number'] + delta_A
Z = data.nuclide['atomic_number'] + delta_Z
daughter = '{}{}'.format(openmc.data.ATOMIC_SYMBOL[Z], A)
if name not in chain.reactions:
chain.reactions.append(name)
if daughter not in decay_data:
missing_rx_product.append((parent, name, daughter))
# Store Q value
for mt in sorted(mts):
if mt in reactions[parent]:
q_value = reactions[parent][mt]
break
else:
q_value = 0.0
nuclide.reactions.append(ReactionTuple(
name, daughter, q_value, 1.0))
if any(mt in reactions_available for mt in [18, 19, 20, 21, 38]):
if parent in fpy_data:
q_value = reactions[parent][18]
nuclide.reactions.append(
ReactionTuple('fission', 0, q_value, 1.0))
if 'fission' not in chain.reactions:
chain.reactions.append('fission')
else:
missing_fpy.append(parent)
if parent in fpy_data:
fpy = fpy_data[parent]
if fpy.energies is not None:
nuclide.yield_energies = fpy.energies
else:
nuclide.yield_energies = [0.0]
for E, table in zip(nuclide.yield_energies, fpy.independent):
yield_replace = 0.0
yields = defaultdict(float)
for product, y in table.items():
# Handle fission products that have no decay data available
if product not in decay_data:
daughter = replace_missing(product, decay_data)
product = daughter
yield_replace += y.nominal_value
yields[product] += y.nominal_value
if yield_replace > 0.0:
missing_fp.append((parent, E, yield_replace))
nuclide.yield_data[E] = []
for k in sorted(yields, key=openmc.data.zam):
nuclide.yield_data[E].append((k, yields[k]))
# Display warnings
if missing_daughter:
print('The following decay modes have daughters with no decay data:')
for mode in missing_daughter:
print(' {}'.format(mode))
print('')
if missing_rx_product:
print('The following reaction products have no decay data:')
for vals in missing_rx_product:
print('{} {} -> {}'.format(*vals))
print('')
if missing_fpy:
print('The following fissionable nuclides have no fission product yields:')
for parent in missing_fpy:
print(' ' + parent)
print('')
if missing_fp:
print('The following nuclides have fission products with no decay data:')
for vals in missing_fp:
print(' {}, E={} eV (total yield={})'.format(*vals))
return chain
@classmethod
def from_xml(cls, filename):
"""Reads a depletion chain XML file.
Parameters
----------
filename : str
The path to the depletion chain XML file.
"""
chain = cls()
# Load XML tree
root = ET.parse(str(filename))
for i, nuclide_elem in enumerate(root.findall('nuclide')):
nuc = Nuclide.from_xml(nuclide_elem)
chain.nuclide_dict[nuc.name] = i
# Check for reaction paths
for rx in nuc.reactions:
if rx.type not in chain.reactions:
chain.reactions.append(rx.type)
chain.nuclides.append(nuc)
return chain
def export_to_xml(self, filename):
"""Writes a depletion chain XML file.
Parameters
----------
filename : str
The path to the depletion chain XML file.
"""
root_elem = ET.Element('depletion_chain')
for nuclide in self.nuclides:
root_elem.append(nuclide.to_xml_element())
tree = ET.ElementTree(root_elem)
if _have_lxml:
tree.write(str(filename), encoding='utf-8', pretty_print=True)
else:
clean_indentation(root_elem)
tree.write(str(filename), encoding='utf-8')
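    # Round-trip sketch (hedged; 'chain_endfb71.xml' is a placeholder filename):
    #   chain = Chain.from_xml('chain_endfb71.xml')
    #   print(len(chain), 'nuclides;', chain.reactions)
    #   chain.export_to_xml('chain_copy.xml')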
def form_matrix(self, rates):
"""Forms depletion matrix.
Parameters
----------
rates : numpy.ndarray
2D array indexed by (nuclide, reaction)
Returns
-------
scipy.sparse.csr_matrix
Sparse matrix representing depletion.
"""
matrix = defaultdict(float)
reactions = set()
for i, nuc in enumerate(self.nuclides):
if nuc.n_decay_modes != 0:
# Decay paths
# Loss
decay_constant = math.log(2) / nuc.half_life
if decay_constant != 0.0:
matrix[i, i] -= decay_constant
# Gain
for _, target, branching_ratio in nuc.decay_modes:
# Allow for total annihilation for debug purposes
if target != 'Nothing':
branch_val = branching_ratio * decay_constant
if branch_val != 0.0:
k = self.nuclide_dict[target]
matrix[k, i] += branch_val
if nuc.name in rates.index_nuc:
# Extract all reactions for this nuclide in this cell
nuc_ind = rates.index_nuc[nuc.name]
nuc_rates = rates[nuc_ind, :]
for r_type, target, _, br in nuc.reactions:
# Extract reaction index, and then final reaction rate
r_id = rates.index_rx[r_type]
path_rate = nuc_rates[r_id]
# Loss term -- make sure we only count loss once for
# reactions with branching ratios
if r_type not in reactions:
reactions.add(r_type)
if path_rate != 0.0:
matrix[i, i] -= path_rate
# Gain term; allow for total annihilation for debug purposes
if target != 'Nothing':
if r_type != 'fission':
if path_rate != 0.0:
k = self.nuclide_dict[target]
matrix[k, i] += path_rate * br
else:
# Assume that we should always use thermal fission
# yields. At some point it would be nice to account
                        # for the energy dependence.
energy, data = sorted(nuc.yield_data.items())[0]
for product, y in data:
yield_val = y * path_rate
if yield_val != 0.0:
k = self.nuclide_dict[product]
matrix[k, i] += yield_val
# Clear set of reactions
reactions.clear()
# Use DOK matrix as intermediate representation, then convert to CSR and return
n = len(self)
matrix_dok = sp.dok_matrix((n, n))
dict.update(matrix_dok, matrix)
return matrix_dok.tocsr()
```
#### File: openmc/openmc/particle_restart.py
```python
import h5py
import openmc.checkvalue as cv
_VERSION_PARTICLE_RESTART = 2
class Particle(object):
"""Information used to restart a specific particle that caused a simulation to
fail.
Parameters
----------
filename : str
Path to the particle restart file
Attributes
----------
current_batch : int
The batch containing the particle
generations_per_batch : int
Number of generations per batch
current_generation : int
The generation containing the particle
n_particles : int
Number of particles per generation
run_mode : int
Type of simulation (criticality or fixed source)
id : long
Identifier of the particle
type : int
Particle type (1 = neutron, 2 = photon, 3 = electron, 4 = positron)
weight : float
Weight of the particle
energy : float
Energy of the particle in eV
xyz : list of float
Position of the particle
uvw : list of float
Directional cosines of the particle
"""
def __init__(self, filename):
self._f = h5py.File(filename, 'r')
# Ensure filetype and version are correct
cv.check_filetype_version(self._f, 'particle restart',
_VERSION_PARTICLE_RESTART)
@property
def current_batch(self):
return self._f['current_batch'].value
@property
def current_generation(self):
return self._f['current_generation'].value
@property
def energy(self):
return self._f['energy'].value
@property
def generations_per_batch(self):
return self._f['generations_per_batch'].value
@property
def id(self):
return self._f['id'].value
@property
def type(self):
return self._f['type'].value
@property
def n_particles(self):
return self._f['n_particles'].value
@property
def run_mode(self):
return self._f['run_mode'].value.decode()
@property
def uvw(self):
return self._f['uvw'].value
@property
def weight(self):
return self._f['weight'].value
@property
def xyz(self):
return self._f['xyz'].value
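    # Usage sketch (hedged; the restart filename is a placeholder for a file
    # written by OpenMC when a particle causes a simulation failure):
    #   p = Particle('particle_10_744.h5')
    #   print(p.id, p.energy, p.xyz, p.uvw)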
```
#### File: regression_tests/salphabeta/test.py
```python
import openmc
import openmc.model
from tests.testing_harness import PyAPITestHarness
def make_model():
model = openmc.model.Model()
# Materials
m1 = openmc.Material()
m1.set_density('g/cc', 4.5)
m1.add_nuclide('U235', 1.0)
m1.add_nuclide('H1', 1.0)
m1.add_s_alpha_beta('c_H_in_H2O', fraction=0.5)
m2 = openmc.Material()
m2.set_density('g/cc', 4.5)
m2.add_nuclide('U235', 1.0)
m2.add_nuclide('C0', 1.0)
m2.add_s_alpha_beta('c_Graphite')
m3 = openmc.Material()
m3.set_density('g/cc', 4.5)
m3.add_nuclide('U235', 1.0)
m3.add_nuclide('Be9', 1.0)
m3.add_nuclide('O16', 1.0)
m3.add_s_alpha_beta('c_Be_in_BeO')
m3.add_s_alpha_beta('c_O_in_BeO')
m4 = openmc.Material()
m4.set_density('g/cm3', 5.90168)
m4.add_nuclide('H1', 0.3)
m4.add_nuclide('Zr90', 0.15)
m4.add_nuclide('Zr91', 0.1)
m4.add_nuclide('Zr92', 0.1)
m4.add_nuclide('Zr94', 0.05)
m4.add_nuclide('Zr96', 0.05)
m4.add_nuclide('U235', 0.1)
m4.add_nuclide('U238', 0.15)
m4.add_s_alpha_beta('c_Zr_in_ZrH')
m4.add_s_alpha_beta('c_H_in_ZrH')
model.materials += [m1, m2, m3, m4]
# Geometry
x0 = openmc.XPlane(x0=-10, boundary_type='vacuum')
x1 = openmc.XPlane(x0=-5)
x2 = openmc.XPlane(x0=0)
x3 = openmc.XPlane(x0=5)
x4 = openmc.XPlane(x0=10, boundary_type='vacuum')
root_univ = openmc.Universe()
surfs = (x0, x1, x2, x3, x4)
mats = (m1, m2, m3, m4)
cells = []
for i in range(4):
cell = openmc.Cell()
cell.region = +surfs[i] & -surfs[i+1]
cell.fill = mats[i]
root_univ.add_cell(cell)
model.geometry.root_universe = root_univ
# Settings
model.settings.batches = 5
model.settings.inactive = 0
model.settings.particles = 1000
model.settings.source = openmc.Source(space=openmc.stats.Box(
[-4, -4, -4], [4, 4, 4]))
return model
def test_salphabeta():
model = make_model()
harness = PyAPITestHarness('statepoint.5.h5', model)
harness.main()
```
#### File: regression_tests/triso/test.py
```python
import random
from math import sqrt
import numpy as np
import openmc
import openmc.model
from tests.testing_harness import PyAPITestHarness
class TRISOTestHarness(PyAPITestHarness):
def _build_inputs(self):
        # Define TRISO materials
fuel = openmc.Material()
fuel.set_density('g/cm3', 10.5)
fuel.add_nuclide('U235', 0.14154)
fuel.add_nuclide('U238', 0.85846)
fuel.add_nuclide('C0', 0.5)
fuel.add_nuclide('O16', 1.5)
porous_carbon = openmc.Material()
porous_carbon.set_density('g/cm3', 1.0)
porous_carbon.add_nuclide('C0', 1.0)
porous_carbon.add_s_alpha_beta('c_Graphite')
ipyc = openmc.Material()
ipyc.set_density('g/cm3', 1.90)
ipyc.add_nuclide('C0', 1.0)
ipyc.add_s_alpha_beta('c_Graphite')
sic = openmc.Material()
sic.set_density('g/cm3', 3.20)
sic.add_nuclide('C0', 1.0)
sic.add_element('Si', 1.0)
opyc = openmc.Material()
opyc.set_density('g/cm3', 1.87)
opyc.add_nuclide('C0', 1.0)
opyc.add_s_alpha_beta('c_Graphite')
graphite = openmc.Material()
graphite.set_density('g/cm3', 1.1995)
graphite.add_nuclide('C0', 1.0)
graphite.add_s_alpha_beta('c_Graphite')
# Create TRISO particles
spheres = [openmc.Sphere(R=r*1e-4)
for r in [212.5, 312.5, 347.5, 382.5]]
c1 = openmc.Cell(fill=fuel, region=-spheres[0])
c2 = openmc.Cell(fill=porous_carbon, region=+spheres[0] & -spheres[1])
c3 = openmc.Cell(fill=ipyc, region=+spheres[1] & -spheres[2])
c4 = openmc.Cell(fill=sic, region=+spheres[2] & -spheres[3])
c5 = openmc.Cell(fill=opyc, region=+spheres[3])
inner_univ = openmc.Universe(cells=[c1, c2, c3, c4, c5])
# Define box to contain lattice and to pack TRISO particles in
min_x = openmc.XPlane(x0=-0.5, boundary_type='reflective')
max_x = openmc.XPlane(x0=0.5, boundary_type='reflective')
min_y = openmc.YPlane(y0=-0.5, boundary_type='reflective')
max_y = openmc.YPlane(y0=0.5, boundary_type='reflective')
min_z = openmc.ZPlane(z0=-0.5, boundary_type='reflective')
max_z = openmc.ZPlane(z0=0.5, boundary_type='reflective')
box_region = +min_x & -max_x & +min_y & -max_y & +min_z & -max_z
box = openmc.Cell(region=box_region)
outer_radius = 422.5*1e-4
centers = openmc.model.pack_spheres(radius=outer_radius,
region=box_region, num_spheres=100)
trisos = [openmc.model.TRISO(outer_radius, inner_univ, c)
for c in centers]
# Create lattice
ll, ur = box.region.bounding_box
shape = (3, 3, 3)
pitch = (ur - ll) / shape
lattice = openmc.model.create_triso_lattice(
trisos, ll, pitch, shape, graphite)
box.fill = lattice
root = openmc.Universe(0, cells=[box])
geom = openmc.Geometry(root)
geom.export_to_xml()
settings = openmc.Settings()
settings.batches = 5
settings.inactive = 0
settings.particles = 100
settings.source = openmc.Source(space=openmc.stats.Point())
settings.export_to_xml()
mats = openmc.Materials([fuel, porous_carbon, ipyc, sic, opyc, graphite])
mats.export_to_xml()
def test_triso():
harness = TRISOTestHarness('statepoint.5.h5')
harness.main()
```
#### File: tests/unit_tests/__init__.py
```python
import numpy as np
import pytest
def assert_unbounded(obj):
"""Assert that a region/cell is unbounded."""
ll, ur = obj.bounding_box
assert ll == pytest.approx((-np.inf, -np.inf, -np.inf))
assert ur == pytest.approx((np.inf, np.inf, np.inf))
```
#### File: tools/ci/travis-install.py
```python
import os
import shutil
import subprocess
def which(program):
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
def install(omp=False, mpi=False, phdf5=False, dagmc=False):
# Create build directory and change to it
shutil.rmtree('build', ignore_errors=True)
os.mkdir('build')
os.chdir('build')
# Build in debug mode by default
cmake_cmd = ['cmake', '-Ddebug=on']
# Turn off OpenMP if specified
if not omp:
cmake_cmd.append('-Dopenmp=off')
# Use MPI wrappers when building in parallel
if mpi:
os.environ['FC'] = 'mpifort' if which('mpifort') else 'mpif90'
os.environ['CC'] = 'mpicc'
os.environ['CXX'] = 'mpicxx'
# Tell CMake to prefer parallel HDF5 if specified
if phdf5:
if not mpi:
raise ValueError('Parallel HDF5 must be used in '
'conjunction with MPI.')
cmake_cmd.append('-DHDF5_PREFER_PARALLEL=ON')
else:
cmake_cmd.append('-DHDF5_PREFER_PARALLEL=OFF')
if dagmc:
cmake_cmd.append('-Ddagmc=ON')
# Build and install
cmake_cmd.append('..')
print(' '.join(cmake_cmd))
subprocess.check_call(cmake_cmd)
subprocess.check_call(['make', '-j4'])
subprocess.check_call(['sudo', 'make', 'install'])
def main():
# Convert Travis matrix environment variables into arguments for install()
omp = (os.environ.get('OMP') == 'y')
mpi = (os.environ.get('MPI') == 'y')
phdf5 = (os.environ.get('PHDF5') == 'y')
dagmc = (os.environ.get('DAGMC') == 'y')
# Build and install
install(omp, mpi, phdf5, dagmc)
if __name__ == '__main__':
main()
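# Example invocation (hedged): the script reads the Travis matrix variables, so a
# parallel-HDF5 MPI build could be requested with
#   OMP=y MPI=y PHDF5=y python tools/ci/travis-install.py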
``` |
{
"source": "johnny-longneck/petchat",
"score": 3
} |
#### File: petchat/app/app.py
```python
from importlib import import_module
from flask import Flask, render_template, Response, request, send_file, jsonify
import os
import cv2
# Import the camera driver named by the CAMERA environment variable (the Pi
# camera fallback below is left commented out)
if os.environ.get('CAMERA'):
Camera = import_module('camera_' + os.environ['CAMERA']).Camera
#else:
# from camera_pi import Camera
app = Flask(__name__)
@app.route('/')
def index():
return render_template('index.html')
@app.route('/video_feed')
def video_feed():
return Response(gen_frames(), mimetype='multipart/x-mixed-replace; boundary=frame')
def gen_frames():
"""Video streaming generator function."""
camera = cv2.VideoCapture(0)
while True:
success, frame = camera.read() # read the camera frame
if not success:
break
else:
ret, buffer = cv2.imencode('.jpg', frame)
frame = buffer.tobytes()
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
if cv2.waitKey(1) & 0xFF == ord('q'):
break
camera.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
app.run(host='0.0.0.0', threaded=True)
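# Run sketch (hedged; 'opencv' is a placeholder driver name and would require a
# matching camera_opencv module providing a Camera class):
#   CAMERA=opencv python app.py
# The stream is then served at http://<host>:5000/video_feed (Flask's default port).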
``` |
{
"source": "johnnylord/gRPC-with-protobuf",
"score": 3
} |
#### File: johnnylord/gRPC-with-protobuf/client.py
```python
import os
import os.path as osp
import sys
BUILD_DIR = osp.join(osp.dirname(osp.abspath(__file__)), "build/service/")
sys.path.insert(0, BUILD_DIR)
import argparse
import grpc
import fib_pb2
import fib_pb2_grpc
def main(args):
host = f"{args['ip']}:{args['port']}"
print(host)
with grpc.insecure_channel(host) as channel:
stub = fib_pb2_grpc.FibCalculatorStub(channel)
request = fib_pb2.FibRequest()
request.order = args['order']
response = stub.Compute(request)
print(response.value)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--ip", type=str, default="localhost")
parser.add_argument("--port", type=int, default=8080)
parser.add_argument("--order", type=int, default=10)
args = vars(parser.parse_args())
main(args)
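# Example invocation (hedged; assumes the FibCalculator gRPC server from this
# repo is listening on localhost:8080):
#   python client.py --ip localhost --port 8080 --order 10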
``` |
{
"source": "johnnylord/mtmc-testbed",
"score": 2
} |
#### File: app/bodyposeapp/__init__.py
```python
import os
import os.path as osp
import logging
import cv2
import numpy as np
from ..base import App
from ..base import Keyboard as kb
from ..gui.container import check_ready
from ..gui.media import MediaType
from ..utils.transform import convert_bbox_coordinate
from ..utils.visualize import draw_bbox, draw_bodypose
logger = logging.getLogger(__name__)
__all__ = [ "BodyPoseApp" ]
class BodyPoseApp(App):
MATCHED_WORKER = "BodyPoseWorker"
def __init__(self, **kwargs):
raise RuntimeError("Cannot directly instantiate object from BodyPoseApp")
def boot(self):
"""Prepare runtime environment for worker"""
self.video_results = {}
self.event_handler = { 'detect': self._detect_handler }
def export(self, output_dir):
"""Export tracking result to output directory"""
# Check output directory exists
output_dir = osp.join(output_dir, self.__class__.__name__)
if not osp.exists(output_dir):
os.makedirs(output_dir)
# Export video result panel-by-panel
for panel, result in self.video_results.items():
fname = "{}.txt".format(osp.basename(panel.src))
fname = osp.join(output_dir, fname)
with open(fname, "w") as f:
fids = sorted(result.keys())
for fid in fids:
people = result[fid]
for person in people:
bbox = person['bbox'] # (xmin, ymin, xmax, ymax)
# keypoints (18, 3)
# - 18 types of keypoint
# - 3 means (x, y, score)
                        # The keypoint order follows the openpose project convention
keypoints = person['keypoints']
keypoints = keypoints.reshape(-1)
line = f"{fid},0"
bline = ",".join([ str(bbox[i]) for i in range(4) ])
kline = ",".join([ str(v) for v in keypoints ])
line = ",".join([line, bline, kline]) + '\n'
f.write(line)
logger.info(f"Export result to '{output_dir}'")
@check_ready
def run(self):
"""App loop for running app"""
while not self.is_stop():
content = self.render()
fid, frame = content['fid'], content['container_frame']
if not self.is_pause():
# Send request
request = { 'action': 'detect' }
self.send(request)
# Send raw frames to workers
video_frames = []
for panel in self.panel_to_channel.keys():
media_frame = panel.media_cache
media_frame = cv2.resize(media_frame, self.trans_resolution)
frame_bytes = cv2.imencode('.jpg', media_frame)[1]
video_frames.append({ 'panel': panel, 'frame_bytes': frame_bytes })
self.parallel_send_videos(video_frames)
# Catch response from remote worker
response = self.recv()
if response is None:
break
# Handle server response
handler = self.event_handler[response['action']]
new_content = handler(response)
fid, frame = new_content['fid'], new_content['container_frame']
last_frame = frame
# Show applications
cv2.imshow(self.winname, frame)
cv2.setTrackbarPos(self.barname, self.winname, fid)
# Handling keyboard events
key = cv2.waitKey(1) & 0xff
self.keyboaord_handler(key)
cv2.destroyAllWindows()
def keyboaord_handler(self, key):
        # When a certain panel is in focus
# ====================================
if self.mode == App.OPERATION_MODE:
if key == kb.ESC:
self.focus_panel.focus = False
self.focus_panel = None
self.mode = App.SELECT_MODE
return
# Common key handler
# =====================================
super().keyboaord_handler(key)
def mouse_callback(self, event, x, y, flags, param):
# Wait for selecting panel to focus on
# ==================================================
if self.mode == App.SELECT_MODE:
super().mouse_callback(event, x, y, flags, param)
elif self.mode == App.OPERATION_MODE:
pass
def trackbar_callback(self, value):
super().trackbar_callback(value)
def _detect_handler(self, response):
# Rerender panels (add bboxes)
panel_contents = []
for panel in response['content']:
pid = panel['pid']
people = panel['people']
bboxes = np.array([ person['bbox'] for person in people ])
keypointss = np.array([ person['keypoints'] for person in people ])
# Select target panel to manipulate
target_panel = [ panel
for panel in self.panels
if panel.pid == pid ][0]
# Convert coordinate system
target_media_frame = target_panel.media_cache
new_resolution = np.array(target_media_frame.shape[:2][::-1])
old_resolution = np.array(self.trans_resolution)
bboxes = convert_bbox_coordinate(bboxes, old_resolution, new_resolution)
# Convert keypoint system
keypointss[:, :, :2] = keypointss[:, :, :2] * (new_resolution / old_resolution)
# Save result in mot tracking format
for bbox, keypoints in zip(bboxes, keypointss):
# Check data structure format
if target_panel not in self.video_results:
self.video_results[target_panel] = {}
if target_panel.fid not in self.video_results[target_panel]:
self.video_results[target_panel][target_panel.fid] = []
record = { 'bbox': bbox, 'keypoints': keypoints }
self.video_results[target_panel][target_panel.fid].append(record)
# Draw bboxes on target panel
for bbox in bboxes:
draw_bbox(target_media_frame, bbox, thickness=self.line_thickness)
for keypoints in keypointss:
draw_bodypose(target_media_frame, keypoints, thickness=self.line_thickness)
# Rerender
target_panel_content = target_panel.rerender(target_media_frame)
panel_contents.append(target_panel_content)
# Align/Sort rerendered panel_contents
panel_contents = [ [ panel_content
for panel_content in panel_contents
if panel_content['pid'] == panel.pid ][0]
for panel in self.panels ]
# Rerender container
content = self.rerender(panel_contents)
return content
```
#### File: app/sotapp/__init__.py
```python
import os
import os.path as osp
import logging
import cv2
import numpy as np
from easydict import EasyDict
from ..base import App
from ..base import Keyboard as kb
from ..gui.container import check_ready
from ..gui.media import MediaType
from ..utils.transform import convert_bbox_coordinate
from ..utils.visualize import draw_bbox, draw_text, draw_gaussian, get_unique_color
logger = logging.getLogger(__name__)
__all__ = [ "SOTApp" ]
class SOTApp(App):
MATCHED_WORKER = "SOTWorker"
def __init__(self, **kwargs):
raise RuntimeError("Cannot directly instantiate object from SOTApp")
def boot(self):
"""Prepare runtime environment for worker"""
self.video_results = {}
self.event_handler = {
'nop': self._nop_handler,
'reset': self._reset_handler,
'track': self._track_handler,
}
self.state = EasyDict({
'debug': True,
'reset': False,
'tracked': False,
'results': {},
# Opencv app state
'app': {
'click': False,
'clicked': False,
'tlbr': [],
},
# Synchronized with remote tracker on server
'remote': {
"fid": -1,
"pid": None,
"tlbr": [],
},
})
def export(self, output_dir):
"""Export tracking result to output directory"""
# Check output directory exists
output_dir = osp.join(output_dir, self.__class__.__name__)
if not osp.exists(output_dir):
os.makedirs(output_dir)
# Export video result panel-by-panel
for panel, result in self.video_results.items():
fname = "{}.txt".format(osp.basename(panel.src))
fname = osp.join(output_dir, fname)
with open(fname, "w") as f:
fids = sorted(result.keys())
for fid in fids:
tracks = result[fid]
for t in tracks:
f.write(f"{fid},{t[0]},{t[1]},{t[2]},{t[3]},{t[4]}\n")
logger.info(f"Export result to '{output_dir}'")
@check_ready
def run(self):
"""App loop for running app"""
while not self.is_stop():
# Render new frame
content = self.render()
fid, frame = content['fid'], content['container_frame']
action = self._determine_action()
# Target object is being tracked
if action == 'track':
old_resolution = frame.shape[:2][::-1]
new_resolution = self.trans_resolution
# Prepare current tracked object position to remote server
self.state.remote['fid'] = fid
if (
self.state.tracked
and len(self.state.app.tlbr) == 4
and self.mode == App.OPERATION_MODE
):
tlbr = self.state.app.tlbr
self.state.remote.tlbr = convert_bbox_coordinate([tlbr],
old_resolution,
new_resolution)[0]
self.state.remote.pid = self.focus_panel.pid
self.state.app.tlbr = []
else:
self.state.remote.pid = None
self.state.remote.tlbr = []
# Send request
request = { 'action': action, 'remote': self.state.remote }
self.send(request)
# Send raw frames to workers
video_frames = []
for panel in self.panel_to_channel.keys():
media_frame = panel.media_cache
media_frame = cv2.resize(media_frame, self.trans_resolution)
frame_bytes = cv2.imencode('.jpg', media_frame)[1]
video_frames.append({ 'panel': panel, 'frame_bytes': frame_bytes })
self.parallel_send_videos(video_frames)
# No object is being tracked
else:
# Send request
request = { 'action': action }
self.send(request)
# Catch response from remote worker
response = self.recv()
if response is None:
break
# Handle server response
handler = self.event_handler[response['action']]
new_content = handler(response)
if response['action'] == 'track':
fid, frame = new_content['fid'], new_content['container_frame']
# Draw the selected bbox
if (
self.mode == App.OPERATION_MODE
and self.state.app.clicked
and len(self.state.app.tlbr) == 4
):
frame = self.container_cache.copy()
draw_bbox(frame, self.state.app.tlbr)
# Show applications
cv2.imshow(self.winname, frame)
cv2.setTrackbarPos(self.barname, self.winname, fid)
# Handling keyboard events
key = cv2.waitKey(1) & 0xff
self.keyboaord_handler(key)
cv2.destroyAllWindows()
def keyboaord_handler(self, key):
        # When a certain panel is in focus
# ====================================
if self.mode == App.OPERATION_MODE:
if key == kb.ESC:
self.focus_panel.focus = False
self.focus_panel = None
self.mode = App.SELECT_MODE
return
if key == ord('r') or key == ord('R'):
self.state.reset = True
# Common key handler
# =====================================
super().keyboaord_handler(key)
def mouse_callback(self, event, x, y, flags, param):
# Wait for selecting panel to focus on
# ==================================================
if self.mode == App.SELECT_MODE:
super().mouse_callback(event, x, y, flags, param)
elif self.mode == App.OPERATION_MODE:
# Save the top left coordinate (x, y) of the tracking bounding box
if event == cv2.EVENT_LBUTTONDOWN:
self.state.app.click = True
self.state.app.clicked = True
self.state.tracked = False
self.state.app.tlbr = [x, y]
if self.is_start():
self.pause()
# Temporarily save the bottom right coordinate (x, y) of the tracking box
elif event == cv2.EVENT_MOUSEMOVE and self.state.app.clicked:
self.state.app.click = False
if len(self.state.app.tlbr) == 4:
self.state.app.tlbr[2] = x
self.state.app.tlbr[3] = y
elif len(self.state.app.tlbr) == 2:
self.state.app.tlbr += [x, y]
# Save the final bottom right coordinate (x, y) of the tracking box
elif event == cv2.EVENT_LBUTTONUP and self.state.app.clicked:
self.state.tracked = True
self.state.app.clicked = False
# Prevent rectangle with zero area
if len(self.state.app.tlbr) == 2:
self.state.app.tlbr += [x+10, y+10]
elif len(self.state.app.tlbr) == 4:
self.state.app.tlbr[2] = x
self.state.app.tlbr[3] = y
if self.is_pause():
self.start()
def trackbar_callback(self, value):
super().trackbar_callback(value)
def _determine_action(self):
"""Given current app state determine the action and sent arguments
        There are three possible actions for the single object tracking application.
- 'nop': send dummy package to the server
- 'reset': send reset signal to the server
- 'track': send tracking signal and position of tracked object
Returns:
action(str)
"""
if self.state.app.clicked and not self.state.tracked:
return 'reset'
return 'track'
def _nop_handler(self, response):
self.state.reset = False
def _reset_handler(self, response):
if self.is_start():
self.pause()
self.state.reset = False
self.state.app.click = False
logger.info("Reset")
def _track_handler(self, response):
# Rerender panels (add tracks)
panel_contents = []
for panel in response['content']:
# Extract information of tracked object
pid = panel['pid']
tids = [ track['tid']
for track in panel['tracks']
if track['state'] == "tracked" ]
bboxes = [ track['bbox']
for track in panel['tracks']
if track['state'] == "tracked" ]
covars = [ track['covar']
for track in panel['tracks']
if track['state'] == "tracked" ]
assert len(tids) <= 1
assert len(bboxes) <= 1
assert len(covars) <= 1
# Select target panel to manipulate
target_panel = [ panel
for panel in self.panels
if panel.pid == pid ][0]
target_media_frame = target_panel.media_cache
# Nothing is being tracked
if len(bboxes) == 0:
target_panel_content = target_panel.rerender(target_media_frame)
panel_contents.append(target_panel_content)
if target_panel not in self.video_results:
self.video_results[target_panel] = {}
if target_panel.fid not in self.video_results[target_panel]:
self.video_results[target_panel][target_panel.fid] = []
continue
# Convert coordinate system
old_resolution = self.trans_resolution
new_resolution = target_media_frame.shape[:2][::-1]
bboxes = convert_bbox_coordinate(bboxes, old_resolution, new_resolution)
means = np.array([ ((b[0]+b[2])//2, (b[1]+b[3])//2) for b in bboxes ])
scale_vec = np.array(new_resolution) / np.array(old_resolution)
covars = np.array(covars)*scale_vec
# Draw tracks on target panel
for tid, bbox, mean, covar in zip(tids, bboxes, means, covars):
bbox_color = get_unique_color(tid)
draw_bbox(target_media_frame, bbox=bbox, color=(bbox_color), thickness=3)
draw_text(target_media_frame, text=str(tid), position=bbox[:2],
fontScale=3, fgcolor=(255, 255, 255), bgcolor=bbox_color)
# Rerender
target_panel_content = target_panel.rerender(target_media_frame)
panel_contents.append(target_panel_content)
# Align/Sort rerendered panel_contents
panel_contents = [ [ panel_content
for panel_content in panel_contents
if panel_content['pid'] == panel.pid ][0]
for panel in self.panels ]
# Rerender container
content = self.rerender(panel_contents)
return content
```
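The `mouse_callback` above builds the selection box incrementally: `EVENT_LBUTTONDOWN` stores the top-left corner as `[x, y]`, the first `EVENT_MOUSEMOVE` extends it to four values, later moves overwrite the bottom-right corner, and `EVENT_LBUTTONUP` finalizes it (padding degenerate two-value boxes by 10 px). A tiny trace of that state machine with made-up coordinates:
```python
# Illustrative trace of the tlbr list lifecycle (coordinates are made up)
tlbr = [100, 120]            # after EVENT_LBUTTONDOWN at (100, 120)
tlbr += [150, 180]           # first EVENT_MOUSEMOVE extends it to 4 values
tlbr[2], tlbr[3] = 160, 190  # later moves / EVENT_LBUTTONUP overwrite the corner
print(tlbr)                  # [100, 120, 160, 190]
```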
#### File: johnnylord/mtmc-testbed/server.py
```python
import signal
import socket
import random
import logging
import argparse
import warnings
warnings.filterwarnings("ignore", category=UserWarning)
from multiprocessing import Event
import GPUtil
import torch
from network import NetworkAgent
from worker import LazyWorker
class LogFilter(object):
def __init__(self, level):
self._level = level
def filter(self, logRecord):
return logRecord.levelno == self._level
# Logging system
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# Stream Handler
s_handler = logging.StreamHandler()
s_handler.setLevel(logging.INFO)
s_format = logging.Formatter('%(asctime)s, %(levelname)s, PID[%(process)d] %(name)s, %(message)s')
s_handler.setFormatter(s_format)
# Error file handler
f1_handler = logging.FileHandler("log-server-error.txt")
f1_handler.setLevel(logging.ERROR)
f1_handler.addFilter(LogFilter(logging.ERROR))
f1_format = logging.Formatter('%(asctime)s, PID[%(process)d], %(name)s, %(message)s')
f1_handler.setFormatter(f1_format)
# Info file handler
f2_handler = logging.FileHandler("log-server-info.txt")
f2_handler.setLevel(logging.INFO)
f2_handler.addFilter(LogFilter(logging.INFO))
f2_format = logging.Formatter('%(asctime)s, PID[%(process)d], %(name)s, %(message)s')
f2_handler.setFormatter(f2_format)
# Register handler on root logger
logger.addHandler(s_handler)
logger.addHandler(f1_handler)
logger.addHandler(f2_handler)
# Commandline parser
parser = argparse.ArgumentParser()
parser.add_argument("--ip", default="0.0.0.0", type=str, help="server ip")
parser.add_argument("--port", default=6666, type=int, help="server port")
# Ctrl+C handler
def signal_handler(signum, frame):
raise Exception("Ctrl+C is triggered")
signal.signal(signal.SIGINT, signal_handler)
def main(args):
# Create server socket
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
# Bind server socket
server_socket.bind((args['ip'], args['port']))
server_socket.listen(10)
logger.info("Server launch at {}:{}".format(args['ip'], args['port']))
# Handling incoming connection request
workers = []
try:
while True:
conn, addr = server_socket.accept()
logger.info("Connection from {}:{}".format(addr[0], addr[1]))
if torch.cuda.is_available():
n_devices = torch.cuda.device_count()
deviceIDs = GPUtil.getAvailable(order='memory', limit=n_devices)
random.shuffle(deviceIDs)
device = "cuda:{}".format(deviceIDs[0])
else:
device = "cpu"
# Create new worker process for handling new client
shutdown_event = Event()
worker = LazyWorker(conn=conn,
addr=addr,
device=device,
shutdown_event=shutdown_event)
worker.start()
workers.append((worker, shutdown_event))
# Parent process release socket
conn.close()
except Exception as e:
# Shutdown child process gracefully
for worker, event in workers:
event.set()
# Shutdown server socket
server_socket.close()
logger.info("Shutdown server", exc_info=True)
if __name__ == "__main__":
torch.multiprocessing.set_start_method("spawn")
args = vars(parser.parse_args())
main(args)
```
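The server above hands every worker process a `multiprocessing.Event` and sets it when the accept loop is interrupted, so workers can exit their loops gracefully. A minimal standalone sketch of that shutdown pattern (independent of the repository's `LazyWorker`):
```python
import time
from multiprocessing import Event, Process

def worker_loop(shutdown_event):
    # Stand-in for a worker's recv/handle loop
    while not shutdown_event.is_set():
        time.sleep(0.1)
    print("worker shutting down")

if __name__ == "__main__":
    event = Event()
    p = Process(target=worker_loop, args=(event,))
    p.start()
    time.sleep(0.5)
    event.set()   # ask the worker to stop
    p.join()
```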
#### File: worker/echoworker/__init__.py
```python
import logging
from ..utils.time import timeit
from ..base import Worker
logger = logging.getLogger(__name__)
class EchoWorker(Worker):
"""Echo what remote client sent
This worker is just a simple example showing how to communicate with a remote
client. You can build on this worker to develop your own worker.
"""
DEFAULT_CONFIG = {}
def __init__(self):
raise RuntimeError("You cannot directly instantiate EchoWorker")
def boot(self, config):
pass
def run(self):
"""Worker job"""
try:
while not self.shutdown_event.is_set():
request = self.recv()
videos = self.parallel_recv_videos()
# Remote client socket is closed
if request is None:
break
# Send back message
self.send(request)
except Exception as e:
logger.warning(f"Error occur in echoworker", exc_info=True)
# Cleanup process
self.close()
def close(self):
pass
```
#### File: mtmcworker/cluster/__init__.py
```python
import logging
import numpy as np
from scipy.optimize import linear_sum_assignment
from ...utils.time import timeit
from .centroid import TargetCentroid
logger = logging.getLogger(__name__)
class AdaptiveKmeans:
"""Adaptive Kmeans clustering algorithm to cluster tracked targets"""
NEXT_TID = 0
def __init__(self):
self.centroids = {}
def __str__(self):
lines = [ "[TID:{}:{}]".format(tid, tc) for tid, tc in self.centroids.items() ]
content = ", ".join(lines)
return content
def __repr__(self):
return str(self)
def miss(self):
"""Perform miss action on all tracked centroids"""
# Miss for all centroids
_ = [ tc.miss() for tc in self.centroids.values() ]
# Delete dead centroids
tids = list(self.centroids.keys())
for tid in tids:
if self.centroids[tid].is_dead():
del self.centroids[tid]
def predict(self, points):
"""Predict the track ID for each data point
Arguments:
points (ndarray): 2D ndarray representing embeddings from one video
Returns:
a list of track ids representing the label of each points
NOTE:
`points` should only contain the embeddings from one video. The
labels (track IDs) are not determined by the minimum distance between
points and clusters; they are determined by the result of the linear
assignment algorithm. Each unique label (track ID) will only be
associated with one point.
The number of centroids should always be larger than the number of
points.
"""
# Common setup
centroids = np.array([ tc.embedding for tc in self.centroids.values() ])
label2tid = dict([ (label, tid)
for label, tid in enumerate(self.centroids.keys()) ])
# Predict tids for points
distances = self._pdist(points, centroids)
pindices, cindices = linear_sum_assignment(distances)
tids = np.array([ label2tid[cidx] for cidx in cindices ])
return tids
@timeit(logger)
def fit(self, group_points, n_clusters):
"""Perform adaptive kmeans clustering
Arguments:
group_points (list): list of ndarrays, where each element in the
list represents the embeddings of targets in a specific frame.
n_clusters (int): the ideal number of clusters in current state
"""
# Flatten group_points
points = np.concatenate(group_points)
# Initialize clusters
if len(self.centroids) == 0:
self._init_centroids(points, n_clusters)
return
# Common setup
centroids = np.array([ tc.embedding for tc in self.centroids.values() ])
label2tid = dict([ (label, tid)
for label, tid in enumerate(self.centroids.keys()) ])
# Dynamic add new clusters
if len(self.centroids) < n_clusters:
# Extract anomaly points group by group to form two sets:
# - normal points
# - anomaly points
normal_group_points, anomaly_points = [], []
for gpoints in group_points:
# Find labels for each point
distances = self._pdist(gpoints, centroids)
sorted_labels = np.argsort(distances)
# As point to centroid is a one-to-one mapping in each group,
# filter out points that get assigned to the centroids that
# already assigned to some points before
normal_points = []
unique_cindices = set()
for pidx, cindices in enumerate(sorted_labels):
cidx = cindices[0]
if cidx in unique_cindices:
anomaly_points.append(gpoints[pidx])
else:
normal_points.append(gpoints[pidx])
unique_cindices.add(cidx)
normal_group_points.append(np.array(normal_points))
# Add new clusters to fit anomaly points
new_clusters = n_clusters - len(self.centroids)
anomaly_points = np.array(anomaly_points)
self._init_centroids(anomaly_points, new_clusters)
# Normal points for updating current clusters
group_points = normal_group_points
# Assign centroid to each point in each group
hit_cindices = set()
group_labels = {}
for gidx, gpoints in enumerate(group_points):
distances = self._pdist(gpoints, centroids)
pindices, cindices = linear_sum_assignment(distances)
hit_cindices = hit_cindices.union(set(cindices))
group_labels[gidx] = list(zip(pindices, cindices))
# Compute new centroids
new_centroids = []
for target_cidx, c in enumerate(centroids):
new_centroid = []
for gidx, matches in group_labels.items():
for pidx, cidx in matches:
if cidx == target_cidx:
new_centroid.append(group_points[gidx][pidx])
if len(new_centroid) > 0:
new_centroid = np.array(new_centroid).mean(axis=0)
else:
new_centroid = c
new_centroids.append(new_centroid)
new_centroids = np.array(new_centroids)
# Replace new clusters
for label, c in enumerate(new_centroids):
tid = label2tid[label]
self.centroids[tid].embedding = c
# Update state of clusters
miss_cindices = list(set(range(len(centroids)))-hit_cindices)
_ = [ self.centroids[label2tid[hidx]].hit() for hidx in hit_cindices ]
_ = [ self.centroids[label2tid[midx]].miss() for midx in miss_cindices ]
# Cleanup outdated clusters
tids = list(self.centroids.keys())
for tid in tids:
if self.centroids[tid].is_dead():
del self.centroids[tid]
# Merge clusters that are too close to each other
self._merge_cluster()
def _init_centroids(self, points, n_clusters):
"""Initialize clusters that fit the specified points
Arguments:
points (ndarray): 2D ndarray data for clustering
n_clusters (int): number of clusters to initialize
"""
# Random select centroids from current data points
centroids = points.copy()
np.random.shuffle(centroids)
centroids = centroids[:n_clusters]
# Fine-tune centroids that best fit data points
centroids = self._fit(points, centroids)
for c in centroids:
self.centroids[AdaptiveKmeans.NEXT_TID] = TargetCentroid(embedding=c)
AdaptiveKmeans.NEXT_TID += 1
def _pdist(self, points, centroids):
"""Compute pair-wise distance between data points and centroids
Arguments:
points (ndarray): 2D ndarray representing data points with N rows
centroids (ndarray): 2D ndarray representing centroids with M rows
Returns:
A NxM 2D ndarray representing the euclidean distances between data
points and centroids
"""
dists = np.sqrt(((points[:, np.newaxis, :]-centroids)**2).sum(axis=2))
return dists
def _fit(self, points, centroids, n_iter=10, threshold=1e-3):
"""Perform kmeans algorithm to fit the centroids to the data points
Arguments:
points (ndarray): 2D ndarray representing data points
centroids (ndarray): 2D ndarray representing centroids
Returns:
A 2D ndarray representing the fine-tuned centroids
"""
counter = 0
while counter < n_iter:
# Find the closest centroid to each point
distances = self._pdist(points, centroids)
labels = np.argmin(distances, axis=1)
# Compute new centroids
new_centroids = np.array([ points[labels==label].mean(axis=0)
if np.sum(labels==label) > 0 else c
for label, c in enumerate(centroids) ])
# Break when converge
diff = np.sum(np.sqrt(((centroids - new_centroids)**2).sum(axis=1)))
if diff > threshold:
centroids = new_centroids
else:
break
counter += 1
return new_centroids
def _merge_cluster(self):
# Merge clusters that are too close to each other
centroids = np.array([ tc.embedding for tc in self.centroids.values() ])
label2tid = dict([ (label, tid)
for label, tid in enumerate(self.centroids.keys()) ])
# Find unique clusters
# [ {1, 2}, {3}, {4} ] means there are three unique clusters, and
# {1, 2} clusters are considered as same cluster.
unique_clusters = []
distances = self._pdist(centroids, centroids)
for cidx, distance in enumerate(distances):
# Clusters whose pairwise distance is less than 0.4 are considered
# the same cluster
same_clusters = set(np.argwhere(distance < 0.4).reshape(-1).tolist())
# Try to merge `same_clusters` into the existing unique cluster
merge_flag = False
for i in range(len(unique_clusters)):
unique_cluster = unique_clusters[i]
if len(unique_cluster.intersection(same_clusters)) > 0:
unique_clusters[i] = unique_cluster.union(same_clusters)
merge_flag = True
break
# Form a new unique cluster from `same_clusters`
if not merge_flag:
unique_clusters.append(same_clusters)
# Merge clusters
for clusters in unique_clusters:
if len(clusters) == 1:
continue
tids = sorted([ label2tid[cidx] for cidx in clusters ])
embeddings = np.array([ self.centroids[tid].embedding
for tid in tids ])
new_centroid = np.mean(embeddings, axis=0)
self.centroids[tids[0]].embedding = new_centroid
for tid in tids[1:]:
del self.centroids[tid]
```
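`AdaptiveKmeans._pdist` relies on NumPy broadcasting to build the full N-by-M distance matrix in one vectorized expression. A minimal standalone sketch of the same idea (the names and shapes below are illustrative, not part of the repository):
```python
import numpy as np

def pairwise_euclidean(points, centroids):
    # points[:, np.newaxis, :] has shape (N, 1, D); subtracting the (M, D)
    # centroids broadcasts to (N, M, D), and summing the squared differences
    # over the last axis yields an (N, M) matrix of Euclidean distances.
    diff = points[:, np.newaxis, :] - centroids
    return np.sqrt((diff ** 2).sum(axis=2))

rng = np.random.default_rng(0)
points = rng.random((5, 128))      # e.g. 5 embeddings of dimension 128
centroids = rng.random((3, 128))   # 3 cluster centroids
print(pairwise_euclidean(points, centroids).shape)  # (5, 3)
```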
#### File: mtmcworker/tracker/__init__.py
```python
import logging
import numpy as np
from scipy.optimize import linear_sum_assignment
from ...utils.time import timeit
from .track import DeepTrack
from .kalman import chi2inv95
from .utils import tlbr_to_xyah
logger = logging.getLogger(__name__)
class DeepTracker:
def __init__(self):
self._tracks = []
self._counter = 0
@property
def tracks(self):
return [ { 'tid': t.tid,
'state': t.state,
'bbox': t.bbox,
'velocity': t.velocity,
'covar': t.covariance[:2, :2],
'feature': t.feature }
for t in self._tracks ]
def propagate(self):
for track in self._tracks:
if track.state == "lost":
track.predict(hold_covariance=True)
else:
track.predict()
@timeit(logger)
def associate(self, measurements):
"""Associate meansurements to tracks in state-wise fashion"""
hit_tracks = []
miss_tracks = []
# Split tracks by their states
tracked_tracks = [ t for t in self._tracks if t.state == "tracked" ]
lost_tracks = [ t for t in self._tracks if t.state == "lost" ]
tentative_tracks = [ t for t in self._tracks if t.state == "tentative" ]
# STAGE_1: Associate with tracked tracks
# =============================================================
match_tindices, match_mindices = self._match(tracks=tracked_tracks,
measurements=measurements,
metric="cosine", threshold=0.3)
hit_tracks += [ t for i, t in enumerate(tracked_tracks) if i in match_tindices ]
miss_tracks += [ t for i, t in enumerate(tracked_tracks) if i not in match_tindices ]
measurements = np.array([ m for i, m in enumerate(measurements) if i not in match_mindices ])
# STAGE_2: Associate with lost tracks
# =============================================================
match_tindices, match_mindices = self._match(tracks=lost_tracks,
measurements=measurements,
metric="cosine", threshold=0.3)
hit_tracks += [ t for i, t in enumerate(lost_tracks) if i in match_tindices ]
miss_tracks += [ t for i, t in enumerate(lost_tracks) if i not in match_tindices ]
measurements = np.array([ m for i, m in enumerate(measurements) if i not in match_mindices ])
# STAGE_3: Associate with tentative tracks
# =============================================================
match_tindices, match_mindices = self._match(tracks=tentative_tracks,
measurements=measurements,
metric="iou", threshold=0.3)
hit_tracks += [ t for i, t in enumerate(tentative_tracks) if i in match_tindices ]
miss_tracks += [ t for i, t in enumerate(tentative_tracks) if i not in match_tindices ]
measurements = np.array([ m for i, m in enumerate(measurements) if i not in match_mindices ])
# STAGE_4: Remove dead tracks & Create new tracks
# =================================================================
_ = [ t.hit() for t in hit_tracks ]
_ = [ t.miss() for t in miss_tracks ]
self._tracks = [ t for t in self._tracks if t.state != "inactive" ]
new_tracks = []
for measurement in measurements:
bbox = measurement[:4]
embedding = measurement[4:]
new_tracks.append(DeepTrack(bbox, embedding, tid=self._counter))
self._counter += 1
self._tracks += new_tracks
def _match(self, tracks, measurements, metric, threshold):
# Edge cases
if (
(len(tracks) == 0 and len(measurements) == 0)
or (len(tracks) == 0 and len(measurements) != 0)
or (len(tracks) != 0 and len(measurements) == 0)
):
return [], []
# Compute cost matrix
bboxes = measurements[:, :4]
embeddings = measurements[:, 4:]
if metric == 'iou':
costs = 1 - np.array([ t.iou(bboxes) for t in tracks ])
elif metric == 'cosine':
dcosts = np.array([ t.mahalanobis_distance(bboxes, only_position=True) for t in tracks ])
costs = np.array([ t.cosine_similarity(embeddings) for t in tracks ])
costs[dcosts > chi2inv95[2]] = 10000.
# Perform linear assignment
tindices, mindices = linear_sum_assignment(costs)
match_pairs = [ pair
for pair in zip(tindices, mindices)
if costs[pair[0], pair[1]] <= threshold ]
# Update track state
for tind, mind in match_pairs:
track = tracks[tind]
track.update(bboxes[mind])
track.add_feature(embeddings[mind])
# Return matched indices
match_tindices = [ tind for tind, _ in match_pairs ]
match_mindices = [ mind for _, mind in match_pairs ]
return match_tindices, match_mindices
```
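The `_match` helper follows a common assign-then-gate pattern: solve the assignment problem on the full cost matrix, then discard matches whose cost exceeds a threshold. A small self-contained illustration with made-up cost values:
```python
import numpy as np
from scipy.optimize import linear_sum_assignment

costs = np.array([
    [0.10, 0.90, 0.80],   # track 0 vs measurements 0..2
    [0.85, 0.15, 0.95],   # track 1
    [0.70, 0.75, 0.60],   # track 2 has no good match
])
tindices, mindices = linear_sum_assignment(costs)
threshold = 0.3
matches = [(int(t), int(m))
           for t, m in zip(tindices, mindices)
           if costs[t, m] <= threshold]
print(matches)  # [(0, 0), (1, 1)] -- track 2 stays unmatched
```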
#### File: worker/pdetection/base.py
```python
import logging
import numpy as np
from abc import ABC, abstractmethod
from ..utils.time import timeit
logger = logging.getLogger(__name__)
class PoseDetector(ABC):
def __init__(self,
stride=8,
hthreshold=0.1,
pthreshold=0.05,
pretrain_model=None,
device="cpu",
**kwargs):
self.stride = stride
self.hthreshold = hthreshold
self.pthreshold = pthreshold
self.pretrain_model = pretrain_model
self.device = device
self.model = None
@timeit(logger)
def __call__(self, imgs):
self._check_input(imgs)
data = self.preprocessing(imgs)
pafs, heatmaps = self.model(data)
peoples = self.postprocessing(imgs, heatmaps, pafs)
self._check_output(peoples)
return peoples
def _check_input(self, imgs):
assert type(imgs) == list
# img is numpy array
for img in imgs:
assert type(img) == np.ndarray
# all imgs with same size
size = tuple(imgs[0].shape)
for img in imgs:
assert tuple(img.shape) == size
def _check_output(self, peoples):
assert type(peoples) == list
for people in peoples:
assert type(people) == list
for person in people:
assert 'conf' in person
assert 'bbox' in person
assert 'n_parts' in person
assert 'keypoints' in person
@abstractmethod
def preprocessing(self, imgs):
pass
@abstractmethod
def postprocessing(self, imgs, heatmaps, pafs):
pass
```
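`PoseDetector.__call__` is a template method: subclasses supply `preprocessing()` and `postprocessing()`, `self.model` maps the preprocessed batch to `(pafs, heatmaps)`, and `_check_output` enforces the per-person dict keys. A purely illustrative dummy subclass showing that contract (the import path is assumed from the repo layout, and the lambda "model" is a stand-in, not a real pose network):
```python
import numpy as np
from worker.pdetection.base import PoseDetector  # assumed import path

class DummyPoseDetector(PoseDetector):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # stand-in "model": returns empty paf/heatmap arrays
        self.model = lambda data: (np.zeros((len(data), 1)), np.zeros((len(data), 1)))

    def preprocessing(self, imgs):
        return imgs

    def postprocessing(self, imgs, heatmaps, pafs):
        # no detections: one empty people-list per image
        return [[] for _ in imgs]

detector = DummyPoseDetector()
print(detector([np.zeros((64, 64, 3), dtype=np.uint8)]))  # [[]]
```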
#### File: worker/pdetection/__init__.py
```python
import sys
from .bodypose import BodyPoseDetector
def get_detector(model_name, model_config={}):
model_cls = vars(sys.modules[__name__])[model_name]
detector = model_cls(**model_config)
return detector
```
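`get_detector` is a simple factory: the string must match a class name imported into this module's namespace (here `BodyPoseDetector`), and `model_config` is forwarded to its constructor. A hedged usage sketch; the config keys are assumptions based on `PoseDetector.__init__`, not documented options:
```python
from worker.pdetection import get_detector  # assumed import path

# "device" and "stride" are assumed keyword arguments accepted by the base class
detector = get_detector("BodyPoseDetector",
                        model_config={"device": "cpu", "stride": 8})
```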
#### File: recognition/reid/__init__.py
```python
import logging
import cv2
import numpy as np
from PIL import Image
import torch
from torch.hub import load_state_dict_from_url
from torchvision import transforms
from ...utils.time import timeit
from ..base import PersonRecognizer
from .model import resnet18_reid
logger = logging.getLogger(__name__)
__all__ = [ "Resnet18" ]
class Resnet18(PersonRecognizer):
PRETRAIN_URL = "https://www.dropbox.com/s/lnr9megu682n6ef/crossentropytriplet_market1501dukemtmc_resnet18reid.pth?dl=1"
def __init__(self, **kwargs):
super().__init__(**kwargs)
# Load pretrained model
self.model = resnet18_reid(features=128)
if self.pretrain_model is not None:
state_dict = torch.load(self.pretrain_model)
else:
state_dict = load_state_dict_from_url(Resnet18.PRETRAIN_URL)
# Drop classifier layer
drop_keys = [ k for k in state_dict.keys() if 'classifier' in k ]
for k in drop_keys:
del state_dict[k]
self.model.load_state_dict(state_dict, strict=False)
self.model.to(self.device)
self.model.eval()
# Preprocessing layer
self.preprocess = transforms.Compose([
transforms.Resize((self.size[1], self.size[0])),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
@timeit(logger)
def preprocessing(self, imgs):
inputs = []
for img in imgs:
img = img.astype(np.uint8)
pil_img = Image.fromarray(img)
input_ = self.preprocess(pil_img)
inputs.append(input_)
inputs = torch.stack(inputs)
inputs = inputs.to(self.device)
return inputs
@timeit(logger)
def postprocessing(self, output):
embeddings = output.detach().cpu().numpy()
return embeddings
```
#### File: recognition/reid/model.py
```python
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision.models.resnet import resnet50, resnet34, resnet18
__all__ = [ "resnet50_reid", "resnet34_reid", "resnet18_reid" ]
class ReIDResnet(nn.Module):
"""ReID model with resnet backbone
Argument:
resnet (model): resnet model pretrained on imagenet
resnet_features (int): size of latent features before fc layer in resnet
features (int): size of reid latent feature
"""
def __init__(self, resnet, resnet_features, features, classes):
super().__init__()
self.encoder = resnet
self.embedding = nn.Sequential(nn.Linear(resnet_features, features))
self.bnneck = nn.Sequential(nn.BatchNorm1d(features))
self.classifier = nn.Sequential(nn.Linear(features, classes))
def _encoder_forward(self, x):
x = self.encoder.conv1(x)
x = self.encoder.bn1(x)
x = self.encoder.relu(x)
x = self.encoder.maxpool(x)
x = self.encoder.layer1(x)
x = self.encoder.layer2(x)
x = self.encoder.layer3(x)
x = self.encoder.layer4(x)
x = self.encoder.avgpool(x)
x = torch.flatten(x, 1)
return x
def forward(self, x):
x = self._encoder_forward(x)
embeddings = self.embedding(x)
norm_embeddings = F.normalize(self.bnneck(embeddings), p=2, dim=1)
if not self.training:
return norm_embeddings
labels = self.classifier(norm_embeddings)
return embeddings, labels
def resnet18_reid(features=128, classes=1502):
resnet = resnet18(pretrained=True)
resnet.layer4[0].downsample[0].stride = (1, 1)
resnet.layer4[0].conv1.stride = (1, 1)
resnet.fc = None
model = ReIDResnet(resnet, 512, features, classes)
return model
def resnet34_reid(features=128, classes=1502):
resnet = resnet34(pretrained=True)
resnet.layer4[0].downsample[0].stride = (1, 1)
resnet.layer4[0].conv1.stride = (1, 1)
resnet.fc = None
model = ReIDResnet(resnet, 512, features, classes)
return model
def resnet50_reid(features=128, classes=1502):
resnet = resnet50(pretrained=True)
resnet.layer4[0].downsample[0].stride = (1, 1)
resnet.layer4[0].conv2.stride = (1, 1)
resnet.fc = None
model = ReIDResnet(resnet, 2048, features, classes)
return model
if __name__ == "__main__":
from torchsummary import summary
# Instantiate model
model = resnet50_reid()
# Random input
summary(model, (3, 256, 128), device="cpu")
```
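At inference time the ReID model's `forward()` returns L2-normalized embeddings (the `if not self.training` branch), so the cosine similarity between two person crops is just a dot product. A hedged usage sketch (the import is assumed; inside the package it is `from .model import resnet18_reid`, and `pretrained=True` downloads ImageNet weights on first use):
```python
import torch
from model import resnet18_reid  # assumed standalone import of recognition/reid/model.py

model = resnet18_reid(features=128)
model.eval()
with torch.no_grad():
    crops = torch.rand(2, 3, 256, 128)          # two person crops (N, C, H, W)
    embeddings = model(crops)                    # (2, 128), L2-normalized
    similarity = embeddings[0] @ embeddings[1]   # cosine similarity in [-1, 1]
print(embeddings.shape, float(similarity))
```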
#### File: worker/utils/time.py
```python
import time
from functools import wraps
__all__ = [ "timeit" ]
def timeit(logger):
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
start_time = time.time()
result = func(*args, **kwargs)
end_time = time.time()
logger.info(f"[{func.__module__}:{func.__name__}] - execution time: {end_time-start_time}")
return result
return wrapper
return decorator
``` |
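`timeit` is a decorator factory: it is called with a logger first, and the returned decorator wraps the target function and logs its wall-clock execution time at INFO level. A hedged usage sketch (the import path is assumed from the file layout):
```python
import logging
from worker.utils.time import timeit  # assumed import path

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

@timeit(logger)
def slow_add(a, b):
    return a + b

slow_add(1, 2)  # logs something like "[__main__:slow_add] - execution time: 3.2e-06"
```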
{
"source": "johnnylord/trytry-bodypose-orientation",
"score": 3
} |
#### File: trytry-bodypose-orientation/model/orient.py
```python
import torch
import torch.nn as nn
class OrientNet(nn.Module):
"""Orientation Network to predict the orientation of a body pose
The keypoints of a body pose are obtained from the OpenPose pose-estimation
model with 25 keypoints. Each keypoint consists of three values (x, y, conf).
The orientation of a body pose is divided into 8 categories.
"""
def __init__(self, n_keypoints=25, n_orients=8):
super().__init__()
self.n_keypoints = n_keypoints
self.n_orients = n_orients
self.feature = nn.Sequential(
nn.Linear(n_keypoints*3, 64),
nn.ReLU(inplace=True),
nn.Linear(64, 32),
nn.ReLU(inplace=True))
self.fc = nn.Sequential(nn.Linear(32, n_orients))
def forward(self, x):
feature = self.feature(x)
output = self.fc(feature)
return output
if __name__ == "__main__":
from torchsummary import summary
x = torch.rand(3, 25*3)
model = OrientNet()
y = model(x)
print(x.shape, y.shape)
```
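The network outputs raw logits over the 8 orientation bins; turning them into a predicted class is a softmax plus argmax. A hedged sketch (the import path is assumed, and the random keypoints stand in for a flattened OpenPose skeleton):
```python
import torch
import torch.nn.functional as F
from model.orient import OrientNet  # assumed import path

model = OrientNet()
model.eval()
with torch.no_grad():
    keypoints = torch.rand(1, 25 * 3)        # (x, y, conf) for 25 joints, flattened
    logits = model(keypoints)                # shape (1, 8)
    orientation = F.softmax(logits, dim=1).argmax(dim=1).item()
print(orientation)                           # predicted bin in [0, 7]
```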
#### File: trytry-bodypose-orientation/utils/cost.py
```python
import numpy as np
def compute_iou_dist(bboxes1, bboxes2):
"""Return iou distance between bboxes1 and bboxes2
Args:
bboxes1 (np.ndarray): array of shape (N, 4)
bboxes2 (np.ndarray): array of shape (M, 4)
Return:
An N by M distance matrix
Note:
A bbox is (xmin, ymin, xmax, ymax)
"""
x11, y11, x12, y12 = np.split(bboxes1, 4, axis=1)
x21, y21, x22, y22 = np.split(bboxes2, 4, axis=1)
xA = np.maximum(x11, np.transpose(x21))
yA = np.maximum(y11, np.transpose(y21))
xB = np.minimum(x12, np.transpose(x22))
yB = np.minimum(y12, np.transpose(y22))
interArea = np.maximum((xB-xA+1), 0)*np.maximum((yB-yA+1), 0)
bbox1Area = (x12-x11+1)*(y12-y11+1)
bbox2Area = (x22-x21+1)*(y22-y21+1)
iou = interArea / (bbox1Area+np.transpose(bbox2Area)-interArea)
return 1 - iou
``` |
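A quick sanity check of `compute_iou_dist`: identical boxes give distance 0 and disjoint boxes give distance 1. The import path is assumed from the file layout and the box coordinates are made up:
```python
import numpy as np
from utils.cost import compute_iou_dist  # assumed import path

bboxes1 = np.array([[0, 0, 9, 9]], dtype=float)
bboxes2 = np.array([[0, 0, 9, 9], [20, 20, 29, 29]], dtype=float)
print(compute_iou_dist(bboxes1, bboxes2))  # roughly [[0. 1.]]: same box, then a disjoint box
```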
{
"source": "johnnylu305/Simple-does-it-weakly-supervised-instance-and-semantic-segmentation",
"score": 2
} |
#### File: Simple_does_it/Dataset/make_train.py
```python
import os
import sys
import numpy as np
import tqdm
from bs4 import BeautifulSoup
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
from Parser_.parser import make_pair_parser
import voc12_class
# standard output format
SPACE = 35
# tqdm parameter
UNIT_SCALE = True
BAR_FORMAT = '{}{}{}'.format('{l_bar}', '{bar}', '| {n_fmt}/{total_fmt}')
class Maker:
def __init__(self):
args = make_pair_parser()
# get dataset path
self.dataset_path = args.dataset
# get training set name
self.train_name = args.train_set_name
# get annotation name
self.ann_dir_name = args.ann_dir_name
# annotation information
self.ann_info = np.array([])
# get train pair name
self.train_pair_name = args.train_pair_name
def save_train_pair(self):
with open(self.dataset_path + '/' + self.train_pair_name,
'w') as w, open(self.dataset_path + '/' + self.train_name,
'r') as r:
# load image name
for img_name in tqdm.tqdm(
r, desc='{:{}}'.format('Save pair name', SPACE),
unit_scale=UNIT_SCALE):
img_name = img_name.rstrip()
# load annotation
self.load_annotation(img_name + '.xml')
# save train pair
for i, info in enumerate(self.ann_info):
if info[0] in voc12_class.voc12_classes:
grabcut_name = '{}_{}_{}.png'.format(
img_name, i,
voc12_class.voc12_classes[info[0]])
w.write('{}###{}###{}###{}###{}###{}###{}\n'.format(
img_name, grabcut_name, info[2], info[1], info[4],
info[3], info[0]))
r.close()
w.close()
print('Save set successful')
# load annotation
def load_annotation(self, filename):
with open(self.dataset_path + '/' + self.ann_dir_name + '/' + filename,
'r') as r:
soup = BeautifulSoup(r, 'xml')
# get bounding boxes coordinate
xmins = soup.find_all('xmin')
ymins = soup.find_all('ymin')
xmaxs = soup.find_all('xmax')
ymaxs = soup.find_all('ymax')
# get class name
names = soup.find_all('name')
# extract information
self.ann_info = np.array([])
for name, xmin, ymin, xmax, ymax in zip(names, xmins, ymins, xmaxs,
ymaxs):
self.ann_info = np.append(self.ann_info, np.array(
[name.string, xmin.string, ymin.string, xmax.string,
ymax.string]))
self.ann_info = self.ann_info.reshape(-1, 5)
r.close()
def main():
make_pair = Maker()
make_pair.save_train_pair()
if __name__ == '__main__':
main()
```
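Each line written by `save_train_pair` packs the image name, the grabcut mask name, the box coordinates (ymin, xmin, ymax, xmax) and the class name, joined by `###`; `grabcut.py` later splits on the same separator. An illustrative line with made-up values:
```python
# Hypothetical train-pair line (values are made up, field order matches the write above)
line = "2007_000032###2007_000032_0_1.png###78###104###183###375###aeroplane"
img_name, grab_name, ymin, xmin, ymax, xmax, cls = line.split("###")
print(img_name, cls)  # 2007_000032 aeroplane
```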
#### File: Simple_does_it/Dataset/save_result.py
```python
import os
import sys
import scipy.misc
import matplotlib as mlp
import matplotlib.pyplot as plt
import numpy as np
mlp.use('Agg')
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
from Dataset import voc12_color
class Save:
def __init__(self, img, masks, img_name, pred_dir_path, pair_dir_path,
classes):
# get segmentation
self.masks = masks
# get image
self.img = img
# get image name
self.img_name = img_name
# get directory for saving prediction
self.pred_dir_path = pred_dir_path
# get directory for saving image/prediction overlay pairs
self.pair_dir_path = pair_dir_path
# get classes
self.classes = classes
def save(self):
# save segmentation
scipy.misc.toimage(
self.masks, cmin=0, cmax=255, pal=voc12_color.colors_map,
mode='P').save(self.pred_dir_path + '/' + self.img_name+'.png')
# create figure
fig = plt.figure()
# convert to inch
# dpi: dot per inch
w = self.img.shape[1] / float(fig.get_dpi())
h = self.img.shape[0] / float(fig.get_dpi())
# set figure size
fig.set_size_inches(w, h)
for i in range(1, self.classes):
# get color for mask
color = voc12_color.colors[i]
m = self.masks[:, :, np.newaxis]
# add mask
for c in range(3):
self.img[:, :, c] = np.where(
(m[:, :, 0] == i), self.img[:, :, c] * 0.3 + 0.7 *
color[c], self.img[:, :, c])
# show image
plt.figimage(self.img)
# save image with grabcut masks
fig.savefig(self.pair_dir_path + '/' + self.img_name + '.png')
plt.cla()
plt.clf()
plt.close('all')
```
#### File: Simple_does_it/Model/model.py
```python
import os
import sys
import tensorflow as tf
import numpy as np
import math
import matplotlib as mlp
import copy
import random
import tqdm
BASEDIR = os.path.join(os.path.dirname(__file__), '..')
sys.path.insert(0, BASEDIR)
from Dataset.load import Load
from Parser_.parser import model_parser
from Dataset.save_result import Save
from Postprocess.dense_CRF import dense_CRF
mlp.use('Agg')
args = model_parser()
# parameter for Loading
DATASET = args.dataset
SET_NAME = args.set_name
LABEL_DIR_NAME = args.label_dir_name
IMG_DIR_NAME = args.img_dir_name
# dataset
# classes for segmentation
# default: 21
CLASS = args.classes
# training set size
# default: get from loading data
TRAIN_SIZE = None
# testing set size
# default: get from loading data
TEST_SIZE = None
# output format
SPACE = 15
# tqdm parameter
UNIT_SCALE = True
BAR_FORMAT = '{}{}{}'.format('{l_bar}', '{bar}', '| {n_fmt}/{total_fmt}')
# hyperparameter
# batch size
# default: 16
BATCH_SIZE = args.batch_size
# epoch
# default: 2000
EPOCH = args.epoch
# learning rate
# default: 0.01
LR = args.learning_rate
# momentum for optimizer
# default: 0.9
MOMENTUM = tf.Variable(args.momentum)
# probability for dropout
# default: 0.5
KEEP_PROB = args.keep_prob
# training or testing
# default: False
IS_TRAIN = args.is_train
# iteration
# ITER = TRAIN_SIZE/BATCH_SIZE
ITER = None
# width and height after resize
# get from loading data
WIDTH = args.width
HEIGHT = args.height
# learning decay step
# default: 500
DECAY_STEP = 500
# learning rate decay rate
# default: 0.1
DECAY_RATE = 0.1
# staircase
# default: True
STAIRCASE = True
# weight decay
# default = 0.0005
WEIGHT_DECAY = 0.0005
# saving and restore weight
# VGG_16
VGG16_CKPT_PATH = BASEDIR + "/Model/models/vgg_16.ckpt"
# saving weight each SAVE_STEP
# default: 2
SAVE_STEP = args.save_step
# resore weights number
RESTORE_TARGET = int(args.restore_target)
# restore weights path
RESTORE_CKPT_PATH = BASEDIR + "/Model/models/model_" + \
str(RESTORE_TARGET) + ".ckpt"
# location for saving results
PRED_DIR_PATH = DATASET + '/' + args.pred_dir_name
PAIR_DIR_PATH = DATASET + '/' + args.pair_dir_name
CRF_DIR_PATH = DATASET + '/' + args.crf_dir_name
CRF_PAIR_DIR_PATH = DATASET + '/' + args.crf_pair_dir_name
# define placeholder
xp = tf.placeholder(tf.float32, shape=(None, None, None, 3))
yp = tf.placeholder(tf.int32, shape=(None, None, None, 1))
global_step = tf.placeholder(tf.int32)
# set gpu utilization
# config gpu utilization
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
# build convolution layer for deeplab
def build_conv(input_, shape, name, weight_decay=WEIGHT_DECAY,
strides=[1, 1, 1, 1], padding='SAME', activation=True,
c_name='PRETRAIN_VGG16', holes=None):
# tf.AUTO_REUSE for using exist variable
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
# define l2 regularizer
regularizer = tf.contrib.layers.l2_regularizer(scale=weight_decay)
# define initializer for weights and biases
w_initializer = tf.contrib.layers.xavier_initializer()
b_initializer = tf.zeros_initializer()
# define variable for weights and biases
biases = tf.get_variable(initializer=b_initializer, shape=shape[-1],
name='biases',
collections=[c_name, tf.GraphKeys.
GLOBAL_VARIABLES])
kernel = tf.get_variable(initializer=w_initializer, shape=shape,
name='weights',
collections=[c_name, tf.GraphKeys.
GLOBAL_VARIABLES],
regularizer=regularizer)
# convolution
if not holes:
layer = tf.nn.conv2d(input=input_, filter=kernel, strides=strides,
padding=padding)
else:
layer = tf.nn.atrous_conv2d(value=input_, filters=kernel,
rate=holes, padding=padding)
# add biases
layer = tf.nn.bias_add(layer, biases)
# use activation or not
if activation:
layer = tf.nn.relu(tf.layers.batch_normalization(inputs=layer,
axis=-1,
training=IS_TRAIN)
)
return layer
# define network
def network():
# get input from placeholder
x = xp
y = yp
# get batch size, width, height
BATCH_SIZE = tf.shape(x)[0]
WIDTH = tf.shape(x)[2]
HEIGHT = tf.shape(x)[1]
# learning rate schedule
lr = tf.train.exponential_decay(LR, global_step, DECAY_STEP, DECAY_RATE,
STAIRCASE)
# DeepLab-LargeFOV
with tf.variable_scope('vgg_16'):
with tf.variable_scope('conv1'):
layer1 = build_conv(x, [3, 3, 3, 64], 'conv1_1')
layer2 = build_conv(layer1, [3, 3, 64, 64], 'conv1_2')
pool1 = tf.nn.max_pool(value=layer2, ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1], padding='SAME',
name='pool1')
with tf.variable_scope('conv2'):
layer3 = build_conv(pool1, [3, 3, 64, 128], 'conv2_1')
layer4 = build_conv(layer3, [3, 3, 128, 128], 'conv2_2')
pool2 = tf.nn.max_pool(value=layer4, ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1], padding='SAME',
name='pool2')
with tf.variable_scope('conv3'):
layer5 = build_conv(pool2, [3, 3, 128, 256], 'conv3_1')
layer6 = build_conv(layer5, [3, 3, 256, 256], 'conv3_2')
layer7 = build_conv(layer6, [3, 3, 256, 256], 'conv3_3')
pool3 = tf.nn.max_pool(value=layer7, ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1], padding='SAME',
name='pool3')
with tf.variable_scope('conv4'):
layer8 = build_conv(pool3, [3, 3, 256, 512], 'conv4_1')
layer9 = build_conv(layer8, [3, 3, 512, 512], 'conv4_2')
layer10 = build_conv(layer9, [3, 3, 512, 512], 'conv4_3')
pool4 = tf.nn.max_pool(value=layer10, ksize=[1, 3, 3, 1],
strides=[1, 1, 1, 1], padding='SAME',
name='pool4')
with tf.variable_scope('conv5'):
layer11 = build_conv(pool4, [3, 3, 512, 512], 'conv5_1', holes=2)
layer12 = build_conv(layer11, [3, 3, 512, 512], 'conv5_2', holes=2)
layer13 = build_conv(layer12, [3, 3, 512, 512], 'conv5_3', holes=2)
pool5 = tf.nn.max_pool(value=layer13, ksize=[1, 3, 3, 1],
strides=[1, 1, 1, 1], padding='SAME',
name='pool5')
pool5_1 = tf.nn.avg_pool(value=pool5, ksize=[1, 3, 3, 1],
strides=[1, 1, 1, 1], padding='SAME',
name='pool5_1')
layer14 = build_conv(pool5_1, [3, 3, 512, 1024], 'fc6', padding='SAME',
c_name='UNPRETRAIN', holes=12)
dropout6 = tf.nn.dropout(layer14, keep_prob=KEEP_PROB, name='dropout6')
layer15 = build_conv(dropout6, [1, 1, 1024, 1024], 'fc7',
padding='VALID', c_name='UNPRETRAIN')
dropout7 = tf.nn.dropout(layer15, keep_prob=KEEP_PROB, name='dropout7')
layer16 = build_conv(dropout7, [1, 1, 1024, CLASS], 'fc8',
padding='VALID', activation=False,
c_name='UNPRETRAIN_LAST')
predictions = layer16
# to one-hot
y = tf.reshape(y, shape=[BATCH_SIZE, -1])
y = tf.one_hot(y, depth=CLASS)
y = tf.reshape(y, shape=[-1, CLASS])
# resize predictions for cross entropy
predictions = tf.image.resize_bilinear(predictions, [HEIGHT, WIDTH])
predictions = tf.reshape(predictions, [-1, CLASS])
prob_prediction = tf.reshape(tf.nn.softmax(predictions),
[BATCH_SIZE, HEIGHT, WIDTH, CLASS])
# define loss function
with tf.variable_scope('loss'):
loss = -tf.reduce_mean(y*tf.log(tf.nn.softmax(predictions)+1e-10))
tf.summary.scalar('loss', loss)
# get variables
last_var = tf.get_collection('UNPRETRAIN_LAST')
other_var = list(set(tf.global_variables()) - set(last_var))
# operations for batch normalization
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
# define optimizer
with tf.variable_scope('optimizer'):
# batch normalization operations added as a dependency
with tf.control_dependencies(update_ops):
optimizer1 = tf.train.MomentumOptimizer(
learning_rate=lr, momentum=MOMENTUM).minimize(
loss, var_list=other_var)
optimizer2 = tf.train.MomentumOptimizer(
learning_rate=lr*10, momentum=MOMENTUM).minimize(
loss, var_list=last_var)
optimizer = tf.group(optimizer1, optimizer2)
# resize to image format
predictions = tf.argmax(predictions, axis=1)
predictions = tf.reshape(predictions, [BATCH_SIZE, HEIGHT, WIDTH, 1])
return loss, optimizer, predictions, prob_prediction
# shuffle data
def shuffle_unison(x, y):
state = np.random.get_state()
np.random.shuffle(x)
np.random.set_state(state)
np.random.shuffle(y)
# augmentation
def augmentation(img, label):
img_ = []
label_ = []
h = int(HEIGHT*0.626)
w = int(WIDTH*0.626)
for i in range(img.shape[0]):
# random crop
shift1 = random.randint(0, HEIGHT - h)
shift2 = random.randint(0, WIDTH - w)
img_.append(img[i][shift1:h + shift1, shift2:w + shift2][:])
label_.append(label[i][shift1:h + shift1, shift2:w + shift2][:])
# flip
if random.randint(0, 1) == 0:
img_[i] = np.flip(img_[i], 1)
label_[i] = np.flip(label_[i], 1)
return img_, label_
# mean substraction by RGB
def mean_substraction(x):
# Uncomment this block if you train on another dataset.
# Change the dtype of x in load.py to np.float64 to get full precision.
# Then replace mean and std with the new mean and std.
"""
mean = np.mean(x, axis = (0, 1, 2))
print ('{:{}}: {}'.format('Mean', SPACE, mean))
std = np.std(x, axis = (0, 1, 2))
print ('{:{}}: {}'.format('Std', SPACE, std))
"""
# Mean and Std computed from VOC train set
mean = [116.47913155, 112.99590528, 104.12249927]
std = [69.29213195, 68.4138099, 72.42007962]
if IS_TRAIN:
for i in range(3):
x[:, :, :, i] = (x[:, :, :, i] - mean[i]) / (std[i] + 1e-7)
else:
for i in range(TEST_SIZE):
for j in range(3):
x[i][:, :, j] = (x[i][:, :, j] - mean[j]) / (std[j] + 1e-7)
return x
# training
def train_network(x_train, y_train):
with tf.Session() as sess:
# get network
loss, optimizer, predictions, prob_predictions = network()
# setup tensorboard
merged = tf.summary.merge_all()
writer = tf.summary.FileWriter(BASEDIR + "/Model/Logs/", sess.graph)
if RESTORE_TARGET == 0:
pretrain_var = tf.get_collection('PRETRAIN_VGG16')
other_var = list(set(tf.global_variables()) - set(pretrain_var))
# setup saver and restorer
saver = tf.train.Saver(tf.global_variables(), max_to_keep=1000)
restorer = tf.train.Saver(pretrain_var)
# load weight for untrainable variables
restorer.restore(sess, VGG16_CKPT_PATH)
# initial unpretrain variables
init = tf.variables_initializer(other_var)
sess.run(init)
else:
# setup saver
saver = tf.train.Saver(tf.global_variables(), max_to_keep=1000)
# load weight
saver.restore(sess, RESTORE_CKPT_PATH)
# training
for i in range(RESTORE_TARGET, EPOCH):
print('{:{}}: {}'.format('Epoch', SPACE, i))
# shuffle data
shuffle_unison(x_train, y_train)
# split for batch
x_train_ = np.array_split(x_train, ITER)
y_train_ = np.array_split(y_train, ITER)
# save weight
if i % SAVE_STEP == 0:
saver.save(sess, BASEDIR + "/Model/models/model_" +
str(i) + ".ckpt")
avg_loss = 0
count = 0
for j in tqdm.tqdm(range(ITER), desc='{:{}}'.
format('Epoch' + str(i), SPACE),
unit_scale=UNIT_SCALE, bar_format=BAR_FORMAT):
# check empty or not
if x_train_[j].size:
# augmentation
x_train_[j], y_train_[j] = augmentation(x_train_[j],
y_train_[j])
summary, optimizer_, loss_ = sess.run(
[merged, optimizer, loss],
feed_dict={xp: x_train_[j],
yp: y_train_[j],
global_step: i})
avg_loss = avg_loss + loss_
count = count + 1
writer.add_summary(summary, i * ITER + j)
print('{:{}}: {}'.format('Average Loss', SPACE, avg_loss / count))
writer.close()
# testing
def test_network(x_test, img_names):
with tf.Session(config=config) as sess:
# get network
loss, optimizer, predictions, prob_predictions = network()
# setup restorer
restorer = tf.train.Saver(tf.global_variables())
# mean substraction
x_test_ = mean_substraction(copy.deepcopy(x_test))
# load weight
restorer.restore(sess, RESTORE_CKPT_PATH)
for i in tqdm.tqdm(range(TEST_SIZE), desc='{:{}}'.
format('Test and save', SPACE),
unit_scale=UNIT_SCALE, bar_format=BAR_FORMAT):
predictions_, prob_predictions_ = sess.run(
[predictions, prob_predictions],
feed_dict={xp: [x_test_[i]]})
save_ = Save(x_test[i].astype(np.uint8), np.squeeze(predictions_),
img_names[i], PRED_DIR_PATH, PAIR_DIR_PATH, CLASS)
save_.save()
dense_CRF_ = dense_CRF(x_test[i].astype(np.uint8),
prob_predictions_[0])
crf_mask = dense_CRF_.run_dense_CRF()
save_ = Save(x_test[i].astype(np.uint8), crf_mask, img_names[i],
CRF_DIR_PATH, CRF_PAIR_DIR_PATH, CLASS)
save_.save()
def main():
global WIDTH
global HEIGHT
global TRAIN_SIZE
global KEEP_PROB
global TEST_SIZE
global ITER
global BATCH_SIZE
if IS_TRAIN:
# load training data from VOC12 dataset
dataset = Load(IS_TRAIN, DATASET, SET_NAME, LABEL_DIR_NAME,
IMG_DIR_NAME, WIDTH, HEIGHT)
x_train, y_train = dataset.load_data()
# mean substraction
x_train = mean_substraction(x_train)
# set training set size
TRAIN_SIZE = len(x_train)
# get iteration
ITER = math.ceil(TRAIN_SIZE / BATCH_SIZE)
# get width and height
WIDTH = x_train[0].shape[1]
HEIGHT = x_train[0].shape[0]
# train network
train_network(x_train, y_train)
else:
# load val data from VOC12 dataset
dataset = Load(IS_TRAIN, DATASET, SET_NAME, LABEL_DIR_NAME,
IMG_DIR_NAME, WIDTH, HEIGHT)
x_test, img_names = dataset.load_data()
# set testing set size
TEST_SIZE = len(x_test)
# close dropout
KEEP_PROB = 1
# set batch size
BATCH_SIZE = 1
# test network
test_network(x_test, img_names)
if __name__ == '__main__':
main()
```
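The learning-rate schedule above is plain exponential decay with `staircase=True`, i.e. `LR * DECAY_RATE ** floor(global_step / DECAY_STEP)`; since the epoch index is fed in as `global_step`, the rate drops by 10x every 500 epochs with the defaults in this file. A tiny standalone check of that formula:
```python
def decayed_lr(base_lr, step, decay_step=500, decay_rate=0.1):
    # staircase exponential decay, as in tf.train.exponential_decay(..., staircase=True)
    return base_lr * decay_rate ** (step // decay_step)

print(decayed_lr(0.01, 0))     # 0.01
print(decayed_lr(0.01, 499))   # 0.01
print(decayed_lr(0.01, 500))   # 0.001 (up to float rounding)
```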
#### File: Simple_does_it/Preprocess/boxi.py
```python
import numpy as np
import scipy.misc
import tqdm
import os
import sys
from bs4 import BeautifulSoup
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
from Parser_.parser import boxi_parser
from Dataset import voc12_color
from Dataset import voc12_class
# tqdm parameter
UNIT_SCALE = True
BAR_FORMAT = '{}{}{}'.format('{l_bar}', '{bar}', '| {n_fmt}/{total_fmt}')
# standard output format
SPACE = 35
def create(set_, ann_path, label_path):
# load set
with open(set_, 'r') as r:
for f in tqdm.tqdm(r, desc='{:{}}'.format('Create boxi label', SPACE),
unit_scale=UNIT_SCALE):
f = f.rstrip()
# get label
save(f, ann_path, label_path)
def save(file_, ann_path, label_path):
with open(ann_path+'/'+file_+'.xml', 'r') as r:
soup = BeautifulSoup(r, 'xml')
# get image size
size = soup.find('size')
width = int(size.find('width').string)
height = int(size.find('height').string)
# create mask
mask = np.zeros((height, width), np.uint8)
# annotations
anns = []
# get objects
objects = soup.find_all(['object'])
# get object
for object_ in objects:
# get class
name = object_.find('name').string
if name not in voc12_class.voc12_classes:
continue
class_ = voc12_class.voc12_classes[name]
# get bounding box
xmin = int(object_.find('xmin').string)
xmax = int(object_.find('xmax').string)
ymin = int(object_.find('ymin').string)
ymax = int(object_.find('ymax').string)
# compute width and height
width = xmax-xmin
height = ymax-ymin
# compute area
area = width*height
# compute in width and height
in_xmin = int(xmin+width*0.4)
in_ymin = int(ymin+height*0.4)
in_xmax = int(xmax-width*0.4)
in_ymax = int(ymax-height*0.4)
# save annotation
anns.append([area, xmin, ymin, xmax, ymax, in_xmin, in_ymin,
in_xmax, in_ymax, class_])
anns.sort(reverse=True)
for ann in anns:
# ignore label
mask[ann[2]:ann[4], ann[1]:ann[3]] = 22
# class label
mask[ann[6]:ann[8], ann[5]:ann[7]] = ann[-1]
mask = scipy.misc.toimage(mask, cmin=0, cmax=255,
pal=voc12_color.colors_map, mode='P')
mask.save(label_path+'/'+file_+'.png')
def main():
args = boxi_parser()
# get dataset path
dataset_path = args.dataset
# get annotations directory path
ann_path = dataset_path + '/' + args.ann_dir_name
# get set name
set_ = dataset_path + '/' + args.set_name
# get label directory path
label_path = dataset_path + '/' + args.label_dir_name
# create boxi label
create(set_, ann_path, label_path)
if __name__ == '__main__':
main()
```
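The "boxi" label marks the whole bounding box as ignore (value 22) and writes the class id only into the central region of the box: the inner rectangle runs from 40% to 60% of the width and height, i.e. the middle 20%. A quick numeric check of that inner-box arithmetic with made-up coordinates:
```python
xmin, xmax = 100, 200
width = xmax - xmin
in_xmin = int(xmin + width * 0.4)   # 140
in_xmax = int(xmax - width * 0.4)   # 160
print(in_xmin, in_xmax)             # the central 20 pixels of a 100-pixel-wide box
```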
#### File: Simple_does_it/Preprocess/grabcut.py
```python
import os
import sys
import tqdm
import cv2
import numpy as np
import matplotlib as mlp
import matplotlib.pyplot as plt
import scipy.misc
from multiprocessing import Pool
mlp.use('Agg')
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
from Parser_.parser import grabcut_parser
from Dataset import voc12_color
# standard output format
SPACE = 35
# tqdm parameter
UNIT_SCALE = True
class Grabcut:
def __init__(self):
args = grabcut_parser()
# get dataset path
self.dataset_path = args.dataset
# get image directory path
self.img_dir_path = self.dataset_path + '/' + args.img_dir_name
# get train pair name
self.train_pair_name = args.train_pair_name
# get grabcut direcrory name
self.grabcut_dir_name = args.grabcut_dir_name
# get image with grabcuts name
self.img_grabcuts_dir = args.img_grabcuts_dir
# get pool size
self.pool_size = args.pool_size
# get grabcut iteration
self.grabcut_iter = args.grabcut_iter
# get label directory name
self.label_dir_name = args.label_dir_name
# get annotations
self.anns = {}
# ungrabcut image amount
self.img_num = 0
def load_annotation(self):
# record grabcut or not
table = {}
with open(self.dataset_path + '/' + self.train_pair_name, 'r') as r:
for i, ann in enumerate(tqdm.tqdm(
r, desc='{:{}}'.format('Load annotations', SPACE),
unit_scale=UNIT_SCALE), start=1):
# split annotation
ann = ann.rstrip().split('###')
# initial dict for key
if ann[0] not in self.anns:
self.anns[ann[0]] = []
# initial dict for key
if ann[0] not in table:
table[ann[0]] = False
# check grabcut or not
if table[ann[0]] or not os.path.isfile(self.dataset_path +
'/' +
self.grabcut_dir_name +
'/' + ann[1]):
table[ann[0]] = True
# load annotation
self.anns[ann[0]].append(ann)
r.close()
# leave ungrabcut item
for key in table:
if table[key]:
self.img_num += len(self.anns[key])
else:
self.anns.pop(key, None)
try:
print('{:{}}: {}'.format('Total images', SPACE, i))
print('{:{}}: {}'.format('Ungrabcut images', SPACE, self.img_num))
except UnboundLocalError:
print('{:{}}: {}'.format('Total images', SPACE, 0))
print('{:{}}: {}'.format('Ungrabcut images', SPACE, self.img_num))
def run_grabcut(self):
# generate pool for multiprocessing
p = Pool(self.pool_size)
# run grabcut by multiprocessing
for _ in tqdm.tqdm(p.imap_unordered(self.grabcut, self.anns),
total=len(self.anns)):
pass
p.close()
p.join()
def grabcut(self, key):
masks = []
for i, ann in enumerate(self.anns[key], start=1):
# get annotation
img_name, grab_img_name, miny, minx, maxy, maxx, class_ = ann
miny = self.str_to_int(miny)
minx = self.str_to_int(minx)
maxy = self.str_to_int(maxy)
maxx = self.str_to_int(maxx)
# load image
img = cv2.imread(self.img_dir_path + '/' + img_name + '.jpg')
# grabcut parameter
mask = np.zeros(img.shape[:2], np.uint8)
bgdModel = np.zeros((1, 65), np.float64)
fgdModel = np.zeros((1, 65), np.float64)
width = maxx - minx
height = maxy - miny
rect = (minx, miny, width, height)
# run grabcut
cv2.grabCut(img, mask, rect, bgdModel, fgdModel, self.grabcut_iter,
cv2.GC_INIT_WITH_RECT)
# to binary mask
img_mask = np.where(
(mask == 2) | (mask == 0), 0, 1).astype('uint8')
# if img_mask has no foreground
# reset it to include everything that is not definite background
if np.sum(img_mask) == 0:
img_mask = np.where((mask == 0), 0, 1).astype('uint8')
# BGR to RGB
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# boundingbox to binary mask
bbox = np.zeros((img.shape[0], img.shape[1]))
bbox[rect[1]:rect[1] + rect[3], rect[0]:rect[0] + rect[2]] = 1
# count IOU
combine = bbox + img_mask
intersection = np.where((combine == 2), 1, 0).astype('float')
union = np.where((combine == 0), 0, 1).astype('float')
IOU = np.sum(intersection) / np.sum(union)
# if IOU less than 15%
# reset img_mask to bbox
if IOU < 0.15:
img_mask = bbox
masks.append([img_mask, grab_img_name, rect])
# sort by foreground size
masks.sort(key=lambda mask: np.sum(mask[0]), reverse=True)
for j in range(i):
for k in range(j + 1, i):
masks[j][0] = masks[j][0] - masks[k][0]
masks[j][0] = np.where((masks[j][0] == 1), 1, 0).astype('uint8')
# get class
grab_img_name = masks[j][1]
class_ = grab_img_name.split('_')[-1]
class_ = int(class_[:class_.rfind('.')])
# set class
masks[j][0] = np.where(
(masks[j][0] == 1), class_, 0).astype('uint8')
# save mask
scipy.misc.toimage(
masks[j][0], cmin=0, cmax=255, pal=voc12_color.colors_map,
mode='P').save(self.dataset_path + '/' +
self.grabcut_dir_name + '/' + masks[j][1])
# merge masks
mask = np.zeros(masks[0][0].shape)
for m in masks:
mask = mask + m[0]
# save merged mask
scipy.misc.toimage(
mask, cmin=0, cmax=255, pal=voc12_color.colors_map,
mode='P').save(self.dataset_path + '/' +
self.label_dir_name + '/' + img_name + '.png')
# create figure
fig = plt.figure()
# convert to inch
# dpi: dot per inch
w = img.shape[1] / float(fig.get_dpi())
h = img.shape[0] / float(fig.get_dpi())
# set figure size
fig.set_size_inches(w, h)
for m in masks:
rect = m[2]
m = m[0]
# get color for mask
color = voc12_color.colors[np.amax(m)]
m = m[:, :, np.newaxis]
# add mask
for c in range(3):
img[:, :, c] = np.where(
(m[:, :, 0] != 0), img[:, :, c] * 0.2 + 0.8 * color[c],
img[:, :, c])
# compute coordinates
left = rect[0] / img.shape[1]
bottom = 1 - (rect[1] + rect[3]) / img.shape[0]
width = (rect[0] + rect[2]) / img.shape[1] - left
height = 1 - (rect[1]) / img.shape[0] - bottom
# set bounding box
ax = fig.add_axes([left, bottom, width, height])
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
ax.patch.set_fill(False)
ax.patch.set_linewidth(5)
ax.patch.set_color('b')
# show image
plt.figimage(img)
# save image with grabcut masks
fig.savefig(self.dataset_path + '/' + self.img_grabcuts_dir + '/' +
img_name + '.png')
plt.cla()
plt.clf()
plt.close()
@staticmethod
def str_to_int(str_):
try:
return int(str_)
# Some bounding box coordinates in VOC2012 are floats
# Such as 2011_006777.xml and 2011_003353.xml
except ValueError:
return int(eval(str_))
def main():
grabcut_ = Grabcut()
grabcut_.load_annotation()
grabcut_.run_grabcut()
if __name__ == '__main__':
main()
``` |
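The binary-mask conversion in `grabcut()` checks for the values 0 and 2 because `cv2.grabCut` labels every pixel with one of four constants; the code keeps definite and probable foreground (1 and 3) and zeroes out the rest. A one-liner to confirm the constant values:
```python
import cv2
print(cv2.GC_BGD, cv2.GC_FGD, cv2.GC_PR_BGD, cv2.GC_PR_FGD)  # 0 1 2 3
```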
{
"source": "johnnymetz/async-techniques-python-course",
"score": 3
} |
#### File: acityscape_api/services/location_service.py
```python
import asyncio
import random
from typing import Tuple
import aiohttp
use_cached_data = False
measured_latency_in_sec = [
0.28844,
0.334_694,
0.33468,
0.343_911,
0.339_515,
0.344_329,
0.341_594,
0.352_366,
0.535_646,
0.527_148,
0.533_472,
0.53351,
0.523_462,
]
async def get_lat_long(zip_code: str, country: str) -> Tuple[float, float]:
key = f"{zip_code}, {country}"
url = (
f'http://www.datasciencetoolkit.org/street2coordinates/{key.replace(" ", "+")}'
)
if use_cached_data:
await asyncio.sleep(random.choice(measured_latency_in_sec))
return 45.50655, -122.733_888
else:
async with aiohttp.ClientSession() as session:
async with session.get(url) as resp:
resp.raise_for_status()
data = await resp.json()
city_data = data.get(f"{zip_code}, {country}", dict())
return city_data.get("latitude", 0.00), city_data.get("longitude", 0.00)
``` |
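`get_lat_long` is a coroutine, so it has to be awaited or driven with `asyncio.run`. A hedged usage sketch (the import path and ZIP code are just examples):
```python
import asyncio
from services.location_service import get_lat_long  # assumed import path

# with use_cached_data = False this performs a real HTTP request
lat, lon = asyncio.run(get_lat_long("97201", "us"))
print(lat, lon)
```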