import frappe
# TODO sanitize req input
def remove_this(doctype, args, kwargs):
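    """Delete records of `doctype` matching the `kwargs` filters and commit.
    The framework's 'cmd' key is stripped from `kwargs` before it is used as the
    filter dict; `args` is accepted for signature compatibility but unused.
    """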
kwargs.pop('cmd', None)
    frappe.db.delete(doctype, kwargs)
return frappe.db.commit() |
import logging
import re
import json
logger = logging.getLogger(__name__)
log = logger
BOOT_DISK_CMD = "findmnt -v -n -T / -o SOURCE"
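# BOOT_DISK_CMD prints the source device backing the root filesystem (e.g. /dev/sda1);
# the digits are stripped from its output below to recover the underlying disk name.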
def run(ceph_cluster, **kw):
"""
Rolls updates over existing ceph-ansible deployment
Args:
ceph_cluster (ceph.ceph.Ceph): ceph cluster object
**kw(dict):
config sample:
config:
demon:
- mon
Returns:
int: non-zero on failure, zero on pass
"""
ceph_installer = ceph_cluster.get_ceph_object('installer')
ansible_dir = ceph_installer.ansible_dir
config = kw.get('config')
# Get all OSD nodes to get boot disk
osd_nodes = ceph_cluster.get_nodes("osd")
log.info("Get all OSD nodes : {}".format(osd_nodes))
# Read inventory file
log.info("Get inventory file content")
hosts_file = ceph_installer.read_inventory_file()
log.info("Previous Inventory file : \n %s" % hosts_file)
# Add boot disk for devices
for line in hosts_file:
host = ''
devices = ''
osd_node = ''
boot_disk = ''
# Searching for OSD node with devices
if not re.search(r'devices', line):
continue
host = re.search(r'^(.*)\s+monitor.*devices=\"(.*)\"', line)
devices = json.loads(host.group(2).replace("\'", "\""))
osd_node = next(filter(lambda x: x.hostname == host.group(1), osd_nodes))
out, err = osd_node.exec_command(cmd=BOOT_DISK_CMD)
boot_disk = re.sub(r"\d", '', out.read().decode().strip())
if boot_disk not in devices:
            devices.insert(0, boot_disk)
# update modified line in hosts_file
mod_line = re.sub(r'devices=.*\"', 'devices="{}"'.format(str(devices)), line)
hosts_file[hosts_file.index(line)] = mod_line
hosts_file = ''.join(hosts_file)
log.info("Modified Inventory File : \n %s" % hosts_file)
ceph_installer.write_inventory_file(hosts_file, file_name="new_hosts")
ceph_installer.setup_ansible_site_yml(ceph_cluster.containerized)
# Run Ansible with limit=OSD
log.info("Run Ansible playbook with OSD limit")
out, rc = ceph_installer.exec_command(
cmd='cd {};'
' ANSIBLE_STDOUT_CALLBACK=debug;'
' ansible-playbook -vvvv -i new_hosts site.yml --limit {daemon}'.format(ansible_dir,
daemon=config.get('demon') + 's'),
long_running=True)
# Validate failure
if rc != 0:
log.info("Failed during deployment as expected")
return 0
return rc
|
import abc
import hashlib
import struct
import bip32utils
import ecdsa
import rlp
import time
from eth_account.internal.transactions import serializable_unsigned_transaction_from_dict, encode_transaction, \
UnsignedTransaction
from eth_account.messages import defunct_hash_message
from snet_cli._vendor.ledgerblue.comm import getDongle
from snet_cli._vendor.ledgerblue.commException import CommException
from mnemonic import Mnemonic
from trezorlib.client import TrezorClient, proto
from trezorlib.transport_hid import HidTransport
class IdentityProvider(abc.ABC):
@abc.abstractmethod
def get_address(self):
raise NotImplementedError()
@abc.abstractmethod
def transact(self, transaction, out_f):
raise NotImplementedError()
@abc.abstractmethod
def sign_message(self, message, out_f):
raise NotImplementedError()
class KeyIdentityProvider(IdentityProvider):
def __init__(self, w3, private_key):
self.w3 = w3
if private_key.startswith("0x"):
self.private_key = bytes(bytearray.fromhex(private_key[2:]))
else:
self.private_key = bytes(bytearray.fromhex(private_key))
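        # Derive the public key from the private key, then take the last 20 bytes
        # of its Keccak-256 hash (w3.sha3) as the Ethereum address.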
public_key = ecdsa.SigningKey.from_string(string=self.private_key,
curve=ecdsa.SECP256k1,
hashfunc=hashlib.sha256).get_verifying_key()
self.address = self.w3.toChecksumAddress("0x" + self.w3.sha3(hexstr=public_key.to_string().hex())[12:].hex())
def get_address(self):
return self.address
def transact(self, transaction, out_f):
raw_transaction = self.w3.eth.account.signTransaction(transaction, self.private_key).rawTransaction
print("Submitting transaction...\n", file=out_f)
txn_hash = self.w3.eth.sendRawTransaction(raw_transaction)
# Wait for transaction to be mined
receipt = None
while receipt is None:
time.sleep(1)
receipt = self.w3.eth.getTransactionReceipt(txn_hash)
return receipt
def sign_message(self, message, out_f):
return self.w3.eth.account.signHash(
defunct_hash_message(hexstr=self.w3.sha3(hexstr=message).hex()), self.private_key).signature
class RpcIdentityProvider(IdentityProvider):
def __init__(self, w3, index):
self.w3 = w3
self.address = self.w3.personal.listAccounts[index]
def get_address(self):
return self.address
def transact(self, transaction, out_f):
print("Submitting transaction...\n", file=out_f)
txn_hash = self.w3.eth.sendTransaction(transaction)
# Wait for transaction to be mined
receipt = None
while receipt is None:
time.sleep(1)
receipt = self.w3.eth.getTransactionReceipt(txn_hash)
return receipt
def sign_message(self, message, out_f):
return self.w3.eth.sign(self.get_address(), hexstr=self.w3.sha3(hexstr=message).hex())
class MnemonicIdentityProvider(IdentityProvider):
def __init__(self, w3, mnemonic, index):
self.w3 = w3
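        # Standard BIP-44 derivation path for Ethereum: m/44'/60'/0'/0/<index>.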
master_key = bip32utils.BIP32Key.fromEntropy(Mnemonic("english").to_seed(mnemonic))
purpose_subtree = master_key.ChildKey(44 + bip32utils.BIP32_HARDEN)
coin_type_subtree = purpose_subtree.ChildKey(60 + bip32utils.BIP32_HARDEN)
account_subtree = coin_type_subtree.ChildKey(bip32utils.BIP32_HARDEN)
change_subtree = account_subtree.ChildKey(0)
account = change_subtree.ChildKey(index)
self.private_key = account.PrivateKey()
public_key = ecdsa.SigningKey.from_string(string=self.private_key,
curve=ecdsa.SECP256k1,
hashfunc=hashlib.sha256).get_verifying_key()
self.address = self.w3.toChecksumAddress(
"0x" + self.w3.sha3(hexstr=public_key.to_string().hex())[12:].hex())
def get_address(self):
return self.address
def transact(self, transaction, out_f):
raw_transaction = self.w3.eth.account.signTransaction(transaction, self.private_key).rawTransaction
print("Submitting transaction...\n", file=out_f)
txn_hash = self.w3.eth.sendRawTransaction(raw_transaction)
# Wait for transaction to be mined
receipt = None
while receipt is None:
time.sleep(1)
receipt = self.w3.eth.getTransactionReceipt(txn_hash)
return receipt
def sign_message(self, message, out_f):
return self.w3.eth.account.signHash(
defunct_hash_message(hexstr=self.w3.sha3(hexstr=message).hex()), self.private_key).signature
class TrezorIdentityProvider(IdentityProvider):
def __init__(self, w3, index):
self.w3 = w3
self.client = TrezorClient(HidTransport.enumerate()[0])
self.index = index
self.address = self.w3.toChecksumAddress(
"0x" + bytes(self.client.ethereum_get_address([44 + bip32utils.BIP32_HARDEN,
60 + bip32utils.BIP32_HARDEN,
bip32utils.BIP32_HARDEN, 0,
index])).hex())
def get_address(self):
return self.address
def transact(self, transaction, out_f):
print("Sending transaction to trezor for signature...\n", file=out_f)
signature = self.client.ethereum_sign_tx(n=[44 + bip32utils.BIP32_HARDEN, 60 + bip32utils.BIP32_HARDEN,
bip32utils.BIP32_HARDEN, 0, self.index],
nonce=transaction["nonce"],
gas_price=transaction["gasPrice"],
gas_limit=transaction["gas"],
to=bytearray.fromhex(transaction["to"][2:]),
value=transaction["value"],
data=bytearray.fromhex(transaction["data"][2:]))
transaction.pop("from")
unsigned_transaction = serializable_unsigned_transaction_from_dict(transaction)
raw_transaction = encode_transaction(unsigned_transaction,
vrs=(signature[0],
int(signature[1].hex(), 16),
int(signature[2].hex(), 16)))
print("Submitting transaction...\n", file=out_f)
txn_hash = self.w3.eth.sendRawTransaction(raw_transaction)
# Wait for transaction to be mined
receipt = None
while receipt is None:
time.sleep(1)
receipt = self.w3.eth.getTransactionReceipt(txn_hash)
return receipt
def sign_message(self, message, out_f):
n = self.client._convert_prime([44 + bip32utils.BIP32_HARDEN,
60 + bip32utils.BIP32_HARDEN,
bip32utils.BIP32_HARDEN,
0,
self.index])
print("Sending message to trezor for signature...\n", file=out_f)
return self.client.call(proto.EthereumSignMessage(address_n=n, message=self.w3.sha3(hexstr=message))).signature
def parse_bip32_path(path):
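    """Pack a BIP32 derivation path such as "44'/60'/0'/0/0" into bytes: each
    element becomes a big-endian uint32, with the high bit (0x80000000) set for
    hardened (') elements.
    """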
if len(path) == 0:
return b""
result = b""
elements = path.split('/')
for pathElement in elements:
element = pathElement.split('\'')
if len(element) == 1:
result = result + struct.pack(">I", int(element[0]))
else:
result = result + struct.pack(">I", 0x80000000 | int(element[0]))
return result
class LedgerIdentityProvider(IdentityProvider):
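    # APDU command headers for the Ledger Ethereum app: CLA 0xe0 followed by the
    # instruction byte and two parameter bytes (get address / sign tx / sign message).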
GET_ADDRESS_OP = b"\xe0\x02\x00\x00"
SIGN_TX_OP = b"\xe0\x04\x00\x00"
SIGN_MESSAGE_OP = b"\xe0\x08\x00\x00"
def __init__(self, w3, index):
self.w3 = w3
try:
self.dongle = getDongle(False)
except CommException:
raise RuntimeError("Received commException from ledger. Are you sure your device is plugged in?")
self.dongle_path = parse_bip32_path("44'/60'/0'/0/{}".format(index))
apdu = LedgerIdentityProvider.GET_ADDRESS_OP
apdu += bytearray([len(self.dongle_path) + 1, int(len(self.dongle_path) / 4)]) + self.dongle_path
try:
result = self.dongle.exchange(apdu)
except CommException:
raise RuntimeError("Received commException from ledger. Are you sure your device is unlocked and the "
"Ethereum app is running?")
offset = 1 + result[0]
self.address = self.w3.toChecksumAddress(bytes(result[offset + 1: offset + 1 + result[offset]])
.decode("utf-8"))
def get_address(self):
return self.address
def transact(self, transaction, out_f):
tx = UnsignedTransaction(
nonce=transaction["nonce"],
gasPrice=transaction["gasPrice"],
gas=transaction["gas"],
to=bytes(bytearray.fromhex(transaction["to"][2:])),
value=transaction["value"],
data=bytes(bytearray.fromhex(transaction["data"][2:]))
)
encoded_tx = rlp.encode(tx, UnsignedTransaction)
apdu = LedgerIdentityProvider.SIGN_TX_OP
apdu += bytearray([len(self.dongle_path) + 1 + len(encoded_tx), int(len(self.dongle_path) / 4)])
apdu += self.dongle_path + encoded_tx
try:
print("Sending transaction to ledger for signature...\n", file=out_f)
result = self.dongle.exchange(apdu)
except CommException:
raise RuntimeError("Received commException from ledger. Are you sure your device is unlocked and the "
"Ethereum app is running?")
transaction.pop("from")
unsigned_transaction = serializable_unsigned_transaction_from_dict(transaction)
raw_transaction = encode_transaction(unsigned_transaction,
vrs=(result[0],
int.from_bytes(result[1:33], byteorder="big"),
int.from_bytes(result[33:65], byteorder="big")))
print("Submitting transaction...\n", file=out_f)
txn_hash = self.w3.eth.sendRawTransaction(raw_transaction)
# Wait for transaction to be mined
receipt = None
while receipt is None:
time.sleep(1)
receipt = self.w3.eth.getTransactionReceipt(txn_hash)
return receipt
def sign_message(self, message, out_f):
message = self.w3.sha3(hexstr=message)
apdu = LedgerIdentityProvider.SIGN_MESSAGE_OP
apdu += bytearray([len(self.dongle_path) + 1 + len(message) + 4, int(len(self.dongle_path) / 4)])
apdu += self.dongle_path + struct.pack(">I", len(message)) + message
try:
print("Sending message to ledger for signature...\n", file=out_f)
result = self.dongle.exchange(apdu)
except CommException:
raise RuntimeError("Received commException from ledger. Are you sure your device is unlocked and the "
"Ethereum app is running?")
return result[1:] + result[0:1]
def get_kws_for_identity_type(identity_type):
SECRET = True
PLAINTEXT = False
if identity_type == "rpc":
return [("eth_rpc_endpoint", PLAINTEXT)]
elif identity_type == "mnemonic":
return [("mnemonic", SECRET)]
elif identity_type == "key":
return [("private_key", SECRET)]
elif identity_type == "trezor":
return []
elif identity_type == "ledger":
return []
else:
raise RuntimeError("unrecognized identity_type {}".format(identity_type))
def get_identity_types():
return ["rpc", "mnemonic", "key", "trezor", "ledger"]
|
from os import getenv
from typing import BinaryIO, Iterable, Optional, Union
import requests
from qiniu import Auth, put_data, put_file
class YPService:
"""云片"""
def __init__(self, api_key: str):
self.api_key = api_key
def single_send(self, mobile: str, text: str) -> dict:
"""单条发送,返回云片响应数据
Args:
mobile: 手机号码
text: 短信内容
"""
data = {
'apikey': self.api_key,
'mobile': mobile,
'text': text
}
resp = requests.post('https://sms.yunpian.com/v2/sms/single_send.json', data=data)
return resp.json()
def batch_send(self, mobiles: Iterable[str], text: str) -> dict:
"""批量发送,返回云片响应数据
Args:
mobiles: 手机号码列表
text: 短信内容
"""
data = {
'apikey': self.api_key,
'mobile': ','.join(mobiles),
'text': text
}
resp = requests.post('https://sms.yunpian.com/v2/sms/batch_send.json', data=data)
return resp.json()
class QNService:
"""七牛"""
def __init__(self, access_key: str, secret_key: str, bucket: str, domain: str):
self.auth = Auth(access_key, secret_key)
self.bucket = bucket
self.domain = domain
    def gen_upload_token(self, key: Optional[str] = None, expires: int = 3600) -> str:
        """Generate an upload token.
        Args:
            key: name of the file to upload
            expires: token lifetime in seconds
        """
return self.auth.upload_token(self.bucket, key=key, expires=expires)
def upload_data(self, key: str, data: Union[bytes, BinaryIO]) -> Optional[str]:
"""上传二进制流,上传成功则返回URL
Args:
key: 上传的文件名
data: 上传的二进制流
"""
up_token = self.gen_upload_token(key)
ret, _ = put_data(up_token, key, data)
if ret and ret.get('key') == key:
url = 'http://{0}/{1}'.format(self.domain, key)
return url
def upload_file(self, key: str, file_path: str) -> Optional[str]:
"""上传文件,上传成功则返回URL
Args:
key: 上传的文件名
file_path: 上传文件的路径
"""
up_token = self.gen_upload_token(key)
ret, _ = put_file(up_token, key, file_path)
if ret and ret.get('key') == key:
url = 'http://{0}/{1}'.format(self.domain, key)
return url
yp_service = YPService(getenv('YP_API_KEY'))
qn_service = QNService(getenv('QN_ACCESS_KEY'), getenv('QN_SECRET_KEY'), getenv('QN_BUCKET'), getenv('QN_DOMAIN'))
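# Example usage (hypothetical values):
#   yp_service.single_send('13800138000', 'Your verification code is 1234')
#   qn_service.upload_file('images/avatar.png', '/tmp/avatar.png')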
|
n, m, a = [int(i) for i in input().split()]
# Number of a-by-a flagstones needed to cover an n-by-m rectangle:
# ceil(n / a) * ceil(m / a), computed with integer arithmetic.
new_length = n // a
new_width = m // a
if n % a != 0:
    new_length += 1
if m % a != 0:
    new_width += 1
print(new_length * new_width)
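# Equivalent, branch-free form using integer ceiling division:
#   print(((n + a - 1) // a) * ((m + a - 1) // a))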
|
import abc
import asyncio
import collections
import concurrent
import datetime
import logging
import time
import asab
from .abc.connection import Connection
from .abc.generator import Generator
from .abc.sink import Sink
from .abc.source import Source
from .analyzer import Analyzer
from .exception import ProcessingError
#
L = logging.getLogger(__name__)
#
class Pipeline(abc.ABC, asab.ConfigObject):
"""
Multiple sources
A pipeline can have multiple sources.
    They are simply passed as a list of sources to the pipeline's `build()` method.
.. code:: python
class MyPipeline(bspump.Pipeline):
def __init__(self, app, pipeline_id):
super().__init__(app, pipeline_id)
self.build(
[
MySource1(app, self),
MySource2(app, self),
MySource3(app, self),
            ],
bspump.common.NullSink(app, self),
)
"""
ConfigDefaults = {
"async_concurency_limit": 1000, # TODO concurrency
"reset_profiler": True,
}
def __init__(self, app, id=None, config=None):
_id = id if id is not None else self.__class__.__name__
super().__init__("pipeline:{}".format(_id), config=config)
self.Id = _id
self.App = app
self.Loop = app.Loop
self.AsyncFutures = []
self.AsyncConcurencyLimit = int(self.Config["async_concurency_limit"])
self.ResetProfiler = self.Config.getboolean("reset_profiler")
assert(self.AsyncConcurencyLimit > 1)
# This object serves to identify the throttler, because list cannot be used as a throttler
self.AsyncFuturesThrottler = object()
self.Sources = []
self.Processors = [[]] # List of lists of processors, the depth is increased by a Generator object
self._source_coros = [] # List of source main() coroutines
# Publish-Subscribe for this pipeline
self.PubSub = asab.PubSub(app)
self.MetricsService = app.get_service('asab.MetricsService')
self.MetricsCounter = self.MetricsService.create_counter(
"bspump.pipeline",
tags={'pipeline': self.Id},
init_values={
'event.in': 0,
'event.out': 0,
'event.drop': 0,
'warning': 0,
'error': 0,
}
)
self.MetricsGauge = self.MetricsService.create_gauge(
"bspump.pipeline.gauge",
tags={'pipeline': self.Id},
init_values={
'warning.ratio': 0.0,
'error.ratio': 0.0,
}
)
self.MetricsDutyCycle = self.MetricsService.create_duty_cycle(
self.Loop,
"bspump.pipeline.dutycycle",
tags={'pipeline': self.Id},
init_values={
'ready': False,
}
)
self.ProfilerCounter = {}
app.PubSub.subscribe(
"Application.Metrics.Flush!",
self._on_metrics_flush
)
# Pipeline logger
self.L = PipelineLogger(
"bspump.pipeline.{}".format(self.Id),
self.MetricsCounter
)
self.LastReadyStateSwitch = self.Loop.time()
self._error = None # None if not in error state otherwise there is a tuple (context, event, exc, timestamp)
self._throttles = set()
self._ready = asyncio.Event(loop=app.Loop)
self._ready.clear()
        # Chillout is used to break pipeline processing into smaller tasks so that other events in the event loop can be processed
self._chillout_trigger = 10000
self._chillout_counter = 0
self._context = {}
def time(self):
return self.App.time()
def _on_metrics_flush(self, event_type, metric, values):
if metric != self.MetricsCounter:
return
if values["event.in"] == 0:
self.MetricsGauge.set("warning.ratio", 0.0)
self.MetricsGauge.set("error.ratio", 0.0)
return
self.MetricsGauge.set("warning.ratio", values["warning"] / values["event.in"])
self.MetricsGauge.set("error.ratio", values["error"] / values["event.in"])
def is_error(self):
return self._error is not None
def set_error(self, context, event, exc):
'''
If called with `exc is None`, then reset error (aka recovery)
'''
if exc is None:
# Reset branch
if self._error is not None:
self._error = None
L.info("Error cleared at a pipeline '{}'".format(self.Id))
for source in self.Sources:
source.restart(self.Loop)
self.PubSub.publish("bspump.pipeline.clear_error!", pipeline=self)
self._evaluate_ready()
else:
if not self.catch_error(exc, event):
self.MetricsCounter.add('warning', 1)
self.PubSub.publish("bspump.pipeline.warning!", pipeline=self)
return
self.MetricsCounter.add('error', 1)
if (self._error is not None):
L.warning("Error on a pipeline is already set!")
self._error = (context, event, exc, self.App.time())
L.warning("Pipeline '{}' stopped due to a processing error: {} ({})".format(self.Id, exc, type(exc)))
self.PubSub.publish("bspump.pipeline.error!", pipeline=self)
self._evaluate_ready()
def catch_error(self, exception, event):
"""
Override to evaluate on the pipeline processing error.
Return True for hard errors (stop the pipeline processing) or False for soft errors that will be ignored
.. code:: python
class SampleInternalPipeline(bspump.Pipeline):
def __init__(self, app, pipeline_id):
super().__init__(app, pipeline_id)
self.build(
bspump.common.InternalSource(app, self),
bspump.common.JSONParserProcessor(app, self),
bspump.common.PPrintSink(app, self)
)
def catch_error(self, exception, event):
if isinstance(exception, json.decoder.JSONDecodeError):
return False
return True
"""
return True
def throttle(self, who, enable=True):
# L.debug("Pipeline '{}' throttle {} by {}".format(self.Id, "enabled" if enable else "disabled", who))
if enable:
self._throttles.add(who)
else:
self._throttles.remove(who)
self._evaluate_ready()
def _evaluate_ready(self):
orig_ready = self.is_ready()
        # Have we observed an error?
new_ready = self._error is None
# Are we throttled?
if new_ready:
new_ready = len(self._throttles) == 0
if orig_ready != new_ready:
if new_ready:
self._ready.set()
self.PubSub.publish("bspump.pipeline.ready!", pipeline=self)
self.MetricsDutyCycle.set('ready', True)
else:
self._ready.clear()
self.PubSub.publish("bspump.pipeline.not_ready!", pipeline=self)
self.MetricsDutyCycle.set('ready', False)
async def ready(self):
"""
Can be used in source: `await self.Pipeline.ready()`
"""
self._chillout_counter += 1
if self._chillout_counter >= self._chillout_trigger:
self._chillout_counter = 0
await asyncio.sleep(0.0001, loop=self.Loop)
await self._ready.wait()
return True
def is_ready(self):
return self._ready.is_set()
def _do_process(self, event, depth, context):
for processor in self.Processors[depth]:
t0 = time.perf_counter()
try:
event = processor.process(context, event)
except BaseException as e:
if depth > 0:
raise # Handle error on the top depth
L.exception("Pipeline processing error in the '{}' on depth {}".format(self.Id, depth))
self.set_error(context, event, e)
raise
finally:
self.ProfilerCounter[processor.Id].add('duration', time.perf_counter() - t0)
self.ProfilerCounter[processor.Id].add('run', 1)
if event is None: # Event has been consumed on the way
if len(self.Processors) == (depth + 1):
if isinstance(processor, Sink):
self.MetricsCounter.add('event.out', 1)
else:
self.MetricsCounter.add('event.drop', 1)
return
assert(event is not None)
try:
raise ProcessingError("Incomplete pipeline, event '{}' is not consumed by a Sink".format(event))
except BaseException as e:
L.exception("Pipeline processing error in the '{}' on depth {}".format(self.__class__.__name__, depth))
self.set_error(context, event, e)
raise
async def inject(self, context, event, depth):
"""
        The inject method injects events into the pipeline at the depth given by the `depth` argument.
        Each additional depth is created by a Generator object in the pipeline.
        For normal operation it is strongly recommended to use the process() method instead (see below).
:param context:
:param event:
:param depth:
:return:
"""
if context is None:
context = self._context.copy()
else:
context = context.copy()
context.update(self._context)
self._do_process(event, depth, context)
async def process(self, event, context=None):
"""
Process method serves to inject events into the pipeline's depth 0,
while incrementing the event.in metric.
        This is the recommended way of inserting events into a pipeline.
:param event:
:param context:
:return:
"""
while not self.is_ready():
await self.ready()
self.MetricsCounter.add('event.in', 1)
await self.inject(context, event, depth=0)
# Future methods
def ensure_future(self, coro):
"""
        You can use this method to schedule a future task that will be executed in the context of the pipeline.
        The pipeline also manages the whole lifecycle of the future/task: it collects the result, discards it,
        and, most importantly, captures any exception, which then puts the pipeline into an error state via set_error().
If the number of futures exceeds the configured limit, the pipeline is throttled.
:param coro:
:return:
"""
future = asyncio.ensure_future(coro, loop=self.Loop)
future.add_done_callback(self._future_done)
self.AsyncFutures.append(future)
# Throttle when the number of futures exceeds the max count
if len(self.AsyncFutures) == self.AsyncConcurencyLimit:
self.throttle(self.AsyncFuturesThrottler, True)
def _future_done(self, future):
"""
        Removes the future from the future list and disables throttling once the number
        of futures no longer exceeds the configured limit.
        If an error occurred while processing the future, it is set on the pipeline.
:param future:
:return:
"""
# Remove the throttle
if len(self.AsyncFutures) == self.AsyncConcurencyLimit:
self.throttle(self.AsyncFuturesThrottler, False)
self.AsyncFutures.remove(future)
exception = future.exception()
if exception is not None:
self.set_error(None, None, exception)
# Construction
def set_source(self, source):
if isinstance(source, Source):
self.Sources.append(source)
else:
self.Sources.extend(source)
def append_processor(self, processor):
# TODO: Check if possible: self.Processors[*][-1] is Sink, no processors after Sink, ...
# TODO: Check if fitting
self.Processors[-1].append(processor)
if isinstance(processor, Generator):
processor.set_depth(len(self.Processors) - 1)
self.Processors.append([])
def insert_before(self, id, processor):
"""
Insert the processor into a pipeline before another processor specified by id
:return: True on success. False otherwise (id not found)
"""
for processors in self.Processors:
for idx, _processor in enumerate(processors):
if _processor.Id == id:
processors.insert(idx, processor)
return True
return False
def insert_after(self, id, processor):
"""
Insert the processor into a pipeline after another processor specified by id
:return: True on success. False otherwise (id not found)
"""
for processors in self.Processors:
for idx, _processor in enumerate(processors):
if _processor.Id == id:
processors.insert(idx + 1, processor)
return True
return False
def build(self, source, *processors):
self.set_source(source)
for processor in processors:
self.append_processor(processor)
self.ProfilerCounter[processor.Id] = self.MetricsService.create_counter(
'bspump.pipeline.profiler',
tags={
'processor': processor.Id,
'pipeline': self.Id,
},
init_values={'duration': 0.0, 'run': 0},
reset=self.ResetProfiler,
)
if isinstance(processor, Analyzer):
self.ProfilerCounter['analyzer_' + processor.Id] = self.MetricsService.create_counter(
'bspump.pipeline.profiler',
tags={
'analyzer': processor.Id,
'pipeline': self.Id,
},
init_values={'duration': 0.0, 'run': 0},
reset=self.ResetProfiler,
)
def iter_processors(self):
"""
        Iterate through all processors.
"""
for processors in self.Processors:
for processor in processors:
yield processor
# Locate ...
def locate_source(self, address):
"""
Find a source by id.
"""
for source in self.Sources:
if source.Id == address:
return source
return None
def locate_connection(self, app, connection_id):
if isinstance(connection_id, Connection):
return connection_id
svc = app.get_service("bspump.PumpService")
connection = svc.locate_connection(connection_id)
if connection is None:
raise RuntimeError("Cannot locate connection '{}'".format(connection_id))
return connection
def locate_processor(self, processor_id):
"""
        Find a processor by id.
"""
for processor in self.iter_processors():
if processor.Id == processor_id:
return processor
# Lifecycle ...
def start(self):
self.PubSub.publish("bspump.pipeline.start!", pipeline=self)
# Start all non-started sources
for source in self.Sources:
source.start(self.Loop)
self._evaluate_ready()
async def stop(self):
# Stop all futures
while len(self.AsyncFutures) > 0:
# The futures are removed in _future_done
await asyncio.wait(
self.AsyncFutures,
loop=self.Loop,
return_when=concurrent.futures.ALL_COMPLETED
)
# Stop all started sources
for source in self.Sources:
await source.stop()
# Rest API
def rest_get(self):
rest = {
'Id': self.Id,
'Ready': self.is_ready(),
'Throttles': list(self._throttles),
'Sources': self.Sources,
'Processors': [],
'Metrics': self.MetricsService.MemstorTarget,
'Log': [record.__dict__ for record in self.L.Deque]
}
        for processors in self.Processors:
rest['Processors'].append(processors)
if self._error:
error_text = str(self._error[2]) # (context, event, exc, timestamp)[2]
error_time = self._error[3]
if len(error_text) == 0:
error_text = str(type(self._error[2]))
rest['Error'] = error_text
rest['ErrorTimestamp'] = error_time
return rest
###
class PipelineLogger(logging.Logger):
def __init__(self, name, metrics_counter, level=logging.NOTSET):
super().__init__(name, level=level)
self.Deque = collections.deque([], 50)
self._metrics_counter = metrics_counter
# TODO: configurable maxlen that is now 50 ^^
# TODO: configurable log level (per pipeline, from its config)
def handle(self, record):
# Count errors and warnings
if record.levelno == logging.WARNING:
self._metrics_counter.add("warning", 1)
elif record.levelno >= logging.ERROR:
self._metrics_counter.add("error", 1)
# Add formatted timestamp
record.timestamp = self._format_time(record)
# Add record
self.Deque.append(record)
def _format_time(self, record):
try:
ct = datetime.datetime.fromtimestamp(record.created)
return ct.isoformat()
except BaseException as e:
L.error("ERROR when logging: {}".format(e))
return str(record.created)
|
from django.db import models
class Articles(models.Model):
    title = models.CharField(max_length=120)
post = models.TextField()
date = models.DateTimeField(auto_now_add=True)
author = models.ForeignKey('auth.User', on_delete=models.CASCADE)
def __str__(self):
return self.title
|
from sys import platform
import getpass
from gyomu.user_windows import _WindowsUser
from gyomu.user import DummyUser, User
class UserFactory:
_current_user: 'User' = None
@staticmethod
def get_current_user() -> 'User':
if UserFactory._current_user is not None:
return UserFactory._current_user
uid = getpass.getuser()
if platform == "win32":
UserFactory._current_user = _WindowsUser(uid)
elif platform == "linux" or platform == "linux2":
return None
return UserFactory._current_user
@staticmethod
def get_user(user_id: str) -> 'User':
return DummyUser(user_id)
|
from django import template
import markdown
from django.utils.safestring import mark_safe
register = template.Library()
@register.filter(name='markdown')
def markdown_filter(text):
return mark_safe(markdown.markdown(text, extensions=['markdown.extensions.extra', 'markdown.extensions.codehilite',
'markdown.extensions.toc', ], safe_mode=True,
enable_attributes=False))
|
from django import template
register = template.Library()
@register.simple_tag
def dfirtrack_version():
versionnumber = 'v0.2.0'
return versionnumber
|
import json
import math
import time
from collections import defaultdict
from django.apps import apps
from django.conf import settings
from django.db import connection
from pretix.base.models import Event, Invoice, Order, OrderPosition, Organizer
from pretix.celery_app import app
if settings.HAS_REDIS:
import django_redis
redis = django_redis.get_redis_connection("redis")
REDIS_KEY = "pretix_metrics"
_INF = float("inf")
_MINUS_INF = float("-inf")
def _float_to_go_string(d):
# inspired by https://github.com/prometheus/client_python/blob/master/prometheus_client/core.py
if d == _INF:
return '+Inf'
elif d == _MINUS_INF:
return '-Inf'
elif math.isnan(d):
return 'NaN'
else:
return repr(float(d))
class Metric(object):
"""
Base Metrics Object
"""
def __init__(self, name, helpstring, labelnames=None):
self.name = name
self.helpstring = helpstring
self.labelnames = labelnames or []
def __repr__(self):
return self.name + "{" + ",".join(self.labelnames) + "}"
def _check_label_consistency(self, labels):
"""
Checks if the given labels provides exactly the labels that are required.
"""
# test if every required label is provided
for labelname in self.labelnames:
if labelname not in labels:
raise ValueError("Label {0} not specified.".format(labelname))
# now test if no further labels are required
if len(labels) != len(self.labelnames):
raise ValueError("Unknown labels used: {}".format(", ".join(set(labels) - set(self.labelnames))))
def _construct_metric_identifier(self, metricname, labels=None, labelnames=None):
"""
Constructs the scrapable metricname usable in the output format.
"""
if not labels:
return metricname
else:
named_labels = []
for labelname in (labelnames or self.labelnames):
named_labels.append('{}="{}"'.format(labelname, labels[labelname]))
return metricname + "{" + ",".join(named_labels) + "}"
def _inc_in_redis(self, key, amount, pipeline=None):
"""
Increments given key in Redis.
"""
if settings.HAS_REDIS:
if not pipeline:
pipeline = redis
pipeline.hincrbyfloat(REDIS_KEY, key, amount)
def _set_in_redis(self, key, value, pipeline=None):
"""
Sets given key in Redis.
"""
if settings.HAS_REDIS:
if not pipeline:
pipeline = redis
pipeline.hset(REDIS_KEY, key, value)
def _get_redis_pipeline(self):
if settings.HAS_REDIS:
return redis.pipeline()
def _execute_redis_pipeline(self, pipeline):
if settings.HAS_REDIS:
return pipeline.execute()
class Counter(Metric):
"""
Counter Metric Object
Counters can only be increased, they can neither be set to a specific value
nor decreased.
"""
def inc(self, amount=1, **kwargs):
"""
Increments Counter by given amount for the labels specified in kwargs.
"""
if amount < 0:
raise ValueError("Counter cannot be increased by negative values.")
self._check_label_consistency(kwargs)
fullmetric = self._construct_metric_identifier(self.name, kwargs)
self._inc_in_redis(fullmetric, amount)
class Gauge(Metric):
"""
Gauge Metric Object
Gauges can be set to a specific value, increased and decreased.
"""
def set(self, value=1, **kwargs):
"""
Sets Gauge to a specific value for the labels specified in kwargs.
"""
self._check_label_consistency(kwargs)
fullmetric = self._construct_metric_identifier(self.name, kwargs)
self._set_in_redis(fullmetric, value)
def inc(self, amount=1, **kwargs):
"""
Increments Gauge by given amount for the labels specified in kwargs.
"""
if amount < 0:
raise ValueError("Amount must be greater than zero. Otherwise use dec().")
self._check_label_consistency(kwargs)
fullmetric = self._construct_metric_identifier(self.name, kwargs)
self._inc_in_redis(fullmetric, amount)
def dec(self, amount=1, **kwargs):
"""
Decrements Gauge by given amount for the labels specified in kwargs.
"""
if amount < 0:
raise ValueError("Amount must be greater than zero. Otherwise use inc().")
self._check_label_consistency(kwargs)
fullmetric = self._construct_metric_identifier(self.name, kwargs)
self._inc_in_redis(fullmetric, amount * -1)
class Histogram(Metric):
"""
Histogram Metric Object
"""
def __init__(self, name, helpstring, labelnames=None,
buckets=(.005, .01, .025, .05, .075, .1, .25, .5, .75, 1.0, 2.5, 5.0, 7.5, 10.0, 30.0, _INF)):
        buckets = list(buckets)
        if buckets != sorted(buckets):
            # This is probably an error on the part of the user,
            # so raise rather than sorting for them.
            raise ValueError('Buckets not in sorted order')
        if buckets and buckets[-1] != _INF:
            buckets.append(_INF)
if len(buckets) < 2:
raise ValueError('Must have at least two buckets')
self.buckets = buckets
super().__init__(name, helpstring, labelnames)
def observe(self, amount, **kwargs):
"""
Stores a value in the histogram for the labels specified in kwargs.
"""
if amount < 0:
raise ValueError("Amount must be greater than zero. Otherwise use inc().")
self._check_label_consistency(kwargs)
pipe = self._get_redis_pipeline()
countmetric = self._construct_metric_identifier(self.name + '_count', kwargs)
self._inc_in_redis(countmetric, 1, pipeline=pipe)
summetric = self._construct_metric_identifier(self.name + '_sum', kwargs)
self._inc_in_redis(summetric, amount, pipeline=pipe)
kwargs_le = dict(kwargs.items())
for i, bound in enumerate(self.buckets):
if amount <= bound:
kwargs_le['le'] = _float_to_go_string(bound)
bmetric = self._construct_metric_identifier(self.name + '_bucket', kwargs_le,
labelnames=self.labelnames + ["le"])
self._inc_in_redis(bmetric, 1, pipeline=pipe)
self._execute_redis_pipeline(pipe)
def estimate_count_fast(type):
"""
See https://wiki.postgresql.org/wiki/Count_estimate
"""
    if 'postgres' in settings.DATABASES['default']['ENGINE']:
        cursor = connection.cursor()
        # pg_class.reltuples is the planner's row estimate: cheap but approximate.
        cursor.execute("SELECT reltuples FROM pg_class WHERE relname = %s",
                       [type._meta.db_table])
        row = cursor.fetchone()
        return int(row[0])
else:
return type.objects.count()
def metric_values():
"""
    Produces the values to be presented to the monitoring system.
"""
metrics = defaultdict(dict)
# Metrics from redis
if settings.HAS_REDIS:
for key, value in redis.hscan_iter(REDIS_KEY):
dkey = key.decode("utf-8")
splitted = dkey.split("{", 2)
value = float(value.decode("utf-8"))
metrics[splitted[0]]["{" + splitted[1]] = value
# Aliases
aliases = {
'pretix_view_requests_total': 'pretix_view_duration_seconds_count'
}
for a, atarget in aliases.items():
metrics[a] = metrics[atarget]
# Throwaway metrics
exact_tables = [
Order, OrderPosition, Invoice, Event, Organizer
]
for m in apps.get_models(): # Count all models
if any(issubclass(m, p) for p in exact_tables):
metrics['pretix_model_instances']['{model="%s"}' % m._meta] = m.objects.count()
else:
metrics['pretix_model_instances']['{model="%s"}' % m._meta] = estimate_count_fast(m)
if settings.HAS_CELERY:
client = app.broker_connection().channel().client
for q in settings.CELERY_TASK_QUEUES:
llen = client.llen(q.name)
lfirst = client.lindex(q.name, -1)
metrics['pretix_celery_tasks_queued_count']['{queue="%s"}' % q.name] = llen
if lfirst:
ldata = json.loads(lfirst)
dt = time.time() - ldata.get('created', 0)
metrics['pretix_celery_tasks_queued_age_seconds']['{queue="%s"}' % q.name] = dt
else:
metrics['pretix_celery_tasks_queued_age_seconds']['{queue="%s"}' % q.name] = 0
return metrics
"""
Provided metrics
"""
pretix_view_duration_seconds = Histogram("pretix_view_duration_seconds", "Return time of views.",
["status_code", "method", "url_name"])
pretix_task_runs_total = Counter("pretix_task_runs_total", "Total calls to a celery task",
["task_name", "status"])
pretix_task_duration_seconds = Histogram("pretix_task_duration_seconds", "Call time of a celery task",
["task_name"])
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 17 13:40:32 2020
@author: frederik
"""
import cv2
locator = 0
for x in range(255):
    cap = cv2.VideoCapture(locator)
    if cap.isOpened():
        print("Found camera %s" % locator)
    else:
        print("Did not find camera %s" % locator)
    locator += 1
    k = cv2.waitKey(1)
    if k % 256 == 27:
        # ESC pressed
        print("Escape hit, closing...")
        break
    cap.release()
cv2.destroyAllWindows() |
# __author__ = 'WeiFu'
from __future__ import print_function, division
import sys, pdb, random
from ruler import *
from Abcd import *
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
from tuner import *
from processarff import *
from sk import rdivDemo
import weka.core.jvm as jvm
from weka.core.converters import Loader
from weka.classifiers import Classifier
from scipy.integrate import trapz
import subprocess
@setting
def cart(**d):
"""
this is for tuning cart.
"""
return o(max_features=None, max_depth=None, min_samples_split=2, min_samples_leaf=1).update(**d)
@setting
def cppWHICH(**d):
"""
this is for tuning cppwhch
"""
return o(alpha=1, beta=1, gamma=0, bins=2, improvements=0.2
).update(**d)
def readcsv(f="./data/ant/ant-1.7copy.csv"):
ff = open(f, "r")
# content = ff.readline().split("\r")
content = ff.readlines()
n = content[0].split(",")
d = [map(float, row.split(",")) for kk, row in enumerate(content[1:])]
return data(names=n, data=d)
def _range():
LIB(seed=1)
RULER(tiny=4)
def _ranges():
for z in ranges(csv()): print(z)
run(_ranges)
def _Abcd(predicted, actual):
abcd = Abcd(db='Traing', rx='Testing')
for act, pre in zip(actual, predicted):
abcd.tell(act, pre)
abcd.header()
score = abcd.ask()
def XY(t, mydata, flag=False):
'''generate X, Y coordinates for plotting'''
if len(mydata) == 0: return [np.array([]), np.array([])]
data = sorted(mydata, key=lambda z: z[the.DATA.loc], reverse=flag)
Loc, TP = 0, 0
xx, pd = [], []
for d in data:
if d.cells[-1] == 1:
TP += d.cells[-1]
Loc += d.cells[the.DATA.loc]
xx += [100 * Loc / t.total[the.DATA.loc]]
pd += [100 * TP / (t.defective + 0.00001)]
x = np.array(xx)
pd = np.array(pd)
return [x, pd]
def manual(t, up=False):
"""
false : ascending order ==> UP method
true : descending order ==> Down method
"""
# data = sorted(t.data, key=lambda z: z[the.DATA.loc], reverse=up)
return XY(t, t.data, up)
def gbest(t):
'''the best method which has highest score'''
mydata = [d for d in t.data if d[-1] == 1]
# data = sorted(data, key=lambda z: z[the.DATA.loc])
return XY(t, mydata)
def sklearn_data(train, test):
train_x = [t.cells[:-1] for t in train.data]
train_y = [(t.cells[-1]) for t in train.data]
test_x = [t.cells[:-1] for t in test.data]
test_y = [(t.cells[-1]) for t in test.data]
return [train_x, train_y, test_x, test_y]
def cart(train, test, tuning=True):
data = sklearn_data(train, test)
clf = DecisionTreeRegressor(random_state=1, max_features=the.cart.max_features, max_depth=the.cart.max_depth,
min_samples_split=the.cart.min_samples_split,
min_samples_leaf=the.cart.min_samples_leaf).fit(data[0], data[1])
if not tuning: # default cart
clf = DecisionTreeRegressor(random_state=1).fit(data[0], data[1])
predictresult = [i for i in clf.predict(data[2])] # to change the format from ndarray to list
predicted = [test.data[j] for j, p in enumerate(predictresult) if
p == 1] # all these data are predicted to be defective
    return XY(test, predicted)
def C45(train, test):
return wekaCALL(train, test, "weka.classifiers.trees.J48")
def RIPPER(train, test):
return wekaCALL(train, test, "weka.classifiers.rules.JRip")
def NaiveBayes(train, test):
return wekaCALL(train, test, "weka.classifiers.bayes.NaiveBayes")
def wekaCALL(train, test, learner):
if not jvm.started: jvm.start()
loader = Loader(classname="weka.core.converters.ArffLoader")
train_data = loader.load_file(train)
test_data = loader.load_file(test)
train_data.class_is_last()
test_data.class_is_last()
cls = Classifier(classname=learner)
cls.build_classifier(train_data)
predicted, name = [], []
has_defects = False
for index, inst in enumerate(test_data):
pred = cls.classify_instance(inst)
if inst.values[-1] == 1: has_defects = True
if pred != 0:
predicted += [
[inst.values[i] for i in range(inst.num_attributes)]] # this API changes "false" to 0, and "true" to 1
name += ["0"] # this is a fake name for each column, which is made to use data() function in readdata.
if has_defects and len(predicted) == 0: return [np.array([]), np.array([])]
ss = data(names=name, data=predicted)
return XY(ss, ss.data)
def cppWhich(arfftrain, arfftest, options=None):
cmd = ["././which", "-t", arfftrain, "-T", arfftest, "-score", "effort"]
if options:
temp = options.split(" ")
cmd.extend(temp)
try:
printout = subprocess.check_output(cmd)
x = map(float, printout.split("\n")[0].split(" ")[:-1]) # this line is X
pd = map(float, printout.split("\n")[1].split(" ")[:-1]) # this line is pd, last element is null, ignored.
return [np.array(x), np.array(pd)]
except: # for some parameters, the cpp version can't return a valid results, showing returned exit status -8
return [np.array([]), np.array([])]
# p = subprocess.Popen(cmd,stdout = subprocess.PIPE)
# printout = p.communicate()[0]
def tunedwhich(arfftrain, arfftune, arfftest, csvtune):
tunner = WHICHCPP(arfftrain, arfftune, csvtune)
tunner.DE()
para = "-bins " + str(the.cppWHICH.bins) + " -alpha " + str(the.cppWHICH.alpha) + " -beta " + str(
the.cppWHICH.beta) + " -gamma " + str(the.cppWHICH.gamma) + " -imp " +str(the.cppWHICH.improvements)
print(para)
return cppWhich(arfftrain, arfftest, para)
def plot(result):
# color = ['r-','k-','b-','b^','g-','y-','c-','m-']
# labels = ['WHICH','Tuned_WHICH','manualUp','manualDown','minimum','best','Tuned_CART','CART']
color = ['r-', 'k-', 'b-', 'g-', 'y-', 'c-', 'm-']
labels = ['WHICH', 'manualUp', 'manualDown', 'minimum', 'best', 'CART', 'C4.5']
plt.figure(1)
for j, x in enumerate(result):
plt.plot(x[0], x[1], color[j], label=labels[j])
plt.xlabel("Effort(% LOC inspected)")
plt.ylabel("PD(% probability of detection)")
plt.title("Effort-vs-PD")
plt.ylim(0, 100)
plt.legend(loc='best')
plt.show()
def _rule(train):
LIB(seed=1)
# RULER(tiny=4,better=gt) initialize
# print(train.score, "baseline :",len(train.data))
for z in ruler(train):
print(z.score, z)
try:
best = ruler(train)[0]
    except IndexError:
        return None
return best
def postCalculation(result):
areaLst = []
for data in result:
        if data is None:
continue # ignore the none.
areaLst += [area(data)]
return percentage(areaLst)
def preSK(stats):
names = ["manualUp", "manualDown", "C4.5", "RIPPER", "NaiveBayes", "MICRO-20", "WHICH-2", "WHICH-4", "WHICH-8",
"WHICH-Tuned"]
out = []
for key, value in stats.iteritems():
ordered = sorted(value)
ordered.insert(0, names[key])
out += [ordered]
return out
def area(result):
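    # Area under the effort-vs-PD curve (trapezoidal rule). If the curve does not
    # reach 100% effort, it is extended with its last PD value so areas stay comparable.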
X = result[0]
Y = result[1]
if len(X) == 0 or len(Y) == 0: return 0
if 100 not in X:
X = np.append(X, [100]) # if this curve does not reach top right, we need to add it
Y = np.append(Y, Y[-1]) # just repeat last value in Y
return trapz(Y, X)
def percentage(lst):  # lst[0] is the best, which is the base.
    val = []
    if len(lst) == 0 or lst[0] == 0: return val  # return empty list
for i in range(1, len(lst)):
val += [lst[i] / lst[0]]
return val
def crossEval(repeats=10, folds=3, src="../DATASET"):
def process(result):
mypercentage = postCalculation(result)
if len(mypercentage) == 0: return # this is the case, where the best is 0
if first_Time: # initialize: for each data set, stats contains all the results of methods for that data set.
for t, each in enumerate(mypercentage):
stats[t] = stats.get(t, []) + [each]
combine[j] = [stats]
else:
for t, each in enumerate(mypercentage):
combine[j][0][t] = combine.get(j)[0][t] + [each]
def learner(csvtest, csvtrain, csvtune, arfftest, arfftrain, arfftune):
result = [] # keep all learners' results for one evaluation
result += [gbest(csvtest)]
result += [manual(csvtest, False)] # up : ascending order
result += [manual(csvtest, True)] # down: descending order
# result += [cart(csvtrain, csvtest, False)] # default cart
result += [C45(arfftrain, arfftest)]
result += [RIPPER(arfftrain, arfftest)]
result += [NaiveBayes(arfftrain, arfftest)]
for para in which_settings:
result += [cppWhich(arfftrain, arfftest, para)]
result += [tunedwhich(arfftrain, arfftune, arfftest, csvtune)]
return result
combine = {}
first_Time = True
files_name = ["ar3", "ar4", "ar5", "cm1", "kc1", "kc2", "kc3", "wm1", "pc"]
which_settings = ["-micro 20 -bins 2", "-bins 2", "-bins 4", "-bins 8"]
# cmd for micro-20, which-2, which-4, which-8
for k in range(repeats):
All(src, folds) # prepare 3 cross-way evaluation data sets
datasets = [join(src, f) for f in listdir(src) if not isfile(join(src, f)) and ".git" not in f and ".idea" not in f]
for j in range(len(datasets)):
stats = {} # keep all learners' results for a complete 3 cross evaluation for one data set.
for i in range(folds):
csvtrain = readcsv(datasets[j] + '/csv/train' + str(i) + '.csv')
csvtest = readcsv(datasets[j] + '/csv/test' + str(i) + '.csv')
csvtune = readcsv(datasets[j] + '/csv/tune' + str(i) + '.csv')
arfftrain = datasets[j] + '/arff/train' + str(i) + '.arff'
arfftest = datasets[j] + '/arff/test' + str(i) + '.arff'
arfftune = datasets[j] + '/arff/tune' + str(i) + '.arff'
process(learner(csvtest, csvtrain, csvtune, arfftest, arfftrain, arfftune)) # calculate percentage and others.
first_Time = False
for key, stats in combine.iteritems(): # print results for each data set
print("*" * 15 + files_name[key] + "*" * 15)
out = preSK(stats[0])
rdivDemo(out)
print("DONE!")
def subprocesstest(options=""):
arfftrain = "/Users/WeiFu/Github/WHICH/CppVersion1.0/cpp/cm1Train.arff"
arfftest = "/Users/WeiFu/Github/WHICH/CppVersion1.0/cpp/cm1Train.arff"
printout = subprocess.check_output(["././which", "-t", arfftrain, "-T", arfftest, "-score", "effort"])
x = map(float, printout.split("\n")[0].split(" ")[:-1]) # this line is X
pd = map(float, printout.split("\n")[1].split(" ")[:-1]) # this line is pd, last element is null, ignored.
Q = [np.array(x), np.array(pd)]
print(printout)
pdb.set_trace()
if __name__ == "__main__":
# run(main())
crossEval()
# subprocesstest() |
"""
=============================
gprof_nn.bin.run_preprocessor
=============================
This module implements a CLI interface to run the preprocessor on a
range of L1C files.
"""
import argparse
import logging
from concurrent.futures import ProcessPoolExecutor
from pathlib import Path
from quantnn.qrnn import QRNN
from quantnn.normalizer import Normalizer
from rich.progress import track
import gprof_nn.logging
from gprof_nn.retrieval import RetrievalDriver, RetrievalGradientDriver
from gprof_nn import sensors
from gprof_nn import statistics
LOGGER = logging.getLogger(__name__)
def add_parser(subparsers):
"""
Add parser for 'run_preprocessor' command to top-level parser. This
function should be called from the top-level parser defined in
'gprof_nn.bin'.
Args:
subparsers: The subparsers object provided by the top-level parser.
"""
parser = subparsers.add_parser(
"run_preprocessor",
description=(
"""
Run preprocessor on L1C file and store results as NetCDF dataset.
"""
),
)
parser.add_argument(
"sensor",
metavar="sensor",
type=str,
help="The sensor corresponding to the data.",
)
    parser.add_argument(
        "configuration",
        metavar="configuration",
        type=str,
        help="The preprocessor configuration to run ('ERA5' or 'GANAL').",
    )
parser.add_argument(
"input",
metavar="input",
type=str,
help="The path to the directory tree containing" "the input data.",
nargs="*",
)
parser.add_argument(
"output",
metavar="output",
type=str,
help="Path to the folder to which to write the " "results.",
)
parser.add_argument(
"--n_processes",
metavar="n",
type=int,
default=4,
help="The number of processes to use for the processing.",
)
parser.set_defaults(func=run)
def process_file(input_file, sensor, configuration, output, log_queue):
"""
Run preprocessor on given input file and store results as
NetCDF files in a given output directory.
Args:
input_file: The path pointing to the input file.
sensor: Sensor object representing the sensor for which
to run the preprocessor.
configuration: Which configuration of the preprocessor to
run.
destination: The folder to which to store the results using
the name of the input file with the suffix replaced by
'.nc'.
log_queue: Queue to handle the logging from sub processes.
"""
from gprof_nn.data.preprocessor import run_preprocessor
input_file = Path(input_file)
output = Path(output)
if not output.parent.exists():
output.parent.mkdir(parents=True, exist_ok=True)
# If output file ends in .pp, don't convert to NetCDF.
if output.suffix == ".pp":
run_preprocessor(
input_file, sensor, configuration=configuration, output_file=output
)
return None
# Else store output in NetCDF format.
data = run_preprocessor(input_file, sensor, configuration=configuration)
data.to_netcdf(output)
def run(args):
"""
Run preprocessor.
Args:
args: The namespace object provided by the top-level parser.
"""
#
# Check and load inputs.
#
sensor = getattr(sensors, args.sensor.upper(), None)
if sensor is None:
LOGGER.error("Sensor '%s' is not supported.", args.sensor)
return 1
configuration = args.configuration.upper()
    if configuration not in ["ERA5", "GANAL"]:
LOGGER.error("Configuration must be one of 'ERA5' or 'GANAL'.")
return 1
inputs = [Path(f) for f in args.input]
for path in inputs:
if not path.exists():
LOGGER.error("The given input path '%s' doesn't exist", path)
return 1
output = Path(args.output)
if output.suffix == "" and not output.exists():
LOGGER.error("The given output path '%s' doesn't exist", output)
return 1
n_procs = args.n_processes
pool = ProcessPoolExecutor(max_workers=n_procs)
input_files = []
output_files = []
for path in inputs:
if path.is_dir():
files = list(path.glob("**/*.HDF5"))
input_files += files
output_files += [
output / (str(f.relative_to(path))[:-4] + "nc") for f in files
]
else:
input_files.append(path)
if output.is_dir():
output_files.append(output / (path.stem + ".nc"))
else:
output_files.append(output)
tasks = []
log_queue = gprof_nn.logging.get_log_queue()
for f, o in zip(input_files, output_files):
tasks.append(pool.submit(process_file, f, sensor, configuration, o, log_queue))
for f, t in track(
list(zip(input_files, tasks)),
description="Processing files:",
console=gprof_nn.logging.get_console(),
):
gprof_nn.logging.log_messages()
try:
t.result()
except Exception as e:
LOGGER.error(
"Processing of file '%s' failed with the following" "exception: %s",
f,
e,
)
|
"""
Data Import | Cannlytics
Authors:
Keegan Skeate <[email protected]>
Created: 6/16/2021
Updated: 6/16/2021
TODO:
- Import model fields from organization's settings in Firestore so
the user can upload custom data points.
"""
import pandas as pd
from cannlytics.firebase import update_document
def import_analyses(directory):
"""Import analyses to Firestore from a .csv or .xlsx file.
Args:
        directory (str): Directory containing 'analyses.xlsx' and 'analytes.xlsx'.
"""
analyses = pd.read_excel(directory + 'analyses.xlsx')
analytes = pd.read_excel(directory + 'analytes.xlsx')
for index, analysis in analyses.iterrows():
analyte_data = []
analyte_names = analysis.analyte_keys.split(', ')
for analyte_key in analyte_names:
analyte_item = analytes.loc[analytes.key == analyte_key]
analyte_data.append(analyte_item.to_dict(orient='records'))
analyses.at[index, 'analytes'] = analyte_data
analyses_data = analyses.to_dict(orient='records')
    for doc_data in analyses_data:
        doc_id = str(doc_data['key'])
        # NOTE: the 'analyses/{id}' collection path is an assumption; adjust it to
        # the organization's Firestore layout.
        update_document('analyses/' + doc_id, doc_data)
        # doc_data = data.to_dict(orient='index')
        # data_ref = create_reference(db, ref)
        # data_ref.document(doc_id).set(doc_data, merge=True)
        # data_ref.set(doc_data, merge=True)
def import_analytes():
"""Import analytes to Firestore from a .csv or .xlsx file.
Args:
filename (str): The full filename of a data file.
"""
    raise NotImplementedError
def import_instruments():
"""Import instruments to Firestore from a .csv or .xlsx file.
Args:
filename (str): The full filename of a data file.
"""
    raise NotImplementedError
|
import librosa
import constants
def custom_mfcc(data):
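    """Return 12 MFCC values and the spectral centroid for the whole clip.
    Because n_fft equals the clip length and hop_length exceeds it, librosa
    produces a single analysis frame; [:, 0] selects that frame.
    """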
    fs = constants.fs
mfcc_data = librosa.feature.mfcc(y=data, sr=fs, n_mfcc=12, n_fft=len(data), hop_length=len(data)+2)[:, 0]
centroid = librosa.feature.spectral_centroid(y=data, sr=fs, n_fft=len(data), hop_length=len(data)+2)[:, 0]
return mfcc_data, centroid[0] |
# Author: Stephen Mugisha
# FSM exceptions
class InitializationError(Exception):
"""
State Machine InitializationError
exception raised.
"""
def __init__(self, message, payload=None):
self.message = message
self.payload = payload #more exception args
def __str__(self):
return str(self.message)
|
import torch
import wandb
from torch.utils.data import DataLoader
from src.utils import save_models, saving_path_rrn, get_answer, names_models
from src.utils import BabiDataset, batchify, get_answer_separately
from collections import defaultdict
REASONING_STEPS = 3
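# Number of recurrent relational reasoning steps; the loss and accuracy below are
# accumulated over all steps and averaged per batch.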
def train(train_stories, validation_stories, epochs, lstm, rrn, criterion, optimizer, batch_size, no_save, device, result_folder):
train_babi_dataset = BabiDataset(train_stories)
best_acc = 0.
val_accuracies = []
val_losses = []
avg_train_accuracies = []
avg_train_losses = []
for epoch in range(1,epochs+1):
train_accuracies = []
train_losses = []
train_dataset = DataLoader(train_babi_dataset, batch_size=batch_size, shuffle=True, collate_fn=batchify, drop_last=True)
rrn.train()
lstm.train()
for batch_id, (question_batch,answer_batch,facts_batch,_,_) in enumerate(train_dataset):
if (batch_id+1) % 5000 == 0:
print("Batch ", batch_id, "/", len(train_dataset), " - epoch ", epoch, ".")
question_batch,answer_batch,facts_batch = question_batch.to(device), \
answer_batch.to(device), \
facts_batch.to(device)
lstm.zero_grad()
rrn.zero_grad()
h_q = lstm.reset_hidden_state_query()
h_f = lstm.reset_hidden_state_fact(facts_batch.size(0))
question_emb, h_q = lstm.process_query(question_batch, h_q)
facts_emb, one_of_k, h_f = lstm.process_facts_rrn(facts_batch, h_f)
input_mlp = torch.cat( (facts_emb, question_emb.unsqueeze(1).repeat(1,facts_emb.size(1),1), one_of_k), dim=2)
final_input = rrn.process_input(input_mlp)
correct_rr = 0.
loss_rr = 0.
loss = 0.
h = rrn.reset_g(final_input.size(0)*final_input.size(1))
for reasoning_step in range(REASONING_STEPS):
rr, hidden, h = rrn(final_input, facts_emb , h, question_emb)
loss += criterion(rr, answer_batch)
with torch.no_grad():
correct, _ = get_answer(rr, answer_batch)
correct_rr += correct
loss_rr += loss.item()
loss.backward()
optimizer.step()
train_accuracies.append(correct_rr / float(REASONING_STEPS))
train_losses.append(loss_rr / float(REASONING_STEPS))
avg_train_losses.append(sum(train_losses)/len(train_losses))
avg_train_accuracies.append(sum(train_accuracies)/len(train_accuracies))
val_loss, val_accuracy = test(validation_stories,lstm,rrn,criterion, device, batch_size)
val_accuracies.append(val_accuracy)
val_losses.append(val_loss)
if not no_save:
if val_accuracies[-1] > best_acc:
save_models([(lstm, names_models[0]), (rrn, names_models[2])], result_folder, saving_path_rrn)
best_acc = val_accuracies[-1]
print("Train loss: ", avg_train_losses[-1], ". Validation loss: ", val_losses[-1])
print("Train accuracy: ", avg_train_accuracies[-1], ". Validation accuracy: ", val_accuracies[-1])
print()
wandb.log({
'epoch': epoch,
'train_loss': avg_train_losses[-1],
'train_accuracy': avg_train_accuracies[-1],
'val_loss': val_loss,
'val_accuracy': val_accuracy
})
return avg_train_losses, avg_train_accuracies, val_losses, val_accuracies
def test(stories, lstm, rrn, criterion, device, batch_size):
lstm.eval()
rrn.eval()
with torch.no_grad():
test_babi_dataset = BabiDataset(stories)
test_accuracy = 0.
test_loss = 0.
test_dataset = DataLoader(test_babi_dataset, batch_size=batch_size, shuffle=False, collate_fn=batchify, drop_last=True)
for batch_id, (question_batch,answer_batch,facts_batch,_,_) in enumerate(test_dataset):
if (batch_id+1) % 5000 == 0:
print("Test Batch ", batch_id, "/", len(test_dataset))
question_batch,answer_batch,facts_batch = question_batch.to(device), \
answer_batch.to(device), \
facts_batch.to(device)
h_q = lstm.reset_hidden_state_query()
h_f = lstm.reset_hidden_state_fact(facts_batch.size(0))
question_emb, h_q = lstm.process_query(question_batch, h_q)
facts_emb, one_of_k, h_f = lstm.process_facts_rrn(facts_batch, h_f)
input_mlp = torch.cat( (facts_emb, question_emb.unsqueeze(1).repeat(1,facts_emb.size(1),1), one_of_k), dim=2)
final_input = rrn.process_input(input_mlp)
loss = 0.
h = rrn.reset_g(final_input.size(0)*final_input.size(1))
for reasoning_step in range(REASONING_STEPS):
rr, hidden, h = rrn(final_input, facts_emb , h, question_emb)
if reasoning_step == REASONING_STEPS-1:
loss = criterion(rr, answer_batch).item()
correct, _ = get_answer(rr, answer_batch)
test_accuracy += correct
test_loss += loss
return test_loss / float(len(test_dataset)), test_accuracy / float(len(test_dataset))
def test_separately(stories, lstm, rrn, device, batch_size):
lstm.eval()
rrn.eval()
with torch.no_grad():
accuracies = defaultdict(list)
test_babi_dataset = BabiDataset(stories)
test_dataset = DataLoader(test_babi_dataset, batch_size=batch_size, shuffle=False, collate_fn=batchify, drop_last=True)
for batch_id, (question_batch,answer_batch,facts_batch,task_label,_) in enumerate(test_dataset):
if (batch_id+1) % 5000 == 0:
print("Test Batch ", batch_id, "/", len(test_dataset))
question_batch,answer_batch,facts_batch, task_label = question_batch.to(device), \
answer_batch.to(device), \
facts_batch.to(device), \
task_label.tolist()
h_q = lstm.reset_hidden_state_query()
h_f = lstm.reset_hidden_state_fact(facts_batch.size(0))
question_emb, h_q = lstm.process_query(question_batch, h_q)
facts_emb, one_of_k, h_f = lstm.process_facts_rrn(facts_batch, h_f)
input_mlp = torch.cat( (facts_emb, question_emb.unsqueeze(1).repeat(1,facts_emb.size(1),1), one_of_k), dim=2)
final_input = rrn.process_input(input_mlp)
h = rrn.reset_g(final_input.size(0)*final_input.size(1))
for reasoning_step in range(REASONING_STEPS):
rr, hidden, h = rrn(final_input, facts_emb , h, question_emb)
if reasoning_step == REASONING_STEPS -1:
corrects = get_answer_separately(rr, answer_batch)
for el, correct in zip(task_label, corrects):
                        accuracies[el].append(1. if correct else 0.)
f = lambda x: sum(x) / float(len(x)) # get mean over each list values of dictionary
avg_test_acc = {k: f(v) for k,v in accuracies.items()}
return avg_test_acc
|
import json
import os
import pickle
import pandas as pd
import sqlalchemy
class StringFolder(object):
"""
Class that will fold strings. See 'fold_string'.
This object may be safely deleted or go out of scope when
strings have been folded.
"""
def __init__(self):
self.unicode_map = {}
def fold_string(self, s):
"""
Given a string (or unicode) parameter s, return a string object
that has the same value as s (and may be s). For all objects
with a given value, the same object will be returned. For unicode
objects that can be coerced to a string with the same value, a
string object will be returned.
If s is not a string or unicode object, it is returned unchanged.
:param s: a string or unicode object.
:return: a string or unicode object.
"""
# If s is not a string or unicode object, return it unchanged
if not isinstance(s, str):
return s
# If s is already a string, then str() has no effect.
# If s is Unicode, try and encode as a string and use intern.
# If s is Unicode and can't be encoded as a string, this try
# will raise a UnicodeEncodeError.
try:
return str(s)
except UnicodeEncodeError:
# Fall through and handle s as Unicode
pass
# Look up the unicode value in the map and return
# the object from the map. If there is no matching entry,
# store this unicode object in the map and return it.
t = self.unicode_map.get(s, None)
if t is None:
# Put s in the map
t = self.unicode_map[s] = s
return t
def string_folding_wrapper(results):
keys = results.keys()
folder = StringFolder()
for row in results:
yield tuple(folder.fold_string(row[key]) for key in keys)
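# Illustrative sketch (not part of the original module): _FakeResults is a
# stand-in for a SQLAlchemy result object (keys() plus mapping-style rows), used
# only to show how string_folding_wrapper turns result rows into plain tuples
# while routing every value through StringFolder.fold_string.
class _FakeResults(object):
    def keys(self):
        return ['city', 'country']
    def __iter__(self):
        rows = [{'city': 'Berlin', 'country': 'DE'},
                {'city': 'Munich', 'country': 'DE'}]
        return iter(rows)
def _string_folding_example():
    # Returns [('Berlin', 'DE'), ('Munich', 'DE')]
    return list(string_folding_wrapper(_FakeResults()))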
class DBConnection:
"""
Class that create connection to the DB
"""
def __init__(self, cred_location):
self.cred_location = cred_location
self.engine = self.create_engine()
def create_engine(self):
with open(self.cred_location) as fh:
creds = json.loads(fh.read())
db_connection = 'postgresql://' + \
creds['user_name'] + ':' + creds['password'] + '@' + \
creds['host_name'] + ':' + creds['port_num'] + '/' + creds['db_name']
engine = sqlalchemy.create_engine(db_connection)
return engine
def sql_query_to_data_frame(self, query, cust_id):
"""return a dataframe based ona query
query: SQL query as a string, without a ; in the end
cust_id: boolean,
True means the dataframe contains the customer_id and
customer_nr too, and code drops the customer_id to save
memory.
False: it does not drop customer_id
"""
query = query.replace("%", "%%")
# connection is closed exiting the with
with self.engine.begin() as connection:
results = (connection.execution_options(stream_results=True).execute(query))
df = pd.DataFrame(string_folding_wrapper(results))
df.columns = results.keys()
df = df.iloc[:, ~df.columns.duplicated()]
        if 'customer_id' in df.columns and cust_id:
            df = df.drop(columns=['customer_id'])
#connection.close()
self.engine.dispose()
return df
    def load_and_save_data_frame(self, filename, query, cust_id=False, overwrite=False):
        # Check if the directory exists, otherwise create it
        dir_name = os.path.dirname(filename)
        if not os.path.exists(dir_name):
            os.makedirs(dir_name)
        if os.path.isfile(filename) and os.stat(filename).st_size > 0 and not overwrite:
            # Pickle files must be opened in binary mode
            with open(filename, 'rb') as file:
                df = pickle.load(file)
        else:
            with open(filename, 'wb') as file:
                df = self.sql_query_to_data_frame(query, cust_id)
                pickle.dump(df, file)
return df |
'''
Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
NVIDIA CORPORATION and its licensors retain all intellectual property
and proprietary rights in and to this software, related documentation
and any modifications thereto. Any use, reproduction, disclosure or
distribution of this software and related documentation without an express
license agreement from NVIDIA CORPORATION is strictly prohibited.
'''
def _impl(ctx):
# We currently generate both PDF and HTML. Originally I wanted to provide an attribute which can
# be used to select the document output format. However using a declare_file with a filename
# generated from the output name and the extension doesn't seem to work. The rule would not run
    # thinking that there is nothing to do. There is probably a solution for that, but in the meantime
# we just create both.
for fmt in ["latexpdf", "html"]:
# Get tool and output file based on desired format
tool = None
out = None
if fmt == "latexpdf":
tool = ctx.executable._sphinx_latexpdf_tool
out = ctx.outputs.latexpdf
elif fmt == "html":
tool = ctx.executable._sphinx_html_tool
out = ctx.outputs.html
else:
fail("Invalid format")
        # Create an action to create the doc from the source files and all its transitive dependencies.
ctx.actions.run(
inputs = ctx.files.srcs + ctx.files.deps + ctx.files._sphinx_tool_deps,
outputs = [out],
arguments = [out.path] + [x.path for x in (ctx.files.srcs + ctx.files.deps)],
progress_message = "Building documentation with SPHINX...",
executable = tool,
use_default_shell_env = True)
# A rule to create documentation from restructured text using Sphinx.
sphinx = rule(
implementation = _impl,
attrs = {
        # The name of the generated file. This is for example the name of the PDF file (without
        # extension) when using the latexpdf format.
"output_name": attr.string(
mandatory = True
),
# Additional restructured text source files used to create the documentation.
"srcs": attr.label_list(allow_files = [".rst", ".jpg", ".png"]),
        # Additional dependencies which will be traversed transitively to discover all restructured
# text source files required to create this document.
"deps": attr.label_list(),
# This is the tool used to create the documentation in the latex PDF format
"_sphinx_latexpdf_tool": attr.label(
executable = True,
cfg = "host",
allow_files = True,
default = Label("//engine/build/sphinx:sphinx_latexpdf"),
),
# This is the tool used to create the documentation in the HTML format
"_sphinx_html_tool": attr.label(
executable = True,
cfg = "host",
allow_files = True,
default = Label("//engine/build/sphinx:sphinx_html"),
),
# TODO For some reason data dependencies of the sphinx tool don't show up in the sandbox.
    # Thus we just add them here explicitly
"_sphinx_tool_deps": attr.label(
default=Label("//engine/build/sphinx:sphinx_files"),
)
},
# TODO For some reason when declare_file is used instead of this the rule doesn't activate.
outputs = {
"latexpdf": "%{output_name}.pdf",
"html": "%{output_name}.tar.gz",
}
)
SphinxFiles = provider(fields = ["transitive_sources"])
def get_transitive_srcs(srcs, deps):
"""Obtain the source files for a target and its transitive dependencies.
Args:
srcs: a list of source files
deps: a list of targets that are direct dependencies
Returns:
a collection of the transitive sources
"""
return depset(srcs, transitive = [dep[SphinxFiles].transitive_sources for dep in deps])
def _sphinx_dep_impl(ctx):
trans_srcs = get_transitive_srcs(ctx.files.srcs, ctx.attr.deps)
return [
SphinxFiles(transitive_sources = trans_srcs),
DefaultInfo(files = trans_srcs),
]
# This rule is used to collect source files and track their dependencies. These can be used in
# the deps section of a `sphinx` rule to provide the document generator with all required input
# files.
sphinx_dep = rule(
implementation = _sphinx_dep_impl,
attrs = {
"srcs": attr.label_list(allow_files = True),
"deps": attr.label_list(),
},
)
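# Example BUILD usage (illustrative only; the load path below assumes this file
# lives at //engine/build/sphinx:sphinx.bzl - adjust to wherever it is located):
#
#   load("//engine/build/sphinx:sphinx.bzl", "sphinx", "sphinx_dep")
#
#   sphinx_dep(
#       name = "doc_sources",
#       srcs = glob(["*.rst", "*.png"]),
#   )
#
#   sphinx(
#       name = "manual",
#       output_name = "manual",
#       srcs = ["index.rst"],
#       deps = [":doc_sources"],
#   )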
|
from preprocessing_functions import preprocessing_functions as pf
import h5py as h5
import glob
# from hdfchain import HDFChain
blacklist = pf.blacklist
colblacklist = pf.colblacklist
dataPath = '~/sync/bachelorarbeit/daten/sim/'
# The hard-coded 'nomissing' file ending could be replaced by a variable.
# Different types of data need to live in separate directories.
def prepare_corsika_from_blacklist(path, oldEnding='', newEnding='nomissing'):
"""Takes the path of the corsika files (there should be only corsika files in a single directory)
and removes unuseable and too short keys and writes new hdf5 files"""
oldCorsika = [h5.File(file, 'r') for file in sorted(glob.glob(path+'*'+oldEnding))]
uselessKeys = []
for corsika in oldCorsika:
uselessKeys.extend(pf.attrs_that_arent_simple_arrays(corsika))
tooShortKeys = []
for corsika in oldCorsika:
tooShortKeys.extend(pf.too_short_list(corsika, keyBlacklist=uselessKeys))
for corsika in oldCorsika:
pf.corsika_generate_new_arrays(corsika, fileEnding=newEnding)
newCorsika = [h5.File(file, 'r') for file in sorted(glob.glob(path+'*'+newEnding+'*'))]
for new, old in zip(newCorsika, oldCorsika):
pf.corsika_assign_arrays_and_weights(new, old)
pf.setid(new)
new.flush()
new.close()
old.close()
return uselessKeys, tooShortKeys
def prepare_nu_from_blacklist(path, oldEnding, newEnding):
"""Takes the path of the nu files (there should be only nu files in a single directory)
and removes unuseable and too short keys and writes new hdf5 files
Returns the useless and too short keys"""
oldNU = [h5.File(file, 'r') for file in sorted(glob.glob(path+'*'+oldEnding))]
uselessKeys = []
for nu in oldNU:
uselessKeys.extend(pf.attrs_that_arent_simple_arrays(nu))
tooShortKeys = []
for nu in oldNU:
tooShortKeys.extend(pf.too_short_list(nu, keyBlacklist=uselessKeys))
for nu in oldNU:
pf.generate_new_arrays(nu, fileEnding=newEnding)
newNU = [h5.File(file, 'r') for file in sorted(glob.glob(path+'*'+newEnding+'*'))]
for new, old in zip(newNU, oldNU):
pf.assign_arrays(new, old)
pf.setid(new)
new.flush()
new.close()
old.close()
return uselessKeys, tooShortKeys
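# Minimal usage sketch. The sub-directory name and file endings below are
# illustrative assumptions; each data type is expected to live in its own
# directory containing only files of that type.
if __name__ == "__main__":
    import os
    corsika_dir = os.path.expanduser(dataPath) + 'corsika/'
    useless, too_short = prepare_corsika_from_blacklist(corsika_dir, oldEnding='.hdf5', newEnding='nomissing')
    print("Unusable keys:", sorted(set(useless)))
    print("Too short keys:", sorted(set(too_short)))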
|
"""
Copyright 2013, 2014 Ricardo Tubio-Pardavila
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__author__ = '[email protected]'
from django.db import models
import logging
from services.common import misc
from services.configuration.models import segments as segment_models
from services.configuration.models import channels as channel_models
from services.scheduling.models import compatibility as compatibility_models
from services.scheduling.models import operational as operational_models
logger = logging.getLogger('communications')
class PassiveMessage(models.Model):
"""Message model class for received out-of-operations messages
This class models the messages to be sent from Ground Stations to the
network with the data passively received from satellites. This means that
no remote operation has to be scheduled for the data to be received.
"""
class Meta:
app_label = 'communications'
groundstation = models.ForeignKey(
segment_models.GroundStation,
verbose_name='GroundStation that tx/rx this message'
)
retrieved = models.BooleanField(
'Indicates whether the message has been retrieved by a remote user.',
default=False
)
doppler_shift = models.FloatField(
'Doppler shift during the reception of the message.'
)
groundstation_timestamp = models.BigIntegerField(
'Timestamp for when this message was received at the Ground Station'
)
reception_timestamp = models.BigIntegerField(
'Timestamp for when this message was received at the server'
)
message = models.CharField(
'Message raw data in base64',
max_length=4000
)
def __unicode__(self):
"""Human readable unicode string
        Human readable representation of this object as a unicode string.
:return: Unicode string
"""
return '>>> message (#' + str(self.pk) + '), gs = ' +\
str(self.groundstation) + '@' +\
str(self.groundstation_timestamp) + ', (BASE64)=' +\
str(self.message)
class MessageManager(models.Manager):
"""
Manager for the messages.
This manager handles the operations over the PassiveMessage table in the
database.
"""
@staticmethod
def get_channels(operational_slot):
"""
This function returns a tuple with the ground station and the
spacecraft channels involved in the communication.
:param operational_slot: Reference to the operational slot
:return: Tuple with the references to the GS and SC channels
"""
if operational_slot.identifier == operational_models.TEST_O_SLOT_ID:
return None, None
try:
channels = compatibility_models.ChannelCompatibility.objects\
.filter(
spacecraft=operational_slot.pass_slot.spacecraft,
groundstation=operational_slot.pass_slot.groundstation
)[0]
return (
channels.groundstation_channel, channels.spacecraft_channel
)
        except (IndexError, compatibility_models.ChannelCompatibility.DoesNotExist):
            # filter(...)[0] raises IndexError when no compatible channel exists
raise ValueError(
'No compatible channels found, discarding message for slot =' +
str(operational_slot.identifier)
)
def create(
self, operational_slot, upwards, forwarded, tx_timestamp, message
):
"""Creates the object in the database
Creates the object in the database with the data provided and including
the current UTC timestamp as the timestamp of the moment at which this
message was received in the server.
# TODO The protocol should invoke the function for storing the message
# within the database including the channels involved in the
# communication, which will definitely allow supporting multiple
# channels.
:param operational_slot: The referenced slot
:param upwards: Flag indicating the direction of the message
:param forwarded: Flag indicating whether the message was forwarded
:param tx_timestamp: Timestamp of the moment at which this message was
received at the GroundStation
:param message: Binary message to be stored in the database
"""
channels = MessageManager.get_channels(operational_slot)
return super(MessageManager, self).create(
operational_slot=operational_slot,
groundstation_channel=channels[0],
spacecraft_channel=channels[1],
upwards=upwards,
forwarded=forwarded,
reception_timestamp=misc.get_utc_timestamp(),
transmission_timestamp=tx_timestamp,
message=message
)
class Message(models.Model):
"""Message model class.
This class includes all the information related with the relay of a
message
"""
objects = MessageManager()
operational_slot = models.ForeignKey(
operational_models.OperationalSlot,
verbose_name='OperationalSlot during which the message was transmitted'
)
groundstation_channel = models.ForeignKey(
channel_models.GroundStationChannel,
null=True,
verbose_name='GroundStation channel that tx/rx this message'
)
spacecraft_channel = models.ForeignKey(
channel_models.SpacecraftChannel,
null=True,
verbose_name='Spacecraft channel that tx/rx this message'
)
upwards = models.BooleanField(
'Message relay direction(upwards = GS-to-SC, downwards = SC-to-GS)',
default=False
)
forwarded = models.BooleanField(
'Whether this message has already been forwarded to the receiver',
default=False
)
reception_timestamp = models.BigIntegerField(
'Timestamp at which this message was received at the server'
)
transmission_timestamp = models.BigIntegerField(
'Timestamp at which this message was forwarded to the receiver'
)
message = models.CharField(
'Message raw data in base64',
max_length=4000
)
|
class CurrentStatusCoins():
BTC = 0
ETH = 0
XRP = 0
def get_current(self):
return {
"BTC": self.BTC,
"ETH": self.ETH,
"XRP": self.XRP
}
def set_coins(self, new_values):
self.BTC = new_values["BTC"]
self.ETH = new_values["ETH"]
self.XRP = new_values["XRP"]
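# Minimal usage sketch: update the tracked balances and read them back.
if __name__ == "__main__":
    status = CurrentStatusCoins()
    status.set_coins({"BTC": 0.5, "ETH": 2.0, "XRP": 100.0})
    print(status.get_current())  # {'BTC': 0.5, 'ETH': 2.0, 'XRP': 100.0}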
|
#!/usr/bin/python3
import json
import datetime
import smbus
import socket
import math
import time
from collections import OrderedDict
from pysolar import solar
import pytz
import os.path
import sys
from dual_g2_hpmd_rpi import motors, MAX_SPEED
#480 is Positive 100% voltage
#-480 is Negative 100% voltage
#240 is Positive 50% voltage
#-240 is Negative 50% voltage
#0 is Stop
MYLAT = 1000.0
MYLNG = 1000.0
EAST_POS=0.0
WEST_POS=0.0
EAST_ANGLE=0.0
WEST_ANGLE=0.0
axis_azi = 0.0
axis_tilt = 0.0
MOVE_INTERVAL=600
NIGHT_POS=0.0
STRTZ = ""
ENV_FILE = "env.list"
if not os.path.isfile(ENV_FILE):
print("ENV_FILE at %s not found - exiting")
sys.exit(1)
e = open(ENV_FILE, "r")
lines = e.read()
e.close()
for line in lines.split("\n"):
myline = line.strip()
if myline.find("#") == 0:
pass
elif myline != "":
arline = myline.split("=")
if arline[0] == "MYLAT":
MYLAT = float(arline[1])
if arline[0] == "MYLNG":
MYLNG = float(arline[1])
if arline[0] == "STRTZ":
STRTZ = arline[1]
if arline[0] == "WEST_ANGLE":
WEST_ANGLE = float(arline[1])
if arline[0] == "EAST_ANGLE":
EAST_ANGLE = float(arline[1])
if arline[0] == "WEST_POS":
WEST_POS = float(arline[1])
if arline[0] == "EAST_POS":
EAST_POS = float(arline[1])
if arline[0] == "AXIS_AZI":
axis_azi = float(arline[1])
if arline[0] == "AXIS_TILT":
axis_tilt = float(arline[1])
if arline[0] == "MOVE_INTERVAL":
MOVE_INTERVAL = int(arline[1])
INVERT_SENSOR = True # We installed our sensor apparently "upside down" therefore we need to invert the reading to align with the solar function
ECONV = EAST_POS / EAST_ANGLE
WCONV = WEST_POS / WEST_ANGLE
if MYLAT == 1000.0 or MYLNG == 1000.0 or STRTZ == "" or EAST_ANGLE == 0.0 or WEST_ANGLE == 0.0 or WEST_POS == 0.0 or EAST_POS == 0.0 or axis_azi == 0.0 or axis_tilt == 0.0:
print("ENV Values not found please check your env.list file to ensure valid values exist for EAST and WEST_POS, EAST and WEST_ANGLE, AXIS_AZI, AXIS_TILE, MYLAT, MYLNG, and STRTZ")
sys.exit(1)
print("==================")
print("Starting with values:")
print("MYLAT: %s" % MYLAT)
print("MYLNG: %s" % MYLNG)
print("STRTZ: %s" % STRTZ)
print("AXIS_AZI: %s" % axis_azi)
print("AXIS_TILT: %s" % axis_tilt)
print("EAST_ANGLE: %s" % EAST_ANGLE)
print("WEST_ANGLE: %s" % WEST_ANGLE)
print("EAST_POS: %s" % EAST_POS)
print("WEST_POS: %s" % WEST_POS)
print("ECONV: %s" % ECONV)
print("WCONV: %s" % WCONV)
print("MOVE_INTERVAL: %s" % MOVE_INTERVAL)
print("INVERT_SENSOR: %s" % INVERT_SENSOR)
print("=================")
print("")
# Get I2C bus
busloc = 0x68 # Default for the MPU-6000 - Shouldn't need to change this.
bus = smbus.SMBus(1)
myhostname = socket.gethostname()
def main():
global bus
global busloc
global axis_tilt
global axis_azi
initsensor(bus, busloc)
timezone = pytz.timezone(STRTZ)
motors.enable()
motors.setSpeeds(0, 0)
RUNNING = True
last_set_val = 0
last_set_time = 0
while RUNNING:
curtime = datetime.datetime.now()
curday = curtime.strftime("%Y-%m-%d")
mystrtime = curtime.strftime("%Y-%m-%d %H:%M:%S")
epochtime = int(time.time())
mydate = timezone.localize(curtime)
curalt, curaz = get_alt_az(mydate)
cur_r = mydeg(get_pos())
track_err = False
if curalt > 0:
# We only check if there is a track error if the sun is up, no point in correcting all night long
if math.fabs(math.fabs(cur_r) - math.fabs(last_set_val)) > 2.0:
print("%s - Track error, going to set track_err to true: cur_r: %s - last_set_val: %s" % (mystrtime, cur_r, last_set_val))
track_err = True
sun_r = getR(curalt, curaz, axis_tilt, axis_azi)
if INVERT_SENSOR:
sun_r = -sun_r
print("%s - Sun is up! - Sun Alt: %s - Sun Azi: %s - Cur Rot: %s - Potential Sun Rot: %s" % (mystrtime, curalt, curaz, cur_r, sun_r))
NEW_SET_VAL = None
if sun_r <= EAST_ANGLE and sun_r >= WEST_ANGLE:
print("%s - Potential new val: %s - cur: %s" % (mystrtime, sun_r, cur_r))
NEW_SET_VAL = sun_r
elif sun_r > EAST_ANGLE and (last_set_val != EAST_ANGLE or track_err == True):
print("%s - Sun Rot (%s) is Beyond East(%s), and array needs to move there" % (mystrtime, sun_r, EAST_ANGLE))
NEW_SET_VAL = EAST_ANGLE
elif sun_r < WEST_ANGLE and (last_set_val != WEST_ANGLE or track_err == True):
print("%s - Sun Rot (%s) is Beyond West(%s), and array needs to move there" % (mystrtime, sun_r, WEST_ANGLE))
NEW_SET_VAL = WEST_ANGLE
if epochtime - last_set_time >= MOVE_INTERVAL and NEW_SET_VAL is not None:
print("%s Setting New val: %s from %s" % (mystrtime, NEW_SET_VAL, cur_r))
last_set_time = epochtime
last_set_val = NEW_SET_VAL
goto_angle(NEW_SET_VAL)
else:
if last_set_val != NIGHT_POS:
print("%s - Sun is down setting to %s for the night" % (mystrtime, NIGHT_POS))
goto_angle(NIGHT_POS)
last_set_val = NIGHT_POS
last_set_time = epochtime
time.sleep(60)
def getR(sun_alt, sun_azi, axis_tilt, axis_azi):
# Return in Degrees
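    # This appears to follow the standard rotation-angle geometry for a tilted
    # single-axis tracker: the sun direction (from its zenith and azimuth) is
    # projected into the plane perpendicular to the tracker axis, X below is the
    # tangent of the resulting rotation angle, and mypsi adds +/-180 degrees so
    # that atan(X) lands in the correct quadrant.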
sun_zen = 90 - sun_alt
x_1 = (math.sin(math.radians(sun_zen)) * math.sin(math.radians(sun_azi) - math.radians(axis_azi)))
x_2 = (math.sin(math.radians(sun_zen)) * math.cos(math.radians(sun_azi) - math.radians(axis_azi)) * math.sin(math.radians(axis_tilt)))
x_3 = (math.cos(math.radians(sun_zen)) * math.cos(math.radians(axis_tilt)))
x_4 = x_2 + x_3
X = x_1 / x_4
if X == 0.0 or (X > 0 and (sun_azi - axis_azi) > 0) or (X < 0 and (sun_azi - axis_azi) < 0):
mypsi = math.radians(0.0)
elif X < 0 and (sun_azi - axis_azi) > 0:
mypsi = math.radians(180.0)
elif X > 0 and (sun_azi - axis_azi) < 0:
mypsi = math.radians(-180.0)
else:
print("awe crap")
mypsi = 0
R = math.atan(X) + mypsi
return math.degrees(R)
def goto_angle(setangle):
global ECONV
global WCONV
global motors
CONV = 0.0
if setangle < 0:
CONV = WCONV
elif setangle > 0:
CONV = ECONV
TARGET_POS = CONV * setangle
# Get Current Location
curcnt = 0
cursum = 0.0
failcnt = 0
for x in range(10):
try:
xa, ya, za = getreading(bus, "accel", busloc)
curcnt += 1
cursum += xa
except:
failcnt += 1
if failcnt > 20:
break
print("Reading Fail!!")
else:
continue
CURRENT_POS = get_pos()
print("The current location is %s and you want to go to %s (%s in angle form)" % (CURRENT_POS, TARGET_POS, setangle))
finished = False
if CURRENT_POS > TARGET_POS:
# We want to move west
motor_dir = -480
elif CURRENT_POS < TARGET_POS:
motor_dir = 480
else:
motor_dir = 0
finished = True
print("No change!")
motors.motor1.setSpeed(motor_dir)
tcnt = 0
while finished == False:
tcnt += 1
NEW_POS = get_pos()
if motor_dir < 0:
if NEW_POS <= TARGET_POS:
motors.motor1.setSpeed(0)
finished = True
elif motor_dir > 0:
if NEW_POS >= TARGET_POS:
motors.motor1.setSpeed(0)
finished = True
        if tcnt >= 1200:
            # Safety timeout (~10 minutes at 0.5 s per loop): stop the motor and give up.
            motors.motor1.setSpeed(0)
            print("It has taken over 10 minutes of waiting and we didn't get to where you want, we are giving up at %s" % NEW_POS)
            finished = True
time.sleep(0.5)
print("Finished setting position")
#motors.motor1.setSpeed(-480)
def mydeg(pos):
retval = 0
if pos > 0:
retval = pos / ECONV
elif pos < 0:
retval = pos / WCONV
return retval
def get_pos():
global bus
global busloc
curcnt = 0
cursum = 0.0
failcnt = 0
for x in range(5):
try:
xa, ya, za = getreading(bus, "accel", busloc)
curcnt += 1
cursum += xa
except:
failcnt += 1
if failcnt > 20:
break
print("Reading Fail!!")
else:
continue
return cursum / curcnt
def initsensor(bus, busloc):
# Initialize things:
# Select gyroscope configuration register, 0x1B(27)
# 0x18(24) Full scale range = 2000 dps
bus.write_byte_data(busloc, 0x1B, 0x18)
# MPU-6000 address, 0x68(104)
# Select accelerometer configuration register, 0x1C(28)
# 0x18(24) Full scale range = +/-16g
bus.write_byte_data(busloc, 0x1C, 0x18)
# MPU-6000 address, 0x68(104)
# Select power management register1, 0x6B(107)
    # 0x01(01) PLL with xGyro reference
bus.write_byte_data(busloc, 0x6B, 0x01)
#
time.sleep(0.8)
def getreading(bus, src, busloc):
# src is accel or gyro
if src == "accel":
srcval = 0x3B
elif src == "gyro":
srcval = 0x43
else:
srcval = 0x00
print("Invalid src")
return (0,0,0)
data = bus.read_i2c_block_data(busloc, srcval, 6)
x = convertreading(data[0], data[1])
y = convertreading(data[2], data[3])
z = convertreading(data[4], data[5])
return x, y, z
def convertreading(val1, val2):
retval = val1 * 256 + val2
if retval > 32767:
retval -= 65536
return retval
def get_alt_az(dt):
alt = solar.get_altitude(MYLAT, MYLNG, dt)
az = solar.get_azimuth(MYLAT, MYLNG, dt)
return alt, az
if __name__ == '__main__':
main()
|
import os
import json
import requests
from PIL import Image
def get_imgur_client_id():
"""
gets the imgur client id key from config.txt file.
"""
with open('config.txt', 'r') as f:
        client_id = f.read().strip()  # strip any trailing newline
return client_id
def create_download_dir():
"""
creates a download directory for images.
"""
dir_images = os.path.join('images')
if not os.path.exists(dir_images):
os.mkdir(dir_images)
return dir_images
def download_image_from_url(url, directory):
"""
download image and save into given directory.
"""
response = requests.get(url, stream=True)
if response.status_code == 200:
filename = os.path.basename(url)
filepath = os.path.join(directory, f'{filename}')
with open(filepath, 'wb') as f:
f.write(response.content)
def build_link_list(client_id, num_of_images):
"""
builds a list of image links.
"""
i = 1
cnt = 0
url_list = []
url_list_len = []
try:
while(cnt < num_of_images):
# get request
response = requests.get(
f'https://api.imgur.com/3/gallery/random/random/{i}',
headers={'Authorization': f'Client-ID {client_id}'},
stream=True
)
# control
if response.status_code == 200:
data_list = json.loads(response.content)['data']
url_list.extend([
i['link']
for i in data_list
if 'type' in i
and i['type'] in ('image/png', 'image/jpeg')
and i['link'] not in url_list
])
cnt = len(url_list)
url_list_len.append(cnt)
i += 1
# control if api doesn't return anything new
                if len(url_list_len) >= 10 and len(set(url_list_len[-10:])) == 1:
break
elif response.status_code == 429:
print('too many requests, enough, or you can choose to put time.sleep() in here...')
break
else:
break
    except Exception:
print('api limit reached!')
return url_list
def create_thumbnail(size, path):
"""
create resized version of the image path given, with the same name
extended with _thumbnail.
"""
try:
# create thumbnail
image = Image.open(path)
image.thumbnail(size)
        # create path for thumbnail next to the original file
        filename, extension = os.path.splitext(path)
        new_filename = '{}{}{}'.format(filename, '_thumbnail', extension)
        # save thumbnail
        image.convert('RGB').save(new_filename)
    except Exception:
        print('image error')
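# Minimal end-to-end sketch: fetch a handful of random image links and create
# thumbnails for them. Assumes a valid Imgur client id in config.txt, links that
# end in plain file names, and an arbitrary thumbnail size of 128x128.
if __name__ == "__main__":
    client_id = get_imgur_client_id()
    directory = create_download_dir()
    for link in build_link_list(client_id, num_of_images=5):
        download_image_from_url(link, directory)
        create_thumbnail((128, 128), os.path.join(directory, os.path.basename(link)))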
|
# Copyright (c) 2020 Matthew Earl
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
import collections
import dataclasses
import functools
import logging
import math
import re
import time
from dataclasses import dataclass
from typing import Dict, List, Optional, Tuple
import bpy
import bpy_types
import numpy as np
from . import proto, bsp, mdl, blendmdl, blendbsp, simplex, blendpart
logger = logging.getLogger(__name__)
Vec3 = Tuple[float, float, float]
_NEAR_CLIP_PLANE = 8
_FAR_CLIP_PLANE = 2048
_EYE_HEIGHT = 22
def _patch_vec(old_vec: Vec3, update):
return tuple(v if u is None else u for v, u in zip(old_vec, update))
def _quake_to_blender_angles(quake_angles: Vec3) -> Vec3:
return (math.pi * (90. - quake_angles[0]) / 180,
math.pi * quake_angles[2] / 180,
math.pi * (quake_angles[1] - 90.) / 180)
def _fix_angles(old_angles, new_angles, degrees=False):
# Undo wrapping of the yaw
if degrees:
t = 360
else:
t = 2 * np.pi
old_yaw = old_angles[1] / t
new_yaw = new_angles[1] / t
i = old_yaw // 1
j = np.argmin(np.abs(np.array([i - 1, i, i + 1]) + new_yaw - old_yaw))
return (new_angles[0], (new_yaw + i + j - 1) * t, new_angles[2])
def _quake_angles_to_mat(angles):
# Ported from mathlib.c AngleVectors
    pitch = angles[0] * (np.pi / 180)
    yaw = angles[1] * (np.pi / 180)
    roll = angles[2] * (np.pi / 180)
sy, cy = np.sin(yaw), np.cos(yaw)
sp, cp = np.sin(pitch), np.cos(pitch)
sr, cr = np.sin(roll), np.cos(roll)
right = np.array([-1*sr*sp*cy + -1*cr*-sy, -1*sr*sp*sy + -1*cr*cy, -1*sr*cp])
forward = np.array([cp*cy, cp*sy, -sp])
up = np.array([cr*sp*cy + -sr*-sy, cr*sp*sy + -sr*cy, cr*cp])
return np.stack([right, forward, up], axis=1)
def _get_model_config(mdl_name, config):
mdls_cfg = config['models']
cfg = dict(mdls_cfg['__default__'])
cfg.update(mdls_cfg.get(mdl_name, {}))
return cfg
@dataclass
class _EntityInfo:
"""Information for a single entity slot"""
model_num: int
frame: int
skin: int
origin: Vec3
angles: Vec3
def update(self, msg: proto.ServerMessageUpdate, baseline: "_EntityInfo"):
def none_or(a, b):
return a if a is not None else b
angles = _fix_angles(self.angles, _patch_vec(baseline.angles, msg.angle))
return dataclasses.replace(
baseline,
model_num=none_or(msg.model_num, baseline.model_num),
frame=none_or(msg.frame, baseline.frame),
skin=none_or(msg.skin, baseline.skin),
origin=_patch_vec(baseline.origin, msg.origin),
angles=angles
)
_DEFAULT_BASELINE = _EntityInfo(0, 0, 0, (0, 0, 0), (0, 0, 0))
class SampleAsLightObject:
@property
def bbox(self):
raise NotImplementedError
@property
def leaf(self):
raise NotImplementedError
@property
def origin(self):
raise NotImplementedError
def add_keyframe(self, vis: bool, blender_frame: int):
raise NotImplementedError
class AliasModelSampleAsLightObject:
_bm: blendmdl.BlendMdl
_bb: blendbsp.BlendBsp
def __init__(self, bm, bb, mdl_cfg):
self._bm = bm
self._bb = bb
self._bbox = np.array(mdl_cfg['bbox'])
@property
def bbox(self):
return np.array(self._bm.obj.location) + self._bbox
@property
def leaf(self):
return self._bb.bsp.models[0].get_leaf_from_point(self._bm.obj.location)
def add_keyframe(self, vis: bool, blender_frame: int):
for bmat in self._bm.sample_as_light_mats:
bmat.add_sample_as_light_keyframe(vis, blender_frame)
@dataclass(eq=False)
class LeafSampleAsLightObject:
_leaf: bsp.Leaf
_mat: bpy.types.Material
_tex_cfg: Dict
_model_idx: int
_bb: blendbsp.BlendBsp
@property
def bbox(self):
if "bbox" not in self._tex_cfg:
raise Exception("Sample as light textures must have bounding boxes")
tex_bbox = np.array(self._tex_cfg['bbox'])
return np.stack([self._leaf.bbox.mins + tex_bbox[0],
self._leaf.bbox.maxs + tex_bbox[1]]) + self._model_origin
@property
def _model(self):
return self._bb.bsp.models[self._model_idx]
@property
def _model_origin(self):
return np.array(self._bb.model_objs[self._model].location)
@property
def leaf(self):
if self._model_idx == 0:
out = self._leaf
else:
leaf_origin = 0.5 * (np.array(self._leaf.bbox.mins) +
np.array(self._leaf.bbox.maxs))
origin = leaf_origin + self._model_origin
out = self._bb.bsp.models[0].get_leaf_from_point(origin)
return out
def add_keyframe(self, vis: bool, blender_frame: int):
self._mat.add_sample_as_light_keyframe(vis, blender_frame)
@classmethod
def create_from_bsp(cls, bb: blendbsp.BlendBsp):
if bb.sample_as_light_info:
return (
cls(leaf, mat, tex_cfg, model.id_, bb)
for model, model_info in bb.sample_as_light_info.items()
for leaf, leaf_info in model_info.items()
for mat, tex_cfg in leaf_info.items()
)
else:
return []
@dataclass
class ManagedObject:
fps: float
def _get_blender_frame(self, time):
return int(round(self.fps * time))
@property
def ignore_duplicate_updates(self) -> bool:
raise NotImplementedError
def add_pose_keyframe(self, pose_num: int, time: float):
raise NotImplementedError
def add_visible_keyframe(self, visible: bool, time: float):
raise NotImplementedError
def add_origin_keyframe(self, origin: Vec3, time: float):
raise NotImplementedError
def add_angles_keyframe(self, angles: Vec3, time: float):
raise NotImplementedError
def set_invisible_to_camera(self):
raise NotImplementedError
def done(self, final_time: float):
raise NotImplementedError
@dataclass
class AliasModelManagedObject(ManagedObject):
bm: blendmdl.BlendMdl
@property
def ignore_duplicate_updates(self) -> bool:
return True
def add_pose_keyframe(self, pose_num: int, time: float):
self.bm.add_pose_keyframe(pose_num, time, self.fps)
def add_visible_keyframe(self, visible: bool, time: float):
blender_frame = self._get_blender_frame(time)
for sub_obj in self.bm.sub_objs:
sub_obj.hide_render = not visible
sub_obj.keyframe_insert('hide_render', frame=blender_frame)
sub_obj.hide_viewport = not visible
sub_obj.keyframe_insert('hide_viewport', frame=blender_frame)
def add_origin_keyframe(self, origin: Vec3, time: float):
self.bm.obj.location = origin
self.bm.obj.keyframe_insert('location', frame=self._get_blender_frame(time))
def add_angles_keyframe(self, angles: Vec3, time: float):
# Should this use the other angles?
self.bm.obj.rotation_euler = (0., 0., angles[1])
if self.bm.am.header['flags'] & mdl.ModelFlags.ROTATE:
self.bm.obj.rotation_euler.z = time * 100. * np.pi / 180
self.bm.obj.keyframe_insert('rotation_euler', frame=self._get_blender_frame(time))
def set_invisible_to_camera(self):
self.bm.set_invisible_to_camera()
def done(self, final_time: float):
self.bm.done(final_time, self.fps)
@dataclass
class BspModelManagedObject(ManagedObject):
_bb: blendbsp.BlendBsp
_model_num: int
@property
def ignore_duplicate_updates(self) -> bool:
return False
def add_pose_keyframe(self, pose_num: int, time: float):
model = self._bb.bsp.models[self._model_num]
self._bb.add_material_frame_keyframe(model, pose_num, self._get_blender_frame(time))
@property
def _model(self):
return self._bb.bsp.models[self._model_num]
def add_visible_keyframe(self, visible: bool, time: float):
blender_frame = self._get_blender_frame(time)
self._bb.add_visible_keyframe(self._model, visible, blender_frame)
def add_origin_keyframe(self, origin: Vec3, time: float):
obj = self._bb.model_objs[self._model]
obj.location = origin
obj.keyframe_insert('location', frame=self._get_blender_frame(time))
def add_angles_keyframe(self, angles: Vec3, time: float):
pass
def done(self, final_time: float):
self._bb.add_animated_material_keyframes(self._get_blender_frame(final_time), final_time)
@dataclass
class NullManagedObject(ManagedObject):
@property
def ignore_duplicate_updates(self) -> bool:
return False
def add_pose_keyframe(self, pose_num: int, time: float):
pass
def add_visible_keyframe(self, visible: bool, time: float):
pass
def add_origin_keyframe(self, origin: Vec3, time: float):
pass
def add_angles_keyframe(self, angles: Vec3, time: float):
pass
def done(self, final_time: float):
pass
class ObjectManager:
def __init__(self, fs, config, fps, fov, width, height, world_obj_name='demo', load_level=True):
self._fs = fs
self._fps = fps
self._config = config
self._load_level = load_level
        self._pal = np.frombuffer(fs['gfx/palette.lmp'], dtype=np.uint8).reshape(256, 3) / 255
self._bb: Optional[blendbsp.BlendBsp] = None
self._objs: Dict[Tuple[int, int], ManagedObject] = {}
self._model_paths: Optional[List[str]] = None
self._view_entity_num: Optional[int] = None
self._static_objects: List[ManagedObject] = []
self._sample_as_light_objects: List[SampleAsLightObject] = []
self._sal_time: float = 0.
self._first_update_time: Optional[float] = None
self._intermission = False
self._num_explosions: int = 0
self._num_teleports: int = 0
self.world_obj = bpy.data.objects.new(world_obj_name, None)
bpy.context.scene.collection.objects.link(self.world_obj)
blendpart.get_particle_root().parent = self.world_obj
self._width, self._height = width, height
bpy.data.scenes['Scene'].render.resolution_x = width
bpy.data.scenes['Scene'].render.resolution_y = height
self._fov = fov
demo_cam = bpy.data.cameras.new(name="demo_cam")
demo_cam.angle = fov * np.pi / 180
demo_cam.clip_start = 0.04
self._demo_cam_obj = bpy.data.objects.new(name="demo_cam", object_data=demo_cam)
bpy.context.scene.collection.objects.link(self._demo_cam_obj)
self._demo_cam_obj.parent = self.world_obj
def set_intermission(self, i: bool):
self._intermission = i
def _path_to_bsp_name(self, bsp_path):
m = re.match(r"maps/([a-zA-Z0-9_]+).bsp", bsp_path)
if m is None:
raise Exception("Unexpected BSP path {mdl_path}")
return m.group(1)
def set_model_paths(self, model_paths: List[str]):
if self._model_paths is not None:
raise Exception("Model paths already set")
self._model_paths = model_paths
if self._load_level:
map_path = self._model_paths[0]
logger.info('Parsing bsp %s', map_path)
b = bsp.Bsp(self._fs.open(map_path))
map_name = self._path_to_bsp_name(map_path)
logger.info('Adding bsp %s', map_path)
self._bb = blendbsp.add_bsp(b, self._pal, map_name, self._config)
self._bb.map_obj.parent = self.world_obj
self._bb.hide_all_but_main()
self._sample_as_light_objects.extend(
LeafSampleAsLightObject.create_from_bsp(self._bb)
)
def _path_to_model_name(self, mdl_path):
m = re.match(r"progs/([A-Za-z0-9-_]*)\.mdl", mdl_path)
if m is None:
raise Exception("Unexpected model path {mdl_path}")
return m.group(1)
@functools.lru_cache(None)
def _load_alias_model(self, model_path):
return mdl.AliasModel(self._fs.open(model_path))
def set_view_entity(self, entity_num):
self._view_entity_num = entity_num
def create_static_object(self, model_num, frame, origin, angles, skin):
model_path = self._model_paths[model_num - 1]
am = mdl.AliasModel(self._fs.open(model_path))
mdl_name = self._path_to_model_name(model_path)
mdl_cfg = _get_model_config(mdl_name, self._config)
bm = blendmdl.add_model(am,
self._pal,
mdl_name,
f"static{len(self._static_objects)}",
skin,
mdl_cfg,
frame,
self._config['do_materials'])
bm.obj.parent = self.world_obj
bm.obj.location = origin
bm.obj.rotation_euler = (0., 0., angles[1])
self._static_objects.append(bm)
if bm.sample_as_light_mats:
self._sample_as_light_objects.append(
AliasModelSampleAsLightObject(bm, self._bb, mdl_cfg)
)
def create_teleport(self, pos, time):
obj_name = f'teleport{self._num_teleports}'
blendpart.create_teleport(time, obj_name, pos, self._fps)
self._num_teleports += 1
def create_explosion(self, pos, time):
obj_name = f'explosion{self._num_explosions}'
blendpart.create_explosion(time, obj_name, pos, self._fps)
self._num_explosions += 1
def _create_managed_object(self, entity_num, model_num, skin_num, initial_pose_num):
model_path = self._model_paths[model_num - 1] if model_num != 0 else None
if model_num == 0:
# Used to make objects disappear, eg. player at the end of the level
managed_obj = NullManagedObject(self._fps)
elif model_path.startswith('*'):
if self._load_level:
map_model_idx = int(model_path[1:])
managed_obj = BspModelManagedObject(self._fps, self._bb, map_model_idx)
else:
managed_obj = NullManagedObject(self._fps)
elif model_path.endswith('.mdl'):
am = self._load_alias_model(model_path)
mdl_name = self._path_to_model_name(model_path)
logger.info('Loading alias model %s', mdl_name)
mdl_cfg = _get_model_config(mdl_name, self._config)
bm = blendmdl.add_model(am,
self._pal,
mdl_name,
f'ent{entity_num}_{mdl_name}',
skin_num,
mdl_cfg,
initial_pose_num,
self._config['do_materials'])
bm.obj.parent = self.world_obj
managed_obj = AliasModelManagedObject(self._fps, bm)
if bm.sample_as_light_mats:
self._sample_as_light_objects.append(
AliasModelSampleAsLightObject(bm, self._bb, mdl_cfg)
)
elif model_path.endswith('.bsp'):
bsp_name = self._path_to_bsp_name(model_path)
logger.info('Loading bsp model %s', bsp_name)
b = bsp.Bsp(self._fs.open(model_path))
if len(b.models) != 1:
raise Exception(f"Expected one model in bsp model {bsp_name}, not {len(b.models)}")
bb = blendbsp.add_bsp(b, self._pal, bsp_name, self._config,
f'ent{entity_num}_')
bb.map_obj.parent = self.world_obj
managed_obj = BspModelManagedObject(self._fps, bb, 0)
else:
            logger.warning('Cannot handle model %r', model_path)
managed_obj = NullManagedObject(self._fps)
return managed_obj
def _view_simplex(self, view_origin, view_angles):
view_origin = np.array(view_origin)
aspect_ratio = self._width / self._height
tan_fov = np.tan(0.5 * self._fov * np.pi / 180)
if aspect_ratio > 1:
h_tan = tan_fov
v_tan = h_tan / aspect_ratio
else:
v_tan = tan_fov
h_tan = v_tan * aspect_ratio
constraints = np.array([
[-1, h_tan, 0, 0], # right
[1, h_tan, 0, 0], # left
[0, v_tan, 1, 0], # bottom
[0, v_tan, -1, 0], # top
[0, 1, 0, -_NEAR_CLIP_PLANE], # near
[0, -1, 0, _FAR_CLIP_PLANE], # far
])
constraints[:, :3] /= np.linalg.norm(constraints[:, :3], axis=1)[:, None]
rotation_matrix = _quake_angles_to_mat(view_angles)
constraints[:, :3] = constraints[:, :3] @ rotation_matrix.T
constraints[:, 3] -= np.einsum('ij,j->i', constraints[:, :3], view_origin)
return simplex.Simplex(3, constraints, np.array([False, True, False, True, False, True]))
def _simplex_bbox_test(self, bbox: np.ndarray, sx: simplex.Simplex):
bbox_simplex = simplex.Simplex.from_bbox(*bbox)
try:
bbox_simplex.intersect(sx)
except simplex.Infeasible:
intersect = False
else:
intersect = True
return intersect
def _update_sample_as_light(self, view_origin, view_angles, blender_frame, crude_test=True):
if not self._load_level:
# No PVS info so can't update sample as lights
return
start = time.perf_counter()
view_pvs = set(self._bb.bsp.models[0].get_leaf_from_point(view_origin).visible_leaves)
view_sx = self._view_simplex(view_origin, view_angles)
num_tests = 0
num_visible = 0
num_early_exits = 0
for sal_obj in self._sample_as_light_objects:
# Checking if the light PVS intersects with the view PVS.
pvs = view_pvs & set(sal_obj.leaf.visible_leaves)
vis = bool(pvs)
# Clip leaf PVS bboxes to the light bbox.
if vis:
leaf_bboxes = np.stack([[leaf.bbox.mins, leaf.bbox.maxs] for leaf in pvs])
bboxes = np.stack([
np.maximum(leaf_bboxes[:, 0, :], sal_obj.bbox[0][None, :]),
np.minimum(leaf_bboxes[:, 1, :], sal_obj.bbox[1][None, :])
], axis=1)
bboxes = bboxes[np.all(bboxes[:, 0] < bboxes[:, 1], axis=1)]
vis = bboxes.shape[0] != 0
# Test if a single bbox that bounds all of the above bboxes intersects the view
# frustum.
if vis:
crude_bbox = np.stack([
np.min(bboxes[:, 0, :], axis=0),
np.max(bboxes[:, 1, :], axis=0),
])
num_tests += 1
vis = self._simplex_bbox_test(crude_bbox, view_sx)
# Finally, check if any of the individual bboxes intersects the view frustum.
if not crude_test and vis:
for bbox in bboxes:
num_tests += 1
if self._simplex_bbox_test(bbox, view_sx):
break
else:
vis = False
else:
num_early_exits += 1
num_visible += vis
sal_obj.add_keyframe(vis, blender_frame)
self._sal_time += time.perf_counter() - start
logger.debug('frame: %s, frustum tests: %s, lights visible: %s, early exits: %s / %s, total sal time: %s',
blender_frame, num_tests, num_visible,
num_early_exits, len(self._sample_as_light_objects),
self._sal_time)
def update(self, time, prev_entities, entities, prev_updated, updated, view_angles):
blender_frame = int(round(self._fps * time))
if self._intermission:
view_angles = tuple(x * 180 / np.pi for x in entities[self._view_entity_num].angles)
# Hide any objects that weren't updated in this frame, or whose model changed.
for entity_num in prev_updated:
obj = self._objs[entity_num, prev_entities[entity_num].model_num]
if (prev_entities[entity_num].model_num != entities[entity_num].model_num
or entity_num not in updated):
obj.add_visible_keyframe(False, time)
# Insert keyframes so the object doesn't drift towards where the entity is re-used.
obj.add_origin_keyframe(prev_entities[entity_num].origin, time)
obj.add_angles_keyframe(prev_entities[entity_num].angles, time)
for entity_num in updated:
# Create managed objects where we don't already have one for the given entity num / model num.
ent = entities[entity_num]
model_num = entities[entity_num].model_num
key = entity_num, model_num
if key not in self._objs:
obj = self._create_managed_object(entity_num, model_num, ent.skin, ent.frame)
obj.add_visible_keyframe(False, 0)
self._objs[key] = obj
else:
obj = self._objs[key]
# Update position / rotation / pose
prev_ent = prev_entities.get(entity_num)
if (not obj.ignore_duplicate_updates or
prev_ent is None or
prev_ent.origin != ent.origin or
prev_ent.angles != ent.angles):
obj.add_origin_keyframe(ent.origin, time)
obj.add_angles_keyframe(ent.angles, time)
obj.add_pose_keyframe(ent.frame, time)
# Unhide objects that were updated this frame, or whose model changed.
for entity_num in updated:
if (entity_num not in prev_updated or
(entity_num in prev_entities and
prev_entities[entity_num].model_num != entities[entity_num].model_num)):
self._objs[entity_num, entities[entity_num].model_num].add_visible_keyframe(
True, time
)
# Pose camera
view_origin = entities[self._view_entity_num].origin
if not self._intermission:
view_origin = (view_origin[0], view_origin[1], view_origin[2] + _EYE_HEIGHT)
self._demo_cam_obj.location = view_origin
self._demo_cam_obj.keyframe_insert('location', frame=blender_frame)
self._demo_cam_obj.rotation_euler = _quake_to_blender_angles(view_angles)
self._demo_cam_obj.keyframe_insert('rotation_euler', frame=blender_frame)
# Set sample_as_light materials.
self._update_sample_as_light(view_origin, view_angles, blender_frame)
# Record for updating latest
if self._first_update_time is None:
self._first_update_time = time
def done(self, final_time: float):
# Animate static objects
for bm in self._static_objects:
bm.done(final_time, self._fps)
for obj in self._objs.values():
obj.done(final_time)
# Make the view entity invisible to camera rays
if self._config['hide_view_entity']:
for (entity_num, model_num), obj in self._objs.items():
if entity_num == self._view_entity_num and model_num != 0:
obj.set_invisible_to_camera()
# Set start / end frame
if self._first_update_time is not None:
bpy.data.scenes['Scene'].frame_start = int(round(self._first_update_time * self._fps))
bpy.data.scenes['Scene'].frame_end = int(round(final_time * self._fps))
def add_demo(demo_file, fs, config, fps=30, world_obj_name='demo',
load_level=True, relative_time=False, fov=120, width=1920, height=1080):
assert not relative_time, "Not yet supported"
baseline_entities: Dict[int, _EntityInfo] = collections.defaultdict(lambda: _DEFAULT_BASELINE)
entities: Dict[int, _EntityInfo] = {}
fixed_view_angles: Vec3 = (0, 0, 0)
prev_updated = set()
demo_done = False
obj_mgr = ObjectManager(fs, config, fps, fov, width, height, world_obj_name, load_level)
last_time = 0.
msg_iter = proto.read_demo_file(demo_file)
while not demo_done:
time = None
update_done = False
updated = set()
prev_entities = dict(entities)
while not update_done and not demo_done:
try:
msg_end, view_angles, parsed = next(msg_iter)
except StopIteration:
demo_done = True
break
update_done = msg_end and time is not None and entities
fixed_view_angles = _fix_angles(fixed_view_angles, view_angles, degrees=True)
if parsed.msg_type == proto.ServerMessageType.TIME:
time = parsed.time
if parsed.msg_type == proto.ServerMessageType.SERVERINFO:
obj_mgr.set_model_paths(parsed.models)
if parsed.msg_type == proto.ServerMessageType.SETVIEW:
obj_mgr.set_view_entity(parsed.viewentity)
if parsed.msg_type == proto.ServerMessageType.SETANGLE:
fixed_view_angles = parsed.view_angles
if parsed.msg_type == proto.ServerMessageType.SPAWNSTATIC:
obj_mgr.create_static_object(
parsed.model_num, parsed.frame, parsed.origin, parsed.angles, parsed.skin
)
if parsed.msg_type == proto.ServerMessageType.SPAWNBASELINE:
baseline_entities[parsed.entity_num] = _EntityInfo(
model_num=parsed.model_num,
frame=parsed.frame,
skin=parsed.skin,
origin=parsed.origin,
angles=parsed.angles,
)
if parsed.msg_type == proto.ServerMessageType.UPDATE:
baseline = baseline_entities[parsed.entity_num]
prev_info = entities.get(parsed.entity_num, baseline)
entities[parsed.entity_num] = prev_info.update(parsed, baseline)
updated.add(parsed.entity_num)
if parsed.msg_type == proto.ServerMessageType.TEMP_ENTITY:
if parsed.temp_entity_type == proto.TempEntityTypes.EXPLOSION:
obj_mgr.create_explosion(parsed.origin, time)
elif parsed.temp_entity_type == proto.TempEntityTypes.TELEPORT:
obj_mgr.create_teleport(parsed.origin, time)
if parsed.msg_type in (
proto.ServerMessageType.INTERMISSION,
proto.ServerMessageType.FINALE,
proto.ServerMessageType.CUTSCENE):
obj_mgr.set_intermission(True)
if update_done:
logger.debug('Handling update. time=%s', time)
obj_mgr.update(time, prev_entities, entities, prev_updated, updated, fixed_view_angles)
last_time = time
prev_updated = updated
obj_mgr.done(last_time)
return obj_mgr.world_obj, obj_mgr
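# Minimal usage sketch (illustrative only, meant to be run from Blender's Python).
# The filesystem object, config layout and file name below are assumptions, not
# part of this module: `fs` only needs to support fs['gfx/palette.lmp'] and
# fs.open(path), and `config` the keys used above ('models', 'do_materials',
# 'hide_view_entity', ...).
#
#     with open('/path/to/demo1.dem', 'rb') as demo_file:
#         world_obj, obj_mgr = add_demo(demo_file, fs, config, fps=30, fov=120,
#                                       width=1920, height=1080)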
|
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
import cgi
import cgitb
import os
from templates import secret_page
cgitb.enable()
class FollowingTheTAsInstructionsError(Exception):
def __init__(self):
Exception.__init__(
self, ("You must edit secret.py to change the username, password, "
"and to delete this error!"))
# Delete this line:
#raise FollowingTheTAsInstructionsError
# Edit the following two lines:
# password = form.getvalue('password')
username = 'qwert'
password = '12345'
#if username == entered_username and password == entered_password:
#!/usr/bin/python
#print("Set-Cookie:UserID = %s;\r\n" % username)
#print("Set-Cookie:Password = %s;\r\n" % password)
#print('Set-Cookie:Expires = Tuesday, 31-Dec-2021 23:12:40 GMT;\r\n')
#print('Set-Cookie:Domain = localhost;\r\n')
#print('Set-Cookie:Path = /perl;\r\n')
#print('Content-type: text/html\r\n\r\n')
#print(secret_page(username, password))
#for query in os.environ.keys():
#if query == 'HTTP_COOKIE':
#print("<b>%20s</b>: %s<br>" % (query, os.environ[query]))
#print(os.environ)
#return True
|
from .iterators import Iterable, Iterator, It
|
import re
# Example plugin for EditorFunctions type plugins
#
# The plugin allows installing new menu items and toolbar items and registering
# a function with each that is called. The function must accept one argument which
# is the instance of PersonalWikiFrame providing access to the editor and the data store.
#
# To register menu items implement the function describeMenuItems to return a
# sequence of tuples, each at least containing the callback function, the item string
# and an item tooltip (see below for details).
#
# To register a toolbar item implement the function describeToolbarItem to return
# a tuple at least containing the callback function, item label, tooltip and icon.
#
# both register functions must accept one argument which is again the
# PersonalWikiFrame instance
# descriptor for EditorFunctions plugin type
WIKIDPAD_PLUGIN = (("MenuFunctions",1),)
def describeMenuItems(wiki):
"""
wiki -- Calling PersonalWikiFrame
Returns a sequence of tuples to describe the menu items, where each must
contain (in this order):
- callback function
- menu item string
- menu item description (string to show in status bar)
It can contain the following additional items (in this order), each of
them can be replaced by None:
- icon descriptor (see below, if no icon found, it won't show one)
- menu item id.
- update function
- kind of menu item (wx.ITEM_NORMAL, wx.ITEM_CHECK)
The callback function must take 2 parameters:
wiki - Calling PersonalWikiFrame
evt - wx.CommandEvent
If the menu item string contains one or more vertical bars '|' these
are taken as delimiters to describe a "path" of submenus where
the item should be placed. E.g. the item string
"Admin|Maintenance|Reset Settings" will create in plugins menu
a submenu "Admin" containing a submenu "Maintenance" containing
the item "Reset Settings".
An icon descriptor can be one of the following:
- a wx.Bitmap object
- the filename of a bitmap (if file not found, no icon is used)
- a tuple of filenames, first existing file is used
"""
kb = wiki.getKeyBindings()
return ((autoNewNumbered, _(u"Create new page") + u"\t" +
kb.Plugin_AutoNew_Numbered, _(u"Create new page")),)
_testRE = re.compile(ur"^New[0-9]{6}$")
def autoNewNumbered(wiki, evt):
wiki.saveAllDocPages()
candidates = wiki.getWikiData().getWikiPageLinkTermsStartingWith(u"New")
candidates = filter(lambda w: _testRE.match(w), candidates)
numbers = map(lambda w: int(w[3:]), candidates)
if len(numbers) == 0:
nextNumber = 1
else:
nextNumber = max(numbers) + 1
wiki.openWikiPage(u"New%06i" % nextNumber)
dpp = wiki.getCurrentDocPagePresenter()
if dpp is None:
return
dpp.switchSubControl("textedit", True)
dpp.SetFocus()
|
import urllib
from pymd5 import md5, padding
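# Length-extension idea: given md5(password || message) and the length of
# (password || message), MD5's internal state can be resumed from that digest, so
# md5(password || message || padding || command3) is computable without knowing
# the password. pymd5 exposes this via md5(state=..., count=...), where count is
# the number of bits already processed, including the glue padding block.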
query = open('3.3_query.txt').read()
command3 = open('3.3_command3.txt').read()
print 'Query : ', query
token = query.split('=')[1].split('&')[0]
print 'Token : ', token
parameters = query.split('&')
print 'Parameters : ', parameters
message = parameters[1] + '&' + parameters[2] + '&' + parameters[3]
print 'Message (without 8-char password) : ', message
length = len(message) + 8 # message + 8-character password (1 char = 1 Byte)
print 'Length of Message (with 8-char password) : ', (length * 8)
print 'Length of Padding : ', (len(padding(length * 8)) * 8)
bits = (length + len(padding(length * 8))) * 8
print 'Total # of Bits : ', bits
h = md5(state = token.decode('hex'), count = bits)
h.update(command3)
modified_token = h.hexdigest()
print 'Modified Token : ', modified_token
updated_query = query.split('=')[0] + '=' + modified_token + '&' + message + urllib.quote(padding(length * 8)) + command3
print 'Updated Query : ', updated_query
output_file = open('solution33.txt', 'w')
output_file.write(updated_query)
output_file.close()
print '\nVerification Step : '
password = raw_input('Insert 8-character Password : ')
print 'Valid User Pass : ', password
message = str(password) + message
print 'Message (with 8-char Pass) : ', message
verification_query = message + padding(len(message) * 8) + command3
print 'Verification Query : ', verification_query
verification_token = md5(verification_query).hexdigest()
print 'Verification Token : ', verification_token
print 'Verification : ', modified_token == verification_token |
import logging
import sys
import pytest
from io import BytesIO
from pyfbx import FBXSerializationException
from pyfbx.serializers import BytesSerializer
logger = logging.getLogger("tests")
def test_serialize_deserialize_sequence():
bytes_serializer = BytesSerializer()
test_bytes = "bytes".encode("utf-8")
serialized = bytes_serializer.serialize(None, test_bytes)
deserialized = bytes_serializer.deserialize(None, str, BytesIO(serialized))
assert test_bytes == deserialized
def test_empty_bytes():
bytes_serializer = BytesSerializer()
serialized = bytes_serializer.serialize(None, b'')
deserialized = bytes_serializer.deserialize(None, str, BytesIO(serialized))
assert deserialized == b''
def test_deserialize_empty_bytes():
bytes_serializer = BytesSerializer()
with pytest.raises(FBXSerializationException):
bytes_serializer.deserialize(None, str, BytesIO(b''))
|
from . import base
__all__ = ["Jaccard", "SorensenDice"]
class Jaccard(base.MultiOutputClassificationMetric):
"""Jaccard index for binary multi-outputs.
The Jaccard index, or Jaccard similarity coefficient, defined as the size of the intersection
divided by the size of the union of two label sets, is used to compare the set of predicted
labels for a sample with the corresponding set of labels in `y_true`.
The Jaccard index may be a poor metric if there are no positives for some samples or labels.
The Jaccard index is undefined if there are no true or predicted labels, this implementation
will return a score of 0.0 if this is the case.
Parameters
----------
cm
This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a
confusion matrix reduces the amount of storage and computation time.
Examples
--------
>>> from river import metrics
>>> y_true = [
... {0: False, 1: True, 2: True},
... {0: True, 1: True, 2: False},
... ]
>>> y_pred = [
... {0: True, 1: True, 2: True},
... {0: True, 1: False, 2: False},
... ]
>>> jac = metrics.Jaccard()
>>> for yt, yp in zip(y_true, y_pred):
... jac = jac.update(yt, yp)
>>> jac
Jaccard: 0.583333
References
----------
[^1]: [Wikipedia section on similarity of asymmetric binary attributes](https://www.wikiwand.com/en/Jaccard_index#/Similarity_of_asymmetric_binary_attributes)
"""
@property
def bigger_is_better(self):
return True
@property
def requires_labels(self):
return True
def get(self):
try:
return self.cm.jaccard_sum / self.cm.n_samples
except ZeroDivisionError:
return 0.0
class SorensenDice(Jaccard):
r"""Sørensen-Dice coefficient.
Sørensen-Dice coefficient [^1] (or Sørensen Index, Dice's coefficient) is a statistic used to gauge
the similarity of two samples. Sørensen's original formula was intended to be applied to discrete
data. Given two sets, $X$ and $Y$, it is defined as:
$$
DSC = \frac{2 |X \cap Y|}{|X| + |Y|}.
$$
It is equal to twice the number of elements common to both sets divided by the sum of the number of
elements in each set.
The coefficient is not very different in form from the Jaccard index. The only difference between the
two metrics is that the Jaccard index only counts true positives once in both the numerator and
denominator. In fact, both are equivalent in the sense that given a value for the Sorensen-Dice index,
    one can calculate the respective Jaccard value and vice versa, using the equations
$$
\begin{equation}
J = \frac{S}{2-S}, \\ S = \frac{2J}{1+J}.
\end{equation}
$$
Parameters
----------
cm
This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a
confusion matrix reduces the amount of storage and computation time.
Examples
--------
>>> from river import metrics
>>> y_true = [
... {0: False, 1: True, 2: True},
... {0: True, 1: True, 2: False},
... ]
>>> y_pred = [
... {0: True, 1: True, 2: True},
... {0: True, 1: False, 2: False},
... ]
>>> sorensen_dice = metrics.SorensenDice()
>>> for yt, yp in zip(y_true, y_pred):
... sorensen_dice = sorensen_dice.update(yt, yp)
>>> sorensen_dice
SorensenDice: 0.736842
References
----------
[^1]: [Wikipedia article on Sørensen-Dice coefficient](https://en.wikipedia.org/wiki/Sørensen-Dice_coefficient)
"""
def get(self):
j = super().get()
return 2 * j / (1 + j)
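# Illustrative sketch (not part of the river API): the conversion formulas quoted in the
# SorensenDice docstring above, J = S / (2 - S) and S = 2J / (1 + J), checked against the
# values shown in the doctests.
if __name__ == "__main__":
    j = 7 / 12                            # 0.583333..., the Jaccard example value above
    s = 2 * j / (1 + j)                   # 14/19 = 0.736842..., the SorensenDice example value
    assert abs(s / (2 - s) - j) < 1e-12   # converting back recovers the Jaccard value
    print(round(j, 6), round(s, 6))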
|
from django.conf.urls import url
from . import views
app_name = 'words'
urlpatterns = [
url(r'^$', views.AbbreviationListView.as_view(),
name='abbreviation_browse'),
url(r'^detail/(?P<pk>[0-9]+)$', views.AbbreviationDetailView.as_view(),
name='abbreviation_detail'),
url(r'^create/$', views.AbbreviationCreate.as_view(),
name='abbreviation_create'),
url(r'^edit/(?P<pk>[0-9]+)$', views.AbbreviationUpdate.as_view(),
name='abbreviation_edit'),
url(r'^delete/(?P<pk>[0-9]+)$', views.AbbreviationDelete.as_view(),
name='abbreviation_delete'),
    url(r'^download-csv/$', views.AbbreviationDownloadView.as_view(), name='dl_csv_link'),
]
|
#encoding: utf-8
from __future__ import print_function
from __future__ import unicode_literals
import pytest
from xml.etree import cElementTree as ET
from bslib.xml import Request, Collection, Result, ResultList, StatusElement, Comments
import os.path
def _a(path):
return os.path.join(
os.path.dirname(
os.path.abspath(__file__)),
path)
#TBD: this is utterly incomplete!!!
def test_request_from_apidocs():
rq = Request.fromxml(ET.parse(_a("request.xml")).getroot())
assert rq.reqid == 12
assert rq.title == "Kraft"
assert rq.description[:21] == "Kraft is KDE software"
assert rq.accept_at == "2009-12-22T23:00:00"
assert rq.state.name == "superseded"
assert len(rq.actions) == 10
assert len(rq.reviews) == 2
assert len(rq.history) == 2
def test_collection_from_apidocs():
cl = Collection.fromxml(ET.parse(_a("collection.xml")).getroot())
assert len(cl) == 2
assert cl[0] != cl[1]
with pytest.raises(IndexError):
cl[42]
for el in cl:
assert isinstance(el, Request)
def test_resultlist_from_apidocs():
rl = ResultList.fromxml(ET.parse(_a("pkgresult.xml")).getroot())
assert len(rl) == 18
assert isinstance(rl[0], Result)
res = rl[0]
assert res.project == "Base:System"
assert len(res.statuslist) == 1
st = res.statuslist[0]
assert isinstance(st, StatusElement)
assert st.package == "gpg2"
def test_mandatory_attrs_variables():
rq = Request.fromxml(ET.parse(_a("request.xml")).getroot())
h = rq.history[0]
assert hasattr(h, "_mandatory_attrs")
assert hasattr(h, "_attrs")
assert h._mandatory_attrs == ("name", )
assert h._attrs == ("who", "when")
def test_comments():
comments = Comments.fromxml(ET.parse(_a("comments.xml")).getroot())
assert comments.comment_for == ("request", "221303")
assert len(comments.comments) == 3
c = comments.comments
assert c[1]._asdict() == {
"who" : "coolo",
"when" : "2014-02-11 09:52:47 UTC",
"id" : "1167",
"parent" : "1166",
"text" : "Conflicts: yast2-installation < THEONE"}
|
import os
import click
from .config_json_parser import ClpipeConfigParser
from .batch_manager import BatchManager, Job
import logging
@click.command()
@click.option('-config_file', required = False ,type=click.Path(exists=False, dir_okay=False, file_okay=True), default=None,
help='A config file. Optional if you have a batch_config specified')
@click.option('-batch_config', type=click.Path(exists=False, dir_okay=False, file_okay=True),
help='A batch config file. Optional if a batch_config is provided in the supplied config file.')
@click.option('-log_dir', type=click.Path(exists=False, dir_okay=True, file_okay=False),
help='Where to put the test output. Defaults to current working directory', default = os.getcwd())
@click.option('-submit', is_flag=True, default=False, help='Flag to submit commands to the HPC')
def test_batch_setup(config_file = None, batch_config = None, log_dir = None, submit = None):
config = ClpipeConfigParser()
config.config_updater(config_file)
if batch_config is not None:
config.config['BatchConfig'] = batch_config
batch_manager = BatchManager(config.config['BatchConfig'], os.path.abspath(log_dir))
batch_manager.update_email(config.config["EmailAddress"])
os.makedirs(os.path.abspath(log_dir), exist_ok=True)
submission_string = 'python3 -c \\\"print(\\\\\\\"Hello Cluster\\\\\\\")\\\"'
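    # Note on the escaping above: the doubled backslash-quotes are meant to survive an extra
    # layer of shell quoting when BatchManager wraps the command for submission (intent inferred
    # from the escaping; the exact wrapping depends on the BatchManager implementation).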
test_IDs = ["Test-" + str(i) for i in range(10)]
for ID in test_IDs:
batch_manager.addjob(Job(ID, submission_string))
batch_manager.compilejobstrings()
if submit:
batch_manager.submit_jobs()
else:
batch_manager.print_jobs()
|
from win32com.shell import shell, shellcon
import os
import glob
import operator
import re
import time
import win32api
import win32con
import win32process
import pythoncom
import logging
unlearn_open_undo = []
my_documents_dir = shell.SHGetFolderPath(0, shellcon.CSIDL_PERSONAL, 0, 0)
LEARN_AS_DIR = os.path.join(my_documents_dir, u"Enso's Learn As Open Commands")
# Check if Learn-as dir exist and create it if not
if (not os.path.isdir(LEARN_AS_DIR)):
os.makedirs(LEARN_AS_DIR)
SHORTCUT_TYPE_EXECUTABLE = 'x'
SHORTCUT_TYPE_FOLDER = 'f'
SHORTCUT_TYPE_URL = 'u'
SHORTCUT_TYPE_DOCUMENT = 'd'
SHORTCUT_TYPE_CONTROL_PANEL = 'c'
def _cpl_exists(cpl_name):
return (
os.path.isfile(
os.path.expandvars("${WINDIR}\\%s.cpl") % cpl_name)
or os.path.isfile(
os.path.expandvars("${WINDIR}\\system32\\%s.cpl") % cpl_name)
)
control_panel_applets = [i[:3] for i in (
(SHORTCUT_TYPE_CONTROL_PANEL,
u"control panel",
"rundll32.exe shell32.dll,Control_RunDLL"),
(SHORTCUT_TYPE_CONTROL_PANEL,
u"accessibility options (control panel)",
"rundll32.exe shell32.dll,Control_RunDLL access.cpl"),
#accessibility options (Keyboard):
# rundll32.exe shell32.dll,Control_RunDLL access.cpl,,1
#accessibility options (Sound):
# rundll32.exe shell32.dll,Control_RunDLL access.cpl,,2
#accessibility options (Display):
# rundll32.exe shell32.dll,Control_RunDLL access.cpl,,3
#accessibility options (Mouse):
# rundll32.exe shell32.dll,Control_RunDLL access.cpl,,4
#accessibility options (General):
# rundll32.exe shell32.dll,Control_RunDLL access.cpl,,5
(SHORTCUT_TYPE_CONTROL_PANEL,
u"add or remove programs (control panel)",
"rundll32.exe shell32.dll,Control_RunDLL appwiz.cpl"),
#add or remove programs (Install/Uninstall):
# rundll32.exe shell32.dll,Control_RunDLL appwiz.cpl,,1
#add or remove programs (Windows Setup):
# rundll32.exe shell32.dll,Control_RunDLL appwiz.cpl,,2
#add or remove programs (Startup Disk):
# rundll32.exe shell32.dll,Control_RunDLL appwiz.cpl,,3
(SHORTCUT_TYPE_CONTROL_PANEL,
u"display properties (control panel)",
"rundll32.exe shell32.dll,Control_RunDLL desk.cpl"),
#Display Properties (Background):
# rundll32.exe shell32.dll,Control_RunDLL desk.cpl,,0
#Display Properties (Screen Saver):
# rundll32.exe shell32.dll,Control_RunDLL desk.cpl,,1
#Display Properties (Appearance):
# rundll32.exe shell32.dll,Control_RunDLL desk.cpl,,2
#Display Properties (Settings):
# rundll32.exe shell32.dll,Control_RunDLL desk.cpl,,3
(SHORTCUT_TYPE_CONTROL_PANEL,
u"regional and language options (control panel)",
"rundll32.exe shell32.dll,Control_RunDLL intl.cpl"),
#Regional Settings Properties (Regional Settings):
# rundll32.exe shell32.dll,Control_RunDLL intl.cpl,,0
#Regional Settings Properties (Number):
# rundll32.exe shell32.dll,Control_RunDLL intl.cpl,,1
#Regional Settings Properties (Currency):
# rundll32.exe shell32.dll,Control_RunDLL intl.cpl,,2
#Regional Settings Properties (Time):
# rundll32.exe shell32.dll,Control_RunDLL intl.cpl,,3
#Regional Settings Properties (Date):
# rundll32.exe shell32.dll,Control_RunDLL intl.cpl,,4
(SHORTCUT_TYPE_CONTROL_PANEL,
u"game controllers (control panel)",
"rundll32.exe shell32.dll,Control_RunDLL joy.cpl"),
(SHORTCUT_TYPE_CONTROL_PANEL,
u"mouse properties (control panel)",
"rundll32.exe shell32.dll,Control_RunDLL main.cpl @0"),
(SHORTCUT_TYPE_CONTROL_PANEL,
u"keyboard properties (control panel)",
"rundll32.exe shell32.dll,Control_RunDLL main.cpl @1"),
# DOES NOT WORK
#Printers:
# rundll32.exe shell32.dll,Control_RunDLL main.cpl @2
# DOES NOT WORK
#Fonts:
# rundll32.exe shell32.dll,Control_RunDLL main.cpl @3
(SHORTCUT_TYPE_CONTROL_PANEL,
u"microsoft exchange profiles (control panel)",
"rundll32.exe shell32.dll,Control_RunDLL mlcfg32.cpl",
_cpl_exists("mlcfg32")),
(SHORTCUT_TYPE_CONTROL_PANEL,
u"sounds and audio devices (control panel)",
"rundll32.exe shell32.dll,Control_RunDLL mmsys.cpl"),
#Multimedia Properties (Audio):
# rundll32.exe shell32.dll,Control_RunDLL mmsys.cpl,,0
#Multimedia Properties (Video):
# rundll32.exe shell32.dll,Control_RunDLL mmsys.cpl,,1
#Multimedia Properties (MIDI):
# rundll32.exe shell32.dll,Control_RunDLL mmsys.cpl,,2
#Multimedia Properties (CD Music):
# rundll32.exe shell32.dll,Control_RunDLL mmsys.cpl,,3
#Multimedia Properties (Advanced):
# rundll32.exe shell32.dll,Control_RunDLL mmsys.cpl,,4
(SHORTCUT_TYPE_CONTROL_PANEL,
u"modem properties (control panel)",
"rundll32.exe shell32.dll,Control_RunDLL modem.cpl",
_cpl_exists("modem")),
(SHORTCUT_TYPE_CONTROL_PANEL,
u"network connections (control panel)",
"RUNDLL32.exe SHELL32.DLL,Control_RunDLL NCPA.CPL"),
#Password Properties (Change Passwords):
# rundll32.exe shell32.dll,Control_RunDLL password.cpl
(SHORTCUT_TYPE_CONTROL_PANEL,
u"system properties (control panel)",
"rundll32.exe shell32.dll,Control_RunDLL sysdm.cpl,,0"),
(SHORTCUT_TYPE_CONTROL_PANEL,
u"device manager (control panel)",
#"rundll32.exe shell32.dll,Control_RunDLL sysdm.cpl,,1"
"devmgmt.msc"),
(SHORTCUT_TYPE_CONTROL_PANEL,
u"disk management (control panel)",
"diskmgmt.msc"),
(SHORTCUT_TYPE_CONTROL_PANEL,
u"scanners and cameras (control panel)",
"control.exe sticpl.cpl"),
(SHORTCUT_TYPE_CONTROL_PANEL,
u"removable storage (control panel)",
"ntmsmgr.msc"),
#dfrg.msc Disk defrag
#eventvwr.msc Event viewer
#eventvwr.exe \\computername View the Event Log at a remote computer
#fsmgmt.msc Shared folders
#gpedit.msc Group policies
#lusrmgr.msc Local users and groups
#perfmon.msc Performance monitor
#rsop.msc Resultant set of policies
#secpol.msc Local security settings
#services.msc Various Services
(SHORTCUT_TYPE_CONTROL_PANEL,
u"hardware profiles (control panel)",
"rundll32.exe shell32.dll,Control_RunDLL sysdm.cpl,,2"),
(SHORTCUT_TYPE_CONTROL_PANEL,
u"advanced system properties (control panel)",
"rundll32.exe shell32.dll,Control_RunDLL sysdm.cpl,,3"),
#Add New Hardware Wizard:
# rundll32.exe shell32.dll,Control_RunDLL sysdm.cpl @1
(SHORTCUT_TYPE_CONTROL_PANEL,
u"date and time (control panel)",
"rundll32.exe shell32.dll,Control_RunDLL timedate.cpl"),
#Microsoft Workgroup Postoffice Admin:
# rundll32.exe shell32.dll,Control_RunDLL wgpocpl.cpl
#Open With (File Associations):
# rundll32.exe shell32.dll,OpenAs_RunDLL d:\path\filename.ext
#Run Diskcopy Dialog:
# rundll32 diskcopy.dll,DiskCopyRunDll
#Create New Shortcut Wizard:
# 'puts the new shortcut in the location specified by %1
# rundll32.exe AppWiz.Cpl,NewLinkHere %1
(SHORTCUT_TYPE_CONTROL_PANEL,
u"add new hardware wizard (control panel)",
"rundll32.exe shell32.dll,Control_RunDLL hdwwiz.cpl @1"),
(SHORTCUT_TYPE_CONTROL_PANEL,
u"add printer wizard (control panel)",
"rundll32.exe shell32.dll,SHHelpShortcuts_RunDLL AddPrinter"),
#(SHORTCUT_TYPE_CONTROL_PANEL,
# u"dialup networking wizard (cp)",
# "rundll32.exe rnaui.dll,RnaWizard"),
#Open a Scrap Document:
# rundll32.exe shscrap.dll,OpenScrap_RunDLL /r /x %1
#Create a Briefcase:
# rundll32.exe syncui.dll,Briefcase_Create
(SHORTCUT_TYPE_CONTROL_PANEL,
u"printers and faxes (control panel)",
"rundll32.exe shell32.dll,SHHelpShortcuts_RunDLL PrintersFolder"),
(SHORTCUT_TYPE_CONTROL_PANEL,
u"fonts (control panel)",
"rundll32.exe shell32.dll,SHHelpShortcuts_RunDLL FontsFolder"),
(SHORTCUT_TYPE_CONTROL_PANEL,
u"windows firewall (control panel)",
"rundll32.exe shell32.dll,Control_RunDLL firewall.cpl"),
(SHORTCUT_TYPE_CONTROL_PANEL,
u"speech properties (control panel)",
"rundll32.exe shell32.dll,Control_RunDLL \"${COMMONPROGRAMFILES}\\Microsoft Shared\\Speech\\sapi.cpl\"",
os.path.isfile(os.path.expandvars("${COMMONPROGRAMFILES}\\Microsoft Shared\\Speech\\sapi.cpl"))),
(SHORTCUT_TYPE_CONTROL_PANEL,
u"internet options (control panel)",
"rundll32.exe shell32.dll,Control_RunDLL inetcpl.cpl"),
(SHORTCUT_TYPE_CONTROL_PANEL,
u"odbc data source administrator (control panel)",
"rundll32.exe shell32.dll,Control_RunDLL odbccp32.cpl"),
(SHORTCUT_TYPE_CONTROL_PANEL,
u"power options (control panel)",
"rundll32.exe shell32.dll,Control_RunDLL powercfg.cpl"),
(SHORTCUT_TYPE_CONTROL_PANEL,
u"bluetooth properties (control panel)",
"control.exe bhtprops.cpl",
_cpl_exists("bhtprops")),
#Pick a Time Zone Dialog:
# rundll32.exe shell32.dll,Control_RunDLL timedate.cpl,,/f
) if len(i) < 4 or i[3]]
#print control_panel_applets
class _PyShortcut():
def __init__( self, base ):
self._base = base
self._base_loaded = False
self._shortcut_type = None
def load( self, filename = None):
if filename:
self._filename = filename
try:
self._base.QueryInterface( pythoncom.IID_IPersistFile ).Load( self._filename )
except:
logging.error("Error loading shell-link for file %s" % self._filename)
self._base_loaded = True
def save( self, filename = None):
if filename:
self._filename = filename
self._base.QueryInterface( pythoncom.IID_IPersistFile ).Save( self._filename, 0 )
def get_filename(self):
return self._filename
def get_type(self):
if not self._base_loaded:
raise Exception("Shortcut data has not been loaded yet. Use load(filename) before using get_type()")
name, ext = os.path.splitext(self._filename)
if ext.lower() == '.lnk':
file_path = self._base.GetPath(0)
if file_path and file_path[0]:
if os.path.isdir(file_path[0]):
self._shortcut_type = SHORTCUT_TYPE_FOLDER
elif (os.path.splitext(file_path[0])[1].lower()
in ('.exe', '.com', '.cmd', '.bat')):
self._shortcut_type = SHORTCUT_TYPE_EXECUTABLE
else:
self._shortcut_type = SHORTCUT_TYPE_DOCUMENT
else:
self._shortcut_type = SHORTCUT_TYPE_DOCUMENT
elif ext.lower() == '.url':
self._shortcut_type = SHORTCUT_TYPE_URL
else:
self._shortcut_type = SHORTCUT_TYPE_DOCUMENT
return self._shortcut_type
def __getattr__( self, name ):
if name != "_base":
return getattr( self._base, name )
class PyShellLink(_PyShortcut):
def __init__( self ):
base = pythoncom.CoCreateInstance(
shell.CLSID_ShellLink,
None,
pythoncom.CLSCTX_INPROC_SERVER,
shell.IID_IShellLink
)
_PyShortcut.__init__(self, base)
class PyInternetShortcut(_PyShortcut):
def __init__( self ):
base = pythoncom.CoCreateInstance(
shell.CLSID_InternetShortcut,
None,
pythoncom.CLSCTX_INPROC_SERVER,
shell.IID_IUniformResourceLocator
)
_PyShortcut.__init__(self, base)
def expand_path_variables(file_path):
import re
re_env = re.compile(r'%\w+%')
def expander(mo):
return os.environ.get(mo.group()[1:-1], 'UNKNOWN')
return os.path.expandvars(re_env.sub(expander, file_path))
def displayMessage(msg):
import enso.messages
enso.messages.displayMessage("<p>%s</p>" % msg)
ignored = re.compile("(uninstall|read ?me|faq|f.a.q|help)", re.IGNORECASE)
"""
def get_control_panel_applets():
import _winreg as reg
reghandle = None
cpl_applets = []
try:
regkey = None
try:
reghandle = reg.ConnectRegistry(None, reg.HKEY_LOCAL_MACHINE)
key = "SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Control Panel\\Cpls"
regkey = reg.OpenKey(reghandle, key)
index = 0
try:
while True:
regval = reg.EnumValue(regkey, index)
cpl_applets.append((
SHORTCUT_TYPE_CONTROL_PANEL,
regval[0].lower().replace("/"," ") + " (control panel)",
regval[1]))
index += 1
except Exception, e:
pass
except Exception, e:
print e
finally:
if regkey:
reg.CloseKey(regkey)
regkey = None
try:
key = "SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Explorer\\ControlPanel\\Namespace"
regkey = reg.OpenKey(reghandle, key)
index = 0
try:
while True:
cplkey = reg.EnumKey(regkey, index)
regkey1 = None
try:
regkey1 = reg.OpenKey(reghandle, key + "\\" + cplkey)
cpl_applets.append((
SHORTCUT_TYPE_CONTROL_PANEL,
reg.QueryValueEx(regkey1, "Name")[0].lower().replace("/"," ") + " (control panel)",
reg.QueryValueEx(regkey1, "Module")[0]))
except:
pass
finally:
if regkey1:
reg.CloseKey(regkey1)
index += 1
except Exception, e:
pass
except Exception, e:
print e
finally:
if regkey:
reg.CloseKey(regkey)
finally:
if reghandle:
reg.CloseKey(reghandle)
return cpl_applets
print get_control_panel_applets()
"""
def get_shortcuts(directory):
shortcuts = []
sl = PyShellLink()
for dirpath, dirnames, filenames in os.walk(directory):
for filename in filenames:
if ignored.search(filename):
continue
name, ext = os.path.splitext(filename)
if not ext.lower() in (".lnk", ".url"):
continue
#print name, ext
shortcut_type = SHORTCUT_TYPE_DOCUMENT
if ext.lower() == ".lnk":
sl.load(os.path.join(dirpath, filename))
shortcut_type = sl.get_type()
elif ext.lower() == ".url":
shortcut_type = SHORTCUT_TYPE_URL
shortcuts.append((shortcut_type, name.lower(), os.path.join(dirpath, filename)))
return shortcuts
def reload_shortcuts_map():
desktop_dir = shell.SHGetFolderPath(0, shellcon.CSIDL_DESKTOPDIRECTORY, 0, 0)
quick_launch_dir = os.path.join(
shell.SHGetFolderPath(0, shellcon.CSIDL_APPDATA, 0, 0),
"Microsoft",
"Internet Explorer",
"Quick Launch")
start_menu_dir = shell.SHGetFolderPath(0, shellcon.CSIDL_STARTMENU, 0, 0)
common_start_menu_dir = shell.SHGetFolderPath(0, shellcon.CSIDL_COMMON_STARTMENU, 0, 0)
#control_panel = shell.SHGetFolderPath(0, shellcon.CSIDL_CONTROLS, 0, 0)
shortcuts = get_shortcuts(LEARN_AS_DIR) + \
get_shortcuts(desktop_dir) + \
get_shortcuts(quick_launch_dir) + \
get_shortcuts(start_menu_dir) + \
get_shortcuts(common_start_menu_dir) + \
control_panel_applets
return dict((s[1], s) for s in shortcuts)
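# Note: the map above is keyed by the lowercased shortcut name, so when the same name appears
# in several scanned locations the entry listed last (control panel applets come last) wins.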
shortcuts_map = reload_shortcuts_map()
def cmd_open(ensoapi, target):
""" Continue typing to open an application or document """
displayMessage(u"Opening <command>%s</command>..." % target)
try:
global shortcuts_map
shortcut_type, shortuct_id, file_path = shortcuts_map[target]
file_path = os.path.normpath(expand_path_variables(file_path))
logging.info("Executing '%s'" % file_path)
if shortcut_type == SHORTCUT_TYPE_CONTROL_PANEL:
if " " in file_path:
executable = file_path[0:file_path.index(' ')]
params = file_path[file_path.index(' ')+1:]
else:
executable = file_path
params = None
try:
rcode = win32api.ShellExecute(
0,
'open',
executable,
params,
None,
win32con.SW_SHOWDEFAULT)
except Exception, e:
logging.error(e)
else:
os.startfile(file_path)
return True
except Exception, e:
logging.error(e)
return False
cmd_open.valid_args = [s[1] for s in shortcuts_map.values()]
def cmd_open_with(ensoapi, application):
""" Opens your currently selected file(s) or folder with the specified application """
seldict = ensoapi.get_selection()
if seldict.get('files'):
file = seldict['files'][0]
elif seldict.get('text'):
file = seldict['text'].strip()
else:
file = None
if not (file and (os.path.isfile(file) or os.path.isdir(file))):
ensoapi.display_message(u"No file or folder is selected")
return
displayMessage(u"Opening <command>%s</command>..." % application)
#print file, application
global shortcuts_map
try:
print shortcuts_map[application][2]
print shortcuts_map[application]
executable = expand_path_variables(shortcuts_map[application][2])
    except:
        print application
        print shortcuts_map.keys()
        print shortcuts_map.values()
        return
try:
rcode = win32api.ShellExecute(
0,
'open',
executable,
'"%s"' % file,
os.path.dirname(file),
win32con.SW_SHOWDEFAULT)
except Exception, e:
logging.error(e)
cmd_open_with.valid_args = [s[1] for s in shortcuts_map.values() if s[0] == SHORTCUT_TYPE_EXECUTABLE]
def is_url(text):
urlfinders = [
re.compile("([0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}|(((news|telnet|nttp|file|http|ftp|https)://)|(www|ftp)[-A-Za-z0-9]*\\.)[-A-Za-z0-9\\.]+)(:[0-9]*)?/[-A-Za-z0-9_\\$\\.\\+\\!\\*\\(\\),;:@&=\\?/~\\#\\%]*[^]'\\.}>\\),\\\"]"),
re.compile("([0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}|(((news|telnet|nttp|file|http|ftp|https)://)|(www|ftp)[-A-Za-z0-9]*\\.)[-A-Za-z0-9\\.]+)(:[0-9]*)?"),
re.compile("(~/|/|\\./)([-A-Za-z0-9_\\$\\.\\+\\!\\*\\(\\),;:@&=\\?/~\\#\\%]|\\\\)+"),
re.compile("'\\<((mailto:)|)[-A-Za-z0-9\\.]+@[-A-Za-z0-9\\.]+"),
]
for urltest in urlfinders:
        if urltest.search(text):
return True
return False
def cmd_learn_as_open(ensoapi, name):
""" Learn to open a document or application as {name} """
if name is None:
displayMessage(u"You must provide name")
return
seldict = ensoapi.get_selection()
if seldict.get('files'):
file = seldict['files'][0]
elif seldict.get('text'):
file = seldict['text'].strip()
else:
ensoapi.display_message(u"No file is selected")
return
if not os.path.isfile(file) and not os.path.isdir(file) and not is_url(file):
displayMessage(
u"Selection represents no existing file, folder or URL.")
return
file_name = name.replace(":", "").replace("?", "").replace("\\", "")
file_path = os.path.join(LEARN_AS_DIR, file_name)
if os.path.isfile(file_path + ".url") or os.path.isfile(file_path + ".lnk"):
displayMessage(
u"<command>open %s</command> already exists. Please choose another name."
% name)
return
if is_url(file):
shortcut = PyInternetShortcut()
shortcut.SetURL(file)
shortcut.QueryInterface( pythoncom.IID_IPersistFile ).Save(
file_path + ".url", 0 )
else:
shortcut = PyShellLink()
shortcut.SetPath(file)
shortcut.SetWorkingDirectory(os.path.dirname(file))
shortcut.SetIconLocation(file, 0)
shortcut.QueryInterface( pythoncom.IID_IPersistFile ).Save(
file_path + ".lnk", 0 )
#time.sleep(0.5)
global shortcuts_map
shortcuts_map = reload_shortcuts_map()
cmd_open.valid_args = [s[1] for s in shortcuts_map.values()]
cmd_open_with.valid_args = [s[1] for s in shortcuts_map.values() if s[0] == SHORTCUT_TYPE_EXECUTABLE]
cmd_unlearn_open.valid_args = [s[1] for s in shortcuts_map.values()]
displayMessage(u"<command>open %s</command> is now a command" % name)
def cmd_unlearn_open(ensoapi, name):
u""" Unlearn \u201copen {name}\u201d command """
file_path = os.path.join(LEARN_AS_DIR, name)
if os.path.isfile(file_path + ".lnk"):
sl = PyShellLink()
sl.load(file_path + ".lnk")
unlearn_open_undo.append([name, sl])
os.remove(file_path + ".lnk")
elif os.path.isfile(file_path + ".url"):
sl = PyInternetShortcut()
sl.load(file_path + ".url")
unlearn_open_undo.append([name, sl])
os.remove(file_path + ".url")
global shortcuts_map
shortcuts_map = reload_shortcuts_map()
cmd_open.valid_args = [s[1] for s in shortcuts_map.values()]
cmd_open_with.valid_args = [s[1] for s in shortcuts_map.values() if s[0] == SHORTCUT_TYPE_EXECUTABLE]
cmd_unlearn_open.valid_args = [s[1] for s in shortcuts_map.values()]
displayMessage(u"Unlearned <command>open %s</command>" % name)
cmd_unlearn_open.valid_args = [s[1] for s in shortcuts_map.values()]
def cmd_undo_unlearn(ensoapi):
u""" Undoes your last \u201cunlearn open\u201d command """
if len(unlearn_open_undo) > 0:
name, sl = unlearn_open_undo.pop()
sl.save()
displayMessage(u"Undo successful. <command>open %s</command> is now a command" % name)
else:
ensoapi.display_message(u"There is nothing to undo")
if __name__ == "__main__":
import doctest
doctest.testmod()
# vim:set ff=unix tabstop=4 shiftwidth=4 expandtab:
|
import socket
import re
import uuid
from struct import pack, unpack
from select import select
def poll(sock, timeout):
return sock in select([sock], [], [], timeout)[0]
class SendError(Exception):
pass
class ReceiveError(Exception):
pass
class Client(object):
re_aggregate_response = re.compile(r'AggregateResponse[^0-9-]+(-?[0-9]+)[^0-9-]+(-?[0-9]+)')
re_level_response = re.compile(r'LevelResponse[^0-9-]+([0-9]+|-)')
def __init__(self, host, port, sock=None):
if not sock:
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect((host, port))
else:
self.sock = sock
def send_msg(self, msg):
n = self.sock.send(pack("<I", len(msg)))
if n < 4:
raise SendError
else:
self.sock.send(msg)
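    # Wire format shared by send_msg/recv_msg: a 4-byte little-endian length prefix
    # (struct format "<I") followed by the plain-text payload, e.g. "AggregateRequest".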
def recv_msg(self, re):
len_bytes = self.sock.recv(4)
if len_bytes == '':
raise ReceiveError
else:
len = unpack("<I", len_bytes)[0]
data = self.sock.recv(len)
if data == '':
raise ReceiveError
else:
return self.parse_response(data, re)
def send_local(self, local1, local2):
self.send_msg('Local' + ' ' + str(local1) + ' ' + str(local2))
def send_send_aggregate(self):
self.send_msg('SendAggregate')
def send_broadcast(self):
self.send_msg('Broadcast')
def send_aggregate_request(self):
self.send_msg('AggregateRequest')
return self.recv_msg(self.re_aggregate_response)
def send_level_request(self):
self.send_msg('LevelRequest')
return self.recv_msg(self.re_level_response)
def deserialize(self, data):
if data == '-':
return None
return data
def parse_response(self, data, re):
try:
match = re.match(data)
return [self.deserialize(match.group(n)) for n in (1,2)]
except Exception as e:
print "Parse error, data=%s" % data
raise e
|
class circularQueue(object):
def __init__(self, size):
self.__size = size
self.__queue = [None]*size
self.__head = 0
self.__tail = 0
def enqueue(self, item):
print("TAIL", self.__tail)
if not(self.__isBefore()):
self.__queue[self.__tail] = item
self.__updateTail()
else:
raise QueueError("Queue full")
def dequeue(self):
if not(self.__isEmpty()):
item = self.__queue[self.__head]
self.__updateHead()
return item
else:
raise QueueError("Empty Queue")
def __isBefore(self):
return( self.__head - self.__tail == 1 or (self.__head == 0 and self.__tail == self.__size-1))
def __updateTail(self):
self.__tail += 1
if self.__tail >= self.__size:
self.__tail = 0
def __updateHead(self):
self.__head += 1
if self.__head >= self.__size:
self.__head = 0
def __isEmpty(self):
return (self.__head == self.__tail)
def display(self):
p = self.__head
while p != self.__tail:
print(p, self.__queue[p])
p+= 1
if p >= self.__size:
p = 0
class QueueError(Exception):
    def __init__(self, message):
        super().__init__(message)
        self.__message = message
def toString(self):
return (self.__message)
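# Minimal usage sketch (illustrative; relies only on the classes above).  Note that this
# implementation keeps one slot empty, so a queue of size 4 holds at most 3 items.
if __name__ == "__main__":
    q = circularQueue(4)
    for value in ("a", "b", "c"):
        q.enqueue(value)              # each call also prints the current tail index
    print(q.dequeue())                # -> "a"
    q.display()                       # prints the remaining items with their slot indices
    try:
        q.enqueue("d")                # fine: only 3 live items after the dequeue above
        q.enqueue("e")                # a 4th live item raises QueueError("Queue full")
    except QueueError as error:
        print(error.toString())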
|
from flask import Blueprint, jsonify, request, url_for
from shorty.services.base import Services
from shorty.services import error_handler
from shorty.services.error_handler import CustomError
api = Blueprint('api', __name__)
@api.route('/shortlinks', methods=['POST'])
def create_shortlink():
data = request.get_json()
if not data:
raise error_handler.data_not_found()
if 'url' not in data:
raise error_handler.url_not_found()
to_shorten = Services(data)
to_shorten.check_errors()
shortened_link = to_shorten.shortened_link()
return jsonify(shortened_link), 200
@api.errorhandler(CustomError)
def handle_bad_request(error):
return error.to_jsonified(), error.status_code
|
import gzip
try:
from Magphi.exit_with_error import exit_with_error
except ModuleNotFoundError:
from exit_with_error import exit_with_error
EXIT_INPUT_FILE_ERROR = 3
def check_if_gzip(input_files, file_logger):
"""
    Check if genome files given as input are gzipped, by trying to open them with gzip's open() function
:param
input_files: A list of names of genomes given as input.
file_logger: A logger that will write to a logging file
    :return: Boolean that tells if all input files are gzipped.
"""
file_logger.debug('Check if input genomes are in gzipped state')
    # Make a list with a Bool for each input genome and evaluate all input genomes for gzipped state.
    # If a genome is gzipped, change that genome's index to True in is_input_gzipped.
is_input_gzipped = [False] * len(input_files)
for i, file in enumerate(input_files):
with gzip.open(file, 'r') as input_file:
try:
input_file.readline()
is_input_gzipped[i] = True
except OSError:
# except gzip.BadGzipFile:
is_input_gzipped[i] = False
# Check if all inputs are gzipped,
# if then return True for all inputs being gzipped.
# Else check if some are gzipped and return that input is mixed
# Lastly return False if no input is Gzipped.
if all(is_input_gzipped):
file_logger.debug('Files were found to be gzipped')
return True
elif 0 < sum(is_input_gzipped) < len(is_input_gzipped):
file_logger.exception('Check input files: Some input files found to be gzipped!\n'
'Please check that all of your input files are either Gzipped or not')
exit_with_error(message='Some input files found to be Gzipped while other were not!',
exit_status=EXIT_INPUT_FILE_ERROR)
else:
file_logger.debug('Files were not found to be gzipped')
return False
def check_if_fasta(input_files, file_logger, is_input_gzipped):
"""
Function to check if input files are Fasta format identified by > in first line
returns 'fasta' if all files are fasta, exits and logs error if only some are fasta indicating mixed input
:param input_files: List of input genomes by file name
:param file_logger: Logger that outputs files to log
:return: variable: Either 'fasta' if all input genomes are fasta format or None if not
"""
file_logger.debug('Check if input genomes are fasta format')
# Construct list to hold if files are fasta
# Search all files for fasta signature > in first line.
is_input_fasta = [False]*len(input_files)
for i, file in enumerate(input_files):
if is_input_gzipped:
in_file = gzip.open(file, 'rt')
else:
in_file = open(file, 'r')
# Test first line
first_line = in_file.readline()
if '>' in first_line:
if len(first_line.strip()) > 1:
is_input_fasta[i] = True
else:
in_file.close()
exit_with_error(
                f'Fasta file contains no sequence header. This is not allowed, please have a look at file: {file}',
EXIT_INPUT_FILE_ERROR)
# Check for new line in remaining lines
new_line_in_middle = False
for line in in_file.readlines():
if new_line_in_middle:
in_file.close()
                exit_with_error(f'Fasta file contains a new line in the middle of the file. This is not allowed, please have a look at file: {file}',
EXIT_INPUT_FILE_ERROR)
# Check for empty line
if not line.strip():
new_line_in_middle = True
in_file.close()
# Check if all input genomes are fasta, if then return 'fasta'
# else check if only some input genomes are fasta - if: Give error
# Else return None
if all(is_input_fasta):
file_logger.debug('Files were found to be Fasta files')
return 'fasta'
elif any(is_input_fasta):
file_logger.error('Input files were found to be of mixed type with some being recognised as fasta!\n'
'Please make sure that all your input files are only either Fasta or GFF3, not mixed')
exit_with_error(message='Input files found to be of mixed type! Only some files were recognised as fasta!',
exit_status=EXIT_INPUT_FILE_ERROR)
else:
file_logger.debug('Files were not found to be Fasta files')
return None
def check_if_gff(input_files, file_logger, is_input_gzipped):
"""
Function to check in input genomes are given in a gff3 format with the appended genome.
:param
input_files: List of input genomes by file name
file_logger: Logger that outputs files to log
    :return: Either 'gff' if all input genomes are GFF3 format or None if not
"""
''' Function to check if input files are GFF3 format with an attached genomes identified by the ##FASTA line '''
file_logger.debug('Check input files: Check if GFF3')
is_input_gff = [False] * len(input_files)
for i, file in enumerate(input_files):
if is_input_gzipped:
in_file = gzip.open(file, 'rt')
else:
in_file = open(file, 'r')
if '##gff-version 3' in in_file.readline():
for line in in_file.readlines():
if '##FASTA' in line:
is_input_gff[i] = True
# Check that a genome has been found in file
if is_input_gff[i] is False:
in_file.close()
                print(f'Oops! {file} does seem to be a GFF3 file, but not one that contains the genome '
                      f'following a ##FASTA line - please check the file')
file_logger.error('Check input files: Input is GFF but seems to miss ##FASTA line or entire genome')
exit_with_error(message='Input is GFF but seems to miss ##FASTA line or entire genome',
exit_status=EXIT_INPUT_FILE_ERROR)
in_file.close()
if all(is_input_gff):
file_logger.debug('Files were found to be GFF files')
return 'gff'
elif any(is_input_gff):
file_logger.error('Input files were found to be of mixed type with some being recognised as GFF!\n'
'Please make sure that all your input files are only either Fasta or GFF3, not mixed')
exit_with_error(message='Input files found to be of mixed type! Only some files were recognised as GFF!',
exit_status=EXIT_INPUT_FILE_ERROR)
else:
file_logger.debug('Files were not found to be GFF files')
return None
def check_inputs(input_files, file_logger):
"""
Function to run through different checks of input files to determine their state (gzipped) and type (Fasta or GFF)
:param input_files: List of input genomes by file name
:param file_logger: Logger that outputs files to log
:return: Variable for Type, and state of file found by checks.
"""
    ''' Function to check the input files. Will check if input is GFF or Fasta, or exit if the filetype is not recognised.
Will exit program when unrecognised input file is given.'''
# Add logging step
file_logger.debug("Check input files: "
"Initiating verifying input files are either FASTA or GFF3 with genome attached."
" Also checking for gzipped state")
# Check if files are gzipped
is_input_gzipped = check_if_gzip(input_files, file_logger)
# check if input file is fasta
file_type = check_if_fasta(input_files, file_logger, is_input_gzipped)
# Check if input files are GFF3
if file_type is None:
file_type = check_if_gff(input_files, file_logger, is_input_gzipped)
# If file_type is still None, exit as input files are not recognized
if file_type is None:
file_logger.exception('The given input files could not be recognised as either Fasta or GFF3 with a genome.')
exit_with_error(message='The given input files could not be recognised as either Fasta or GFF3 with a genome',
exit_status=EXIT_INPUT_FILE_ERROR)
return file_type, is_input_gzipped
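# Example usage (illustrative sketch; the file names and logger name are hypothetical):
#
#   import logging
#   file_logger = logging.getLogger('magphi_input_check')
#   file_type, is_gzipped = check_inputs(['genome_1.fasta.gz', 'genome_2.fasta.gz'], file_logger)
#   # file_type is 'fasta' or 'gff'; is_gzipped is True only when every input file is gzipped.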
|
# Copyright 2019 VMware, Inc.
# SPDX-License-Indentifier: Apache-2.0
from .exception import TemplateEngineException
from .tags.tag_map import get_tag_map
class TagResolver(object):
"""
`TagResolver` resolves a template tag.
"""
# All template tag names start with TAG_MARKER
TAG_MARKER = '#'
# Any character from ":" to the end of the tag name string is ignored.
LABEL_SEPARATOR = ":"
def __init__(self, element_resolver, template_loader):
"""
Construct a TagResolver.
:param element_resolver: ElementResolver for resolving an element
by a tag.
:param template_loader: TemplateLoader for loading template by a tag.
"""
self._element_resolver = element_resolver
self._template_loader = template_loader
self._tag_map = get_tag_map(self)
@staticmethod
def is_key_tag(key):
return isinstance(key, str) and len(key) > 1 and \
key[0] == TagResolver.TAG_MARKER
@staticmethod
def is_tag(tag_data):
"""
Check whether a JSON element is a tag.
:param tag_data: JSON element to be checked.
:type tag_data: JSON object
:return: True if it is a tag.
:rtype: 'bool'
"""
return isinstance(tag_data, list) and len(tag_data) > 0 and \
isinstance(tag_data[0], str) and len(tag_data[0]) > 1 and \
tag_data[0][0] == TagResolver.TAG_MARKER
def resolve(self, tag_data, binding_data_list):
"""
Process a JSON element as a tag.
:param tag_data: Template tag to be processed.
:type tag_data: JSON element
:param binding_data_list: binding data list to be used
during the processing.
:type binding_data_list: 'list'
:return: Processed tag.
:rtype: JSON object
"""
tag_name = tag_data[0][1:]
# When a tag name is used in a dictionary as a key,
# an arbitrary label is allowed to be appended to the tag name
# in the format of ":label" to make the key unique.
tag_name = tag_name.partition(TagResolver.LABEL_SEPARATOR)[0]
if tag_name in self._tag_map:
tag = self._tag_map[tag_name]
tag_tokens = tag_data[1:]
return tag.process(tag_tokens, binding_data_list)
else:
raise TemplateEngineException("Unknown tag \"{}\".".format(tag_name))
def get_element_resolver(self):
"""
Return the element_resolver. Used by tags to get the element resolver.
:return: Element resolver.
:rtype: ElementResolver
"""
return self._element_resolver
def get_template_loader(self):
"""
Return the template loader. Used by tags to get the template loader.
:return: Template loader
:rtype: TemplateLoader
"""
return self._template_loader
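# Illustrative note (tag names here are hypothetical; real tag names come from get_tag_map):
# a tag is a JSON array whose first element is a string starting with "#",
# e.g. ["#sometag", "token1"].  When such a string is used as a dictionary key, anything
# after ":" is a disambiguating label, so "#sometag:a" and "#sometag:b" both resolve to
# the same "sometag" handler.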
|
import sys
from collections import defaultdict as d
import re
from optparse import OptionParser, OptionGroup
# Author: Martin Kapun
# edits by Siyuan Feng
######################################################### HELP #########################################################################
usage = """python3 %prog \
--mpileup data.mpileup \
--min-cov 10 \
--max-cov data.cov \
--min-count 10 \
--min-freq 0.01 \
--miss-frac 0.1 \
--base-quality-threshold 15 \
--names Kib32,Tam10 \
--coding 1.8 \
> output.vcf"""
parser = OptionParser(usage=usage)
helptext = """
H E L P :
_________
"""
group = OptionGroup(parser, helptext)
######################################################### parameters #########################################################################
parser.add_option("--mpileup", dest="m", help="A mpileup file")
parser.add_option("--min-cov", dest="minc",
help="The minimum coverage threshold: e.g. 10", default=10)
parser.add_option("--max-cov", dest="max",
help="An input file with precomputed coverage thresholds")
parser.add_option("--min-count", dest="mint",
help="The minimum number of counts of the alternative allele across all samples pooled", default=3)
parser.add_option("--min-freq", dest="minf",
help="The minimum Frequency of the alternative allele across all samples pooled", default=0.01)
parser.add_option("--miss-frac", dest="mis",
help="The minimum Frequency of the alternative allele across all samples pooled", default=0.1)
parser.add_option("--base-quality-threshold", dest="b",
help="The Base-quality threshold for Qualities encoded in Sanger format (Illumina 1.8 format)", default=15)
parser.add_option("--names", dest="n",
help="a comma separted list of thenames of all samples in the mpileup file")
parser.add_option("--coding", dest="c",
help="the Illumina FASTQ quality coding", default=1.8)
parser.add_option_group(group)
(options, args) = parser.parse_args()
################################### functions ######################################
def load_data(x):
    ''' import data either from a gzipped or an uncompressed file or from STDIN'''
import gzip
if x == "-":
y = sys.stdin
elif x.endswith(".gz"):
        y = gzip.open(x, "rt")
else:
y = open(x, "r")
return y
def keywithmaxvalue(dictionary):
    ''' This function returns the key(s) for the maximum value in a dictionary'''
    newhash = d(list)
    for k, v in dictionary.items():
        newhash[v].append(k)
    return newhash[max(newhash.keys())]
def splitter(l, n):
    ''' This generator function returns equally sized chunks of a list'''
# credit: Meric Lieberman, 2012
i = 0
chunk = l[:n]
while chunk:
yield chunk
i += n
chunk = l[i:i + n]
def extract_indel(l, sign):
''' This function returns an Indel from a sequence string in a pileup'''
position = l.index(sign)
numb = ""
i = 0
while True:
if l[position + 1 + i].isdigit():
numb += l[position + 1 + i]
i += 1
else:
break
seqlength = int(numb)
sequence = l[position:position + i + 1 + seqlength]
indel = sequence.replace(numb, "")
return sequence, indel
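# Worked example: for the pileup read string "..+2AT,." and sign "+" the function reads the
# length digits ("2") and returns ("+2AT", "+AT"); the caller then strips "+2AT" from the
# read string so only single-base calls remain.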
################################## parameters ########################################
data = options.m
minimumcov = int(options.minc)
minimumcount = int(options.mint)
minimumfreq = float(options.minf)
missfrac = float(options.mis)
baseqthreshold = int(options.b)
phred = float(options.c)
############################ calculate PHRED cutoff #############################
# calculate correct PHRED score cutoff: ASCII-pc
if phred >= 1.0 and phred < 1.8:
pc = 64
else:
pc = 33
############################ get MAX coverage threshold #############################
maximumcov = d(list)
for l in open(options.max, "r"):
if l.startswith("#") or l.startswith("calculating"):
continue
k, v = l.split("\t")
maximumcov[k] = [int(x) for x in v.split(",")]
# print maximumcov
############################ parse MPILEUP ###########################################
# parse mpileup and store alternative alleles:
for line in load_data(data):
if len(line.split("\t")) < 2:
continue
k = line[:-1].split('\t')
CHR, POS, REF = k[:3]
# only keep chromosomal arms with maximum coverage threshold
if CHR not in maximumcov:
# print CHR
continue
div = list(splitter(k, 3))
libraries = div[1:]
# loop through libraries
totalalleles = d(int)
alleles = d(lambda: d(int))
for j in range(len(libraries)):
alleles[j]
nuc = libraries[j][1]
qualities = libraries[j][2]
# test if seq-string is empty
if nuc == "*":
continue
# find and remove read indices and mapping quality string
nuc = re.sub(r'\^.', r'', nuc)
nuc = nuc.replace('$', '')
# find and remove InDels
while "+" in nuc or "-" in nuc:
if "+" in nuc:
insertion, ins = extract_indel(nuc, "+")
nuc = nuc.replace(insertion, "")
else:
deletion, dele = extract_indel(nuc, "-")
nuc = nuc.replace(deletion, "")
# test for base quality threshold (if below: ignore nucleotide)
# print len(nuc),len(qualities)
nuc = "".join([nuc[x] for x in range(len(nuc)) if ord(
qualities[x]) - pc >= baseqthreshold])
nuc = "".join([nuc[x] for x in range(len(nuc)) if nuc[x] != "*"])
# read all alleles
for i in range(len(nuc)):
# ignore single nucleotide deletions
if nuc[i] == "*":
continue
# count nucleotides similar to reference base
if nuc[i] == "," or nuc[i] == ".":
totalalleles[REF] += 1
alleles[j][REF] += 1
continue
# count alternative nucleotides
totalalleles[nuc[i].upper()] += 1
alleles[j][nuc[i].upper()] += 1
# test if SNPs pass minimum count / minimum frequency threshold:
for allele, counts in totalalleles.copy().items():
if counts < minimumcount or counts / float(sum(totalalleles.values())) < minimumfreq:
del totalalleles[allele]
# test if site is polymorphic
if len(totalalleles) < 2:
# print CHR,POS,"non-poly",totalalleles
continue
# create output for VCF
ADP = sum(totalalleles.values()) / len(libraries)
ALT = []
# set alternative allele order:
for i in ["A", "T", "C", "G"]:
if i == REF:
continue
if i not in totalalleles:
continue
ALT.append(i)
# set ADP,NC,GT,AD and DP
ADP = sum(totalalleles.values()) / len(libraries)
samplelist = []
miss = 0
for j in range(len(libraries)):
# make empty entry if no allele counts for sample
if j not in alleles:
samplelist.append("./.:.:.:.:.")
miss += 1
continue
alleleh = alleles[j]
# remove alleles not counted in all samples
for k, v in alleleh.copy().items():
if k != REF and k not in ALT:
del alleleh[k]
GT, AD, RD, FREQ, NC = [], [], 0, [], 0
DP = sum(alleleh.values())
# test if mincoverage is still reached or if the maxcoverage is still exceeded when removing alleles that do not fullfill criteria; make empty entry if sample not fullfilling min/max coverage threshold
if DP < minimumcov or DP > maximumcov[CHR][j]:
samplelist.append("./.:.:.:.:.")
miss += 1
continue
# test if sample empty:
if "*" in alleleh or len(alleleh) == 0:
NC += 1
samplelist.append("./.:.:.:.:.")
miss += 1
continue
# test if population is fixed for REF allele
if len(alleleh) == 1 and REF in alleleh:
samplelist.append("0/0:" + str(DP) + ":0:" + str(DP) + ":0.0")
continue
# test if population is fixed for ALT allele
at = 0
if len(alleleh) == 1:
for i in range(len(ALT)):
if ALT[i] in alleleh:
samplelist.append(
str(i + 1) + "/" + str(i + 1) + ":0:" + str(DP) + ":" + str(DP) + ":1.0")
at = 1
continue
if at == 1:
continue
# proceed if population not fixed
# set REF counts
if REF in alleleh:
GT.append(0)
# set ALT counts
for i in range(len(ALT)):
if ALT[i] in alleleh:
GT.append(i + 1)
AD.append(alleleh[ALT[i]])
RD = DP - sum(AD)
FREQ.append(
round(alleleh[ALT[i]] / float(sum(alleleh.values())), 2))
samplelist.append("/".join([str(x) for x in GT]) + ":" + str(RD) + ":" + ",".join(
[str(x) for x in AD]) + ":" + str(DP) + ":" + ",".join([str(x) for x in FREQ]))
# test if missing fraction of samples smaller than threshold:
if miss / float(len(libraries)) > missfrac:
# print CHR,POS,"missing fraction",miss/float(len(libraries))
continue
# write output
print(CHR + "\t" + POS + "\t.\t" + REF + "\t" + ",".join(ALT) + "\t.\t.\tADP=" +
str(ADP) + ";NC=" + str(NC) + "\tGT:RD:AD:DP:FREQ\t" + "\t".join(samplelist))
|
import json
import os
from common.file_helpers import mkdirs_exists_ok
from selfdrive.hardware import PC
class MissingAuthConfigError(Exception):
pass
if PC:
CONFIG_DIR = os.path.expanduser('~/.comma')
else:
CONFIG_DIR = "/tmp/.comma"
mkdirs_exists_ok(CONFIG_DIR)
def get_token():
try:
with open(os.path.join(CONFIG_DIR, 'auth.json')) as f:
auth = json.load(f)
return auth['access_token']
except Exception:
return None
def set_token(token):
with open(os.path.join(CONFIG_DIR, 'auth.json'), 'w') as f:
json.dump({'access_token': token}, f)
def clear_token():
os.unlink(os.path.join(CONFIG_DIR, 'auth.json'))
|
# ------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# ------------------------------------------------------------------------------------------
import logging
import math
import time
from argparse import Namespace
from datetime import datetime
from pathlib import Path
from typing import Any, Dict, Optional
from unittest import mock
from unittest.mock import MagicMock
import pytest
import torch
from _pytest.capture import SysCapture
from _pytest.logging import LogCaptureFixture
from azureml._restclient.constants import RunStatus
from azureml.core import Run
from pytorch_lightning import Trainer
from health_azure import RUN_CONTEXT, create_aml_run_object
from health_ml.utils import AzureMLLogger, AzureMLProgressBar, log_learning_rate, log_on_epoch
from testhiml.utils_testhiml import DEFAULT_WORKSPACE
def create_unittest_run_object(snapshot_directory: Optional[Path] = None) -> Run:
return create_aml_run_object(experiment_name="himl-tests",
workspace=DEFAULT_WORKSPACE.workspace,
snapshot_directory=snapshot_directory or ".")
def test_log_on_epoch() -> None:
"""
Tests if the helper function to log metrics per epoch works.
"""
module = mock.MagicMock()
module.trainer = None
with pytest.raises(AssertionError) as ex1:
log_on_epoch(module, metrics={"foo": 1})
assert "No trainer is set" in str(ex1)
module.trainer = mock.MagicMock()
module.trainer.world_size = 1
with pytest.raises(ValueError) as ex2:
log_on_epoch(module, name="foo")
assert "'name' and 'value' must be provided" in str(ex2)
with pytest.raises(ValueError) as ex3:
log_on_epoch(module, value=1.0)
assert "'name' and 'value' must be provided" in str(ex3)
foo_value = 1
metrics = {"bar": torch.tensor(2.0)}
module.device = 'cpu'
module.log_dict = mock.MagicMock()
log_on_epoch(module, name="foo", value=foo_value, metrics=metrics)
# Test if all metrics that are not tensors are converted to floating point tensors
actual_args = module.log_dict.call_args
actual_metrics = actual_args[0][0]
for metric_name in ["foo", "bar"]:
assert metric_name in actual_metrics, f"Metric missing: {metric_name}"
assert torch.is_tensor(actual_metrics[metric_name]), f"Metric {metric_name}: not a tensor"
assert actual_metrics[metric_name].dtype == torch.float, f"Metric {metric_name}: should be float tensor"
assert actual_metrics["foo"].item() == float(foo_value)
# Default arguments for the call to module.log
assert actual_args[1] == {'on_epoch': True,
'on_step': False,
'reduce_fx': torch.mean,
'sync_dist': False,
'sync_dist_op': 'mean'}, "Failed for world_size==1"
# Test if sync_dist is computed correctly from world size: world size is now 2, so sync_dist should be True
module.trainer.world_size = 2
log_on_epoch(module, metrics=metrics)
assert module.log_dict.call_args[1] == {'on_epoch': True,
'on_step': False,
'reduce_fx': torch.mean,
'sync_dist': True,
'sync_dist_op': 'mean'}, "Failed for world_size==2"
# Test if overrides for sync_dist and the other aggregation args are passed correctly
module.trainer.world_size = 2
log_on_epoch(module, metrics=metrics, reduce_fx="reduce", sync_dist=False, sync_dist_op="nothing") # type: ignore
assert module.log_dict.call_args[1] == {'on_epoch': True,
'on_step': False,
'sync_dist': False,
'reduce_fx': "reduce",
'sync_dist_op': "nothing"}, "Failed for sync_dist==True"
module.trainer.world_size = 1
log_on_epoch(module, metrics=metrics, reduce_fx="reduce", sync_dist=True, sync_dist_op="nothing") # type: ignore
assert module.log_dict.call_args[1] == {'on_epoch': True,
'on_step': False,
'sync_dist': True,
'reduce_fx': "reduce",
'sync_dist_op': "nothing"}, "Failed for sync_dist==True"
def test_log_learning_rate_singleton() -> None:
"""
Test the method that logs learning rates, when there is a single LR scheduler.
"""
module = mock.MagicMock()
module.lr_schedulers = mock.MagicMock(return_value=None)
with pytest.raises(ValueError) as ex:
log_learning_rate(module)
assert "can only be used during training" in str(ex)
scheduler = mock.MagicMock()
lr = 1.234
scheduler.get_last_lr = mock.MagicMock(return_value=[lr])
module.lr_schedulers = mock.MagicMock(return_value=scheduler)
module.trainer = mock.MagicMock(world_size=1)
with mock.patch("health_ml.utils.logging.log_on_epoch") as mock_log_on_epoch:
log_learning_rate(module)
assert mock_log_on_epoch.call_args[0] == (module,)
assert mock_log_on_epoch.call_args[1] == {'metrics': {'learning_rate': lr}}
def test_log_learning_rate_multiple() -> None:
"""
Test the method that logs learning rates, when there are multiple schedulers with non-scalar return values.
"""
scheduler1 = mock.MagicMock()
lr1 = [1]
scheduler1.get_last_lr = mock.MagicMock(return_value=lr1)
scheduler2 = mock.MagicMock()
lr2 = [2, 3]
scheduler2.get_last_lr = mock.MagicMock(return_value=lr2)
module = mock.MagicMock()
module.lr_schedulers = mock.MagicMock(return_value=[scheduler1, scheduler2])
with mock.patch("health_ml.utils.logging.log_on_epoch") as mock_log_on_epoch:
log_learning_rate(module, name="foo")
assert mock_log_on_epoch.call_args[0] == (module,)
assert mock_log_on_epoch.call_args[1] == {'metrics': {'foo/0/0': lr1[0],
'foo/1/0': lr2[0],
'foo/1/1': lr2[1]}}
def create_mock_logger() -> AzureMLLogger:
"""
Create an AzureMLLogger that has a run field set to a MagicMock.
"""
run_mock = MagicMock()
with mock.patch("health_ml.utils.logging.create_aml_run_object", return_value=run_mock):
return AzureMLLogger(enable_logging_outside_azure_ml=True)
def test_azureml_logger() -> None:
"""
Tests logging to an AzureML run via PytorchLightning
"""
logger = create_mock_logger()
# On all build agents, this should not be detected as an AzureML run.
assert not logger.is_running_in_azure_ml
assert logger.has_custom_run
logger.log_metrics({"foo": 1.0})
assert logger.run is not None
logger.run.log.assert_called_once_with("foo", 1.0, step=None)
# All the following methods of LightningLoggerBase are not implemented
assert logger.name() == ""
assert logger.version() == 0
assert logger.experiment() is None
# Finalizing should call the "Complete" method of the run
logger.finalize(status="foo")
logger.run.complete.assert_called_once()
def test_azureml_log_hyperparameters1() -> None:
"""
Test logging of hyperparameters
"""
logger = create_mock_logger()
assert logger.run is not None
# No logging should happen with empty params
logger.log_hyperparams(None) # type: ignore
assert logger.run.log.call_count == 0
logger.log_hyperparams({})
assert logger.run.log.call_count == 0
logger.log_hyperparams(Namespace())
assert logger.run.log.call_count == 0
# Logging of hyperparameters that are plain dictionaries
fake_params = {"foo": 1.0}
logger.log_hyperparams(fake_params)
# Dictionary should be logged as name/value pairs, one value per row
logger.run.log_table.assert_called_once_with("hyperparams", {'name': ['foo'], 'value': ["1.0"]})
def test_azureml_log_hyperparameters2() -> None:
"""
Logging of hyperparameters that are Namespace objects from the arg parser
"""
logger = create_mock_logger()
assert logger.run is not None
class Dummy:
def __str__(self) -> str:
return "dummy"
fake_namespace = Namespace(foo="bar", complex_object=Dummy())
logger.log_hyperparams(fake_namespace)
# Complex objects are converted to str
expected_dict: Dict[str, Any] = {'name': ['foo', 'complex_object'], 'value': ['bar', 'dummy']}
logger.run.log_table.assert_called_once_with("hyperparams", expected_dict)
def test_azureml_log_hyperparameters3() -> None:
"""
Logging of hyperparameters that are nested dictionaries. They should first be flattened, than each complex
object to str
"""
logger = create_mock_logger()
assert logger.run is not None
fake_namespace = Namespace(foo={"bar": 1, "baz": {"level3": Namespace(a="17")}})
logger.log_hyperparams(fake_namespace)
expected_dict = {"name": ["foo/bar", "foo/baz/level3/a"], "value": ["1", "17"]}
logger.run.log_table.assert_called_once_with("hyperparams", expected_dict)
def test_azureml_logger_many_hyperparameters(tmpdir: Path) -> None:
"""
Test if large number of hyperparameters are logged correctly.
Earlier versions of the code had a bug that only allowed a maximum of 15 hyperparams to be logged.
"""
many_hyperparams: Dict[str, Any] = {f"param{i}": i for i in range(0, 20)}
many_hyperparams["A long list"] = ["foo", 1.0, "abc"]
expected_metrics = {key: str(value) for key, value in many_hyperparams.items()}
logger: Optional[AzureMLLogger] = None
try:
logger = AzureMLLogger(enable_logging_outside_azure_ml=True, workspace=DEFAULT_WORKSPACE.workspace)
assert logger.run is not None
logger.log_hyperparams(many_hyperparams)
logger.run.flush()
time.sleep(1)
metrics = logger.run.get_metrics(name=AzureMLLogger.HYPERPARAMS_NAME)
print(f"metrics = {metrics}")
actual = metrics[AzureMLLogger.HYPERPARAMS_NAME]
assert actual["name"] == list(expected_metrics.keys())
assert actual["value"] == list(expected_metrics.values())
finally:
if logger:
logger.finalize("done")
def test_azureml_logger_hyperparams_processing() -> None:
"""
Test flattening of hyperparameters: Lists were not handled correctly in previous versions.
"""
hyperparams = {"A long list": ["foo", 1.0, "abc"],
"foo": 1.0}
logger = AzureMLLogger(enable_logging_outside_azure_ml=False)
actual = logger._preprocess_hyperparams(hyperparams)
assert actual == {"A long list": "['foo', 1.0, 'abc']", "foo": "1.0"}
def test_azureml_logger_step() -> None:
"""
Test if the AzureML logger correctly handles epoch-level and step metrics
"""
logger = create_mock_logger()
assert logger.run is not None
logger.log_metrics(metrics={"foo": 1.0, "epoch": 123}, step=78)
assert logger.run.log.call_count == 2
assert logger.run.log.call_args_list[0][0] == ("foo", 1.0)
assert logger.run.log.call_args_list[0][1] == {"step": None}, "For epoch-level metrics, no step should be provided"
assert logger.run.log.call_args_list[1][0] == ("epoch", 123)
assert logger.run.log.call_args_list[1][1] == {"step": None}, "For epoch-level metrics, no step should be provided"
logger.run.reset_mock() # type: ignore
logger.log_metrics(metrics={"foo": 1.0}, step=78)
logger.run.log.assert_called_once_with("foo", 1.0, step=78)
def test_azureml_logger_init1() -> None:
"""
Test the logic to choose the run, inside of the constructor of AzureMLLogger.
"""
# When running in AzureML, the RUN_CONTEXT should be used
with mock.patch("health_ml.utils.logging.is_running_in_azure_ml", return_value=True):
with mock.patch("health_ml.utils.logging.RUN_CONTEXT", "foo"):
logger = AzureMLLogger(enable_logging_outside_azure_ml=True)
assert logger.is_running_in_azure_ml
assert not logger.has_custom_run
assert logger.run == "foo"
# We should be able to call finalize without any effect (logger.run == "foo", which has no
# "Complete" method). When running in AzureML, the logger should not
# modify the run in any way, and in particular not complete it.
logger.finalize("nothing")
def test_azureml_logger_init2() -> None:
"""
Test the logic to choose the run, inside of the constructor of AzureMLLogger.
"""
# When disabling offline logging, the logger should be a no-op, and not log anything
logger = AzureMLLogger(enable_logging_outside_azure_ml=False)
assert logger.run is None
logger.log_metrics({"foo": 1.0})
logger.finalize(status="nothing")
def test_azureml_logger_actual_run() -> None:
"""
When running outside of AzureML, a new run should be created.
"""
logger = AzureMLLogger(enable_logging_outside_azure_ml=True, workspace=DEFAULT_WORKSPACE.workspace)
assert not logger.is_running_in_azure_ml
assert logger.run is not None
assert logger.run != RUN_CONTEXT
assert isinstance(logger.run, Run)
assert logger.run.experiment.name == "azureml_logger"
assert logger.has_custom_run
expected_metrics = {"foo": 1.0, "bar": 2.0}
logger.log_metrics(expected_metrics)
logger.run.flush()
actual_metrics = logger.run.get_metrics()
assert actual_metrics == expected_metrics
assert logger.run.status != RunStatus.COMPLETED
logger.finalize("nothing")
    # The AzureML run has been completed now. Insert a mock to check that finalize() completes the run.
logger.run = MagicMock()
logger.finalize("nothing")
logger.run.complete.assert_called_once_with()
def test_azureml_logger_init4() -> None:
"""
Test the logic to choose the run, inside of the constructor of AzureMLLogger.
"""
# Check that all arguments are respected
run_mock = MagicMock()
with mock.patch("health_ml.utils.logging.create_aml_run_object", return_value=run_mock) as mock_create:
logger = AzureMLLogger(enable_logging_outside_azure_ml=True,
experiment_name="exp",
run_name="run",
snapshot_directory="snapshot",
workspace="workspace", # type: ignore
workspace_config_path=Path("config_path"))
assert logger.has_custom_run
assert logger.run == run_mock
mock_create.assert_called_once_with(experiment_name="exp",
run_name="run",
snapshot_directory="snapshot",
workspace="workspace",
workspace_config_path=Path("config_path"))
def test_progress_bar_enable() -> None:
"""
Test the logic for disabling the progress bar.
"""
bar = AzureMLProgressBar(refresh_rate=0)
assert not bar.is_enabled
assert bar.is_disabled
bar = AzureMLProgressBar(refresh_rate=1)
assert bar.is_enabled
bar.disable()
assert not bar.is_enabled
bar.enable()
assert bar.is_enabled
def test_progress_bar(capsys: SysCapture) -> None:
bar = AzureMLProgressBar(refresh_rate=1)
mock_trainer = mock.MagicMock(current_epoch=12,
lightning_module=mock.MagicMock(global_step=34),
num_training_batches=10,
                                  enable_validation=False,
num_test_batches=[20],
num_predict_batches=[30])
bar.on_init_end(mock_trainer) # type: ignore
assert bar.trainer == mock_trainer
def latest_message() -> str:
return capsys.readouterr().out.splitlines()[-1] # type: ignore
# Messages in training
trainer = Trainer()
bar.on_train_epoch_start(trainer, None) # type: ignore
assert bar.stage == AzureMLProgressBar.PROGRESS_STAGE_TRAIN
assert bar.train_batch_idx == 0
assert bar.val_batch_idx == 0
assert bar.test_batch_idx == 0
assert bar.predict_batch_idx == 0
bar.on_train_batch_end(None, None, None, None, None) # type: ignore
assert bar.train_batch_idx == 1
latest = latest_message()
assert "Training epoch 12 (step 34)" in latest
assert "1/10 ( 10%) completed" in latest
# When starting the next training epoch, the counters should be reset
bar.on_train_epoch_start(trainer, None) # type: ignore
assert bar.train_batch_idx == 0
# Messages in validation
bar.on_validation_start(trainer, None) # type: ignore
assert bar.stage == AzureMLProgressBar.PROGRESS_STAGE_VAL
assert bar.total_num_batches == 0
assert bar.val_batch_idx == 0
# Number of validation batches is difficult to fake, tweak the field where it is stored in the progress bar
bar.total_num_batches = 5
bar.on_validation_batch_end(None, None, None, None, None, None) # type: ignore
assert bar.val_batch_idx == 1
latest = latest_message()
assert "Validation epoch 12: " in latest
assert "1/5 ( 20%) completed" in latest
# Messages in testing
bar.on_test_epoch_start(trainer, None) # type: ignore
assert bar.stage == AzureMLProgressBar.PROGRESS_STAGE_TEST
test_count = 2
for _ in range(test_count):
bar.on_test_batch_end(None, None, None, None, None, None) # type: ignore
assert bar.test_batch_idx == test_count
latest = latest_message()
assert "Testing:" in latest
assert f"{test_count}/20 ( 10%)" in latest
# Messages in prediction
bar.on_predict_epoch_start(trainer, None) # type: ignore
assert bar.stage == AzureMLProgressBar.PROGRESS_STAGE_PREDICT
predict_count = 3
for _ in range(predict_count):
bar.on_predict_batch_end(None, None, None, None, None, None) # type: ignore
assert bar.predict_batch_idx == predict_count
latest = latest_message()
assert "Prediction:" in latest
assert f"{predict_count}/30 ( 10%)" in latest
assert "since epoch start" in latest
# Test behaviour when a batch count is infinity
bar.total_num_batches = math.inf # type: ignore
bar.on_predict_batch_end(None, None, None, None, None, None) # type: ignore
assert bar.predict_batch_idx == 4
latest = latest_message()
assert "4 batches completed" in latest
assert "since epoch start" in latest
def test_progress_bar_to_logging(caplog: LogCaptureFixture) -> None:
"""
Check that the progress bar correctly writes to logging
"""
to_logging = AzureMLProgressBar(write_to_logging_info=True)
message = "A random message"
with caplog.at_level(logging.INFO):
to_logging._print(message)
assert message in caplog.text
@pytest.mark.parametrize("print_timestamp", [True, False])
def test_progress_bar_to_stdout(capsys: SysCapture, print_timestamp: bool) -> None:
"""
Check that the progress bar correctly writes to stdout, and that timestamps are generated if requested.
"""
message = "A random message"
today = datetime.utcnow().strftime("%Y-%m-%d")
to_stdout = AzureMLProgressBar(write_to_logging_info=False, print_timestamp=print_timestamp)
to_stdout._print(message)
stdout: str = capsys.readouterr().out # type: ignore
print(f"Output: {stdout}")
assert message in stdout
assert stdout.startswith(today) == print_timestamp
|
import re
def convert_string_to_snake_case(s):
    """
    Converts a string from camelCase to snake_case
    :param s: String to convert
    :rtype: String
    :returns: String converted to snake_case
    """
a = re.compile('((?<=[a-z0-9])[A-Z]|(?!^)[A-Z](?=[a-z]))')
return a.sub(r'_\1', s).lower()
def convert_list_to_snake_case(a):
"""
Iterates over a list and changes the key values
from camelCase to snake_case
:param a: List of dictionaries to convert
:rtype: list
    :returns: list with each key converted to snake_case
"""
new_arr = []
for i in a:
if isinstance(i, list):
new_arr.append(convert_list_to_snake_case(i))
elif isinstance(i, dict):
new_arr.append(convert_dict_to_snake_case(i))
else:
new_arr.append(i)
return new_arr
def convert_dict_to_snake_case(d):
"""
Iterates over a dictionary and changes the key values
from camelCase to snake_case
:param d: Dictionary to convert
:rtype: dict
    :returns: dictionary with each key converted to snake_case
"""
out = {}
for k in d:
new_k = convert_string_to_snake_case(k)
if isinstance(d[k], dict):
out[new_k] = convert_dict_to_snake_case(d[k])
elif isinstance(d[k], list):
out[new_k] = convert_list_to_snake_case(d[k])
else:
out[new_k] = d[k]
return out
def convert_object_to_snake_case(o):
"""
Iterates over an object and changes the key values
from camelCase to snake_case
:param o: Dictionary or Array of dictionaries to convert
:rtype: o
    :returns: Same type that was passed
"""
if isinstance(o, list):
return convert_list_to_snake_case(o)
elif isinstance(o, dict):
return convert_dict_to_snake_case(o)
elif isinstance(o, str):
return convert_string_to_snake_case(o)
else:
return o
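# Illustrative usage sketch (not part of the original module): round-trips a nested,
# camelCase-keyed structure through convert_object_to_snake_case.
if __name__ == '__main__':
    payload = {
        'userName': 'ada',
        'contactInfo': {'phoneNumber': '555-0100', 'emailAddress': 'ada@example.com'},
        'recentOrders': [{'orderId': 1, 'itemCount': 3}],
    }
    print(convert_object_to_snake_case(payload))
    # {'user_name': 'ada', 'contact_info': {'phone_number': ..., 'email_address': ...},
    #  'recent_orders': [{'order_id': 1, 'item_count': 3}]}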
|
import numpy as np
from scipy.signal import savgol_filter
def get_filtered_data(df, filter="No filter"):
# clean lists by removing sensitivity, removing IC ratio, removing empty values and converting strings
# with ratios to floats.
# x
l = df["Parameter"].to_list()
    l_time = [np.nan if string == "" else string for string in l[3:]]
    # y1
    l1 = df["Pump"].to_list()
    l1 = [np.nan if string == "" else float(string) for string in l1[3:]]
    # y2
    l2 = df["Autotune"].to_list()
    l2 = [np.nan if string == "" else float(string) for string in l2[3:]]
l2 = np.asarray(l2)
# apply filter
l2_clean = l2[::2] # remove empty values
if filter == "No filter":
l3 = l2_clean
    else:
        if filter == "Savitzky-Golay 11.6":
            l3 = savgol_filter(l2_clean, 11, 6)
        elif filter == "Savitzky-Golay 17.5":
            l3 = savgol_filter(l2_clean, 17, 5)
        elif filter == "Savitzky-Golay 23.3":
            l3 = savgol_filter(l2_clean, 23, 3)
        else:
            raise ValueError("Unknown filter: {}".format(filter))
# update numpy array of recommendations (l2) with filtered values
n = 0
for i, j in enumerate(l2):
if not np.isnan(j):
l2[i] = l3[n]
n += 1
l2 = l2.tolist()
# round numbers
l2 = [round(num, 2) for num in l2]
# use easy identifiable variable names
x = l_time
y1 = l1
y2 = l2
return x,y1,y2
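# Illustrative usage sketch (not part of the original module). It assumes the input is a
# pandas DataFrame whose first three rows hold metadata (sensitivity, IC ratio, ...) and
# whose "Autotune" column only carries a recommendation on every other row.
if __name__ == '__main__':
    import pandas as pd
    df = pd.DataFrame({
        "Parameter": ["sens", "ic", "header", "00:00", "00:30", "01:00", "01:30"],
        "Pump":      ["",     "",   "",       "1.1",   "1.2",   "1.3",   "1.4"],
        "Autotune":  ["",     "",   "",       "1.0",   "",      "1.2",   ""],
    })
    x, y1, y2 = get_filtered_data(df, filter="No filter")
    print(x, y1, y2)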
|
def _assert(bool_, err_string=''):
"""
Avoid using asserts in production code
https://juangarcia.co.uk/python/python-smell-assert/
"""
if not bool_:
raise ValueError(err_string)
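# Illustrative usage sketch (not part of the original module): unlike a bare `assert`,
# _assert raises ValueError and still fires when Python runs with optimizations (-O).
if __name__ == '__main__':
    _assert(1 + 1 == 2, 'arithmetic is broken')   # passes silently
    try:
        _assert(False, 'this message reaches the caller')
    except ValueError as exc:
        print(exc)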
|
"""
Iterative ptychographic reconstruction assuming vector wavefields.
The algorithm is developed using the theory outlined in
"Ptychography in anisotropic media", Ferrand, Allain, Chamard, 2015.
"""
import jones_matrices as jones
import numexpr as ne
import numpy as np
import optics_utils as optics
import pylab as pyl
import scipy as sp
from tqdm import tqdm
from matplotlib import pyplot as plt
pi = np.pi
class vPIE:
def __init__(
self,
scan_path,
pmodes=3,
amodes=3,
iterations=5,
nruns=1,
ptych_num=360,
WL=660e-09,
bit_precision=32,
vpie_config_num=1,
):
self.bit_precision = bit_precision
self.r_dtype = "float{}".format(bit_precision)
self.c_dtype = "complex{}".format(bit_precision * 2)
self.J = complex(0, 1)
self.PI = np.pi
self.pmodes = pmodes
self.amodes = amodes
self.nruns = nruns
self.iterations = iterations
self.scan_path = scan_path
self.WL = 660e-09
self.K = 2.0 * self.PI / self.WL
self.vpie_config = np.loadtxt(
scan_path + "vpie_config_%02d.txt" % vpie_config_num
)
self.probe_path = scan_path
self.detpix = 2048
self.dxd = 6.5e-6
self.rebfac = 8
self.padfac = 1
self.fovfac = 4
self.rebpix = self.detpix // self.rebfac
self.rebmid = self.rebpix // 2
self.rebqrt = self.rebmid // 2
self.padpix = self.rebpix * self.padfac
self.padmid = self.padpix // 2
self.padqrt = self.padmid // 2
self.sampix = self.padpix * self.fovfac
self.sammid = self.sampix // 2
self.samqrt = self.sammid // 2
self.dx3 = self.rebfac * self.dxd
self.z12 = 0.25
self.z23 = 0.30
self.z24 = 0.04
self.z43 = self.z23 - self.z24
self.z34 = -self.z43
self.dx2 = (self.WL * self.z23) / (self.padpix * self.dx3)
self.dx1 = (self.WL * self.z12) / (self.padpix * self.dx2)
self.dx4 = (self.WL * self.z43) / (self.padpix * self.dx3)
self.theta_p = np.zeros(self.pmodes)
self.theta_a = np.zeros(self.amodes)
self.ptych_num = ptych_num
self.npts = ptych_num
self.spx = self.padmid - self.rebmid
self.epx = self.padmid + self.rebmid
self.trans = np.ones(((2, 2, self.sampix, self.sampix))).astype(self.c_dtype)
self.trans[0, 0] = optics.randomise_amplitude(self.sampix, 0.8, 1.0) * np.exp(
self.J * optics.randomise_phase(self.sampix, -self.PI, self.PI)
)
self.trans[0, 1] = optics.randomise_amplitude(self.sampix, 0.8, 1.0) * np.exp(
self.J * optics.randomise_phase(self.sampix, -self.PI, self.PI)
)
self.trans[1, 0] = optics.randomise_amplitude(self.sampix, 0.8, 1.0) * np.exp(
self.J * optics.randomise_phase(self.sampix, -self.PI, self.PI)
)
self.trans[1, 1] = optics.randomise_amplitude(self.sampix, 0.8, 1.0) * np.exp(
self.J * optics.randomise_phase(self.sampix, -self.PI, self.PI)
)
self.psi_analyser_est = np.zeros(
(self.pmodes, self.amodes, self.padpix, self.padpix)
).astype(self.c_dtype)
self.cplx_diff = np.zeros(
(self.pmodes, self.amodes, self.padpix, self.padpix)
).astype(self.c_dtype)
self.arr_A = np.zeros((self.pmodes, self.padpix, self.padpix)).astype(
self.c_dtype
)
self.arr_B = np.zeros((self.pmodes, self.padpix, self.padpix)).astype(
self.c_dtype
)
self.psi_det_est = np.zeros(
(self.pmodes, self.amodes, self.padpix, self.padpix)
).astype(self.c_dtype)
self.trans_crop = np.zeros((2, 2, self.padpix, self.padpix)).astype(
self.c_dtype
)
self.trans = np.load(self.scan_path + "jones_guess_avg_r256_p256_f1024.npy")
def load_data(self):
print("************* setting-up data structure")
# the data array is the full set of measurements
self.data = np.zeros(
(self.npts, self.pmodes, self.amodes, self.padpix, self.padpix)
)
self.data_energy = np.zeros((self.npts, self.pmodes, self.amodes))
print("************* loading data")
# ... load data into data array ... #
# ... don't need to calculate the jones matrices for the polarisers ... #
# ... they will be included in the amp and phase of the probes ... #
for i in range(self.pmodes):
for j in range(self.amodes):
print("polarisation mode: {}|analyser mode: {}".format(i, j))
kk = (i * self.pmodes) + j
                self.subdir = "scan_%02d/grid_00_00/" % int(self.vpie_config[kk, 0])
self.theta_p[i] = self.vpie_config[kk, 1]
self.theta_a[j] = self.vpie_config[kk, 2]
for k in np.arange(self.npts):
self.procdir = (
"processed_r"
+ str(self.rebpix)
+ "_p"
+ str(self.padpix)
+ "/npy_files/"
)
filename = "position_%04d_int.npy" % k
self.full_filename = (
self.scan_path + self.subdir + self.procdir + filename
)
self.data[
k, i, j, self.spx : self.epx, self.spx : self.epx
] = np.sqrt(np.load(self.full_filename))
self.data_energy[k, i, j] = np.sum(
self.data[k, i, j, self.spx : self.epx, self.spx : self.epx]
** 2
)
self.data[k, i, j] = np.fft.fftshift(self.data[k, i, j])
# ... transform angles from degrees to radians
self.theta_p = np.radians(self.theta_p)
self.theta_a = np.radians(self.theta_a)
print(self.theta_a)
self.stheta_p = np.sin(self.theta_p)
self.ctheta_p = np.cos(self.theta_p)
self.stheta_a = np.sin(self.theta_a)
self.ctheta_a = np.cos(self.theta_a)
        # for a given position on the sample, the detector estimate will depend on the polariser and analyser settings
# thus we need to declare psi_det_est to have pmodes*amodes slices
# ... load the sample positions text file
spec_data = sp.loadtxt(
self.scan_path
            + ("scan_%02d" % int(self.vpie_config[0, 0]))
+ "/grid_00_00/final_smarpod_positions.txt",
delimiter=",",
)
# ... and create a new set of position arrays in pixel units
pos_x = spec_data[:, 0] / self.dx4
pos_y = spec_data[:, 1] / self.dx4
self.ix = np.round(pos_x)
self.iy = np.round(pos_y)
# this is important ... the probe is always in the middle of the smaller array ... the rx and ry tell the algorithm where to put the top corner of the cutout (sample) array so you're cutting out the right part
self.rx = self.sammid - self.padmid
self.ry = self.sammid - self.padmid
self.fft_denom = 1.0 / self.padpix
def guess_probe(self):
beam = np.zeros((2, self.padpix, self.padpix)).astype(self.c_dtype)
beam[0] = np.load(self.probe_path + "sim_probe_guess_r256_p256.npy")
radpix = sp.hypot(
*sp.ogrid[-self.padmid : self.padmid, -self.padmid : self.padmid]
)
phase_factor = sp.exp(
(self.J * self.PI / self.WL)
* (self.z43 ** (-1.0) * (self.dx4 * radpix) ** 2.0)
)
prop_factor = sp.exp(
(self.J * self.PI / self.WL)
* ((self.z23 ** -1 + self.z34 ** (-1.0)) * (self.dx4 * radpix) ** 2.0)
).astype(self.c_dtype)
self.probe = np.zeros((self.pmodes, 2, self.padpix, self.padpix)).astype(
self.c_dtype
)
self.probe_conj = np.zeros((self.pmodes, 2, self.padpix, self.padpix)).astype(
self.c_dtype
)
"""
beam = np.zeros((padpix,padpix))
beamwidth = 10e-6
mult = -1.0 / (2.0*beamwidth**2)
beam = np.zeros((2, padpix, padpix)).astype(c_dtype)
beam[0] = np.exp( mult * (radpix*d1)**2 )
probe_guess = np.zeros((pmodes, 2, padpix, padpix)).astype(c_dtype)
probe_conj = np.zeros((pmodes, 2, padpix, padpix)).astype(c_dtype)
"""
for i in range(self.pmodes):
self.probe[i] = jones.prop_through_hwp(beam, self.theta_p[i] / 2.0)
self.probe_conj[i, 0] = np.conj(self.probe[i, 0])
self.probe_conj[i, 1] = np.conj(self.probe[i, 1])
def analvect(self, amode):
avect = np.array([np.cos(amode), np.sin(amode)])
return avect
def iterate(self):
self.ddbeta = 0.25
self.ddbetavec = np.zeros(self.iterations)
self.ddbetavec[0 * self.iterations // 4 : 1 * self.iterations // 4] = 0.5
self.ddbetavec[1 * self.iterations // 4 : 2 * self.iterations // 4] = 0.6
self.ddbetavec[2 * self.iterations // 4 : 3 * self.iterations // 4] = 0.7
self.ddbetavec[3 * self.iterations // 4 : 4 * self.iterations // 4] = 0.8
# probe[mode, plane] so probe[0,1] = probe[0th mode, y-plane] and probe[2,0] = probe[2nd mode, x-plane]
self.rho_xx_max = np.max(
np.abs(self.probe[0, 0]) ** 2
+ np.abs(self.probe[1, 0]) ** 2
+ np.abs(self.probe[2, 0]) ** 2
)
self.rho_yy_max = np.max(
np.abs(self.probe[0, 1]) ** 2
+ np.abs(self.probe[1, 1]) ** 2
+ np.abs(self.probe[2, 1]) ** 2
)
for ii in np.arange(self.nruns):
trans_tmp = np.zeros((2, 2, self.padpix, self.padpix)).astype(self.c_dtype)
# pdb.set_trace()
# loop over the number of iterations
for i in np.arange(self.iterations):
rand_pos = np.random.permutation(self.ptych_num)
# pdb.set_trace()
# loop over the number of scan points
for j in np.arange(self.ptych_num):
jj = rand_pos[j]
# work out the ROI on the larger, object array that corresponds to the k^th projection
                    x_region = int(self.rx + self.ix[jj])
                    y_region = int(self.ry + self.iy[jj])
xi = x_region
xf = x_region + self.padpix
yi = y_region
yf = y_region + self.padpix
# crop out the relevant part of the sample
trans_crop = np.copy(self.trans[:, :, yi:yf, xi:xf])
# loop over the number of incident polarisation settings
for k in np.arange(self.pmodes):
self.esw = jones.jones_product(trans_crop, self.probe[k])
# loop over the number of analyser settings
for l in np.arange(self.amodes):
# store the diffraction data in a temp array
# temp_diff_amp = data[k,l,jj]
temp_diff_amp = self.data[jj, k, l]
print("data", self.data.shape)
print("temp_diff_amp", temp_diff_amp.shape)
# propagate through the analyser
self.aESW = jones.prop_through_lin_pol(
self.esw, self.theta_a[l]
)
# we know the field is linearly polarised
# so change coords from (x,y) to (u,v) such that the field is polarised along u
# this allows us to represent the field in a scalar formalism ( only amp/phase )
scaESW = jones.rotate_coords(self.aESW, self.theta_a[l])
# scaESW = jones.rotate_coords(self.aESW, self.theta_a[l])[0]
print("ESW", scaESW.shape)
# propagate to the detector
ff_meas = optics.downstream_prop(scaESW)
threshval = 0.001 * np.max(np.abs(ff_meas))
# apply the modulus constraint
ft_guess = sp.where(
ne.evaluate("real(abs(ff_meas))") > threshval,
ne.evaluate(
"real(abs(temp_diff_amp))*(ff_meas/real(abs(ff_meas)))"
),
0.0,
).astype(self.c_dtype)
# calculate the complex difference
                            self.cplx_diff[k, l] = ft_guess - ff_meas
# propagate the difference back to the exit surface of the analyser
# psi_analyser_est[k,l] = spf.fftshift( spf.ifft2( spf.fftshift( cplx_diff ) ) ) * padpix
temp_arr1 = (
(self.ctheta_a[0] * self.cplx_diff[k, 0])
+ (self.ctheta_a[1] * self.cplx_diff[k, 1])
+ (self.ctheta_a[2] * self.cplx_diff[k, 2])
)
self.arr_A[k, :, :] = optics.upstream_prop(temp_arr1)
temp_arr2 = (
(self.stheta_a[0] * self.cplx_diff[k, 0])
+ (self.stheta_a[1] * self.cplx_diff[k, 1])
+ (self.stheta_a[2] * self.cplx_diff[k, 2])
)
self.arr_B[k, :, :] = optics.upstream_prop(temp_arr2)
trans_tmp[0, 0] = trans_crop[0, 0] + (
self.ddbetavec[i] / self.rho_xx_max
) * (
(self.probe_conj[0, 0] * self.arr_A[0])
+ (self.probe_conj[1, 0] * self.arr_A[1])
+ (self.probe_conj[2, 0] * self.arr_A[2])
)
trans_tmp[0, 1] = trans_crop[0, 1] + (
self.ddbetavec[i] / self.rho_yy_max
) * (
(self.probe_conj[0, 1] * self.arr_A[0])
+ (self.probe_conj[1, 1] * self.arr_A[1])
+ (self.probe_conj[2, 1] * self.arr_A[2])
)
trans_tmp[1, 0] = trans_crop[1, 0] + (
self.ddbetavec[i] / self.rho_xx_max
) * (
(self.probe_conj[0, 0] * self.arr_B[0])
+ (self.probe_conj[1, 0] * self.arr_B[1])
+ (self.probe_conj[2, 0] * self.arr_B[2])
)
trans_tmp[1, 1] = trans_crop[1, 1] + (
self.ddbetavec[i] / self.rho_yy_max
) * (
(self.probe_conj[0, 1] * self.arr_B[0])
+ (self.probe_conj[1, 1] * self.arr_B[1])
+ (self.probe_conj[2, 1] * self.arr_B[2])
)
self.trans[:, :, yi:yf, xi:xf] = trans_tmp
for j in range(self.ptych_num):
jj = rand_pos[j]
# work out the ROI on the larger, object array that corresponds to the k^th projection
                    x_region = int(self.rx + self.ix[jj])
                    y_region = int(self.ry + self.iy[jj])
xi = x_region
xf = x_region + self.padpix
yi = y_region
yf = y_region + self.padpix
for k in range(self.pmodes):
print("probe", self.probe.shape)
for l in range(self.amodes):
print(self.trans.shape)
obj_k = self.trans[:, :, yi:yf, xi:xf]
print("cropped object", obj_k.shape)
temp_probe = np.zeros(obj_k.shape)
delta_p = np.zeros(self.probe.shape).astype(
np.dtype("complex64")
)
delta_p[k, 0] = np.conj(
obj_k[0, 0]
).T * optics.upstream_prop(
np.cos(l) * ff_meas[0] + np.sin(l) * ff_meas[1]
)
delta_p[k, 0] += np.conj(
obj_k[1, 0]
).T * optics.upstream_prop(
np.cos(l) * ff_meas[0] + np.sin(l) * ff_meas[1]
)
delta_p[k, 1] = np.conj(
obj_k[1, 0]
).T * optics.upstream_prop(
np.cos(l) * ff_meas[0] + np.sin(l) * ff_meas[1]
)
delta_p[k, 1] += np.conj(
obj_k[1, 1]
).T * optics.upstream_prop(
np.cos(l) * ff_meas[0] + np.sin(l) * ff_meas[1]
)
modfact = np.sqrt(ff_meas ** 2 / ft_guess ** 2) - 1
delta_p[k] *= modfact
print(self.probe.shape, delta_p.shape)
self.probe = self.probe - delta_p
                            print("MODERATE SUCCESS")
""" update probe """
# h = np.array([np.sum(abs(self.ctheta_a)**2),
# np.sum(self.ctheta_a*np.conj(self.stheta_a)),
# np.conj(np.sum(self.ctheta_a*np.conj(self.stheta_a))),
# np.sum(abs(self.stheta_a)**2)])
# h = h.reshape([2,2])
# Dn = np.diag(np.sum(np.conj(np.sum(self.trans.T)))*h*np.sum(self.trans))
# self.probe = self.probe - self.ddbetavec[i]/Dn
# print("iteration: {}".format(i))
# plt.imshow(np.imag(self.trans[0,0]))
# plt.imshow(np.imag(self.probe))
# plt.show()
current_fname = (
"jones_guess_%02d" % ii
+ "_r"
+ str(self.rebpix)
+ "_p"
+ str(self.padpix)
+ "_f"
+ str(self.sampix)
+ ".npy"
)
"""
include probe update somewhere around here.
"""
def run():
PIE = vPIE("/opt/data/sim_data/henry_02/", iterations=2)
PIE.load_data()
PIE.guess_probe()
PIE.iterate()
print("test complete")
run()
# sample_est = np.load('/home/guido/data/objects/birefringent_wheel/jones_matrix_2048_x_2048.npy')
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of tasker
#
# Copyright (c) 2020 Lorenzo Carbonell Cerezo <a.k.a. atareao>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import gi
try:
    gi.require_version("Gtk", "3.0")
except Exception as e:
    print(e)
    exit(-1)
from gi.repository import Gtk
class ListBoxRowString(Gtk.ListBoxRow):
"""Docstring for ListBoxRowString. """
def __init__(self, text):
"""TODO: to be defined. """
Gtk.ListBoxRow.__init__(self)
label = Gtk.Label.new(text)
label.set_halign(Gtk.Align.CENTER)
label.set_width_chars(20)
label.set_margin_top(5)
label.set_margin_bottom(5)
self.add(label)
def get_name(self):
return self.get_children()[0].get_text()
def set_name(self, text):
self.get_children()[0].set_text(text)
class ListBoxString(Gtk.ListBox):
"""Docstring for ListBoxString. """
    def __init__(self, items=None):
        """TODO: to be defined. """
        Gtk.ListBox.__init__(self)
        self.set_sort_func(self.sort_list)
        if items:
            self.add_all(items)
def sort_list(self, row1, row2):
"""TODO: Docstring for sort_list.
:row1: TODO
:row2: TODO
:returns: TODO
"""
return row1.get_name() > row2.get_name()
def add_all(self, items):
for item in items:
self.add_item(item)
def add_item(self, text):
for item in self.get_children():
if item.get_name() == text:
return
newListBoxRowString = ListBoxRowString(text)
newListBoxRowString.show_all()
self.add(newListBoxRowString)
def remove_item(self, text):
for index, item in enumerate(self.get_children()):
if self.get_children()[index].get_name() == text:
self.remove(self.get_children()[index])
return
def clear(self):
        for index in range(len(self.get_children()) - 1, -1, -1):
self.remove(self.get_children()[index])
def get_items(self):
items = []
for child in self.get_children():
items.append(child.get_name())
return items
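# Illustrative usage sketch (not part of the original module): show a window containing
# a ListBoxString populated with a few items; rows are kept sorted and de-duplicated.
if __name__ == '__main__':
    window = Gtk.Window(title="ListBoxString demo")
    listbox = ListBoxString(["banana", "apple", "cherry", "apple"])
    window.add(listbox)
    window.connect("destroy", Gtk.main_quit)
    window.show_all()
    Gtk.main()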
|
#!/usr/bin/env python
import sys
sys.path.append('..')
from libs.helpers import get_input
from functools import lru_cache
@lru_cache(maxsize=None)
def add(n1, n2):
return n1 + n2
@lru_cache(maxsize=None)
def mul(n1, n2):
return n1 * n2
@lru_cache(maxsize=None)
def div(n1, n2):
return int(n1 / n2)
@lru_cache(maxsize=None)
def mod(n1, n2):
return n1 % n2
@lru_cache(maxsize=None)
def eql(n1, n2):
    # eql stores 1 when the two values are equal, 0 otherwise
    return 1 if n1 == n2 else 0
def run_instructions(lines, inp, register):
i = 0
for l in lines:
split = l.split()
c = split[0]
n1 = register[split[1]]
n2 = 0
res = 0
if len(split) > 2:
try:
n2 = int(split[2])
            except ValueError:
n2 = register[split[2]]
if c == 'add':
res = add(n1, n2)
elif c == 'inp':
res = inp[i]
i += 1
elif c == 'mul':
res = mul(n1, n2)
elif c == 'div':
res = div(n1, n2)
elif c == 'mod':
res = mod(n1, n2)
elif c == 'eql':
res = eql(n1, n2)
else:
raise(ValueError(f'Unknown command: {l}'))
register[split[1]] = res
return register
def find_max(lines, register, states = {}):
if not lines:
if register['z'] == 0:
return (True, '')
else:
return (False, '')
state = tuple([lines[0]] + [register[r] for r in register])
if state in states:
return states[state]
l = lines[0]
split = l.split()
c = split[0]
n1 = register[split[1]]
n2 = 0
res = 0
if len(split) > 2:
try:
n2 = int(split[2])
        except ValueError:
n2 = register[split[2]]
if c == 'inp':
valid = False
for i in range(9, 0, -1):
reg = {r: register[r] for r in register}
reg[split[1]] = i
valid, inp = find_max(lines[1:], reg)
if valid:
return (valid, f'{i}{inp}')
return (False, '')
elif c == 'div':
if n2 == 0:
return (False, '')
res = int(n1 / n2)
elif c == 'mod':
if n2 <= 0 or n1 < 0:
return (False, '')
else:
res = n1 % n2
elif c == 'add':
res = add(n1, n2)
elif c == 'mul':
res = mul(n1, n2)
elif c == 'eql':
res = eql(n1, n2)
else:
raise(ValueError(f'Unknown command: {l}'))
register[split[1]] = res
mx = find_max(lines[1:], register)
states[state] = mx
return mx
if __name__ == '__main__':
lines = get_input('./example3.txt')
inp = [9,27]
register = {'w': 0, 'x': 0, 'y': 0, 'z': 0}
res = run_instructions(lines, inp, register)
print(res)
lines = get_input('./input.txt')
register = {'w': 0, 'x': 0, 'y': 0, 'z': 0}
res = find_max(lines, register)
print(res)
|
#!/usr/bin/env python
import os.path
import argparse
import random
import numpy as np
import sympy as sp
import matplotlib.pyplot as plt
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
import stats.methods as methods
from stats.utils import *
################
# Declarations #
################
SYM_X, SYM_Y = SYM_VALUES = sp.symbols('x y')
SYM_A, SYM_ALPHA = SYM_PARAMS = sp.symbols('a alpha')
SYM_EXPR = sp.sympify('a * exp(alpha*x)')
SYM_EXPR_DELTA = sp.sympify('y - a * exp(alpha*x)')
MIN_X = 0
MAX_X = 10
NUM_VALS = 20 # number of source values
REAL_A = 31 # real 'a' value of source distribution
REAL_ALPHA = 0.5       # real 'alpha' value of source distribution
ERR_X_AVG = 0 # average of X error values
ERR_X_MIN_STD = 0.1 # minimal std of X error values
ERR_X_MAX_STD = 0.1 # maximal std of X error values
ERR_Y_AVG = 0 # average of Y error values
ERR_Y_MIN_STD = 0.01 # minimal std of Y error values
ERR_Y_MAX_STD = 10.01 # maximal std of Y error values
ERR_NUM_STD_ITER = 50 # number of stds iterations
MNK_NUM_ITER = 1 # number of MNK iterations
################
# Program code #
################
DESCRIPTION = 'Use this script to determine estimate quality'
parser = argparse.ArgumentParser(description=DESCRIPTION)
parser.add_argument('-w', '--write-to', metavar='PATH',
type=str, help='file to write plot in')
# parse cli options
args = parser.parse_args()
# real X values without errors
real_x = np.linspace(MIN_X, MAX_X, NUM_VALS, dtype=float)
# real Y values without errors
real_y = np.vectorize(
sp.lambdify(SYM_X,
SYM_EXPR.subs({SYM_A: REAL_A, SYM_ALPHA: REAL_ALPHA}),
'numpy'
)
)(real_x)
# generate array of X error stds
err_x_stds = np.linspace(ERR_X_MIN_STD, ERR_X_MAX_STD, ERR_NUM_STD_ITER)
# generate array of Y error stds
err_y_stds = np.linspace(ERR_Y_MIN_STD, ERR_Y_MAX_STD, ERR_NUM_STD_ITER)
# collect dispersions of estimates
basic_stds = []
mnk_stds = []
mrt_stds = []
print('Expression: {}'.format(SYM_EXPR))
print('Real A: {}'.format(REAL_A))
print('Real ALPHA: {}'.format(REAL_ALPHA))
print('Number of iterations: {}'.format(ERR_NUM_STD_ITER))
print('-' * 40, '\n')
# iterate over error standard deviation values
for err_x_std, err_y_std in zip(err_x_stds, err_y_stds):
    print('Error X std: {}'.format(err_x_std))
print('Error Y std: {}\n'.format(err_y_std))
# add X errors with current normal distribution
x = np.vectorize(
lambda v: v + random.gauss(ERR_X_AVG, err_x_std)
)(real_x)
# add Y errors with current normal distribution
y = np.vectorize(
lambda v: v + random.gauss(ERR_Y_AVG, err_y_std)
)(real_y)
# get base values as first pairs of values
base_values_first = {
SYM_X: [x[0], x[1]],
SYM_Y: [y[0], y[1]]
}
    half_len = len(x) // 2
# get base values as half-distant pairs of values
base_values_half_dist = {
SYM_X: [x[0], x[half_len]],
SYM_Y: [y[0], y[half_len]]
}
# get base values as maximal distant pairs of values
base_values_max_dist = {
SYM_X: [x[0], x[-1]],
SYM_Y: [y[0], y[-1]]
}
# get base values as averages of two half-length subgroups
base_values_avg = {
SYM_X: [avg(x[:half_len]), avg(x[half_len:])],
SYM_Y: [avg(y[:half_len]), avg(y[half_len:])]
}
################
# Basic search #
################
# find params with basic method
basic_a, basic_alpha = methods.search_basic(
delta_expression=SYM_EXPR_DELTA,
parameters=(SYM_A, SYM_ALPHA),
values=base_values_max_dist)
basic_y = np.vectorize(
sp.lambdify(SYM_X,
SYM_EXPR.subs({SYM_A: basic_a, SYM_ALPHA: basic_alpha}),
'numpy'
)
)(real_x)
basic_disp = disp(basic_y, real_y)
basic_std = std(basic_y, real_y)
basic_stds.append(basic_std)
print('Basic a: {}'.format(basic_a))
print('Basic alpha: {}'.format(basic_alpha))
print('Dispersion: {}'.format(basic_disp))
print('Std: {}\n'.format(basic_std))
##############
# MNK search #
##############
for i, (mnk_a, mnk_alpha) in methods.search_mnk(
expression=SYM_EXPR,
parameters=(SYM_A, SYM_ALPHA),
values={SYM_X: x},
result_values={SYM_Y: y},
init_estimates={SYM_A: basic_a, SYM_ALPHA: basic_alpha},
num_iter=MNK_NUM_ITER
):
mnk_y = np.vectorize(
sp.lambdify(SYM_X,
SYM_EXPR.subs({SYM_A: mnk_a,
SYM_ALPHA: mnk_alpha}),
'numpy'
)
)(real_x)
mnk_disp = disp(mnk_y, real_y)
mnk_std = std(mnk_y, real_y)
if i == MNK_NUM_ITER:
mnk_stds.append(mnk_std) # only if last iteration
print('MNK({}) a: {}'.format(i, mnk_a))
print('MNK({}) alpha: {}'.format(i, mnk_alpha))
print('Dispersion: {}'.format(mnk_disp))
print('Std: {}\n'.format(mnk_std))
# find params with mrt method
mrt_a, mrt_alpha = methods.search_mrt(
delta_expression=SYM_EXPR_DELTA,
parameters=(SYM_A, SYM_ALPHA),
values={SYM_X: x, SYM_Y: y},
err_stds={SYM_X: err_x_std, SYM_Y: err_y_std}
)
mrt_y = np.vectorize(
sp.lambdify(SYM_X,
SYM_EXPR.subs({SYM_A: mrt_a,
SYM_ALPHA: mrt_alpha}),
'numpy'
)
)(real_x)
mrt_disp = disp(mrt_y, real_y)
mrt_std = std(mrt_y, real_y)
mrt_stds.append(mrt_std)
print('MRT a: {}'.format(mrt_a))
print('MRT alpha: {}'.format(mrt_alpha))
print('Dispersion: {}'.format(mrt_disp))
print('Std: {}'.format(mrt_std))
print('-' * 40, '\n')
basic_stds_plot, = plt.plot(err_y_stds, basic_stds,
color='g', linestyle='-',
marker='.', markersize=5,
mfc='g', label='basic')
mnk_stds_plot, = plt.plot(err_y_stds, mnk_stds,
color='b', linestyle='-',
marker='.', markersize=5,
mfc='b', label='MNK({})'.format(MNK_NUM_ITER))
mrt_stds_plot, = plt.plot(err_y_stds, mrt_stds,
color='r', linestyle='-',
marker='.', markersize=5,
mfc='r', label='MRT')
plt.axis([ERR_Y_MIN_STD, ERR_Y_MAX_STD, 0, 1000])
plt.xlabel('$ \\sigma_y $')
plt.ylabel('$ \\rho $')
plt.grid(True)
if args.write_to:
plt.savefig(args.write_to, dpi=100)
plt.show()
|
import tempfile
import unittest
from pyadt import Connection
from pyadt import exceptions as e
class TestConnectionInit(unittest.TestCase):
"""Unit tests for Connection.__init__"""
def test_attribute_declaration(self):
obj = Connection("DataSource")
self.assertEqual(obj.datasource, "DataSource")
self.assertIsNone(obj.cnxn)
self.assertIsNone(obj.dataset)
self.assertIsNone(obj.columns)
self.assertFalse(obj.isopen)
def test_open(self):
with tempfile.TemporaryDirectory() as tmpdir:
obj = Connection(tmpdir)
obj.open()
self.assertTrue("pyodbc.Connection" in obj.cnxn.__str__())
self.assertTrue(obj.isopen)
obj.cnxn.close()
def test_close(self):
with tempfile.TemporaryDirectory() as tmpdir:
obj = Connection(tmpdir)
obj.open()
obj.close()
self.assertFalse(obj.isopen)
def test_run_query_insert(self):
with tempfile.TemporaryDirectory() as tmpdir:
obj = Connection(tmpdir)
obj.open()
create_test_table(obj)
query = '''INSERT INTO test_table
VALUES (1, 'Test');'''
obj.run_query(query)
            # Eval if test_table contains the record
record = return_one_record(obj)
self.assertEqual(record[0], 1)
self.assertEqual(record[1].strip(), "Test")
obj.cnxn.close()
def test_run_query_select(self):
with tempfile.TemporaryDirectory() as tmpdir:
obj = Connection(tmpdir)
obj.open()
create_test_table(obj)
# Insert a test record into test_table
query = '''INSERT INTO test_table
VALUES (1, 'Test');'''
with obj.cnxn.cursor() as c:
c.execute(query)
# Invoke run_query
query = '''SELECT * FROM test_table;'''
obj.run_query(query)
# Eval if obj.dataset contains test_table records
self.assertEqual(len(obj.dataset), 1)
self.assertEqual(obj.dataset[0][0], 1)
self.assertEqual(obj.dataset[0][1].strip(), "Test")
            # Eval if obj.columns contains test_table columns
self.assertEqual(obj.columns, ["column1", "column2"])
obj.cnxn.close()
def test_run_query_closed(self):
with tempfile.TemporaryDirectory() as tmpdir:
obj = Connection(tmpdir)
obj.open()
create_test_table(obj)
obj.cnxn.close()
obj.isopen = False
query = '''SELECT * FROM test_table;'''
with self.assertRaises(e.ClosedDataException):
obj.run_query(query)
def test_iter_dataset(self):
        # TODO: This test really needs to have its own test case
with tempfile.TemporaryDirectory() as tmpdir:
obj = Connection(tmpdir)
obj.open()
create_test_table(obj)
# Insert test records into test_table
query = '''INSERT INTO test_table
VALUES (1, 'Test');'''
with obj.cnxn.cursor() as c:
c.execute(query)
query = '''INSERT INTO test_table
VALUES (2, 'Test2');'''
with obj.cnxn.cursor() as c:
c.execute(query)
# Run a SELECT method to assign attribute variables.
query = '''SELECT * FROM test_table;'''
obj.run_query(query)
# Create iterator
iter_ = obj.iter_dataset()
# Evaluate the first row
row = next(iter_)
self.assertEqual(row["column1"], 1)
self.assertEqual(row["column2"], "Test")
# Evaluate the second row
row = next(iter_)
self.assertEqual(row["column1"], 2)
self.assertEqual(row["column2"], "Test2")
obj.cnxn.close()
# Utility functions
def create_test_table(obj):
"""Create a test table using obj.cnxn"""
with obj.cnxn.cursor() as cursor:
cursor.execute(
'''CREATE TABLE test_table (
column1 integer,
column2 char(10)
);'''
)
def return_one_record(obj):
"""Returns the first result from a SELECT * FROM test_table query"""
with obj.cnxn.cursor() as cursor:
cursor.execute('''SELECT * FROM test_table;''')
return cursor.fetchone()
|
"""axiodl URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import path
from django.conf.urls import url, include
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('', include('core.urls')),
url(r'^boards/', include('boards.urls')),
url(r'^account/', include('accounts.urls')),
url(r'^jet/', include('jet.urls', 'jet')), # Django JET URLS
url(r'^jet/dashboard/', include('jet.dashboard.urls', 'jet-dashboard')), # Django JET dashboard URLS
url(r'^admin/', admin.site.urls),
url(r'^martor/', include('martor.urls')),
url(r'^nested_admin/', include('nested_admin.urls')),
url(r'^i18n/', include('django.conf.urls.i18n')),
]
admin.site.site_header = 'AxioDL Admin'
admin.site.site_title = 'AxioDL Admin Portal'
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
#!/usr/bin/env python
# coding: utf-8
# # Collaboration and Competition
import os
import sys
from collections import deque
import matplotlib.pyplot as plt
import numpy as np
import random
import time
import torch
from unityagents import UnityEnvironment
sys.path.insert(0, os.path.dirname(__file__))
from constants import *
from ddpg_agent import Agent
from replay_buffer import ReplayBuffer
base_dir = 'p3_collab-compet'
env = UnityEnvironment(file_name="{}/Tennis_Linux_NoVis/Tennis.x86".format(base_dir))
brain_name = env.brain_names[0]
brain = env.brains[brain_name]
env_info = env.reset(train_mode=True)[brain_name]
n_agents = len(env_info.agents)
print('Number of agents:', n_agents)
action_size = brain.vector_action_space_size
print('Size of each action:', action_size)
states = env_info.vector_observations
state_size = states.shape[1]
print('There are {} agents. Each observes a state with length: {}'.format(states.shape[0], state_size))
print('The state for the first agent looks like:', states[0])
env_info = env.reset(train_mode=True)[brain_name]
def ddpg(n_episodes=1000, max_t=1000, solved_score=0.5, consec_episodes=100, print_every=1, train_mode=True):
"""Deep Deterministic Policy Gradient (DDPG)
Params
======
n_episodes (int) : maximum number of training episodes
max_t (int) : maximum number of timesteps per episode
train_mode (bool) : if 'True' set environment to training mode
solved_score (float) : min avg score over consecutive episodes
consec_episodes (int) : number of consecutive episodes used to calculate score
print_every (int) : interval to display results
"""
memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, 2)
agents = [
Agent(base_dir + '/agent1.pytorch', memory, state_size=state_size, action_size=action_size, random_seed=2),
Agent(base_dir + '/agent2.pytorch', memory, state_size=state_size, action_size=action_size, random_seed=2)
]
mean_scores = [] # list of mean scores from each episode
min_scores = [] # list of lowest scores from each episode
max_scores = [] # list of highest scores from each episode
best_score = -np.inf
scores_window = deque(maxlen=consec_episodes) # mean scores from most recent episodes
moving_avgs = [] # list of moving averages
for i_episode in range(1, n_episodes+1):
env_info = env.reset(train_mode=train_mode)[brain_name] # reset environment
states = env_info.vector_observations # get current state for each agent
scores = np.zeros(n_agents) # initialize score for each agent
for agent in agents:
agent.reset()
start_time = time.time()
for t in range(max_t):
actions = [
agent.act(states[i, np.newaxis], add_noise=True)
for i, agent in enumerate(agents)
]
env_info = env.step(actions)[brain_name] # send actions to environment
next_states = env_info.vector_observations # get next state
rewards = env_info.rewards # get reward
dones = env_info.local_done # see if episode has finished
# save experience to replay buffer, perform learning step at defined interval
for agent, state, action, reward, next_state, done in zip(agents, states, actions, rewards, next_states, dones):
agent.step(state, action, reward, next_state, done, t)
states = next_states
scores += rewards
if np.any(dones): # exit loop when episode ends
break
duration = time.time() - start_time
min_scores.append(np.min(scores)) # save lowest score for a single agent
max_scores.append(np.max(scores)) # save highest score for a single agent
mean_scores.append(np.mean(scores)) # save mean score for the episode
        scores_window.append(max_scores[-1])        # save highest score to window
moving_avgs.append(np.mean(scores_window)) # save moving average
if i_episode % print_every == 0:
print('\rEpisode {} ({} sec) -- \tMin: {:.1f}\tMax: {:.1f}\tMean: {:.1f}\tMov. Avg: {:.1f}'.format(\
i_episode, round(duration), min_scores[-1], max_scores[-1], mean_scores[-1], moving_avgs[-1]))
if moving_avgs[-1] >= solved_score and i_episode >= consec_episodes:
print('\nEnvironment SOLVED in {} episodes!\tMoving Average ={:.1f} over last {} episodes'.format(\
i_episode-consec_episodes, moving_avgs[-1], consec_episodes))
for agent in agents:
agent.save()
break
return mean_scores, moving_avgs
# run the training loop
mean_scores, moving_avgs = ddpg()
np.savez_compressed(base_dir + '/scores.npz', {
'mean_scores': mean_scores,
'moving_avgs': moving_avgs
})
env.close()
|
import pytest
import numpy as np
from bbox import BBox2D, BBox2DList
from bbox.box_modes import XYXY, XYWH
class TestBBox2DList(object):
@classmethod
def setup_class(cls):
cls.n = 10
cls.l = [BBox2D(np.random.randint(0, 1024, size=4))
for _ in range(cls.n)]
cls.bbl = BBox2DList(cls.l)
def test_null(self):
bbl = BBox2DList([])
assert bbl.shape == (0, 4)
def test_len(self):
assert len(self.bbl) == self.n
def test_init(self):
bbl = BBox2DList(self.bbl)
assert np.array_equal(bbl.numpy(), self.bbl.numpy())
def test_init_invalid(self):
with pytest.raises(TypeError):
BBox2DList("1, 2, 3, 4")
def test_init_invalid_element_type(self):
with pytest.raises(TypeError):
BBox2DList(["1, 2, 3, 4", [1, 2, 3, 4]])
def test_init_empty_ndarray(self):
bbl = BBox2DList(np.empty((0, 4)))
assert bbl.bboxes.shape == (0, 4)
def test_init_vector(self):
bbl = BBox2DList(np.asarray([0, 1, 2, 4]))
assert bbl.bboxes.shape == (1, 4)
def test_init_invalid_dims(self):
with pytest.raises(ValueError):
BBox2DList(np.random.rand(10, 3))
with pytest.raises(ValueError):
BBox2DList(np.random.rand(10, 5))
with pytest.raises(ValueError):
BBox2DList(np.random.rand(10, 1, 4))
def test_box_shapes(self):
n = 10
l = [BBox2D(np.random.randint(0, 1024, size=4)) for _ in range(n)]
bbl = BBox2DList(l)
assert bbl.shape == (n, 4)
lx1 = np.array([b.x1 for b in l])
lx2 = np.array([b.x2 for b in l])
ly1 = np.array([b.y1 for b in l])
ly2 = np.array([b.y2 for b in l])
assert lx1.shape == bbl.x1.shape
assert ly1.shape == bbl.y1.shape
assert lx2.shape == bbl.x2.shape
assert ly2.shape == bbl.y2.shape
assert np.array_equal(lx1, bbl.x1)
assert np.array_equal(lx2, bbl.x2)
assert np.array_equal(ly1, bbl.y1)
assert np.array_equal(ly2, bbl.y2)
assert bbl.x1.shape == (n,)
def test_equality(self):
bblist = BBox2DList(self.l)
assert bblist == self.bbl
def test_inequality(self):
bbl = BBox2DList([BBox2D(np.random.randint(0, 1024, size=4))
for _ in range(self.n)])
assert bbl != self.bbl
def test_equality_invalid(self):
bblist = BBox2DList(self.l)
assert bblist != repr(self.bbl)
def test_getitem(self):
assert self.bbl[3] == self.l[3]
def test_getitem_invalid_key(self):
with pytest.raises(IndexError):
self.bbl['random']
with pytest.raises(IndexError):
self.bbl[30]
def test_setitem(self):
self.bbl[0] = [5, 6, 7, 8]
self.bbl[1] = BBox2D([1, 2, 3, 4])
assert np.array_equal(self.bbl[0], BBox2D([5, 6, 7, 8]))
assert np.array_equal(self.bbl[1], BBox2D([1, 2, 3, 4]))
def test_x1_getter(self):
assert np.array_equal(self.bbl.x1, self.bbl.bboxes[:, 0])
def test_x1_setter(self):
bbl = self.bbl.copy()
bbl.x1 = np.zeros(bbl.shape[0])
assert np.array_equal(bbl.x1, np.zeros(bbl.shape[0]))
def test_y1_getter(self):
assert np.array_equal(self.bbl.y1, self.bbl.bboxes[:, 1])
def test_y1_setter(self):
bbl = self.bbl.copy()
bbl.y1 = np.zeros(bbl.shape[0])
assert np.array_equal(bbl.y1, np.zeros(bbl.shape[0]))
def test_x2_getter(self):
assert np.array_equal(self.bbl.x2, self.bbl.bboxes[:, 2])
def test_x2_setter(self):
bbl = self.bbl.copy()
bbl.x2 = np.zeros(bbl.shape[0])
assert np.array_equal(bbl.x2, np.zeros(bbl.shape[0]))
def test_y2_getter(self):
assert np.array_equal(self.bbl.y2, self.bbl.bboxes[:, 3])
def test_y2_setter(self):
bbl = self.bbl.copy()
bbl.y2 = np.zeros(bbl.shape[0])
assert np.array_equal(bbl.y2, np.zeros(bbl.shape[0]))
def test_invalid_setter(self):
"""
One test is sufficient since all setters use the same verification function
"""
bbl = self.bbl.copy()
with pytest.raises(TypeError):
bbl.x1 = "0," * self.bbl.shape[0]
with pytest.raises(ValueError):
bbl.x1 = np.zeros((5, 4))
with pytest.raises(ValueError):
bbl.x1 = np.zeros(5)
def test_width_getter(self):
w = self.bbl.bboxes[:, 2] - self.bbl.bboxes[:, 0] + 1
assert np.array_equal(self.bbl.w, w)
assert np.array_equal(self.bbl.width, w)
def test_width_setter(self):
bbl = self.bbl.copy()
w = np.ones(bbl.shape[0])
bbl.w = w
assert np.array_equal(bbl.w, w)
assert np.array_equal(bbl.width, w)
def test_height_getter(self):
h = self.bbl.bboxes[:, 3] - self.bbl.bboxes[:, 1] + 1
assert np.array_equal(self.bbl.h, h)
assert np.array_equal(self.bbl.height, h)
def test_height_setter(self):
bbl = self.bbl.copy()
h = np.ones(bbl.shape[0])
bbl.h = h
assert np.array_equal(bbl.h, h)
assert np.array_equal(bbl.height, h)
def test_mul(self):
bbl = BBox2DList(np.ones((7, 4)))
bbl_scaled = bbl * 11
assert np.all(bbl_scaled.bboxes == 11)
bbl_scaled = 11 * bbl
assert np.all(bbl_scaled.bboxes == 11)
def test_invalid_mul(self):
bbl = BBox2DList(np.ones((7, 4)))
with pytest.raises(ValueError):
bbl * "11"
def test_append_list(self):
x = [3, 7, 10, 44]
bbl = self.bbl.append(x, mode=XYXY)
assert np.array_equal(bbl.bboxes[-1], x)
def test_append_numpy(self):
x = np.asarray([3, 7, 10, 16])
bbl = self.bbl.append(x, mode=XYXY)
assert np.array_equal(bbl.bboxes[-1], x)
def test_append_bbox(self):
x = BBox2D([3, 7, 10, 16], mode=XYXY)
bbl = self.bbl.append(x)
assert np.array_equal(bbl.bboxes[-1], x.numpy(mode=XYXY))
def test_append_bboxlist(self):
x = BBox2DList([[3, 7, 10, 16]], mode=XYXY)
bbl = self.bbl.append(x)
assert np.array_equal(bbl.bboxes,
np.vstack((self.bbl.bboxes,
[3, 7, 10, 16])))
def test_append_invalid(self):
x = "3, 7, 10, 16"
with pytest.raises(TypeError):
bbl = self.bbl.append(x, mode=XYXY)
def test_append_invalid_list(self):
x = ["abc", "7", 10, 16]
with pytest.raises(ValueError):
bbl = self.bbl.append(x, mode=XYXY)
def test_append_invalid_range(self):
x = range(4)
with pytest.raises(TypeError):
self.bbl.append(x)
def test_append_invalid_dimensions(self):
with pytest.raises(ValueError):
self.bbl.append((1, 2, 3))
with pytest.raises(ValueError):
self.bbl.append((1, 2, 3, 4, 5))
with pytest.raises(ValueError):
self.bbl.append([[1, 2, 3, 4, 5]])
def test_insert_list(self):
x = [3, 7, 10, 16]
bbl = self.bbl.insert(x, 0, mode=XYXY)
assert np.array_equal(bbl.bboxes[0], x)
def test_insert_numpy(self):
x = np.asarray([3, 7, 10, 16])
bbl = self.bbl.insert(x, 0, mode=XYXY)
assert np.array_equal(bbl.bboxes[0], x)
def test_insert_bbox(self):
x = BBox2D([3, 7, 10, 16], mode=XYXY)
bbl = self.bbl.insert(x, 0)
assert np.array_equal(bbl.bboxes[0], x.numpy(mode=XYXY))
def test_insert_invalid_datatype(self):
x = range(4)
with pytest.raises(TypeError):
self.bbl.insert(x, 0)
with pytest.raises(TypeError):
self.bbl.insert("abcd", 0)
def test_insert_invalid_type(self):
with pytest.raises(ValueError):
self.bbl.insert(["a", "b", "c", "d"], 0)
def test_insert_invalid_dimensions(self):
with pytest.raises(ValueError):
self.bbl.insert((1, 2, 3), 0)
with pytest.raises(ValueError):
self.bbl.insert((1, 2, 3, 4, 5), 0)
def test_delete(self):
idx = 5
bbl = self.bbl.delete(idx)
assert bbl.shape[0] == self.bbl.shape[0]-1
assert self.bbl[idx] not in bbl
def test_delete_negative(self):
idx = -3
bbl = self.bbl.delete(idx)
assert bbl.shape[0] == self.bbl.shape[0]-1
assert self.bbl[idx] not in bbl
def test_delete_invalid(self):
# idx is one greater than the max allowed index
idx = self.bbl.shape[0]
with pytest.raises(IndexError):
bbl = self.bbl.delete(idx)
def test_str(self):
bbl = BBox2DList([[0, 0, 1, 1], [5, 5, 5, 5]])
assert str(bbl) == "[[0. 0. 1. 1.]\n [5. 5. 5. 5.]]"
def test_repr(self):
bbl = BBox2DList([[0, 0, 1, 1], [5, 5, 5, 5]])
assert repr(
bbl) == "array([[0., 0., 1., 1.],\n [5., 5., 5., 5.]])"
|
"""Asynchronous Python client for ZAMG weather data."""
from .zamg import ZamgData
__all__ = [
"ZamgData",
]
|
'''
Copyright 2017-present, Airbnb Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
# command: nosetests -v -s test/unit/
# specific test: nosetests -v -s test/unit/file.py:TestStreamPayload.test_name
import base64
import json
from collections import OrderedDict, namedtuple
from nose.tools import (
assert_equal,
assert_not_equal,
nottest,
assert_raises,
raises
)
from stream_alert.rule_processor.config import (
ConfigError,
load_config,
validate_config,
load_env
)
def test_validate_config_valid():
"""Config Validator - Valid Config"""
config = {
'logs': {
'json_log': {
'schema': {
'name': 'string'
},
'parser': 'json'
},
'csv_log': {
'schema': {
'data': 'string',
'uid': 'integer'
},
'parser': 'csv'
}
},
'sources': {
'kinesis': {
'stream_1': {
'logs': [
'json_log',
'csv_log'
]
}
}
}
}
validate_result = validate_config(config)
assert_equal(validate_result, True)
@raises(ConfigError)
def test_validate_config_no_parsers():
"""Config Validator - No Parsers"""
config = {
'logs': {
'json_log': {
'schema': {
'name': 'string'
}
},
'csv_log': {
'schema': {
'data': 'string',
'uid': 'integer'
}
}
},
'sources': {
'kinesis': {
'stream_1': {
'logs': [
'json_log',
'csv_log'
]
}
}
}
}
validate_result = validate_config(config)
@raises(ConfigError)
def test_validate_config_no_logs():
"""Config Validator - No Logs"""
config = {
'logs': {
'json_log': {
'schema': {
'name': 'string'
}
},
'csv_log': {
'schema': {
'data': 'string',
'uid': 'integer'
}
}
},
'sources': {
'kinesis': {
'stream_1': {}
}
}
}
validate_result = validate_config(config)
def test_load_env():
"""Config Environment Validator"""
context = namedtuple('Context', ['invoked_function_arn'])
context.invoked_function_arn = 'arn:aws:lambda:us-east-1:555555555555:function:streamalert_testing:production'
env = load_env(context)
assert_equal(env['lambda_region'], 'us-east-1')
assert_equal(env['account_id'], '555555555555')
assert_equal(env['lambda_function_name'], 'streamalert_testing')
assert_equal(env['lambda_alias'], 'production')
|
# 3Sum
class Solution:
def threeSum(self, nums):
nums.sort()
ans = []
length = len(nums)
i = 0
while i < length:
if i > 0 and nums[i] == nums[i - 1]:
i += 1
continue
j = i + 1
k = length - 1
target = -nums[i]
while j < k:
twosum = nums[j] + nums[k]
if twosum > target:
k -= 1
elif twosum < target:
j += 1
else:
# found it!
ans.append([nums[i], nums[j], nums[k]])
# skip through duplicate values
while j < k and nums[j] == nums[j + 1]:
j += 1
while j < k and nums[k] == nums[k - 1]:
k -= 1
# this is what actually changes the value of nums[j], nums[k]
j += 1
k -= 1
i += 1
return ans
# [-4, -1, -1, 0, 1, 2]
# 0 1 2 3 4 5
if __name__ == "__main__":
sol = Solution()
inp = [-1,0,1,2,-1,-4]
print(sol.threeSum(inp))
|
from ccxt.base.errors import BadRequest
from decimal import Decimal, InvalidOperation
def _convert_float_or_raise(f, msg):
try:
val = _convert_float(f)
except InvalidOperation:
raise BadRequest('{} needs to be a number'.format(msg))
if not val.is_finite():
raise BadRequest('{} needs to be finite'.format(msg))
return val
def _convert_float(f):
return Decimal(str(f))
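# Illustrative usage sketch (not part of the original module): strings and floats are
# accepted, while non-numeric or non-finite input raises ccxt's BadRequest.
if __name__ == '__main__':
    print(_convert_float_or_raise('0.125', 'amount'))   # Decimal('0.125')
    print(_convert_float_or_raise(2.5, 'price'))        # Decimal('2.5')
    try:
        _convert_float_or_raise('abc', 'amount')
    except BadRequest as exc:
        print(exc)                                       # amount needs to be a number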
|
# Copyright 2022 Sony Semiconductors Israel, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import torch
import numpy as np
from model_compression_toolkit.pytorch.constants import CUDA, CPU
def get_working_device() -> torch.device:
"""
Get the working device of the environment
Returns:
Device "cuda" if GPU is available, else "cpu"
"""
return torch.device(CUDA if torch.cuda.is_available() else CPU)
def set_model(model: torch.nn.Module):
"""
Set model to work in eval mode and GPU mode if GPU is available
Args:
model: Pytorch model
Returns:
"""
model.eval()
if torch.cuda.is_available():
model.cuda()
else:
model.cpu()
def to_torch_tensor(tensor):
"""
Convert a Numpy array to a Torch tensor.
Args:
tensor: Numpy array.
Returns:
Torch tensor converted from the input Numpy array.
"""
working_device = get_working_device()
if isinstance(tensor, torch.Tensor):
return tensor.to(working_device)
elif isinstance(tensor, list):
return [to_torch_tensor(t) for t in tensor]
elif isinstance(tensor, tuple):
        return tuple(to_torch_tensor(t) for t in tensor)
elif isinstance(tensor, np.ndarray):
return torch.from_numpy(tensor.astype(np.float32)).to(working_device)
else:
raise Exception(f'Conversion of type {type(tensor)} to {type(torch.Tensor)} is not supported')
def torch_tensor_to_numpy(tensor: torch.Tensor) -> np.ndarray:
"""
Convert a Pytorch tensor to a Numpy array.
Args:
tensor: Pytorch tensor.
Returns:
Numpy array converted from the input tensor.
"""
if isinstance(tensor, np.ndarray):
return tensor
elif isinstance(tensor, list):
return [torch_tensor_to_numpy(t) for t in tensor]
elif isinstance(tensor, tuple):
        return tuple(torch_tensor_to_numpy(t) for t in tensor)
elif isinstance(tensor, torch.Tensor):
return tensor.detach().cpu().numpy()
else:
raise Exception(f'Conversion of type {type(tensor)} to {type(np.ndarray)} is not supported')
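# Illustrative usage sketch (not part of the original module): round-trip a NumPy array
# through the working device and back.
if __name__ == '__main__':
    arr = np.random.rand(2, 3).astype(np.float32)
    t = to_torch_tensor(arr)            # torch.Tensor on "cuda" if available, else "cpu"
    back = torch_tensor_to_numpy(t)     # back to a NumPy array
    print(t.device, back.shape)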
|
from typing import Optional, Tuple
from paramak import RotateMixedShape
import math
class CapsuleVacuumVessel(RotateMixedShape):
    """A cylindrical vessel volume with constant thickness that has additional
    spherical edges.
Arguments:
outer_start_point: the x,z coordinates of the outer bottom of the
vacuum vessel
radius: the radius from which the centres of the vessel meets the outer
circumference.
thickness: the radial thickness of the vessel in cm.
"""
def __init__(
self,
radius: float,
outer_start_point: Tuple[float, float],
thickness: float,
**kwargs
):
self.radius = radius
self.thickness = thickness
self.outer_start_point = outer_start_point[0], outer_start_point[1]
super().__init__(**kwargs)
@property
def radius(self):
return self._radius
@radius.setter
def radius(self, value):
if not isinstance(value, (float, int)):
raise ValueError(
'VacuumVessel.radius must be a number. Not', value)
if value <= 0:
msg = ('VacuumVessel.radius must be a positive number above 0. '
f'Not {value}')
raise ValueError(msg)
self._radius = value
@property
def thickness(self):
return self._thickness
@thickness.setter
def thickness(self, value):
if not isinstance(value, (float, int)):
msg = f'VacuumVessel.thickness must be a number. Not {value}'
raise ValueError(msg)
if value <= 0:
msg = (
f'VacuumVessel.thickness must be a positive number above 0. Not {value}')
raise ValueError(msg)
self._thickness = value
def find_points(self):
"""
Finds the XZ points joined by straight and circle connections that describe the
2D profile of the vessel shape.
"""
radius = self.radius
thickness = self.thickness
bottom_outer_x, bottom_outer_y = self.outer_start_point
top_outer_y = bottom_outer_y + (4 * radius)
top_outer_x = bottom_outer_x
inner_r = radius - thickness
bottom_outer_x, bottom_outer_y, thickness, radius, top_outer_x, top_outer_y, inner_r = float(bottom_outer_x), float(
bottom_outer_y), float(thickness), float(radius), float(top_outer_x), float(top_outer_y), float(inner_r)
p1 = (bottom_outer_x, bottom_outer_y, 'circle')
p3 = (p1[0] + radius, p1[1] + radius, 'straight')
p4 = (p3[0], p3[1] + radius * 2, 'circle')
p6 = (top_outer_x, top_outer_y, 'straight')
p7 = (p6[0], p6[1] - thickness, 'circle')
p9 = (p4[0] - thickness, p4[1], 'straight')
p10 = (p3[0] - thickness, p3[1], 'circle')
p12 = (p1[0], p1[1] + thickness, 'straight')
p2 = ((p1[0]) + (radius * math.cos((3 * math.pi) / 8)), (p1[1] +
radius) - (radius * math.sin((3 * math.pi) / 8)), 'circle')
p5 = ((p6[0] + (radius * math.cos((2 * math.pi) / 8))), (p6[1] -
radius) + (radius * math.sin((2 * math.pi) / 8)), 'circle')
p8 = ((p7[0] + (inner_r * math.cos((2 * math.pi) / 8))), (p7[1] -
inner_r) + (inner_r * math.sin((2 * math.pi) / 8)), 'circle')
p11 = ((p12[0]) + (inner_r * math.cos((3 * math.pi) / 8)),
(p12[1] + inner_r) - (inner_r * math.sin((3 * math.pi) / 8)),
'circle')
self.points = [
p1,
p2,
p3,
p4,
p5,
p6,
p7,
p8,
p9,
p10,
p11,
p12
]
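# --- Usage sketch with hypothetical dimensions (not taken from the docs): construct
# the shape and inspect the 2D profile computed by find_points(). Assumes paramak and
# its CadQuery dependency are installed.
if __name__ == '__main__':
    vessel = CapsuleVacuumVessel(radius=300, outer_start_point=(0, -600), thickness=10)
    vessel.find_points()
    print(vessel.points[:3])  # first few (x, z, connection-type) tuples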
|
from what_apps.contact.models import ContactInfo, PhoneNumber
def setup(userprofile=None):
rusty_contact = ContactInfo.objects.create(
address="7 somewhere ave.",
address_line2="apt. 69",
city="New Paltz",
state="New York",
postal_code=12561
)
rusty_contact.userprofile = userprofile
rusty_contact.save()
home_number = PhoneNumber.objects.create(number=8455556669,
owner=rusty_contact,
type="home")
work_number = PhoneNumber.objects.create(number=8455559996,
owner=rusty_contact,
type="work")
return rusty_contact, home_number, work_number
|
#!/usr/bin/env python3
import argparse
import os
from lxml import etree
from itertools import zip_longest
from funcy import collecting
from tqdm import tqdm
import json
import re
import fnmatch
NSMAP = {'fb2': 'http://www.gribuser.ru/xml/fictionbook/2.0'}
MAX_PATH_DEPTH = 128
def parse_args():
p = argparse.ArgumentParser(description='tool to rename fb2 files')
p.add_argument('--path', help='path to file or folder with files')
p.add_argument('--config')
    p.add_argument('-n', action='store_true', help='dry run: only print planned renames')
return p.parse_args()
def get_node(root, *args):
xpath = '/'.join(f'fb2:{e}' for e in args)
nodes = root.xpath(f'//{xpath}', namespaces=NSMAP)
return [n.text for n in nodes if n.text]
@collecting
def get_authors(title_info):
for last, first in zip_longest(
get_node(title_info, 'author', 'last-name'),
get_node(title_info, 'author', 'first-name'),
):
if first and last:
yield f'{last} {first}'
SANITIZE_REGEXS = [
    re.compile(r'^\d+\)'),
    re.compile(r'[^а-яА-Яa-zA-Z0-9 \.,-]'),
]
def clean_string(s):
if not s:
return '', False
if isinstance(s, list):
s = s[0]
new_s = s.strip()
for r in SANITIZE_REGEXS:
new_s = re.sub(r, '', new_s)
new_s = new_s.strip()
return new_s, s != new_s
def file_info(path, pattern):
try:
tree = etree.parse(path)
except etree.XMLSyntaxError as e:
return {'path': path, 'broken': e}
title_info = tree.getroot().xpath('//fb2:description/fb2:title-info', namespaces=NSMAP)[0]
modified = False
titles_raw = get_node(title_info, 'book-title')
title, titles_modified = clean_string(titles_raw)
modified = modified or titles_modified
authors_raw = get_authors(title_info)
author, authors_modified = clean_string(authors_raw)
modified = modified or authors_modified
modified = modified or len(titles_raw) != 1 or len(authors_raw) != 1
new_name = pattern.format(author=author, title=title)
return {
'name': new_name,
# 'full': repr(authors_raw) + " | " + repr(titles_raw),
# 'modified': modified,
'path': path,
'broken': 'empty name' if not new_name else None
}
def get_files(root, relpath='', file_name='', depth=0):
if depth >= MAX_PATH_DEPTH:
raise Exception(f'Too deep path {relpath}')
full_path = os.path.join(root, relpath) if relpath else root
full_file_path = os.path.join(full_path, file_name)
if os.path.isfile(full_file_path) and full_file_path.endswith('.fb2'):
yield relpath, full_path, full_file_path
if os.path.isdir(full_file_path):
for fname in os.listdir(full_file_path):
yield from get_files(root, os.path.join(relpath, file_name), fname, depth=depth + 1)
def load_config(path):
if path is None and os.path.isfile('config.json'):
path = 'config.json'
if path:
with open(path, 'r') as cfg_file:
return json.load(cfg_file)
return {'patterns': []}
def get_pattern(relpath, config):
for path_pattern, name_pattern in config['patterns']:
if fnmatch.fnmatch(relpath, path_pattern):
return name_pattern
return '{author} - {title}'
def main(args):
config = load_config(args.config)
for relpath, full_path, full_file_path in tqdm(get_files(args.path)):
fname_pattern = get_pattern(relpath, config)
row = file_info(full_file_path, fname_pattern)
if row['broken']:
print(row)
continue
new_path = os.path.join(full_path, row['name']) + '.fb2'
if args.n:
if full_file_path != new_path:
print('> ', full_file_path, ' -> ', new_path)
else:
os.rename(full_file_path, new_path)
if __name__ == '__main__':
args = parse_args()
main(args)
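# --- Example config.json (hypothetical patterns): load_config() expects a
# "patterns" list of [path_glob, name_pattern] pairs, matched against the
# relative path by get_pattern(); name patterns may use {author} and {title}.
#
# {
#     "patterns": [
#         ["fantasy/*", "{author} - {title}"],
#         ["magazines/*", "{title}"]
#     ]
# }
#
# Dry run over a folder (script name here is hypothetical):
#     python rename_fb2.py --path ./books -n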
|
# Elizabeth A. Barnes and Randal J. Barnes
# March 3, 2021
# v1.1
import numpy as np
import random
import scipy.stats as stats
import matplotlib.pyplot as plt
#----------------------------------------------------------------
def get_data(EXPINFO, to_plot=False):
# make the data
slope = EXPINFO['slope']
yint = EXPINFO['yint']
noise = EXPINFO['noise']
x_sigma = EXPINFO['x_sigma']
n_samples = EXPINFO['n_samples']
y_data = []
x_data = []
tr_data = []
for i in np.arange(0,len(slope)):
if i==0:
x = np.random.normal(4.,x_sigma[i],n_samples[i])
tr = np.zeros(np.shape(x))
else:
x = np.random.normal(0,x_sigma[i],n_samples[i])
tr = np.ones(np.shape(x))
y = slope[i] * x + yint[i] + np.random.normal(0,noise[i],n_samples[i])
x_data = np.append(x_data,x)
y_data = np.append(y_data,y)
tr_data = np.append(tr_data,tr)
x_data = np.asarray(x_data)
y_data = np.asarray(y_data)
tr_data = np.asarray(tr_data)
print('\nnoise fraction = ' + str(np.round(100*(1.-len(x)/len(x_data)))) + '%')
if to_plot:
plt.figure(figsize=(10,4))
plt.subplot(1,2,1)
plt.title('all data')
plt.plot(x_data,y_data,'.',color='tab:blue',markeredgecolor=None,linewidth=.1,markersize=.2)
reg = stats.linregress(x=x_data,y=y_data)
plt.plot(x_data,x_data*reg.slope+reg.intercept,'--',color='tab:gray',linewidth=.5,label='OLS best fit')
plt.legend()
# mix the data
shuffler = np.random.permutation(len(x_data))
x_data = x_data[shuffler]
y_data = y_data[shuffler]
tr_data = tr_data[shuffler]
# grab training and validation
n = np.shape(x_data)[0]
n_val = 1000
n_test = 1000
n_train = n - (n_val + n_test)
x_train = x_data[:n_train]
y_train = y_data[:n_train]
tr_train = tr_data[:n_train]
x_val = x_data[n_train:n_train+n_val]
y_val = y_data[n_train:n_train+n_val]
tr_val = tr_data[n_train:n_train+n_val]
x_test = x_data[n_train+n_val:n_train+n_val+n_test]
y_test = y_data[n_train+n_val:n_train+n_val+n_test]
tr_test = tr_data[n_train+n_val:n_train+n_val+n_test]
print('training samples shapes = ' + str(np.shape(x_train)))
print('validation samples shapes = ' + str(np.shape(x_val)))
print('testing samples shapes = ' + str(np.shape(x_test)))
# standardize the data
print('not actually standardizing the data\n')
xmean = 0.#np.nanmean(x_train)
xstd = 1.#np.nanstd(x_train)
X_train_std = (x_train[:,np.newaxis] - xmean)/xstd
X_val_std = (x_val[:,np.newaxis] - xmean)/xstd
X_test_std = (x_test[:,np.newaxis] - xmean)/xstd
# create one hot vectors
N_CLASSES = EXPINFO['numClasses']
onehot_train = np.zeros((len(y_train),N_CLASSES))
onehot_train[:,0] = y_train
onehot_val = np.zeros((len(y_val),N_CLASSES))
onehot_val[:,0] = y_val
onehot_test = np.zeros((len(y_test),N_CLASSES))
onehot_test[:,0] = y_test
if to_plot:
plt.subplot(1,2,2)
plt.plot(x_train,y_train,'.',markersize=.5,label='training')
plt.plot(x_val,y_val,'o',markerfacecolor='None',markersize=.5,label='validation')
plt.title('training and validation data')
plt.legend()
plt.show()
return X_train_std, onehot_train, X_val_std, onehot_val, X_test_std, onehot_test, xmean, xstd, tr_train, tr_val, tr_test |
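#----------------------------------------------------------------
# Example EXPINFO dictionary (hypothetical values): each list index defines one
# sub-population. Index 0 is drawn around x=4 with tr flag 0; later indices are
# drawn around x=0 with tr flag 1. The n_samples entries must sum to well above
# the 2000 samples reserved for validation and testing above.
# EXPINFO = {
#     'slope': [1.0, 0.0],
#     'yint': [0.0, 0.0],
#     'noise': [0.5, 2.0],
#     'x_sigma': [1.0, 3.0],
#     'n_samples': [8000, 2000],
#     'numClasses': 2,
# }
# data = get_data(EXPINFO, to_plot=False)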
#!/usr/bin/env python
# This is a reimplementation of the default real-time feedback experiment
# distributed with AFNI, implemented in realtime_receiver.py, using WX and
# Matplotlib for generating the GUI and plotting the results.
#
# This replaces the default GUI toolkit with PsychoPy, and will draw the
# same results and shapes to a PsychoPy window, in a manner synchronous
# with the old toolkit.
#
# This will serve as a basis or a template to build neuro-feedback type
# of experiment that can get data from AFNI (through the 'afniRTI' module,
# also distributed here).
import sys
import logging
from optparse import OptionParser
import numpy as np
import afniRTI as nf
try:
from psychopy import visual, core # , sound
psychopyInstalled = 1
except ImportError:
psychopyInstalled = 0
class DemoExperiment(object):
def __init__(self, options):
self.TR_data = []
# dc_params = [P1, P2]
# P1 = dr low limit, P2 = scalar -> [0,1]
# result is (dr-P1)*P2 {applied in [0,1]}
self.dc_params = []
self.show_data = options.show_data
print ("++ Initializing experiment stimuli")
self.setupExperiment()
def setupExperiment(self):
"""create the GUI for display of the demo data"""
# self.exptWindow = visual.Window(fullscr=options.fullscreen, allowGUI=False)
self.exptWindow = visual.Window([1280, 720], allowGUI=False)
        # For this demonstration experiment, set corners of the "active area" (where
# we will "draw") to be a square in the middle of a 16:9 screen.
self.nPlotPoints = 10
self.xMax = 0.50625
self.xMin = self.xMax * -1.0
self.xDelta = (self.xMax - self.xMin) / (1.0 * self.nPlotPoints)
self.yMax = 0.9
self.yMin = self.yMax * -1.0
self.yDelta = (self.yMax - self.yMin) / (1.0 * self.nPlotPoints)
# Now divide this area into a series of vertical rectangles that we will draw
# to when we have results.
self.stimAreaCorners = [None] * self.nPlotPoints
self.drawnCorners = [None] * self.nPlotPoints
for i in range(self.nPlotPoints):
self.stimAreaCorners[i] = np.array ([[(self.xMin + (self.xDelta*(i+1))), self.yMin],
[(self.xMin + (self.xDelta*(i+1))), self.yMin],
[(self.xMin + (self.xDelta*(i+0))), self.yMin],
[(self.xMin + (self.xDelta*(i+0))), self.yMin]])
self.drawnCorners[i] = self.stimAreaCorners[i]
displayArea = visual.ShapeStim (self.exptWindow, vertices = self.stimAreaCorners[i],
autoLog = False, fillColor = [1, 1, 1])
self.exptWindow.flip()
def runExperiment (self, data):
"""
After data is received and processed by the 'compute_TR_data' routine,
call this routine to update the display, or whatever stimulus is being
generated for the experiment. This update should be a consistent
follow on to what was done in the 'setupExperiment' routine.
"""
length = len(data)
if length == 0:
return
if self.show_data:
print('-- TR %d, demo value: %s' % (length, data[length - 1][0]))
if True:
if length > 10:
bot = length - 10
else:
bot = 0
pdata = [data[ind][0] for ind in range(bot, length)]
# To update the rectangles to be drawn, with the results of the stimulus modeling, add
# the new data to the base shapes (using the simple element-by-element addition done
            # by numpy's matrix operations). Also, update the display area as every shape is updated
# to avoid drawing artifacts, where vertices get incorrectly assigned to the area to be
# drawn.
for i in range(self.nPlotPoints):
if (len(data) - 1 - i) > 0:
plotIndex = self.nPlotPoints - 1 - i
self.drawnCorners[plotIndex] = np.array ([
[0.0, (self.yDelta * data[len(data) - 1 - i][0])],
[0.0, 0.0],
[0.0, 0.0],
[0.0, (self.yDelta * data[len(data) - 1 - i][0])]
]) + self.stimAreaCorners[plotIndex]
displayArea = visual.ShapeStim (self.exptWindow, vertices = self.drawnCorners[plotIndex],
autoLog = False, fillColor = [-1, -1, 1])
displayArea.draw()
self.exptWindow.flip()
def compute_TR_data(self, motion, extra):
"""If writing to the serial port, this is the main function to compute
results from motion and/or extras for the current TR and
return it as an array of floats.
Note that motion and extras are lists of time series of length nread,
so processing a time series is easy, but a single TR requires extracting
the data from the end of each list.
        The possible computations are based on data_choice, specified by the user
option -data_choice. If you want to send data that is not listed, just
add a condition.
** Please add each data_choice to the -help. Search for motion_norm to
find all places to edit.
return 2 items:
error code: 0 on success, -1 on error
data array: (possibly empty) array of data to send
"""
print("++ Entering compute TR data")
# # case 'motion': send all motion
# if rec.data_choice == 'motion':
# if rti.nread > 0:
# return 0, [rti.motion[ind][rti.nread - 1] for ind in range(6)]
# else:
# return -1, []
# # case 'motion_norm': send Euclidean norm of motion params
# # --> sqrt(sum of squared motion params)
# elif rec.data_choice == 'motion_norm':
# if rti.nread > 0:
# motion = [rti.motion[ind][rti.nread - 1] for ind in range(6)]
# return 0 # , [UTIL.euclidean_norm(motion)]
# else:
# return -1, []
# # case 'all_extras': send all extra data
# elif rec.data_choice == 'all_extras':
# if rti.nextra > 0:
# return 0, [rti.extras[i][rti.nread - 1] for i in range(rti.nextra)]
# else:
# return -1, []
# # case 'diff_ratio': (a-b)/(abs(a)+abs(b))
# elif rec.data_choice == 'diff_ratio':
npairs = len(extra) // 2
print(npairs)
if npairs <= 0:
print('** no pairs to compute diff_ratio from...')
return None
# modify extra array, setting the first half to diff_ratio
for ind in range(npairs):
a = extra[2 * ind]
b = extra[2 * ind + 1]
if a == 0 and b == 0:
newval = 0.0
else:
newval = (a - b) / float(abs(a) + abs(b))
# --------------------------------------------------------------
# VERY data dependent: convert from diff_ratio to int in {0..10}
# assume AFNI_data6 demo 15 Jan
# 2013
# now scale [bot,inf) to {0..10}, where val>=top -> 10
# AD6: min = -0.1717, mean = -0.1605, max = -0.1490
bot = -0.17 # s620: bot = 0.008, scale = 43.5
scale = 55.0 # =~ 1.0/(0.1717-0.149), rounded up
if len(self.dc_params) == 2:
bot = self.dc_params[0]
scale = self.dc_params[1]
val = newval - bot
if val < 0.0:
val = 0.0
ival = int(10 * val * scale)
if ival > 10:
ival = 10
extra[ind] = ival
print('++ diff_ratio: ival = %d (from %s), (params = %s)' %
(ival, newval, self.dc_params))
# save data and process
self.TR_data.append(extra[0:npairs])
self.runExperiment(self.TR_data)
return extra[0:npairs] # return the partial list
def processExperimentOptions (self, options=None):
"""
Process command line options for on-going experiment.
Customize as needed for your own experiments.
"""
usage = "%prog [options]"
description = "AFNI real-time demo receiver with demo visualization."
parser = OptionParser(usage=usage, description=description)
parser.add_option("-d", "--debug", action="store_true",
help="enable debugging output")
parser.add_option("-v", "--verbose", action="store_true",
help="enable verbose output")
parser.add_option("-p", "--tcp_port", help="TCP port for incoming connections")
parser.add_option("-S", "--show_data", action="store_true",
help="display received data in terminal if this option is specified")
parser.add_option("-w", "--swap", action="store_true",
help="byte-swap numerical reads if set")
parser.add_option("-f", "--fullscreen", action="store_true",
help="run in fullscreen mode")
return parser.parse_args(options)
def main():
opts, args = processExperimentOptions(sys.argv)
if (psychopyInstalled == 0):
print("")
print(" *** This program requires the PsychoPy module.")
print(" *** PsychoPy was not found in the PYTHONPATH.")
print(" *** Please install PsychoPy before trying to use")
print(" this module.")
print("")
return -1
if opts.verbose and not opts.debug:
nf.add_stderr_logger(level=logging.INFO)
elif opts.debug:
nf.add_stderr_logger(level=logging.DEBUG)
print("++ Starting Demo...")
demo = DemoExperiment(opts)
# create main interface
receiver = nf.ReceiverInterface(port=opts.tcp_port, swap=opts.swap,
show_data=opts.show_data)
if not receiver:
return 1
# set signal handlers and look for data
receiver.set_signal_handlers() # require signal to exit
# set receiver callback
receiver.compute_TR_data = demo.compute_TR_data
# prepare for incoming connections
if receiver.RTI.open_incoming_socket():
return 1
rv = receiver.process_one_run()
return rv
if __name__ == '__main__':
sys.exit(main())
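# --- Hypothetical invocation (script name and port are example values only):
#     python realtime_psychopy_demo.py --tcp_port 53214 --show_data --verbose
# PsychoPy must be importable, and AFNI's real-time plugin should be configured
# to stream motion/extra data to the chosen TCP port.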
|
from bddrest import response, when, status, given
from card_holder.models import Foo
from card_holder.tests.helpers import LocalApplicationTestCase
class TestFoo(LocalApplicationTestCase):
@classmethod
def mockup(cls):
session = cls.create_session()
# Adding 5 Foos
for i in range(5):
session.add(Foo(title=f'Foo {i}'))
session.commit()
def test_foo_crud(self):
# Creating a new Foo!
with self.given(
'Create a new Foo',
'/apiv1/foos',
'CREATE',
form=dict(title='First foo')
):
assert status == 200
assert 'title' in response.json
assert response.json['title'] == 'First foo'
assert response.json['createdAt'] is not None
assert response.json['modifiedAt'] is None
foo_id = response.json['id']
# Edit it!
when(
'Updating the title',
f'/apiv1/foos/id: {foo_id}',
'EDIT',
form=given | dict(title='First foo(edited)')
)
assert status == 200
assert response.json['title'] == 'First foo(edited)'
assert response.json['modifiedAt'] is not None
# Get it!
when(
'Retrieve the first foo',
f'/apiv1/foos/id: {foo_id}',
'GET',
form=dict(),
)
assert status == 200
assert response.json['title'] == 'First foo(edited)'
assert response.json['id'] == foo_id
# Delete it!
when(
'Removing the first foo',
f'/apiv1/foos/id: {foo_id}',
'DELETE',
form=None
)
assert status == 200
assert response.json['title'] == 'First foo(edited)'
assert response.json['id'] == foo_id
# Get it again to ensure it removed
when(
'Retrieve the first foo',
f'/apiv1/foos/id: {foo_id}',
'GET',
form={},
)
assert status == 404
def test_foo_list(self):
# Listing all foos
with self.given(
'Listing all Foos',
'/apiv1/foos',
'LIST',
):
assert status == 200
assert len(response.json) >= 5
when(
'Paginating',
query=dict(take=1, skip=2, sort='id')
)
assert status == 200
assert len(response.json) == 1
assert response.json[0]['title'] == 'Foo 2'
|
#!/usr/bin/env python
import argparse
def get_new_flaky_test_names(known_flakys_tests, current_run_flakys_tests):
    return list(set(current_run_flakys_tests) - set(known_flakys_tests))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Diff two files with flaky tests to get the new ones')
parser.add_argument('known_flaky_file', type=argparse.FileType('r'))
parser.add_argument('new_flaky_file', type=argparse.FileType('r'))
args = parser.parse_args()
known_flakys = args.known_flaky_file.readlines()
current_run_flakys = args.new_flaky_file.readlines()
for flaky_test_name in get_new_flaky_test_names(known_flakys, current_run_flakys):
print(flaky_test_name)
|
from urllib.parse import urlparse
from django.core.validators import integer_validator
from rest_framework import serializers
from datahub.company.constants import BusinessTypeConstant
from datahub.company.models import Company
from datahub.company.serializers import CompanySerializer
from datahub.company.validators import (
has_no_invalid_company_number_characters,
has_uk_establishment_number_prefix,
)
from datahub.core.constants import Country
from datahub.core.serializers import (
NestedRelatedField,
PermittedFieldsModelSerializer,
RelaxedURLField,
)
from datahub.core.validators import EqualsRule, OperatorRule, RulesBasedValidator, ValidationRule
from datahub.interaction.models import InteractionPermission
from datahub.metadata.models import AdministrativeArea, Country as CountryModel
class SerializerNotPartialError(Exception):
"""
Exception for when some logic was called which expects a serializer object
to be self.partial=True.
"""
class DNBMatchedCompanySerializer(PermittedFieldsModelSerializer):
"""
Serialiser for data hub companies matched with a DNB entry.
"""
latest_interaction = serializers.SerializerMethodField()
def get_latest_interaction(self, obj):
"""
Construct a latest interaction object from the latest_interaction_id,
latest_interaction_date and latest_interaction_subject query set annotations.
"""
if not obj.latest_interaction_id:
return None
return {
'id': obj.latest_interaction_id,
'created_on': obj.latest_interaction_created_on,
# For consistency with the main interaction API, only return the date part.
# See InteractionSerializer for more information
'date': obj.latest_interaction_date.date(),
'subject': obj.latest_interaction_subject,
}
class Meta:
model = Company
fields = (
'id',
'latest_interaction',
)
permissions = {
f'interaction.{InteractionPermission.view_all}': 'latest_interaction',
}
class DNBCompanySerializer(CompanySerializer):
"""
For creating a company from DNB data.
Essentially makes the DNB fields writable and removes the validators
that make: sector, business_type and uk_region fields required.
    TODO: The validators will be put back in once we have done the work for
    unpacking these fields from the DNB payload, so this particular change
    is temporary.
"""
duns_number = serializers.CharField(
max_length=9,
min_length=9,
validators=(integer_validator,),
)
global_ultimate_duns_number = serializers.CharField(
allow_blank=True,
max_length=9,
min_length=9,
validators=(integer_validator, ),
)
class Meta(CompanySerializer.Meta):
read_only_fields = []
dnb_read_only_fields = []
validators = (
RulesBasedValidator(
ValidationRule(
'required',
OperatorRule('company_number', bool),
when=EqualsRule(
'business_type',
BusinessTypeConstant.uk_establishment.value.id,
),
),
ValidationRule(
'invalid_uk_establishment_number_characters',
OperatorRule('company_number', has_no_invalid_company_number_characters),
when=EqualsRule(
'business_type',
BusinessTypeConstant.uk_establishment.value.id,
),
),
ValidationRule(
'invalid_uk_establishment_number_prefix',
OperatorRule('company_number', has_uk_establishment_number_prefix),
when=EqualsRule(
'business_type',
BusinessTypeConstant.uk_establishment.value.id,
),
),
),
RulesBasedValidator(
ValidationRule(
'uk_establishment_not_in_uk',
EqualsRule('address_country', Country.united_kingdom.value.id),
when=EqualsRule(
'business_type',
BusinessTypeConstant.uk_establishment.value.id,
),
),
),
)
def partial_save(self, **kwargs):
"""
Method to save the instance - by writing only the fields updated by the serializer.
Takes kwargs to override the values of specific fields for the model on save.
        Note: modified_on will not be updated by this method - this is the original
        reason for this method to exist, as modified_on has auto_now=True, which makes
        it difficult to prevent updates to this field.
"""
if not self.partial:
raise SerializerNotPartialError(
'partial_save() called, but serializer is not set as partial.',
)
instance = self.instance
validated_data = {**self.validated_data, **kwargs}
for field, value in validated_data.items():
setattr(instance, field, value)
update_fields = validated_data.keys()
instance.save(update_fields=update_fields)
class DUNSNumberSerializer(serializers.Serializer):
"""
Parses duns_number from request body and validates format.
"""
duns_number = serializers.CharField(
write_only=True,
max_length=9,
min_length=9,
validators=(integer_validator,),
)
def validate_duns_number(self, duns_number):
"""
Check if the duns_number is valid i.e. isn't already assigned
to another company.
"""
if Company.objects.filter(duns_number=duns_number).exists():
raise serializers.ValidationError(
f'Company with duns_number: {duns_number} already exists in DataHub.',
)
return duns_number
# TODO: Remove this once the D&B investigations endpoint has been released
class LegacyDNBInvestigationDataSerializer(serializers.Serializer):
"""
Serializer for DNBInvestigationData - a JSON field that contains
    auxiliary data needed for submitting to DNB for investigation.
"""
telephone_number = serializers.CharField(
required=False,
allow_null=True,
allow_blank=True,
)
class DNBCompanyLinkSerializer(DUNSNumberSerializer):
"""
Validate POST data for DNBCompanyLinkView.
"""
company_id = NestedRelatedField('company.Company', required=True)
class DNBAddressSerializer(serializers.Serializer):
"""
Validate address and convert it to the format expected by dnb-service.
"""
line_1 = serializers.CharField(source='address_line_1')
line_2 = serializers.CharField(source='address_line_2', required=False, allow_blank=True)
town = serializers.CharField(source='address_town')
county = serializers.CharField(source='address_county', required=False, allow_blank=True)
postcode = serializers.CharField(source='address_postcode', required=False, allow_blank=True)
country = NestedRelatedField(model=CountryModel, source='address_country')
area = NestedRelatedField(model=AdministrativeArea, source='address_area', required=False)
def validate_area(self, area):
"""
Return area name and abbrev_name as an object.
"""
return {
'name': area.name,
'abbrev_name': area.area_code,
}
def validate_country(self, country):
"""
Return iso_alpha2_code only.
"""
return country.iso_alpha2_code
class AddressRequestSerializer(DNBAddressSerializer):
"""
Validate address and convert it to the format expected by dnb-service.
"""
line_1 = serializers.CharField(source='address_line_1', required=False)
town = serializers.CharField(source='address_town', required=False)
country = NestedRelatedField(model=CountryModel, source='address_country', required=False)
class ChangeRequestSerializer(serializers.Serializer):
"""
Validate change requests and convert it to the format expected by dnb-service.
"""
name = serializers.CharField(source='primary_name', required=False)
trading_names = serializers.ListField(required=False)
number_of_employees = serializers.IntegerField(source='employee_number', required=False)
turnover = serializers.IntegerField(source='annual_sales', required=False)
address = AddressRequestSerializer(required=False)
website = RelaxedURLField(source='domain', required=False)
def validate_website(self, website):
"""
Change website to domain.
"""
return urlparse(website).netloc
class DNBCompanyChangeRequestSerializer(serializers.Serializer):
"""
Validate POST data for DNBCompanyChangeRequestView and convert it to the format
expected by dnb-service.
"""
duns_number = serializers.CharField(
max_length=9,
min_length=9,
validators=(integer_validator,),
)
changes = ChangeRequestSerializer()
def validate_duns_number(self, duns_number):
"""
Validate duns_number.
"""
try:
company = Company.objects.get(duns_number=duns_number)
except Company.DoesNotExist:
raise serializers.ValidationError(
                f'Company with duns_number: {duns_number} does not exist in DataHub.',
)
self.company = company
return duns_number
def validate_changes(self, changes):
"""
Changes should not be empty.
"""
if not changes:
raise serializers.ValidationError(
'No changes submitted.',
)
return changes
def validate(self, data):
"""
Augment address changes with unchanged address fields and un-nest address changes.
"""
address_changes = data['changes'].pop('address', {})
if address_changes:
existing_address_data = {
'address_line_1': self.company.address_1,
'address_line_2': self.company.address_2,
'address_town': self.company.address_town,
'address_county': self.company.address_county,
'address_country': self.company.address_country.iso_alpha2_code,
'address_postcode': self.company.address_postcode,
}
if self.company.address_area:
existing_address_data.update({
'address_area': {
'name': self.company.address_area.name,
'abbrev_name': self.company.address_area.area_code,
},
})
data['changes'] = {
**data['changes'],
**existing_address_data,
**address_changes,
}
return data
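# Illustrative request body for DNBCompanyChangeRequestSerializer (all values are
# made up): nested "address" changes are merged with the company's existing
# address fields by validate() before being forwarded to dnb-service.
#
# {
#     "duns_number": "123456789",
#     "changes": {
#         "name": "Acme Ltd",
#         "website": "https://example.com/about",
#         "address": {
#             "line_1": "1 Example Street",
#             "town": "London",
#             "country": {"id": "<uk-country-uuid>"}
#         }
#     }
# }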
class DNBGetCompanyChangeRequestSerializer(serializers.Serializer):
"""
Validate GET data for DNBCompanyChangeRequestView
"""
duns_number = serializers.CharField(
max_length=9,
min_length=9,
validators=(integer_validator,),
)
status = serializers.ChoiceField(
choices=['pending', 'submitted'],
required=False,
allow_null=True,
)
def validate_duns_number(self, duns_number):
"""
Validate duns_number.
"""
try:
company = Company.objects.get(duns_number=duns_number)
except Company.DoesNotExist:
raise serializers.ValidationError(
                f'Company with duns_number: {duns_number} does not exist in DataHub.',
)
self.company = company
return duns_number
class DNBCompanyInvestigationSerializer(serializers.Serializer):
"""
Validate POST data for DNBCompanyInvestigationView and convert it to the format
expected by dnb-service.
"""
company = NestedRelatedField(Company)
name = serializers.CharField(source='primary_name')
address = DNBAddressSerializer()
website = RelaxedURLField(
source='domain',
required=False,
allow_blank=True,
)
telephone_number = serializers.CharField(
required=False,
allow_blank=True,
)
def validate_website(self, website):
"""
Change website to domain.
"""
return urlparse(website).netloc
def validate(self, data):
"""
Validate if either website or telephone_number is present.
"""
data = super().validate(data)
if (
data.get('domain') in (None, '')
and data.get('telephone_number') in (None, '')
):
raise serializers.ValidationError(
'Either website or telephone_number must be provided.',
)
address_data = data.pop('address', {})
return {
**data,
**address_data,
}
|
"""
Some constants.
"""
VERSION = "0.1.0"
BAT = "bat"
LIGHT_THEME = "GitHub"
LEFT_SIDE, RIGHT_SIDE = range(2)
|
import base64
import time
import cv2
def init():
"""
This method will be run once on startup. You should check if the supporting files your
model needs have been created, and if not then you should create/fetch them.
"""
# Placeholder init code. Replace the sleep with check for model files required etc...
global __model
__model = 1
time.sleep(1)
def predict(prediction_object_path):
"""
Interface method between model and server. This signature must not be
changed and your model must be able to create a prediction from the object
file that is passed in.
Depending on the model type as defined in model/config.py, this method will receive a different input:
'object' : Model receives a file name to an image file, opens it, and creates a prediction
'text' : Model receives a string of text and uses it to create a prediction.
Note: All objects are stored in the directory '/app/objects/' in the Docker container. You may assume that the file
path that is passed to this method is valid and that the image file exists.
prediction_object_path will be in the form: "app/objects/file_name", where file_name is the video, image, etc. file.
"""
cap = cv2.VideoCapture(prediction_object_path)
return {
'classes': ['isGreen', 'isRed'], # List every class in the classifier
'result': { # For results, use the class names above with the result value
'isGreen': 0,
'isRed': __model # Note that we reference the variable we used in init(). This will be returned as 1.
}
}
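if __name__ == '__main__':
    # Local smoke test with a hypothetical object path; in production the server
    # calls init() once at startup and predict() per request.
    init()
    print(predict('app/objects/example_video.mp4'))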
|
from django.apps import AppConfig
class AweConfig(AppConfig):
name = "awe"
|
import pandas as pd
from itertools import product
# TODO switch back the allow_output_mutation=True once bug
#@st.cache
def cache_game_data(q, f, _db):
returned_games = pd.DataFrame(list(_db.games.find(q, f)))
return returned_games
def get_teams_list(_db):
"""Get a list of all the teams with at least one game ever
Keyword Arguments:
_db {database connection} -- Connection to MongoDB
Returns:
List -- Every team with at least 1 game played ever
"""
pipeline = [{'$group': {'_id': {'Team': '$TmName'}}},
{'$sort': {'_id': 1}}
]
results = _db.games.aggregate(pipeline)
teams_list = []
for itm in results:
teams_list.append(itm['_id']['Team'])
return teams_list
def get_seasons_list(_db):
'''
    Returns a list of the seasons present in the games collection
'''
pipeline = [{'$group': {'_id': {'Season': '$Season'}}},
{'$sort': {'_id': -1}}
]
results = _db.games.aggregate(pipeline)
seasons_list = []
for itm in results:
seasons_list.append(itm['_id']['Season'])
return seasons_list
if __name__ == "__main__":
pass
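# --- Usage sketch (hypothetical connection details): the helpers expect a pymongo
# database handle whose `games` collection stores documents with 'TmName' and
# 'Season' fields.
#
# from pymongo import MongoClient
# _db = MongoClient('mongodb://localhost:27017')['ncaa_basketball']
# teams = get_teams_list(_db)
# seasons = get_seasons_list(_db)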
|
import unittest
import redis
import time
from concurrent.futures import ProcessPoolExecutor
r = redis.Redis(host='localhost', port=6379)
dt = '2020-04-25'
user_pin = 'mythling'
vec = [-0.23724064818168949520, 0.03306523783586075987, -0.08497630252384760774, 0.00895396226354287957,
0.10745647159054654007, -0.06007996182176951472, 0.09807567305915147748, 0.04517341231445454880,
0.12830542233198868884, -0.02580306232211793463, 0.09334919858070409027, -0.08075936510676832991,
-0.02655472569023659135, 0.05763216406401765191, -0.05663989662423555421, -0.08077605943623882012,
-0.03233934762865926793, 0.16573923394748041327, 0.17399052264918438149, 0.09557131892284054242,
0.00204232984952185895, 0.05230338753521064643, -0.01064952422894490519, -0.04843736902712864056,
0.04646897250562663506, -0.09572570387462560337, 0.12450353528963929717, 0.09844697635962633420,
-0.03531335661602759413, 0.11109076320896668633, 0.12877087194667563574, 0.12839790625502467414,
0.10147439701692843872, -0.05558407922550586550, -0.26024491590949117015, 0.04612586258282809493,
0.14092355793694610888, -0.00374342755702073695, -0.08694303570962516603, 0.01066794871046804205,
-0.20557679018022576023, 0.15302466276411028079, -0.04001982194461325276, 0.02555410684223100026,
-0.03748401520830615258, 0.06375220568226405704, -0.00491887975888972897, 0.14341569113883573250,
0.08343059592153272142, 0.01606452955576140318, 0.00448266405590822762, 0.03057380935080977746,
-0.05907041497226815652, -0.05023023045581508778, -0.11030184541063800530, -0.04896947475971725333,
0.06843204356874477146, 0.00396782038685007232, 0.02051782197271388719, 0.05011044869397734275,
0.04945552985999505935, 0.06884093574865350618, -0.02741847357918492448, 0.06387297744246651920,
-0.01619138379802090646, 0.02478814991673281704, -0.02126434982668039611, 0.00521734706855870364,
-0.02036494919169017440, -0.06333277818662474967, 0.05317773610449808702, 0.10955944788523780398,
-0.20640194436243136655, -0.24228011254061559976, -0.03009068240632090108, 0.07533602169777038615,
0.15429981836535905382, 0.03000405464596845109, 0.09964029736939881976, 0.00323747704818638663,
-0.17655823863042255373, -0.03714495647674035533, -0.03759818067621739646, -0.02799151229725113654,
-0.10440959997424704986, 0.04524676614576279149, -0.03586511818620434500, -0.02818067896327875951,
0.14923240054869613136, -0.13316687265648780025, 0.23773228667024806637, -0.05595455153279590976,
-0.01333354849076778988, -0.00374477832633413011, 0.10391139211918978824, -0.14474010764570591214,
0.00730544584232908304, 0.31351627287547656486, 0.05082821716245113780, 0.06985860433688256821]
data = {
'user_pin': user_pin,
'dt': dt,
'vec': vec,
}
key = 'cupid-sim-%s-sim_dmcqm_lhmx_sku_rec_faiss_user_vec_scene102002_v1_s_d_d100_e100' % user_pin
r.hmset(key, data)
a = r.hgetall(key)
print(a[b'user_pin'])
#
# # cupid-sim-mythling-sim_dmcqm_lhmx_sku_rec_faiss_user_vec_scene102002_v1_s_d_d100_e100
# r.hmset('ln1', {'101001100': '101001100', 'b': 'b'})
# r.hmset('ln2', {'101001100': 'a1', 'b': 'b1'})
#
# with r.pipeline(transaction=False) as p:
# for d in ['ln1', 'ln2']:
# p.hmget(d, '101001100')
# result = p.execute()
# print(result)
|
from monstro.core.exceptions import MonstroError
class FormError(MonstroError):
pass
class ValidationError(FormError):
def __init__(self, error, field=None):
self.error = error
self.field = field
super().__init__(self.__str__())
def __str__(self):
if self.field:
return '{} - {}'.format(self.field, self.error)
return str(self.error)
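# Illustrative use (hypothetical field name): attaching a field prefixes it in the
# message, e.g. str(ValidationError('value is required', field='title')) gives
# 'title - value is required'.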
|
l1 = list(map(int, input().split()))
l2 = list(map(int, input().split()))
ans = 0
for i in range(3):
ans+=abs(l1[i]-l2[i])
if ans==3:
print("NO")
else:
print("YES") |
from collections import defaultdict
from math import ceil
from transformers.tokenization_utils import _is_control
from bootleg.symbols.constants import CLS_BERT, PAD, PAD_BERT, SEP_BERT
def determine_windowsX(
sentence, spans, aliases_seen_by_model, maxlen, mincontext, sanity_check=False
):
"""Truncate <sentence> into windows of <maxlen> tokens each.
* Returns a list of windows. Each window is a tuple with:
- The offset and endpos, indicating where it starts and ends in sentence.
- The first and the last spans that start (but maybe not end) in the window.
- The list of spans, among those from the above line, that lie within aliases2see.
* Each window will have exactly <maxlen> tokens unless the sentence itself is shorter than that.
* Windows may overlap. Conversely, large portions of the sentence may not exist in any window, particularly when
they don't contain any aliases2see.
    * Windows are determined through a greedy packing approach that guarantees that:
- Every alias in aliases2see is present in at least one window.
- Every alias in aliases2see is present in exactly one window in which it's marked as "to predict".
- The alias may share this unique window with other aliases, some of which may be 'aliases2see' as well.
- In this unique window, the alias is guaranteed to have at least <mincontext> context on its left and right.
- The exception to the above rule is if the sentence boundaries are closer than <mincontext> words.
- In that case, more words are taken from the "other" direction (e.g., right) up to <maxlen>, if possible.
- Given multiple aliases to predict in the same window, the window is centered around its leftmost and
rightmost aliases, making sure their left and right contexts---respectively---are equal.
- For all of the above, an alias's position is taken as its first token.
- Something tells me all of the above just sounds like legalese. I hope it doesn't.
Args:
sentence: sentence
spans: List of mention spans
aliases_seen_by_model: List of indexes into spans of the mentions that the model will use in the forward()
This may not be all aliases due to removing weak labels
maxlen: maximum length of window size
mincontext: minimum length of left/right context around a mention
sanity_check: whether to sanity check the above conditions
Returns: List of window boundaries in terms of tokens and mentions
"""
assert 2 * mincontext < maxlen
windows = []
alias_idx = 0
while alias_idx < len(spans):
if alias_idx not in aliases_seen_by_model:
alias_idx += 1
continue
window_first_alias = alias_idx
window_last_alias = alias_idx
# left-most possible start position is first span - mincontext
max_possible_offset = max(0, spans[alias_idx][0] - mincontext)
window_aliases2see = [window_first_alias]
# Expand with more aliases within the same window
while alias_idx + 1 < len(spans):
# Stop if adding another alias would prevent retaining mincontext to the left of window_first_alias
# We +1 to the mincontext because the ending span is exclusive
# E.g., if sentence is ["alias", "##1", "alias", "##2", "alias", "##3", "##5"] spans [0,2], [2,4], [4,7]
# To have mincontext = 1 around the start of all aliases, we need final sentence of [0:6] (6 is exclusive)
# Therefore the condition is start span (i.e., 4) plus mincontext (i.e., 1) plus 1 (i.e., total of 6)
if (
min(spans[alias_idx + 1][0] + mincontext + 1, len(sentence))
> max_possible_offset + maxlen
):
break
alias_idx += 1
window_last_alias = (
alias_idx if alias_idx in aliases_seen_by_model else window_last_alias
)
if alias_idx in aliases_seen_by_model:
window_aliases2see.append(alias_idx)
# print("MAX LEN", maxlen, "SENT LEN", len(sentence))
# print("first", window_first_alias, "second", window_last_alias, "spans", spans)
center = (spans[window_first_alias][0] + spans[window_last_alias][0]) // 2
# print("Center", center)
# As the window_offset is inclusive while endpos is exclusive we make sure endpos gets +1 more than offset
# (e.g. if maxlen is 6, offset gets -2 while endpos gets +3). This ensure balance on both sides.
window_offset = max(center - ((maxlen - 1) // 2), 0)
window_endpos = min(center + int(ceil(maxlen / 2)), len(sentence))
# print("Start offset", window_offset, "start end", window_endpos)
assert (
window_endpos - window_offset <= maxlen
), f"windows_endpos {window_endpos} - window_startpos {window_offset} is more than maxlen {maxlen}"
        # If the window is shorter than maxlen, extend endpos so the full maxlen is used
        window_endpos += max(maxlen - (window_endpos - window_offset), 0)
        # If the sentence itself is shorter, clamp endpos to the sentence length
        window_endpos = min(window_endpos, len(sentence))
        # If clamping shortened the window, shift the offset left to recover the full maxlen
        window_offset -= max(maxlen - (window_endpos - window_offset), 0)
window_offset = max(window_offset, 0)
# print("Adjusted offset", window_offset, "Adjusted end", window_endpos)
# Adjust the alias indices based on what spans are in the sentence window or now
while window_first_alias > 0:
if spans[window_first_alias - 1][0] < window_offset:
break
window_first_alias -= 1
while window_last_alias + 1 < len(spans):
if spans[window_last_alias + 1][0] >= window_endpos:
break
window_last_alias += 1
windows.append(
(
window_offset,
window_endpos,
window_first_alias,
window_last_alias + 1,
window_aliases2see,
)
)
alias_idx += 1
if sanity_check:
for alias_idx, (offset, endpos) in enumerate(spans):
assert 0 <= offset and offset < endpos and endpos <= len(sentence)
windowX = [
(o, e, f, l, A)
for o, e, f, l, A in windows
if f <= alias_idx and alias_idx < l
]
assert len(windowX) >= int(alias_idx in aliases_seen_by_model)
window = [(o, e, f, l, A) for o, e, f, l, A in windows if alias_idx in A]
assert len(window) == int(alias_idx in aliases_seen_by_model)
if alias_idx in aliases_seen_by_model:
assert window[0] in windowX
window_offset, window_endpos, _, _, _ = window[0]
assert window_offset <= max(offset - mincontext, 0)
assert min(offset + mincontext, len(sentence)) <= window_endpos + 1
assert window_endpos - window_offset == min(maxlen, len(sentence))
return windows
def determine_windows(
sentence,
spans,
aliases_seen_by_model,
maxlen,
mincontext,
max_aliases,
sanity_check=False,
):
"""Refer to determine_windowsX(.) for documentation.
This function simply postprocesses the output of
determine_windowsX(.) to handle max_aliases. To do so, it replicates
each window whose number of aliases exceeds max_aliases. The
resulting sub-windows may overlap in their sets of aliases but not
in their aliases2see.
Args:
sentence: sentence
spans: List of mention spans
aliases_seen_by_model: List of indexes into spans of the mentions that the model will use in the forward()
This may not be all aliases due to removing weak labels
maxlen: maximum length of window size
mincontext: minimum length of left/right context around a mention
max_aliases: maximum number of mentions in a context window
sanity_check: whether to sanity check the above conditions
Returns: List of window boundaries with respect to tokens and mentions
"""
windows = determine_windowsX(
sentence, spans, aliases_seen_by_model, maxlen, mincontext, sanity_check
)
output = []
for window in windows:
(
split_offset,
split_endpos,
split_first_alias,
split_last_alias,
split_aliases2see,
) = window
        # Determine the <number of aliases in window> and <number of sub-windows required to accommodate max_aliases>
window_width = split_last_alias - split_first_alias
num_subwindows = ceil(window_width / max_aliases)
# Determine the <average width of sub-window> and <some allowance for extra aliases per sub-window>
subwindow_width = ceil(window_width / num_subwindows)
subwindow_overflow = max(0, max_aliases - subwindow_width) // 2
if num_subwindows == 1:
output.append(window)
continue
current_alias = split_first_alias
for _ in range(num_subwindows):
last_alias = min(current_alias + subwindow_width, split_last_alias)
current_alias_ = max(split_first_alias, current_alias - subwindow_overflow)
last_alias_ = min(last_alias + subwindow_overflow, split_last_alias)
subwindow_aliases2see = [
x for x in split_aliases2see if current_alias <= x and x < last_alias
]
if len(subwindow_aliases2see):
assert last_alias_ - current_alias_ <= max_aliases
output.append(
(
split_offset,
split_endpos,
current_alias_,
last_alias_,
subwindow_aliases2see,
)
)
current_alias = last_alias
return output
def pad_sentence(sentence, pad_token, maxlen):
assert len(sentence) <= maxlen
return sentence + [pad_token] * (maxlen - len(sentence))
def split_sentence(
max_aliases,
phrase,
spans,
aliases,
aliases_seen_by_model,
seq_len,
is_bert,
tokenizer,
sanity_check=False,
):
"""
- Splits a sentence into windows using determine_windows(.)
- Returns 4 'parallel' lists, where the corresponding positions describe a single window:
* window_span_idxs[i] has the alias indices that start in the i^th window.
* window_aliases2see[i] has the alias indices (relative to window_span_idxs[i], starting at zero) that
lie within aliases_to_predict.
* window_spans[i] has the string-formatted spans for the spans in window_span_idxs[i], relative to the start
of the i^th window.
* window_sentences[i] has the tokens of the i^th window.
Args:
max_aliases: maximum number of mentions in a context window
phrase: sentence
spans: List of mention spans
aliases: List of all mention strings
aliases_seen_by_model: List of indexes into spans of the mentions that the model will use in the forward()
This may not be all aliases due to removing weak labels
seq_len: maximum length of window size
is_bert: is the tokenizer a BERT one with CLS/SEP tokens
tokenizer: input tokenizer
sanity_check: whether to sanity check the above conditions
Returns: list of window mention indices, list of window mention indices
(relative to window_span_idxs[i], starting at zero), list of tokenized sentences,
list of token positions (relative to tokenized entire sentence)
"""
sentence, aliases2see, maxlen, old_spans = (
phrase,
aliases_seen_by_model,
seq_len,
spans,
)
maxlen_prepad = maxlen
if is_bert:
maxlen_prepad = maxlen_prepad - 2
old_len = len(sentence.split())
assert old_spans == list(
sorted(old_spans)
), f"You spans {old_spans} for ***{phrase}*** are not in sorted order from smallest to largest"
old_to_new, sentence = get_old_to_new_word_idx_mapping(phrase, tokenizer)
spans = []
for sp in old_spans:
assert sp[0] < sp[1], (
f"We assume all mentions are at least length 1, but you have span {sp} where the right index is not "
f"greater than the left with phrase ***{phrase}***. Each span is in "
f"[0, length of sentence={old_len}], both inclusive"
)
assert (
sp[0] >= 0 and sp[1] >= 0 and sp[1] <= old_len and sp[0] <= old_len
), f"The span of {sp} with {phrase} was not between [0, length of sentence={old_len}], both inclusive"
        # We should have the right side be old_to_new[sp[1]][0], but due to tokenization occasionally removing rare
# unicode characters, this way ensures the right span is greater than the left because, in that case,
# we will have old_to_new[sp[1]-1][-1] == old_to_new[sp[0]][0] (see test case in test_sentence_utils.py)
spans.append([old_to_new[sp[0]][0], old_to_new[sp[1] - 1][-1] + 1])
assert spans[-1][0] < spans[-1][1], (
f"Adjusted spans for old span {sp} and phrase ***{phrase}*** have the right side not greater than "
f"the left side. This might be due to a spans being on a unicode character removed by tokenization."
)
(
window_span_idxs,
window_aliases2see,
window_spans,
window_sentences,
window_sentence_pos_idxs,
) = ([], [], [], [], [])
# Sub-divide sentence into windows, respecting maxlen_prepad and max_aliases per window.
# This retains at least maxlen_prepad/5 context to the left and right of each alias2predict.
windows = determine_windows(
sentence,
spans,
aliases2see,
maxlen_prepad,
max(1, maxlen_prepad // 5),
max_aliases,
sanity_check,
)
for (
split_offset,
split_endpos,
split_first_alias,
split_last_alias,
split_aliases2see,
) in windows:
sub_sentence = sentence[split_offset:split_endpos]
sub_sentence_pos = list(range(split_offset, split_endpos))
if is_bert:
sub_sentence = pad_sentence(
[CLS_BERT] + sub_sentence + [SEP_BERT], PAD_BERT, maxlen
)
sub_sentence_pos = pad_sentence([-2] + sub_sentence_pos + [-3], -1, maxlen)
else:
sub_sentence = pad_sentence(sub_sentence, PAD, maxlen)
sub_sentence_pos = pad_sentence(sub_sentence_pos, -1, maxlen)
window_sentences.append(sub_sentence)
window_sentence_pos_idxs.append(sub_sentence_pos)
window_span_idxs.append([])
window_aliases2see.append([])
window_spans.append([])
current_alias_idx = split_first_alias
for span_offset, span_endpos in spans[split_first_alias:split_last_alias]:
window_span_idxs[-1].append(current_alias_idx)
if current_alias_idx in split_aliases2see:
assert current_alias_idx in aliases2see
window_aliases2see[-1].append(current_alias_idx - split_first_alias)
span_offset += int(is_bert) # add one for BERT to account for [CLS]
span_endpos += int(is_bert)
adjusted_endpos = span_endpos - split_offset
# If it's over the maxlen, adjust to be at the [CLS] token
if adjusted_endpos > maxlen:
adjusted_endpos = maxlen
if is_bert:
# Adjust so the end token is over the [CLS]
adjusted_endpos -= 1
assert span_offset - split_offset >= 0, (
f"The first span of {span_offset - split_offset} less than 0. "
f"Something went wrong in the span adjustment"
)
window_spans[-1].append([span_offset - split_offset, adjusted_endpos])
current_alias_idx += 1
return (
window_span_idxs,
window_aliases2see,
window_spans,
window_sentences,
window_sentence_pos_idxs,
)
def get_old_to_new_word_idx_mapping(sentence, tokenizer):
"""Method takes the original sentence and tokenized_sentence and builds a
mapping from the original sentence spans (split on " ") to the new sentence
spans (after tokenization). This will account for tokenizers splitting on
grammar and subwordpiece tokens from BERT.
For example:
phrase: 'Alexander få Baldwin III (born April 3, 1958, in Massapequa, Long Island, New York, USA).'
tokenized sentence: ['Alexander', 'f', '##å', 'Baldwin', 'III', '(', 'born', 'April', '3', ',', '1958',
',', 'in', 'Mass', '##ap', '##e', '##qua', ',', 'Long', 'Island', ',',
'New', 'York', ',', 'USA', ')']
Output: {0: [0], 1: [1, 2], 2: [3], 3: [4], 4: [5, 6], 5: [7], 6: [8, 9], 7: [10, 11], 8: [12],
9: [13, 14, 15, 16, 17], 10: [18], 11: [19, 20], 12: [21], 13: [22, 23], 14: [24, 25]}
We use this to convert spans from original sentence splitting to new sentence splitting.
Args:
sentence: sentence
tokenizer: tokenizer
Returns: Dict of word index to token index, tokenized sentence
"""
old_split = sentence.split()
final_tokenized_sentence = []
old_w = 0
new_w = 0
lost_words = 0
old_to_new = defaultdict(list)
while old_w < len(old_split):
old_word = old_split[old_w]
if old_w > 0:
# This will allow tokenizers that use spaces to know it's a middle word
old_word = " " + old_word
tokenized_word = [t for t in tokenizer.tokenize(old_word) if len(t) > 0]
# due to https://github.com/huggingface/transformers/commit/21ed3a6b993eba06e7f4cf7720f4a07cc8a0d4c2,
# certain characters are cleaned and removed
# if this is the case, we need to adjust the spans so the token is eaten
# print("OLD", old_w, old_word, "TOK", tokenized_word, "NEW W", new_w, "+", len(tokenized_word))
if len(tokenized_word) <= 0:
print(
f"TOKENIZED WORD IS LENGTH 0. It SHOULD BE WEIRD CHARACTERS WITH ORDS",
[ord(c) for c in old_word],
"AND IS CONTROL",
[_is_control(c) for c in old_word],
)
# if this is the last word, assign it to the previous word
if old_w + 1 >= len(old_split):
old_to_new[old_w] = [new_w - 1]
lost_words += 1
else:
# assign the span specifically to the new_w
old_to_new[old_w] = [new_w]
lost_words += 1
else:
new_w_ids = list(range(new_w, new_w + len(tokenized_word)))
old_to_new[old_w] = new_w_ids
final_tokenized_sentence.extend(tokenized_word)
new_w = new_w + len(tokenized_word)
old_w += 1
old_to_new = dict(old_to_new)
# Verify that each word from both sentences are in the mappings
len_tokenized_sentence = len(final_tokenized_sentence)
    assert final_tokenized_sentence == tokenizer.tokenize(sentence)
assert len_tokenized_sentence + lost_words >= len(
old_split
), f"Tokenize has compressed words that weren't lost {old_split} versus {tokenizer.tokenize(sentence)}"
assert all(len(val) > 0 for val in old_to_new.values()), f"{old_to_new}, {sentence}"
assert set(range(len_tokenized_sentence)) == set(
[v for val in old_to_new.values() for v in val]
), f"{old_to_new}, {sentence}"
assert set(range(len(old_split))) == set(
old_to_new.keys()
), f"{old_to_new}, {sentence}"
return old_to_new, final_tokenized_sentence
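if __name__ == "__main__":
    # Minimal sketch on toy inputs (arbitrary spans and window sizes): exercise
    # determine_windows() and pad_sentence() without needing a tokenizer.
    toy_sentence = ["alias1", "is", "next", "to", "alias2", "in", "this", "sentence"]
    toy_spans = [[0, 1], [4, 5]]
    toy_windows = determine_windows(
        toy_sentence, toy_spans, aliases_seen_by_model=[0, 1],
        maxlen=6, mincontext=1, max_aliases=2,
    )
    for offset, endpos, first, last, to_see in toy_windows:
        print(pad_sentence(toy_sentence[offset:endpos], PAD, 6), first, last, to_see)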
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#from .events import events as event
from .events import *
class LibraryListener(object):
ROBOT_LISTENER_API_VERSION = 2
def start_suite(self, name, attrs):
dispatch('scope_start', attrs['longname'])
def end_suite(self, name, attrs):
dispatch('scope_end', attrs['longname'])
def start_test(self, name, attrs):
dispatch('scope_start', attrs['longname'])
def end_test(self, name, attrs):
dispatch('scope_end', attrs['longname'])
|
# -*- coding: utf-8 -*-
# Copyright (c) 2017, Lehrstuhl fuer Angewandte Mechanik, Technische
# Universitaet Muenchen.
#
# Distributed under BSD-3-Clause License. See LICENSE-File for more information
#
"""
"""
import numpy as np
import scipy as sp
import time
import amfe
gmsh_input_file = amfe.amfe_dir('meshes/gmsh/c_bow_coarse.msh')
paraview_output_file = amfe.amfe_dir('results/c_bow_coarse/c_bow_coarse')
my_material = amfe.KirchhoffMaterial()
my_system = amfe.MechanicalSystem()
my_system.load_mesh_from_gmsh(gmsh_input_file, 15, my_material)
# Test the paraview basic output
# my_system.export_paraview(paraview_output_file)
my_system.apply_dirichlet_boundaries(13, 'xy')
harmonic_x = lambda t: np.sin(2*np.pi*t*30)
harmonic_y = lambda t: np.sin(2*np.pi*t*50)
my_system.apply_neumann_boundaries(14, 1E8, (1,0), harmonic_x)
my_system.apply_neumann_boundaries(14, 1E8, (0,1), harmonic_y)
###############################################################################
## time integration
###############################################################################
ndof = my_system.dirichlet_class.no_of_constrained_dofs
q0 = np.zeros(ndof)
dq0 = np.zeros(ndof)
initial_conditions = {'q0': q0, 'dq0': dq0}
dt = 5e-4
t_end = 2
#%%
solver = amfe.GeneralizedAlphaNonlinearDynamicsSolver(my_system,dt=dt, t_end=t_end, initial_conditions=initial_conditions)
solver.solve()
my_system.export_paraview(paraview_output_file + '_full_model')
my_system.clear_timesteps()
solver = amfe.GeneralizedAlphaLinearDynamicsSolver(my_system, dt=dt, t_end=t_end, initial_conditions=initial_conditions)
solver.solve()
my_system.export_paraview(paraview_output_file + '_linear_model')
my_system.clear_timesteps()
omega, V = amfe.vibration_modes(my_system, 7, save=True)
my_system.export_paraview(paraview_output_file + '_modes')
Theta, Theta_tilde = amfe.shifted_modal_derivatives(V, my_system.K, my_system.M(), omega)
my_system.clear_timesteps()
V_temp = amfe.augment_with_derivatives(None, Theta, deflate=False)
for i in np.arange(V_temp.shape[1]):
my_system.write_timestep(i, V_temp[:,i])
my_system.export_paraview(paraview_output_file + '_theta_shifted')
my_system.clear_timesteps()
V_temp = amfe.augment_with_derivatives(None, Theta_tilde, deflate=False)
for i in np.arange(V_temp.shape[1]):
my_system.write_timestep(i, V_temp[:,i])
my_system.export_paraview(paraview_output_file + '_theta_shifted_tilde')
static_derivatives = amfe.static_derivatives(V, my_system.K,my_system.M())
V_temp = amfe.augment_with_derivatives(None, static_derivatives, deflate=False)
for i in np.arange(V_temp.shape[1]):
my_system.write_timestep(i, V_temp[:,i])
my_system.export_paraview(paraview_output_file + '_static_derivatives')
V_extended = amfe.augment_with_derivatives(V, Theta)
V_extended = amfe.augment_with_derivatives(V_extended, Theta_tilde)
my_system.clear_timesteps()
for i in np.arange(V_extended.shape[1]):
my_system.write_timestep(i, V_extended[:,i])
my_system.export_paraview(paraview_output_file + '_basis_theta_theta_tilde_deflated')
V_extended_sd = amfe.augment_with_derivatives(V, static_derivatives)
my_system.clear_timesteps()
for i in np.arange(V_extended_sd.shape[1]):
my_system.write_timestep(i, V_extended_sd[:,i])
my_system.export_paraview(paraview_output_file + '_static_derivatives_deflated')
system_red_sd = amfe.reduce_mechanical_system(my_system, V_extended_sd[:,0:20])
system_red_theta =amfe.reduce_mechanical_system(my_system, V_extended[:,0:20])
q0_r = np.zeros(20)
dq0_r = np.zeros(20)
initial_conditions = {'q0': q0_r, 'dq0': dq0_r}
solver = amfe.GeneralizedAlphaNonlinearDynamicsSolver(system_red_sd,dt=dt, t_end=t_end, initial_conditions=initial_conditions)
solver.solve()
system_red_sd.export_paraview(paraview_output_file + '_red_sd_20')
solver = amfe.GeneralizedAlphaNonlinearDynamicsSolver(system_red_theta,dt=dt, t_end=t_end, initial_conditions=initial_conditions)
solver.solve()
system_red_theta.export_paraview(paraview_output_file + '_red_theta_20') |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import unittest
from wdom.util import reset
from wdom.document import get_document
class TestInitialize(unittest.TestCase):
def test_initialize(self):
from wdom.server import _tornado
old_doc = get_document()
old_app_tornado = _tornado.get_app()
reset()
self.assertIsNot(old_doc, get_document())
self.assertIsNot(old_app_tornado, _tornado.get_app())
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'gui/gui.ui'
#
# Created by: PyQt5 UI code generator 5.14.0
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(1260, 679)
MainWindow.setMinimumSize(QtCore.QSize(1257, 651))
MainWindow.setMaximumSize(QtCore.QSize(16777215, 16777215))
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(12)
MainWindow.setFont(font)
MainWindow.setToolTip("")
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.gridLayout_2 = QtWidgets.QGridLayout(self.centralwidget)
self.gridLayout_2.setObjectName("gridLayout_2")
self.gridLayout = QtWidgets.QGridLayout()
self.gridLayout.setObjectName("gridLayout")
self.open_button = QtWidgets.QPushButton(self.centralwidget)
self.open_button.setMinimumSize(QtCore.QSize(0, 25))
self.open_button.setMaximumSize(QtCore.QSize(16777215, 25))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(10)
self.open_button.setFont(font)
self.open_button.setObjectName("open_button")
self.gridLayout.addWidget(self.open_button, 0, 0, 1, 1)
self.export_button = QtWidgets.QPushButton(self.centralwidget)
self.export_button.setEnabled(False)
self.export_button.setMinimumSize(QtCore.QSize(0, 25))
self.export_button.setMaximumSize(QtCore.QSize(16777215, 25))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(10)
self.export_button.setFont(font)
self.export_button.setObjectName("export_button")
self.gridLayout.addWidget(self.export_button, 0, 1, 1, 1)
self.gridLayout_2.addLayout(self.gridLayout, 4, 3, 1, 1)
spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout_2.addItem(spacerItem, 0, 4, 1, 1)
self.verticalLayout_7 = QtWidgets.QVBoxLayout()
self.verticalLayout_7.setObjectName("verticalLayout_7")
self.verticalLayout_6 = QtWidgets.QVBoxLayout()
self.verticalLayout_6.setObjectName("verticalLayout_6")
self.video_player = QtWidgets.QLabel(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.video_player.sizePolicy().hasHeightForWidth())
self.video_player.setSizePolicy(sizePolicy)
self.video_player.setMinimumSize(QtCore.QSize(854, 480))
self.video_player.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.video_player.setStyleSheet("")
self.video_player.setText("")
self.video_player.setAlignment(QtCore.Qt.AlignCenter)
self.video_player.setObjectName("video_player")
self.verticalLayout_6.addWidget(self.video_player)
self.video_slider = QtWidgets.QSlider(self.centralwidget)
self.video_slider.setEnabled(False)
self.video_slider.setMinimumSize(QtCore.QSize(854, 20))
self.video_slider.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.video_slider.setOrientation(QtCore.Qt.Horizontal)
self.video_slider.setObjectName("video_slider")
self.verticalLayout_6.addWidget(self.video_slider)
self.horizontalLayout_13 = QtWidgets.QHBoxLayout()
self.horizontalLayout_13.setObjectName("horizontalLayout_13")
spacerItem1 = QtWidgets.QSpacerItem(40, 40, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_13.addItem(spacerItem1)
self.play_button = QtWidgets.QPushButton(self.centralwidget)
self.play_button.setEnabled(False)
self.play_button.setMinimumSize(QtCore.QSize(40, 40))
self.play_button.setMaximumSize(QtCore.QSize(40, 40))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(10)
self.play_button.setFont(font)
self.play_button.setText("")
self.play_button.setObjectName("play_button")
self.horizontalLayout_13.addWidget(self.play_button)
spacerItem2 = QtWidgets.QSpacerItem(5, 20, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_13.addItem(spacerItem2)
self.stop_button = QtWidgets.QPushButton(self.centralwidget)
self.stop_button.setEnabled(False)
self.stop_button.setMinimumSize(QtCore.QSize(40, 40))
self.stop_button.setMaximumSize(QtCore.QSize(40, 40))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(10)
self.stop_button.setFont(font)
self.stop_button.setText("")
self.stop_button.setObjectName("stop_button")
self.horizontalLayout_13.addWidget(self.stop_button)
spacerItem3 = QtWidgets.QSpacerItem(40, 40, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_13.addItem(spacerItem3)
self.verticalLayout_6.addLayout(self.horizontalLayout_13)
self.verticalLayout_7.addLayout(self.verticalLayout_6)
self.horizontalLayout_18 = QtWidgets.QHBoxLayout()
self.horizontalLayout_18.setObjectName("horizontalLayout_18")
self.dark_mode_label = QtWidgets.QLabel(self.centralwidget)
self.dark_mode_label.setObjectName("dark_mode_label")
self.horizontalLayout_18.addWidget(self.dark_mode_label)
self.mode_toggle = AnimatedToggle(self.centralwidget)
self.mode_toggle.setMinimumSize(QtCore.QSize(58, 45))
self.mode_toggle.setMaximumSize(QtCore.QSize(58, 45))
self.mode_toggle.setLayoutDirection(QtCore.Qt.LeftToRight)
self.mode_toggle.setText("")
self.mode_toggle.setObjectName("mode_toggle")
self.horizontalLayout_18.addWidget(self.mode_toggle)
self.light_mode_label = QtWidgets.QLabel(self.centralwidget)
self.light_mode_label.setObjectName("light_mode_label")
self.horizontalLayout_18.addWidget(self.light_mode_label)
spacerItem4 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_18.addItem(spacerItem4)
self.verticalLayout_7.addLayout(self.horizontalLayout_18)
self.gridLayout_2.addLayout(self.verticalLayout_7, 0, 1, 5, 1)
spacerItem5 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout_2.addItem(spacerItem5, 0, 0, 1, 1)
self.horizontalLayout_14 = QtWidgets.QHBoxLayout()
self.horizontalLayout_14.setObjectName("horizontalLayout_14")
self.apply_button = QtWidgets.QPushButton(self.centralwidget)
self.apply_button.setEnabled(False)
self.apply_button.setMinimumSize(QtCore.QSize(0, 25))
self.apply_button.setMaximumSize(QtCore.QSize(16777215, 25))
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(10)
self.apply_button.setFont(font)
self.apply_button.setObjectName("apply_button")
self.horizontalLayout_14.addWidget(self.apply_button)
self.reset_button = QtWidgets.QPushButton(self.centralwidget)
self.reset_button.setEnabled(False)
self.reset_button.setMinimumSize(QtCore.QSize(0, 25))
self.reset_button.setMaximumSize(QtCore.QSize(62, 16777215))
font = QtGui.QFont()
font.setPointSize(10)
self.reset_button.setFont(font)
self.reset_button.setObjectName("reset_button")
self.horizontalLayout_14.addWidget(self.reset_button)
self.gridLayout_2.addLayout(self.horizontalLayout_14, 3, 3, 1, 1)
spacerItem6 = QtWidgets.QSpacerItem(17, 450, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout_2.addItem(spacerItem6, 1, 2, 1, 1)
self.status_message = QtWidgets.QTextBrowser(self.centralwidget)
self.status_message.setMinimumSize(QtCore.QSize(300, 35))
self.status_message.setMaximumSize(QtCore.QSize(300, 35))
font = QtGui.QFont()
font.setPointSize(10)
self.status_message.setFont(font)
self.status_message.setLayoutDirection(QtCore.Qt.LeftToRight)
self.status_message.setInputMethodHints(QtCore.Qt.ImhMultiLine)
self.status_message.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.status_message.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
self.status_message.setLineWrapMode(QtWidgets.QTextEdit.NoWrap)
self.status_message.setLineWrapColumnOrWidth(0)
self.status_message.setObjectName("status_message")
self.gridLayout_2.addWidget(self.status_message, 2, 3, 1, 1)
self.all_options = QtWidgets.QGroupBox(self.centralwidget)
self.all_options.setEnabled(True)
self.all_options.setMinimumSize(QtCore.QSize(300, 0))
self.all_options.setMaximumSize(QtCore.QSize(300, 16777215))
font = QtGui.QFont()
font.setPointSize(8)
self.all_options.setFont(font)
self.all_options.setToolTip("")
self.all_options.setAlignment(QtCore.Qt.AlignCenter)
self.all_options.setObjectName("all_options")
self.verticalLayout_5 = QtWidgets.QVBoxLayout(self.all_options)
self.verticalLayout_5.setObjectName("verticalLayout_5")
self.verticalLayout = QtWidgets.QVBoxLayout()
self.verticalLayout.setObjectName("verticalLayout")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.hue_Label = QtWidgets.QLabel(self.all_options)
self.hue_Label.setMinimumSize(QtCore.QSize(95, 15))
self.hue_Label.setMaximumSize(QtCore.QSize(95, 15))
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(10)
self.hue_Label.setFont(font)
self.hue_Label.setObjectName("hue_Label")
self.horizontalLayout_2.addWidget(self.hue_Label)
spacerItem7 = QtWidgets.QSpacerItem(40, 15, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem7)
self.verticalLayout.addLayout(self.horizontalLayout_2)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.hue_slider = QtWidgets.QSlider(self.all_options)
self.hue_slider.setMinimumSize(QtCore.QSize(225, 20))
self.hue_slider.setMaximumSize(QtCore.QSize(225, 20))
self.hue_slider.setMaximum(179)
self.hue_slider.setOrientation(QtCore.Qt.Horizontal)
self.hue_slider.setObjectName("hue_slider")
self.horizontalLayout.addWidget(self.hue_slider)
self.hue_spinBox = QtWidgets.QSpinBox(self.all_options)
self.hue_spinBox.setMinimumSize(QtCore.QSize(33, 20))
self.hue_spinBox.setMaximumSize(QtCore.QSize(33, 20))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(9)
self.hue_spinBox.setFont(font)
self.hue_spinBox.setLayoutDirection(QtCore.Qt.LeftToRight)
self.hue_spinBox.setAlignment(QtCore.Qt.AlignCenter)
self.hue_spinBox.setButtonSymbols(QtWidgets.QAbstractSpinBox.NoButtons)
self.hue_spinBox.setMinimum(0)
self.hue_spinBox.setMaximum(179)
self.hue_spinBox.setProperty("value", 0)
self.hue_spinBox.setObjectName("hue_spinBox")
self.horizontalLayout.addWidget(self.hue_spinBox)
self.verticalLayout.addLayout(self.horizontalLayout)
self.verticalLayout_5.addLayout(self.verticalLayout)
spacerItem8 = QtWidgets.QSpacerItem(40, 15, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)
self.verticalLayout_5.addItem(spacerItem8)
self.verticalLayout_2 = QtWidgets.QVBoxLayout()
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.horizontalLayout_4 = QtWidgets.QHBoxLayout()
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.saturation_label = QtWidgets.QLabel(self.all_options)
self.saturation_label.setMinimumSize(QtCore.QSize(95, 15))
self.saturation_label.setMaximumSize(QtCore.QSize(95, 15))
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(10)
self.saturation_label.setFont(font)
self.saturation_label.setObjectName("saturation_label")
self.horizontalLayout_4.addWidget(self.saturation_label)
spacerItem9 = QtWidgets.QSpacerItem(40, 15, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_4.addItem(spacerItem9)
self.verticalLayout_2.addLayout(self.horizontalLayout_4)
self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.saturation_slider = QtWidgets.QSlider(self.all_options)
self.saturation_slider.setMinimumSize(QtCore.QSize(225, 20))
self.saturation_slider.setMaximumSize(QtCore.QSize(225, 20))
self.saturation_slider.setMaximum(200)
self.saturation_slider.setSingleStep(1)
self.saturation_slider.setPageStep(10)
self.saturation_slider.setProperty("value", 100)
self.saturation_slider.setOrientation(QtCore.Qt.Horizontal)
self.saturation_slider.setObjectName("saturation_slider")
self.horizontalLayout_3.addWidget(self.saturation_slider)
self.saturation_spinBox = QtWidgets.QSpinBox(self.all_options)
self.saturation_spinBox.setMinimumSize(QtCore.QSize(33, 20))
self.saturation_spinBox.setMaximumSize(QtCore.QSize(33, 20))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(9)
self.saturation_spinBox.setFont(font)
self.saturation_spinBox.setAlignment(QtCore.Qt.AlignCenter)
self.saturation_spinBox.setButtonSymbols(QtWidgets.QAbstractSpinBox.NoButtons)
self.saturation_spinBox.setMaximum(200)
self.saturation_spinBox.setProperty("value", 100)
self.saturation_spinBox.setObjectName("saturation_spinBox")
self.horizontalLayout_3.addWidget(self.saturation_spinBox)
self.verticalLayout_2.addLayout(self.horizontalLayout_3)
self.verticalLayout_5.addLayout(self.verticalLayout_2)
spacerItem10 = QtWidgets.QSpacerItem(40, 15, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)
self.verticalLayout_5.addItem(spacerItem10)
self.verticalLayout_3 = QtWidgets.QVBoxLayout()
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.horizontalLayout_6 = QtWidgets.QHBoxLayout()
self.horizontalLayout_6.setObjectName("horizontalLayout_6")
self.brightness_label = QtWidgets.QLabel(self.all_options)
self.brightness_label.setMinimumSize(QtCore.QSize(95, 15))
self.brightness_label.setMaximumSize(QtCore.QSize(95, 15))
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(10)
self.brightness_label.setFont(font)
self.brightness_label.setObjectName("brightness_label")
self.horizontalLayout_6.addWidget(self.brightness_label)
spacerItem11 = QtWidgets.QSpacerItem(40, 15, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_6.addItem(spacerItem11)
self.verticalLayout_3.addLayout(self.horizontalLayout_6)
self.horizontalLayout_5 = QtWidgets.QHBoxLayout()
self.horizontalLayout_5.setObjectName("horizontalLayout_5")
self.brightness_slider = QtWidgets.QSlider(self.all_options)
self.brightness_slider.setMinimumSize(QtCore.QSize(225, 20))
self.brightness_slider.setMaximumSize(QtCore.QSize(225, 20))
self.brightness_slider.setMaximum(150)
self.brightness_slider.setProperty("value", 100)
self.brightness_slider.setOrientation(QtCore.Qt.Horizontal)
self.brightness_slider.setObjectName("brightness_slider")
self.horizontalLayout_5.addWidget(self.brightness_slider)
self.brightness_spinBox = QtWidgets.QSpinBox(self.all_options)
self.brightness_spinBox.setMinimumSize(QtCore.QSize(33, 20))
self.brightness_spinBox.setMaximumSize(QtCore.QSize(33, 20))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(9)
self.brightness_spinBox.setFont(font)
self.brightness_spinBox.setAlignment(QtCore.Qt.AlignCenter)
self.brightness_spinBox.setButtonSymbols(QtWidgets.QAbstractSpinBox.NoButtons)
self.brightness_spinBox.setMaximum(150)
self.brightness_spinBox.setProperty("value", 100)
self.brightness_spinBox.setObjectName("brightness_spinBox")
self.horizontalLayout_5.addWidget(self.brightness_spinBox)
self.verticalLayout_3.addLayout(self.horizontalLayout_5)
self.verticalLayout_5.addLayout(self.verticalLayout_3)
spacerItem12 = QtWidgets.QSpacerItem(40, 15, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)
self.verticalLayout_5.addItem(spacerItem12)
self.horizontalLayout_12 = QtWidgets.QHBoxLayout()
self.horizontalLayout_12.setObjectName("horizontalLayout_12")
spacerItem13 = QtWidgets.QSpacerItem(8, 35, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_12.addItem(spacerItem13)
self.triangulate_label = QtWidgets.QLabel(self.all_options)
self.triangulate_label.setMinimumSize(QtCore.QSize(115, 35))
self.triangulate_label.setMaximumSize(QtCore.QSize(115, 35))
font = QtGui.QFont()
font.setPointSize(10)
self.triangulate_label.setFont(font)
self.triangulate_label.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.triangulate_label.setObjectName("triangulate_label")
self.horizontalLayout_12.addWidget(self.triangulate_label)
spacerItem14 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_12.addItem(spacerItem14)
self.triangulation_check_box = Toggle(self.all_options)
self.triangulation_check_box.setEnabled(True)
self.triangulation_check_box.setMinimumSize(QtCore.QSize(48, 35))
self.triangulation_check_box.setMaximumSize(QtCore.QSize(48, 35))
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(10)
font.setBold(False)
font.setItalic(False)
font.setWeight(50)
self.triangulation_check_box.setFont(font)
self.triangulation_check_box.setLayoutDirection(QtCore.Qt.LeftToRight)
self.triangulation_check_box.setAutoFillBackground(True)
self.triangulation_check_box.setText("")
self.triangulation_check_box.setChecked(False)
self.triangulation_check_box.setObjectName("triangulation_check_box")
self.horizontalLayout_12.addWidget(self.triangulation_check_box)
self.verticalLayout_5.addLayout(self.horizontalLayout_12)
spacerItem15 = QtWidgets.QSpacerItem(37, 13, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)
self.verticalLayout_5.addItem(spacerItem15)
self.groupBox = QtWidgets.QGroupBox(self.all_options)
self.groupBox.setEnabled(True)
self.groupBox.setMinimumSize(QtCore.QSize(280, 240))
self.groupBox.setMaximumSize(QtCore.QSize(280, 240))
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(8)
self.groupBox.setFont(font)
self.groupBox.setAlignment(QtCore.Qt.AlignCenter)
self.groupBox.setObjectName("groupBox")
self.triangulate_options = QtWidgets.QWidget(self.groupBox)
self.triangulate_options.setEnabled(False)
self.triangulate_options.setGeometry(QtCore.QRect(10, 20, 260, 220))
self.triangulate_options.setMinimumSize(QtCore.QSize(260, 220))
self.triangulate_options.setMaximumSize(QtCore.QSize(260, 220))
self.triangulate_options.setObjectName("triangulate_options")
self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.triangulate_options)
self.verticalLayout_4.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.horizontalLayout_7 = QtWidgets.QHBoxLayout()
self.horizontalLayout_7.setObjectName("horizontalLayout_7")
self.max_points_label = QtWidgets.QLabel(self.triangulate_options)
self.max_points_label.setMinimumSize(QtCore.QSize(95, 20))
self.max_points_label.setMaximumSize(QtCore.QSize(95, 20))
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(10)
self.max_points_label.setFont(font)
self.max_points_label.setObjectName("max_points_label")
self.horizontalLayout_7.addWidget(self.max_points_label)
spacerItem16 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_7.addItem(spacerItem16)
self.max_points_spinBox = QtWidgets.QSpinBox(self.triangulate_options)
self.max_points_spinBox.setMinimumSize(QtCore.QSize(40, 20))
self.max_points_spinBox.setMaximumSize(QtCore.QSize(40, 20))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(9)
self.max_points_spinBox.setFont(font)
self.max_points_spinBox.setAlignment(QtCore.Qt.AlignCenter)
self.max_points_spinBox.setButtonSymbols(QtWidgets.QAbstractSpinBox.NoButtons)
self.max_points_spinBox.setMinimum(20)
self.max_points_spinBox.setMaximum(10000)
self.max_points_spinBox.setProperty("value", 2000)
self.max_points_spinBox.setObjectName("max_points_spinBox")
self.horizontalLayout_7.addWidget(self.max_points_spinBox)
self.verticalLayout_4.addLayout(self.horizontalLayout_7)
spacerItem17 = QtWidgets.QSpacerItem(240, 15, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)
self.verticalLayout_4.addItem(spacerItem17)
self.horizontalLayout_8 = QtWidgets.QHBoxLayout()
self.horizontalLayout_8.setObjectName("horizontalLayout_8")
self.method_label = QtWidgets.QLabel(self.triangulate_options)
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(10)
self.method_label.setFont(font)
self.method_label.setObjectName("method_label")
self.horizontalLayout_8.addWidget(self.method_label)
self.threshold_radioButton = QtWidgets.QRadioButton(self.triangulate_options)
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(8)
self.threshold_radioButton.setFont(font)
self.threshold_radioButton.setChecked(True)
self.threshold_radioButton.setObjectName("threshold_radioButton")
self.horizontalLayout_8.addWidget(self.threshold_radioButton)
self.poisson_disk_radioButton = QtWidgets.QRadioButton(self.triangulate_options)
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(8)
self.poisson_disk_radioButton.setFont(font)
self.poisson_disk_radioButton.setObjectName("poisson_disk_radioButton")
self.horizontalLayout_8.addWidget(self.poisson_disk_radioButton)
self.verticalLayout_4.addLayout(self.horizontalLayout_8)
spacerItem18 = QtWidgets.QSpacerItem(240, 15, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)
self.verticalLayout_4.addItem(spacerItem18)
self.horizontalLayout_9 = QtWidgets.QHBoxLayout()
self.horizontalLayout_9.setObjectName("horizontalLayout_9")
self.scale_factor_label = QtWidgets.QLabel(self.triangulate_options)
self.scale_factor_label.setMinimumSize(QtCore.QSize(100, 20))
self.scale_factor_label.setMaximumSize(QtCore.QSize(100, 20))
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(10)
self.scale_factor_label.setFont(font)
self.scale_factor_label.setObjectName("scale_factor_label")
self.horizontalLayout_9.addWidget(self.scale_factor_label)
spacerItem19 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_9.addItem(spacerItem19)
self.scale_factor_comboBox = QtWidgets.QComboBox(self.triangulate_options)
self.scale_factor_comboBox.setMinimumSize(QtCore.QSize(55, 18))
self.scale_factor_comboBox.setMaximumSize(QtCore.QSize(55, 18))
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(9)
self.scale_factor_comboBox.setFont(font)
self.scale_factor_comboBox.setInputMethodHints(QtCore.Qt.ImhNone)
self.scale_factor_comboBox.setObjectName("scale_factor_comboBox")
self.scale_factor_comboBox.addItem("")
self.scale_factor_comboBox.addItem("")
self.scale_factor_comboBox.addItem("")
self.scale_factor_comboBox.addItem("")
self.scale_factor_comboBox.addItem("")
self.horizontalLayout_9.addWidget(self.scale_factor_comboBox)
self.verticalLayout_4.addLayout(self.horizontalLayout_9)
spacerItem20 = QtWidgets.QSpacerItem(240, 15, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)
self.verticalLayout_4.addItem(spacerItem20)
self.horizontalLayout_10 = QtWidgets.QHBoxLayout()
self.horizontalLayout_10.setObjectName("horizontalLayout_10")
self.draw_line_checkBox = QtWidgets.QCheckBox(self.triangulate_options)
self.draw_line_checkBox.setMinimumSize(QtCore.QSize(14, 14))
self.draw_line_checkBox.setMaximumSize(QtCore.QSize(14, 14))
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(10)
self.draw_line_checkBox.setFont(font)
self.draw_line_checkBox.setText("")
self.draw_line_checkBox.setIconSize(QtCore.QSize(18, 18))
self.draw_line_checkBox.setObjectName("draw_line_checkBox")
self.horizontalLayout_10.addWidget(self.draw_line_checkBox)
self.label = QtWidgets.QLabel(self.triangulate_options)
self.label.setMinimumSize(QtCore.QSize(70, 20))
self.label.setMaximumSize(QtCore.QSize(70, 20))
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(10)
self.label.setFont(font)
self.label.setLayoutDirection(QtCore.Qt.LeftToRight)
self.label.setObjectName("label")
self.horizontalLayout_10.addWidget(self.label)
spacerItem21 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_10.addItem(spacerItem21)
self.verticalLayout_4.addLayout(self.horizontalLayout_10)
spacerItem22 = QtWidgets.QSpacerItem(37, 5, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)
self.verticalLayout_4.addItem(spacerItem22)
self.horizontalLayout_11 = QtWidgets.QHBoxLayout()
self.horizontalLayout_11.setObjectName("horizontalLayout_11")
self.thickness_label = QtWidgets.QLabel(self.triangulate_options)
self.thickness_label.setEnabled(False)
self.thickness_label.setMinimumSize(QtCore.QSize(70, 20))
self.thickness_label.setMaximumSize(QtCore.QSize(70, 20))
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(10)
self.thickness_label.setFont(font)
self.thickness_label.setCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor))
self.thickness_label.setObjectName("thickness_label")
self.horizontalLayout_11.addWidget(self.thickness_label)
spacerItem23 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_11.addItem(spacerItem23)
self.thickness_slider = QtWidgets.QSlider(self.triangulate_options)
self.thickness_slider.setEnabled(False)
self.thickness_slider.setMinimumSize(QtCore.QSize(130, 20))
self.thickness_slider.setMaximumSize(QtCore.QSize(130, 20))
self.thickness_slider.setMinimum(1)
self.thickness_slider.setMaximum(5)
self.thickness_slider.setOrientation(QtCore.Qt.Horizontal)
self.thickness_slider.setObjectName("thickness_slider")
self.horizontalLayout_11.addWidget(self.thickness_slider)
spacerItem24 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_11.addItem(spacerItem24)
self.thickness_spinBox = QtWidgets.QSpinBox(self.triangulate_options)
self.thickness_spinBox.setEnabled(False)
self.thickness_spinBox.setMinimumSize(QtCore.QSize(20, 20))
self.thickness_spinBox.setMaximumSize(QtCore.QSize(20, 20))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(9)
self.thickness_spinBox.setFont(font)
self.thickness_spinBox.setAlignment(QtCore.Qt.AlignCenter)
self.thickness_spinBox.setButtonSymbols(QtWidgets.QAbstractSpinBox.NoButtons)
self.thickness_spinBox.setMinimum(1)
self.thickness_spinBox.setMaximum(5)
self.thickness_spinBox.setObjectName("thickness_spinBox")
self.horizontalLayout_11.addWidget(self.thickness_spinBox)
self.verticalLayout_4.addLayout(self.horizontalLayout_11)
self.verticalLayout_5.addWidget(self.groupBox)
self.gridLayout_2.addWidget(self.all_options, 0, 3, 2, 1)
MainWindow.setCentralWidget(self.centralwidget)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.actionOpen = QtWidgets.QAction(MainWindow)
self.actionOpen.setObjectName("actionOpen")
self.actionSave = QtWidgets.QAction(MainWindow)
self.actionSave.setObjectName("actionSave")
self.actionSave_As = QtWidgets.QAction(MainWindow)
self.actionSave_As.setObjectName("actionSave_As")
self.actionNew = QtWidgets.QAction(MainWindow)
self.actionNew.setObjectName("actionNew")
self.actionNew_Window = QtWidgets.QAction(MainWindow)
self.actionNew_Window.setObjectName("actionNew_Window")
self.retranslateUi(MainWindow)
self.scale_factor_comboBox.setCurrentIndex(1)
self.hue_slider.valueChanged['int'].connect(self.hue_spinBox.setValue)
self.saturation_slider.valueChanged['int'].connect(self.saturation_spinBox.setValue)
self.brightness_slider.valueChanged['int'].connect(self.brightness_spinBox.setValue)
self.draw_line_checkBox.toggled['bool'].connect(self.thickness_slider.setEnabled)
self.draw_line_checkBox.toggled['bool'].connect(self.thickness_spinBox.setEnabled)
self.thickness_slider.valueChanged['int'].connect(self.thickness_spinBox.setValue)
self.triangulation_check_box.toggled['bool'].connect(self.triangulate_options.setEnabled)
self.draw_line_checkBox.toggled['bool'].connect(self.thickness_label.setEnabled)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "Delaunay\'s Dream"))
MainWindow.setStatusTip(_translate("MainWindow", "Delaunay\'s Dream"))
self.open_button.setToolTip(_translate("MainWindow", "New Project"))
self.open_button.setStatusTip(_translate("MainWindow", "New Project"))
self.open_button.setText(_translate("MainWindow", "New"))
self.export_button.setToolTip(_translate("MainWindow", "Export Video"))
self.export_button.setStatusTip(_translate("MainWindow", "Export Video"))
self.export_button.setText(_translate("MainWindow", "Export"))
self.video_player.setStatusTip(_translate("MainWindow", "Video Playback"))
self.video_slider.setToolTip(_translate("MainWindow", "Video Seek Bar"))
self.video_slider.setStatusTip(_translate("MainWindow", "Video Seek Bar"))
self.play_button.setToolTip(_translate("MainWindow", "Play/Pause Video"))
self.play_button.setStatusTip(_translate("MainWindow", "Play/Pause Video"))
self.stop_button.setToolTip(_translate("MainWindow", "Stop Video"))
self.stop_button.setStatusTip(_translate("MainWindow", "Stop Video"))
self.dark_mode_label.setText(_translate("MainWindow", "Dark"))
self.mode_toggle.setToolTip(_translate("MainWindow", "Dark/Light Mode Toggle"))
self.mode_toggle.setStatusTip(_translate("MainWindow", "Dark/Light Mode Toggle"))
self.light_mode_label.setText(_translate("MainWindow", "Light"))
self.apply_button.setToolTip(_translate("MainWindow", "Apply Changes to All Frames"))
self.apply_button.setStatusTip(_translate("MainWindow", "Apply Changes to All Frames"))
self.apply_button.setText(_translate("MainWindow", "Apply to All Frames"))
self.reset_button.setToolTip(_translate("MainWindow", "Reset all changes applied to the video by reloading the video"))
self.reset_button.setStatusTip(_translate("MainWindow", "Reset all changes applied to the video by reloading the video"))
self.reset_button.setText(_translate("MainWindow", "Reset"))
self.status_message.setToolTip(_translate("MainWindow", "Status Messages"))
self.status_message.setStatusTip(_translate("MainWindow", "Status Messages"))
self.status_message.setHtml(_translate("MainWindow", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Calibri\'; font-size:10pt; font-weight:400; font-style:normal;\">\n"
"<p align=\"center\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">Welcome to Delaunay\'s Dream!</p></body></html>"))
self.all_options.setTitle(_translate("MainWindow", "Options"))
self.hue_Label.setStatusTip(_translate("MainWindow", "Hue"))
self.hue_Label.setText(_translate("MainWindow", " Hue"))
self.hue_slider.setToolTip(_translate("MainWindow", "Adjust Hue (0 - 179)"))
self.hue_slider.setStatusTip(_translate("MainWindow", "Adjust Hue (0 - 179)"))
self.hue_spinBox.setToolTip(_translate("MainWindow", "Adjust Hue (0 - 179)"))
self.hue_spinBox.setStatusTip(_translate("MainWindow", "Adjust Hue (0 - 179)"))
self.saturation_label.setStatusTip(_translate("MainWindow", "Saturation"))
self.saturation_label.setText(_translate("MainWindow", " Saturation"))
self.saturation_slider.setToolTip(_translate("MainWindow", "Adjust Saturation (0 - 200)"))
self.saturation_slider.setStatusTip(_translate("MainWindow", "Adjust Saturation (0 - 200)"))
self.saturation_spinBox.setToolTip(_translate("MainWindow", "Adjust Saturation (0 - 200)"))
self.saturation_spinBox.setStatusTip(_translate("MainWindow", "Adjust Saturation (0 - 200)"))
self.brightness_label.setStatusTip(_translate("MainWindow", "Brightness"))
self.brightness_label.setText(_translate("MainWindow", " Brightness"))
self.brightness_slider.setToolTip(_translate("MainWindow", "Adjust Brightness (0 - 150)"))
self.brightness_slider.setStatusTip(_translate("MainWindow", "Adjust Brightness (0 - 150)"))
self.brightness_spinBox.setToolTip(_translate("MainWindow", "Adjust Brightness (0 - 150)"))
self.brightness_spinBox.setStatusTip(_translate("MainWindow", "Adjust Brightness (0 - 150)"))
self.triangulate_label.setStatusTip(_translate("MainWindow", "Triangulate Video"))
self.triangulate_label.setText(_translate("MainWindow", "Triangulate Video"))
self.triangulation_check_box.setToolTip(_translate("MainWindow", "Triangulate Video"))
self.triangulation_check_box.setStatusTip(_translate("MainWindow", "Triangulate Video"))
self.groupBox.setStatusTip(_translate("MainWindow", "Triangulate Options"))
self.groupBox.setTitle(_translate("MainWindow", "Triangulate Options"))
self.max_points_label.setStatusTip(_translate("MainWindow", "Max Points"))
self.max_points_label.setText(_translate("MainWindow", "Max Points"))
self.max_points_spinBox.setToolTip(_translate("MainWindow", "Adjust number of points used to triangulate (20 - 10000)"))
self.max_points_spinBox.setStatusTip(_translate("MainWindow", "Adjust number of points used to triangulate (20 - 10000)"))
self.method_label.setStatusTip(_translate("MainWindow", "Sampling Method"))
self.method_label.setText(_translate("MainWindow", "Method:"))
self.threshold_radioButton.setToolTip(_translate("MainWindow", "Threshold Sampling Method (quick, but with mediocre results)"))
self.threshold_radioButton.setStatusTip(_translate("MainWindow", "Threshold Sampling Method (quick, but with mediocre results)"))
self.threshold_radioButton.setText(_translate("MainWindow", "Threshold"))
self.poisson_disk_radioButton.setToolTip(_translate("MainWindow", "Poisson Disk Sampling Method (slow, but with great results)"))
self.poisson_disk_radioButton.setStatusTip(_translate("MainWindow", "Poisson Disk Sampling Method (slow, but with great results)"))
self.poisson_disk_radioButton.setText(_translate("MainWindow", "Poisson Disk"))
self.scale_factor_label.setStatusTip(_translate("MainWindow", "Scale Factor"))
self.scale_factor_label.setText(_translate("MainWindow", "Scale Factor"))
self.scale_factor_comboBox.setToolTip(_translate("MainWindow", "Scale Factor determines size of image used to sample colors during triangulation (higher is slower)"))
self.scale_factor_comboBox.setStatusTip(_translate("MainWindow", "Scale Factor determines size of image used to sample colors during triangulation (higher is slower)"))
self.scale_factor_comboBox.setCurrentText(_translate("MainWindow", "10%"))
self.scale_factor_comboBox.setItemText(0, _translate("MainWindow", "5%"))
self.scale_factor_comboBox.setItemText(1, _translate("MainWindow", "10%"))
self.scale_factor_comboBox.setItemText(2, _translate("MainWindow", "20%"))
self.scale_factor_comboBox.setItemText(3, _translate("MainWindow", "50%"))
self.scale_factor_comboBox.setItemText(4, _translate("MainWindow", "100%"))
self.draw_line_checkBox.setToolTip(_translate("MainWindow", "Draw Line"))
self.draw_line_checkBox.setStatusTip(_translate("MainWindow", "Draw Line"))
self.label.setText(_translate("MainWindow", "Draw Line"))
self.thickness_label.setStatusTip(_translate("MainWindow", "Thickness"))
self.thickness_label.setText(_translate("MainWindow", "Thickness"))
self.thickness_slider.setToolTip(_translate("MainWindow", "Adjust Line Thickness (1 - 5)"))
self.thickness_slider.setStatusTip(_translate("MainWindow", "Adjust Line Thickness (1 - 5)"))
self.thickness_spinBox.setToolTip(_translate("MainWindow", "Adjust Line Thickness (1 - 5)"))
self.thickness_spinBox.setStatusTip(_translate("MainWindow", "Adjust Line Thickness (1 - 5)"))
self.actionOpen.setText(_translate("MainWindow", "Open"))
self.actionSave.setText(_translate("MainWindow", "Save"))
self.actionSave_As.setText(_translate("MainWindow", "Save As..."))
self.actionNew.setText(_translate("MainWindow", "New"))
self.actionNew_Window.setText(_translate("MainWindow", "New Window"))
from qtwidgets import AnimatedToggle, Toggle
|
import vis
import utils
import met_funcs
import plotly_vis
import MetMastData
import pandas as pd
import matplotlib.pyplot as plt
from colour import Color
import plotly
from plotly import tools
#import plotly.tools as tls
import plotly.plotly as py
import plotly.graph_objs as go
# Place input files here
#inputfiles_here = ['2012_August.csv']
#inputfiles_here = ['2013_January.csv','2013_February.csv','2013_March.csv','2013_April.csv','2013_May.csv','2013_June.csv','2013_July.csv','2013_August.csv','2013_September.csv','2013_October.csv','2013_November.csv','2013_December.csv']
#inputfiles_here = ['2013_January.csv']
year = 2017
inputfiles_here = [str(year) + '_' + s + '.csv' for s in utils.monthnames()]
'''
inputfiles_here = MetMastData()
actual_data =
cate_info = actual_data.cate_info
'''
# Load and filter data
actual_data = met_funcs.load_met_data(inputfiles_here)
actual_data = met_funcs.drop_nan_cols(actual_data)
actual_data = met_funcs.qc_mask(actual_data)
# Extract categorical information
keep_cats = met_funcs.categories_to_keep()
ex_cats = met_funcs.categories_to_exclude()
var_cats,var_units,var_labels,var_save = met_funcs.categorize_fields(actual_data,keep_cats,ex_cats)
# Extract more information
met_funcs.groom_data(actual_data,var_cats)
stab_conds,stab_cats = met_funcs.flag_stability(actual_data)
cate_info = met_funcs.get_catinfo(actual_data)
# Plot the data with the desired category and function
category = 'speed'
#fig1 = plotly_vis.monthly_rose_fig(actual_data,cate_info,category)
fig1 = plotly_vis.monthlyhourlyplot(actual_data,cate_info,category)
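# Alternative (a sketch): render the figure locally instead of uploading it to
# the Plotly cloud service (assumes a plotly version that ships plotly.offline):
#
#   from plotly.offline import plot
#   plot(fig1, filename='MetMast-Test_funcMonthlyHourly.html')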
py.iplot(fig1, filename = 'MetMast-Test_funcMonthlyHourly') |
import sys
import math
import operator
import json
"less than 1000 because document names overlap after removing - and _ and these files have same content"
N=0
docTokenCountMapping={}
avdl=0
def get_term_BM25_score(ni,fi,qfi,dl):
k1=1.2
b=0.75
k2=100
score=0.0
if fi==0:
score=0.0
else:
K=k1*((1-b)+b*float(dl)/float(avdl))
comp1=float(N-ni+0.5)/float(ni+0.5)
comp2=float((k1+1)*fi)/float(K+fi)
comp3=float((k2+1)*qfi)/float(k2+qfi)
score=math.log(comp1)*comp2*comp3
return score
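# For reference, the score computed above is the standard BM25 term weight,
# written with the variable names used in this file:
#
#   K     = k1 * ((1 - b) + b * dl / avdl)
#   score = log((N - ni + 0.5) / (ni + 0.5))
#           * ((k1 + 1) * fi) / (K + fi)
#           * ((k2 + 1) * qfi) / (k2 + qfi)
#
# where N is the number of documents in the index, ni the document frequency
# of the term, fi its frequency in the document, qfi its frequency in the
# query, dl the document length and avdl the average document length.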
def calculateDocumentStatisticsFromIndex(unigramIndex):
totalNumberOfTokens=0
for term in unigramIndex:
invertedList=unigramIndex[term]
for entry in invertedList:
docId=entry[0]
frequency=entry[1]
global docTokenCountMapping
if docTokenCountMapping.has_key(docId):
docTokenCountMapping[docId]=docTokenCountMapping[docId]+frequency
totalNumberOfTokens=totalNumberOfTokens+frequency
else:
docTokenCountMapping[docId]=frequency
totalNumberOfTokens=totalNumberOfTokens+frequency
global N
N=len(docTokenCountMapping.keys())
global avdl
    avdl=float(totalNumberOfTokens)/N  # keep the average document length as a float
def writeBM25DocumentScoresToFile(doc_score,queryID):
    sorted_docscore=sorted(doc_score.items(),key=operator.itemgetter(1),reverse=True)[:100]
    doc_rank=1
    # Write the top 100 documents for this query in TREC result-file format
    with open("query"+queryID+".txt",'w') as output_file:
        for doc in sorted_docscore:
            output_file.write(str(queryID)+" "+"Q0 "+doc[0].replace(".txt","")+" "+str(doc_rank)+" "+str(doc[1])+" Hemanth"+"\n")
            doc_rank+=1
def main(argv):
if argv:
index_filename=argv[0]
query_filename=argv[1]
else:
index_filename="unigramIndex.txt"
query_filename="queries.txt"
unigramIndex={}
docIDMapping={}
with open(index_filename) as data_file:
unigramIndex = json.load(data_file)
with open("docIDMapping.txt") as mapping_file:
docIDMapping = json.load(mapping_file)
"Calculate the total number of documents in the index and the average document length"
calculateDocumentStatisticsFromIndex(unigramIndex)
with open(query_filename) as query_content:
queryEntries=query_content.readlines()
for queryEntry in queryEntries:
query=queryEntry.split('-')[1].rstrip()
queryID=queryEntry.split('-')[0]
queryTerms=query.split(' ')
doc_score={}
distinctQueryTerms=list(set(queryTerms))
for queryTerm in distinctQueryTerms:
if unigramIndex.has_key(queryTerm):
invertedList=unigramIndex[queryTerm]
documentFrequency=len(invertedList)
queryFrequency=queryTerms.count(queryTerm)
for entry in invertedList:
docID=entry[0]
docName=docIDMapping[str(docID)]
docLength=docTokenCountMapping[docID]
frequencyOfTermInDocument=entry[1]
termScore=get_term_BM25_score(documentFrequency,frequencyOfTermInDocument,queryFrequency,docLength)
if doc_score.has_key(docName):
doc_score[docName]=doc_score[docName]+termScore
else:
doc_score[docName]=termScore
writeBM25DocumentScoresToFile(doc_score,queryID)
if __name__ == '__main__':
    main(sys.argv[1:])
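# Example invocation (a sketch; the script name is illustrative, and
# docIDMapping.txt must also be present in the working directory):
#
#   python bm25_ranker.py unigramIndex.txt queries.txt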
|
# Generic CNN classifier that uses a geojson file and gbdx imagery to classify chips
import numpy as np
import os, random
import json, geojson
from mltools import geojson_tools as gt
from mltools.data_extractors import get_data_from_polygon_list as get_chips
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.models import Sequential, model_from_json
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D
from keras.callbacks import ModelCheckpoint
from keras.optimizers import SGD
class PoolNet(object):
'''
Convolutional Neural Network model to classify chips as pool/no pool
INPUT classes (list [str]): Classes to train model on, exactly as they appear in
the properties of any geojsons used for training. Defaults to pool
classes: ['No swimming pool', 'Swimming pool'].
batch_size (int): Amount of images to use for each batch during training.
Defaults to 32.
input_shape (tuple[int]): Shape of input chips with theano dimensional
ordering (n_channels, height, width). Height and width must be equal. If
            an old model is loaded (model_name is not None), the input shape will be
automatically set from the architecture and does not need to be specified.
Defaults to (3,125,125).
          model_name (str): Name of a previous model to load (not including file
extension). There should be a json architecture file and HDF5 ('.h5')
weights file in the working directory under this name. If None, a new
model will be compiled for training. Defaults to None.
          learning_rate (float): Learning rate for the first round of training. Defaults
to 0.001
small_model (bool): Use a model with nine layers instead of 16. Will train
faster but may be less accurate and cannot be used with large chips.
Defaults to False.
kernel_size (int): Size (in pixels) of the kernels to use at each
convolutional layer of the network. Defaults to 3 (standard for VGGNet).
'''
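    # A minimal usage sketch (hypothetical file names; the geojson and the
    # <image_id>.tif images it references are assumed to be in the working
    # directory, and the parameter values are illustrative only):
    #
    #   net = PoolNet(batch_size=32, input_shape=(3, 125, 125))
    #   net.fit_from_geojson('train.geojson', train_size=10000, nb_epoch=10,
    #                        save_model='pool_model')
    #   net.classify_geojson('target.geojson', output_name='classified')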
def __init__(self, classes=['No swimming pool', 'Swimming pool'], batch_size=32,
input_shape=(3, 125, 125), small_model=False, model_name=None,
learning_rate = 0.001, kernel_size=3):
self.nb_classes = len(classes)
self.classes = classes
self.batch_size = batch_size
self.small_model = small_model
self.input_shape = input_shape
self.lr = learning_rate
self.kernel_size = kernel_size
self.cls_dict = {classes[i]: i for i in xrange(len(self.classes))}
if model_name:
self.model_name = model_name
self.model = self._load_model_architecture(model_name)
self.model.load_weights(model_name + '.h5')
self.input_shape = self.model.input_shape
elif self.small_model:
self.model = self._small_model()
else:
self.model = self._VGG_16()
self.model_layer_names = [self.model.layers[i].get_config()['name']
for i in range(len(self.model.layers))]
def _VGG_16(self):
'''
Implementation of VGG 16-layer net.
'''
print 'Compiling VGG Net...'
model = Sequential()
model.add(ZeroPadding2D((1,1), input_shape=self.input_shape))
model.add(Convolution2D(64, self.kernel_size, self.kernel_size,activation='relu',
input_shape=self.input_shape))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(64, self.kernel_size, self.kernel_size,
activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(128, self.kernel_size, self.kernel_size,
activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(128, self.kernel_size, self.kernel_size,
activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(256, self.kernel_size, self.kernel_size,
activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(256, self.kernel_size, self.kernel_size,
activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(256, self.kernel_size, self.kernel_size,
activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, self.kernel_size, self.kernel_size,
activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, self.kernel_size, self.kernel_size,
activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, self.kernel_size, self.kernel_size,
activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, self.kernel_size, self.kernel_size,
activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, self.kernel_size, self.kernel_size,
activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, self.kernel_size, self.kernel_size,
activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(Flatten())
model.add(Dense(2048, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(2048, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(self.nb_classes, activation='softmax'))
sgd = SGD(lr=self.lr, decay=0.01, momentum=0.9, nesterov=True)
        model.compile(optimizer=sgd, loss='categorical_crossentropy')  # use the configured SGD instance so lr/decay take effect
return model
def _small_model(self):
'''
Alternative model architecture with fewer layers for computationally expensive
training datasets
'''
print 'Compiling Small Net...'
model = Sequential()
model.add(ZeroPadding2D((1,1), input_shape=self.input_shape))
model.add(Convolution2D(64, self.kernel_size, self.kernel_size,activation='relu',
input_shape=self.input_shape))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(64, self.kernel_size, self.kernel_size,
activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(128, self.kernel_size, self.kernel_size,
activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(128, self.kernel_size, self.kernel_size,
activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(Flatten())
model.add(Dense(2048, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(2048, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(self.nb_classes, activation='softmax'))
sgd = SGD(lr=self.lr, decay=0.01, momentum=0.9, nesterov=True)
        model.compile(optimizer=sgd, loss='categorical_crossentropy')  # use the configured SGD instance so lr/decay take effect
return model
def _load_model_architecture(self, model_name):
'''
        Load a model architecture from a json file
INPUT model_name (str): Name of model to load
OUTPUT Loaded model architecture
'''
print 'Loading model {}'.format(self.model_name)
#load model
with open(model_name + '.json') as f:
mod = model_from_json(json.load(f))
return mod
def save_model(self, model_name):
'''
        Saves model architecture as a json file and current weights as an HDF5 file
        INPUT   model_name (str): Name under which to save the architecture and weights.
This should not include the file extension.
'''
# Save architecture
arch, arch_json = '{}.json'.format(model_name), self.model.to_json()
with open(arch, 'w') as f:
json.dump(arch_json, f)
# Save weights
weights = '{}.h5'.format(model_name)
self.model.save_weights(weights)
def fit_from_geojson(self, train_geojson, max_side_dim=None, min_side_dim=0,
chips_per_batch=5000, train_size=10000, validation_split=0.1,
bit_depth=8, save_model=None, nb_epoch=10,
shuffle_btwn_epochs=True, return_history=False,
save_all_weights=True, retrain=False, learning_rate_2=0.01):
'''
Fit a model from a geojson file with training data. This method iteratively
yields large batches of chips to train on for each epoch. Please ensure that
your current working directory contains all imagery referenced in the
        image_id property in train_geojson, and that the images are named as follows: <image_id>.tif,
where image_id is the catalog id of the image.
INPUT train_geojson (string): Filename for the training data (must be a
geojson). The geojson must be filtered such that all polygons are of
valid size (as defined by max_side_dim and min_side_dim)
max_side_dim (int): Maximum acceptable side dimension (in pixels) for a
chip. If None, defaults to input_shape[-1]. If larger than the
input shape the chips extracted will be downsampled to match the
input shape. Defaults to None.
min_side_dim (int): Minimum acceptable side dimension (in pixels) for a
chip. Defaults to 0.
chips_per_batch (int): Number of chips to yield per batch. Must be small
enough to fit into memory. Defaults to 5000 (decrease for larger
input sizes).
train_size (int): Number of chips to use for training data.
validation_split (float): Proportion of training chips to use as validation
data. Defaults to 0.1.
bit_depth (int): Bit depth of the image strips from which training chips
are extracted. Defaults to 8 (standard for DRA'ed imagery).
save_model (string): Name of model for saving. if None, does not save
model to disk. Defaults to None
nb_epoch (int): Number of epochs to train for. Each epoch will be trained
                on up to train_size chips, yielded in groups of chips_per_batch. Defaults to 10.
shuffle_btwn_epochs (bool): Shuffle the features in train_geojson
between each epoch. Defaults to True.
return_history (bool): Return a list containing metrics from past epochs.
Defaults to False.
save_all_weights (bool): Save model weights after each epoch. A directory
called models will be created in the working directory. Defaults to
True.
retrain (bool): Freeze all layers except final softmax to retrain only
the final weights of the model. Defaults to False
learning_rate_2 (float): Learning rate for the second round of training.
Only relevant if retrain is True. Defaults to 0.01.
OUTPUT trained model, history
'''
resize_dim, validation_data, full_hist = None, None, []
# load geojson training polygons
with open(train_geojson) as f:
polygons = geojson.load(f)['features'][:train_size]
if len(polygons) < train_size:
raise Exception('Not enough polygons to train on. Please add more training ' \
'data or decrease train_size.')
# Determine size of chips to extract and resize dimension
if not max_side_dim:
max_side_dim = self.input_shape[-1]
elif max_side_dim != self.input_shape[-1]:
resize_dim = self.input_shape # resize chips to match input shape
# Recompile model with retrain params
if retrain:
for i in xrange(len(self.model.layers[:-1])):
self.model.layers[i].trainable = False
sgd = SGD(lr=learning_rate_2, momentum=0.9, nesterov=True)
            self.model.compile(loss='categorical_crossentropy', optimizer=sgd)  # use the reduced learning rate configured above
# Set aside validation data
if validation_split > 0:
val_size = int(validation_split * train_size)
val_data, polygons = polygons[: val_size], polygons[val_size: ]
train_size = len(polygons)
# extract validation chips
print 'Getting validation data...\n'
valX, valY = get_chips(val_data, min_side_dim=min_side_dim,
max_side_dim=max_side_dim, classes=self.classes,
normalize=True, return_labels=True, mask=True,
bit_depth=bit_depth, show_percentage=True,
assert_all_valid=True, resize_dim=resize_dim)
validation_data = (valX, valY)
# Train model
for e in range(nb_epoch):
print 'Epoch {}/{}'.format(e + 1, nb_epoch)
# Make callback and diretory for saved weights
if save_all_weights:
chk = ModelCheckpoint(filepath="./models/epoch" + str(e) + \
"_{val_loss:.2f}.h5", verbose=1,
save_weights_only=True)
if 'models' not in os.listdir('.'):
os.makedirs('models')
if shuffle_btwn_epochs:
np.random.shuffle(polygons)
# Cycle through batches of chips and train
for batch_start in range(0, train_size, chips_per_batch):
callbacks = []
this_batch = polygons[batch_start: batch_start + chips_per_batch]
# Get chips from batch
X, Y = get_chips(this_batch, min_side_dim=min_side_dim,
max_side_dim=max_side_dim, classes=self.classes,
normalize=True, return_labels=True, mask=True,
bit_depth=bit_depth, show_percentage=False,
assert_all_valid=True, resize_dim=resize_dim)
# Save weights if this is the final batch in the epoch
                if save_all_weights and batch_start == range(0, train_size, chips_per_batch)[-1]:
callbacks = [chk]
# Fit the model on this batch
hist = self.model.fit(X, Y, batch_size=self.batch_size, nb_epoch=1,
validation_data=validation_data,
callbacks=callbacks)
# Dict recording loss and val_loss after each epoch
full_hist.append(hist.history)
if save_model:
self.save_model(save_model)
if return_history:
return full_hist
def fit_xy(self, X_train, Y_train, validation_split=0.1, save_model=None,
nb_epoch=10, shuffle_btwn_epochs=True, return_history=False,
save_all_weights=True, retrain=False, learning_rate_2=0.01):
'''
Fit model on training chips already loaded into memory
INPUT X_train (array): Training chips with the following dimensions:
(train_size, num_channels, rows, cols). Dimensions of each chip
should match the input_size to the model.
Y_train (list): One-hot encoded labels to X_train with dimensions as
follows: (train_size, n_classes)
validation_split (float): Proportion of X_train to validate on while
training.
save_model (string): Name under which to save model. if None, does not
                save model. Defaults to None.
nb_epoch (int): Number of training epochs to complete
shuffle_btwn_epochs (bool): Shuffle the features in train_geojson
between each epoch. Defaults to True.
return_history (bool): Return a list containing metrics from past epochs.
Defaults to False.
save_all_weights (bool): Save model weights after each epoch. A directory
called models will be created in the working directory. Defaults to
True.
retrain (bool): Freeze all layers except final softmax to retrain only
the final weights of the model. Defaults to False
learning_rate_2 (float): Learning rate for the second round of training.
Only relevant if retrain is True. Defaults to 0.01.
OUTPUT trained Keras model.
'''
callbacks = []
# Recompile model with retrain params
if retrain:
for i in xrange(len(self.model.layers[:-1])):
self.model.layers[i].trainable = False
sgd = SGD(lr=learning_rate_2, momentum=0.9, nesterov=True)
            self.model.compile(loss='categorical_crossentropy', optimizer=sgd)  # use the reduced learning rate configured above
# Define callback to save weights after each epoch
if save_all_weights:
chk = ModelCheckpoint(filepath="./models/ch_{epoch:02d}-{val_loss:.2f}.h5",
verbose=1, save_weights_only=True)
callbacks = [chk]
# Fit model
hist = self.model.fit(X_train, Y_train, validation_split=validation_split,
callbacks=callbacks, nb_epoch=nb_epoch,
shuffle=shuffle_btwn_epochs)
if save_model:
self.save_model(save_model)
if return_history:
return hist
def classify_geojson(self, target_geojson, output_name, max_side_dim=None,
min_side_dim=0, numerical_classes=True, chips_in_mem=5000,
bit_depth=8):
'''
Use the current model and weights to classify all polygons in target_geojson. The
output file will have a 'CNN_class' property with the net's classification
result, and a 'certainty' property with the net's certainty in the assigned
classification.
Please ensure that your current working directory contains all imagery referenced
        in the image_id property in target_geojson, and that the images are named as follows:
<image_id>.tif, where image_id is the catalog id of the image.
INPUT target_geojson (string): Name of the geojson to classify. This file
should only contain chips with side dimensions between min_side_dim
and max_side_dim (see below).
output_name (string): Name under which to save the classified geojson.
max_side_dim (int): Maximum acceptable side dimension (in pixels) for a
chip. If None, defaults to input_shape[-1]. If larger than the
input shape the chips extracted will be downsampled to match the
input shape. Defaults to None.
min_side_dim (int): Minimum acceptable side dimension (in pixels) for a
chip. Defaults to 0.
numerical_classes (bool): Make output classifications correspond to the
                indices (base 0) of the 'classes' attribute. If False, 'CNN_class'
is a string with the class name. Defaults to True.
chips_in_mem (int): Number of chips to load in memory at once. Decrease
this parameter for larger chip sizes. Defaults to 5000.
bit_depth (int): Bit depth of the image strips from which training chips
are extracted. Defaults to 8 (standard for DRA'ed imagery).
'''
resize_dim, yprob, ytrue = None, [], []
# Determine size of chips to extract and resize dimension
if not max_side_dim:
max_side_dim = self.input_shape[-1]
elif max_side_dim != self.input_shape[-1]:
resize_dim = self.input_shape # resize chips to match input shape
# Format output filename
if not output_name.endswith('.geojson'):
output_name = '{}.geojson'.format(output_name)
# Get polygon list from geojson
with open(target_geojson) as f:
features = geojson.load(f)['features']
# Classify in batches of 1000
for ix in xrange(0, len(features), chips_in_mem):
this_batch = features[ix: (ix + chips_in_mem)]
try:
X = get_chips(this_batch, min_side_dim=min_side_dim,
max_side_dim=max_side_dim, classes=self.classes,
normalize=True, return_labels=False,
bit_depth=bit_depth, mask=True, show_percentage=False,
assert_all_valid=True, resize_dim=resize_dim)
except AssertionError:
raise ValueError('Please filter the input geojson file using ' \
'geojson_tools.filter_geojson() and ensure all ' \
'polygons are valid before using this method.')
# Predict classes of test data
yprob += list(self.model.predict_proba(X))
# Get predicted classes and certainty
yhat = [np.argmax(i) for i in yprob]
ycert = [str(np.max(j)) for j in yprob]
if not numerical_classes:
yhat = [self.classes[i] for i in yhat]
# Update geojson, save as output_name
data = zip(yhat, ycert)
property_names = ['CNN_class', 'certainty']
gt.write_properties_to(data, property_names=property_names,
input_file=target_geojson, output_file=output_name)
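# Illustrative usage sketch (not from the original source): assuming `net` is a
# trained instance of this class and 'target_chips.geojson' exists in the working
# directory alongside the referenced <image_id>.tif strips, classification could
# look like:
#   net.classify_geojson('target_chips.geojson', output_name='classified',
#                        numerical_classes=False)
# which writes 'classified.geojson' with 'CNN_class' and 'certainty' properties.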
# Tools for analyzing network performance
def x_to_rgb(X):
'''
Transform a normalized (3, h, w) image (Theano ordering) to an (h, w, 3) RGB image
(TensorFlow ordering).
Use this to view or save RGB polygons as images.
INPUT (1) 3d array 'X': original chip with Theano dimensional ordering (3, h, w)
OUTPUT (1) 3d array: RGB image in TensorFlow dim-ordering (h, w, 3)
'''
rgb_array = np.zeros((X.shape[1], X.shape[2], 3), 'uint8')
rgb_array[...,0] = X[0] * 255
rgb_array[...,1] = X[1] * 255
rgb_array[...,2] = X[2] * 255
return rgb_array
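# Illustrative usage sketch (assumes Pillow is installed; not part of the original module):
#   from PIL import Image
#   rgb = x_to_rgb(chip)                      # chip: normalized (3, h, w) array
#   Image.fromarray(rgb).save('chip_preview.png')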
|
# Generated by Django 2.2.2 on 2019-08-23 04:31
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('result', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='listcommit',
name='platform',
field=models.CharField(max_length=20),
),
]
|
import pathlib
from traitlets.config import Configurable
from traitlets import (
Unicode,
Dict,
default
)
from jhubctl.utils import SubclassError
class Cluster(Configurable):
"""Base class for Kubernetes Cluster providers.
To create a new provider, inherit this class and
replace the following traits and methods with the
logic that is appropriate for the provider.
We recommend creating a new folder for that provider
where all templates can be grouped.
"""
# Specific type of the provider
provider_type = Unicode(help="Provider type")
# An alias for the provider. No spaces or hyphens. Underscores instead.
provider_alias = Unicode(help="Simple alias pointing to this provider.")
#
cluster_name = Unicode(help="Name of cluster.")
# Path to templates for this provider.
template_dir = Unicode(
help="Path to template"
).tag(config=True)
ssh_key_name = Unicode(
help='User SSH key name'
).tag(config=True)
@property
def kube_user_data(self):
"""Extra data to pass to the kubectl user for this cluster.
This can be used to map extra data to clusters in the kubeconf file.
"""
return None
def __init__(self, name, **traits):
self.name = name
super().__init__(**traits)
def check_if_cluster_is_deployed(self):
"""Returns True if the cluster is deployed and available.
"""
raise SubclassError("Must be implemented in a subclass.")
def create(self):
"""Deploy a cluster configured for running Jupyterhub
deployments on this provider.
"""
raise SubclassError("Must be implemented in a subclass.")
def delete(self):
"""Teardown a cluster running on this provider.
"""
raise SubclassError("Must be implemented in a subclass.")
def get_auth_config(self):
"""Get yaml describing authorized users for the cluster.
"""
raise SubclassError("Must be implemented in a subclass.")
def get_storage_config(self):
"""Get yaml describing storage on cluster.
"""
raise SubclassError("Must be implemented in a subclass.")
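# Illustrative sketch only (not shipped with jhubctl): a minimal provider could
# subclass Cluster as below. The class name, trait values, and return values are
# hypothetical placeholders chosen purely to show the subclassing contract.
class DummyCluster(Cluster):
    """Toy provider illustrating which hooks a real provider must override."""
    provider_type = Unicode("dummy")
    provider_alias = Unicode("dummy_provider")

    def check_if_cluster_is_deployed(self):
        # Pretend the cluster is always available.
        return True

    def create(self):
        print("Creating cluster {} (no-op in this sketch).".format(self.name))

    def delete(self):
        print("Deleting cluster {} (no-op in this sketch).".format(self.name))

    def get_auth_config(self):
        # Return an empty mapping in place of real auth YAML.
        return {}

    def get_storage_config(self):
        # Return an empty mapping in place of real storage YAML.
        return {}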
|
import re
import sys
import os
from os import listdir
import shutil
placeholder = '{{ cookiecutter.placeholder }}'
output_terms = '{{cookiecutter.filename}}.md'
output_component = '{{cookiecutter.component_name}}.js'
# todo read both files, rewrite Terms.js
# todo generate pdf from terms_and_conditions.md
components_block = None
if os.path.isfile(output_terms) and os.path.isfile(output_component):
with open(output_terms) as terms:
terms_block = ''.join(terms.readlines())
with open(output_component) as components:
components_block = ''.join(components.readlines())
components_block = components_block.replace(placeholder, terms_block)
with open(output_component, 'w') as components:
components.write(components_block)
project_root = os.path.dirname(os.getcwd())
components_dirname = '{{cookiecutter.component}}'
react_app_dirname = 'app'
if os.path.isfile(os.path.join(project_root, components_dirname, 'App.js')):
shutil.move(os.path.join(project_root, components_dirname, 'App.js'),
os.path.join(project_root, react_app_dirname, 'src'))
if os.path.isdir(os.path.join(project_root, components_dirname)) and os.path.isdir(
os.path.join(project_root, react_app_dirname, 'src')):
print("react src dir for component found")
shutil.move(os.path.join(project_root, components_dirname), os.path.join(project_root, react_app_dirname, 'src'))
print("POST_GEN")
|
t = int(input())
for i in range(t):
a,b = input().split()
a = int(a)
b = int(b)
if b == 0:
print(a+1)
continue
if a == 0:
print(1)
continue
print(b*2 + a +1)
|
import datetime
import json
from flask_restplus import fields
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.sql import sqltypes
db = SQLAlchemy()
class Marshaling:
# Automatically generates marshaling (serialization) rules from SQLAlchemy models for use with flask-restplus.
# This is a convenience shortcut; you can still hand-write the models directly if preferred, with no side effects.
type_mapper = {
sqltypes.String: fields.String,
sqltypes.Integer: fields.Integer,
sqltypes.Numeric: fields.Float,
}
@classmethod
def auto_marshaling_model(cls):
return {
column.name: cls.type_mapper[type(column.type)] for column in cls.__table__.c
}
class User(db.Model, Marshaling):
__tablename__ = 'user'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
username = db.Column(db.String(50), nullable=False, unique=True)
first_name = db.Column(db.String(100))
balance = db.Column(db.Numeric(10, 2), default=0)
chat_id = db.Column(db.Integer)
planned_month_deposit = db.Column(db.Numeric(10, 2), default=None)
@classmethod
def auto_marshaling_model(cls):
model: dict = super().auto_marshaling_model()
model['id'] = fields.Integer(readonly=True)
return model
class Bill(db.Model, Marshaling):
__tablename__ = 'bill'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
create_time = db.Column(db.DateTime, default=datetime.datetime.now)
type = db.Column(db.String(20), default='out')
amount = db.Column(db.Numeric(10, 2))
category = db.Column(db.String(100))
name = db.Column(db.String(100), nullable=True)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
user = db.relationship('User', backref=db.backref('bills', lazy='dynamic'))
def __repr__(self):
return '{:12} '.format(str(self.amount) + '元') \
+ '{:4} '.format(self.category) \
+ ((" " + self.name) if self.name else "")
class ScheduledBillTask(db.Model, Marshaling):
__tablename__ = 'task'
id = db.Column(db.String(50), primary_key=True)
amount = db.Column(db.Numeric(10, 2))
trigger = db.Column(db.String(10))
category = db.Column(db.String(100))
type = db.Column(db.String(20))
name = db.Column(db.String(100), nullable=True)
trigger_kwargs = db.Column(db.String(200))
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
user = db.relationship('User', backref=db.backref('tasks', lazy='dynamic'))
@classmethod
def auto_marshaling_model(cls):
model: dict = super().auto_marshaling_model()
class TriggerKwargs(fields.Raw):
def format(self, value):
return json.loads(value)
model['trigger_kwargs'] = TriggerKwargs(required=True)
model['id'] = fields.String(readonly=True)
return model
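# Illustrative usage sketch (hypothetical; assumes an existing flask_restplus Api
# instance named `api` and that every column type used above appears in type_mapper):
#   user_model = api.model('User', User.auto_marshaling_model())
#   task_model = api.model('ScheduledBillTask', ScheduledBillTask.auto_marshaling_model())
# The generated dicts map column names to flask_restplus field types; the overridden
# classmethods additionally mark ids read-only and JSON-decode trigger_kwargs.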
|
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for snlds.forward_backward_algo."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from snlds import forward_backward_algo
class ForwardBackwardAlgoTest(tf.test.TestCase):
"""Testing the forward backward algorithm, against hand calculated examples.
The example is adapted from the following Lecture Note by Imperial College
https://ibug.doc.ic.ac.uk/media/uploads/documents/computing_em_hmm.pdf
"""
def setUp(self):
super(ForwardBackwardAlgoTest, self).setUp()
# initial discrete state likelihood p(s[0])
self.init_pi = tf.convert_to_tensor([0.5, 0.5])
# matrix A is transition matrix p(s[t] | s[t-1], x[t-1])
self.mat_a = tf.convert_to_tensor(
np.array([[[[0.5, 0.5],
[0.5, 0.5]],
[[0.5, 0.5],
[0.5, 0.5]],
[[0.5, 0.5],
[0.5, 0.5]],
[[0.5, 0.5],
[0.5, 0.5]]]], dtype=np.float32))
# matrix B is the emission matrix p(x[t](, z[t]) | s[t])
self.mat_b = tf.convert_to_tensor(
np.array([[[0.5, 0.75],
[0.5, 0.75],
[0.5, 0.25],
[0.5, 0.25]]], dtype=np.float32))
def test_forward_pass(self):
fwd_logprob, fwd_lognorm = forward_backward_algo.forward_pass(
tf.math.log(self.mat_a), tf.math.log(self.mat_b),
tf.math.log(self.init_pi))
fwd_prob = tf.exp(fwd_logprob)
fwd_norm = tf.math.cumprod(tf.exp(fwd_lognorm), axis=1)
fwd_norm = fwd_norm[:, :, None]
target_value = np.array([[[1./4., 3./8.],
[5./32., 15./64.],
[25./256., 25./512.],
[75./2048., 75./4096.]]], dtype=np.float32)
self.assertAllClose(self.evaluate(fwd_prob * fwd_norm), target_value)
def test_backward_pass(self):
bwd_logprob, bwd_lognorm = forward_backward_algo.backward_pass(
tf.math.log(self.mat_a), tf.math.log(self.mat_b),
tf.math.log(self.init_pi))
bwd_prob = tf.exp(bwd_logprob)
bwd_norm = tf.math.cumprod(tf.exp(bwd_lognorm), axis=1, reverse=True)
bwd_norm = bwd_norm[:, :, None]
target_value = np.array([[[45./512., 45./512.],
[9./64., 9./64.],
[3./8., 3./8.],
[1., 1.]]], dtype=np.float32)
self.assertAllClose(self.evaluate(bwd_prob * bwd_norm), target_value)
def test_forward_backward(self):
_, _, log_gamma1, log_gamma2 = forward_backward_algo.forward_backward(
tf.math.log(self.mat_a), tf.math.log(self.mat_b),
tf.math.log(self.init_pi))
gamma1, gamma2 = tf.exp(log_gamma1), tf.exp(log_gamma2)
gamma1_target = np.array([[[90./225., 135./225.],
[90./225., 135./225.],
[150./225., 75./225.],
[150./225., 75./225.]]], dtype=np.float32)
gamma2_target = np.array([[[[1., 1.],
[1., 1.]],
[[36./225., 54./225.],
[54./225., 81./225.]],
[[60./225., 90./225.],
[30./225., 45./225.]],
[[100./225., 50./225.],
[50./225., 25./225.]]]], dtype=np.float32)
self.assertAllClose(self.evaluate(gamma1), gamma1_target)
self.assertAllClose(self.evaluate(gamma2), gamma2_target)
if __name__ == "__main__":
tf.test.main()
|
#!/usr/bin/python
#
# boxy2edf v0.2
# Copyright 2016 Paolo Masulli - Neuroheuristic Research Group
#
#
# This file is part of boxy2edf.
#
# boxy2edf is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# boxy2edf is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with boxy2edf. If not, see <http://www.gnu.org/licenses/>.
import struct
import os.path
import sys
import datetime
import logging
import numpy as np
# logging configuration
# logging.basicConfig(filename='example.log', filemode='w', level=logging.DEBUG)
logging.basicConfig(level=logging.INFO)
interactive_mode = False
if len(sys.argv) < 2:
print "Interactive mode"
interactive_mode = True
boxy_file = raw_input("Path to the boxy file: ")
else:
boxy_file = sys.argv[1]
if not os.path.isfile(boxy_file):
raise ValueError("Boxy file not found: %s" % boxy_file)
edf_file = boxy_file.replace(".txt", "")+".edf"
# the boxy signals are decimals; multiply them by the multiplier and then round to int
multiplier = 1
digital_min = -100000
digital_max = 100000
EEG_EVENT_CHANNEL = 'EDF Annotations'
# each event data is coded as 20 2-byte "integers" i.e. 40 bytes available -- each TAL has this length
eeg_event_length = 20
BOXY_EVENT_CHANNEL = 'digaux'
def spacepad(string, n, paddingchar=" "):
string = str(string)
if len(string) > n:
raise ValueError(string + ": The string is too long to be space padded as requested.")
return string + paddingchar * (n - len(string))
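# e.g. spacepad("EDF+C", 8) -> "EDF+C   " (three trailing spaces); strings longer
# than n raise ValueError.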
# open the two files
bf = open(boxy_file, 'r')
ef = open(edf_file, 'wb')
# read the Boxy file
data_begins_line = data_ends_line = 0
b_update_rate = 0.0
bf_fields = []
bf_data = {} # each element is a signal/channel, a list of data points
i = 0
for line in bf:
if '#DATA BEGINS' in line:
data_begins_line = i
if '#DATA ENDS' in line:
data_ends_line = i
if "Update Rate (Hz)" in line:
b_update_rate = float(line.split(" ")[0])
if data_begins_line != 0:
# if data has begun
if i == data_begins_line + 1:
# the field names line
bf_fields = line.rstrip().split("\t")
for (field_id, field) in enumerate(bf_fields):
bf_data[field] = []
logging.info("There are %d fields" % len(bf_fields))
if data_ends_line == 0 and i > data_begins_line + 2:
# if we are in a data line
data_line = line.rstrip().split("\t")
# logging.info("There are %d data columns" % len(bf_data[-1]))
if len(data_line) != len(bf_fields):
raise ValueError(("We have a problem! There are %d fields, but line %d " +
"in the boxy file has %d points.") % (len(bf_fields), i, len(data_line)))
for (field_id, v) in enumerate(data_line):
bf_data[bf_fields[field_id]].append(np.round(float(v), 3))
# bf_data[bf_fields[field_id]].append(float(v))
i += 1
print "Data read in the Boxy file".center(100, "-")
print "Data begins at line", data_begins_line, "and ends at", data_ends_line
print "There are", len(bf_data), "columns."
print "Fields:", bf_fields
print "Update rate:", b_update_rate
# columns to skip
skipped_columns = {"time", "group", "step", "mark", "flag", "aux-1", "aux-2", "aux-3", "aux-4", BOXY_EVENT_CHANNEL}
events = []
events_present = 0
event_begun = False  # paolo: to separate events -- each event happens at ONE specific time, not over several contiguous samples
for (t, x) in enumerate(bf_data[BOXY_EVENT_CHANNEL]):
if x != 0.0 and not event_begun:
events.append((t, x))
event_begun = not event_begun
if x == 0.0 and event_begun:
event_begun = not event_begun
print "Events (time, code):", events
if len(events) != 0:
events_present = 1
for f in bf_data.keys():
if max(bf_data[f]) - min(bf_data[f]) == 0:
logging.warning("%s almost constant" % f)
# skipped_columns.add(f)
else:
logging.info("Channel %s\t%f\t%f" % (f, min(bf_data[f]), max(bf_data[f])))
skipped_columns = list(skipped_columns)
# change here to select some signals only
selected_fields = [field for field in bf_fields if field not in skipped_columns]
selected_signals = {field: bf_data[field][:] for field in selected_fields}
n_signals = len(selected_fields)
data_time_duration = round(len(selected_signals[selected_fields[0]]) / b_update_rate) # freq * n_points
physical_minimum = {}
physical_maximum = {}
print "Scaling and centering".center(100, "-")
for s in selected_fields:
# for each signal
# centering around 0
logging.info("Channel %s" % s)
# save physical min and max
physical_minimum[s] = min(selected_signals[s])
physical_maximum[s] = max(selected_signals[s])
mean = sum(selected_signals[s]) / len(selected_signals[s])
logging.info("Average = %f --- centering the values." % mean)
i = 0
while i < len(selected_signals[s]):
selected_signals[s][i] -= mean
i += 1
logging.info("After centering:\t%f\t%f" % (min(selected_signals[s]), max(selected_signals[s])))
abs_max = max(abs(min(selected_signals[s])), abs(max(selected_signals[s])))
logging.info("Abs max = %f -- now scaling" % abs_max)
scaling_factor = 32767.0/abs_max # the scaling factor to be in the range -32k to 32k
logging.info("The scaling factor is: %f" % scaling_factor)
i = 0
while i < len(selected_signals[s]):
selected_signals[s][i] = int(round(scaling_factor * selected_signals[s][i]))
i += 1
logging.info("After scaling:\t\t%d\t%d" % (min(selected_signals[s]), max(selected_signals[s])))
# ###
print "".center(100, "=")
print "Data to write in the EDF file".center(100, "-")
print "Fields:", selected_fields
print "Writing", len(selected_fields), "fields."
print "Number of data points:", len(selected_signals[selected_fields[0]])
print "Approximate data recording duration: %s" % \
str(datetime.timedelta(seconds=len(selected_signals[selected_fields[0]]) / b_update_rate))
print "".center(100, "=")
if interactive_mode:
raw_input("Press Enter to continue...")
date = "01-JAN-2015"
patient_code = "P001" # local code
patient_sex = "F" # M/F/X
patient_birthdate = "01-MAR-1951" # DD-MMM-YYYY
patient_name = "fNIRS_Patient" # replace spaces w underscores
recording_startdate_short = "01.01.15" # DD.MM.YY
recording_startdate_long = "01-JAN-2015" # DD-MMM-YYYY
recording_starttime = "10.00.00" # HH.MM.SS
investigation_code = "NIRS_001"
responsible_investigator_code = "AV"
equipment_code = "NHRG_IMAGENT"
transducer_type = "NIRS Optode" # always this constant value, since it's data from Boxy
physical_dimension = "A" # to be checked...
prefiltering_text = "None"
local_patient_ident = " ".join([patient_code, patient_sex, patient_birthdate, patient_name])
local_recording_ident = " ".join(["Startdate", recording_startdate_long, investigation_code,
responsible_investigator_code, equipment_code])
header = dict()
# 8 ascii : version of this data format (0)
header[0] = spacepad("0", 8)
# 80 ascii : local patient identification (mind item 3 of the additional EDF+ specs)
header[1] = spacepad(local_patient_ident, 80)
# 80 ascii : local recording identification (mind item 4 of the additional EDF+ specs)
header[2] = spacepad(local_recording_ident, 80)
# 8 ascii : startdate of recording (dd.mm.yy) (mind item 2 of the additional EDF+ specs)
header[3] = recording_startdate_short
# 8 ascii : starttime of recording (hh.mm.ss)
header[4] = recording_starttime
# 8 ascii : number of bytes in header record
header[5] = spacepad(str(256 * (n_signals + 1 + events_present)), 8)
# 44 ascii : reserved
header[6] = spacepad("EDF+C", 44)
# 8 ascii : number of data records (-1 if unknown, obey item 10 of the additional EDF+ specs)
header[7] = spacepad("1", 8)
# 8 ascii : duration of a data record, in seconds
header[8] = spacepad(str(int(data_time_duration)), 8)
# 4 ascii : number of signals (ns) in data record
header[9] = spacepad(str(n_signals + events_present), 4)
# ns * 16 ascii : ns * label (e.g. EEG Fpz-Cz or Body temp) (mind item 9 of the additional EDF+ specs)
header[10] = ""
for field in selected_fields:
header[10] += spacepad(field, 16)
if events_present:
header[10] += spacepad(EEG_EVENT_CHANNEL, 16)
# ns * 80 ascii : ns * transducer type (e.g. AgAgCl electrode)
header[11] = ""
for field in selected_fields:
header[11] += spacepad(transducer_type, 80)
if events_present:
header[11] += spacepad(EEG_EVENT_CHANNEL, 80)
# ns * 8 ascii : ns * physical dimension (e.g. uV or degreeC)
header[12] = ""
for field in selected_fields:
header[12] += spacepad(physical_dimension, 8)
if events_present:
header[12] += spacepad("", 8)
# ns * 8 ascii : ns * physical minimum (e.g. -500 or 34)
header[13] = ""
for field in selected_fields:
header[13] += spacepad(physical_minimum[field], 8)
if events_present:
header[13] += spacepad("-1", 8)
# ns * 8 ascii : ns * physical maximum (e.g. 500 or 40)
header[14] = ""
for field in selected_fields:
header[14] += spacepad(physical_maximum[field], 8)
if events_present:
header[14] += spacepad("1", 8)
# ns * 8 ascii : ns * digital minimum (e.g. -2048)
header[15] = ""
for field in selected_fields:
header[15] += spacepad(min(selected_signals[field]), 8)
if events_present:
header[15] += spacepad("-32768", 8)
# ns * 8 ascii : ns * digital maximum (e.g. 2047)
header[16] = ""
for field in selected_fields:
header[16] += spacepad(max(selected_signals[field]), 8)
if events_present:
header[16] += spacepad("32767", 8)
# ns * 80 ascii : ns * prefiltering (e.g. HP:0.1Hz LP:75Hz)
header[17] = ""
for field in selected_fields:
header[17] += spacepad(prefiltering_text, 80)
if events_present:
header[17] += spacepad("", 80)
# ns * 8 ascii : ns * nr of samples in each data record
header[18] = ""
for field in selected_fields:
header[18] += spacepad(str(len(selected_signals[field])), 8)
if events_present:
header[18] += spacepad(len(events) * eeg_event_length, 8)
# ns * 32 ascii : ns * reserved
header[19] = ""
for field in selected_fields:
header[19] += spacepad("Reserved for " + field + " signal", 32)
if events_present:
header[19] += spacepad("", 32)
header_string = ""
for i in header.keys():
header_string += header[i]
logging.debug(header_string)
logging.info("Header length = %d" % len(header_string))
print "Writing in the file", edf_file
# write the header string
ef.write(header_string)
for s in selected_fields:
# for each signal
datastring = ''
for v in selected_signals[s]:
try:
datastring += struct.pack("<h", v)
except struct.error:
logging.warning("Ooops tried to pack a number that was too big: %f" % v)
ef.write(datastring)
logging.info("Wrote signal %s" % s)
if events_present:
# write the event channel
logging.info("Writing the event channel...")
eventstring = ''
eventstring += spacepad('+0\x14\x14Recording starts\x14\x00', 2 * eeg_event_length, '\x00')
for (t, x) in events:
time = round(t / b_update_rate, 3) # ?????
event = spacepad("+"+str(time)+"\x14"+str(x)+"\x14\x00", 2 * eeg_event_length, '\x00')
eventstring += event
ef.write(eventstring)
bf.close()
ef.close()
logging.info("Done writing the file %s. Success." % edf_file)
|
import copy
import json
import logging
import math
import pickle
import random
from collections import Counter
import numpy as np
from srdatasets.datasets import __datasets__
from srdatasets.utils import (__warehouse__, get_datasetname,
get_processed_datasets)
logger = logging.getLogger(__name__)
class DataLoader:
def __init__(
self,
dataset_name: str,
config_id: str,
batch_size: int = 1,
train: bool = True,
development: bool = False,
negatives_per_target: int = 0,
include_timestamp: bool = False,
drop_last: bool = False,
):
"""Loader of sequential recommendation datasets
Args:
dataset_name (str): dataset name.
config_id (str): dataset config id
batch_size (int): number of examples per batch
train (bool, optional): load training data
development (bool, optional): use the dataset for hyperparameter tuning
negatives_per_target (int, optional): number of negative samples per target
include_timestamp (bool, optional): add timestamps to batch data
drop_last (bool, optional): drop last incomplete batch
Note: training data is shuffled automatically.
"""
dataset_name = get_datasetname(dataset_name)
if dataset_name not in __datasets__:
raise ValueError(
"Unrecognized dataset, currently supported datasets: {}".format(
", ".join(__datasets__)
)
)
_processed_datasets = get_processed_datasets()
if dataset_name not in _processed_datasets:
raise ValueError(
"{} has not been processed, currently processed datasets: {}".format(
dataset_name,
", ".join(_processed_datasets) if _processed_datasets else "none",
)
)
if config_id not in _processed_datasets[dataset_name]:
raise ValueError(
"Unrecognized config id, existing config ids: {}".format(
", ".join(_processed_datasets[dataset_name])
)
)
if negatives_per_target < 0:
negatives_per_target = 0
logger.warning(
"Number of negative samples per target should >= 0, reset to 0"
)
if not train and negatives_per_target > 0:
logger.warning(
"Negative samples are used for training, set negatives_per_target has no effect when testing"
)
dataset_dir = __warehouse__.joinpath(
dataset_name, "processed", config_id, "dev" if development else "test"
)
with open(dataset_dir.joinpath("stats.json"), "r") as f:
self.stats = json.load(f)
dataset_path = dataset_dir.joinpath("train.pkl" if train else "test.pkl")
with open(dataset_path, "rb") as f:
self.dataset = pickle.load(f) # list
if train:
counter = Counter()
for data in self.dataset:
if len(data) > 5:
counter.update(data[1] + data[2] + data[3])
else:
counter.update(data[1] + data[2])
self.item_counts = np.array(
[counter[i] for i in range(max(counter.keys()) + 1)]
)
if batch_size <= 0:
raise ValueError("batch_size should >= 1")
if batch_size > len(self.dataset):
raise ValueError("batch_size exceeds the dataset size")
self.batch_size = batch_size
self.train = train
self.include_timestamp = include_timestamp
self.negatives_per_target = negatives_per_target
self.drop_last = drop_last
self._batch_idx = 0
@property
def num_users(self):
return self.stats["users"]
@property
def num_items(self):
return self.stats["items"]
def __iter__(self):
return self
def __len__(self):
"""Number of batches
"""
if self.drop_last:
return math.floor(len(self.dataset) / self.batch_size)
else:
return math.ceil(len(self.dataset) / self.batch_size)
def sample_negatives(self, batch_items_list):
negatives = []
for b in np.concatenate(batch_items_list, 1):
item_counts = copy.deepcopy(self.item_counts)
item_counts[b] = 0
item_counts[0] = 0
probs = item_counts / item_counts.sum()
_negatives = np.random.choice(
len(item_counts),
size=self.negatives_per_target * batch_items_list[-1].shape[1],
replace=False,
p=probs,
)
_negatives = _negatives.reshape((-1, self.negatives_per_target))
negatives.append(_negatives)
return np.stack(negatives)
def __next__(self):
"""
Returns:
user_ids: (batch_size,)
input sequences: (batch_size, input_len)
target sequences: (batch_size, target_len)
"""
if self._batch_idx == len(self):
self._batch_idx = 0
raise StopIteration
else:
if self._batch_idx == 0 and self.train:
random.shuffle(self.dataset)
batch = self.dataset[
self._batch_idx
* self.batch_size : (self._batch_idx + 1)
* self.batch_size
]
self._batch_idx += 1
batch_data = [np.array(b) for b in zip(*batch)]
# Diff task
target_idx = 3 if len(batch_data) > 5 else 2
if not self.include_timestamp:
batch_data = batch_data[: target_idx + 1]
# Sampling negatives
if self.train and self.negatives_per_target > 0:
negatives = self.sample_negatives(batch_data[1 : target_idx + 1])
batch_data.append(negatives)
return batch_data
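# Illustrative usage sketch (hypothetical dataset name and config id; requires a
# dataset already processed by srdatasets; unpacking shown for the training case
# with negatives and without timestamps):
#   loader = DataLoader("SomeDataset", config_id="c0000000", batch_size=32,
#                       train=True, negatives_per_target=5)
#   for user_ids, input_seqs, target_seqs, negatives in loader:
#       ...  # feed the batch to a model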
|
'''
Restart CrySPY
'''
import os
from .. import utility
from ..BO import bo_restart
from ..EA import ea_append
from ..gen_struc.random.random_generation import Rnd_struc_gen
from ..IO import io_stat, pkl_data
from ..IO import read_input as rin
from ..LAQA import laqa_restart
from ..RS import rs_restart
def restart():
print('\n\n')
print(utility.get_date())
print(utility.get_version())
print('Restart cryspy.py\n\n')
# ---------- read stat
stat = io_stat.stat_read()
# ---------- read input and check the change
rin.readin()
rin.diffinstat(stat)
# ---------- load init_struc_data for appending structures
init_struc_data = pkl_data.load_init_struc()
# ---------- append structures
if len(init_struc_data) < rin.tot_struc:
prev_nstruc = len(init_struc_data)
init_struc_data = append_struc(init_struc_data)
# ------ RS
if rin.algo == 'RS':
rs_restart.restart(stat, prev_nstruc)
# ------ BO
if rin.algo == 'BO':
bo_restart.restart(init_struc_data, prev_nstruc)
# ------ LAQA
if rin.algo == 'LAQA':
laqa_restart.restart(stat, prev_nstruc)
os.remove('lock_cryspy')
raise SystemExit()
elif rin.tot_struc < len(init_struc_data):
raise ValueError('tot_struc < len(init_struc_data)')
# ---------- append structures by EA (option)
if rin.append_struc_ea:
prev_nstruc = len(init_struc_data)
init_struc_data = ea_append.append_struc(stat, init_struc_data)
# ------ RS
if rin.algo == 'RS':
rs_restart.restart(stat, prev_nstruc)
# ------ BO
if rin.algo == 'BO':
bo_restart.restart(init_struc_data, prev_nstruc)
# ------ LAQA
if rin.algo == 'LAQA':
laqa_restart.restart(stat, prev_nstruc)
os.remove('lock_cryspy')
raise SystemExit()
# ---------- return
return stat, init_struc_data
def append_struc(init_struc_data):
# ---------- append initial structures
print('\n# ---------- Append structures')
with open('cryspy.out', 'a') as fout:
fout.write('\n# ---------- Append structures\n')
id_offset = len(init_struc_data)
nstruc = rin.tot_struc - id_offset
rsg = Rnd_struc_gen(rin.natot, rin.atype, rin.nat,
rin.minlen, rin.maxlen, rin.dangle,
rin.mindist, rin.maxcnt, rin.symprec)
if rin.spgnum == 0:
rsg.gen_wo_spg(nstruc, id_offset, init_pos_path='./data/init_POSCARS')
init_struc_data.update(rsg.init_struc_data)
else:
fwpath = utility.check_fwpath()
rsg.gen_with_spg(nstruc, rin.spgnum, id_offset,
init_pos_path='./data/init_POSCARS', fwpath=fwpath)
init_struc_data.update(rsg.init_struc_data)
print('') # for blank line
with open('cryspy.out', 'a') as fout:
fout.write('Generated structures up to ID {}\n\n'.format(
len(init_struc_data)-1))
# ---------- save
pkl_data.save_init_struc(init_struc_data)
return init_struc_data
|
# -*- coding: utf-8 -*-
# Copyright (C) 2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
"""
Class to generate summary from events received
"""
import json
from .logger import Logger
from .json_display import JsonDisplay
from collections import defaultdict
from collections import OrderedDict
import textwrap
class ViewEventsSummary(object):
"""Generate summary from events"""
def __init__(self, json_data):
if json_data is None or len(json_data) == 0:
raise RuntimeError("No data returned try with different filters.")
self._json_data = json_data
self.logger = Logger()
self.result = ''
def generate_summary(self):
raw_json = JsonDisplay(self._json_data).display_raw_json()
self.result = "\nRAS EVENTS SUMMARY\n"
# Summary Based on Severity
self.result += "\n" + self._generate_event_based_summary(raw_json)
# Summary Based on Lctn
self.result += "\n" + self._generate_location_based_summary(raw_json)
# Summary Based on EventType
self.result += "\n" + self._generate_eventtype_based_summary(raw_json)
# Event & Location Based Summary
self.result += "\n" + self._generate_event_location_based_summary(raw_json)
return self.result
def _generate_event_based_summary(self, raw_json):
severity_summary = defaultdict(int)
severity_dict = json.loads(raw_json)
for severity in severity_dict:
severity_summary[severity['severity']] += 1
result = "EVENTS SUMMARY BASED ON SEVERITY\n"
result += "{:<10} {:<15}\n".format('SEVERITY', 'COUNT')
for severity, count in severity_summary.items():
result += "{:<10} {:<15}\n".format(str(severity), count)
return result
def _generate_location_based_summary(self, raw_json):
location_summary = defaultdict(int)
location_dict = json.loads(raw_json)
for location in location_dict:
location_summary[location['lctn']] += 1
result = "EVENTS SUMMARY BASED ON LOCATION\n"
result += "{:<14} {:<15}\n".format('LOCATION', 'COUNT')
for location, count in location_summary.items():
result += "{:<14} {:<15}\n".format(str(location), count)
return result
def _generate_eventtype_based_summary(self, raw_json):
event_summary = dict()
event_dict = json.loads(raw_json)
wrapper = textwrap.TextWrapper(width = 100)
for event in event_dict:
if event['type'] in event_summary:
event_summary[event['type']][0] = event_summary[event['type']][0] + 1
else:
event_summary[event['type']] = list()
event_summary[event['type']] = [1, event['detail'], event['severity']]
result = "EVENTS SUMMARY BASED ON EVENT TYPE\n"
result += "{:<5} {:<70} {:<10} {:<60}\n".format('COUNT', 'EVENT TYPE', 'SEVERITY', 'DETAILS')
for type, v in event_summary.items():
count, message, severity = v
short_message = wrapper.fill(text=(textwrap.shorten(text=message, width=100)))
result += "{:<5} {:<70} {:<10} {:<60}\n".format(count, str(type),
str(severity),
str(short_message))
return result
def _generate_event_location_based_summary(self, raw_json):
location_summary = OrderedDict()
event_location_summ = json.loads(raw_json)
for location in event_location_summ:
if (location['lctn'], location['type']) in location_summary:
location_summary[(location['lctn'], location['type'])][0] += 1
else:
location_summary[(location['lctn'], location['type'])] = list()
location_summary[(location['lctn'], location['type'])] = [1, location['type'], location['severity'],
location['controloperation'], location['time']]
result = "EVENTS SUMMARY BASED ON THE COMBINATION OF LOCATION & EVENTS\n"
result += "{:<14} {:<10} {:<70} {:<15} {:<20}\n".format('LOCATION', 'COUNT', 'TYPE', 'SEVERITY',
'CONTROL OPERATION', 'LATEST EVENT TIME')
sort_by_location = lambda lctn, ras: (lctn is None, lctn or '', ras)
location_summary = OrderedDict(sorted(location_summary.items(), key=lambda t: sort_by_location(*t[0])))
for key, values in location_summary.items():
count, type, severity, control_operation, timestamp = values
result += "{:<14} {:<10} {:<70} {:<15} {:<20}\n".format(str(key[0]),
count,
str(type),
str(severity),
str(control_operation),
str(timestamp))
return result
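# Illustrative usage sketch (hypothetical event fields; assumes JsonDisplay returns
# the same JSON content it was given):
#   events = json.dumps([{"severity": "ERROR", "lctn": "R0-CH0", "type": "RasGen",
#                         "detail": "example detail", "controloperation": None,
#                         "time": "2019-01-01 00:00:00"}])
#   print(ViewEventsSummary(events).generate_summary())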
|
"""An Agent that mimics the most recent possible action done by a player."""
import sys, random
if "." not in __name__:
from utils.game import *
from utils.network import *
from Agent import Agent
from HonestAgent import HonestAgent
from RandomAgent import RandomAgent
else:
from .utils.game import *
from .utils.network import *
from .Agent import Agent
from .HonestAgent import HonestAgent
from .RandomAgent import RandomAgent
class TrickyAgent(Agent):
def __init__(self, honesty=0.5, **kwargs):
super().__init__(**kwargs)
self.honest_agent = HonestAgent()
self.random_agent = RandomAgent()
self.honesty = honesty
self.tag = str(round(100*honesty))
def update(self, event):
self.honest_agent.update(event)
def decide_action(self, options):
if random.random() < self.honesty:
return self.honest_agent.decide_action(options)
else:
return self.random_agent.decide_action(options)
def decide_reaction(self, options):
if random.random() < self.honesty:
return self.honest_agent.decide_reaction(options)
else:
return self.random_agent.decide_reaction(options)
def decide_card(self, options):
return random.choice(list(options.keys()))
def decide_exchange(self, options):
return choose_exchange_cards(random.sample(list(options["cards"].keys()), options["n"]))
if __name__ == "__main__":
start(TrickyAgent(), sys.argv[1])
|
from django.conf.urls import patterns, url
from polls import views
urlpatterns = patterns('',
#e.g., /polls/
url(r'^$', views.index, name='index'),
#e.g., /polls/1/
url(r'^(?P<poll_id>\d+)/$', views.detail, name='detail'),
#e.g., /polls/1/results
url(r'^(?P<poll_id>\d+)/results/$', views.results, name='results'),
#e.g., /polls/1/vote
url(r'^(?P<poll_id>\d+)/vote/$', views.vote, name='vote'),
) |
from pdb import set_trace as T
import numpy as np
from signal import signal, SIGINT
import sys, os, json, pickle, time
import ray
from twisted.internet import reactor
from twisted.internet.task import LoopingCall
from twisted.python import log
from twisted.web.server import Site
from twisted.web.static import File
from autobahn.twisted.websocket import WebSocketServerFactory, \
WebSocketServerProtocol
from autobahn.twisted.resource import WebSocketResource
def sign(x):
return int(np.sign(x))
def move(orig, targ):
ro, co = orig
rt, ct = targ
dr = rt - ro
dc = ct - co
if abs(dr) > abs(dc):
return ro + sign(dr), co
elif abs(dc) > abs(dr):
return ro, co + sign(dc)
else:
return ro + sign(dr), co + sign(dc)
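# e.g. move((0, 0), (3, 1)) -> (1, 0): step one tile toward the target along the
# axis with the larger offset, or diagonally when the offsets are equal.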
class GodswordServerProtocol(WebSocketServerProtocol):
def __init__(self):
super().__init__()
print("Created a server")
self.frame = 0
self.packet = {}
def onOpen(self):
print("Opened connection to server")
def onClose(self, wasClean, code=None, reason=None):
print('Connection closed')
def connectionMade(self):
super().connectionMade()
self.factory.clientConnectionMade(self)
def connectionLost(self, reason):
super().connectionLost(reason)
self.factory.clientConnectionLost(self)
#Not used without player interaction
def onMessage(self, packet, isBinary):
print("Message", packet)
def onConnect(self, request):
print("WebSocket connection request: {}".format(request))
realm = self.factory.realm
self.realm = realm
self.frame += 1
data = self.serverPacket()
sz = data['environment'].shape[0]
self.vals = None
if data['values'] is not None:
self.vals = self.visVals(data['values'], sz)
self.sendUpdate()
def serverPacket(self):
data = self.realm.clientData()
return data
def sendUpdate(self):
ent = {}
data = self.serverPacket()
entities = data['entities']
environment = data['environment']
self.packet['ent'] = entities
gameMap = environment.np().tolist()
self.packet['map'] = gameMap
tiles = []
for tileList in environment.tiles:
tl = []
for tile in tileList:
tl.append(tile.counts.tolist())
tiles.append(tl)
self.packet['counts'] = tiles
self.packet['values'] = self.vals
if self.vals is not None:
self.packet['values'] = self.vals.tolist()
packet = json.dumps(self.packet).encode('utf8')
self.sendMessage(packet, False)
#Todo: would be nicer to move this into the javascript,
#But it would possibly have to go straight into the shader
def visVals(self, vals, sz):
ary = np.zeros((sz, sz, 3))
vMean = np.mean([e[1] for e in vals])
vStd = np.std([e[1] for e in vals])
nStd, nTol = 4.0, 0.5
grayVal = int(255 / nStd * nTol)
for v in vals:
pos, mat = v
r, c = pos
mat = (mat - vMean) / vStd
color = np.clip(mat, -nStd, nStd)
color = int(color * 255.0 / nStd)
if color > 0:
color = (0, color, 128)
else:
color = (-color, 0, 128)
ary[r, c] = color
return ary.astype(np.uint8)
class WSServerFactory(WebSocketServerFactory):
def __init__(self, ip, realm, step):
super().__init__(ip)
self.realm, self.step = realm, step
self.clients = []
self.tickRate = 0.6
self.tick = 0
lc = LoopingCall(self.announce)
lc.start(self.tickRate)
def announce(self):
self.tick += 1
uptime = np.round(self.tickRate*self.tick, 1)
print('Uptime: ', uptime, ', Tick: ', self.tick)
if self.tick == 5:
pass
#time.sleep(20)
self.step()
for client in self.clients:
client.sendUpdate()
def clientConnectionMade(self, client):
self.clients.append(client)
def clientConnectionLost(self, client):
self.clients.remove(client)
class Application:
def __init__(self, realm, step):
signal(SIGINT, self.kill)
self.realm = realm
log.startLogging(sys.stdout)
port = 8080
factory = WSServerFactory(u'ws://localhost:' + str(port), realm, step)
factory.protocol = GodswordServerProtocol
resource = WebSocketResource(factory)
# We serve static files under "/" and our WebSocket
# server under "/ws" (note that Twisted uses bytes
# for URIs) under one Twisted Web Site
root = File(".")
root.putChild(b"ws", resource)
site = Site(root)
reactor.listenTCP(port, site)
reactor.run()
def kill(*args):
print("Killed by user")
reactor.stop()
os._exit(0)
|
#author: Bryan Bishop <[email protected]>
#date: 2012-01-02
#url: http://hax.iimarck.us/files/rbheaders.txt
from __future__ import print_function  # must precede all other imports
import json
import os
#parse hex values as base 16 (see calculate_pointer)
base = 16
# TODO: load the rom based on config.rom_path
rom_filename = os.path.join(os.getcwd(), "baserom.gbc")
rom = None #load the rom later
#map header pointers start at 0x1AE
start_map_header_pointers = 0x1AE
#bank bytes for each map header start at 0xC23D
start_map_header_pointer_banks = 0xC23D
#number of maps in this list
map_count = 0xF8 #including the 0th, the total is 248 or 0xF8
bad_maps = [0x0b, 0x45, 0x4b, 0x4e, 0x69, 0x6a, 0x6b, 0x6d, 0x6e, 0x6f, 0x70, 0x72, 0x73, 0x74, 0x75, 0xad, 0xcc, 0xcd, 0xce, 0xe7, 0xed, 0xee, 0xf1, 0xf2, 0xf3, 0xf4]
maps = {
0x00: "Pallet Town",
0x01: "Viridian City",
0x02: "Pewter City",
0x03: "Cerulean City",
0x04: "Lavender Town", #??
0x05: "Vermilion City", #correct
0x06: "Celadon City",
0x07: "Fuchsia City",
0x08: "Cinnabar Island",
0x09: "Indigo Plateau",
0x0A: "Saffron City",
0x0B: "FREEZE",
0x0C: "Route 1",
0x0D: "Route 2",
0x0E: "Route 3",
0x0F: "Route 4",
0x10: "Route 5",
0x11: "Route 6",
0x12: "Route 7",
0x13: "Route 8",
0x14: "Route 9",
0x15: "Route 10",
0x16: "Route 11",
0x17: "Route 12",
0x18: "Route 13",
0x19: "Route 14",
0x1A: "Route 15",
0x1B: "Route 16",
0x1C: "Route 17",
0x1D: "Route 18",
0x1E: "Route 19",
0x1F: "Route 20",
0x20: "Route 21",
0x21: "Route 22",
0x22: "Route 23",
0x23: "Route 24",
0x24: "Route 25",
0x25: "Red's House 1F",
0x26: "Red's House 2F",
0x27: "Blue's House",
0x28: "Oak's Lab",
0x29: "Viridian Poke Center",
0x2A: "Viridian Mart",
0x2B: "School",
0x2C: "Viridian House",
0x2D: "Viridian Gym",
0x2E: "Digletts Cave (Route 2)",
0x2F: "Viridian Forest (exit)",
0x30: "Route 2 House",
0x31: "Route 2 Gate",
0x32: "Viridian Forest (Entrance)",
0x33: "Viridian Forest",
0x34: "Museum F1",
0x35: "Museum F2",
0x36: "Pewter Gym",
0x37: "Pewter House (1)",
0x38: "Pewter Mart",
0x39: "Pewter House (2)",
0x3A: "Pewter Pokecenter",
0x3B: "Mt. Moon (1)",
0x3C: "Mt. Moon (2)",
0x3D: "Mt. Moon (3)",
0x3E: "Cerulean House (Trashed)",
0x3F: "Cerulean House (2)",
0x40: "Cerulean Pokecenter",
0x41: "Cerulean Gym",
0x42: "Bike Shop",
0x43: "Cerulean Mart",
0x44: "Mt. Moon Pokecenter",
0x45: "COPY OF: Trashed House",
0x46: "Route 5 Gate",
0x47: "Underground Tunnel Entrance (Route 5)",
0x48: "Day Care M",
0x49: "Route 6 Gate",
0x4A: "Underground Tunnel Entrance (Route 6)",
0x4B: "COPY OF: Underground Tunnel Entrance (Route 6)",
0x4C: "Route 7 Gate",
0x4D: "Underground Path Entrance (Route 7)",
0x4E: "COPY OF: Underground Path Entrance (Route 7)",
0x4F: "Route 8 Gate",
0x50: "Underground Path Entrance (Route 8)",
0x51: "Rock Tunnel Pokecenter",
0x52: "Rock Tunnel (1)",
0x53: "Power Plant",
0x54: "Route 11 Gate",
0x55: "Digletts Cave Entrance (Route 11)",
0x56: "Route 11 Gate (Upstairs)",
0x57: "Route 12 Gate",
0x58: "Bill's House",
0x59: "Vermilion Pokecenter",
0x5A: "Fan Club",
0x5B: "Vermilion Mart",
0x5C: "Vermilion Gym",
0x5D: "Vermilion House (1)",
0x5E: "Vermilion Dock",
0x5F: "S.S. Anne (1)",
0x60: "S.S. Anne (2)",
0x61: "S.S. Anne (3)",
0x62: "S.S. Anne (4)",
0x63: "S.S. Anne (5)",
0x64: "S.S. Anne (6)",
0x65: "S.S. Anne (7)",
0x66: "S.S. Anne (8)",
0x67: "S.S. Anne (9)",
0x68: "S.S. Anne (10)",
0x69: "FREEZE",
0x6A: "FREEZE",
0x6B: "FREEZE",
0x6C: "Victory Road (1)",
0x6D: "FREEZE",
0x6E: "FREEZE",
0x6F: "FREEZE",
0x70: "FREEZE",
0x71: "Lance",
0x72: "FREEZE",
0x73: "FREEZE",
0x74: "FREEZE",
0x75: "FREEZE",
0x76: "Hall of Fame Room",
0x77: "Underground Path (N/S)",
0x78: "Gary",
0x79: "Underground Path (W/E)",
0x7A: "Celadon Mart (1)",
0x7B: "Celadon Mart (2)",
0x7C: "Celadon Mart (3)",
0x7D: "Celadon Mart (4)",
0x7E: "Celadon Mart Roof",
0x7F: "Celadon Mart Elevator",
0x80: "Celadon Mansion (1)",
0x81: "Celadon Mansion (2)",
0x82: "Celadon Mansion (3)",
0x83: "Celadon Mansion (4)",
0x84: "Celadon Mansion (5)",
0x85: "Celadon Pokecenter",
0x86: "Celadon Gym",
0x87: "Celadon Game Corner",
0x88: "Celadon Mart 5",
0x89: "Celadon Prize Room",
0x8A: "Celadon Diner",
0x8B: "Celadon House",
0x8C: "Celadon Hotel",
0x8D: "Lavender Pokecenter",
0x8E: "Pokemon Tower (1)",
0x8F: "Pokemon Tower (2)",
0x90: "Pokemon Tower (3)",
0x91: "Pokemon Tower (4)",
0x92: "Pokemon Tower (5)",
0x93: "Pokemon Tower (6) ",
0x94: "Pokemon Tower (7)",
0x95: "Lavender House (1)",
0x96: "Lavender Mart",
0x97: "Lavender House (2)",
0x98: "Fuchsia Mart",
0x99: "Fuchsia House (1)",
0x9A: "Fuchsia Pokecenter",
0x9B: "Fuchsia House (2)",
0x9C: "Safari Zone Entrance",
0x9D: "Fuchsia Gym",
0x9E: "Fuchsia Meeting Room",
0x9F: "Seafoam Islands (2)",
0xA0: "Seafoam Islands (3)",
0xA1: "Seafoam Islands (4)",
0xA2: "Seafoam Islands (5)",
0xA3: "Vermilion House (2)",
0xA4: "Fuchsia House (3)",
0xA5: "Mansion (1)",
0xA6: "Cinnabar Gym",
0xA7: "Lab (1)",
0xA8: "Lab (2)",
0xA9: "Lab (3)",
0xAA: "Lab (4)",
0xAB: "Cinnabar Pokecenter",
0xAC: "Cinnabar Mart",
0xAD: "COPY: Cinnabar Mart",
0xAE: "Indigo Plateau Lobby",
0xAF: "Copycat's House F1",
0xB0: "Copycat's House F2",
0xB1: "Fighting Dojo",
0xB2: "Saffron Gym",
0xB3: "Saffron House (1)",
0xB4: "Saffron Mart",
0xB5: "Silph Co (1)",
0xB6: "Saffron Pokecenter",
0xB7: "Saffron House (2)",
0xB8: "Route 15 Gate",
0xBA: "Route 16 Gate Map",
0xBB: "Route 16 Gate Upstairs",
0xBC: "Route 16 House",
0xBD: "Route 12 House",
0xBE: "Route 18 Gate",
0xBF: "Route 18 Gate Header",
0xC0: "Seafoam Islands (1)",
0xC1: "Route 22 Gate",
0xC2: "Victory Road (2)",
0xC3: "Route 12 Gate Upstairs",
0xC4: "Vermilion House (3)",
0xC5: "Diglett's Cave",
0xC6: "Victory Road (3)",
0xC7: "Rocket Hideout (1)",
0xC8: "Rocket Hideout (2)",
0xC9: "Rocket Hideout (3)",
0xCA: "Rocket Hideout (4) ",
0xCB: "Rocket Hideout (Elevator)",
0xCC: "FREEZE",
0xCD: "FREEZE",
0xCE: "FREEZE",
0xCF: "Silph Co (2)",
0xD0: "Silph Co (3)",
0xD1: "Silph Co (4)",
0xD2: "Silph Co (5)",
0xD3: "Silph Co (6)",
0xD4: "Silph Co (7)",
0xD5: "Silph Co (8)",
0xD6: "Mansion (2)",
0xD7: "Mansion (3)",
0xD8: "Mansion (4)",
0xD9: "Safari Zone East",
0xDA: "Safari Zone North",
0xDB: "Safari Zone West",
0xDC: "Safari Zone Center",
0xDD: "Safari Zone Rest House (1)",
0xDE: "Safari Zone Secret House",
0xDF: "Safari Zone Rest House (2)",
0xE0: "Safari Zone Rest House (3)",
0xE1: "Safari Zone Rest House (4)",
0xE2: "Unknown Dungeon (2)",
0xE3: "Unknown Dungeon (3)",
0xE4: "Unknown Dungeon (1)",
0xE5: "Name Rater",
0xE6: "Cerulean House (3)",
0xE7: "FREEZE",
0xE8: "Rock Tunnel (2)",
0xE9: "Silph Co (9)",
0xEA: "Silph Co (10)",
0xEB: "Silph Co (11)",
0xEC: "Silph Co (Elevator)",
0xED: "FREEZE",
0xEE: "FREEZE",
0xEF: "Battle Center M",
0xF0: "Trade Center M",
0xF1: "FREEZE",
0xF2: "FREEZE",
0xF3: "FREEZE",
0xF4: "FREEZE",
0xF5: "Lorelei",
0xF6: "Bruno",
0xF7: "Agatha"
}
map_pointers = {
#0x00: {
# "name": "Pallet Town",
# "address": 0x182a1
# },
}
map_headers = {
#0x00: {
# "name": "Pallet Town",
# "address": 0x182a1,
# "tileset"
# "y"
# "x"
# "map_pointer"
# "texts_pointer"
# "script_pointer"
# "connection_byte"
# "num_connections"
# "connections":
# { "0":
# { map_id, connected_map_tile_pointer, current_map_tile_pointer, bigness, width, y, x, window_pointer }
# },
# "object_data_pointer"
# },
}
#haters gonna hate
def load_rom(filename=None):
"load the rom into a global (returns True/False)"
global rom
if not filename:
filename = rom_filename
try:
rom = open(filename, "rb").read()
return True
except Exception as exception:
print("error loading rom")
return False
def assert_rom():
global rom
assert rom, "rom must be loaded, see load_rom()"
def calculate_pointer(short_pointer, bank):
short_pointer = int(short_pointer)
bank = int(bank)
pointer = short_pointer - 0x4000 + (bank * 0x4000)
#result will be an integer
return pointer
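# e.g. calculate_pointer(0x4f12, 3) -> 0x4f12 - 0x4000 + 3 * 0x4000 = 0xcf12,
# i.e. the ROM offset of local address 0x4f12 in switchable bank 3.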
def get_nth_map_header_pointer_bank_byte_address(map_id):
"returns the address to the bank byte associated with this map pointer"
address = start_map_header_pointer_banks + map_id
return address
def get_nth_map_header_pointer_bank_byte(map_id):
"returns the bank number for this map header"
assert_rom()
address = get_nth_map_header_pointer_bank_byte_address(map_id)
bank_byte = ord(rom[address])
return bank_byte
def get_nth_map_header_pointer(map_id):
"returns the full pointer to the map header struct for this map"
assert_rom()
#figure out where the bytes for this pointer are located
byte1_address = start_map_header_pointers + (map_id * 2)
byte2_address = start_map_header_pointers + (map_id * 2) + 1
#grab the two bytes making up the partial pointer
byte1 = ord(rom[byte1_address])
byte2 = ord(rom[byte2_address])
#swap the bytes (16-bit pointers for z80 are little endian)
temp = byte1
byte1 = byte2
byte2 = temp
del temp
#combine these into a single pointer (0x byte1 byte2)
partial_pointer = (byte2 + (byte1 << 8))
#print hex(partial_pointer)
#get the bank id
bank = get_nth_map_header_pointer_bank_byte(map_id)
#calculate the full pointer
pointer = calculate_pointer(partial_pointer, bank)
#return it as an integer
return pointer
def load_map_pointers():
global maps
global map_pointers
for map in maps.keys():
pointer = get_nth_map_header_pointer(map)
#print maps[map] + "\t\t\t" + hex(pointer)
entry = {
"name": maps[map],
"address": hex(pointer),
"bank": hex(get_nth_map_header_pointer_bank_byte(map))
}
map_pointers[map] = entry
#print json.dumps(map_pointers)
def read_connection_bytes(connection_bytes, bank):
map_id = ord(connection_bytes[0])
#connection strip
connected_map_tile_pointer_byte1 = ord(connection_bytes[1])
connected_map_tile_pointer_byte2 = ord(connection_bytes[2])
connected_map_tile_pointer = (connected_map_tile_pointer_byte1 + (connected_map_tile_pointer_byte2 << 8))
#connection strip
current_map_tile_pointer_byte1 = ord(connection_bytes[3])
current_map_tile_pointer_byte2 = ord(connection_bytes[4])
current_map_tile_pointer = (current_map_tile_pointer_byte1 + (current_map_tile_pointer_byte2 << 8))
bigness_byte = ord(connection_bytes[5])
width_byte = ord(connection_bytes[6])
y = ord(connection_bytes[7])
x = ord(connection_bytes[8])
#window
window_pointer_byte1 = ord(connection_bytes[9])
window_pointer_byte2 = ord(connection_bytes[10])
window_pointer = (window_pointer_byte1 + (window_pointer_byte2 << 8))
connection_data = {
"map_id": map_id,
"connected_map_tile_pointer": hex(connected_map_tile_pointer),
"current_map_tile_pointer": hex(current_map_tile_pointer),
"bigness": hex(bigness_byte),
"width": hex(width_byte),
"y": y,
"x": x,
"window_pointer": hex(window_pointer),
}
return connection_data
def read_warp_data(address, warp_count):
warps = {}
for warp_id in range(0, warp_count):
offset = address + (warp_id*4) #4 bytes per warp
warp = {}
warp["y"] = ord(rom[offset])
warp["x"] = ord(rom[offset+1])
warp["warp_to_point"] = ord(rom[offset+2])
warp["warp_to_map_id"] = ord(rom[offset+3])
warps[warp_id] = warp
return warps
def read_sign_data(address, sign_count):
signs = {}
for sign_id in range(0, sign_count):
offset = address + (sign_id * 3)
sign = {}
sign["y"] = ord(rom[offset])
sign["x"] = ord(rom[offset+1])
sign["text_id"] = ord(rom[offset+2])
signs[sign_id] = sign
return signs
def read_warp_tos(address, warp_count):
warp_tos = {}
for warp_to_id in range(0, warp_count):
offset = address + (warp_to_id * 4)
warp_to = {}
warp_to["event_displacement"] = [ord(rom[offset]),ord(rom[offset+1])]
warp_to["y"] = ord(rom[offset+2])
warp_to["x"] = ord(rom[offset+3])
warp_tos[warp_to_id] = warp_to
return warp_tos
def get_object_data(address):
if type(address) == str: address = int(address, base)
output = {}
maps_border_tile = ord(rom[address])
number_of_warps = ord(rom[address+1])
if number_of_warps == 0: warps = {}
else:
warps = read_warp_data(address+2, number_of_warps)
offset = number_of_warps * 4
address = address + 2 + offset
number_of_signs = ord(rom[address])
if number_of_signs == 0: signs = {}
else:
signs = read_sign_data(address+1, number_of_signs)
offset = number_of_signs * 3
address = address + 1 + offset
number_of_things = ord(rom[address])
address = address + 1
things = {}
for thing_id in range(0, number_of_things):
thing = {}
picture_number = ord(rom[address])
y = ord(rom[address+1])
x = ord(rom[address+2])
movement1 = ord(rom[address+3])
movement2 = ord(rom[address+4])
text_string_number = ord(rom[address+5])
address += 5 + 1
if text_string_number & (1 << 6) != 0: #trainer
thing["type"] = "trainer"
thing["trainer_type"] = ord(rom[address])
thing["pokemon_set"] = ord(rom[address+1])
address += 2
elif text_string_number & (1 << 7) != 0: #item
thing["type"] = "item"
thing["item_number"] = ord(rom[address])
address += 1
else: #normal person
thing["type"] = "person"
thing["picture_number"] = picture_number
thing["y"] = y
thing["x"] = x
thing["movement1"] = movement1
thing["movement2"] = movement2
thing["original_text_string_number"] = text_string_number
thing["text_string_number"] = text_string_number & 0xF
things[thing_id] = thing
warp_tos = read_warp_tos(address, number_of_warps)
output["maps_border_tile"] = maps_border_tile
output["number_of_warps"] = number_of_warps
output["warps"] = warps
output["number_of_signs"] = number_of_signs
output["signs"] = signs
output["number_of_things"] = number_of_things
output["things"] = things
output["warp_tos"] = warp_tos
return output
def compute_object_data_size(object):
size = 4
size += 6 * (int(object["number_of_things"]))
trainer_count = 0
item_count = 0
for thing in object["things"]:
thing = object["things"][thing]
if thing["type"] == "trainer": trainer_count += 1
elif thing["type"] == "item": item_count += 1
size += 2 * trainer_count
size += item_count
size += 8 * object["number_of_warps"]
size += 3 * object["number_of_signs"]
return size
def get_direction(connection_byte, connection_id):
"""given a connection byte and a connection id, which direction is this connection?
the 0th connection of $5 is SOUTH and the 1st connection is EAST"""
connection_options = [0b1000, 0b0100, 0b0010, 0b0001]
results = ["NORTH", "SOUTH", "WEST", "EAST"]
for option in connection_options:
if (option & connection_byte) == 0:
results[connection_options.index(option)] = ""
#prune results
while "" in results:
results.remove("")
return results[connection_id]
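# e.g. get_direction(0x5, 0) == "SOUTH" and get_direction(0x5, 1) == "EAST",
# matching the docstring above (0x5 = 0b0101 = SOUTH | EAST).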
def read_map_header(address, bank):
address = int(address, base)
bank = int(bank, base)
tileset = ord(rom[address])
y = ord(rom[address+1])
x = ord(rom[address+2])
map_pointer_byte1 = ord(rom[address+3])
map_pointer_byte2 = ord(rom[address+4])
partial_map_pointer = (map_pointer_byte1 + (map_pointer_byte2 << 8))
map_pointer = calculate_pointer(partial_map_pointer, bank)
texts_pointer_byte1 = ord(rom[address+5])
texts_pointer_byte2 = ord(rom[address+6])
partial_texts_pointer = (texts_pointer_byte1 + (texts_pointer_byte2 << 8))
texts_pointer = calculate_pointer(partial_texts_pointer, bank)
script_pointer_byte1 = ord(rom[address+7])
script_pointer_byte2 = ord(rom[address+8])
partial_script_pointer = (script_pointer_byte1 + ( script_pointer_byte2 << 8))
script_pointer = calculate_pointer(partial_script_pointer, bank)
connection_byte = ord(rom[address+9]) #0xc = NORTH | SOUTH
# <&IIMarckus> the connection byte is a bitmask allowing 0-4 connections
# <&IIMarckus> each connection is 11 bytes
# <&IIMarckus> or'd
# <&IIMarckus> east = 1, west = 2, south = 4, north = 8
# <&IIMarckus> so a connection byte of 0xc means north/south
# <&IIMarckus> which means there are 22 more bytes, 11 for each connection
# < kanzure> 4 | 8 = c?
# <&IIMarckus> yes
# <&IIMarckus> easier to see if you convert to binary
# <&IIMarckus> 0100 | 1000 = 1100
num_connections = 0
connection_value = bin(connection_byte)[2:]
if connection_value[0] == "1": #NORTH
num_connections += 1
if len(connection_value) > 1 and connection_value[1] == "1": #SOUTH
num_connections += 1
if len(connection_value) > 2 and connection_value[2] == "1": #WEST
num_connections += 1
if len(connection_value) > 3 and connection_value[3] == "1": #EAST
num_connections += 1
#quick test for connection data
#connection0_stuff = rom[(address + 10):(address + 10 + 11)]
#print "Route: " + hex(ord(connection0_stuff[0]))
#setup
connections = {}
#go straight to object data if there are no connections
if num_connections > 0:
for connection in range(0, num_connections):
base_connection_address = address + 10 + (11 * connection)
connection_bytes = rom[base_connection_address : base_connection_address + 11]
connection_data = read_connection_bytes(connection_bytes, bank)
connection_data["direction"] = get_direction(connection_byte, connection)
connections[connection] = connection_data
#we might have to jump around a bit
offset = address + 10 + (11 * num_connections)
#object data
object_data_pointer_byte1 = ord(rom[offset])
object_data_pointer_byte2 = ord(rom[offset+1])
partial_object_data_pointer = (object_data_pointer_byte1 + (object_data_pointer_byte2 << 8))
object_data_pointer = calculate_pointer(partial_object_data_pointer, bank)
object_data = get_object_data(object_data_pointer)
texts = set()
for thing_id in object_data["things"].keys():
thing = object_data["things"][thing_id]
texts.add(thing["text_string_number"])
for sign_id in object_data["signs"].keys():
sign = object_data["signs"][sign_id]
texts.add(sign["text_id"])
texts = list(texts)
number_of_referenced_texts = len(texts)
map_header = {
"tileset": hex(tileset),
"y": hex(y),
"x": hex(x),
"map_pointer": hex(map_pointer),
"texts_pointer": hex(texts_pointer),
"number_of_referenced_texts": number_of_referenced_texts,
"referenced_texts": texts,
"script_pointer": hex(script_pointer),
"connection_byte": hex(connection_byte),
"num_connections": str(num_connections),
"connections": connections, #NORTH, SOUTH, WEST, EAST order matters
"object_data_pointer": hex(object_data_pointer),
"object_data": object_data,
}
return map_header
def read_all_map_headers():
if rom is None: load_rom()
assert_rom()
if len(map_pointers) == 0: load_map_pointers()
for map_id in map_pointers.keys():
if map_id in bad_maps: continue
map2 = map_pointers[map_id]
map_header = read_map_header(map2["address"], map2["bank"])
map_header["id"] = map_id
map_header["name"] = map2["name"]
map_header["address"] = map2["address"]
map_header["bank"] = map2["bank"]
map_headers[map_id] = map_header
return map_headers
if __name__ == "__main__":
#read binary data from file
load_rom()
#where are the map structs?
load_map_pointers()
#print json.dumps(map_pointers)
#experimental...
#print json.dumps(read_map_header(map_pointers[0]["address"], map_pointers[0]["bank"]))
read_all_map_headers()
#print json.dumps(map_headers)
#print map_headers[37]
for header in map_headers:
if header in bad_maps: continue
print("map " + str(header) + " has " + str(map_headers[header]["number_of_referenced_texts"]) + " referenced texts")
|
number_cakes = int(input("Enter the number of cake(s) you want to buy: "))
cake_price = 4.50
bill = cake_price * number_cakes
if number_cakes == 1:
print('The price of a cake is ', bill,' pounds.')
elif number_cakes > 1:
print('The price of', number_cakes, 'cakes is', bill,'pounds.')
else:
print('Error: the number entered is invalid')
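# note: int() will raise ValueError if the input is not a whole number; a hedged variant
# could wrap the prompt, e.g.:
#   try:
#       number_cakes = int(input("Enter the number of cake(s) you want to buy: "))
#   except ValueError:
#       print('Error: please enter a whole number')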
|
import collections
import typing
from typing import Dict, List, Optional
import math
import numpy as np
import tensorflow.keras.backend as K
MAXIMUM_FLOAT_VALUE = float('inf')
KnownBounds = collections.namedtuple('KnownBounds', ['min', 'max'])
class MinMaxStats(object):
"""A class that holds the min-max values of the tree."""
def __init__(self, known_bounds: Optional[KnownBounds]):
self.maximum = known_bounds.max if known_bounds else -MAXIMUM_FLOAT_VALUE
self.minimum = known_bounds.min if known_bounds else MAXIMUM_FLOAT_VALUE
def update(self, value: float):
self.maximum = max(self.maximum, value)
self.minimum = min(self.minimum, value)
def normalize(self, value: float) -> float:
if self.maximum > self.minimum:
# We normalize only when we have set the maximum and minimum values.
return (value - self.minimum) / (self.maximum - self.minimum)
return value
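# Illustrative note (not part of the original pseudocode): with KnownBounds(-1, 1) the
# stats start at minimum=-1, maximum=1, so normalize() maps a backed-up value v in
# [-1, 1] to (v + 1) / 2 in [0, 1] before it is combined with priors in the UCB score.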
class MuZeroConfig(object):
def __init__(self,
action_space_size: int,
max_moves: int,
discount: float,
dirichlet_alpha: float,
num_simulations: int,
batch_size: int,
td_steps: int,
num_actors: int,
lr_init: float,
lr_decay_steps: float,
visit_softmax_temperature_fn,
known_bounds: Optional[KnownBounds] = None):
### Self-Play
self.action_space_size = action_space_size
self.num_actors = num_actors
self.visit_softmax_temperature_fn = visit_softmax_temperature_fn
self.max_moves = max_moves
self.num_simulations = num_simulations
self.discount = discount
# Root prior exploration noise.
self.root_dirichlet_alpha = dirichlet_alpha
self.root_exploration_fraction = 0.25
# UCB formula
self.pb_c_base = 19652
self.pb_c_init = 1.25
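        # These constants parameterise the pUCT exploration term used during tree search;
        # in the published MuZero pseudocode the score is roughly
        #   pb_c  = log((parent.visit_count + pb_c_base + 1) / pb_c_base) + pb_c_init
        #   score = pb_c * sqrt(parent.visit_count) / (child.visit_count + 1) * child.prior
        #           + normalized child value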
# If we already have some information about which values occur in the
# environment, we can use them to initialize the rescaling.
# This is not strictly necessary, but establishes identical behaviour to
# AlphaZero in board games.
self.known_bounds = known_bounds
### Training
self.training_steps = int(10)
self.checkpoint_interval = int(5)
self.window_size = int(1e6)
self.batch_size = batch_size
self.num_unroll_steps = 8
self.td_steps = td_steps
self.weight_decay = 1e-4
self.momentum = 0.9
# Exponential learning rate schedule
self.lr_init = lr_init
self.lr_decay_rate = 0.1
self.lr_decay_steps = lr_decay_steps
def make_board_game_config(action_space_size: int, max_moves: int,
dirichlet_alpha: float,
lr_init: float, num_simulations: int, num_actors: int) -> MuZeroConfig:
def visit_softmax_temperature(num_moves, training_steps):
if num_moves < 30:
return 1.0
else:
return 0.0 # Play according to the max.
return MuZeroConfig(
action_space_size=action_space_size,
max_moves=max_moves,
discount=1.0,
dirichlet_alpha=dirichlet_alpha,
num_simulations=num_simulations,
batch_size=2048,
td_steps=max_moves, # Always use Monte Carlo return.
num_actors=num_actors,
lr_init=lr_init,
lr_decay_steps=400e3,
visit_softmax_temperature_fn=visit_softmax_temperature,
known_bounds=KnownBounds(-1, 1))
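# Illustrative only (not from the original pseudocode): a tiny tic-tac-toe-sized config;
# the hyperparameter values below are placeholders, not tuned values.
example_tictactoe_config = make_board_game_config(
    action_space_size=9, max_moves=9, dirichlet_alpha=0.3,
    lr_init=0.01, num_simulations=25, num_actors=1)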
class Action(object):
def __init__(self, index: int):
self.index = index
def __hash__(self):
return self.index
def __eq__(self, other):
return self.index == other.index
def __gt__(self, other):
return self.index > other.index
class Player(object):
def turn(self):
pass
class Node(object):
def __init__(self, prior: float):
self.visit_count = 0
self.to_play = -1
self.prior = prior
self.value_sum = 0
self.children = {}
self.hidden_state = None
self.reward = 0
def expanded(self) -> bool:
return len(self.children) > 0
def value(self) -> float:
if self.visit_count == 0:
return 0
return self.value_sum / self.visit_count
class ActionHistory(object):
"""Simple history container used inside the search.
Only used to keep track of the actions executed.
"""
def __init__(self, history: List[Action], action_space_size: int):
self.history = list(history)
self.action_space_size = action_space_size
def clone(self):
return ActionHistory(self.history, self.action_space_size)
def add_action(self, action: Action):
self.history.append(action)
def last_action(self) -> Action:
return self.history[-1]
def action_space(self) -> List[Action]:
return [Action(i) for i in range(self.action_space_size)]
def to_play(self) -> Player:
return Player()
class Environment(object):
"""The environment MuZero is interacting with."""
def step(self, action):
pass
class NetworkOutput(typing.NamedTuple):
value: float
reward: float
    policy_logits: typing.Any  # per-action logits (tensor/array), flattened before indexing
hidden_state: List[float]
# We expand a node using the value, reward and policy prediction obtained from
# the neural network.
def expand_node(node: Node, to_play: Player, actions: List[Action],
network_output: NetworkOutput):
node.to_play = to_play
node.hidden_state = network_output.hidden_state
node.reward = network_output.reward
policy = {a: math.exp(K.flatten(network_output.policy_logits)[hash(a)]) for a in actions}
policy_sum = sum(policy.values())
for action, p in policy.items():
node.children[action] = Node(p / policy_sum)
# Stubs to make the typechecker happy.
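# Note: softmax_sample below ignores its `temperature` argument and samples in direct
# proportion to visit counts; a temperature-aware variant would weight counts by
# count ** (1 / temperature) and fall back to the argmax as temperature approaches 0.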
def softmax_sample(visit_counts, temperature: float):
N_total = sum([ visit_count[0] for visit_count in visit_counts])
distribution = []
for visit_count, action in visit_counts:
distribution.append((action, visit_count/N_total))
action = np.random.choice(len(distribution), p=[d[1] for d in distribution])
return 0, distribution[action][0]
def launch_job(f, *args):
f(*args)
def make_uniform_network():
return Network()
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import sys
import uuid
from fastapi import APIRouter
from fastapi import WebSocket
from starlette.websockets import WebSocketState as WebSocketState
from paddlespeech.cli.log import logger
from paddlespeech.server.engine.engine_pool import get_engine_pool
router = APIRouter()
@router.websocket('/paddlespeech/tts/streaming')
async def websocket_endpoint(websocket: WebSocket):
"""PaddleSpeech Online TTS Server api
Args:
websocket (WebSocket): the websocket instance
"""
    #1. the interface waits to accept the websocket protocol header;
    #   only after we receive the header do we establish the connection with a specific handler
await websocket.accept()
    #2. once the websocket handshake is accepted, fetch the online tts engine instance
engine_pool = get_engine_pool()
tts_engine = engine_pool['tts']
connection_handler = None
if tts_engine.engine_type == "online":
from paddlespeech.server.engine.tts.online.python.tts_engine import PaddleTTSConnectionHandler
elif tts_engine.engine_type == "online-onnx":
from paddlespeech.server.engine.tts.online.onnx.tts_engine import PaddleTTSConnectionHandler
else:
logger.error("Online tts engine only support online or online-onnx.")
sys.exit(-1)
try:
while True:
# careful here, changed the source code from starlette.websockets
assert websocket.application_state == WebSocketState.CONNECTED
message = await websocket.receive()
websocket._raise_on_disconnect(message)
message = json.loads(message["text"])
if 'signal' in message:
# start request
if message['signal'] == 'start':
session = uuid.uuid1().hex
resp = {
"status": 0,
"signal": "server ready",
"session": session
}
connection_handler = PaddleTTSConnectionHandler(tts_engine)
await websocket.send_json(resp)
# end request
elif message['signal'] == 'end':
connection_handler = None
resp = {
"status": 0,
"signal": "connection will be closed",
"session": session
}
await websocket.send_json(resp)
break
else:
resp = {"status": 0, "signal": "no valid json data"}
await websocket.send_json(resp)
# speech synthesis request
elif 'text' in message:
text_bese64 = message["text"]
sentence = connection_handler.preprocess(
text_bese64=text_bese64)
# run
wav_generator = connection_handler.run(sentence)
while True:
try:
tts_results = next(wav_generator)
resp = {"status": 1, "audio": tts_results}
await websocket.send_json(resp)
except StopIteration as e:
resp = {"status": 2, "audio": ''}
await websocket.send_json(resp)
logger.info(
"Complete the synthesis of the audio streams")
break
except Exception as e:
resp = {"status": -1, "audio": ''}
await websocket.send_json(resp)
break
else:
logger.error(
"Invalid request, please check if the request is correct.")
except Exception as e:
logger.error(e)
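# Hedged client sketch (illustrative only): assumes the third-party `websockets` package
# and a locally running server; the URL/port below are placeholders, not values defined
# in this module.
#
#   import asyncio, base64, json, websockets
#
#   async def demo():
#       uri = "ws://127.0.0.1:8092/paddlespeech/tts/streaming"
#       async with websockets.connect(uri) as ws:
#           await ws.send(json.dumps({"signal": "start"}))
#           print(await ws.recv())                     # server ready + session id
#           text_b64 = base64.b64encode("hello".encode("utf8")).decode("utf8")
#           await ws.send(json.dumps({"text": text_b64}))
#           while True:
#               resp = json.loads(await ws.recv())
#               if resp["status"] != 1:                # 2 = finished, -1 = error
#                   break
#           await ws.send(json.dumps({"signal": "end"}))
#           print(await ws.recv())
#
#   asyncio.run(demo())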
|