metadata (dict) | text (string, 60 to 3.49M characters)
---|---
{
"source": "jdashg/dag-runner",
"score": 3
}
|
#### File: jdashg/dag-runner/dagr.py
```python
class DagrNode(object):
nodes_by_name = dict()
def __init__(self, name, parents=[], cmd=None):
self.name = name
self.parents = set(parents)
assert name not in DagrNode.nodes_by_name
DagrNode.nodes_by_name[name] = self
self.cmds = []
if cmd:
self.cmds.append(cmd)
return
####################
# Traverse!
DAGR_FILENAME = '.dagr'
with open(DAGR_FILENAME) as f:
code = compile(f.read(), DAGR_FILENAME, 'exec')
exec(code)
####################
# Now include
import multiprocessing
import os
import shlex
import subprocess
import sys
import threading
import time
####################
# Privates
class Task(object):
def __init__(self, target, args=(), kwargs={}):
self.target = target
self.args = args
self.kwargs = kwargs
def run(self):
self.target(*self.args, **self.kwargs)
class ThreadPool(object):
def __init__(self, name, num_threads):
self.name = name
self.cond = threading.Condition()
self.is_alive = True
self.task_queue = []
self.threads = set()
for i in range(num_threads):
t_name = '{}[{}]'.format(self.name, i)
t = threading.Thread(name=t_name, target=self.thread, args=(i,))
self.threads.add(t)
t.start()
def enqueue(self, task):
assert self.is_alive
with self.cond:
self.task_queue.append(task)
self.cond.notify_all()
def thread(self, thread_i):
while self.is_alive:
with self.cond:
try:
task = self.task_queue.pop(0)
except IndexError:
self.cond.wait()
continue
task.run()
def kill(self):
self.is_alive = False
with self.cond:
self.cond.notify_all()
def join(self):
for t in self.threads:
t.join()
####################
class ExDagHasCycle(Exception):
def __init__(self, stack, cycle):
self.stack = stack
self.cycle = cycle
def map_dag(roots, map_func):
mapped_nodes = dict()
stack = []
def recurse(node):
if node in stack:
            cycle = stack[stack.index(node):] + [node]
raise ExDagHasCycle(stack, cycle)
try:
mapped = mapped_nodes[node]
return mapped
except KeyError:
pass
# --
stack.append(node)
parents = node.parents
mapped_parents = set()
for parent in parents:
mapped_parent = recurse(parent)
if mapped_parent:
mapped_parents.add(mapped_parent)
stack.pop()
# --
mapped = map_func(node, mapped_parents, stack)
mapped_nodes[node] = mapped
return mapped
mapped_roots = set()
for x in roots:
mapped = recurse(x)
if mapped:
mapped_roots.add(mapped)
return mapped_roots
####################
class PromiseGraphNode(object):
def __init__(self, parents, info=''):
self.info = str(info)
#print 'PromiseGraphNode {}: '.format(self.info) + ', '.join(map(lambda x: x.info, parents))
self.lock = threading.Lock()
self.pending_parents = set(parents)
self.children = set()
for parent in self.pending_parents:
with parent.lock:
parent.children.add(self)
self.result = None
def run(self):
self.resolve(True)
def on_resolve(self):
pass
def resolve(self, result):
assert result in (True, False)
with self.lock:
if self.result != None:
return
self.result = result
self.pending_parents.clear()
self.on_resolve()
assert self.result in (True, False)
if not self.result:
for child in self.children:
child.resolve(False)
else:
for child in self.children:
with child.lock:
try:
child.pending_parents.remove(self)
except KeyError:
continue
if child.pending_parents:
continue
assert child.result == None
child.run()
####################
def quote_args(args):
    # Render an argv list as a single shell-safe string (used for --dump echo output).
    if os.name == 'nt':
        return subprocess.list2cmdline(args)
    return ' '.join(shlex.quote(x) for x in args)
class SubprocCallNode(PromiseGraphNode):
ECHO = False
def __init__(self, parents, info, pool, call):
PromiseGraphNode.__init__(self, parents, info)
self.pool = pool
self.call = call
def run(self):
self.pool.enqueue(Task(self.task_run))
def task_run(self):
shell = type(self.call) is str
if SubprocCallNode.ECHO:
call = self.call
if not shell:
call = quote_args(call)
sys.stdout.write(call + '\n')
self.resolve(True)
return
result = False
try:
p = subprocess.Popen(self.call, shell=shell, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
stdout = stdout.decode(errors='replace')
stderr = stderr.decode(errors='replace')
if p.returncode == 0:
result = True
except OSError:
stdout = ''
stderr = 'Binary not found: {}'.format(self.call[0])
if not result:
stderr += '\n{} FAILED: {}\n'.format(self.info, self.call)
sys.stdout.write(stdout)
sys.stderr.write(stderr)
self.resolve(result)
####################
class EventNode(PromiseGraphNode):
def __init__(self, parents, info=''):
PromiseGraphNode.__init__(self, parents, info)
self.event = threading.Event()
def on_resolve(self):
self.event.set()
####################
NUM_THREADS = multiprocessing.cpu_count()
#NUM_THREADS = 1
def run_dagr(roots, thread_count=NUM_THREADS):
pool = ThreadPool('run_dagr pool', thread_count)
mapped_leaves = set()
def map_dagr_node(node, mapped_parents, stack):
if not node.cmds:
begin = PromiseGraphNode(mapped_parents, node.name)
end = begin
elif len(node.cmds) == 1:
begin = SubprocCallNode(mapped_parents, node.name, pool, node.cmds[0])
end = begin
else:
begin = PromiseGraphNode(mapped_parents, node.name + '.begin')
subs = set()
for i, cmd in enumerate(node.cmds):
info = node.name + '.{}'.format(i)
sub = SubprocCallNode([begin], info, pool, cmd)
subs.add(sub)
end = PromiseGraphNode(subs, node.name + '.end')
if not mapped_parents:
mapped_leaves.add(begin)
return end
mapped_roots = map_dag(roots, map_dagr_node)
# --
terminal_root = EventNode(mapped_roots, '<terminal_root>')
for x in mapped_leaves:
x.run()
terminal_root.event.wait()
pool.kill()
pool.join()
return terminal_root.result
if __name__ == '__main__':
start_time = time.time()
args = sys.argv[1:]
while True:
try:
cur = args.pop(0)
except IndexError:
break
if cur == '--dump':
SubprocCallNode.ECHO = True
continue
if cur == '--':
break
args.insert(0, cur)
break
root_names = args
if not root_names:
root_names = ['DEFAULT']
roots = []
for x in root_names:
try:
roots.append(DagrNode.nodes_by_name[x])
except KeyError:
sys.stderr.write('No such DagrNode: {}\n'.format(x))
exit(1)
success = run_dagr(roots)
elapsed_time = time.time() - start_time
if success:
sys.stderr.write('BUILD SUCCEEDED (in {:.4}s)\n'.format(elapsed_time))
else:
sys.stderr.write('BUILD FAILED\n\n\n')
assert len(threading.enumerate()) == 1, str(threading.enumerate())
exit(int(not success))
```
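`dagr.py` exec's a `.dagr` file from the current working directory as plain Python before wiring up the graph, so a build description is just a set of `DagrNode` declarations. A minimal sketch of such a file, with hypothetical target names and commands (parents are passed as node objects; a list `cmd` is run directly while a string `cmd` goes through the shell):

```python
# .dagr -- illustrative build description; the targets and commands are made up.
gen = DagrNode('gen-headers', cmd=['python', 'gen_headers.py'])
objs = DagrNode('compile', parents=[gen], cmd=['cc', '-c', 'main.c', '-o', 'main.o'])
link = DagrNode('link', parents=[objs], cmd='cc main.o -o app')  # string cmd => shell

# `python dagr.py` builds DEFAULT; `python dagr.py link` builds a specific root.
# Pass --dump to echo the commands instead of running them.
DagrNode('DEFAULT', parents=[link])
```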
|
{
"source": "jdashg/misc",
"score": 3
}
|
#### File: jdashg/misc/https_server.py
```python
assert __name__ == '__main__'
print(__file__)
import http.server
import pathlib
import ssl
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--bind', '-b', default='localhost', metavar='ADDRESS',
                    help='Specify alternate bind address '
                         '[default: localhost]')
parser.add_argument('--cache', '-c', action='store',
default=0, type=int,
help='Allow caching [default: 0]')
parser.add_argument('port', action='store',
default=4443, type=int,
nargs='?',
help='Specify alternate port [default: 4443]')
args = parser.parse_args()
class CustomRequestHandler(http.server.SimpleHTTPRequestHandler):
def end_headers(self):
if not args.cache:
self.send_header("Cache-Control", "no-cache, no-store, must-revalidate")
self.send_header("Pragma", "no-cache")
self.send_header("Expires", "0")
super().end_headers()
CERT_FILE = str(pathlib.PurePath(__file__).with_name('server.pem'))
httpd = http.server.ThreadingHTTPServer((args.bind, args.port), CustomRequestHandler)
try:
httpd.socket = ssl.wrap_socket(httpd.socket, certfile=CERT_FILE, server_side=True)
except FileNotFoundError:
    print(f'''{CERT_FILE} not found.
Try `openssl req -new -x509 -keyout {CERT_FILE} -out {CERT_FILE} -days 365 -nodes`''')
exit(1)
print(f'Serving at {httpd.socket.getsockname()}...')
httpd.serve_forever()
```
|
{
"source": "jdashg/pydra",
"score": 2
}
|
#### File: jdashg/pydra/worker.py
```python
assert __name__ == '__main__'
from common import *
import itertools
import job_client
import psutil
import threading
# --------------------------------
LockingLogHandler.install()
log_conn_counter = itertools.count(1)
def th_on_accept_log(conn, addr):
conn_id = next(log_conn_counter)
conn_prefix = ''
if CONFIG['LOG_LEVEL'] == logging.DEBUG:
conn_prefix = '[log {}] '.format(conn_id)
logging.debug(conn_prefix + '<connected>')
pconn = nu.PacketConn(conn, CONFIG['KEEPALIVE_TIMEOUT'], True)
try:
while True:
text = pconn.recv().decode()
text = text.replace('\n', '\n' + ' '*len(conn_prefix))
locked_print(conn_prefix, text)
except OSError:
pass
finally:
logging.debug(conn_prefix + '<disconnected>')
pconn.nuke()
# --
log_server = nu.Server([CONFIG['LOG_ADDR']], target=th_on_accept_log)
log_server.listen_until_shutdown()
# ---------------------------
MODS = LoadPydraModules()
logging.info('MODS: %s', MODS)
def get_mods_by_key():
mods_by_key = {}
for (mod_name,m) in MODS.items():
for sk in m.pydra_get_subkeys():
key = make_key(mod_name, sk)
mods_by_key[key] = m
return mods_by_key
# --
worker_prefix = ''
work_conn_counter = itertools.count(1)
utilization_cv = threading.Condition()
active_slots = 0
cpu_load = 0.0
# --
def th_on_accept_work(conn, addr):
conn_id = next(work_conn_counter)
conn_prefix = worker_prefix + '[worklet {}] '.format(conn_id)
global active_slots
try:
active_slots += 1
if active_slots > CONFIG['WORKERS']:
logging.info(conn_prefix + '<refused>')
return
logging.debug(conn_prefix + '<connected>')
with utilization_cv:
utilization_cv.notify_all()
pconn = nu.PacketConn(conn, CONFIG['KEEPALIVE_TIMEOUT'], True)
hostname = pconn.recv().decode()
key = pconn.recv()
logging.debug(conn_prefix + 'hostname: ' + hostname)
(mod_name, subkey) = key.split(b'|', 1)
m = MODS[mod_name.decode()]
m.pydra_job_worker(pconn, hostname, subkey)
except OSError:
pass
finally:
active_slots -= 1
logging.debug(conn_prefix + '<disconnected>')
with utilization_cv:
utilization_cv.notify_all()
work_server = nu.Server([CONFIG['WORKER_BASE_ADDR']], target=th_on_accept_work)
work_server.listen_until_shutdown()
# --
def th_cpu_percent():
try:
import psutil
except ImportError:
logging.warning('cpu load tracking requires psutil, disabling...')
return
global cpu_load
while True:
cpu_load = psutil.cpu_percent(interval=None, percpu=True) # [0,100]
with utilization_cv:
utilization_cv.notify_all()
time.sleep(3.0)
threading.Thread(target=th_cpu_percent, daemon=True).start()
# -
def advert_to_server():
if not work_server:
logging.error(worker_prefix + 'No work_server.')
return
mods_by_key = get_mods_by_key()
keys = list(mods_by_key.keys())
logging.warning(worker_prefix + 'keys: {}'.format(keys))
gais = work_server.get_gais()
addrs = [Address(x[4]) for x in gais]
if not (keys and addrs):
logging.warning(worker_prefix + str((keys, addrs)))
return
timeout = CONFIG['TIMEOUT_WORKER_TO_SERVER']
addr = job_server_addr(timeout)
if not addr:
logging.warning(worker_prefix + 'No mDNS response from job_server.')
return
conn = nu.connect_any([addr[:2]], timeout=timeout)
if not conn:
logging.error(worker_prefix + 'Failed to connect: {}'.format(addr))
return
pconn = nu.PacketConn(conn, CONFIG['KEEPALIVE_TIMEOUT'], True)
logging.warning(worker_prefix + 'Connected to job_server {} as {}'.format(
addr, CONFIG['HOSTNAME']))
def th_nuke_on_recv():
pconn.wait_for_disconnect()
def th_nuke_on_change():
while pconn.alive:
new_mods_by_key = get_mods_by_key()
new_gais = work_server.get_gais()
if new_mods_by_key != mods_by_key:
logging.info(worker_prefix + 'Keys changed: {}'.format(new_mods_by_key))
break
if new_gais != gais:
logging.info(worker_prefix + 'Gais changed.')
break
time.sleep(1.0)
pconn.nuke()
threading.Thread(target=th_nuke_on_recv, daemon=True).start()
threading.Thread(target=th_nuke_on_change, daemon=True).start()
try:
pconn.send(b'worker')
max_slots = CONFIG['WORKERS']
desc = WorkerDescriptor()
desc.hostname = CONFIG['HOSTNAME']
desc.max_slots = max_slots
desc.keys = keys
desc.addrs = addrs
pconn.send(desc.encode())
with utilization_cv:
while pconn.alive:
avail_slots = max_slots - active_slots
cpu_idle = len(cpu_load) - (sum(cpu_load) / 100.0)
avail_slots = min(avail_slots, cpu_idle)
if avail_slots > max_slots - 1:
avail_slots = max_slots
pconn.send_t(F64_T, avail_slots)
utilization_cv.wait(10.0) # Refresh, just slowly if not notified.
time.sleep(0.1) # Minimum delay between updates
except OSError:
pass
finally:
logging.warning(worker_prefix + 'Socket disconnected.')
pconn.nuke()
# --
try:
while True:
advert_to_server()
time.sleep(1.0)
logging.warning(worker_prefix + 'Reconnecting to server...')
except KeyboardInterrupt:
exit(0)
```
|
{
"source": "jdashg/webgl-1on2",
"score": 2
}
|
#### File: jdashg/webgl-1on2/build.py
```python
import content_script_embed
import os
import pathlib
import shutil
DIR = pathlib.Path(__file__).parent
WEB_EXT_OUT = DIR / 'web-ext/out'
def write_to(dest, data):
print(f' ({len(data)} bytes) => {dest}')
dest.write_bytes(data)
def clean():
print('[clean]')
if WEB_EXT_OUT.exists():
shutil.rmtree(WEB_EXT_OUT)
os.mkdir(WEB_EXT_OUT)
def build_content_script(src, dest):
print(f'[build_content_script {src}]')
data = src.read_bytes()
data = content_script_embed.from_script(data)
write_to(dest, data)
# -
clean()
build_content_script(DIR / 'webgl-1on2.js', WEB_EXT_OUT / 'webgl-1on2.content.js')
print('Build complete.')
```
|
{
"source": "JDASoftwareGroup/kartothek_hive_integration_test",
"score": 2
}
|
#### File: docker/test_executor/test_hive_compatibility.py
```python
import pytest
import os
import shutil
from copy import deepcopy
from functools import partial
import storefact
import pandas as pd
from pyhive import hive
import pandas.testing as pdt
from kartothek.serialization.testing import get_dataframe_not_nested
from kartothek.io.eager import store_dataframes_as_dataset, read_table
#### Fixtures ####
@pytest.fixture()
def test_df():
df = _create_df()
# Rename because `date` and `null` are reserved in Hive QL
df = df.rename(columns={"date": "date_", "null": "null_"})
return df
@pytest.fixture()
def volume_location(request):
volume_location = "/parquet_data"
def cleanup_data():
for item in os.listdir(volume_location):
itempath = os.path.join(volume_location, item)
if os.path.isdir(itempath):
shutil.rmtree(itempath)
else:
os.remove(itempath)
yield volume_location
request.addfinalizer(cleanup_data)
@pytest.fixture()
def store_factory(volume_location):
store_factory = partial(storefact.get_store_from_url, f"hfs://{volume_location}")
return store_factory
@pytest.fixture(scope="session")
def uuid():
return "test"
@pytest.fixture(scope="session")
def tested_columns_and_dtypes():
## Non-nested columns not included: `np.uint64` (max value is too large for `BIGINT`)
## The `null` column can be specified as multiple types (at least `STRING` and `FLOAT`)
# TODO: have a mapping from kartothek/arrow dtypes to Hive dtypes
tested_columns_and_dtypes = {
"bool": "BOOLEAN",
"bytes": "BINARY",
"date_": "DATE",
"datetime64": "BIGINT",
"float32": "FLOAT",
"float64": "DOUBLE",
"int8": "TINYINT",
"int16": "SMALLINT",
"int32": "INT",
"int64": "BIGINT",
"uint8": "SMALLINT",
"uint16": "INT",
"uint32": "BIGINT",
"unicode": "STRING",
"null_": "FLOAT",
}
return tested_columns_and_dtypes
@pytest.fixture(
params=[
None,
{"date_": {"cast_as": str, "hive_type": "STRING"}},
{"date_": {"cast_as": str, "hive_type": "STRING"}, "int32": None},
]
)
def partition_on(request, tested_columns_and_dtypes):
partition_on = request.param and {
f"partition_{col_idx}": {
"source_col": source_col,
**(
col_info
or {"hive_type": tested_columns_and_dtypes[source_col], "cast_as": None}
),
}
for col_idx, (source_col, col_info) in enumerate(request.param.items())
}
return partition_on
#### Test function ####
def test_hive_compat(
test_df,
volume_location,
store_factory,
uuid,
tested_columns_and_dtypes,
partition_on,
):
df = _create_df(test_df, partition_on)
hive_df, expected_df, expected_columns = _prep_data(
df=df,
volume_location=volume_location,
store_factory=store_factory,
uuid=uuid,
tested_columns_and_dtypes=tested_columns_and_dtypes,
partition_on=partition_on,
)
_assert_hive_frame_equal(hive_df, expected_df, expected_columns)
_print_success_msg(expected_columns, partition_on)
#### Helper functions ####
def _create_df(df=None, partition_cols=None):
# Create dataset on local filesystem
if df is None:
df = get_dataframe_not_nested(100)
if partition_cols:
for partition_col, partition_from in partition_cols.items():
source_col = partition_from["source_col"]
cast_as = partition_from["cast_as"]
            # create partitioning column and cast if required
df[partition_col] = df[source_col].apply(
lambda x: cast_as and cast_as(x) or x
)
return df
def _prep_data(
df,
volume_location,
store_factory,
uuid,
tested_columns_and_dtypes,
partition_on=None,
):
dfs = [df, df]
TABLE_NAME = uuid # Hive table name
store_dataframes_as_dataset(
store=store_factory,
dataset_uuid=uuid,
dfs=dfs,
partition_on=partition_on and list(partition_on),
)
expected_df = read_table(
store=store_factory, dataset_uuid=uuid, dates_as_object=True
)
print(f"Dataset location: {volume_location}")
conn = hive.Connection(host="hive-server", port=10000)
cursor = conn.cursor()
parquet_file_parentdir = None
for filepath in store_factory().iter_keys():
if filepath.endswith(".parquet"):
parquet_file_parentdir = (
f"{volume_location}{os.path.sep}{os.path.dirname(filepath)}"
)
break
# Create Hive table
selected_columns_and_dtypes = deepcopy(tested_columns_and_dtypes)
print(
_create_hive_table(
cursor,
TABLE_NAME,
selected_columns_and_dtypes,
partition_on,
parquet_file_parentdir,
)
)
if partition_on:
# If on Hive >= 4.0, this code block should be removed and the following should be used:
# https://cwiki.apache.org/confluence/display/Hive/LanguageManual+DDL#LanguageManualDDL-DiscoverPartitions
cursor.execute(f"MSCK REPAIR TABLE {TABLE_NAME}")
selected_columns_and_dtypes.update(partition_on)
expected_columns = list(selected_columns_and_dtypes) # + partition_on
# Read hive table into pandas
hive_df = pd.read_sql(
f"SELECT {', '.join(expected_columns)} FROM {TABLE_NAME}", conn
)
return hive_df, expected_df, expected_columns
def _create_hive_table(
cursor,
table_name,
selected_columns_and_dtypes,
partition_on,
parquet_file_parentdir,
):
cursor.execute(f"drop table if exists {table_name}")
partitioned_by_arg = None
if partition_on:
partitioned_by_arg = f"""PARTITIONED BY ({
", ".join([f"{col_name} {col_info['hive_type']}" for col_name, col_info in partition_on.items()])
})"""
dataset_parentdir = _get_dataset_file_path(parquet_file_parentdir, list(partition_on))
else:
dataset_parentdir = parquet_file_parentdir
# Hive allows us to only select a subset of columns to be loaded from the Parquet file
columns_arg = ",\n ".join(
[
f"{col_name} {dtype}"
for col_name, dtype in selected_columns_and_dtypes.items()
]
)
hive_query = f"""
CREATE external table {table_name} (
{columns_arg}
)
{partitioned_by_arg or ""}
STORED AS PARQUET
LOCATION "{dataset_parentdir}"
"""
print(f"Hive query: {hive_query}")
cursor.execute(hive_query)
return "Table created"
def _get_dataset_file_path(parquet_file_parentdir, partition_on):
# Get the parent directory of the dataset
# Note: Parquet filepath looks like: `/tmp/uuid/table/partition_col1=x/partition_col2=y`
# from which we want : `/tmp/uuid/table`
# we do this by finding the index of the column names in the path string
# and returning the path upto the minimum index
required_path = min(
parquet_file_parentdir[: parquet_file_parentdir.index(col_name) - 1]
for col_name in list(partition_on)
)
return required_path
def _assert_hive_frame_equal(hive_df, expected_df, expected_columns):
# Pyarrow stores timestamp as microseconds from epoch, convert to date
hive_df["datetime64"] = pd.to_datetime(
hive_df.loc[:, "datetime64"] * 1000, unit="ns"
)
# Output from hive is a string, parse this to date
hive_df["date_"] = pd.to_datetime(hive_df.loc[:, "date_"], format="%Y-%m-%d").apply(
lambda x: x.date()
)
hive_df = hive_df.sort_values("bytes").reset_index(drop=True)
expected_df = expected_df.sort_values("bytes").reset_index(drop=True)[
expected_columns
]
# Ignore dtype for numeric comparisons (e.g. int32 with int64)
pdt.assert_frame_equal(expected_df, hive_df, check_dtype=False)
assert len(hive_df) > 0
def _print_success_msg(expected_columns, partition_on=None):
msg_if_partitioned = (
"".join(
(
" partitioned on ",
" and ".join(
f"{col_name} ({col_info['source_col'].rstrip('_')} as {col_info['hive_type']})"
for col_name, col_info in partition_on.items()
),
" ",
)
)
if partition_on
else " "
)
cols_to_print = [
ec.rstrip("_")
for ec in expected_columns
if ec not in (partition_on and list(partition_on) or [])
]
print(
f"Test completed successfully on test dataset{msg_if_partitioned}for the following data types: {cols_to_print}"
)
```
|
{
"source": "JDASoftwareGroup/rle-array",
"score": 2
}
|
#### File: rle-array/tests/test_operators.py
```python
import operator
from typing import Any, Callable, cast
import numpy as np
import pandas as pd
import pytest
from _pytest.fixtures import SubRequest
from numpy import testing as npt
from pandas.core import ops
from rle_array import RLEArray, RLEDtype
pytestmark = pytest.mark.filterwarnings("ignore:performance")
FCompareOperator = Callable[[Any, Any], Any]
FUnaryOperator = Callable[[Any], Any]
FUnaryBoolOperator = Callable[[Any], Any]
FBinaryOperator = Callable[[Any, Any], Any]
FBinaryBoolOperator = Callable[[Any, Any], Any]
@pytest.fixture
def values() -> np.ndarray:
return np.array([2.0, 2.0, 2.0, 3.0, 3.0, 2.0, np.nan, np.nan, 1.0, 1.0])
@pytest.fixture
def scalar(values: np.ndarray) -> float:
return float(values[0])
@pytest.fixture
def uncompressed_series(values: np.ndarray) -> pd.Series:
return pd.Series(values, index=np.arange(len(values)) + 1)
@pytest.fixture
def uncompressed_series2(values: np.ndarray) -> pd.Series:
return pd.Series(values[::-1], index=np.arange(len(values)) + 1)
@pytest.fixture
def rle_series(values: np.ndarray) -> pd.Series:
return pd.Series(RLEArray._from_sequence(values), index=np.arange(len(values)) + 1)
@pytest.fixture
def rle_series2(values: np.ndarray) -> pd.Series:
return pd.Series(
RLEArray._from_sequence(values[::-1]), index=np.arange(len(values)) + 1
)
@pytest.fixture
def bool_values() -> np.ndarray:
return np.array([False] * 4 + [True] * 5 + [False])
@pytest.fixture
def bool_scalar(bool_values: np.ndarray) -> bool:
return bool(bool_values[0])
@pytest.fixture
def uncompressed_bool_series(bool_values: np.ndarray) -> pd.Series:
return pd.Series(bool_values)
@pytest.fixture
def uncompressed_bool_series2(bool_values: np.ndarray) -> pd.Series:
return pd.Series(bool_values[::-1])
@pytest.fixture
def rle_bool_series(bool_values: np.ndarray) -> pd.Series:
return pd.Series(RLEArray._from_sequence(bool_values))
@pytest.fixture
def rle_bool_series2(bool_values: np.ndarray) -> pd.Series:
# TODO: Use `index=np.arange(len(bool_values)) + 1`.
# For some reason, pandas casts us back to dtype=bool in that case.
return pd.Series(RLEArray._from_sequence(bool_values[::-1]))
@pytest.fixture(
params=[
operator.eq,
operator.ne,
operator.lt,
operator.gt,
operator.le,
operator.ge,
],
ids=lambda op: str(op.__name__),
)
def compare_operator(request: SubRequest) -> FCompareOperator:
return cast(FCompareOperator, request.param)
@pytest.fixture(
params=[operator.abs, operator.neg, operator.pos], ids=lambda op: str(op.__name__)
)
def unary_operator(request: SubRequest) -> FUnaryOperator:
return cast(FUnaryOperator, request.param)
@pytest.fixture(params=[operator.inv], ids=lambda op: str(op.__name__))
def unary_bool_operator(request: SubRequest) -> FUnaryBoolOperator:
return cast(FUnaryBoolOperator, request.param)
@pytest.fixture(
params=[
operator.add,
operator.iadd,
ops.radd,
operator.sub,
operator.isub,
ops.rsub,
operator.mul,
operator.imul,
ops.rmul,
operator.truediv,
operator.itruediv,
ops.rtruediv,
operator.floordiv,
operator.ifloordiv,
ops.rfloordiv,
operator.mod,
operator.imod,
ops.rmod,
operator.pow,
operator.ipow,
ops.rpow,
],
ids=lambda op: str(op.__name__),
)
def binary_operator(request: SubRequest) -> FBinaryOperator:
return cast(FBinaryOperator, request.param)
@pytest.fixture(
params=[
operator.and_,
operator.iand,
ops.rand_,
operator.or_,
operator.ior,
ops.ror_,
operator.xor,
operator.ixor,
ops.rxor,
],
ids=lambda op: str(op.__name__),
)
def binary_bool_operator(request: SubRequest) -> FBinaryBoolOperator:
return cast(FBinaryBoolOperator, request.param)
def test_compare_scalar(
rle_series: pd.Series,
uncompressed_series: pd.Series,
scalar: float,
compare_operator: FCompareOperator,
) -> None:
actual = compare_operator(rle_series, scalar)
assert actual.dtype == RLEDtype(bool)
expected = compare_operator(uncompressed_series, scalar).astype("RLEDtype[bool]")
pd.testing.assert_series_equal(actual, expected)
def test_compare_rle_series(
rle_series: pd.Series,
rle_series2: pd.Series,
uncompressed_series: pd.Series,
uncompressed_series2: pd.Series,
compare_operator: FCompareOperator,
) -> None:
actual = compare_operator(rle_series, rle_series2)
assert actual.dtype == RLEDtype(bool)
expected = compare_operator(uncompressed_series, uncompressed_series2).astype(
"RLEDtype[bool]"
)
pd.testing.assert_series_equal(actual, expected)
def test_compare_uncompressed_series(
rle_series: pd.Series,
uncompressed_series: pd.Series,
compare_operator: FCompareOperator,
) -> None:
actual = compare_operator(rle_series, uncompressed_series)
assert actual.dtype == bool
expected = compare_operator(uncompressed_series, uncompressed_series)
pd.testing.assert_series_equal(actual, expected)
def test_binary_operator_scalar(
rle_series: pd.Series,
uncompressed_series: pd.Series,
scalar: float,
binary_operator: FBinaryOperator,
) -> None:
actual = binary_operator(rle_series, scalar)
assert actual.dtype == RLEDtype(float)
expected = binary_operator(uncompressed_series, scalar).astype("RLEDtype[float]")
pd.testing.assert_series_equal(actual, expected)
def test_binary_operator_rle_series(
rle_series: pd.Series,
rle_series2: pd.Series,
uncompressed_series: pd.Series,
uncompressed_series2: pd.Series,
binary_operator: FBinaryOperator,
) -> None:
actual = binary_operator(rle_series, rle_series2)
assert actual.dtype == RLEDtype(float)
expected = binary_operator(uncompressed_series, uncompressed_series2).astype(
"RLEDtype[float]"
)
pd.testing.assert_series_equal(actual, expected)
def test_binary_operator_uncompressed_series(
rle_series: pd.Series,
uncompressed_series: pd.Series,
uncompressed_series2: pd.Series,
binary_operator: FBinaryOperator,
) -> None:
actual = binary_operator(rle_series, uncompressed_series2)
assert actual.dtype == float
expected = binary_operator(uncompressed_series, uncompressed_series2)
pd.testing.assert_series_equal(actual, expected)
def test_binary_bool_operator_scalar(
rle_bool_series: pd.Series,
uncompressed_bool_series: pd.Series,
bool_scalar: bool,
binary_bool_operator: FBinaryBoolOperator,
) -> None:
actual = binary_bool_operator(rle_bool_series, bool_scalar)
assert actual.dtype == RLEDtype(bool)
expected = binary_bool_operator(uncompressed_bool_series, bool_scalar).astype(
RLEDtype(bool)
)
pd.testing.assert_series_equal(actual, expected)
def test_binary_bool_operator_rle_series(
rle_bool_series: pd.Series,
rle_bool_series2: pd.Series,
uncompressed_bool_series: pd.Series,
uncompressed_bool_series2: pd.Series,
binary_bool_operator: FBinaryBoolOperator,
) -> None:
actual = binary_bool_operator(rle_bool_series, rle_bool_series2)
assert actual.dtype == RLEDtype(bool)
expected = binary_bool_operator(
uncompressed_bool_series, uncompressed_bool_series2
).astype(RLEDtype(bool))
pd.testing.assert_series_equal(actual, expected)
def test_binary_bool_operator_uncompressed_series(
rle_bool_series: pd.Series,
uncompressed_bool_series: pd.Series,
uncompressed_bool_series2: pd.Series,
binary_bool_operator: FBinaryBoolOperator,
) -> None:
actual = binary_bool_operator(rle_bool_series, uncompressed_bool_series2)
assert actual.dtype == bool
expected = binary_bool_operator(uncompressed_bool_series, uncompressed_bool_series2)
pd.testing.assert_series_equal(actual, expected)
def test_unary_operator(
rle_series: pd.Series,
uncompressed_series: pd.Series,
unary_operator: FUnaryOperator,
) -> None:
actual = unary_operator(rle_series)
assert actual.dtype == RLEDtype(float)
expected = unary_operator(uncompressed_series).astype(RLEDtype(float))
pd.testing.assert_series_equal(actual, expected)
def test_unary_operator_array(
rle_series: pd.Series,
uncompressed_series: pd.Series,
unary_operator: FUnaryOperator,
) -> None:
actual = unary_operator(rle_series.array)
assert actual.dtype == RLEDtype(float)
expected = unary_operator(uncompressed_series.array)
npt.assert_array_equal(actual, expected)
def test_unary_bool_operator(
rle_bool_series: pd.Series,
uncompressed_bool_series: pd.Series,
unary_bool_operator: FUnaryBoolOperator,
) -> None:
actual = unary_bool_operator(rle_bool_series)
assert actual.dtype == RLEDtype(bool)
expected = unary_bool_operator(uncompressed_bool_series).astype(RLEDtype(bool))
pd.testing.assert_series_equal(actual, expected)
def test_unary_bool_operator_array(
rle_bool_series: pd.Series,
uncompressed_bool_series: pd.Series,
unary_bool_operator: FUnaryBoolOperator,
) -> None:
actual = unary_bool_operator(rle_bool_series.array)
assert actual.dtype == RLEDtype(bool)
expected = unary_bool_operator(uncompressed_bool_series.array)
npt.assert_array_equal(actual, expected)
def test_different_length_raises(values: np.ndarray) -> None:
array1 = RLEArray._from_sequence(values)
array2 = RLEArray._from_sequence(values[:-1])
with pytest.raises(ValueError, match="arrays have different lengths"):
array1 + array2
```
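For orientation, a small sketch of the pattern these fixtures and tests exercise, using only the API visible above (`RLEArray._from_sequence`, `RLEDtype`); the values are illustrative:

```python
import numpy as np
import pandas as pd
from rle_array import RLEArray, RLEDtype

values = np.array([2.0, 2.0, 2.0, 3.0, 3.0])
rle_series = pd.Series(RLEArray._from_sequence(values))
plain_series = pd.Series(values)

# A binary operator against a scalar keeps the run-length-encoded dtype ...
doubled = rle_series * 2.0
assert doubled.dtype == RLEDtype(float)

# ... and matches the uncompressed result once that is cast to the RLE dtype.
expected = (plain_series * 2.0).astype(RLEDtype(float))
pd.testing.assert_series_equal(doubled, expected)
```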
|
{
"source": "JdataNich/reddit-project",
"score": 3
}
|
#### File: JdataNich/reddit-project/reddit_get_data.py
```python
import reddit_request
import requests
import os
REDDIT_USERNAME = os.environ.get('REDDIT_USERNAME')
REDDIT_PASS = os.environ.get('REDDIT_PASS')
APP_ID = os.environ.get('APP_ID')
APP_SECRET = os.environ.get('APP_SECRET')
def get_subreddit_data(subreddit, auth):
token = 'bearer ' + auth['access_token']
headers = {'Authorization': token, 'User-Agent': 'APP-NAME by REDDIT-USERNAME'}
api_url = 'https://oauth.reddit.com'
payload = {'q': subreddit, 'limit': 5, 'sort': 'relevance'}
response = requests.get(api_url + '/subreddits/search', headers=headers, params=payload)
return response
# %%
auth = reddit_request.reddit_auth(REDDIT_USERNAME, REDDIT_PASS, APP_ID, APP_SECRET)
response = get_subreddit_data('gamestop', auth)
data = response.json()['data']
print(data)
```
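The `reddit_request` module is not included in this dump. A minimal sketch of a compatible `reddit_auth`, assuming the standard Reddit script-app password grant (the function name and return shape are inferred from the call above; the User-Agent placeholder mirrors the one in `get_subreddit_data`):

```python
import requests

def reddit_auth(username, password, app_id, app_secret):
    """Hypothetical helper: return the token JSON from Reddit's OAuth password grant.

    The caller reads auth['access_token'], so the decoded JSON response is returned as-is.
    """
    client_auth = requests.auth.HTTPBasicAuth(app_id, app_secret)
    post_data = {'grant_type': 'password', 'username': username, 'password': password}
    headers = {'User-Agent': 'APP-NAME by REDDIT-USERNAME'}
    response = requests.post('https://www.reddit.com/api/v1/access_token',
                             auth=client_auth, data=post_data, headers=headers)
    response.raise_for_status()
    return response.json()
```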
|
{
"source": "jdatascientist/Machine_Learning_4_MRI",
"score": 3
}
|
#### File: Code/MATLAB/PLS.py
```python
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cross_decomposition import PLSRegression
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from scipy.stats import randint as sp_randint
from sklearn.model_selection import RandomizedSearchCV
get_ipython().magic('matplotlib inline')
# In[2]:
zspectra = pd.read_csv('centered_cest.csv', header = None).values
conc = pd.read_csv('concentration.csv', header = None).values
pH = pd.read_csv('pH.csv', header = None).values
# In[5]:
def mymetric(yexp, ypred):
d = np.sum((yexp - ypred)**2 )
d = d / ypred.shape[0]
d = np.sqrt(d)
d = d / np.mean(yexp)
d = 100 * d
return d
# In[10]:
X = zspectra[:, 0:101:2]
Y = pH
num_components = 20
Error = np.zeros((num_components -1,1))
for idx,K in enumerate(np.arange(1,num_components)):
X_train, X_test, y_train, y_test = train_test_split( X, Y, test_size=0.05, random_state=42)
pls = PLSRegression(n_components = K, scale = False)
pls.fit(X_train, y_train)
y_hat = pls.predict(X_test)
Error[idx] = mymetric(y_test , y_hat)
plt.plot( np.arange(1,num_components), Error ,'o-')
# In[12]:
steps = [1,4,8]
labels = list()
for step in steps:
X = zspectra[:, 0:101:step]
labels.append(int(X.shape[1]))
Y = pH
num_components = 10
Error = np.zeros((num_components -1,1))
for idx,K in enumerate(np.arange(1,num_components)):
X_train, X_test, y_train, y_test = train_test_split( X, Y, test_size=0.50, random_state=42)
pls = PLSRegression(n_components = K, scale = False)
pls.fit(X_train, y_train)
y_hat = pls.predict(X_test)
Error[idx] = mymetric(y_test , y_hat)
plt.plot( np.arange(1,num_components), Error ,'o-')
plt.legend(labels)
```
#### File: Infection_vs_Inflammation/Code/Process_Data_V3.py
```python
import numpy as np
from mylocal_functions import *
import matplotlib.pyplot as plt
# ======== CEST============= #
CEST_list=get_ipython().getoutput('ls ../Study_03_CBA/*CEST.txt')
CEST_Int_matrix=np.zeros((len(CEST_list),4))
ppm=np.linspace(-8,8,101);
for i in range( len(CEST_list) ):
D=txt_2_array(CEST_list[i]); #Convert txt file to array
Zn=normalize_data(D.T,8); Zn=Zn[:,9::]
M=np.zeros([1,4])
for j in range(4):
p=fit_L2_scale(ppm,Zn[j,:])
L=Lscale(ppm,p[0],p[1],p[2],p[3],p[4],p[5],p[6]);
#CEST_centered[i,:]=L
#CEST_integral[i,0]=np.sum(L)
M[0,j]=np.sum(L)
CEST_Int_matrix[i,:]=M
# ======== T2 MSME============= #a
# Make list of all T2.txt files
T2_list = get_ipython().getoutput('ls ../Study_03_CBA/*T2.txt')
T2_matrix=np.zeros( (len(T2_list),4) )
TR=np.linspace(.012,.012*12,12)
# Fit T2
for i in range(len(T2_list)):
YDataMatrix=txt_2_array(T2_list[i])
#Estimate T2
T2time=fitT2(TR,YDataMatrix)
T2_matrix[i,:]=T2time.T
# ======== T2ex DCE============= #
# Make list of all T2.txt files
T2ex_list = get_ipython().getoutput('ls ../Study_03_CBA/*T2exDCE.txt')
T2ex_Int_matrix=np.zeros( (len(T2ex_list),4) )
# T2ex integral
for i in range( len(T2ex_list) ):
D=txt_2_array(T2ex_list[i]); #Convert txt file to array
Zn=normalize_data(D.T,0); Zn=Zn[:,9::]
T2ex_Int_matrix[i,:]=np.sum(Zn-1,axis=1)
#======== create violing plots ============= #
Tissues=["Infected","Healthy R","Sterile Infl.","Healthy K"]
# Set dimensions of plot
fig = plt.figure(1,figsize=(10,10));
# CEST
ax = fig.add_subplot(3,1,1); ax.set_xticks([1, 2, 3, 4]); ax.set_xticklabels(Tissues)
plt.violinplot(CEST_Int_matrix, showextrema=True,showmedians=True);
plt.ylabel("CEST Integral")
#T2
ax = fig.add_subplot(3,1,2); ax.set_xticks([1, 2, 3, 4]); ax.set_xticklabels(Tissues)
plt.violinplot(T2_matrix,showextrema=True,showmedians=True);
plt.ylabel("T2 time")
#T2ex
ax = fig.add_subplot(3,1,3); ax.set_xticks([1, 2, 3, 4]); ax.set_xticklabels(Tissues)
plt.violinplot(T2ex_Int_matrix,showextrema=True,showmedians=True);
plt.ylabel("T2ex Integral")
# plot non_neg only
# ======== T2ex DCE ALL============= #
# Make list of all T2.txt files
def plotvio(slice_num):
p1='ls ../Study_03_CBA/*S'
p2=str(slice_num)
p3='*T2exDCE.txt'
file_names=p1+p2+p3
T2ex_list = get_ipython().getoutput(file_names)
T2ex_Int_matrix=np.zeros( (len(T2ex_list),4) )
# T2ex integral
for i in range( len(T2ex_list) ):
D=txt_2_array(T2ex_list[i]); #Convert txt file to array
Zn=normalize_data(D.T,0); #Zn=Zn[:,9::]
T2ex_Int_matrix[i,:]=np.sum(Zn-1,axis=1)
plt.violinplot(T2ex_Int_matrix,showextrema=True,showmedians=True);
for i in range(5):
n=i+1
plt.figure(99,figsize=(15,15));
plt.subplot(5,1,n); plt.title("Slice_0"+str(n))
plotvio(n)
```
|
{
"source": "JDatPNW/faceTrack",
"score": 3
}
|
#### File: faceTrack/src/clInitializer.py
```python
import os
from .Initializer import Initializer
class clInitializer(Initializer):
def getInput(self):
self.visualize = input('Enable visualization? [1=Yes/0=No]: ')
self.visualize = int(self.visualize)
self.inputfile = input(
'Enter the name of the file(folder) containing the YouTube URLs/images/Video : ')
self.inputfile = "/input/" + self.inputfile
self.inputfile = os.path.dirname(
os.path.abspath(__file__)) + "/.." + self.inputfile
self.experiment = input(
'Enter the name of the directory in which the video folders should be saved in: ')
self.experiment = self.experiment + "/"
# Default would be 0 - Cuts off after lower certanties
self.threshold = input('Enter treshhold: ')
self.sampling = input('Enter sampling: ') # 1 works well
self.logger = input(
'Choose between Command Line Logging[1 - faster] and GUI Logging[0]: ') # 1 works well
self.loader = input(
'Do you want to load a YouTube video[0], a folder of images[1], or a video file [2]?: ') # 1 works well
self.tracker = input(
'Choose between dlib[1 - Recommended] and cv2[0] tracking: ') # 1 works well
self.visualizer = input(
'Choose between the cv2[1 - fatser] and GUI[0] Visualizer: ') # 1 works well
self.archiver = input(
'Do you want to safe the results as a .npy[2], .jpg[1] or as a .csv[0]: ') # 1 works well
        if(int(self.visualize) < 0):
self.visualize = 0
elif(int(self.visualize) > 0 and int(self.visualize) < 1):
self.visualize = 1
elif(int(self.visualize) > 1):
self.visualize = 1
if(int(self.threshold) < 0):
self.threshold = 0
if(int(self.sampling) < 0):
self.sampling = 0
if(int(self.loader) < 0):
self.loader = 0
elif(int(self.loader) > 0 and int(self.loader) < 1):
self.loader = 1
elif(int(self.loader) > 1 and int(self.loader) < 2):
self.loader = 1
elif(int(self.loader) > 2):
self.loader = 2
if(int(self.logger) < 0):
self.logger = 0
elif(int(self.logger) > 0 and int(self.logger) < 1):
self.logger = 1
elif(int(self.logger) > 1):
self.logger = 1
if(int(self.tracker) < 0):
self.tracker = 0
elif(int(self.tracker) > 0 and int(self.tracker) < 1):
self.tracker = 1
elif(int(self.tracker) > 1):
self.tracker = 1
if(int(self.visualizer) < 0):
self.visualizer = 0
elif(int(self.visualizer) > 0 and int(self.visualizer) < 1):
self.visualizer = 1
elif(int(self.visualizer) > 1):
self.visualizer = 1
if(int(self.archiver) < 0):
self.archiver = 0
elif(int(self.archiver) > 0 and int(self.archiver) < 1):
self.archiver = 1
elif(int(self.archiver) > 1 and int(self.archiver) < 2):
self.archiver = 2
return (self.visualize,
self.inputfile, self.experiment, self.threshold, self.sampling, self.tracker,
self.logger, self.visualizer, self.loader, self.archiver)
```
#### File: faceTrack/src/csvArchiver.py
```python
import cv2
import re
import PIL.Image
import csv
import os
from .Archiver import Archiver
class csvArchiver(Archiver):
def __init__(self):
self.image_list = []
self.dir = "none"
def saveImg(self, dir, num, scores, crop_img_re, d, i):
if(d[0][0] >= 0 and d[0][1] >= 0 and d[1][0] >= 0 and d[1][1] >= 0):
img = cv2.cvtColor(crop_img_re, cv2.COLOR_BGR2RGB)
img = PIL.Image.fromarray(img)
img = img.convert('RGB')
pixels = list(img.getdata())
pixels = [item for t in pixels for item in t]
self.image_list.append(pixels)
if(self.dir == "none"):
self.dir = dir
def cropAndResize(self, frame, i, d):
if(d[0][0] >= 0 and d[0][1] >= 0 and d[1][0] >= 0 and d[1][1] >= 0):
self.crop_img = frame[d[0][1]:d[1][1], d[0][0]: d[1][0]]
self.crop_img_re = cv2.resize(self.crop_img, (48, 48))
return self.crop_img_re
def closeArchiver(self):
loc = os.path.dirname(os.path.abspath(
__file__)) + "/../" + self.dir + "/out.csv"
with open(loc, 'w') as file:
writer = csv.writer(file)
writer.writerows(self.image_list)
def getCurrentDir(self, line, experiment, loader):
url = line
if(int(loader) == 0):
folder = re.findall("[^\=]*$", url)
folder = folder[0]
dir = "results/" + experiment + folder
elif(int(loader) == 1):
dir = "results/" + experiment + "imageStream"
elif(int(loader) == 2):
dir = "results/" + experiment + "videoStream"
return dir, url
```
#### File: faceTrack/src/guiVisualizer.py
```python
import cv2
import PIL
from PIL import ImageTk
import tkinter
from .Visualizer import Visualizer
class guiVisualizer(Visualizer):
def __init__(self, vis, log):
self.visualize = vis
self.window = log
self.first = True
self.panel = tkinter.Label()
self.basewidth = 400
self.skip = False
def setupWindow(self):
pass
def displayVideo(self, frame):
if(self.visualize == 1):
tempimg = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
tempimg = PIL.Image.fromarray(tempimg)
wpercent = (self.basewidth / float(tempimg.size[0]))
hsize = int((float(tempimg.size[1]) * float(wpercent)))
tempimg = tempimg.resize((self.basewidth, hsize))
img = ImageTk.PhotoImage(tempimg)
if self.first:
self.panel = tkinter.Label(self.window.mainLog, image=img)
self.panel.grid(row=3, columnspan=3)
self.first = False
self.bFinish = tkinter.Button(
self.window.mainLog, text="Skip Current Video", command=self.end)
self.bFinish.grid(row=4, columnspan=3)
self.panel.configure(image=img)
self.panel.image = img
self.window.mainLog.update_idletasks()
self.window.mainLog.update()
if self.skip:
self.skip = False
return 1
else:
return 0
def end(self):
self.skip = True
def highlightFaces(self, frame, d):
if(self.visualize == 1):
cv2.rectangle(frame, (d[0][0], d[0][1]),
(d[1][0], d[1][1]), (255, 0, 0), 2)
def closeWindows(self):
pass
```
#### File: faceTrack/src/Logger.py
```python
class Logger:
    def __init__(self):
pass
def logFaceCoordinates(self, i, d, scores, idx, visualize):
pass
def logNumFaces(self, dets, percentage, vidid, filelength):
pass
def end(self):
pass
```
#### File: faceTrack/src/vidLoader.py
```python
import cv2
from .Loader import Loader
class vidLoader(Loader):
def loadList(self, inputfile):
list = []
list.append(inputfile)
return list
def loadStream(self, url):
cap = cv2.VideoCapture(url)
return cap, url
def getFileLength(self, file):
for i, l in enumerate(file):
pass
return i + 1
```
#### File: faceTrack/src/ytLoader.py
```python
import cv2
import pafy
from .Loader import Loader
class ytLoader(Loader):
def loadList(self, inputfile):
file = open(inputfile, "r")
return file
def loadStream(self, url):
video = pafy.new(url)
best = video.getbest(preftype="mp4")
cap = cv2.VideoCapture()
return cap, best.url
def getFileLength(self, file):
for i, l in enumerate(file):
pass
return i + 1
```
|
{
"source": "jd-au/askap-bulk-cutouts",
"score": 3
}
|
#### File: jd-au/askap-bulk-cutouts/prep_gaskap_abs.py
```python
import argparse
import csv
import datetime
import difflib
import glob
import math
import os
import shutil
import subprocess
import sys
import time
import warnings
from astropy.coordinates import SkyCoord
from astropy.io import ascii, votable
from astropy.table import Table, Column
from astropy import units as u
from astropy.utils.exceptions import AstropyWarning
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
import numpy as np
class CommandFailedError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
def parseargs():
"""
Parse the command line arguments
:return: An args map with the parsed arguments
"""
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,
                                     description="Script to prepare a GASKAP absorption cutout run for an ASKAP scheduling block")
parser.add_argument("-s", "--sbid", help="The id of the ASKAP scheduling block to be processed",
type=str, required=True)
parser.add_argument("-m", "--ms", help="The path to the measurement sets to be used for the cutouts. If omitted the pre-exiting beam details will be used",
type=str, required=False)
parser.add_argument("-c", "--catalogue", help="The path to the Selevy compnent catalogue for the scheduling block",
type=str, required=True)
parser.add_argument("-p", "--ms_pat", help="The pattern to be used to match the folders of the measurement sets top be used.",
type=str, default='*.ms')
parser.add_argument("--output_folder", help="The output folder which will contain the job configuration files",
default=None)
parser.add_argument("--status_folder", help="The status folder which will contain the job completion or failed files",
default='status')
parser.add_argument("--pbs", help="Run the jobs via PBS qsub command", default=False,
action='store_true')
parser.add_argument("-l", "--log_folder", help="The folder which will contain the stdout and stderr files from the jobs",
default='logs')
args = parser.parse_args()
return args
def run_os_cmd(cmd, failOnErr=True):
"""
Run an operating system command ensuring that it finishes successfully.
    If the command fails, the program will exit.
:param cmd: The command to be run
:return: None
"""
print(">", cmd)
sys.stdout.flush()
try:
retcode = subprocess.call(cmd, shell=True)
if retcode != 0:
message = "Command '"+cmd+"' failed with code " + str(retcode)
print(message, file=sys.stderr)
if failOnErr:
raise CommandFailedError(message)
except OSError as e:
message = "Command '" + cmd + "' failed " + e
print(message, file=sys.stderr)
if failOnErr:
raise CommandFailedError(message)
return None
def rename_columns(table):
names = np.asarray(table.colnames)
for name in names:
if name.startswith('col_'):
table.rename_column(name, name[4:])
def slice_strings(a,start,end):
    if end is None:
        if start > 0:
            raise ValueError('end must be present when start is positive')
        b = a.view((str,1)).reshape(len(a),-1)[:,start:]
        return np.frombuffer(b.tobytes(),dtype=(str,start*-1))
    b = a.view((str,1)).reshape(len(a),-1)[:,start:end]
    return np.frombuffer(b.tobytes(),dtype=(str,end-start))
def get_beams_near_src(target_loc, beam_locs, beams, max_sep = 0.8*(1*u.deg)):
"""
Find the beams within a certain angular distance of the target location.
Parameters
----------
target_loc: SkyCoord
The sky location of the target.
beam_locs: SkyCoord[]
The locations of each beam.
beams: str[]
Array of beam names
max_sep: dimension
Maximum distance a beam centre can be from the target to be included (optional).
Returns
-------
List of beam names within the requested distance and a list of their distances from the target.
"""
beam_sep = beam_locs.separation(target_loc)
beams_covering_target = beam_sep < max_sep
src_beams = beams[beams_covering_target]
src_beam_sep = beam_sep[beams_covering_target]
return src_beams, src_beam_sep
def find_mismatches(file_list):
mismatches=set()
prev_value = None
for file_path in file_list:
if prev_value != None:
s = difflib.SequenceMatcher(None, prev_value, file_path)
for opcode in s.get_opcodes():
if opcode[0] != 'equal':
mismatches.add((opcode[1], opcode[2]))
prev_value = file_path
#print (sorted(mismatches))
unique_mismatches = []
for i, region in enumerate(sorted(mismatches)):
contained = False
for j, other in enumerate(sorted(mismatches)):
if i != j:
if region[0] >= other[0] and region[1] <= other[1]:
#print (region,"inside", other)
contained = True
if not contained:
unique_mismatches.append(region)
#print (sorted(unique_mismatches))
return unique_mismatches
def build_ms_pattern(ms_loc, ms_pat):
#file_list = glob.glob(ms_loc + '/**/*.ms', recursive=True)
search_path = '{}/*/{}'.format(ms_loc, ms_pat)
print ("Find measurement sets matching", search_path)
file_list = glob.glob(search_path)
#print (file_list)
mismatches=find_mismatches(file_list)
regions = []
prev_beam = None
for i,j in sorted(mismatches):
start = i
end = j
if start==end:
continue
if file_list[0][i:j].isnumeric():
#if not prev_beam is None and prev_beam[1] == i:
# start = prev_beam[0]
# regions.pop()
#elif not prev_beam is None and prev_beam[0] <= i and prev_beam[1] >= j:
# # This region is contained in the previous one - ignore
# continue
print("beam {} to {} is {}".format(start, end, file_list[0][i:j]))
prev_beam = (i,j)
regions.append(("{1}", start, end))
else:
print("interleave {} to {} is {}".format(i, j, file_list[0][i:j]))
regions.append(("{0}", i, j))
regions.reverse()
pattern = str(file_list[0])
for region in regions:
pattern = pattern[0:region[1]]+region[0]+pattern[region[2]:]
return pattern
def record_data_loc(sbid, ms_pat, data_loc_fname):
index = None
data_loc = ascii.read(data_loc_fname, guess=False, delimiter=',')
for idx, row in enumerate(data_loc):
if str(row['sbid']) == str(sbid):
index = idx
data_loc_new = Table(data_loc)
all_pat = data_loc['pattern'].data.tolist()
if index is None:
data_loc_new.add_row([str(sbid), ''])
all_pat.append(ms_pat)
else:
all_pat[index] = ms_pat
data_loc_new['pattern'] = Column(data=all_pat, name='pattern')
shutil.copyfile(data_loc_fname, data_loc_fname+'.bak')
ascii.write(data_loc_new, output=data_loc_fname, format='csv', overwrite=True)
print ("Recorded ms pattern of sbid {} as {}".format(sbid, ms_pat))
def get_target_list(catalogue, flux_peak_min=15):
# Read and filter catalogue
src_votable = votable.parse_single_table(catalogue, pedantic=False)
table = src_votable.to_table()
rename_columns(table)
targets = table[table['flux_peak']>flux_peak_min]
# Filter out non-unique component names
names, counts = np.unique(targets['component_name'], return_counts=True)
duplicates = names[counts > 1]
for comp_name in duplicates:
indexes = targets['component_name'] == comp_name
max_peak_flux = np.max(targets['flux_peak'][indexes])
to_remove = (targets['component_name'] == comp_name) & (targets['flux_peak'] < max_peak_flux)
idx_to_rem = np.arange(len(targets))[to_remove]
print ('Ignoring weaker duplicate sources', targets['component_id'][idx_to_rem].data )
targets.remove_rows(idx_to_rem)
print ("Found {} targets from {} sources".format(len(targets), len(table)))
return targets
def generate_beam_listing(sb_folder, sbid):
run_os_cmd('./list_beams.sh {}'.format(sbid))
beam_listing = '{0}/beam_listing_SB{1}.csv'.format(sb_folder, sbid)
return beam_listing
def find_beam_locs(beam_listing):
# Build the list of beam locations
beams = ascii.read(beam_listing, format='no_header', guess=False, delimiter=',')
names = ('col1','col2', 'col3', 'col4')
new_names = ('filename','field', 'ra_rad', 'dec_rad')
beams.rename_columns(names, new_names)
mismatches=find_mismatches(beams['filename'])
for region in mismatches:
if region[1]-region[0] ==1:
fn_il_loc = region
else:
fn_beam_loc = region
beam_id = slice_strings(beams['filename'], fn_beam_loc[0], fn_beam_loc[1])
#print (beam_id)
#interleave_id = slice_strings(beams['interleave'], -1, None)# beams['interleave'][:][-1:]
mismatches=find_mismatches(beams['field'])
il_loc = list(mismatches)[0]
interleave_id = slice_strings(beams['field'], il_loc[0], il_loc[1])# beams['interleave'][:][-1:]
file_interleave = slice_strings(beams['filename'], fn_il_loc[0], fn_il_loc[1])
ids = np.stack((beam_id, interleave_id), axis=-1)
unique_ids, unique_idx = np.unique(ids, axis=0, return_index=True)
beams['beam_id'] = beam_id
beams['interleave'] = interleave_id
beams['file_interleave'] = file_interleave
unique_beams = beams[beams['interleave'] == beams['file_interleave']]
u_beam_locs = SkyCoord(ra=unique_beams['ra_rad']*u.rad, dec=unique_beams['dec_rad']*u.rad, frame='icrs')
return unique_beams, u_beam_locs
def plot_beams_and_targets(sb_folder, targets, beams, beam_locs):
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
im = ax.scatter(targets['ra_deg_cont'], targets['dec_deg_cont'], c=targets['flux_peak'], cmap='RdBu_r', vmin=15, vmax=100)
for il in 'ABC':
filt = beams['interleave'] == il
ax.scatter(beam_locs.ra.value[filt], beam_locs.dec.value[filt], marker='+')
# Plot footprints for one interleave
if il == 'A':
for loc in beam_locs[filt]:
c=Ellipse((loc.ra.value, loc.dec.value), width=0.8*2/math.cos(loc.dec.rad), height=0.8, fill=False, ls=':', zorder=-1)
ax.add_artist(c)
cb = fig.colorbar(im)
cb.set_label('Peak flux (mJy/beam)')
ax.grid()
plt.gca().invert_xaxis()
plt.xlabel('Right Ascension')
plt.ylabel('Declination')
plt.savefig(sb_folder+'/sources_beams.png', bbox_inches='tight')
def get_image_params_table(dest_folder, sbid, targets, beams, beam_locs, max_sep_close=0.55*u.deg, max_sep_far=0.8*u.deg):
comp_names = []
comp_ra = []
comp_dec = []
included_beam_nums = []
included_beam_interleaves = []
included_beam_ids = []
included_beam_sep = []
for tgt in targets:
target_loc = SkyCoord(ra=tgt['ra_deg_cont']*u.degree, dec=tgt['dec_deg_cont']*u.degree, frame='icrs')
src_beams, src_beam_sep = get_beams_near_src(target_loc, beam_locs, beams, max_sep=max_sep_close)
if len(src_beams) == 0:
#print ("No beams close to {}, checking out to {}".format(tgt['component_name'], max_sep_far))
src_beams, src_beam_sep = get_beams_near_src(target_loc, beam_locs, beams, max_sep=max_sep_far)
for i in range(len(src_beams)):
comp_names.append(tgt['component_name'])
comp_ra.append(tgt['ra_deg_cont'])
comp_dec.append(tgt['dec_deg_cont'])
included_beam_nums.append(src_beams['beam_id'].data[i])
included_beam_interleaves.append(src_beams['interleave'].data[i])
included_beam_ids.append(src_beams['beam_id'].data[i]+src_beams['interleave'].data[i])
included_beam_sep.append(src_beam_sep.to(u.deg).value[i])
image_params = Table()
image_params['component_name'] = comp_names
image_params['comp_ra'] = comp_ra
image_params['comp_dec'] = comp_dec
image_params['beam_nums'] = included_beam_nums
image_params['beam_interleaves'] = included_beam_interleaves
image_params['beam_ids'] = included_beam_ids
image_params['beam_sep'] = included_beam_sep
image_params_vot = votable.from_table(image_params)
filename = "{}/sb{}_srcs_image_params.vot".format(dest_folder, sbid)
votable.writeto(image_params_vot, filename)
print ("Produced VO table file {} with {} target beam combos.".format(filename, len(image_params)))
return image_params
def report_beam_usage(image_params):
ar = np.array(image_params['beam_ids'])
for i in range(36):
for interleave in ('A', 'B', 'C'):
key = '{:02d}{}'.format(i, interleave)
count = len(ar[ar==key])
if count == 0:
print ("Warning: Beam {} is unused!".format(key))
#print ("Total instances of beam usage {}".format(len(ar)))
print ('Mean targets per beam {:.2f}'.format(len(ar)/(36*3)))
mean_beams_per_source = len(ar) / len(np.unique(image_params['component_name']))
print ('Mean beams per target {:.2f}'.format(mean_beams_per_source))
def create_targets_csv(dest_folder, sbid, targets, image_params):
# Create the csv file of targets and included beams
csv_filename = '{}/targets_{}.csv'.format(dest_folder, sbid)
with open(csv_filename, 'w') as csvfile:
tgtwriter = csv.writer(csvfile, delimiter=',',
quotechar='"', quoting=csv.QUOTE_MINIMAL)
tgtwriter.writerow(['index', 'component_name', 'ra', 'dec', 'beams'])
i = 1
for tgt in targets:
comp_name = tgt['component_name']
row = []
row.append(str(i))
row.append(comp_name)
row.append(tgt['ra_deg_cont'])
row.append(tgt['dec_deg_cont'])
for tgt_beam in image_params:
if comp_name == tgt_beam['component_name']:
row.append(tgt_beam['beam_ids'])
tgtwriter.writerow(row)
i+= 1
mean_beams_per_source = len(image_params) / len(np.unique(image_params['component_name']))
print ('Produced csv file {} with {} targets (mean {:.2f} beams/target).'.format(csv_filename, i-1, mean_beams_per_source))
def main():
# Parse command line options
args = parseargs()
start = time.time()
print("#### Started preparing GASKAP absorption cutout run for sbid {} at {} ####".format
(args.sbid, time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(start))))
# Check locations
if args.ms and not os.path.exists(args.ms):
print("Could not find measurement sets at", args.ms)
return 1
if not os.path.exists(args.catalogue):
print("Could not find component catalogue at", args.catalogue)
return 1
sb_folder = 'sb{}'.format(args.sbid)
if not os.path.exists(sb_folder):
os.makedirs(sb_folder)
beam_listing = '{0}/beam_listing_SB{1}.csv'.format(sb_folder, args.sbid)
if not args.ms:
if not os.path.exists(beam_listing):
print("No measurement sets supplied and beam listing {} does not existm", beam_listing)
return 1
print ('Using pre-existing beam information at', beam_listing)
# Setup the data_loc file
warnings.simplefilter('ignore', category=AstropyWarning)
if args.ms:
ms_patterns = build_ms_pattern(args.ms, args.ms_pat)
record_data_loc(args.sbid, ms_patterns, 'data_loc.csv')
# Build the beam listing
if args.ms:
beam_listing = generate_beam_listing(sb_folder, args.sbid)
beams, beam_locs = find_beam_locs(beam_listing)
# Build the target list
targets = get_target_list(args.catalogue)
plot_beams_and_targets(sb_folder, targets, beams, beam_locs)
num_targets = len(targets)
image_params = get_image_params_table(sb_folder, args.sbid, targets, beams, beam_locs)
report_beam_usage(image_params)
create_targets_csv(sb_folder, args.sbid, targets, image_params)
# Report
end = time.time()
print('#### Processing completed at {} ####'.format(
time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(end))))
print('Processed {0} targets in {1:.0f} sec'.format(
num_targets, (end - start)))
return 0
if __name__ == '__main__':
exit(main())
```
|
{
"source": "jd-au/GASKAP-HI-Absorption-Pipeline",
"score": 3
}
|
#### File: jd-au/GASKAP-HI-Absorption-Pipeline/extract_emission.py
```python
import argparse
import csv
import os
import time
import warnings
import astropy.units as u
from astropy.io import fits, votable
from astropy.io.votable.tree import Param,Info
from astropy.io.votable import from_table, parse_single_table, writeto
from astropy.table import Table, Column
from astropy.utils.exceptions import AstropyWarning
import numpy as np
from spectral_cube import SpectralCube
def parseargs():
"""
Parse the command line arguments
:return: An args map with the parsed arguments
"""
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="Produce emission spectra for a set of sources")
parser.add_argument("-s", "--sbid", help="The id of the ASKAP scheduling block to be processed",
type=int, required=True)
parser.add_argument("-c", "--catalogue", help="The catalgoue of source positions and characteristics",
required=True)
parser.add_argument("-e", "--emission", help="The HI emission cube to source emission data around each source.",
required=True)
parser.add_argument("-p", "--parent", help="The parent folder for the processing, will default to sbnnn/ where nnn is the sbid.",
required=False)
args = parser.parse_args()
return args
def read_targets(targets_file):
ids=[]
comp_names=[]
ras=[]
decs=[]
beams=[]
i=1
with open(targets_file, 'r') as csvfile:
tgt_reader = csv.reader(csvfile, delimiter=',',
quotechar='"', quoting=csv.QUOTE_MINIMAL)
for row in tgt_reader:
if (tgt_reader.line_num == 1):
# skip header
continue
#print (row)
ids.append(i)
comp_names.append(row[1])
ras.append(float(row[2]))
decs.append(float(row[3]))
beams.append(row[4:])
i+=1
table = Table()
table.add_column(Column(name='id', data=ids))
table.add_column(Column(name='comp_name', data=comp_names))
table.add_column(Column(name='ra', data=ras))
table.add_column(Column(name='dec', data=decs))
table.add_column(Column(name='beams', data=beams))
return table
def rename_columns(table):
names = np.asarray(table.colnames)
for name in names:
if name.startswith('col_'):
table.rename_column(name, name[4:])
def prep_folders(folders):
for folder in folders:
if not os.path.exists(folder):
os.mkdir(folder)
print ("Created " + folder)
def output_emission_spectrum(source, comp_name, velocity, em_mean, em_std, filename):
title = 'Emission for source #{} {}'.format(source['id'], comp_name)
em_out_tab = Table(meta={'name': title})
em_out_tab.add_column(Column(name='velocity', data=velocity, unit='m/s', description='LSRK velocity'))
em_out_tab.add_column(Column(name='em_mean', data=em_mean, unit='K', description='Mean brightness temperature'))
em_out_tab.add_column(Column(name='em_std', data=em_std, unit='K', description='Noise level in the brightness temperature'))
votable = from_table(em_out_tab)
votable.params.append(Param(votable, id='id', value=source['id'], datatype='int'))
votable.params.append(Param(votable, id='comp_name', value=comp_name, datatype='char', arraysize='*'))
votable.params.append(Param(votable, id='ra', value=source['ra'], unit='deg', datatype='double'))
votable.params.append(Param(votable, id='dec', value=source['dec'], unit='deg', datatype='double'))
writeto(votable, filename)
def extract_channel_slab(filename, chan_start, chan_end):
cube = SpectralCube.read(filename)
vel_cube = cube.with_spectral_unit(u.m/u.s, velocity_convention='radio')
slab = vel_cube[chan_start:chan_end,:, :].with_spectral_unit(u.km/u.s)
return slab
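# Hedged usage sketch: extract_channel_slab reads a contiguous block of channels as a km/s
# velocity slab. The filename below is a placeholder for illustration only, not a file
# produced by this pipeline.
#   slab = extract_channel_slab('example_gaskap_cube.fits', 0, 160)
#   print(slab.shape)  # (160, ny, nx) if the cube has at least 160 channels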
def extract_emission_around_source(slab, pos, radius_outer, radius_inner):
    xp = int(pos[0])
    yp = int(pos[1])
# Only pull spectra whose positions fall on the footprint of the cube (this should be all, right?)
if (xp < radius_outer) or (xp > slab.shape[2]-radius_outer) or (yp < radius_outer) or (yp > slab.shape[1]-radius_outer):
print ("Skipping")
empty_result = np.zeros(slab.shape[0])
return empty_result, empty_result
# Define pixel coordinates of a grid surrounding each target
center = (xp, yp)
imin = center[0] - radius_outer
imax = center[0] + radius_outer + 1
jmin = center[1] - radius_outer
jmax = center[1] + radius_outer + 1
# loop through and pile in all spectra which fall in the annulus, based on their distance
# from the central pixel
print (imin, imax, jmin, jmax)
sub_specx = []
for k in np.arange(imin, imax):
for j in np.arange(jmin, jmax):
kj = np.array([k, j])
dist = np.linalg.norm(kj - np.array(center))
if dist > radius_inner and dist <= radius_outer:
spec = slab[:, kj[1], kj[0]]
sub_specx = sub_specx + [spec]
#print (sub_specx)
# Compute the mean over all spectra
tb_mean = np.nanmean(sub_specx, axis=0)
# Estimate the uncertainty per channel via the standard deviation over all spectra
tb_std = np.nanstd(sub_specx, axis=0)
#print ('mean=',tb_mean)
return tb_mean, tb_std
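# Illustrative sketch (synthetic data; the _demo_ helper is not part of the original script):
# the annulus selection above only uses the slab's shape and [:, y, x] indexing, so a plain
# numpy array standing in for the (channel, y, x) SpectralCube slab is enough to exercise it.
def _demo_annulus_average():
    rng = np.random.default_rng(0)
    fake_slab = rng.normal(size=(40, 64, 64))  # (channels, y, x) of pure noise
    tb_mean, tb_std = extract_emission_around_source(fake_slab, (32, 32),
                                                     radius_outer=8, radius_inner=4)
    print(tb_mean.shape, tb_std.shape)  # one mean and one std value per channel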
def extract_emission_around_source_by_plane(slab, pos, radius_outer, radius_inner):
    xp = int(pos[0])
    yp = int(pos[1])
# Only pull spectra whose positions fall on the footprint of the cube (this should be all, right?)
if (xp < radius_outer) or (xp > slab.shape[2]-radius_outer) or (yp < radius_outer) or (yp > slab.shape[1]-radius_outer):
print ("Skipping")
empty_result = np.zeros(slab.shape[0])
return empty_result, empty_result
# Define pixel coordinates of a grid surrounding each target
center = (xp, yp)
imin = center[0] - radius_outer
imax = center[0] + radius_outer + 1
jmin = center[1] - radius_outer
jmax = center[1] + radius_outer + 1
# loop through and pile in all spectra which fall in the annulus, based on their distance
# from the central pixel
print (imin, imax, jmin, jmax)
sub_specx = []
num_channels = slab.shape[0]
cutout_center = (radius_outer, radius_outer)
for plane in range(num_channels):
cutout = slab[plane, jmin:jmax, imin:imax]
print (cutout)
idx = 0
for k in np.arange(imin, imax):
for j in np.arange(jmin, jmax):
kj = np.array([k-imin, j-jmin])
dist = np.linalg.norm(kj - np.array(cutout_center))
if dist > radius_inner and dist <= radius_outer:
                    value = cutout[kj[1], kj[0]]  # kj is relative to the cutout origin, so index the cutout
if plane == 0:
spec = np.zeros((num_channels))
sub_specx = sub_specx + [spec]
spec = sub_specx[idx]
spec[plane] = value
idx += 1
#print (sub_specx)
# Compute the mean over all spectra
tb_mean = np.nanmean(sub_specx, axis=0)
# Estimate the uncertainty per channel via the standard deviation over all spectra
tb_std = np.nanstd(sub_specx, axis=0)
#print ('mean=',tb_mean)
return tb_mean, tb_std
def extract_emission_spectra(cube, spectra_table, slab_size=160):
# Read the cube
spec_cube = SpectralCube.read(cube, mode='readonly', memmap=False)
vel_cube = spec_cube.with_spectral_unit(u.m/u.s, velocity_convention='radio')
wcs = vel_cube.wcs.celestial
spec_len =vel_cube.shape[0]
header = fits.getheader(cube)
velocities = vel_cube.spectral_axis
# Identify the target pixels for each spectrum
pixcoord = wcs.wcs_world2pix(spectra_table['ra'],spectra_table['dec'], 0)
radius_outer = 8
radius_inner = 4
# Extract the spectra
start = time.time()
print(" ## Started emission spectra extract at {} ##".format(
(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(start)))))
prev = start
tb_mean_all = []
tb_std_all = []
for s in spectra_table:
tb_mean_all.append(np.zeros(spec_len))
tb_std_all.append(np.zeros(spec_len))
# Extract using slabs
unit = None
prev = time.time()
for i in range(0,spec_len,slab_size):
max_idx = min(i+slab_size, spec_len)
#slab = extract_channel_slab(cube, i, max_idx)
slab = vel_cube[i:max_idx,:, :].with_spectral_unit(u.km/u.s)
print (slab)
unit = slab.unit
post_slab = time.time()
for j in range(len(pixcoord[0])):
pos = [pixcoord[0][j],pixcoord[1][j]]
tb_mean, tb_std = extract_emission_around_source(slab, pos, radius_outer, radius_inner)
tb_mean_all[j][i:max_idx] = tb_mean
tb_std_all[j][i:max_idx] = tb_std
checkpoint = time.time()
print ("Reading slab of channels {} to {}, took {:.2f} s".format(i, max_idx-1, post_slab-prev))
print ("Scanning slab of channels {} to {} for {} sources, took {:.2f} s".format(i, max_idx-1, len(pixcoord[0]), checkpoint-post_slab))
prev = checkpoint
end = time.time()
print(" ## Finished emission spectra extract at {}, took {:.2f} s ##".format(
time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(end)), end-start))
return tb_mean_all, tb_std_all, velocities.value
def output_emission_spectra(spectra_table, tb_mean_all, tb_std_all, velocities, spectra_folder):
print (velocities)
for idx, source in enumerate(spectra_table):
tb_mean = tb_mean_all[idx]
tb_std = tb_std_all[idx]
comp_name = source['comp_name']
if np.sum(tb_mean) == 0:
print ("Source {} has all no emission data".format(comp_name))
filename = '{}/{}_emission.vot'.format(spectra_folder, comp_name)
output_emission_spectrum(source, comp_name, velocities, tb_mean, tb_std, filename)
def main():
warnings.simplefilter('ignore', category=AstropyWarning)
args = parseargs()
start = time.time()
print("#### Started ASKAP emission spectra extraction for sbid {} at {} ####".format(args.sbid,
time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(start))))
parent_folder = 'sb{}/'.format(args.sbid)
if args.parent:
parent_folder = args.parent + '/'
if not os.path.exists(parent_folder):
print("Error: Folder {} does not exist.".format(parent_folder))
return 1
if not os.path.exists(args.emission):
print("Error: File {} does not exist.".format(args.emission))
return 1
figures_folder = parent_folder + 'figures/'
spectra_folder = parent_folder + 'spectra/'
prep_folders([spectra_folder])
targets = read_targets('targets_{}.csv'.format(args.sbid))
# Read and filter catalogue
src_votable = votable.parse(args.catalogue, pedantic=False)
selavy_table = src_votable.get_first_table().to_table()
rename_columns(selavy_table)
# Extract emission spectra
tb_mean_all, tb_std_all, velocities = extract_emission_spectra(args.emission, targets)
output_emission_spectra(targets, tb_mean_all, tb_std_all, velocities, spectra_folder)
# Report
end = time.time()
print('#### Processing completed at %s ####' %
time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(end)))
print('Extracted %d spectra in %.02f s' %
(len(targets), end - start))
return 0
if __name__ == '__main__':
exit(main())
```
|
{
"source": "jd-au/gaskap-validation",
"score": 2
}
|
#### File: jd-au/gaskap-validation/gaskap-hi-validation.py
```python
from __future__ import print_function, division
import argparse
import csv
import datetime
import glob
import math
import os
import re
from string import Template
import shutil
import time
import warnings
import matplotlib
matplotlib.use('agg')
import aplpy
from astropy.constants import k_B
from astropy.coordinates import SkyCoord
from astropy.io import ascii, fits
from astropy.io.votable import parse, from_table, writeto
from astropy.io.votable.tree import Info
from astropy.table import Table, Column
import astropy.units as u
from astropy.utils.exceptions import AstropyWarning
from astropy.wcs import WCS
import matplotlib.pyplot as plt
import numpy as np
from radio_beam import Beam
from spectral_cube import SpectralCube
from statsmodels.tsa import stattools
from statsmodels.graphics.tsaplots import plot_pacf
import seaborn as sns
from validation import Bandpass, Diagnostics, SelfCal, Spectra
from validation_reporter import ValidationReport, ReportSection, ReportItem, ValidationMetric, output_html_report, output_metrics_xml
vel_steps = [-324, -280, -234, -189, -143, -100, -60, -15, 30, 73, 119, 165, 200, 236, 273, 311, 357, 399]
#emission_vel_range=[] # (165,200)*u.km/u.s
emission_vel_range=(119,165)*u.km/u.s
non_emission_val_range=(-100,-60)*u.km/u.s
figures_folder = 'figures'
METRIC_BAD = 3
METRIC_UNCERTAIN = 2
METRIC_GOOD = 1
def parseargs():
"""
Parse the command line arguments
:return: An args map with the parsed arguments
"""
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="Produce a validation report for GASKAP HI observations. Either a cube or an image (or both) must be supplied to be validated.")
parser.add_argument("-c", "--cube", required=False, help="The HI spectral line cube to be checked.")
parser.add_argument("-i", "--image", required=False, help="The continuum image to be checked.")
parser.add_argument("-s", "--source_cat", required=False, help="The selavy source catalogue used for source identification.")
parser.add_argument("-b", "--beam_list", required=False, help="The csv file describing the positions of each beam (in radians).")
parser.add_argument("-d", "--duration", required=False, help="The duration of the observation in hours.", type=float, default=12.0)
parser.add_argument("-o", "--output", help="The folder in which to save the validation report and associated figures.", default='report')
parser.add_argument("-e", "--emvel", required=False, help="The low velocity bound of the velocity region where emission is expected.")
parser.add_argument("-n", "--nonemvel", required=False,
help="The low velocity bound of the velocity region where emission is not expected.", default='-100')
parser.add_argument("-N", "--noise", required=False, help="Use this fits image of the local rms. Default is to run BANE", default=None)
parser.add_argument("-r", "--redo", help="Rerun all steps, even if intermediate files are present.", default=False,
action='store_true')
parser.add_argument("--num_spectra", required=False, help="Number of sample spectra to create", type=int, default=15)
args = parser.parse_args()
return args
def get_str(value):
if isinstance(value, bytes):
return value.decode()
return value
def plot_histogram(file_prefix, xlabel, title):
data = fits.getdata(file_prefix+'.fits')
flat = data.flatten()
flat = flat[~np.isnan(flat)]
v =plt.hist(flat, bins=200, bottom=1, log=True, histtype='step')
plt.grid()
plt.xlabel(xlabel)
plt.ylabel('Count')
plt.title(title)
plt.savefig(file_prefix+'_hist.png', bbox_inches='tight')
plt.savefig(file_prefix+'_hist_sml.png', dpi=16, bbox_inches='tight')
plt.close()
def plot_map(file_prefix, title, cmap='magma', stretch='linear', pmax=99.75, colorbar_label=None):
fig = plt.figure(figsize=(5, 4.5))
gc = aplpy.FITSFigure(file_prefix+'.fits', figure=fig)
gc.show_colorscale(cmap=cmap, stretch=stretch, pmax=pmax)
gc.add_colorbar()
if colorbar_label:
gc.colorbar.set_axis_label_text(colorbar_label)
gc.add_grid()
gc.set_title(title)
gc.savefig(filename=file_prefix+'.png', dpi=200)
gc.savefig(filename=file_prefix+'.pdf', dpi=100)
gc.savefig(filename=file_prefix+'_sml.png', dpi=16 )
gc.close()
def plot_difference_map(hdu, file_prefix, title, vmin=None, vmax=None):
# Initiate a figure and axis object with WCS projection information
wcs = WCS(hdu.header)
fig = plt.figure(figsize=(18, 12))
ax = fig.add_subplot(111, projection=wcs)
no_nan_data = np.nan_to_num(hdu.data)
if vmin is None and vmax is None:
vmin=np.percentile(no_nan_data, 0.25)
vmax=np.percentile(no_nan_data, 99.75)
im = ax.imshow(hdu.data, cmap='RdBu_r',vmin=vmin,vmax=vmax, origin='lower')
#ax.invert_yaxis()
ax.set_xlabel("Right Ascension (degrees)", fontsize=16)
ax.set_ylabel("Declination (degrees)", fontsize=16)
ax.set_title(title, fontsize=16)
ax.grid(color = 'gray', ls = 'dotted', lw = 2)
cbar = plt.colorbar(im, pad=.07)
plt.savefig(file_prefix+'.png', bbox_inches='tight')
plt.savefig(file_prefix+'_sml.png', dpi=10, bbox_inches='tight')
plt.close()
def output_plot(mp, title, imagename):
mp.write('\n<h2>{}</h2>\n<br/>'.format(title))
mp.write('\n<a href="{}"><img width="800px" src="{}"></a>'.format(imagename, imagename))
mp.write('\n<br/>\n')
def output_map_page(filename, file_prefix, title):
with open(filename, 'w') as mp:
mp.write('<html>\n<head><title>{}</title>\n</head>'.format(title))
mp.write('\n<body>\n<h1>{}</h1>'.format(title))
output_plot(mp, 'Large Scale Emission Map', file_prefix + '_bkg.png')
output_plot(mp, 'Noise Map', file_prefix + '_rms.png')
output_plot(mp, 'Moment 0 Map', file_prefix + '.png')
mp.write('\n</body>\n</html>\n')
def convert_slab_to_jy(slab, header):
my_beam = Beam.from_fits_header(header)
restfreq = 1.420405752E+09*u.Hz
if 'RESTFREQ' in header.keys():
restfreq = header['RESTFREQ']*u.Hz
elif 'RESTFRQ' in header.keys():
restfreq = header['RESTFRQ']*u.Hz
if slab.unmasked_data[0,0,0].unit != u.Jy:
print ("Converting slab from {} to Jy".format(slab.unmasked_data[0,0,0].unit) )
print (slab)
slab.allow_huge_operations=True
slab = slab.to(u.Jy, equivalencies=u.brightness_temperature(my_beam, restfreq))
print (slab)
return slab
def convert_data_to_jy(data, header, verbose=False):
my_beam = Beam.from_fits_header(header)
restfreq = 1.420405752E+09*u.Hz
if 'RESTFREQ' in header.keys():
restfreq = header['RESTFREQ']*u.Hz
elif 'RESTFRQ' in header.keys():
restfreq = header['RESTFRQ']*u.Hz
if data[0].unit != u.Jy:
if verbose:
print ("Converting data from {} to Jy".format(data[0].unit) )
data = data.to(u.Jy, equivalencies=u.brightness_temperature(my_beam, restfreq))
return data
def get_vel_limit(vel_cube):
velocities = np.sort(vel_cube.spectral_axis)
return velocities[0], velocities[-1]
def extract_slab(filename, vel_start, vel_end):
cube = SpectralCube.read(filename)
vel_cube = cube.with_spectral_unit(u.m/u.s, velocity_convention='radio')
cube_vel_min, cube_vel_max = get_vel_limit(vel_cube)
if vel_start > cube_vel_max or vel_end < cube_vel_min:
return None
slab = vel_cube.spectral_slab(vel_start, vel_end)
header = fits.getheader(filename)
slab = convert_slab_to_jy(slab, header)
return slab
def extract_channel_slab(filename, chan_start, chan_end):
cube = SpectralCube.read(filename)
vel_cube = cube.with_spectral_unit(u.m/u.s, velocity_convention='radio')
slab = vel_cube[chan_start:chan_end,:, :].with_spectral_unit(u.km/u.s)
header = fits.getheader(filename)
return slab
def build_fname(example_name, suffix):
basename = os.path.basename(example_name)
prefix = os.path.splitext(basename)[0]
fname = prefix + suffix
return fname
def get_figures_folder(dest_folder):
return dest_folder + '/' + figures_folder + '/'
def get_bane_background(infile, outfile_prefix, plot_title_suffix, ncores=8, redo=False, plot=True):
background_prefix = outfile_prefix+'_bkg'
background_file = background_prefix + '.fits'
if redo or not os.path.exists(background_file):
cmd = "BANE --cores={0} --out={1} {2}".format(ncores, outfile_prefix, infile)
print (cmd)
os.system(cmd)
if plot:
plot_map(background_prefix, "Large scale emission in " + plot_title_suffix)
plot_histogram(background_prefix, 'Emission (Jy beam^{-1} km s^{-1})', "Emission for " + plot_title_suffix)
plot_map(outfile_prefix+'_rms', "Noise in "+ plot_title_suffix)
return background_file
def assess_metric(metric, threshold1, threshold2, low_good=False):
if metric < threshold1:
return METRIC_GOOD if low_good else METRIC_BAD
elif metric < threshold2:
return METRIC_UNCERTAIN
else:
return METRIC_BAD if low_good else METRIC_GOOD
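# Illustrative values for the thresholds used below (with low_good=True a small metric is good):
#   assess_metric(3.0, 5, 12, low_good=True)  -> METRIC_GOOD
#   assess_metric(8.0, 5, 12, low_good=True)  -> METRIC_UNCERTAIN
#   assess_metric(15.0, 5, 12, low_good=True) -> METRIC_BAD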
def get_spectral_units(ctype, cunit, hdr):
spectral_conversion = 1
if not cunit in hdr:
if ctype.startswith('VEL') or ctype.startswith('VRAD'):
spectral_unit = 'm/s'
else:
spectral_unit = 'Hz'
else:
spectral_unit = hdr[cunit]
if spectral_unit == 'Hz':
spectral_conversion = 1e6
spectral_unit = 'MHz'
elif spectral_unit == 'kHz':
spectral_conversion = 1e3
spectral_unit = 'MHz'
elif spectral_unit == 'm/s':
spectral_conversion = 1e3
spectral_unit = 'km/s'
return spectral_unit, spectral_conversion
def calc_velocity_res(hdr):
spec_sys = hdr['SPECSYS']
axis = '3' if hdr['CTYPE3'] != 'STOKES' else '4'
spec_type = hdr['CTYPE'+axis]
spectral_unit, spectral_conversion = get_spectral_units(spec_type, 'CUNIT'+axis, hdr)
if 'CUNIT'+axis in hdr.keys():
spec_unit = hdr['CUNIT'+axis]
#elif spec_type == 'VRAD' or spec_type == 'VEL':
# spec_unit = 'm/s'
else:
spec_unit = None
spec_delt = hdr['CDELT'+axis]
print ('CDELT={}, CUNIT={}, spec_unit={}, conversion={}'.format(spec_delt, spec_unit, spectral_unit, spectral_conversion))
spec_res_km_s = np.abs(spec_delt) / spectral_conversion
if spectral_unit == 'MHz':
spec_res_km_s = spec_res_km_s/5e-4*0.1 # 0.5 kHz = 0.1 km/s
#elif spec_unit == 'Hz':
# spec_res_km_s = spec_res_km_s/500*0.1 # 0.5 kHz = 0.1 km/s
#elif spec_unit == 'kHz':
# spec_res_km_s = spec_res_km_s/0.5*0.1 # 0.5 kHz = 0.1 km/s
return spec_res_km_s
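# Illustrative example: for a frequency axis with CDELT = 5000 (Hz) and no CUNIT keyword,
# get_spectral_units treats the step as 0.005 MHz, and the hard-coded 0.5 kHz ~= 0.1 km/s
# scaling above yields a spectral resolution of ~1.0 km/s.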
def report_observation(image, reporter, input_duration, sched_info, obs_metadata):
print('\nReporting observation based on ' + image)
hdr = fits.getheader(image)
w = WCS(hdr).celestial
sbid = hdr['SBID'] if 'SBID' in hdr else sched_info.sbid
project = hdr['PROJECT'] if 'PROJECT' in hdr else ''
proj_link = None
if project.startswith('AS'):
proj_link = "https://confluence.csiro.au/display/askapsst/{0}+Data".format(project)
date = hdr['DATE-OBS']
duration = float(hdr['DURATION'])/3600 if 'DURATION' in hdr else input_duration
naxis1 = int(hdr['NAXIS1'])
naxis2 = int(hdr['NAXIS2'])
pixcrd = np.array([[naxis1/2, naxis2/2]])
centre = w.all_pix2world(pixcrd,1)
centre = SkyCoord(ra=centre[0][0], dec=centre[0][1], unit="deg,deg").to_string(style='hmsdms',sep=':')
# spectral axis
spectral_unit = 'None'
spectral_range = ''
for i in range(3,int(hdr['NAXIS'])+1):
ctype = hdr['CTYPE'+str(i)]
if (ctype.startswith('VEL') or ctype.startswith('VRAD') or ctype.startswith('FREQ')):
key = 'CUNIT'+str(i)
spectral_unit, spectral_conversion = get_spectral_units(ctype, key, hdr)
step = float(hdr['CDELT'+str(i)])
#print ('step {} rval {} rpix {} naxis {}'.format(step, hdr['CRVAL'+str(i)], hdr['CRPIX'+str(i)], hdr['NAXIS'+str(i)]))
spec_start = (float(hdr['CRVAL'+str(i)]) - (step*(float(hdr['CRPIX'+str(i)])-1)))/spectral_conversion
if int(hdr['NAXIS'+str(i)]) > 1:
spec_end = spec_start + (step * (int(hdr['NAXIS'+str(i)]-1)))/spectral_conversion
if step > 0:
spectral_range = '{:0.3f} - {:0.3f}'.format(spec_start, spec_end)
else:
spectral_range = '{:0.3f} - {:0.3f}'.format(spec_end, spec_start)
spec_title = 'Spectral Range'
else:
centre_freq = (float(hdr['CRVAL'+str(i)]) - (step*(float(hdr['CRPIX'+str(i)])-1)))/spectral_conversion
spectral_range = '{:0.3f}'.format(centre_freq)
spec_title = 'Centre Freq'
# Field info
if obs_metadata:
field_names = ''
field_centres = ''
for i,field in enumerate(obs_metadata.fields):
if i > 0:
field_names += '<br/>'
field_centres += '<br/>'
field_names += field.name
field_centres += field.ra + ' ' + field.dec
else:
field_names = sched_info.field_name
field_centres = centre
footprint = sched_info.footprint
if footprint and sched_info.pitch:
footprint = "{}_{}".format(footprint, sched_info.pitch)
section = ReportSection('Observation')
section.add_item('SBID', value=sbid)
section.add_item('Project', value=project, link=proj_link)
section.add_item('Date', value=date)
section.add_item('Duration<br/>(hours)', value='{:.2f}'.format(duration))
section.add_item('Field(s)', value=field_names)
section.add_item('Field Centre(s)', value=field_centres)
section.add_item('Correlator<br/>Mode', value=sched_info.corr_mode)
section.add_item('Footprint', value=footprint)
section.add_item('{}<br/>({})'.format(spec_title, spectral_unit), value=spectral_range)
reporter.add_section(section)
reporter.project = project
return sbid
def report_cube_stats(cube, reporter):
print ('\nReporting cube stats')
hdr = fits.getheader(cube)
w = WCS(hdr).celestial
# Cube information
askapSoftVer = 'N/A'
askapPipelineVer = 'N/A'
history = hdr['history']
askapSoftVerPrefix = 'Produced with ASKAPsoft version '
askapPipelinePrefix = 'Processed with ASKAP pipeline version '
for row in history:
if row.startswith(askapSoftVerPrefix):
askapSoftVer = row[len(askapSoftVerPrefix):]
elif row.startswith(askapPipelinePrefix):
askapPipelineVer = row[len(askapPipelinePrefix):]
beam = 'N/A'
if 'BMAJ' in hdr:
beam_maj = hdr['BMAJ'] * 60 * 60
beam_min = hdr['BMIN'] * 60 * 60
beam = '{:.1f} x {:.1f}'.format(beam_maj, beam_min)
dims = []
for i in range(1,int(hdr['NAXIS'])+1):
dims.append(str(hdr['NAXIS'+str(i)]))
dimensions = ' x '.join(dims)
# self.area,self.solid_ang = get_pixel_area(fits, nans=True, ra_axis=self.ra_axis, dec_axis=self.dec_axis, w=w)
cube_name = os.path.basename(cube)
section = ReportSection('Image Cube', cube_name)
section.add_item('ASKAPsoft<br/>version', value=askapSoftVer)
section.add_item('Pipeline<br/>version', value=askapPipelineVer)
section.add_item('Synthesised Beam<br/>(arcsec)', value=beam)
section.add_item('Sky Area<br/>(deg2)', value='')
section.add_item('Dimensions', value=dimensions)
reporter.add_section(section)
return
def check_for_emission(cube, vel_start, vel_end, reporter, dest_folder, ncores=8, redo=False):
print ('\nChecking for presence of emission in {:.0f} < v < {:.0f}'.format(vel_start, vel_end))
# Extract a moment 0 map
slab = extract_slab(cube, vel_start, vel_end)
if slab is None:
print ("** No data for the emission range - skipping check **")
return
num_channels = slab.shape[0]
hdr = fits.getheader(cube)
spec_res_km_s = calc_velocity_res(hdr)
mom0 = slab.moment0()
prefix = build_fname(cube, '_mom0')
folder = get_figures_folder(dest_folder)
mom0_fname = folder + prefix + '.fits'
mom0.write(mom0_fname, overwrite=True)
hi_data = fits.open(mom0_fname)
plot_title_suffix = "emission region in " + os.path.basename(cube)
plot_difference_map(hi_data[0], folder+prefix, "Moment 0 map of " + plot_title_suffix)
# Produce the background plots
bkg_data = get_bane_background(mom0_fname, folder+prefix, plot_title_suffix, ncores=ncores, redo=redo)
map_page = folder + '/emission.html'
rel_map_page = get_figures_folder('.') + '/emission.html'
output_map_page(map_page, prefix, 'Emission Plots for ' + os.path.basename(cube))
hi_data = fits.open(folder + prefix+'_bkg.fits')
max_em = np.nanmax(hi_data[0].data)
max_em_per_kms = max_em / (spec_res_km_s * num_channels)
# assess
cube_name = os.path.basename(cube)
section = ReportSection('Presence of Emission', cube_name)
section.add_item('Velocity Range<br/>(km/s LSR)', value='{:.0f} to {:.0f}'.format(vel_start.value, vel_end.value))
section.add_item('Channels', value='{}'.format(num_channels))
section.add_item('Large Scale<br/>Emission Map', link=rel_map_page, image='figures/'+prefix+'_bkg_sml.png')
section.add_item('Emission Histogram', link='figures/'+prefix+'_bkg_hist.png', image='figures/'+prefix+'_bkg_hist_sml.png')
section.add_item('Max Emission<br/>(Jy beam<sup>-1</sup>)', value='{:.3f}'.format(max_em_per_kms))
reporter.add_section(section)
metric = ValidationMetric('Presence of Emission',
'Maximum large scale emission intensity in the velocity range where emission is expected.',
int(max_em_per_kms), assess_metric(max_em_per_kms, 12, 20))
reporter.add_metric(metric)
return
def check_for_non_emission(cube, vel_start, vel_end, reporter, dest_folder, ncores=8, redo=False):
print ('\nChecking for absence of emission in {:.0f} < v < {:.0f}'.format(vel_start, vel_end))
# Extract a moment 0 map
slab = extract_slab(cube, vel_start, vel_end)
if slab is None:
print ("** No data for the non-emission range - skipping check **")
return None
num_channels = slab.shape[0]
hdr = fits.getheader(cube)
spec_res_km_s = calc_velocity_res(hdr)
mom0 = slab.moment0()
prefix = build_fname(cube, '_mom0_off')
folder = get_figures_folder(dest_folder)
mom0_fname = folder + prefix + '.fits'
mom0.write(mom0_fname, overwrite=True)
hi_data = fits.open(mom0_fname)
plot_title_suffix = "non-emission region in " + os.path.basename(cube)
plot_difference_map(hi_data[0], folder+prefix, "Moment 0 map of " + plot_title_suffix)
# Produce the background plots
bkg_data = get_bane_background(mom0_fname, folder+prefix, plot_title_suffix, ncores=ncores, redo=redo)
map_page = folder + '/off_emission.html'
rel_map_page = get_figures_folder('.') + '/off_emission.html'
output_map_page(map_page, prefix, 'Off-line Emission Plots for ' + os.path.basename(cube))
hi_data = fits.open(folder+prefix+'_bkg.fits')
max_em = np.nanmax(hi_data[0].data)
max_em_per_kms = max_em / (spec_res_km_s * num_channels)
# assess
cube_name = os.path.basename(cube)
section = ReportSection('Absence of Off-line Emission', cube_name)
section.add_item('Velocity Range<br/>(km/s LSR)', value='{:.0f} to {:.0f}'.format(vel_start.value, vel_end.value))
section.add_item('Channels', value='{}'.format(num_channels))
section.add_item('Large Scale<br/>Emission Map', link=rel_map_page, image='figures/'+prefix+'_bkg_sml.png')
section.add_item('Emission Histogram', link='figures/'+prefix+'_bkg_hist.png', image='figures/'+prefix+'_bkg_hist_sml.png')
section.add_item('Max Emission<br/>(Jy beam<sup>-1</sup>)', value='{:.3f}'.format(max_em_per_kms))
reporter.add_section(section)
metric = ValidationMetric('Absence of Off-line Emission',
'Maximum large scale emission intensity in the velocity range where emission is not expected.',
int(max_em_per_kms), assess_metric(max_em_per_kms, 5, 12, low_good=True))
reporter.add_metric(metric)
return slab
def calc_theoretical_rms(chan_width, t_obs= 12*60*60, n_ant=36):
"""
    Calculate the theoretical rms noise for ASKAP, assuming natural weighting and not accounting for the fraction of flagged data.
Based on ASKAP SEFD measurement in SB 9944.
Parameters
----------
chan_width : int
channel width in Hz
t_obs : int
duration of the observation in seconds
n_ant : int
Number of antennae
Returns
-------
    rms : float
Theoretical RMS in mJy
"""
#cor_eff = 0.8 # correlator efficiency - WALLABY
cor_eff = 1.0 # correlator efficiency - assumed to be included in the SEFD
n_pol = 2.0 # Number of polarisation, npol = 2 for images in Stokes I, Q, U, or V
#sefd = 1700*u.Jy # As measured in SB 9944
sefd = 1800*u.Jy # Hotan et al 2021
rms_jy = sefd/(cor_eff*math.sqrt(n_pol*n_ant*(n_ant-1)*chan_width*t_obs))
return rms_jy.to(u.mJy).value
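# Worked example (follows directly from the formula above): for a 5 kHz channel, a 12 hour
# observation and 36 antennas,
#   1800 Jy / sqrt(2 * 36 * 35 * 5000 Hz * 43200 s) ~= 2.4 mJy/beam,
# which is the style of per-5 kHz reference value that measure_spectral_line_noise derives
# below for the actual observation duration, e.g.:
#   calc_theoretical_rms(5000, t_obs=12 * 60 * 60)  # ~2.44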
def measure_spectral_line_noise(slab, cube, vel_start, vel_end, reporter, dest_folder, duration, redo=False):
print ('\nMeasuring the spectral line noise levels across {:.0f} < v < {:.0f}'.format(vel_start, vel_end))
if slab is None:
print ("** No data for the non-emission range - skipping check **")
return
# Extract header details
hdr = fits.getheader(cube)
spec_sys = hdr['SPECSYS']
axis_num = '3' if hdr['CTYPE3'] != 'STOKES' else '4'
spec_type = hdr['CTYPE'+axis_num]
axis = spec_sys + ' ' + spec_type
spec_res_km_s = calc_velocity_res(hdr)
# Scale the noise to mJy / 5 kHz channel
std_data = np.nanstd(slab.unmasked_data[:], axis=0)
noise_5kHz = std_data / np.sqrt(1 / spec_res_km_s)
noise_5kHz = noise_5kHz.to(u.mJy) # Jy => mJy
# Extract the spectral line noise map
mom0_prefix = build_fname(cube, '_mom0_off')
folder = get_figures_folder(dest_folder)
mom0_fname = folder + mom0_prefix + '.fits'
prefix = build_fname(cube, '_spectral_noise')
noise_fname = folder + prefix + '.fits'
fits.writeto(noise_fname, noise_5kHz.value, fits.getheader(mom0_fname), overwrite=True)
# Produce the noise plots
cube_name = os.path.basename(cube)
plot_map(folder+prefix, "Spectral axis noise map for " + cube_name, cmap='mako_r', stretch='arcsinh',
colorbar_label=r'Noise level per 5 kHz channel (mJy beam$^{-1}$)')
plot_histogram(folder+prefix, r'Noise level per 5 kHz channel (mJy beam$^{-1}$)', 'Spectral axis noise for ' + cube_name)
median_noise_5kHz = np.nanmedian(noise_5kHz.value[noise_5kHz.value!=0.0])
theoretical_gaskap_noise = calc_theoretical_rms(5000, t_obs=duration*60*60) # mJy per 5 kHz for the observation duration
print ("Theoretical noise {:.3f} mJy/beam".format(theoretical_gaskap_noise))
median_ratio = median_noise_5kHz / theoretical_gaskap_noise
# assess
cube_name = os.path.basename(cube)
section = ReportSection('Spectral Line Noise', cube_name)
section.add_item('Velocity Range<br/>(km/s LSR)', value='{:.0f} to {:.0f}'.format(vel_start.value, vel_end.value))
section.add_item('Spectral Axis', value=axis)
    section.add_item('Spectral Resolution<br/>(km/s)', value='{}'.format(round(spec_res_km_s,2)))
section.add_item('Spectral Axis<br/>Noise Map', link='figures/'+prefix+'.png', image='figures/'+prefix+'_sml.png')
section.add_item('Spectral Axis<br/>Noise Histogram', link='figures/'+prefix+'_hist.png', image='figures/'+prefix+'_hist_sml.png')
section.add_item('Spectral Axis Noise<br/>(mJy per 5 kHz)', value='{:.3f}'.format(median_noise_5kHz))
section.add_item('Spectral Axis Noise<br/>(vs theoretical for {:.2f} hr)'.format(duration), value='{:.3f}'.format(median_ratio))
reporter.add_section(section)
metric = ValidationMetric('Spectral Noise',
'1-sigma spectral noise comparison to theoretical per 5 kHz channel for {:.2f} hr observation.'.format(duration),
round(median_ratio,3), assess_metric(median_ratio,
np.sqrt(2), np.sqrt(2)*2, low_good=True))
reporter.add_metric(metric)
return
def get_pixel_area(fits_file,flux=0,nans=False,ra_axis=0,dec_axis=1,w=None):
"""For a given image, get the area and solid angle of all non-nan pixels or all pixels below a certain flux (doesn't count pixels=0).
The RA and DEC axes follow the WCS convention (i.e. starting from 0).
Arguments:
----------
    fits_file : astropy.io.fits.PrimaryHDU
        The primary HDU of a FITS image.
Keyword arguments:
------------------
flux : float
The flux in Jy, below which pixels will be selected.
nans : bool
Derive the area and solid angle of all non-nan pixels.
ra_axis : int
The index of the RA axis (starting from 0).
dec_axis : int
The index of the DEC axis (starting from 0).
w : astropy.wcs.WCS
A wcs object to use for reading the pixel sizes.
Returns:
--------
area : float
The area in square degrees.
solid_ang : float
The solid angle in steradians.
See Also:
---------
astropy.io.fits
astropy.wcs.WCS"""
if w is None:
w = WCS(fits_file.header)
#count the pixels and derive area and solid angle of all these pixels
if nans:
count = fits_file.data[(~np.isnan(fits_file.data)) & (fits_file.data != 0)].shape[0]
else:
count = fits_file.data[(fits_file.data < flux) & (fits_file.data != 0)].shape[0]
area = (count*np.abs(w.wcs.cdelt[ra_axis])*np.abs(w.wcs.cdelt[dec_axis]))
solid_ang = area*(np.pi/180)**2
return area,solid_ang
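# Illustrative sketch (synthetic data; the _demo_ helper is not part of the original script):
# build a tiny image with 1 arcmin pixels, blank a strip with NaNs, and measure the sky area
# of the remaining pixels.
def _demo_get_pixel_area():
    data = np.full((100, 100), 1.0)
    data[:10, :] = np.nan  # pretend the first 10 rows are blanked
    hdu = fits.PrimaryHDU(data)
    hdu.header['CTYPE1'] = 'RA---SIN'
    hdu.header['CTYPE2'] = 'DEC--SIN'
    hdu.header['CRVAL1'] = 80.0
    hdu.header['CRVAL2'] = -70.0
    hdu.header['CRPIX1'] = 50.0
    hdu.header['CRPIX2'] = 50.0
    hdu.header['CDELT1'] = -1 / 60.0  # 1 arcmin pixels
    hdu.header['CDELT2'] = 1 / 60.0
    area, solid_ang = get_pixel_area(hdu, nans=True)
    print('{:.2f} deg^2, {:.2e} sr'.format(area, solid_ang))  # expect 2.50 deg^2 here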
def report_image_stats(image, noise_file, reporter, dest_folder, diagnostics_dir, ncores=8, redo=False):
print ('\nReporting image stats')
fits_file = fits.open(image)
hdr = fits_file[0].header
w = WCS(hdr).celestial
fig_folder= get_figures_folder(dest_folder)
# Image information
askapSoftVer = 'N/A'
askapPipelineVer = 'N/A'
history = hdr['history']
askapSoftVerPrefix = 'Produced with ASKAPsoft version '
askapPipelinePrefix = 'Processed with ASKAP pipeline version '
for row in history:
if row.startswith(askapSoftVerPrefix):
askapSoftVer = row[len(askapSoftVerPrefix):]
elif row.startswith(askapPipelinePrefix):
askapPipelineVer = row[len(askapPipelinePrefix):]
beam = 'N/A'
if 'BMAJ' in hdr:
beam_maj = hdr['BMAJ'] * 60 * 60
beam_min = hdr['BMIN'] * 60 * 60
beam = '{:.1f} x {:.1f}'.format(beam_maj, beam_min)
# Analyse image data
area,solid_ang = get_pixel_area(fits_file[0], nans=False)
# if not noise_file:
# prefix = build_fname(image, '')
# folder = get_figures_folder(dest_folder)
# noise_file = get_bane_background(image, folder+prefix, redo=redo, plot=False)
# rms_map = fits.open(noise_file)[0]
img_data = fits_file[0].data
img_peak = np.max(img_data[~np.isnan(img_data)])
# rms_bounds = rms_map.data > 0
# img_rms = int(np.median(rms_map.data[rms_bounds])*1e6) #uJy
# img_peak_bounds = np.max(img_data[rms_bounds])
# img_peak_pos = np.where(img_data == img_peak_bounds)
# img_peak_rms = rms_map.data[img_peak_pos][0]
# dynamic_range = img_peak_bounds/img_peak_rms
#img_flux = np.sum(img_data[~np.isnan(img_data)]) / (1.133*((beam_maj * beam_min) / (img.raPS * img.decPS))) #divide by beam area
    # Copy pipeline plots
field_src_plot = copy_existing_image(diagnostics_dir+'/image.i.SB*.cont.restored_sources.png', fig_folder)
image_name = os.path.basename(image)
section = ReportSection('Image', image_name)
section.add_item('ASKAPsoft<br/>version', value=askapSoftVer)
section.add_item('Pipeline<br/>version', value=askapPipelineVer)
section.add_item('Synthesised Beam<br/>(arcsec)', value=beam)
add_opt_image_section('Source Map', field_src_plot, fig_folder, dest_folder, section)
# section.add_item('Median r.m.s.<br/>(uJy)', value='{:.2f}'.format(img_rms))
# section.add_item('Image peak<br/>(Jy)', value='{:.2f}'.format(img_peak_bounds))
# section.add_item('Dynamic Range', value='{:.2f}'.format(dynamic_range))
section.add_item('Sky Area<br/>(deg2)', value='{:.2f}'.format(area))
reporter.add_section(section)
return
def set_velocity_range(emvelstr, nonemvelstr):
emvel = int(emvelstr)
if not emvel in vel_steps:
raise ValueError('Velocity {} is not one of the supported GASS velocity steps e.g. 165, 200.'.format(emvel))
nonemvel = int(nonemvelstr)
if not nonemvel in vel_steps:
        raise ValueError('Velocity {} is not one of the supported GASS velocity steps e.g. 165, 200.'.format(nonemvel))
idx = vel_steps.index(emvel)
if idx +1 >= len(vel_steps):
raise ValueError('Velocity {} is not one of the supported GASS velocity steps e.g. 165, 200.'.format(emvel))
# emission_vel_range=(vel_steps[idx],vel_steps[idx+1])*u.km/u.s
emission_vel_range[0]=vel_steps[idx]*u.km/u.s
emission_vel_range[1]=vel_steps[idx+1]*u.km/u.s
print ('\nSet emission velocity range to {:.0f} < v < {:.0f}'.format(emission_vel_range[0], emission_vel_range[1]))
idx = vel_steps.index(nonemvel)
if idx +1 >= len(vel_steps):
        raise ValueError('Velocity {} is not one of the supported GASS velocity steps e.g. 165, 200.'.format(nonemvel))
# emission_vel_range=(vel_steps[idx],vel_steps[idx+1])*u.km/u.s
non_emission_val_range[0]=vel_steps[idx]*u.km/u.s
non_emission_val_range[1]=vel_steps[idx+1]*u.km/u.s
print ('\nSet non emission velocity range to {:.0f} < v < {:.0f}'.format(non_emission_val_range[0], non_emission_val_range[1]))
def identify_periodicity(spectrum):
"""
Check if there are periodic features in a spectrum. This tests if there are patterns which are
    present in the spectrum separated by a specific number of channels (or lag), i.e. if the same
pattern repeats every so many channels. Only features with at least 3-sigma significance are
returned.
Arguments:
----------
spectrum : array-like
The numerical spectrum.
Returns:
--------
repeats: array
The lag intervals that have 3-sigma or greater periodic features
sigma: array
The significance of each repeat value, in sigma.
"""
# Use a partial auto-correlation function to identify repeated patterns
pacf = stattools.pacf(spectrum, nlags=min(50, len(spectrum)//5))
sd = np.std(pacf[1:])
significance= pacf/sd
indexes = (significance>3).nonzero()[0]
repeats = indexes[indexes>3]
return repeats, significance[repeats]
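# Illustrative sketch (synthetic data; the _demo_ helper is not part of the original script):
# a spike repeating every 10 channels on top of noise should normally show up as a >3-sigma
# partial-autocorrelation peak at (or near) lag 10, whereas pure noise should usually return
# an empty result.
def _demo_identify_periodicity():
    rng = np.random.default_rng(42)
    spectrum = rng.normal(scale=1.0, size=500)
    spectrum[::10] += 5.0  # artificial pattern repeating every 10 channels
    repeats, sig = identify_periodicity(spectrum)
    print('Periodic lags:', repeats, 'significance:', sig)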
def plot_all_spectra(spectra, names, velocities, em_unit, vel_unit, figures_folder, prefix):
fig = None
if len(spectra) > 20:
fig = plt.figure(figsize=(18, 72))
else:
fig = plt.figure(figsize=(18, 12))
num_rows = math.ceil(len(spectra)/3)
for idx, spectrum in enumerate(spectra):
label = get_str(names[idx])
ax = fig.add_subplot(num_rows, 3, idx+1)
ax.plot(velocities, spectrum, linewidth=1)
ax.set_title(label)
ax.grid()
if idx > 2*num_rows:
ax.set_xlabel("$v_{LSRK}$ " + '({})'.format(vel_unit))
if idx % 3 == 0:
ax.set_ylabel(em_unit)
fig.tight_layout()
fig.savefig(figures_folder+'/'+prefix+'-spectra-individual.pdf')
def plot_overlaid_spectra(spectra, names, velocities, em_unit, vel_unit, figures_folder, cube_name, prefix):
fig = plt.figure(figsize=(18, 12))
axes = []
if len(spectra) > 36:
for i in range(1,4):
ax = fig.add_subplot(3,1,i)
axes.append(ax)
else:
ax = fig.add_subplot()
axes.append(ax)
for i, spec in enumerate(spectra):
label = get_str(names[i])
idx = 0
if len(axes) > 1:
interleave = label[-4]
idx = ord(interleave) - ord('A')
ax = axes[idx]
ax.plot(velocities, spec, label=label)
for idx, ax in enumerate(axes):
ax.set_xlabel("$v_{LSRK}$ " + '({})'.format(vel_unit))
ax.set_ylabel(em_unit)
ax.legend()
ax.grid()
if len(axes) > 1:
ax.set_title('Spectra for all beams in interleave {}'.format(chr(ord('A')+idx)))
else:
ax.set_title('Spectra for {} brightest sources in {}'.format(len(spectra), cube_name))
plt.savefig(figures_folder+'/'+prefix+'-spectra.png')
plt.savefig(figures_folder+'/'+prefix+'-spectra_sml.png', dpi=16)
def output_spectra_page(filename, prefix, title):
with open(filename, 'w') as mp:
mp.write('<html>\n<head><title>{}</title>\n</head>'.format(title))
mp.write('\n<body>\n<h1>{}</h1>'.format(title))
output_plot(mp, 'All Spectra', prefix + '-spectra.png')
output_plot(mp, 'Individual Spectra', prefix + '-spectra-individual.pdf')
mp.write('\n</body>\n</html>\n')
def plot_periodic_spectrum(spectrum, fig, name):
ax = fig.add_subplot(211)
ax.plot(spectrum)
ax.set_title('Spectrum for ' + name)
ax.grid()
ax = fig.add_subplot(212)
plot_pacf(spectrum, lags=50, ax=ax)
fig.tight_layout()
def output_periodic_spectra_page(filename, prefix, title, periodic, detections):
with open(filename, 'w') as mp:
mp.write('<html>\n<head><title>{}</title>\n</head>'.format(title))
mp.write('\n<body>\n<h1>{}</h1>'.format(title))
for idx, src_name in enumerate(periodic):
output_plot(mp, src_name, prefix + '{}_periodicity.png'.format(src_name))
mp.write('<p>{}</p>'.format(detections[idx]))
mp.write('\n</body>\n</html>\n')
def save_spectum(name, velocities, fluxes, ra, dec, spectra_folder):
spec_table = Table(
[velocities, fluxes],
names=['Velocity', 'Emission'],
meta={'ID': name, 'RA' : ra, 'Dec': dec})
votable = from_table(spec_table)
votable.infos.append(Info('RA', 'RA', ra))
votable.infos.append(Info('Dec', 'Dec', dec))
writeto(votable, '{}/{}.vot'.format(spectra_folder, name))
def extract_spectra(cube, source_cat, dest_folder, reporter, num_spectra, beam_list, slab_size=40):
print('\nExtracting spectra for the {} brightest sources in {} and beams listed in {}'.format(
num_spectra, source_cat, beam_list))
# Prepare the output folders
spectra_folder = dest_folder + '/spectra'
if not os.path.exists(spectra_folder):
os.makedirs(spectra_folder)
figures_folder = dest_folder + '/figures'
# Read the source list and identify the brightest sources
bright_srcs = []
bright_src_pos = []
if source_cat:
votable = parse(source_cat, pedantic=False)
sources = votable.get_first_table()
srcs_tab = sources.to_table()
for key in ('component_name', 'col_component_name'):
if key in srcs_tab.keys():
comp_name_key = key
break
bright_idx = np.argsort(sources.array['flux_peak'])[-num_spectra:]
bright_srcs = sources.array[bright_idx]
bright_srcs.sort(order=comp_name_key)
for idx, src in enumerate(bright_srcs):
pos = SkyCoord(ra=src['ra_deg_cont']*u.deg, dec=src['dec_deg_cont']*u.deg)
bright_src_pos.append(pos)
# Read the beams
beams = []
if beam_list:
beams = ascii.read(beam_list)
beams.add_column(Column(name='pos', data=np.empty((len(beams)), dtype=object)))
beams.add_column(Column(name='name', data=np.empty((len(beams)), dtype=object)))
for beam in beams:
name = '{}-{:02d}'.format(beam['col1'], beam['col2'])
pos = SkyCoord(ra=beam['col3']*u.rad, dec=beam['col4']*u.rad)
beam['name'] = name
beam['pos'] = pos
# Read the cube
spec_cube = SpectralCube.read(cube)
vel_cube = spec_cube.with_spectral_unit(u.m/u.s, velocity_convention='radio')
wcs = vel_cube.wcs.celestial
spec_len =vel_cube.shape[0]
header = fits.getheader(cube)
# Identify the target pixels for each spectrum
pix_pos_bright = []
pix_pos_beam = []
for idx, source in enumerate(bright_srcs):
        pos = bright_src_pos[idx]
pixel = pos.to_pixel(wcs=wcs)
rnd = np.round(pixel)
pix_pos_bright.append((int(rnd[0]), int(rnd[1])))
for source in beams:
pos = source['pos']
pixel = pos.to_pixel(wcs=wcs)
rnd = np.round(pixel)
pix_pos_beam.append((int(rnd[0]), int(rnd[1])))
# Extract the spectra
start = time.time()
print(" ## Started spectra extract at {} ##".format(
(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(start)))))
prev = start
spectra_bright = []
for p in pix_pos_bright:
spectra_bright.append(np.zeros(spec_len))
spectra_beam = []
for p in pix_pos_beam:
spectra_beam.append(np.zeros(spec_len))
# Extract using slabs
unit = None
prev = time.time()
for i in range(0,spec_len,slab_size):
max_idx = min(i+slab_size, spec_len)
slab = extract_channel_slab(cube, i, max_idx)
checkpoint = time.time()
print (slab)
unit = slab.unit
for j, pos in enumerate(pix_pos_bright):
data = slab[:,pos[1], pos[0]]
#data = convert_data_to_jy(data, header)
spectra_bright[j][i:max_idx] = data.value
for j, pos in enumerate(pix_pos_beam):
data = slab[:,pos[1], pos[0]]
spectra_beam[j][i:max_idx] = data.value
print ("Scanning slab of channels {} to {}, took {:.2f} s".format(i, max_idx-1, checkpoint-prev))
prev = checkpoint
end = time.time()
print(" ## Finished spectra extract at {}, took {:.2f} s ##".format(
time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(end)), end-start))
# Save the spectra
    names = bright_srcs[comp_name_key]
for idx, spec in enumerate(spectra_bright):
name = get_str(names[idx])
pos = bright_src_pos[idx]
save_spectum(name, vel_cube.spectral_axis.to(u.km/u.s), spec*vel_cube.unit, pos.ra.deg, pos.dec.deg, spectra_folder)
for idx, spec in enumerate(spectra_beam):
name = beams[idx]['name']
pos = beams[idx]['pos']
save_spectum(name, vel_cube.spectral_axis.to(u.km/u.s), spec*vel_cube.unit, pos.ra.deg, pos.dec.deg, spectra_folder)
# Plot the spectra
em_unit = str(vel_cube.unit)
velocities = vel_cube.spectral_axis.to(u.km/u.s)
plot_overlaid_spectra(spectra_bright, names, velocities, em_unit, 'km/s', figures_folder, os.path.basename(cube), 'bright')
plot_all_spectra(spectra_bright, names, velocities, em_unit, 'km/s', figures_folder, 'bright')
bright_spectra_file = figures_folder+'/bright_spectra.html'
output_spectra_page(bright_spectra_file, './bright', "Spectra for 15 Brightest Sources")
if beam_list:
beam_names = beams['name']
spec_res_hz = Spectra.get_spec_resolution(header)
        print ('Spec res (Hz) {}'.format(spec_res_hz))
theoretical_noise = calc_theoretical_rms(spec_res_hz)
print ('Theoretical noise (mJy) {}'.format(theoretical_noise))
plot_overlaid_spectra(spectra_beam, beam_names, velocities, em_unit, 'km/s', figures_folder, os.path.basename(cube), 'beam')
Spectra.plot_beam_locs(cube, beams, theoretical_noise, figures_folder+'/beam_comparison', spectra_folder)
plot_all_spectra(spectra_beam, beam_names, velocities, em_unit, 'km/s', figures_folder, 'beam')
beam_spectra_file = figures_folder+'/beam_spectra.html'
output_spectra_page(beam_spectra_file, './beam', "Spectra for centre of each beam")
# Check for periodicity in the spectra
num_bright_periodic = 0
bright_periodic = []
detections = []
for idx, spec in enumerate(spectra_bright):
if spec.any():
repeats, sig = identify_periodicity(spec)
if len(repeats)>0:
num_bright_periodic += 1
name = get_str(names[idx])
bright_periodic.append(name)
fig = plt.figure(figsize=(8, 6))
plot_periodic_spectrum(spec, fig, name)
fig.savefig(figures_folder+'/{}_periodicity.png'.format(name))
detections.append("Detected periodicity with lag {} of significance {}".format(repeats, sig))
print ("Spectrum for {} has periodicity with lag {} of signficance {}".format(name, repeats, sig))
bright_periodic_str = 'None' if len(bright_periodic) == 0 else '<br/>'.join(bright_periodic)
output_periodic_spectra_page(figures_folder+'/periodic_spectra.html', './', "Spectra with Periodic Features", bright_periodic, detections)
# Output the report
cube_name = os.path.basename(cube)
section = ReportSection('Spectra', cube_name)
section.add_item('Bright Source Spectra', link='figures/bright_spectra.html', image='figures/bright-spectra_sml.png')
    section.add_item('Spectra with periodic features', link='figures/periodic_spectra.html', value=bright_periodic_str)
if beam_list:
section.add_item('Beam Centre Spectra', link='figures/beam_spectra.html', image='figures/beam-spectra_sml.png')
section.add_item('Beam Noise Levels', link='figures/beam_comparison.png', image='figures/beam_comparison_sml.png')
reporter.add_section(section)
metric = ValidationMetric('Spectra periodicity',
'Number of spectra with repeated patterns with more than 3-sigma significance ',
num_bright_periodic, assess_metric(num_bright_periodic,
1, 5, low_good=True))
reporter.add_metric(metric)
def copy_existing_image(image_pattern, fig_folder):
paths = glob.glob(image_pattern)
if len(paths) == 0:
return None
    # Copy the file contents only; the copy gets default permissions and metadata
new_name = fig_folder + "/" + os.path.basename(paths[0])
shutil.copyfile(paths[0], new_name)
return new_name
def add_opt_image_section(title, image_path, fig_folder, dest_folder, section, thumb_size_x=140, thumb_size_y=140):
if image_path == None:
section.add_item(title, value='N/A')
return
img_thumb, img_thumb_rel = Diagnostics.make_thumbnail(image_path, fig_folder, dest_folder, size_x=thumb_size_y,
size_y=thumb_size_y)
image_path_rel = os.path.relpath(image_path, dest_folder)
section.add_item(title, link=image_path_rel, image=img_thumb_rel)
def add_opt_mult_image_section(title, image_paths, fig_folder, dest_folder, section, thumb_size_x=140, thumb_size_y=140):
if image_paths is None:
section.add_item(title, value='N/A')
return
rel_paths = []
rel_thumbs = []
found = False
for image_path in image_paths:
if image_path:
found = True
img_thumb, img_thumb_rel = Diagnostics.make_thumbnail(image_path, fig_folder, dest_folder,
size_x=thumb_size_x, size_y=thumb_size_y)
image_path_rel = os.path.relpath(image_path, dest_folder)
rel_thumbs.append(img_thumb_rel)
rel_paths.append(image_path_rel)
if found:
section.add_item(title, link=rel_paths, image=rel_thumbs)
else:
section.add_item(title, value='N/A')
def report_calibration(diagnostics_dir, dest_folder, reporter):
print('\nReporting calibration from ' + diagnostics_dir)
fig_folder= get_figures_folder(dest_folder)
bandpass, cal_sbid = Bandpass.get_cal_bandpass(diagnostics_dir)
# Plot bandpasses
bp_by_ant_fig = Bandpass.plot_bandpass_by_antenna(bandpass, cal_sbid, fig_folder, 'Calibration')
#bp_by_ant_thumb, bp_by_ant_thumb_rel = Diagnostics.make_thumbnail(bp_by_ant_fig, fig_folder, dest_folder)
#bp_by_ant_fig_rel = os.path.relpath(bp_by_ant_fig, dest_folder)
bp_by_beam_fig = Bandpass.plot_bandpass_by_beam(bandpass, cal_sbid, fig_folder, 'Calibration')
bp_by_beam_thumb, bp_by_beam_thumb_rel = Diagnostics.make_thumbnail(bp_by_beam_fig, fig_folder, dest_folder)
bp_by_beam_fig_rel = os.path.relpath(bp_by_beam_fig, dest_folder)
# Include the pipeline diagnostics
amp_diag_img = copy_existing_image(diagnostics_dir+'/amplitudesDiagnostics_'+str(cal_sbid)+'.png', fig_folder)
phase_diag_img = copy_existing_image(diagnostics_dir+'/phasesDiagnostics_'+str(cal_sbid)+'.png', fig_folder)
cal_param_pdf = copy_existing_image(diagnostics_dir+'/calparameters_*_bp_SB'+str(cal_sbid)+'.smooth.pdf', fig_folder)
cal_param_pdf_rel = os.path.relpath(cal_param_pdf, dest_folder) if cal_param_pdf else None
# Output the report
section = ReportSection('Calibration', '')
section.add_item('Cal SBID', cal_sbid)
add_opt_image_section('Bandpass by Antenna', bp_by_ant_fig, fig_folder, dest_folder, section)
add_opt_image_section('Bandpass by Beam', bp_by_beam_fig, fig_folder, dest_folder, section)
add_opt_image_section('Amplitude Diagnostics', amp_diag_img, fig_folder, dest_folder, section)
add_opt_image_section('Phase Diagnostics', phase_diag_img, fig_folder, dest_folder, section)
if cal_param_pdf_rel:
section.add_item('Parameters', value="pdf", link=cal_param_pdf_rel)
reporter.add_section(section)
def report_diagnostics(diagnostics_dir, sbid, dest_folder, reporter, sched_info, obs_metadata, short_len=500, long_len=2000):
print('\nReporting diagnostics')
fig_folder= get_figures_folder(dest_folder)
is_closepack = sched_info.footprint == None or sched_info.footprint.startswith('closepack')
# Extract metadata
chan_width, cfreq, nchan = Diagnostics.get_freq_details(diagnostics_dir)
chan_width_kHz = round(chan_width/1000., 3) # convert Hz to kHz
theoretical_rms_mjy = np.zeros(len(obs_metadata.fields))
total_rows = sum([field.num_rows for field in obs_metadata.fields])
for idx, field in enumerate(obs_metadata.fields):
field_tobs = obs_metadata.tobs * field.num_rows / total_rows
theoretical_rms_mjy[idx] = calc_theoretical_rms(chan_width, t_obs=field_tobs)
# Extract flagging details
flag_stat_beams, n_flag_ant_beams, ant_flagged_in_all, pct_integ_flagged, baseline_flag_pct, pct_each_integ_flagged, bad_chan_pct_count = Diagnostics.get_flagging_stats(
diagnostics_dir, fig_folder)
print("Antenna flagged in all:", ant_flagged_in_all)
flagged_ant_desc = ", ".join(ant_flagged_in_all) if len(ant_flagged_in_all) > 0 else 'None'
pct_short_base_flagged, pct_medium_base_flagged, pct_long_base_flagged = Diagnostics.calc_flag_percent(
baseline_flag_pct, short_len=short_len, long_len=long_len)
pct_chan_unflagged = Diagnostics.calc_pct_channels_unflagged(bad_chan_pct_count)
# Extract beam RMS
beam_exp_rms = Diagnostics.calc_beam_exp_rms(flag_stat_beams, theoretical_rms_mjy)
rms_min = np.min(beam_exp_rms)
rms_max = np.max(beam_exp_rms)
rms_range_pct = round((rms_max-rms_min)/rms_min*100,1)
# Plot beam stats
beam_nums = Diagnostics.get_beam_numbers_closepack()
flagged_vis_fig = Diagnostics.plot_flag_stat(flag_stat_beams, beam_nums, sbid, fig_folder, closepack=is_closepack)
flagged_ant_fig = Diagnostics.plot_flag_ant(n_flag_ant_beams, beam_nums, sbid, fig_folder, closepack=is_closepack)
beam_exp_rms_fig = Diagnostics.plot_beam_exp_rms(beam_exp_rms, beam_nums, sbid, fig_folder, closepack=is_closepack)
baseline_fig = Diagnostics.plot_baselines(baseline_flag_pct, fig_folder, sbid, short_len=short_len, long_len=long_len)
flag_ant_file_rel = os.path.relpath(fig_folder+'/flagged_antenna.txt', dest_folder)
integ_flag_fig = Diagnostics.plot_integrations(pct_each_integ_flagged, sbid, fig_folder)
flag_pct_dist_fig = Diagnostics.plot_flagging_distribution(bad_chan_pct_count, sbid, fig_folder)
# Output the report
section = ReportSection('Diagnostics', '')
section.add_item('Completely Flagged Antennas', flagged_ant_desc, link=flag_ant_file_rel)
section.add_item('Integrations Completely<br/>Flagged (%)', pct_integ_flagged)
add_opt_image_section('Flagging over Time', integ_flag_fig, fig_folder, dest_folder, section) #, thumb_size_x=70, thumb_size_y=70)
add_opt_image_section('Channel Flagging', flag_pct_dist_fig, fig_folder, dest_folder, section) #, thumb_size_x=70, thumb_size_y=70)
section.add_item('Short Baselines<br/>Flagged (%)', pct_short_base_flagged)
section.add_item('Medium Baselines<br/>Flagged (%)', pct_medium_base_flagged)
section.add_item('Long Baselines<br/>Flagged (%)', pct_long_base_flagged)
add_opt_image_section('Baselines', baseline_fig, fig_folder, dest_folder, section) #, thumb_size_x=70, thumb_size_y=70)
section.start_new_row()
section.add_item('Channel Width (kHz)', chan_width_kHz)
add_opt_image_section('Flagged Visibilities', flagged_vis_fig, fig_folder, dest_folder, section) #, thumb_size_x=140, thumb_size_y=70)
add_opt_image_section('Flagged Antennas', flagged_ant_fig, fig_folder, dest_folder, section) #, thumb_size_x=70, thumb_size_y=70)
add_opt_image_section('Expected RMS per channel', beam_exp_rms_fig, fig_folder, dest_folder, section) #, thumb_size_x=70, thumb_size_y=70)
reporter.add_section(section)
metric = ValidationMetric('Flagged Short Baselines',
'Percent of short baselines ({}m or less) flagged across all integrations and all beams'.format(short_len),
pct_short_base_flagged, assess_metric(pct_short_base_flagged,
20, 40, low_good=True))
reporter.add_metric(metric)
metric = ValidationMetric('Flagged Long Baselines',
'Percent of long baselines ({}m or more) flagged across all integrations and all beams'.format(long_len),
pct_long_base_flagged, assess_metric(pct_long_base_flagged,
30, 45, low_good=True))
reporter.add_metric(metric)
metric = ValidationMetric('Unflagged Integrations',
'Percent of integrations with less than 5% of channels flagged',
pct_chan_unflagged, assess_metric(pct_chan_unflagged,
70, 50))
reporter.add_metric(metric)
metric = ValidationMetric('Expected RMS Difference',
'The percentage change of expected RMS across the field.',
rms_range_pct, assess_metric(rms_range_pct,
10, 30, low_good=True))
reporter.add_metric(metric)
def report_self_cal(cube, image, obs_metadata, dest_folder, reporter):
print('\nReporting self calibration')
fig_folder= get_figures_folder(dest_folder)
field_plots = []
field_names = ""
total_bad_beams = 0
max_bad_ant = 0
for i,field in enumerate(obs_metadata.fields):
if i > 0:
field_names += '<br/>'
field_names += field.name
folder = Diagnostics.find_subdir(cube, image, field.name)
if folder:
sc = SelfCal.prepare_self_cal_set(folder)
plots = SelfCal.plot_self_cal_set(sc, fig_folder)
field_plots.append(plots)
num_bad_beams, num_bad_ant = SelfCal.calc_phase_stability(sc)
print("In field {} found {} bad beams and {} bad antennas".format(field.name, num_bad_beams, num_bad_ant))
total_bad_beams += num_bad_beams
max_bad_ant = max(max_bad_ant, num_bad_ant)
else:
field_plots.append([None, None, None])
plot_array = np.asarray(field_plots)
print ("Overall found {} bad beams and {} bad antennas.".format(total_bad_beams, max_bad_ant))
# Output the report
section = ReportSection('Self Calibration', '')
section.add_item('Field(s)', value=field_names)
add_opt_mult_image_section('Phase Stability', plot_array[:,0], fig_folder, dest_folder, section)
add_opt_mult_image_section('Phase Summary', plot_array[:,1], fig_folder, dest_folder, section)
add_opt_mult_image_section('All Phases', plot_array[:,2], fig_folder, dest_folder, section)
reporter.add_section(section)
metric = ValidationMetric('Bad Phase Antenna',
'Number of unflagged antenna with bad phase solutions',
max_bad_ant, assess_metric(max_bad_ant,
1, 3, low_good=True))
reporter.add_metric(metric)
def main():
start = time.time()
print("#### Started validation at {} ####".format(
(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(start)))))
#ignore astropy warnings
warnings.simplefilter('ignore', AstropyWarning)
# Parse command line options
args = parseargs()
dest_folder = args.output
if not os.path.exists(dest_folder):
os.makedirs(dest_folder)
figures_folder = dest_folder + '/figures'
if not os.path.exists(figures_folder):
os.makedirs(figures_folder)
if args.cube and (not os.path.exists(args.cube) or not os.path.isfile(args.cube)):
raise ValueError('Cube {} could not be found or is not a file.'.format(args.cube))
if args.image and (not os.path.exists(args.image) or not os.path.isfile(args.image)):
raise ValueError('Image {} could not be found or is not a file.'.format(args.image))
if not args.cube and not args.image:
raise ValueError('You must supply either an image or a cube to validate.')
if args.source_cat and (not os.path.exists(args.source_cat) or not os.path.isfile(args.source_cat)):
raise ValueError('Source catalogue {} could not be found or is not a file.'.format(args.source_cat))
if args.emvel:
set_velocity_range(args.emvel, args.nonemvel)
if args.cube:
print ('\nChecking quality level of GASKAP HI cube:', args.cube)
obs_img = args.cube
metrics_subtitle = 'GASKAP HI Validation Metrics'
else:
print ('\nChecking quality level of ASKAP image:', args.image)
obs_img = args.image
metrics_subtitle = 'ASKAP Observation Diagnostics Metrics'
cube_name = os.path.basename(obs_img)
reporter = ValidationReport('GASKAP Validation Report: {}'.format(cube_name), metrics_subtitle=metrics_subtitle)
sched_info = Diagnostics.get_sched_info(obs_img)
diagnostics_dir = Diagnostics.find_diagnostics_dir(args.cube, args.image)
obs_metadata = Diagnostics.get_metadata(diagnostics_dir) if diagnostics_dir else None
sbid = report_observation(obs_img, reporter, args.duration, sched_info, obs_metadata)
if args.cube:
report_cube_stats(args.cube, reporter)
check_for_emission(args.cube, emission_vel_range[0], emission_vel_range[1], reporter, dest_folder, redo=args.redo)
slab = check_for_non_emission(args.cube, non_emission_val_range[0], non_emission_val_range[1], reporter, dest_folder, redo=args.redo)
measure_spectral_line_noise(slab, args.cube, non_emission_val_range[0], non_emission_val_range[1], reporter, dest_folder, args.duration, redo=args.redo)
if args.source_cat or args.beam_list:
extract_spectra(args.cube, args.source_cat, dest_folder, reporter, args.num_spectra, args.beam_list)
if args.image:
report_image_stats(args.image, args.noise, reporter, dest_folder, diagnostics_dir, redo=args.redo)
if diagnostics_dir:
report_calibration(diagnostics_dir, dest_folder, reporter)
report_diagnostics(diagnostics_dir, sbid, dest_folder, reporter, sched_info, obs_metadata)
if obs_metadata:
report_self_cal(args.cube, args.image, obs_metadata, dest_folder, reporter)
print ('\nProducing report to', dest_folder)
output_html_report(reporter, dest_folder)
output_metrics_xml(reporter, dest_folder)
end = time.time()
print("#### Completed validation at {} ####".format(
(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(end)))))
print('\nChecks completed in {:.02f} s'.format((end - start)))
return 0
if __name__ == '__main__':
exit(main())
```
#### File: gaskap-validation/validation/SelfCal.py
```python
import glob
import os
import sys
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from casacore.tables import *
import seaborn as sns
class SelfCalSolutions:
# phase is [time, beam, ant, pol]
def __init__(self):
"""Initialises parameters for reading a selfcal table
"""
self.nsol = None
self.nant = None
self.nbeam = 36
self.npol = None
# selfcal is an array in order [time, beam, ant, pol] of phase angle and amplitude value
self.selfcal = None
self.selfcal_times = None
self.selfcal_flags = None
self.field = None
def load(self, base_dir):
flist = glob.glob(base_dir + "/cont_gains*tab")
flist.sort()
filename = flist[0]
print (filename)
pos = filename.find("beam")
if pos == -1:
raise Exception("Can't find beam information in " + filename)
wildcard = filename[:pos+4] + "??" + filename[pos+6:]
flist = glob.glob(wildcard)
flist.sort()
first_beam = flist[0]
tb = table(first_beam, readonly=True, ack=False)
t_vals = tb.getcol("TIME")
sc_vals = tb.getcol("GAIN",1,1)
self.selfcal_times = t_vals[1:]
self.nsol = t_vals.shape[0] - 1
gain_shape = sc_vals.shape
self.npol = gain_shape[3]
self.nant = gain_shape[2]
tb.close()
        self.selfcal = np.zeros((self.nsol, 36, self.nant, self.npol), dtype=np.complex128)
        self.selfcal_flags = np.zeros((self.nsol, 36, self.nant, self.npol), dtype=bool)
for beam in range(self.nbeam):
fname = wildcard.replace("??", "%02d" %(beam))
            if not os.path.exists(fname):
continue
tb = table(fname, readonly=True, ack=False)
t_vals = tb.getcol("TIME", 1, self.nsol)
sc_vals = tb.getcol("GAIN", 1, self.nsol)
flag_vals = tb.getcol("GAIN_VALID", 1, self.nsol)
for index in range(self.nsol):
self.selfcal[index, beam] = sc_vals[index, 0, :, :]
self.selfcal_flags[index, beam] = np.invert(flag_vals[index, 0, :, :])
self.selfcal[np.where(self.selfcal_flags)] = np.nan
self.field = os.path.basename(base_dir)
print("Read %d solutions, %d antennas, %d beams, %d polarisations" %(self.nsol, self.nant, self.nbeam, self.npol))
def plotGains(self, ant, outFile = None):
fig = plt.figure(figsize=(14, 14))
amplitudes = np.abs(self.selfcal)
phases = np.angle(self.selfcal, deg=True)
times = np.array(range(self.nsol))
plt.subplot(1, 1, 1)
if self.nant == 36:
plt.title("ak%02d" %(ant+1), fontsize=8)
else:
plt.title("ant%02d" %(ant), fontsize=8)
for beam in range(self.nbeam):
plt.plot(times, phases[:,beam,ant,0], marker=None, label="beam %d" %(beam))
# plt.plot(times, phases[:,ant,beam,1], marker=None, color="red")
plt.ylim(-200.0, 200.0)
#rms = np.sqrt(np.mean(np.square(phases[:,beam,ant,0])))
#print ("ant ak{:02d} beam {:02d} rms={:.2f}".format(ant+1, beam, rms))
plt.legend()
plt.tight_layout()
if outFile == None:
plt.show()
else:
plt.savefig(outFile)
plt.close()
def _plot_ant_phase(sc, ant, outFile = None):
fig = plt.figure(figsize=(14, 14))
amplitudes = np.abs(sc.selfcal)
phases = np.angle(sc.selfcal, deg=True)
times = np.array(range(sc.nsol))
ax = plt.subplot(1, 1, 1)
if sc.nant == 36:
plt.title("ak%02d" %(ant+1), fontsize=8)
else:
plt.title("ant%02d" %(ant), fontsize=8)
low = np.nanpercentile(phases[:,:,ant,0], 2.5, axis=(1))
high = np.nanpercentile(phases[:,:,ant,0], 97.5, axis=(1))
colours = sns.color_palette()
ax.plot(np.nanmedian(phases[:,:,ant,0], axis=(1)), color=colours[0], label='median')
ax.fill_between(range(phases.shape[0]), low, high, color=colours[0], alpha= .2, label=r'95\% range')
ax.plot(np.nanmax(phases[:,:,ant,0], axis=(1)), color=colours[1], ls=':', label='maximum')
ax.plot(np.nanmin(phases[:,:,ant,0], axis=(1)), color=colours[1], ls=':', label='minimum')
plt.ylim(-200.0, 200.0)
plt.legend()
plt.tight_layout()
if outFile == None:
plt.show()
else:
plt.savefig(outFile)
plt.close()
def _plot_rms_map(sc, field, outFile = None):
phases = np.angle(sc.selfcal, deg=True)
times = np.array(range(sc.nsol))
#rms = np.sqrt(np.nanmean(np.square(phases[:,:,:,:]), axis=0))
rms = np.std(phases[:,:,:,:], axis=0)
print (np.nanmin(rms), np.nanmedian(rms), np.nanmax(rms))
ant_list = ['ak{:02}'.format(i+1) for i in range(36)]
beam_list = ['b{:02}'.format(i) for i in range(36)]
sns.set()
fig, axs = plt.subplots(1, 2, figsize=(20,8))
pol = ['XX', 'YY']
for i, ax in enumerate(axs):
sns.heatmap(rms[:,:,i].transpose(), ax=ax, cmap='GnBu', square=True, xticklabels=beam_list, yticklabels=ant_list,
vmin=0, vmax=40, linewidths=.5, cbar_kws={"shrink": .9, "label": 'Phase Standard Deviation (deg)'})
ax.set_title('Self-cal phase for %s pol %s' % (field, pol[i]))
ax.set_xlabel(r'Beam')
axs[0].set_ylabel(r'Antenna')
if outFile == None:
plt.show()
else:
plt.savefig(outFile, bbox_inches='tight')
plt.close()
def _plot_summary_phases(sc, field, outFile = None):
phases = np.angle(sc.selfcal, deg=True)
times = np.array(range(sc.nsol))
sns.set()
fig, axs = plt.subplots(1, 2, figsize=(20,8))
pol = ['XX', 'YY']
colours = sns.color_palette()
for i, ax in enumerate(axs):
low = np.nanpercentile(phases[:,:,:,i], 2.5, axis=(1,2))
high = np.nanpercentile(phases[:,:,:,i], 97.5, axis=(1,2))
low_ant = np.nanmin(np.nanpercentile(phases[:,:,:,i], 2.5, axis=(1)), axis=1)
high_ant = np.nanmax(np.nanpercentile(phases[:,:,:,i], 97.5, axis=(1)), axis=1)
low_med = np.nanmin(np.nanmedian(phases[:,:,:,i], axis=(1)), axis=1)
high_med = np.nanmax(np.nanmedian(phases[:,:,:,i], axis=(1)), axis=1)
print (low.shape, low_ant.shape)
ax.fill_between(range(phases.shape[0]), low_ant, high_ant, color=colours[2], alpha= .2, label="95 percentile range")
ax.plot(np.nanmedian(phases[:,:,:,i], axis=(1,2)), color=colours[0], label="median")
ax.fill_between(range(phases.shape[0]), low_med, high_med, color=colours[0], alpha= .4, label="median range")
ax.plot(np.nanmax(phases[:,:,:,i], axis=(1,2)), color=colours[1], ls=':', alpha=.6, label="maximum")
ax.plot(np.nanmin(phases[:,:,:,i], axis=(1,2)), color=colours[1], ls=':', alpha=.6, label="minimum")
ax.set_title('Self-cal phase for %s pol %s' % (field, pol[i]))
ax.set_xlabel(r'Time (Integration number)')
ax.set_ylim(-200.0, 200.0)
ax.legend()
axs[0].set_ylabel(r'Phase (deg)')
if outFile == None:
plt.show()
else:
plt.savefig(outFile, bbox_inches='tight')
plt.close()
def _plot_median_phases(sc, field, outFile = None):
phases = np.angle(sc.selfcal, deg=True)
times = np.array(range(sc.nsol))
sns.set()
fig, axs = plt.subplots(1, 2, figsize=(20,8))
pol = ['XX', 'YY']
colours = sns.color_palette()
for i, ax in enumerate(axs):
means = np.nanmedian(phases[:,:,:,i], axis=1)
for ant in range(36):
if ant > 30:
ax.plot(means[:,ant], label="ak%02d" %(ant+1), lw=2, zorder=2)
else:
ax.plot(means[:,ant], color='grey', lw=1, zorder=1)
ax.set_title('Median self-cal phase for %s pol %s' % (field, pol[i]))
ax.set_xlabel(r'Time (Integration number)')
axs[0].legend()
axs[0].set_ylabel(r'Phase (deg)')
if outFile == None:
plt.show()
else:
plt.savefig(outFile, bbox_inches='tight')
plt.close()
def _plot_ant_phases(sc, field, outFile = None):
phases = np.angle(sc.selfcal, deg=True)
times = np.array(range(sc.nsol))
sns.set()
fig, axs = plt.subplots(1, 2, figsize=(20,8))
pol = ['XX', 'YY']
colours = sns.color_palette()
for i, ax in enumerate(axs):
means = np.nanmedian(phases[:,:,:,i], axis=1)
for ant in range(36):
if ant > 30:
ax.plot(means[:,ant], label="ak%02d" %(ant+1), lw=2, zorder=2)
else:
ax.plot(means[:,ant], color='grey', lw=1, zorder=1)
ax.set_title('Median self-cal phase for %s pol %s' % (field, pol[i]))
ax.set_xlabel(r'Time (Integration number)')
axs[0].legend()
axs[0].set_ylabel(r'Phase (deg)')
if outFile == None:
plt.show()
else:
plt.savefig(outFile, bbox_inches='tight')
plt.close()
def _plot_all_phases(sc, field, outFile = None):
phases = np.angle(sc.selfcal, deg=True)
times = np.array(range(sc.nsol))
sns.set()
fig, axs = plt.subplots(6, 12, figsize=(40,16))
pols = ['XX', 'YY']
colours = sns.color_palette()
for i, pol in enumerate(pols):
for ant in range(36):
ax = axs[ant // 6, i*6+ant%6]
for beam in range(sc.nbeam):
ax.plot(times, phases[:,beam,ant,i], marker=None, label="beam %d" %(beam))
ax.set_ylim(-200.0, 200.0)
            ax.set_title('Phases for ak%02d pol %s' % (ant+1, pol))
#ax.set_xlabel(r'Time (Integration number)')
#axs[0].legend()
axs[0,0].set_ylabel(r'Phase (deg)')
if outFile == None:
plt.show()
else:
plt.savefig(outFile, bbox_inches='tight', dpi=300)
plt.close()
def _plot_amp_rms_map(sc, field, outFile = None):
amplitudes = np.absolute(sc.selfcal)
times = np.array(range(sc.nsol))
rms = np.std(amplitudes[:,:,:,:], axis=0)
print (np.nanmin(rms), np.nanmedian(rms), np.nanmax(rms))
ant_list = ['ak{:02}'.format(i+1) for i in range(36)]
beam_list = ['b{:02}'.format(i) for i in range(36)]
sns.set()
fig, axs = plt.subplots(1, 2, figsize=(20,8))
pol = ['XX', 'YY']
for i, ax in enumerate(axs):
sns.heatmap(rms[:,:,i].transpose(), ax=ax, cmap='GnBu', square=True, xticklabels=beam_list, yticklabels=ant_list,
vmin=0, vmax=0.1, linewidths=.5, cbar_kws={"shrink": .9, "label": 'Bandpass Standard Deviation (Jy)'})
ax.set_title('Bandpass stability for %s pol %s' % (field, pol[i]))
ax.set_xlabel(r'Beam')
axs[0].set_ylabel(r'Antenna')
#plt.tight_layout()
if outFile == None:
plt.show()
else:
plt.savefig(outFile, bbox_inches='tight')
plt.close()
def prepare_self_cal_set(folder):
"""
Prepare a set of self cal solutions for analysis.
Parameters
----------
folder: path
Path to the folder containing the self cal solution files. Normally named after the field/interleave.
Returns
-------
The SelfCalSolutions object for use by other calls.
"""
sc = SelfCalSolutions()
sc.load(folder)
return sc
def plot_self_cal_set(sc, fig_folder):
"""
Produce plots for a set of self calibration solutions for a field.
Parameters
----------
sc: SelfCalSolutions
The loaded self cal solutions object for the field/interleave.
fig_folder: string
Path to the folder we should put any plots or reports in.
Returns
-------
The paths to the RMS map plot and the summary plot produced for this field.
"""
rms_map_plot = fig_folder + '/sc_heatmap_{}.png'.format(sc.field)
summary_plot = fig_folder + '/sc_summary_{}.png'.format(sc.field)
all_phases_plot = fig_folder + '/sc_phases_{}.png'.format(sc.field)
_plot_rms_map(sc, sc.field, rms_map_plot)
_plot_summary_phases(sc, sc.field, summary_plot)
_plot_all_phases(sc, sc.field, all_phases_plot)
return rms_map_plot, summary_plot, all_phases_plot
def calc_phase_stability(sc, phase_rms_max=40):
"""
Calculate summary statistics of the phase stability as recorded in the self-cal solution.
Parameters
----------
sc: SelfCalSolutions
The loaded self cal solutions object for the field/interleave.
phase_rms_max: double
The maximum allowed median rms before a beam or antenna is classified as bad.
Returns
-------
The number of bad beams and bad antennas.
"""
phases = np.angle(sc.selfcal, deg=True)
times = np.array(range(sc.nsol))
rms = np.std(phases[:,:,:,:], axis=0)
# phase is [time, beam, ant, pol]
bad_beams = []
bad_ant = []
for i in range(2): # polarisations XX and YY
bad_ant.append(np.median(rms[:,:,i], axis=0) >= phase_rms_max)
bad_beams.append(np.median(rms[:,:,i], axis=1) >= phase_rms_max)
bad_ant_either = bad_ant[0] | bad_ant[1]
bad_beam_either = bad_beams[0] | bad_beams[1]
print('ants', bad_ant_either)
print('beams', bad_beam_either)
return np.sum(bad_beam_either), np.sum(bad_ant_either)
def find_field_folder(cube, image, field_name):
potential_parent_dirs = []
if cube:
potential_parent_dirs.append(os.path.dirname(cube))
if image:
potential_parent_dirs.append(os.path.dirname(image))
for parent in potential_parent_dirs:
field_dir = parent + '/' + field_name
if os.path.isdir(field_dir):
return field_dir
return None
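# --- Usage sketch -------------------------------------------------------------
# A minimal, illustrative driver for this module (an addition for reference, not
# part of the original pipeline): point it at a field folder containing
# cont_gains*tab tables and an existing folder for the output figures.
if __name__ == '__main__':
    if len(sys.argv) < 3:
        print('Usage: python SelfCal.py <field_folder> <figure_folder>')
    else:
        demo_sc = prepare_self_cal_set(sys.argv[1])
        demo_plots = plot_self_cal_set(demo_sc, sys.argv[2])
        demo_bad_beams, demo_bad_ants = calc_phase_stability(demo_sc, phase_rms_max=40)
        print('Plots written:', demo_plots)
        print('Bad beams: {}, bad antennas: {}'.format(demo_bad_beams, demo_bad_ants))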
```
|
{
"source": "jdauphar/ad5372_gui",
"score": 3
}
|
#### File: jdauphar/ad5372_gui/ad5372.py
```python
import numpy as np
class ad5372:
    def __init__(self, current_register="X1A", reference_voltage=5):
        self.current_register = current_register
        self.reference_voltage = reference_voltage
def set_reference_voltage(self, voltage):
self.reference_voltage = voltage
def get_reference_voltage(self):
return self.reference_voltage
def change_current_register(self):
if self.current_register == "X1A":
self.current_register = "X1B"
else:
self.current_register = "X1A"
def get_current_register(self):
return self.current_register
    def format_voltages(self, data_list): # need to convert strings to numbers between 0 and 65535
        # Sketch of the intended conversion (assumption: each entry is [channel, voltage_string]
        # and 0..reference_voltage maps linearly onto the 16-bit code range 0..65535).
        return [int(round(float(channel[1]) / self.reference_voltage * 65535)) for channel in data_list]
def send_voltages(self, data_list):
# data list should be list of [int , string] of length 32
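        # Note: self.spi is not created in this class; it is assumed to be set externally
        # to an initialised SPI interface object before send_voltages is called.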
voltage_list = self.format_voltages(data_list)
# starting with group0, channel0. then group0, channel1, and so on
mode_address_byte = np.uint8(200) # start on group 0 channel 0: 11001000
for value in voltage_list:
self.spi.transfer2(mode_address_byte) # address
self.spi.transfer2(np.uint8(value >> 8) ) # high data byte
self.spi.transfer2(np.uint8(value)) # low data byte
mode_address_byte+=1
```
|
{
"source": "jd-au/RadioAbsTools",
"score": 2
}
|
#### File: RadioAbsTools/RadioAbsTools/cube_tools.py
```python
import math
import os
from astropy.coordinates import SkyCoord, Angle
from astropy.io import fits, votable
from astropy.wcs import WCS
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse, Rectangle
import numpy as np
import numpy.core.records as rec
_allowed_weights = ['square', 'linear', 'none']
class IslandRange(object):
def __init__(self, isle_id):
self.isle_id = isle_id
def read_sources(filename, min_sn=10, min_flux=0.02):
print ("Extracting sources from " + filename)
sources = []
if not os.path.exists(filename):
print ("Warning: File %s does not exist, skipping source read." % \
filename)
return sources
src_votable = votable.parse(filename, pedantic=False)
results = src_votable.get_first_table().array
for row in results:
id = str(row['island']) + "-" + str(row['source'])
ra = row['ra']
dec = row['dec']
rms = row['local_rms']
flux = row['peak_flux']
sn = flux / rms
print ("Found source %s at %.4f, %.4f with flux %.4f and rms of %.4f "
"giving S/N of %.4f" % (id, ra, dec, flux, rms, sn))
if sn > min_sn and flux > min_flux:
src = dict(zip(results.dtype.names,row))
src['id'] = id
src['sn'] = sn
#sources.append([ra, dec, id, flux, row['island']])
sources.append(src)
else:
print ("Ignoring source at %.4f, %.4f due to low S/N of %.4f or "
"flux of %.4f" % (ra, dec, sn, flux))
return sources
def read_islands(filename):
print ("Extracting islands from " + filename)
islands = {}
if not os.path.exists(filename):
print ("Warning: File %s does not exist, skipping island read." % \
filename)
return {}
isle_votable = votable.parse(filename, pedantic=False)
results = isle_votable.get_first_table().array
for row in results:
islands[row['island']] = row
return islands
def calc_island_ranges(islands, pixel_size):
island_ranges = []
for island in islands.values():
ir = IslandRange(island['island'])
ra = island['ra']
dec = island['dec']
ra_width = abs(island['x_width'] * pixel_size[0])
dec_width = abs(island['y_width'] * pixel_size[1])
ir.min_ra = ra - (ra_width/2)
ir.max_ra = ra + (ra_width/2)
ir.min_dec = dec - (dec_width/2)
ir.max_dec = dec + (dec_width/2)
#print("Island %d goes from %f to %f (%d*%f)/ %f to %f (%d*%f)" % (
# island['island'], ir.min_ra, ir.max_ra, island['x_width'], pixel_size[0], ir.min_dec, ir.max_dec,
# island['y_width'], pixel_size[1]))
island_ranges.append(ir)
return island_ranges
def find_edges(fluxes, num_edge_chan):
"""
Seek from the edges to find where the data starts for this set of fluxes.
This accounts for an optional number of channels in the data which have no
data recorded.
:param fluxes: The array of fluxes to be checked.
:param num_edge_chan: The number of edge channels with data to be skipped
:return: The index of the first and last cell to have data.
"""
l_edge = 0
r_edge = len(fluxes)-1
while l_edge < len(fluxes)-1 and fluxes[l_edge] == 0:
l_edge += 1
while r_edge > 0 and fluxes[r_edge] == 0:
r_edge -= 1
return l_edge + num_edge_chan, r_edge - num_edge_chan
def get_weighting_array(data, velocities, continuum_start_vel, continuum_end_vel, weighting='square'):
"""
    Calculate a weight for each spatial pixel of the cubelet, based on its mean continuum flux measured over a velocity range where no gas emission is expected.
:param data: A cubelet to be analysed, should be a 3D array of flux values.
:param velocities: A numpy array of velocity values in m/s
:param continuum_start_vel: The lower bound of the continuum velocity range (in m/s)
:param continuum_end_vel: The upper bound of the continuum velocity range (in m/s)
:param weighting: The weighting scheme to use, one of square, linear, none
    :return: A 2D array of weighting values for the spatial pixels of the cubelet.
"""
if (continuum_start_vel > np.max(velocities)) or (continuum_end_vel < np.min(velocities)):
raise Exception("Continuum range {} to {} is outside of the data velocity range {} to {}".format(
continuum_start_vel, continuum_end_vel, np.min(velocities), np.max(velocities)))
    continuum_range = np.where(continuum_start_vel < velocities)
    if len(continuum_range[0]) == 0:
        return np.zeros(data.shape[1:])
if weighting not in _allowed_weights:
raise Exception("Weighting must by one of ", ', '.join(_allowed_weights))
bin_start = continuum_range[0][0]
continuum_range = np.where(velocities < continuum_end_vel)
bin_end = continuum_range[0][-1]
# print("Using bins %d to %d (velocity range %d to %d) out of %d" % (
# bin_start, bin_end, continuum_start_vel, continuum_end_vel, len(velocities)))
# print(data.shape)
continuum_sample = np.array(data[bin_start:bin_end, :, :])
continuum_sample[continuum_sample<0]=0
# print ("...gave sample of", continuum_sample)
mean_cont = np.nanmean(continuum_sample, axis=0)
mean_cont[mean_cont<0]=0
if weighting == 'square':
mean_sq = mean_cont ** 2
sum_sq = np.nansum(mean_sq)
weights = mean_sq / sum_sq
elif weighting == 'linear':
weights = mean_cont / np.nansum(mean_cont)
else:
# No weights, just trim to ellipse
weights = mean_cont
weights[weights>0]=1
weights = weights/np.sum(weights)
# print("Got weighting of {} from {} and {}".format(weighting, mean_sq, sum_sq))
return weights
def point_in_ellipse(origin, point, a, b, pa_rad, verbose=False):
"""
Identify if the point is inside the ellipse.
    :param origin: A SkyCoord defining the centre of the ellipse.
    :param point: A SkyCoord defining the point to be checked.
    :param a: The semi-major axis in arcsec of the ellipse.
    :param b: The semi-minor axis in arcsec of the ellipse.
    :param pa_rad: The position angle of the ellipse. This is the angle of the major axis measured in radians East of
North (or CCW from the y axis).
"""
# Convert point to be in plane of the ellipse, accounting for distortions at high declinations
p_ra_dist = (point.icrs.ra.degree - origin.icrs.ra.degree)* math.cos(origin.icrs.dec.rad)
p_dec_dist = point.icrs.dec.degree - origin.icrs.dec.degree
    # Calculate the angle and radius of the test point relative to the centre of the ellipse
# Note that we reverse the ra direction to reflect the CCW direction
radius = math.sqrt(p_ra_dist**2 + p_dec_dist**2)
diff_angle = (math.pi/2 + pa_rad) if p_dec_dist == 0 else math.atan(p_ra_dist / p_dec_dist) - pa_rad
# Obtain the point position in terms of the ellipse major and minor axes
minor = radius * math.sin(diff_angle)
major = radius * math.cos(diff_angle)
if verbose:
print ('point relative to ellipse centre angle:{} deg radius:{:.4f}" maj:{:.2f}" min:{:.2f}"'.format(math.degrees(diff_angle), radius*3600,
major*3600, minor*3600))
a_deg = a / 3600.0
b_deg = b / 3600.0
# Calc distance from origin relative to a and b
dist = math.sqrt((major / a_deg) ** 2 + (minor / b_deg) ** 2)
if verbose:
print("Point %s is %f from ellipse %f, %f, %f at %s." % (point, dist, a, b, math.degrees(pa_rad), origin))
return round(dist,3) <= 1.0
def get_integrated_spectrum(image, w, src, velocities, continuum_start_vel, continuum_end_vel, radius=None,
plot_weight_path=None, weighting='square'):
"""
Calculate the integrated spectrum of the component.
:param image: The image's data array
:param w: The image's world coordinate system definition
:param src: The details of the component being processed, must have ra, dec, a, b, pa and comp_name keys
:param velocities: A numpy array of velocity values in m/s
:param continuum_start_vel: The lower bound of the continuum velocity range (in m/s)
:param continuum_end_vel: The upper bound of the continuum velocity range (in m/s)
:param radius: The radius of the box around the source centre where data will be checked for membership of the
source ellipse. Default is to use the semi-major axis of the source.
:param plot_weight_path: The path to which diagnostic plots are output. Default is not to output plots.
:param weighting: The weighting scheme to use, one of square, linear, none
:return: An array of average flux/pixel across the component at each velocity step
"""
if weighting not in _allowed_weights:
raise Exception("Weighting must by one of ", ', '.join(_allowed_weights))
if plot_weight_path:
print ("getting spectrum for source " + str(src))
has_stokes = len(image.shape) > 3
pix = w.wcs_world2pix(src['ra'], src['dec'], 0, 0, 1) if has_stokes else w.wcs_world2pix(src['ra'], src['dec'], 0, 1)
x_coord = int(np.round(pix[0])) - 1 # 266
y_coord = int(np.round(pix[1])) - 1 # 197
if not radius:
radius = math.ceil(src['a'])
#print("Translated %.4f, %.4f to %d, %d" % (
# src['ra'], src['dec'], x_coord, y_coord))
#print (w)
y_min = max(0,y_coord - radius)
y_max = min(image.shape[-2]-1, y_coord + radius)
x_min = max(0,x_coord - radius)
x_max = min(image.shape[-1]-1, x_coord + radius)
data = np.copy(image[0, :, y_min:y_max+1, x_min:x_max+1]) if has_stokes else np.copy(image[:, y_min:y_max+1, x_min:x_max+1])
if plot_weight_path:
# non wcs plot
fig, ax = plt.subplots(1, 1, figsize=(9, 3))
ax.imshow(np.nansum(data, axis=0), origin='lower')
plt.title(src['comp_name'])
fname = plot_weight_path + '/'+ src['comp_name'] + '_data.png'
print ('Plotting data to ' + fname)
plt.savefig(fname, bbox_inches='tight')
plt.close()
# wcs plot
plt.subplot(projection=w.celestial)
if has_stokes:
plt.imshow(image[0,10,:,:], origin='lower')
else:
plt.imshow(image[10,:,:], origin='lower')
plt.grid(color='white', ls='solid')
fname = plot_weight_path + '/'+ src['comp_name'] + '_image_wcs.png'
print ('Plotting image wcs to ' + fname)
plt.savefig(fname, bbox_inches='tight')
plt.close()
origin = SkyCoord(src['ra'], src['dec'], frame='icrs', unit="deg")
pa_rad = math.radians(src['pa'])
total_pixels = (y_max-y_min +1) * (x_max-x_min +1)
outside_pixels = 0
for x in range(x_min, x_max+1):
for y in range(y_min, y_max+1):
eq_pos = w.wcs_pix2world(x, y, 0, 0, 0) if has_stokes else w.wcs_pix2world(x, y, 0, 0)
point = SkyCoord(eq_pos[0], eq_pos[1], frame='icrs', unit="deg")
in_ellipse = point_in_ellipse(origin, point, src['a'], src['b'], pa_rad)
if not in_ellipse:
data[:, y-y_min, x-x_min] = 0
outside_pixels += 1
#print (point.ra, point.dec, x, y, in_ellipse)
# print("Found {} pixels out of {} inside the component {} at {} {}".format(total_pixels - outside_pixels, total_pixels,
# src['comp_name'],
# point.galactic.l.degree,
# point.galactic.b.degree))
weights = get_weighting_array(data, velocities, continuum_start_vel, continuum_end_vel, weighting=weighting)
integrated = np.nansum(data * weights, axis=(1, 2))
inside_pixels = total_pixels - outside_pixels
if inside_pixels <= 0:
print ("Error: No data for component!")
else:
integrated /= inside_pixels
if plot_weight_path:
fig, ax = plt.subplots(1, 1, figsize=(9, 3))
pos = ax.imshow(weights, origin='lower')
fig.colorbar(pos, ax=ax)
plt.title(src['comp_name'])
fname = plot_weight_path + '/'+ src['comp_name'] + '_weights.png'
print ('Plotting weights to ' + fname)
print ('Ellipse ra={} dec={} pa={:.03f} deg {:.03f}pi rad'.format(src['ra'], src['dec'], src['pa'], pa_rad/math.pi))
plt.savefig(fname, bbox_inches='tight')
plt.close()
return integrated
def extract_spectra(fits_filename, src_filename, isle_filename, continuum_start_vel, continuum_end_vel, num_edge_chan = 10):
#num_edge_chan = 10
#fits_filename = "{0}/1420/magmo-{1}_1420_sl_restor.fits".format(daydirname,
# field)
#src_filename = "{0}/{1}_src_comp.vot".format(daydirname, field)
#isle_filename = "{0}/{1}_src_isle.vot".format(daydirname, field)
spectra = dict()
source_ids = dict()
if not os.path.exists(fits_filename):
print ("Warning: File %s does not exist, skipping extraction." % \
fits_filename)
return spectra, source_ids, []
sources = read_sources(src_filename)
islands = read_islands(isle_filename)
hdulist = fits.open(fits_filename)
image = hdulist[0].data
header = hdulist[0].header
w = WCS(header)
index = np.arange(header['NAXIS3'])
beam_maj = header['BMAJ'] * 60 * 60
beam_min = header['BMIN'] * 60 * 60
beam_area = math.radians(header['BMAJ']) * math.radians(header['BMIN'])
# print ("Beam was %f x %f arcsec giving area of %f radians^2." % (beam_maj, beam_min, beam_area))
ranges = calc_island_ranges(islands, (header['CDELT1'], header['CDELT2']))
velocities = w.wcs_pix2world(10,10,index[:],0,0)[2]
for src in sources:
c = SkyCoord(src['ra'], src['dec'], frame='icrs', unit="deg")
img_slice = get_integrated_spectrum(image, w, src, velocities, continuum_start_vel, continuum_end_vel)
l_edge, r_edge = find_edges(img_slice, num_edge_chan)
# print("Using data range %d - %d out of %d channels." % (
# l_edge, r_edge, len(img_slice)))
# plotSpectrum(np.arange(slice.size), slice)
spectrum_array = rec.fromarrays(
[np.arange(img_slice.size)[l_edge:r_edge],
velocities[l_edge:r_edge],
img_slice[l_edge:r_edge]],
names='plane,velocity,flux')
spectra[c.galactic.l] = spectrum_array
# isle = islands.get(src['island'], None)
src_map = {'id': src['id'], 'flux': src['peak_flux'], 'pos': c, 'beam_area': beam_area}
src_map['a'] = src['a']
src_map['b'] = src['b']
src_map['pa'] = src['pa']
# print (src_map)
source_ids[c.galactic.l] = src_map
del image
del header
hdulist.close()
return spectra, source_ids, ranges
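# --- Usage sketch -------------------------------------------------------------
# A small, self-contained demonstration of the helper routines on synthetic data
# (an addition for reference; the values below are illustrative only).
if __name__ == '__main__':
    # find_edges: locate the first and last channels with recorded data.
    demo_fluxes = np.array([0, 0, 1.2, 0.9, 1.1, 0, 0])
    print('Data edges:', find_edges(demo_fluxes, num_edge_chan=0))  # -> (2, 4)
    # get_weighting_array: continuum-squared weights for a tiny cubelet.
    demo_data = np.zeros((40, 5, 5))
    demo_data[:, 1:4, 1:4] = 1.0
    demo_data[:, 2, 2] = 2.0
    demo_velocities = np.linspace(60e3, 99e3, num=40)  # m/s
    demo_weights = get_weighting_array(demo_data, demo_velocities, 60e3, 70e3)
    print('Weights sum to', np.sum(demo_weights))
    # point_in_ellipse: the centre of a 6" x 3" ellipse at PA 45 deg is trivially inside.
    demo_centre = SkyCoord(10.0, -30.0, frame='icrs', unit='deg')
    print(point_in_ellipse(demo_centre, demo_centre, 6, 3, math.radians(45)))  # -> True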
```
#### File: RadioAbsTools/RadioAbsTools/spectrum_tools.py
```python
import matplotlib.pyplot as plt
import numpy as np
def get_mean_continuum(velocity, flux, continuum_start_vel, continuum_end_vel, verbose=False):
"""
Calculate the mean of the continuum values. This is based on precalculated regions where there is no gas expected.
:param velocity: The velocity values of the spectrum to be analysed (in m/s).
:param flux: The flux values of the spectrum to be analysed.
:param continuum_start_vel: The lower bound of the continuum velocity range (in m/s)
:param continuum_end_vel: The upper bound of the continuum velocity range (in m/s)
:param verbose: If True, log details of the calculation.
:return: A pair of float which is the mean continuum flux and the standard deviation of the optical depth.
"""
vel_filt = (continuum_start_vel < velocity) & (continuum_end_vel > velocity)
if np.sum(vel_filt) ==0:
return None, None
continuum_sample = flux[vel_filt]
# print ("...gave sample of", continuum_sample)
mean_cont = np.mean(continuum_sample)
sd_cont = np.std(continuum_sample/mean_cont)
return mean_cont, sd_cont
def plot_absorption_spectrum(velocity, optical_depth, filename, title, con_start_vel, con_end_vel, sigma_tau, range=None, figure=None):
"""
Output a plot of opacity vs LSR velocity to a specified file.
:param velocity: The velocity data (in m/s)
:param optical_depth: The opacity values for each velocity step
:param filename: The file the plot should be written to. Should be
an .eps or .pdf file.
:param title: The title for the plot
:param con_start_vel: The minimum velocity that the continuum was measured at.
:param con_end_vel: The maximum velocity that the continuum was measured at.
:param sigma_tau: The absorption noise level for each channel - will be plotted as an envelope (optional)
:param range: The velocity range to be plotted, or None to plot the whole velocity range (default)
:param figure: Figure to be used for the plot, or None if a new figure is to be created (default)
"""
fig = figure if figure else plt.figure(figsize=(8, 4.8))
plt.plot(velocity/1000, optical_depth, lw=1)
if range:
plt.xlim(range)
if len(sigma_tau) > 0:
tau_max = 1 + sigma_tau
tau_min = 1 - sigma_tau
plt.fill_between(velocity/1000, tau_min, tau_max, facecolor='lightgray', color='lightgray')
plt.axhline(1, color='r')
plt.axvline(con_start_vel/1000, color='g', linestyle='dashed')
plt.axvline(con_end_vel/1000, color='g', linestyle='dashed')
plt.xlabel(r'Velocity relative to LSR (km/s)')
plt.ylabel(r'$e^{(-\tau)}$')
plt.title(title)
plt.grid(True)
plt.savefig(filename)
#plt.show()
plt.close()
return
def calc_rating(opacity_range, max_s_max_n, continuum_sd):
"""
Calculates the Brown et al 2014 (2014ApJS..211...29B) quality rating for a spectrum. Note that two tests, the
absorption uncertainty envelope and number of channels of emission are not currently included.
:param opacity_range: The range of exp(-tau) values.
:param max_s_max_n: The ratio of maximum signal to maximum noise.
    :param continuum_sd: standard deviation of absorption in off-line channels
:return: A quality rating from A to D
"""
rating_codes = 'ABCDEF'
rating = 0
if opacity_range > 1.5:
rating += 1
if max_s_max_n < 3:
rating += 1
if continuum_sd*3 > 1:
rating += 1
return rating_codes[rating]
def rate_spectrum(opacity, continuum_sd):
min_opacity = np.min(opacity)
max_opacity = np.max(opacity)
opacity_range = max_opacity - min_opacity
max_s_max_n = (1 - min_opacity) / (max_opacity - 1)
rating = calc_rating(opacity_range, max_s_max_n, continuum_sd)
return rating, opacity_range, max_s_max_n
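# --- Usage sketch -------------------------------------------------------------
# A minimal, illustrative example on a synthetic absorption spectrum (an addition
# for reference; the numbers below are placeholders rather than survey data).
if __name__ == '__main__':
    demo_velocity = np.linspace(-100e3, 100e3, num=201)   # m/s
    demo_flux = 1.0 + 0.02 * np.sin(np.arange(201))       # continuum with a small ripple
    demo_flux[95:106] = 0.4                               # a fake absorption feature near 0 km/s
    mean_cont, sd_cont = get_mean_continuum(demo_velocity, demo_flux, 50e3, 90e3)
    print('Mean continuum: {:.3f}, optical depth noise: {:.4f}'.format(mean_cont, sd_cont))
    demo_opacity = demo_flux / mean_cont
    rating, opacity_range, max_s_max_n = rate_spectrum(demo_opacity, sd_cont)
    print('Rating: {}, opacity range: {:.2f}, max S/max N: {:.2f}'.format(rating, opacity_range, max_s_max_n))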
```
#### File: RadioAbsTools/tests/test_cube_tools.py
```python
from __future__ import print_function, division
import math
from astropy.coordinates import SkyCoord
import astropy.units as u
import numpy as np
from RadioAbsTools import cube_tools
def _pixel_in_ellipse(ellipse_centre, x, y, a, b, pa, dec_offset=0*u.deg, expected=True):
point = SkyCoord((19-x)*u.arcsec, (19-y)*u.arcsec+dec_offset)
result = cube_tools.point_in_ellipse(ellipse_centre, point, a, b, pa)
if expected is not None: # and expected != result:
print ("Details for point x:{} y:{} at point {} arcsec".format(x, y, point.to_string(style='decimal', unit=u.arcsec)))
cube_tools.point_in_ellipse(ellipse_centre, point, a, b, pa, verbose=True)
return result
def _plot_grid(ellipse_centre, a, b, pa, dec_offset=0*u.deg):
grid = np.zeros((20,20))
for x in range(grid.shape[0]):
for y in range(grid.shape[1]):
if _pixel_in_ellipse(ellipse_centre, x, y, a, b, pa, dec_offset=dec_offset, expected=None):
grid[y,x] = 1
#grid[12,5] = 2
print (grid)
def test_point_in_ellipse_vertical():
# Define an ellipse centred at cell 10,10 in a 20x20 arcsec grid at low declination
ellipse_centre = SkyCoord(9*u.arcsec, 9*u.arcsec)
a = 6 # arcsec
b = 3 # arcsec
pa = 0*u.deg.to(u.rad) # rad
_plot_grid(ellipse_centre, a, b, pa)
assert _pixel_in_ellipse(ellipse_centre, 10, 10, a, b, pa), "Centre should be in ellipse"
assert _pixel_in_ellipse(ellipse_centre, 10, 4, a, b, pa), "Top of ellipse should be inside"
assert _pixel_in_ellipse(ellipse_centre, 10, 16, a, b, pa), "Bottom of ellipse should be inside"
assert _pixel_in_ellipse(ellipse_centre, 9, 10, a, b, pa), "Left of ellipse should be inside"
assert _pixel_in_ellipse(ellipse_centre, 11, 10, a, b, pa), "Right of ellipse should be inside"
assert _pixel_in_ellipse(ellipse_centre, 7, 10, a, b, pa), "Left of ellipse should be inside"
assert _pixel_in_ellipse(ellipse_centre, 13, 10, a, b, pa), "Right of ellipse should be inside"
assert not _pixel_in_ellipse(ellipse_centre, 6, 10, a, b, pa, expected=False), "Off left of ellipse should be outside"
assert not _pixel_in_ellipse(ellipse_centre, 14, 10, a, b, pa, expected=False), "Off right of ellipse should be outside"
def test_point_in_ellipse_horizontal():
# Define an ellipse centred at cell 10,10 in a 20x20 arcsec grid at low declination
ellipse_centre = SkyCoord(9*u.arcsec, 9*u.arcsec)
a = 6 # arcsec
b = 3 # arcsec
pa = 90*u.deg.to(u.rad) # rad
_plot_grid(ellipse_centre, a, b, pa)
assert _pixel_in_ellipse(ellipse_centre, 10, 10, a, b, pa), "Centre should be in ellipse"
assert _pixel_in_ellipse(ellipse_centre, 10, 7, a, b, pa), "Top of ellipse should be inside"
assert _pixel_in_ellipse(ellipse_centre, 10, 13, a, b, pa), "Bottom of ellipse should be inside"
assert _pixel_in_ellipse(ellipse_centre, 5, 10, a, b, pa), "Left of ellipse should be inside"
assert _pixel_in_ellipse(ellipse_centre, 15, 10, a, b, pa), "Right of ellipse should be inside"
assert _pixel_in_ellipse(ellipse_centre, 4, 10, a, b, pa), "Left of ellipse should be inside"
assert _pixel_in_ellipse(ellipse_centre, 16, 10, a, b, pa), "Right of ellipse should be inside"
assert not _pixel_in_ellipse(ellipse_centre, 3, 10, a, b, pa, expected=False), "Off left of ellipse should be outside"
assert not _pixel_in_ellipse(ellipse_centre, 17, 10, a, b, pa, expected=False), "Off right of ellipse should be outside"
assert not _pixel_in_ellipse(ellipse_centre, 10, 6, a, b, pa, expected=False), "Above ellipse should be outside"
assert not _pixel_in_ellipse(ellipse_centre, 10, 14, a, b, pa, expected=False), "Below ellipse should be outside"
def test_point_in_ellipse_diagonal():
# Define an ellipse centred at cell 10,10 in a 20x20 arcsec grid at low declination
ellipse_centre = SkyCoord(9*u.arcsec, 9*u.arcsec)
a = 6 # arcsec
b = 3 # arcsec
pa = 45*u.deg.to(u.rad) # rad
_plot_grid(ellipse_centre, a, b, pa)
assert _pixel_in_ellipse(ellipse_centre, 10, 10, a, b, pa), "Centre should be in ellipse"
assert _pixel_in_ellipse(ellipse_centre, 6, 6, a, b, pa), "Top-left of ellipse should be inside"
assert _pixel_in_ellipse(ellipse_centre, 14, 14, a, b, pa), "Bottom-right of ellipse should be inside"
assert _pixel_in_ellipse(ellipse_centre, 7, 10, a, b, pa), "Left of ellipse should be inside"
assert _pixel_in_ellipse(ellipse_centre, 13, 10, a, b, pa), "Right of ellipse should be inside"
assert not _pixel_in_ellipse(ellipse_centre, 3, 10, a, b, pa, expected=False), "Top-right mirror of ellipse should be outside"
assert not _pixel_in_ellipse(ellipse_centre, 17, 10, a, b, pa, expected=False), "Bottom-left mirror of ellipse should be outside"
assert not _pixel_in_ellipse(ellipse_centre, 10, 6, a, b, pa, expected=False), "Above ellipse should be outside"
assert not _pixel_in_ellipse(ellipse_centre, 10, 14, a, b, pa, expected=False), "Above ellipse should be outside"
assert not _pixel_in_ellipse(ellipse_centre, 9, 5, a, b, pa, expected=False), "Above ellipse should be outside"
assert not _pixel_in_ellipse(ellipse_centre, 11, 15, a, b, pa, expected=False), "Below ellipse should be outside"
def test_point_in_ellipse_acute():
# Define an ellipse centred at cell 10,10 in a 20x20 arcsec grid at low declination
ellipse_centre = SkyCoord(9*u.arcsec, 9*u.arcsec)
a = 6 # arcsec
b = 3 # arcsec
pa = 20*u.deg.to(u.rad) # rad
_plot_grid(ellipse_centre, a, b, pa)
assert _pixel_in_ellipse(ellipse_centre, 10, 10, a, b, pa), "Centre should be in ellipse"
assert _pixel_in_ellipse(ellipse_centre, 8, 5, a, b, pa), "Top-left of ellipse should be inside"
assert _pixel_in_ellipse(ellipse_centre, 13, 14, a, b, pa), "Bottom-right of ellipse should be inside"
assert _pixel_in_ellipse(ellipse_centre, 7, 10, a, b, pa), "Left of ellipse should be inside"
assert _pixel_in_ellipse(ellipse_centre, 13, 10, a, b, pa), "Right of ellipse should be inside"
assert not _pixel_in_ellipse(ellipse_centre, 3, 10, a, b, pa, expected=False), "Top-right mirror of ellipse should be outside"
assert not _pixel_in_ellipse(ellipse_centre, 17, 10, a, b, pa, expected=False), "Bottom-left mirror of ellipse should be outside"
assert not _pixel_in_ellipse(ellipse_centre, 10, 4, a, b, pa, expected=False), "Above ellipse should be outside"
assert not _pixel_in_ellipse(ellipse_centre, 10, 16, a, b, pa, expected=False), "Above ellipse should be outside"
assert not _pixel_in_ellipse(ellipse_centre, 9, 4, a, b, pa, expected=False), "Above ellipse should be outside"
assert not _pixel_in_ellipse(ellipse_centre, 11, 16, a, b, pa, expected=False), "Below ellipse should be outside"
def test_point_in_ellipse_obtuse():
# Define an ellipse centred at cell 10,10 in a 20x20 arcsec grid at low declination
ellipse_centre = SkyCoord(9*u.arcsec, 9*u.arcsec)
a = 6 # arcsec
b = 3 # arcsec
pa = 160*u.deg.to(u.rad) # rad
_plot_grid(ellipse_centre, a, b, pa)
assert _pixel_in_ellipse(ellipse_centre, 10, 10, a, b, pa), "Centre should be in ellipse"
assert _pixel_in_ellipse(ellipse_centre, 12, 5, a, b, pa), "Top-right of ellipse should be inside"
assert _pixel_in_ellipse(ellipse_centre, 8, 14, a, b, pa), "Bottom-left of ellipse should be inside"
assert _pixel_in_ellipse(ellipse_centre, 7, 10, a, b, pa), "Left of ellipse should be inside"
assert _pixel_in_ellipse(ellipse_centre, 12, 10, a, b, pa), "Right of ellipse should be inside"
assert not _pixel_in_ellipse(ellipse_centre, 3, 10, a, b, pa, expected=False), "Top-right mirror of ellipse should be outside"
assert not _pixel_in_ellipse(ellipse_centre, 17, 10, a, b, pa, expected=False), "Bottom-left mirror of ellipse should be outside"
assert not _pixel_in_ellipse(ellipse_centre, 10, 4, a, b, pa, expected=False), "Above ellipse should be outside"
assert not _pixel_in_ellipse(ellipse_centre, 10, 16, a, b, pa, expected=False), "Above ellipse should be outside"
assert not _pixel_in_ellipse(ellipse_centre, 9, 4, a, b, pa, expected=False), "Above ellipse should be outside"
assert not _pixel_in_ellipse(ellipse_centre, 11, 16, a, b, pa, expected=False), "Below ellipse should be outside"
def test_point_in_ellipse_vertical_high_dec():
# Define an ellipse centred at cell 10,10 in a 20x20 arcsec grid at low declination
dec_offset = -70*u.deg
ellipse_centre = SkyCoord(9*u.arcsec, 9*u.arcsec+dec_offset)
a = 6 # arcsec
b = 3 # arcsec
pa = 0*u.deg.to(u.rad) # rad
_plot_grid(ellipse_centre, a, b, pa, dec_offset=dec_offset)
# Note our test grid doesn't include the distortion due to declination so we expect the ellipse to wider instead
assert _pixel_in_ellipse(ellipse_centre, 10, 10, a, b, pa, dec_offset=dec_offset), "Centre should be in ellipse"
assert _pixel_in_ellipse(ellipse_centre, 10, 4, a, b, pa, dec_offset=dec_offset), "Top of ellipse should be inside"
assert _pixel_in_ellipse(ellipse_centre, 10, 16, a, b, pa, dec_offset=dec_offset), "Bottom of ellipse should be inside"
assert _pixel_in_ellipse(ellipse_centre, 2, 10, a, b, pa, dec_offset=dec_offset), "Left of ellipse should be inside"
assert _pixel_in_ellipse(ellipse_centre, 18, 10, a, b, pa, dec_offset=dec_offset), "Right of ellipse should be inside"
assert not _pixel_in_ellipse(ellipse_centre, 1, 10, a, b, pa, dec_offset=dec_offset, expected=False), "Off left of ellipse should be outside"
assert not _pixel_in_ellipse(ellipse_centre, 19, 10, a, b, pa, dec_offset=dec_offset, expected=False), "Off right of ellipse should be outside"
def test_get_weighting_array():
# array is order z, y, x
data = np.zeros((40,10,10))
# outer ring
data[:,3:7,3:7] = 1
data[:,4:6,4:6] = 2
data_slice = data[0,:,:]
print (data_slice)
velocities = np.linspace(60, 99, num=40)*1000
weights = cube_tools.get_weighting_array(data, velocities, 60*(u.km/u.s).to(u.m/u.s), 70*(u.km/u.s).to(u.m/u.s))
print (weights)
assert data_slice.shape == weights.shape
assert round(np.sum(weights),3) == 1
assert np.allclose(weights[data_slice == 0], 0)
assert np.allclose(weights[data_slice == 2], 0.142857)
assert np.allclose(weights[data_slice == 1], 0.035714)
# Check that data outside the continuum range are ignored
data[10:,:,:] = 1000
weights = cube_tools.get_weighting_array(data, velocities, 60*(u.km/u.s).to(u.m/u.s), 70*(u.km/u.s).to(u.m/u.s))
assert data_slice.shape == weights.shape
assert round(np.sum(weights),3) == 1
assert np.allclose(weights[data_slice == 0], 0)
assert np.allclose(weights[data_slice == 2], 0.142857)
assert np.allclose(weights[data_slice == 1], 0.035714)
# Check that missing planes are ignored
data[5,:,:] = 0
weights = cube_tools.get_weighting_array(data, velocities, 60*(u.km/u.s).to(u.m/u.s), 70*(u.km/u.s).to(u.m/u.s))
assert data_slice.shape == weights.shape
assert round(np.sum(weights),3) == 1
assert np.allclose(weights[data_slice == 0], 0)
assert np.allclose(weights[data_slice == 2], 0.142857)
assert np.allclose(weights[data_slice == 1], 0.035714)
data[6,:,:] = np.nan
weights = cube_tools.get_weighting_array(data, velocities, 60*(u.km/u.s).to(u.m/u.s), 70*(u.km/u.s).to(u.m/u.s))
assert data_slice.shape == weights.shape
assert round(np.sum(weights),3) == 1
assert np.allclose(weights[data_slice == 0], 0)
assert np.allclose(weights[data_slice == 2], 0.142857)
assert np.allclose(weights[data_slice == 1], 0.035714)
# Check that all continuum planes are used
data[7,3:7,3:7] = 3
weights = cube_tools.get_weighting_array(data, velocities, 60*(u.km/u.s).to(u.m/u.s), 70*(u.km/u.s).to(u.m/u.s))
assert data_slice.shape == weights.shape
assert round(np.sum(weights),3) == 1
assert np.allclose(weights[data_slice == 0], 0)
assert np.allclose(weights[data_slice == 2], 0.117036)
assert np.allclose(weights[data_slice == 1], 0.044321)
```
|
{
"source": "jd-au/thor-hi",
"score": 3
}
|
#### File: jd-au/thor-hi/filter_cube.py
```python
from __future__ import print_function, division
import argparse
import sys
import time
from astropy.io import fits
import numpy as np
import pyfftw
def parseargs():
"""
Parse the command line arguments
:return: An args map with the parsed arguments
"""
parser = argparse.ArgumentParser(
description="Filter the large scale emission from an imag cube using Fourier transforms",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("input", help="The name of the file to be filtered.")
parser.add_argument("output", help="The name of the filtered file to be produced.")
parser.add_argument("-r", "--radius", help="The radius of the filter to apply to the centre of the Fourier image.",
default=20, type=int)
parser.add_argument("-t", "--threads", help="The number of threads to be used for the Fourier transform.",
default=4, type=int)
args = parser.parse_args()
return args
def do_fftw(image, threads=2):
"""
Calculate the Fourier transform of the input 2 dimensional image using the
pyFFTW library.
:param image: The square float64 image to be transformed.
:param threads: The number of threads to be used by pyFFTW.
    :return: The Fourier transform.
"""
image_in = pyfftw.empty_aligned(image.shape, dtype='float64')
image_in[:] = image
fft_object = pyfftw.builders.fft2(image_in, axes=(0, 1), threads=threads)
image_out = fft_object()
return image_out
def do_ifftw(image, threads=2):
"""
Calculate the inverse Fourier transform of the input 2 dimensional Fourier image using the
pyFFTW library.
:param image: The square complex128 image to be transformed.
:param threads: The number of threads to be used by pyFFTW.
    :return: The inverse Fourier transform.
"""
image_in = pyfftw.empty_aligned(image.shape, dtype='complex128')
image_in[:] = image
fft_object = pyfftw.builders.ifft2(image_in, axes=(0, 1), threads=threads)
image_out = fft_object()
return image_out
def fft_image(image, threads=4):
"""
Produce a processed Fourier transform of the input image. The image must be
square and of type float64 and real only. The Fourier transform will be
shifted to have the zero-frequency component in the centre of the image.
:param image: The square image to be transformed.
:param threads: The number of threads to be used by pyFFTW.
:return: The centred complex Fourier transform.
"""
#ft_img = np.fft.fft2(image)
ft_img = do_fftw(image, threads)
#print(ft_img.shape)
ft_shift = np.fft.fftshift(ft_img)
return ft_shift
def ifft_image(ft_shift, threads=4):
"""
Invert a Fourier transform of an image. The resulting image will be
square and of type complex128. The real aspect of this image will represent the image.
The Fourier transform will be unshifted to move the zero-frequency component away from the centre of the image.
:param ft_shift: The centred complex Fourier transform.
:param threads: The number of threads to be used by pyFFTW.
:return: The complex inverse Fourier transformed image.
"""
unshifted = np.fft.ifftshift(ft_shift)
#inverted = np.fft.ifft2(unshifted)
inverted = do_ifftw(unshifted, threads=threads)
return inverted
def filter_plane(plane, radius=20, threads=4):
# Prepare the spatial slice for fft
start = time.time()
flipped = np.concatenate((plane, np.fliplr(plane)), axis=1)
mirrored = np.concatenate((flipped, np.flipud(flipped)), axis=0)
x_pad = (mirrored.shape[0] - mirrored.shape[1]) // 2
padded = np.lib.pad(mirrored, ((0, 0), (x_pad, x_pad)), 'constant')
prep_end = time.time()
print(' Prep for plane took %.02f s' % (prep_end - start))
sys.stdout.flush()
# Do the fft
ft_img = fft_image(padded, threads)
ft_end = time.time()
print(' FFT for plane took %.02f s' % (ft_end - prep_end))
sys.stdout.flush()
    # Filter out the large scale emission
centre_y = ft_img.shape[0] // 2
centre_x = ft_img.shape[1] // 2
ft_img[centre_y - radius:centre_y + radius, centre_x - radius:centre_x + radius] = 0
# Invert the fft to get back the image
inverted = ifft_image(ft_img, threads)
ift_end = time.time()
print(' iFFT for plane took %.02f s' % (ift_end - ft_end))
sys.stdout.flush()
post_psd_2d = inverted.real
centre_y = post_psd_2d.shape[0] // 2
centre_x = post_psd_2d.shape[1] // 2
post_plane = post_psd_2d[:centre_y, x_pad:centre_x].astype(np.float32)
return post_plane
def filter_image(image, radius=40, threads=4):
#pyfftw.interfaces.cache.enable()
filtered = np.zeros(image.shape, dtype=np.float32)
for idx in range(image.shape[0]):
print("Processing plane", idx)
sys.stdout.flush()
plane = image[idx, :, :]
post_plane = filter_plane(plane, radius, threads)
filtered[idx, :, :] = post_plane
return filtered
def load_image(filename):
hdulist = fits.open(filename, memmap=True)
image = hdulist[0].data
print("Image shape is", image.shape)
header = hdulist[0].header
return image, header
def save_image(filename, image, header, radius):
header['history'] = "Emission filtered with radius {} Fourier filter.".format(radius)
hdu = fits.PrimaryHDU(image, header)
hdu.writeto(filename, overwrite=True)
def main():
"""
Main script for filter_cube
:return: The exit code
"""
args = parseargs()
start = time.time()
print("#### Started filtering of cube {} at {} ####".format(args.input,
time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(start))))
# Filter the image
orig_image, header = load_image(args.input)
filtered = filter_image(orig_image, radius=args.radius, threads=args.threads)
save_image(args.output, filtered, header, args.radius)
# Report
end = time.time()
print('#### Filtering completed at %s ####' %
time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(end)))
print('Filtering took %.02f s' %
(end - start))
return 0
# Run the script if it is called from the command line
if __name__ == "__main__":
exit(main())
```
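For reference, a minimal sketch of calling `filter_plane` directly on a synthetic square plane rather than a whole cube (an illustration, not part of the original script; it assumes `pyfftw` is installed and that the file above is importable as `filter_cube`):
```python
import numpy as np
from filter_cube import filter_plane
# A synthetic 512x512 float64 plane; real use reads spatial planes from a FITS cube.
plane = np.random.normal(size=(512, 512)).astype(np.float64)
# filter_plane mirrors and pads the plane, zeroes a 2*radius square at the centre of
# the shifted Fourier image (the largest spatial scales), and transforms back.
filtered = filter_plane(plane, radius=20, threads=4)
print(filtered.shape)  # same shape as the input plane
```
The radius of 20 matches the script's default; a larger radius removes progressively smaller spatial scales as well.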
|
{
"source": "JDavid17/CoolCompiler",
"score": 3
}
|
#### File: CoolCompiler/src/cool2cil.py
```python
import src.ast as ast
import src.cil_nodes as cil_node
import src.visitor as visitor
import src.scope as Scope
from src.scope import Scope
from src.type import ctype
class vTable:
"""
{type:{f_name: int}}
"""
pass
BACKUP_REGISTER = 't0'
class CILScope:
def __init__(self, classname, parentScope=None):
self.classname = classname
self.parentScope = parentScope
self.vars = []
def add_var(self, var):
self.vars.append(var)
def get_real_name(self, var_name: str):
for name in self.vars:
if var_name == name.split('@')[0]:
return name
if self.parentScope:
return self.parentScope.get_real_name(var_name)
return ''
class Unique_name_generator:
def __init__(self):
self.name_keys = {}
def generate(self, var: str, just_int=False):
value = 1
if var in self.name_keys:
self.name_keys[var] += 1
value = self.name_keys[var]
else:
self.name_keys[var] = 1
if just_int:
return value
return f'{var}@{str(value)}'
def reset(self):
self.name_keys = {}
class Cool2cil:
def _build_tree(self, scope):
t = scope.get_types().copy()
t.pop('SELF_TYPE')
tree = {i: [] for i in t}
for i in t:
if t[i].parent:
tree[t[i].parent.name] += [t[i].name]
return tree
def dfs(self, ty):
t = {x: [-1, -1] for x in ty}
self._dfs('Object', ty, 0, t)
return t
def _dfs(self, node, dic_type, time, tree):
tree[node][0] = time
n_t = time + 1
for i in dic_type[node]:
n_t = self._dfs(i, dic_type, n_t, tree)
tree[node][1] = n_t
return n_t + 1
def calc_static(self, type):
for i in self.dtpe:
if i.cType == type:
return len(i.attributes)
def sort_type(self, type_list: dict):
type_list = list(type_list.values())
type_map = {}
bit_mask = [False] * len(type_list)
for i in range(len(type_list)):
ctype = type_list[i]
type_map[ctype.name] = i
bit_mask[type_map['Object']] = True
sorted = {}
self_type = type_list[type_map['SELF_TYPE']]
sorted[self_type.name] = self_type
object_type = type_list[type_map['Object']]
sorted[object_type.name] = object_type
for i in range(len(type_list)):
ctype = type_list[i]
if ctype.name == 'Object' or ctype.name == 'SELF_TYPE':
continue
if not bit_mask[i]:
self.sort_type_(ctype, type_list, type_map, bit_mask, sorted)
return sorted
def sort_type_(self, ctype, type_list, type_map, mask, sort_list):
mask[type_map[ctype.name]] = True
parent = ctype.parent
iparent = type_map[parent.name]
if not mask[iparent]:
self.sort_type_(parent, type_list, type_map, mask, sort_list)
sort_list[ctype.name] = ctype
def get_node_by_type(self, t, classes):
for i in classes:
if t == i.name:
return i
def replace(self, method, tp):
t = []
rep = False
for i in range(len(tp.methods)):
if (list(method.keys())[0]).split('.')[1] != (list(tp.methods[i].keys())[0]).split('.')[1]:
t.append(tp.methods[i])
else:
rep = True
t.append(method)
if not rep:
t.append(method)
tp.methods = t
def __init__(self):
self.tree = None
self.constructors = {}
self.data = []
self.dtpe = []
self.code = []
self.vtable = vTable()
self.name_generator = Unique_name_generator()
self.keys_generator = Unique_name_generator()
def _dispatch(self, ctype, method):
for i in self.dtpe:
if i.cType == ctype:
for j in range(len(i.methods)):
if i.methods[j].split('.')[1] == method:
return j
def _att_offset(self, ct, att):
for i in self.dtpe:
if i.cType == ct:
for j in range(len(i.attributes)):
if i.attributes[j] == att:
return j
@visitor.on('node')
def visit(self, node, scope):
pass
@visitor.when(ast.Program)
def visit(self, node: ast.Program, _):
scope_root = Scope(None, None)
for classDef in node.classes:
new_type = ctype(classDef.name)
scope_root.createType(new_type)
for classDef in node.classes:
a = scope_root.getType(classDef.name)
a.parent = scope_root.getType(classDef.parent)
new_types = self.sort_type(scope_root.get_types())
node.classes = list(map(lambda x: self.get_node_by_type(x, node.classes), list(new_types.keys())[6:]))
scope_root.set_types(new_types)
t = scope_root.get_types()
self.tree = self._build_tree(scope_root)
self.tree = self.dfs(self.tree)
for i in list(new_types.keys())[1:6]:
new_types[i].fix_methods()
tp = new_types[i].methods
if i != 'Object':
new_types[i].methods = []
new_types[i].add_method(*tuple(new_types['Object'].methods + tp))
for j in node.classes:
scope = Scope(j.name, scope_root)
methods = filter(lambda x: type(x) is ast.ClassMethod, j.features)
methods = list(methods)
attribs = filter(lambda x: type(x) is ast.ClassAttribute, j.features)
attribs = list(attribs)
p_type = scope.getType(j.parent)
scope.getType(j.name).add_method(*tuple(scope.getType('Object').methods))
scope.getType(j.name).add_attrib(*tuple(p_type.attributes))
for i in p_type.methods:
self.replace(i, scope.getType(j.name))
for i in attribs:
scope.getType(j.name).add_attrib({i.name: scope.getType(i.attr_type)})
for i in methods:
m = {f'{j.name}.{i.name}': {
'formal_params': {
t.name: scope.getType(t.param_type) for t in i.formal_params
},
'return_type': scope.getType(i.return_type),
'body': i.body
}}
self.replace(m, scope.getType(j.name))
for i in list(t.keys())[1:]:
meths = []
for m in t[i].methods:
meths.append(list(m.keys())[0])
attrs = []
for a in t[i].attributes:
attrs.append(list(a.keys())[0])
t1 = self.tree[t[i].name]
self.dtpe.append(cil_node.DotType(t[i].name, attrs, meths, t1[0], t1[1]))
self.dtpe[1].attributes.append('#')
self.dtpe[2].attributes.append('#')
self.dtpe[3].attributes.append('#')
# m = None
# classofp = []
# for i in node.classes:
# if i.name != 'Main':
# classofp.append(i)
# else:
# m = i
# classofp.append(m)
# node.classes = classofp
for i in node.classes:
self.visit(i, None)
pass
@visitor.when(ast.Class)
def visit(self, node: ast.Class, scope: CILScope):
self.constructors[node.name] = ast.ClassMethod(node.name,[],node.name,ast.Block([]))
if node.parent not in ['IO', 'Object']:
self.constructors[node.name].body.expr_list += self.constructors[node.parent].body.expr_list[:-1]
attrs = filter(lambda x: type(x) is ast.ClassAttribute, node.features)
attrs = list(attrs)
for attr in attrs:
self.visit(attr, CILScope(node.name))
methods = filter(lambda x: type(x) is ast.ClassMethod, node.features)
methods = list(methods)
self.constructors[node.name].body.expr_list.append(ast.Self())
methods.append(self.constructors[node.name])
for method in methods:
self.visit(method, CILScope(node.name))
@visitor.when(ast.ClassAttribute)
def visit(self, node: ast.ClassAttribute, scope):
if node.init_expr:
self.constructors[scope.classname].body.expr_list.append(ast.Assignment(ast.Object(node.name), node.init_expr))
elif node.static_type.name == 'Int':
self.constructors[scope.classname].body.expr_list.append(ast.Assignment(ast.Object(node.name), ast.Integer(0)))
elif node.static_type.name == 'Bool':
self.constructors[scope.classname].body.expr_list.append(ast.Assignment(ast.Object(node.name), ast.Boolean(False)))
elif node.static_type.name == 'String':
self.constructors[scope.classname].body.expr_list.append(ast.Assignment(ast.Object(node.name), ast.String("")))
@visitor.when(ast.ClassMethod)
def visit(self, node: ast.ClassMethod, scope: CILScope):
self.name_generator.reset()
params = ['self']
for p in node.formal_params:
redefinition = self.name_generator.generate(p.name)
params.append(redefinition)
scope.add_var(redefinition)
var, exprs = self.visit(node.body, scope)
method = cil_node.CILMethod(node.name, scope.classname, params, var, exprs)
self.code.append(method)
@visitor.when(ast.Assignment)
def visit(self, node: ast.Assignment, scope: CILScope):
real_name = scope.get_real_name(node.instance.name)
expr = self.visit(node.expr, scope)
if real_name == '':
assignment_node = cil_node.CILSetAttr(self._att_offset(scope.classname, node.instance.name))
else:
assignment_node = cil_node.CILAssignment(real_name)
return expr[0], expr[1] + [assignment_node]
@visitor.when(ast.Block)
def visit(self, node: ast.Block, scope: CILScope):
var = []
codes = []
for expr in node.expr_list:
tmp = self.visit(expr, scope)
var += tmp[0]
codes += tmp[1]
codes.append(cil_node.CILBlock(len(node.expr_list)))
return var, codes
@visitor.when(ast.DynamicDispatch)
def visit(self, node: ast.DynamicDispatch, scope: CILScope):
args = []
var = []
codes = []
for item in node.arguments:
tmp = self.visit(item, scope)
codes += tmp[1]
if item.static_type.name in ['Int', 'Bool']:
codes += [cil_node.CILDynamicDispatch(0, self._dispatch(item.static_type.name, "copy"))]
tmp = self.visit(node.instance, scope)
codes += tmp[1]
t = node.instance.static_type.name
if t == 'SELF_TYPE':
t = scope.classname
codes.append(cil_node.CILDynamicDispatch(len(node.arguments), self._dispatch(t, node.method)))
return var, codes
@visitor.when(ast.StaticDispatch)
def visit(self, node: ast.StaticDispatch, scope: CILScope):
args = []
var = []
codes = []
for item in node.arguments:
tmp = self.visit(item, scope)
codes += tmp[1]
if item.static_type.name in ['Int', 'Bool']:
codes += [cil_node.CILDynamicDispatch(0, self._dispatch(item.static_type.name, "copy"))]
tmp = self.visit(node.instance, scope)
codes += tmp[1]
codes.append(cil_node.CILStaticDispatch(len(node.arguments), node.dispatch_type, node.method))
return var, codes
@visitor.when(ast.NewObject)
def visit(self, node: ast.NewObject, scope):
return [], [cil_node.CILNew(node.type, self.calc_static(node.static_type.name))]
@visitor.when(ast.Integer)
def visit(self, node: ast.Integer, scope):
return [], [cil_node.CILInteger(node.content)]
@visitor.when(ast.Boolean)
def visit(self, node: ast.Boolean, scope):
value = 1 if node.content else 0
return [], [cil_node.CILBoolean(value)]
@visitor.when(ast.String)
def visit(self, node: ast.String, scope):
self.data.append(node.content)
return [], [cil_node.CILString(len(self.data)-1)]
@visitor.when(ast.Addition)
def visit(self, node: ast.Addition, scope):
fst = self.visit(node.first, scope)
snd = self.visit(node.second, scope)
return fst[0] + snd[0], [cil_node.CILArithm(fst[1], snd[1], '+')]
@visitor.when(ast.Subtraction)
def visit(self, node: ast.Subtraction, scope):
fst = self.visit(node.first, scope)
snd = self.visit(node.second, scope)
return fst[0] + snd[0], [cil_node.CILArithm(fst[1], snd[1], '-')]
@visitor.when(ast.Multiplication)
def visit(self, node: ast.Multiplication, scope):
fst = self.visit(node.first, scope)
snd = self.visit(node.second, scope)
return fst[0] + snd[0], [cil_node.CILArithm(fst[1], snd[1], '*')]
@visitor.when(ast.Division)
def visit(self, node: ast.Division, scope):
fst = self.visit(node.first, scope)
snd = self.visit(node.second, scope)
return fst[0] + snd[0], [cil_node.CILArithm(fst[1], snd[1], '/')]
@visitor.when(ast.Equal)
def visit(self, node: ast.Equal, scope):
fst = self.visit(node.first, scope)
snd = self.visit(node.second, scope)
if node.first.static_type.name in ['Int', 'Bool']:
return fst[0] + snd[0], [cil_node.CILEq(fst[1], snd[1])]
if node.first.static_type.name == 'String':
return fst[0] + snd[0], [cil_node.CILEqString(fst[1], snd[1])]
return fst[0] + snd[0], [cil_node.CILEqObject(fst[1], snd[1])]
@visitor.when(ast.LessThan)
def visit(self, node: ast.LessThan, scope):
fst = self.visit(node.first, scope)
snd = self.visit(node.second, scope)
return fst[0] + snd[0], [cil_node.CILBoolOp(fst[1], snd[1], '<')]
@visitor.when(ast.LessThanOrEqual)
def visit(self, node: ast.LessThanOrEqual, scope):
fst = self.visit(node.first, scope)
snd = self.visit(node.second, scope)
return fst[0] + snd[0], [cil_node.CILBoolOp(fst[1], snd[1], '<=')]
@visitor.when(ast.IntegerComplement)
def visit(self, node: ast.IntegerComplement, scope):
t = self.visit(node.integer_expr, scope)
return t[0], [cil_node.CILNArith(t[1])]
@visitor.when(ast.BooleanComplement)
def visit(self, node: ast.BooleanComplement, scope):
t = self.visit(node.boolean_expr, scope)
return t[0], [cil_node.CILNBool(t[1])]
@visitor.when(ast.Let)
def visit(self, node: ast.Let, scope: CILScope):
new_scope = CILScope(scope.classname, scope)
var = []
codes = []
for item in node.declarations:
tmp = self.visit(item, new_scope)
var += tmp[0]
codes += tmp[1]
tmp = self.visit(node.body, new_scope)
var += tmp[0]
codes += tmp[1]
return var, codes
@visitor.when(ast.Formal)
def visit(self, node: ast.Formal, scope: CILScope):
new_name = self.name_generator.generate(node.name)
scope.add_var(new_name)
var = [new_name]
codes = []
if node.init_expr:
tmp = self.visit(node.init_expr, scope)
var += tmp[0]
codes += tmp[1]
codes.append(cil_node.CILFormal(new_name))
elif node.static_type.name in ['Bool', 'Int', 'String']:
codes = [
cil_node.CILInteger(0), cil_node.CILFormal(new_name)
] if node.static_type.name != 'String' else [
cil_node.CILNewString(), cil_node.CILFormal(new_name)
]
else:
codes = [cil_node.CILFormal(new_name, False)]
return var, codes
@visitor.when(ast.If)
def visit(self, node: ast.If, scope: CILScope):
predicate_visit = self.visit(node.predicate, scope)
if_visit = self.visit(node.then_body, scope)
else_visit = self.visit(node.else_body, scope)
int_key = self.keys_generator.generate('if', True)
return predicate_visit[0] + if_visit[0] + else_visit[0], \
[cil_node.CILIf(predicate_visit[1], if_visit[1], else_visit[1], int_key)]
@visitor.when(ast.WhileLoop)
def visit(self, node: ast.WhileLoop, scope: CILScope):
predicate_visit = self.visit(node.predicate, scope)
body_visit = self.visit(node.body, scope)
int_key = self.keys_generator.generate('while', True)
return predicate_visit[0] + body_visit[0], \
[cil_node.CILWhile(predicate_visit[1],body_visit[1], int_key)]
@visitor.when(ast.Object)
def visit(self, node: ast.Object, scope: CILScope):
real_name = scope.get_real_name(node.name)
if real_name == '':
return [], [cil_node.CILGetAttr(self._att_offset(scope.classname, node.name))]
return [], [cil_node.CILGetLocal(real_name)]
@visitor.when(ast.IsVoid)
def visit(self, node: ast.IsVoid, scope):
exp = self.visit(node.expr, scope)
key = self.keys_generator.generate("isvoid@isvoid", True)
return exp[0], exp[1] + [cil_node.CILIsVoid(key)]
@visitor.when(ast.Case)
def visit(self, node: ast.Case, scope):
instance = self.visit(node.expr, scope)
actions = []
local = instance[0]
case_key = self.keys_generator.generate("case", True)
for i in node.actions:
t = self.visit(i, scope)
local += t[0]
action_key = self.keys_generator.generate("case.action", True)
action = t[1]
action[0].set_case_tag(case_key)
action[0].set_action_tag(action_key)
actions.append(action)
return local, [cil_node.CILCase(instance[1], actions, case_key)]
@visitor.when(ast.Action)
def visit(self, node: ast.Action, scope):
new_scope = CILScope(scope.classname, scope)
new_name = self.name_generator.generate(node.name)
new_scope.add_var(new_name)
t = self.visit(node.body, new_scope)
return [new_name] + t[0], [cil_node.CILAction(new_name, node.action_type, t[1])]
@visitor.when(ast.Self)
def visit(self, node: ast.Self, scope: CILScope):
return [], [cil_node.CILSelf()]
```
#### File: CoolCompiler/src/lexer.py
```python
import ply.lex as lex
from ply.lex import TOKEN
class CoolLexer(object):
"""
CoolLexer class.
"""
def __init__(self,
build_lexer=True,
debug=False,
lextab="lextab",
optimize=True,
outputdir="",
debuglog=None,
errorlog=None):
self.lexer = None # ply lexer instance
self.tokens = () # ply tokens collection
self.reserved = {} # ply reserved keywords map
self.last_token = None # last returned token
# Save Flags - PRIVATE PROPERTIES
self._debug = debug
self._lextab = lextab
self._optimize = optimize
self._outputdir = outputdir
self._debuglog = debuglog
self._errorlog = errorlog
# Build lexer if build_lexer flag is set to True
if build_lexer is True:
self.build(debug=debug, lextab=lextab, optimize=optimize, outputdir=outputdir, debuglog=debuglog,
errorlog=errorlog)
# ################################# READONLY #####################################
@property
def tokens_collection(self):
"""
Collection of COOL Syntax Tokens.
:return: Tuple.
"""
return (
# Identifiers
"ID", "TYPE",
# Primitive Types
"INTEGER", "STRING", "BOOLEAN",
# Literals
"LPAREN", "RPAREN", "LBRACE", "RBRACE", "COLON", "COMMA", "DOT", "SEMICOLON", "AT",
# Operators
"PLUS", "MINUS", "MULTIPLY", "DIVIDE", "EQ", "LT", "LTEQ", "ASSIGN", "INT_COMP",
# Special Operators
"ARROW"
)
@property
def basic_reserved(self):
"""
Map of Basic-COOL reserved keywords.
:return: dict.
"""
return {
"case": "CASE",
"class": "CLASS",
"else": "ELSE",
"esac": "ESAC",
"fi": "FI",
"if": "IF",
"in": "IN",
"inherits": "INHERITS",
"isvoid": "ISVOID",
"let": "LET",
"loop": "LOOP",
"new": "NEW",
"of": "OF",
"not": "NOT",
"pool": "POOL",
"self": "SELF",
"then": "THEN",
"while": "WHILE"
}
@property
def builtin_types(self):
"""
A map of the built-in types.
:return dict
"""
return {
"Bool": "BOOL_TYPE",
"Int": "INT_TYPE",
"IO": "IO_TYPE",
"Main": "MAIN_TYPE",
"Object": "OBJECT_TYPE",
"String": "STRING_TYPE",
"SELF_TYPE": "SELF_TYPE"
}
# ################################ PRIVATE #######################################
# ################# START OF LEXICAL ANALYSIS RULES DECLARATION ####################
# Ignore rule for single line comments
t_ignore_SINGLE_LINE_COMMENT = r"\-\-[^\n]*"
###
# SIMPLE TOKENS
t_LPAREN = r'\(' # (
t_RPAREN = r'\)' # )
t_LBRACE = r'\{' # {
t_RBRACE = r'\}' # }
t_COLON = r'\:' # :
t_COMMA = r'\,' # ,
t_DOT = r'\.' # .
t_SEMICOLON = r'\;' # ;
t_AT = r'\@' # @
t_MULTIPLY = r'\*' # *
t_DIVIDE = r'\/' # /
t_PLUS = r'\+' # +
t_MINUS = r'\-' # -
t_INT_COMP = r'~' # ~
t_LT = r'\<' # <
t_EQ = r'\=' # =
t_LTEQ = r'\<\=' # <=
t_ASSIGN = r'\<\-' # <-
t_ARROW = r'\=\>' # =>
@TOKEN(r"(true|false)")
def t_BOOLEAN(self, t):
t.value = True if t.value == "true" else False
return t
@TOKEN(r"\d+")
def t_INTEGER(self, t):
t.value = int(t.value)
return t
@TOKEN(r"[A-Z][a-zA-Z_0-9]*")
def t_TYPE(self, t):
t.type = self.basic_reserved.get(t.value, 'TYPE')
return t
@TOKEN(r"[a-z_][a-zA-Z_0-9]*")
def t_ID(self, t):
# Check for reserved words
t.type = self.basic_reserved.get(t.value, 'ID')
return t
@TOKEN(r"\n+")
def t_newline(self, t):
t.lexer.lineno += len(t.value)
def t_error(self, t):
"""
Error Handling and Reporting Rule.
"""
# print("Illegal character! Line: {0}, character: {1}".format(t.lineno, t.value[0]))
print("Illegal character! Line: {0}, character: {1}".format(t.lineno, t.value[0]))
t.lexer.skip(1)
# Ignore Whitespace Character Rule
t_ignore = ' \t\r\f'
# ################# LEXER STATES ######################################
@property
def states(self):
return (
("STRING", "exclusive"),
("COMMENT", "exclusive")
)
###
# THE STRING STATE
@TOKEN(r"\"")
def t_start_string(self, t):
t.lexer.begin("STRING")
t.lexer.string_backslashed = False
t.lexer.stringbuf = ""
@TOKEN(r"\n")
def t_STRING_newline(self, t):
t.lexer.lineno += 1
if not t.lexer.string_backslashed:
print("String newline not escaped")
t.lexer.skip(1)
else:
t.lexer.string_backslashed = False
@TOKEN(r"\"")
def t_STRING_end(self, t):
if not t.lexer.string_backslashed:
t.lexer.begin("INITIAL")
t.value = t.lexer.stringbuf
t.type = "STRING"
return t
else:
t.lexer.stringbuf += '"'
t.lexer.string_backslashed = False
@TOKEN(r"[^\n]")
def t_STRING_anything(self, t):
if t.lexer.string_backslashed:
if t.value == 'b':
t.lexer.stringbuf += '\b'
elif t.value == 't':
t.lexer.stringbuf += '\t'
elif t.value == 'n':
t.lexer.stringbuf += '\n'
elif t.value == 'f':
t.lexer.stringbuf += '\f'
elif t.value == '\\':
t.lexer.stringbuf += '\\'
else:
t.lexer.stringbuf += t.value
t.lexer.string_backslashed = False
else:
if t.value != '\\':
t.lexer.stringbuf += t.value
else:
t.lexer.string_backslashed = True
# STRING ignored characters
t_STRING_ignore = ''
# STRING error handler
def t_STRING_error(self, t):
print("Illegal character! Line: {0}, character: {1}".format(t.lineno, t.value[0]))
t.lexer.skip(1)
###
# THE COMMENT STATE
@TOKEN(r"\(\*")
def t_start_comment(self, t):
t.lexer.begin("COMMENT")
t.lexer.comment_count = 0
@TOKEN(r"\(\*")
def t_COMMENT_startanother(self, t):
t.lexer.comment_count += 1
@TOKEN(r"\*\)")
def t_COMMENT_end(self, t):
if t.lexer.comment_count == 0:
t.lexer.begin("INITIAL")
else:
t.lexer.comment_count -= 1
# COMMENT ignored characters
t_COMMENT_ignore = ''
# COMMENT error handler
def t_COMMENT_error(self, t):
t.lexer.skip(1)
# ################# END OF LEXICAL ANALYSIS RULES DECLARATION ######################
def build(self, **kwargs):
"""
Builds the CoolLexer instance
:param:
:return: None
"""
# Parse the parameters
if kwargs is None or len(kwargs) == 0:
debug, lextab, optimize, outputdir, debuglog, errorlog = \
self._debug, self._lextab, self._optimize, self._outputdir, self._debuglog, self._errorlog
else:
debug = kwargs.get("debug", self._debug)
lextab = kwargs.get("lextab", self._lextab)
optimize = kwargs.get("optimize", self._optimize)
outputdir = kwargs.get("outputdir", self._outputdir)
debuglog = kwargs.get("debuglog", self._debuglog)
errorlog = kwargs.get("errorlog", self._errorlog)
self.reserved = self.basic_reserved.keys()
self.tokens = self.tokens_collection + tuple(self.basic_reserved.values())
# Build internal ply.lex instance
self.lexer = lex.lex(module=self, lextab=lextab, debug=debug, optimize=optimize, outputdir=outputdir,
debuglog=debuglog, errorlog=errorlog)
def input(self, cool_program_source_code: str):
if self.lexer is None:
raise Exception("Lexer was not built. Try calling the build() method first, and then tokenize().")
self.lexer.input(cool_program_source_code)
def token(self):
if self.lexer is None:
raise Exception("Lexer was not built. Try building the lexer with the build() method.")
self.last_token = self.lexer.token()
return self.last_token
def clone_ply_lexer(self):
a_clone = self.lexer.clone()
return a_clone
@staticmethod
def test(program_source_code: str):
temp_lexer = CoolLexer()
temp_lexer.input(program_source_code)
iter_token_stream = iter([some_token for some_token in temp_lexer])
del temp_lexer
return iter_token_stream
# ################### ITERATOR PROTOCOL ############################################
def __iter__(self):
return self
def __next__(self):
t = self.token()
if t is None:
raise StopIteration
return t
def next(self):
return self.__next__()
def make_lexer(**kwargs) -> CoolLexer:
"""
Utility function.
:return: CoolLexer object.
"""
a_lexer = CoolLexer(**kwargs)
a_lexer.build()
return a_lexer
if __name__ == "__main__":
import sys
if len(sys.argv) != 2:
print("Usage: ./lexer.py program.cl")
exit()
elif not str(sys.argv[1]).endswith(".cl"):
print("Cool program source code files must end with .cl extension.")
print("Usage: ./lexer.py program.cl")
exit()
input_file = sys.argv[1]
with open(input_file, encoding="utf-8") as file:
cool_program_code = file.read()
lexer = make_lexer()
lexer.input(cool_program_code)
for token in lexer:
print(token)
```
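For quick reference, the lexer above can also be driven programmatically instead of via the `__main__` block. The snippet below is a minimal sketch that tokenizes an inline COOL fragment using the `make_lexer` helper defined in this file; the sample program text and the assumption that the module is importable as `lexer` are illustrative only.
```python
# Minimal sketch: tokenize an inline COOL snippet with the CoolLexer above.
# Assumes the module above is importable as `lexer`; the COOL source is made up.
from lexer import make_lexer

cool_source = '''
class Main inherits IO {
    main(): Object { out_string("hello\\n") };
};
'''

cool_lexer = make_lexer()
cool_lexer.input(cool_source)
for tok in cool_lexer:          # CoolLexer implements the iterator protocol
    print(tok.type, tok.value, tok.lineno)
```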
|
{
"source": "jdavidagudelo/django-social-auth-corrected",
"score": 2
}
|
#### File: backends/pipeline/user.py
```python
from uuid import uuid4
from social_auth.utils import setting, module_member
from social_auth.models import UserSocialAuth
slugify = module_member(setting('SOCIAL_AUTH_SLUGIFY_FUNCTION',
'django.template.defaultfilters.slugify'))
def get_username(details, user=None,
user_exists=UserSocialAuth.simple_user_exists,
*args, **kwargs):
"""Return an username for new user. Return current user username
if user was given.
"""
if user:
return {'username': UserSocialAuth.user_username(user)}
email_as_username = setting('SOCIAL_AUTH_USERNAME_IS_FULL_EMAIL', False)
uuid_length = setting('SOCIAL_AUTH_UUID_LENGTH', 16)
do_slugify = setting('SOCIAL_AUTH_SLUGIFY_USERNAMES', False)
if email_as_username and details.get('email'):
username = details['email']
elif details.get('username'):
username = unicode(details['username'])
else:
username = uuid4().get_hex()
max_length = UserSocialAuth.username_max_length()
short_username = username[:max_length - uuid_length]
final_username = UserSocialAuth.clean_username(username[:max_length])
if do_slugify:
final_username = slugify(final_username)
# Generate a unique username for current user using username
# as base but adding a unique hash at the end. Original
# username is cut to avoid any field max_length.
while user_exists(username=final_username):
username = short_username + uuid4().get_hex()[:uuid_length]
username = username[:max_length]
final_username = UserSocialAuth.clean_username(username)
if do_slugify:
final_username = slugify(final_username)
return {'username': final_username}
def create_user(backend, details, response, uid, username, user=None, *args,
**kwargs):
"""Create user. Depends on get_username pipeline."""
if user:
return {'user': user}
if not username:
return None
# Avoid hitting field max length
email = details.get('email')
original_email = None
if email and UserSocialAuth.email_max_length() < len(email):
original_email = email
email = ''
return {
'user': UserSocialAuth.create_user(username=username, email=email),
'original_email': original_email,
'is_new': True
}
def _ignore_field(name, is_new=False):
return name in ('username', 'id', 'pk') or \
(not is_new and
name in setting('SOCIAL_AUTH_PROTECTED_USER_FIELDS', []))
def mongoengine_orm_maxlength_truncate(backend, details, user=None,
is_new=False, *args, **kwargs):
"""Truncate any value in details that corresponds with a field in the user
model. Add this entry to the pipeline before update_user_details"""
if user is None:
return
out = {}
names = list(user._fields.keys())
for name, value in details.iteritems():
if name in names and not _ignore_field(name, is_new):
max_length = user._fields[name].max_length
try:
if max_length and len(value) > max_length:
value = value[:max_length]
except TypeError:
pass
out[name] = value
return {'details': out}
def django_orm_maxlength_truncate(backend, details, user=None, is_new=False,
*args, **kwargs):
"""Truncate any value in details that corresponds with a field in the user
model. Add this entry to the pipeline before update_user_details"""
if user is None:
return
out = {}
names = user._meta.get_all_field_names()
for name, value in details.iteritems():
if name in names and not _ignore_field(name, is_new):
max_length = user._meta.get_field(name).max_length
try:
if max_length and len(value) > max_length:
value = value[:max_length]
except TypeError:
pass
out[name] = value
return {'details': out}
def update_user_details(backend, details, response, user=None, is_new=False,
*args, **kwargs):
"""Update user details using data from provider."""
if user is None:
return
changed = False # flag to track change
email = details.get('email', None)
if isinstance(email, dict):
email = email.get('email', '')
details['email'] = email
for name, value in details.iteritems():
# do not update username, it was already generated, do not update
# configured fields if user already existed
if not _ignore_field(name, is_new):
if value and value != getattr(user, name, None):
setattr(user, name, value)
changed = True
if changed:
user.save()
```
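These pipeline steps are meant to be chained through the project's settings. The snippet below is a hedged sketch of such a wiring; the non-`user` entry and the setting values are assumptions based on the usual django-social-auth layout, not taken from this repository.
```python
# Sketch of a Django settings module wiring the pipeline functions above.
# The social_auth_user entry and the setting values are assumed placeholders.
SOCIAL_AUTH_PIPELINE = (
    'social_auth.backends.pipeline.social.social_auth_user',   # assumed default step
    'social_auth.backends.pipeline.user.get_username',         # defined above
    'social_auth.backends.pipeline.user.create_user',          # defined above
    'social_auth.backends.pipeline.user.update_user_details',  # defined above
)

# Settings consulted by get_username() above:
SOCIAL_AUTH_USERNAME_IS_FULL_EMAIL = False
SOCIAL_AUTH_SLUGIFY_USERNAMES = True
SOCIAL_AUTH_UUID_LENGTH = 16
```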
|
{
"source": "jdavidagudelo/django-social-auth-fix",
"score": 3
}
|
#### File: backends/contrib/jawbone.py
```python
from urllib2 import Request, urlopen
try:
import json as simplejson
except ImportError:
try:
import simplejson
except ImportError:
from django.utils import simplejson
from social_auth.backends import BaseOAuth2, OAuthBackend
from social_auth.exceptions import AuthCanceled, AuthUnknownError
# Jawbone configuration
JAWBONE_SERVER = 'https://jawbone.com'  # no trailing slash; the URLs below add their own
JAWBONE_AUTHORIZATION_URL = '%s/auth/oauth2/auth' % JAWBONE_SERVER
JAWBONE_ACCESS_TOKEN_URL = '%s/auth/oauth2/token' % JAWBONE_SERVER
JAWBONE_CHECK_AUTH = '%s/nudge/api/users/@me' % JAWBONE_SERVER
class JawboneBackend(OAuthBackend):
name = 'jawbone'
def get_user_id(self, details, response):
return response['data']['xid']
def get_user_details(self, response):
"""Return user details from Jawbone account"""
firstName = response['data'].get('first', '')
lastName = response['data'].get('last', '')
dob = response['data'].get('dob', '')
gender = response['data'].get('gender', '')
height = response['data'].get('height', '')
weight = response['data'].get('weight', '')
return {'username': firstName + ' ' + lastName,
'first_name': firstName,
'last_name': lastName,
'dob': dob,
'gender': gender,
'height': height,
'weight': weight}
class JawboneAuth(BaseOAuth2):
"""Jawbone OAuth mechanism"""
AUTHORIZATION_URL = JAWBONE_AUTHORIZATION_URL
ACCESS_TOKEN_URL = JAWBONE_ACCESS_TOKEN_URL
SERVER_URL = JAWBONE_SERVER
AUTH_BACKEND = JawboneBackend
SETTINGS_KEY_NAME = 'JAWBONE_CONSUMER_KEY'
SETTINGS_SECRET_NAME = 'JAWBONE_CONSUMER_SECRET'
SCOPE_SEPARATOR = ' '
    # Settings name that holds any extra Jawbone permissions/scopes.
SCOPE_VAR_NAME = 'JAWBONE_EXTENDED_PERMISSIONS'
REDIRECT_STATE = False
def user_data(self, access_token, *args, **kwargs):
"""Loads user data from service"""
url = JAWBONE_CHECK_AUTH
headers = {'Authorization': 'Bearer ' + access_token}
request = Request(url, headers=headers)
try:
return simplejson.load(urlopen(request))
except ValueError:
return None
def process_error(self, data):
error = self.request.GET.get('error', '')
if error:
if error == 'access_denied':
raise AuthCanceled(self)
else:
raise AuthUnknownError(self, 'Jawbone error was %s' % error)
return super(JawboneAuth, self).process_error(data)
# Backend definition
BACKENDS = {
'jawbone': JawboneAuth,
}
```
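A hedged sketch of the Django settings this backend reads; the setting names come from the class attributes above, while the values (and the backend's dotted path, inferred from the file location) are placeholders.
```python
# Sketch of settings for the Jawbone backend above (values are placeholders).
AUTHENTICATION_BACKENDS = (
    'social_auth.backends.contrib.jawbone.JawboneBackend',  # path inferred from file location
    'django.contrib.auth.backends.ModelBackend',
)
JAWBONE_CONSUMER_KEY = 'your-client-id'          # SETTINGS_KEY_NAME above
JAWBONE_CONSUMER_SECRET = 'your-client-secret'   # SETTINGS_SECRET_NAME above
JAWBONE_EXTENDED_PERMISSIONS = ['basic_read']    # SCOPE_VAR_NAME above; scope name assumed
```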
#### File: backends/contrib/rdio.py
```python
import urllib
from oauth2 import Request as OAuthRequest, SignatureMethod_HMAC_SHA1
try:
import json as simplejson
except ImportError:
try:
import simplejson
except ImportError:
from django.utils import simplejson
from social_auth.backends import ConsumerBasedOAuth, OAuthBackend, BaseOAuth2
from social_auth.utils import dsa_urlopen
class RdioBaseBackend(OAuthBackend):
def get_user_id(self, details, response):
return response['key']
def get_user_details(self, response):
return {
'username': response['username'],
'first_name': response['firstName'],
'last_name': response['lastName'],
'fullname': response['displayName'],
}
class RdioOAuth1Backend(RdioBaseBackend):
"""Rdio OAuth authentication backend"""
name = 'rdio-oauth1'
EXTRA_DATA = [
('key', 'rdio_id'),
('icon', 'rdio_icon_url'),
('url', 'rdio_profile_url'),
('username', 'rdio_username'),
('streamRegion', 'rdio_stream_region'),
]
@classmethod
def tokens(cls, instance):
token = super(RdioOAuth1Backend, cls).tokens(instance)
if token and 'access_token' in token:
token = dict(tok.split('=')
for tok in token['access_token'].split('&'))
return token
class RdioOAuth2Backend(RdioBaseBackend):
name = 'rdio-oauth2'
EXTRA_DATA = [
('key', 'rdio_id'),
('icon', 'rdio_icon_url'),
('url', 'rdio_profile_url'),
('username', 'rdio_username'),
('streamRegion', 'rdio_stream_region'),
('refresh_token', 'refresh_token', True),
('token_type', 'token_type', True),
]
class RdioOAuth1(ConsumerBasedOAuth):
AUTH_BACKEND = RdioOAuth1Backend
REQUEST_TOKEN_URL = 'http://api.rdio.com/oauth/request_token'
AUTHORIZATION_URL = 'https://www.rdio.com/oauth/authorize'
ACCESS_TOKEN_URL = 'http://api.rdio.com/oauth/access_token'
RDIO_API_BASE = 'http://api.rdio.com/1/'
SETTINGS_KEY_NAME = 'RDIO_OAUTH1_KEY'
SETTINGS_SECRET_NAME = 'RDIO_OAUTH1_SECRET'
def user_data(self, access_token, *args, **kwargs):
"""Return user data provided"""
params = {
'method': 'currentUser',
'extras': 'username,displayName,streamRegion',
}
request = self.oauth_post_request(access_token, self.RDIO_API_BASE,
params=params)
response = dsa_urlopen(request.url, request.to_postdata())
json = '\n'.join(response.readlines())
try:
return simplejson.loads(json)['result']
except ValueError:
return None
def oauth_post_request(self, token, url, params):
"""Generate OAuth request, setups callback url"""
if 'oauth_verifier' in self.data:
params['oauth_verifier'] = self.data['oauth_verifier']
request = OAuthRequest.from_consumer_and_token(self.consumer,
token=token,
http_url=url,
parameters=params,
http_method='POST')
request.sign_request(SignatureMethod_HMAC_SHA1(), self.consumer, token)
return request
class RdioOAuth2(BaseOAuth2):
AUTH_BACKEND = RdioOAuth2Backend
AUTHORIZATION_URL = 'https://www.rdio.com/oauth2/authorize'
ACCESS_TOKEN_URL = 'https://www.rdio.com/oauth2/token'
RDIO_API_BASE = 'https://www.rdio.com/api/1/'
SETTINGS_KEY_NAME = 'RDIO_OAUTH2_KEY'
SETTINGS_SECRET_NAME = 'RDIO_OAUTH2_SECRET'
SCOPE_VAR_NAME = 'RDIO2_PERMISSIONS'
EXTRA_PARAMS_VAR_NAME = 'RDIO2_EXTRA_PARAMS'
def user_data(self, access_token, *args, **kwargs):
params = {
'method': 'currentUser',
'extras': 'username,displayName,streamRegion',
'access_token': access_token,
}
response = dsa_urlopen(self.RDIO_API_BASE, urllib.urlencode(params))
try:
return simplejson.load(response)['result']
except ValueError:
return None
# Backend definition
BACKENDS = {
'rdio-oauth1': RdioOAuth1,
'rdio-oauth2': RdioOAuth2
}
```
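One detail worth calling out: `RdioOAuth1Backend.tokens()` above assumes the stored `access_token` is a URL-encoded `key=value&key=value` string and splits it into a dict. A small self-contained illustration of that parsing (the token values are made up):
```python
# Illustration of the parsing done in RdioOAuth1Backend.tokens() above.
stored = {'access_token': 'oauth_token=abc123&oauth_token_secret=s3cr3t'}
parsed = dict(tok.split('=') for tok in stored['access_token'].split('&'))
assert parsed == {'oauth_token': 'abc123', 'oauth_token_secret': 's3cr3t'}
```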
#### File: backends/contrib/yahoo.py
```python
try:
import json as simplejson
except ImportError:
try:
import simplejson
except ImportError:
from django.utils import simplejson
from social_auth.backends import ConsumerBasedOAuth, OAuthBackend
from social_auth.exceptions import AuthUnknownError
# Yahoo OAuth base configuration
YAHOO_OAUTH_SERVER = 'api.login.yahoo.com'
REQUEST_TOKEN_URL = 'https://api.login.yahoo.com/oauth/v2/get_request_token'
AUTHORIZATION_URL = 'https://api.login.yahoo.com/oauth/v2/request_auth'
ACCESS_TOKEN_URL = 'https://api.login.yahoo.com/oauth/v2/get_token'
class YahooOAuthBackend(OAuthBackend):
"""Yahoo OAuth authentication backend"""
name = 'yahoo-oauth'
EXTRA_DATA = [
('guid', 'id'),
('access_token', 'access_token'),
('expires', 'expires')
]
def get_user_id(self, details, response):
return response['guid']
def get_user_details(self, response):
"""Return user details from Yahoo Profile"""
fname = response.get('givenName')
lname = response.get('familyName')
if 'emails' in response:
email = response.get('emails')[0]['handle']
else:
email = ''
return {'username': response.get('nickname'),
'email': email,
'fullname': '%s %s' % (fname, lname),
'first_name': fname,
'last_name': lname}
class YahooOAuth(ConsumerBasedOAuth):
AUTHORIZATION_URL = AUTHORIZATION_URL
REQUEST_TOKEN_URL = REQUEST_TOKEN_URL
ACCESS_TOKEN_URL = ACCESS_TOKEN_URL
AUTH_BACKEND = YahooOAuthBackend
SETTINGS_KEY_NAME = 'YAHOO_CONSUMER_KEY'
SETTINGS_SECRET_NAME = 'YAHOO_CONSUMER_SECRET'
def user_data(self, access_token, *args, **kwargs):
"""Loads user data from service"""
guid = self._get_guid(access_token)
url = 'http://social.yahooapis.com/v1/user/%s/profile?format=json' \
% guid
request = self.oauth_request(access_token, url)
response = self.fetch_response(request)
try:
return simplejson.loads(response)['profile']
except ValueError:
raise AuthUnknownError('Error during profile retrieval, '
'please, try again later')
def _get_guid(self, access_token):
"""
        Because you have to provide the GUID for every API request,
        it is also returned during one of the OAuth calls.
"""
url = 'http://social.yahooapis.com/v1/me/guid?format=json'
request = self.oauth_request(access_token, url)
response = self.fetch_response(request)
try:
json = simplejson.loads(response)
return json['guid']['value']
except ValueError:
raise AuthUnknownError('Error during user id retrieval, '
'please, try again later')
# Backend definition
BACKENDS = {
'yahoo-oauth': YahooOAuth
}
```
|
{
"source": "jdavidagudelo/django-userena-ce",
"score": 3
}
|
#### File: umessages/tests/test_forms.py
```python
from django.contrib.auth import get_user_model
from django.test import TestCase
from userena.contrib.umessages.forms import ComposeForm
class ComposeFormTests(TestCase):
""" Test the compose form. """
fixtures = ["users"]
def test_invalid_data(self):
"""
Test the save method of :class:`ComposeForm`
We don't need to make the ``to`` field sweat because we have done that
in the ``fields`` test.
"""
invalid_data_dicts = [
# No body
{
"data": {"to": "john", "body": ""},
"error": ("body", ["This field is required."]),
}
]
for invalid_dict in invalid_data_dicts:
form = ComposeForm(data=invalid_dict["data"])
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors[invalid_dict["error"][0]], invalid_dict["error"][1]
)
def test_save_msg(self):
""" Test valid data """
valid_data = {"to": "<NAME>", "body": "Body"}
form = ComposeForm(data=valid_data)
self.assertTrue(form.is_valid())
# Save the form.
sender = get_user_model().objects.get(username="jane")
msg = form.save(sender)
# Check if the values are set correctly
self.assertEqual(msg.body, valid_data["body"])
self.assertEqual(msg.sender, sender)
self.assertTrue(msg.sent_at)
# Check recipients
self.assertEqual(msg.recipients.all()[0].username, "jane")
self.assertEqual(msg.recipients.all()[1].username, "john")
```
#### File: django-userena-ce/userena/forms.py
```python
import random
from collections import OrderedDict
from hashlib import sha1
from django import forms
from django.contrib.auth import authenticate
from django.contrib.auth import get_user_model
from django.utils.translation import gettext_lazy as _
from userena import settings as userena_settings
from userena.models import UserenaSignup
from userena.utils import get_profile_model
attrs_dict = {"class": "required"}
USERNAME_RE = r"^[\.\w]+$"
class SignupForm(forms.Form):
"""
Form for creating a new user account.
    Validates that the requested username and e-mail are not already in use.
Also requires the password to be entered twice.
"""
username = forms.RegexField(
regex=USERNAME_RE,
max_length=30,
widget=forms.TextInput(attrs=attrs_dict),
label=_("Username"),
error_messages={
"invalid": _(
"Username must contain only letters, numbers, dots and underscores."
)
},
)
email = forms.EmailField(
widget=forms.TextInput(attrs=dict(attrs_dict, maxlength=75)), label=_("Email")
)
password1 = forms.CharField(
widget=forms.PasswordInput(attrs=attrs_dict, render_value=False),
label=_("Create password"),
)
password2 = forms.CharField(
widget=forms.PasswordInput(attrs=attrs_dict, render_value=False),
label=_("Repeat password"),
)
def clean_username(self):
"""
Validate that the username is alphanumeric and is not already in use.
Also validates that the username is not listed in
``USERENA_FORBIDDEN_USERNAMES`` list.
"""
try:
user = get_user_model().objects.get(
username__iexact=self.cleaned_data["username"]
)
except get_user_model().DoesNotExist:
pass
else:
if userena_settings.USERENA_ACTIVATION_REQUIRED and UserenaSignup.objects.filter(
user__username__iexact=self.cleaned_data["username"]
).exclude(
activation_key=userena_settings.USERENA_ACTIVATED
):
raise forms.ValidationError(
_(
"This username is already taken but not confirmed. Please check your email for verification steps."
)
)
raise forms.ValidationError(_("This username is already taken."))
if (
self.cleaned_data["username"].lower()
in userena_settings.USERENA_FORBIDDEN_USERNAMES
):
raise forms.ValidationError(_("This username is not allowed."))
return self.cleaned_data["username"]
def clean_email(self):
""" Validate that the e-mail address is unique. """
if get_user_model().objects.filter(email__iexact=self.cleaned_data["email"]):
if userena_settings.USERENA_ACTIVATION_REQUIRED and UserenaSignup.objects.filter(
user__email__iexact=self.cleaned_data["email"]
).exclude(
activation_key=userena_settings.USERENA_ACTIVATED
):
raise forms.ValidationError(
_(
"This email is already in use but not confirmed. Please check your email for verification steps."
)
)
raise forms.ValidationError(
_("This email is already in use. Please supply a different email.")
)
return self.cleaned_data["email"]
def clean(self):
"""
Validates that the values entered into the two password fields match.
Note that an error here will end up in ``non_field_errors()`` because
it doesn't apply to a single field.
"""
if "password1" in self.cleaned_data and "password2" in self.cleaned_data:
if self.cleaned_data["password1"] != self.cleaned_data["password2"]:
raise forms.ValidationError(_("The two password fields didn't match."))
return self.cleaned_data
def save(self):
""" Creates a new user and account. Returns the newly created user. """
username, email, password = (
self.cleaned_data["username"],
self.cleaned_data["email"],
self.cleaned_data["password1"],
)
new_user = UserenaSignup.objects.create_user(
username,
email,
password,
not userena_settings.USERENA_ACTIVATION_REQUIRED,
userena_settings.USERENA_ACTIVATION_REQUIRED,
)
return new_user
class SignupFormOnlyEmail(SignupForm):
"""
Form for creating a new user account but not needing a username.
This form is an adaptation of :class:`SignupForm`. It's used when
    the ``USERENA_WITHOUT_USERNAMES`` setting is set to ``True``, and thus the user
    is not asked to supply a username; one is generated for them. The user
    can then keep signing in by using their email.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
del self.fields["username"]
def save(self):
""" Generate a random username before falling back to parent signup form """
while True:
username = sha1(str(random.random()).encode("utf-8")).hexdigest()[:5]
try:
get_user_model().objects.get(username__iexact=username)
except get_user_model().DoesNotExist:
break
self.cleaned_data["username"] = username
return super().save()
class SignupFormTos(SignupForm):
""" Add a Terms of Service button to the ``SignupForm``. """
tos = forms.BooleanField(
widget=forms.CheckboxInput(attrs=attrs_dict),
label=_("I have read and agree to the Terms of Service"),
error_messages={"required": _("You must agree to the terms to register.")},
)
def identification_field_factory(label, error_required):
"""
A simple identification field factory which enable you to set the label.
:param label:
String containing the label for this field.
:param error_required:
String containing the error message if the field is left empty.
"""
return forms.CharField(
label=label,
widget=forms.TextInput(attrs=attrs_dict),
max_length=75,
error_messages={"required": error_required},
)
class AuthenticationForm(forms.Form):
"""
A custom form where the identification can be a e-mail address or username.
"""
identification = identification_field_factory(
_("Email or username"), _("Either supply us with your email or username.")
)
password = forms.CharField(
label=_("Password"),
widget=forms.PasswordInput(attrs=attrs_dict, render_value=False),
)
remember_me = forms.BooleanField(
widget=forms.CheckboxInput(),
required=False,
label=_("Remember me for %(days)s")
% {"days": _(userena_settings.USERENA_REMEMBER_ME_DAYS[0])},
)
def __init__(self, *args, **kwargs):
""" A custom init because we need to change the label if no usernames is used """
super().__init__(*args, **kwargs)
# Dirty hack, somehow the label doesn't get translated without declaring
# it again here.
self.fields["remember_me"].label = _("Remember me for %(days)s") % {
"days": _(userena_settings.USERENA_REMEMBER_ME_DAYS[0])
}
if userena_settings.USERENA_WITHOUT_USERNAMES:
self.fields["identification"] = identification_field_factory(
_("Email"), _("Please supply your email.")
)
def clean(self):
"""
Checks for the identification and password.
        If the combination can't be found, an invalid sign-in error is raised.
"""
identification = self.cleaned_data.get("identification")
password = self.cleaned_data.get("password")
if identification and password:
user = authenticate(identification=identification, password=password)
if user is None:
raise forms.ValidationError(
_(
"Please enter a correct username or email and password. Note that both fields are case-sensitive."
)
)
return self.cleaned_data
class ChangeEmailForm(forms.Form):
email = forms.EmailField(
widget=forms.TextInput(attrs=dict(attrs_dict, maxlength=75)),
label=_("New email"),
)
def __init__(self, user, *args, **kwargs):
"""
The current ``user`` is needed for initialisation of this form so
that we can check if the email address is still free and not always
        returning ``True`` for this query because it's the user's own e-mail
address.
"""
super().__init__(*args, **kwargs)
if not isinstance(user, get_user_model()):
raise TypeError(
"user must be an instance of %s" % get_user_model().__name__
)
else:
self.user = user
def clean_email(self):
""" Validate that the email is not already registered with another user """
if self.cleaned_data["email"].lower() == self.user.email:
raise forms.ValidationError(_("You're already known under this email."))
if (
get_user_model()
.objects.filter(email__iexact=self.cleaned_data["email"])
.exclude(email__iexact=self.user.email)
):
raise forms.ValidationError(
_("This email is already in use. Please supply a different email.")
)
return self.cleaned_data["email"]
def save(self):
"""
        The save method calls :func:`user.change_email()`, which sends out an
        email with a verification key used to verify and enable this new
        email address.
"""
return self.user.userena_signup.change_email(self.cleaned_data["email"])
class EditProfileForm(forms.ModelForm):
""" Base form used for fields that are always required """
first_name = forms.CharField(label=_("First name"), max_length=30, required=False)
last_name = forms.CharField(label=_("Last name"), max_length=30, required=False)
def __init__(self, *args, **kw):
super().__init__(*args, **kw)
# Put the first and last name at the top
new_order = [
("first_name", self.fields["first_name"]),
("last_name", self.fields["last_name"]),
]
new_order.extend(list(self.fields.items())[:-2])
self.fields = OrderedDict(new_order)
class Meta:
model = get_profile_model()
exclude = ["user"]
def save(self, force_insert=False, force_update=False, commit=True):
profile = super().save(commit=commit)
# Save first and last name
user = profile.user
user.first_name = self.cleaned_data["first_name"]
user.last_name = self.cleaned_data["last_name"]
user.save()
return profile
class ActivationForm(forms.Form):
"""Form for activating an account."""
pass
```
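For orientation, the snippet below is a minimal sketch of driving `SignupForm` directly, for example from a Django shell or a test; the field names come from the form definition above, the submitted values are placeholders, and a configured Django project (settings plus database) is assumed.
```python
# Minimal sketch: validate and save a signup with the SignupForm above.
# Assumes a configured Django project; the submitted values are placeholders.
from userena.forms import SignupForm

form = SignupForm(data={
    'username': 'alice',
    'email': 'alice@example.com',
    'password1': 'a-strong-password',
    'password2': 'a-strong-password',
})
if form.is_valid():
    user = form.save()   # delegates to UserenaSignup.objects.create_user()
    print(user.username)
else:
    print(form.errors)
```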
#### File: django-userena-ce/userena/mail.py
```python
import re
from django.conf import settings
from django.template.loader import render_to_string
from django.utils.translation import gettext as _
from django.core.mail import EmailMultiAlternatives
from userena import settings as userena_settings
from html2text import html2text
def send_mail(
subject,
message_plain,
message_html,
email_from,
email_to,
custom_headers={},
attachments=(),
):
"""
Build the email as a multipart message containing
a multipart alternative for text (plain, HTML) plus
all the attached files.
"""
if not message_plain and not message_html:
raise ValueError(_("Either message_plain or message_html should be not None"))
if not message_plain:
message_plain = html2text(message_html)
message = {}
message["subject"] = subject
message["body"] = message_plain
message["from_email"] = email_from
message["to"] = email_to
if attachments:
message["attachments"] = attachments
if custom_headers:
message["headers"] = custom_headers
msg = EmailMultiAlternatives(**message)
if message_html:
msg.attach_alternative(message_html, "text/html")
msg.send()
def wrap_attachment():
pass
class UserenaConfirmationMail(object):
_message_txt = "userena/emails/{0}_email_message{1}.txt"
_message_html = "userena/emails/{0}_email_message{1}.html"
_subject_txt = "userena/emails/{0}_email_subject{1}.txt"
def __init__(self, context):
self.context = context
def generate_mail(self, type_mail, version=""):
self.type_mail = type_mail
self.message_txt = self._message_txt.format(type_mail, version)
self.message_html = self._message_html.format(type_mail, version)
self.subject_txt = self._subject_txt.format(type_mail, version)
self.subject = self._subject()
self.message_html = self._message_in_html()
self.message = self._message_in_txt()
def send_mail(self, email):
send_mail(
self.subject,
self.message,
self.message_html,
settings.DEFAULT_FROM_EMAIL,
[email],
)
def _message_in_html(self):
if userena_settings.USERENA_HTML_EMAIL:
return render_to_string(self.message_html, self.context)
return None
def _message_in_txt(self):
if (
not userena_settings.USERENA_HTML_EMAIL
or not self.message_html
or userena_settings.USERENA_USE_PLAIN_TEMPLATE
):
return render_to_string(self.message_txt, self.context)
return None
def _subject(self):
subject = render_to_string(self.subject_txt, self.context)
subject = "".join(subject.splitlines())
return subject
```
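A hedged sketch of how `UserenaConfirmationMail` is driven; the mail type name and the context keys are illustrative assumptions, since the real requirements come from the templates under `userena/emails/`.
```python
# Minimal sketch: render and send an activation mail with the class above.
# Assumes a configured Django project with at least one user; the mail type
# and context keys are assumptions, the templates define what is required.
from django.contrib.auth import get_user_model

from userena.mail import UserenaConfirmationMail

user = get_user_model().objects.first()
mailer = UserenaConfirmationMail(context={'user': user, 'protocol': 'https'})
mailer.generate_mail('activation')   # renders subject/plain/html templates by name
mailer.send_mail(user.email)         # delegates to send_mail() defined above
```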
#### File: userena/tests/test_migrations.py
```python
import pytest
from django.core.management import call_command
@pytest.mark.django_db
def test_make_migration(capsys):
""" Ensure that migrations do not need to be made """
call_command("makemigrations")
out, err = capsys.readouterr()
assert out == "No changes detected\n"
assert err == ""
```
#### File: userena/tests/tests_middleware.py
```python
from django.contrib.auth import get_user_model
from django.http import HttpRequest
from django.test import TestCase
from userena.tests.profiles.models import Profile
from userena.middleware import UserenaLocaleMiddleware
from userena import settings as userena_settings
from userena.utils import get_user_profile, get_profile_model
User = get_user_model()
def has_profile(user):
"""Test utility function to check if user has profile"""
profile_model = get_profile_model()
try:
profile = user.get_profile()
except AttributeError:
related_name = profile_model._meta.get_field("user").related_query_name()
profile = getattr(user, related_name, None)
except profile_model.DoesNotExist:
profile = None
return bool(profile)
class UserenaLocaleMiddlewareTests(TestCase):
""" Test the ``UserenaLocaleMiddleware`` """
fixtures = ["users", "profiles"]
def _get_request_with_user(self, user):
""" Fake a request with an user """
request = HttpRequest()
request.META = {"SERVER_NAME": "testserver", "SERVER_PORT": 80}
request.method = "GET"
request.session = {}
# Add user
request.user = user
return request
def test_preference_user(self):
""" Test the language preference of two users """
users = ((1, "nl"), (2, "en"))
for pk, lang in users:
user = User.objects.get(pk=pk)
profile = get_user_profile(user=user)
req = self._get_request_with_user(user)
# Check that the user has this preference
self.assertEqual(profile.language, lang)
# Request should have a ``LANGUAGE_CODE`` with dutch
UserenaLocaleMiddleware().process_request(req)
self.assertEqual(req.LANGUAGE_CODE, lang)
def test_without_profile(self):
""" Middleware should do nothing when a user has no profile """
# Delete the profile
Profile.objects.get(pk=1).delete()
user = User.objects.get(pk=1)
# User shouldn't have a profile
self.assertFalse(has_profile(user))
req = self._get_request_with_user(user)
UserenaLocaleMiddleware().process_request(req)
self.assertFalse(hasattr(req, "LANGUAGE_CODE"))
def test_without_language_field(self):
""" Middleware should do nothing if the profile has no language field """
userena_settings.USERENA_LANGUAGE_FIELD = "non_existant_language_field"
user = User.objects.get(pk=1)
req = self._get_request_with_user(user)
# Middleware should do nothing
UserenaLocaleMiddleware().process_request(req)
self.assertFalse(hasattr(req, "LANGUAGE_CODE"))
```
|
{
"source": "jdavidagudelo/tensorflow-models",
"score": 2
}
|
#### File: official/benchmark/benchmark_uploader.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
from google.cloud import bigquery
from google.cloud import exceptions
import tensorflow as tf
class BigQueryUploader(object):
"""Upload the benchmark and metric info from JSON input to BigQuery. """
def __init__(self, gcp_project=None, credentials=None):
"""Initialized BigQueryUploader with proper setting.
Args:
gcp_project: string, the name of the GCP project that the log will be
uploaded to. The default project name will be detected from local
environment if no value is provided.
credentials: google.auth.credentials. The credential to access the
BigQuery service. The default service account credential will be
        detected from the local environment if no value is provided. Please use
        google.oauth2.service_account.Credentials to load credentials from a
        local file when the test is run outside of GCP.
"""
self._bq_client = bigquery.Client(
project=gcp_project, credentials=credentials)
def upload_benchmark_run_json(
self, dataset_name, table_name, run_id, run_json):
"""Upload benchmark run information to Bigquery.
Args:
dataset_name: string, the name of bigquery dataset where the data will be
uploaded.
table_name: string, the name of bigquery table under the dataset where
the data will be uploaded.
run_id: string, a unique ID that will be attached to the data, usually
this is a UUID4 format.
run_json: dict, the JSON data that contains the benchmark run info.
"""
run_json["model_id"] = run_id
self._upload_json(dataset_name, table_name, [run_json])
def upload_benchmark_metric_json(
self, dataset_name, table_name, run_id, metric_json_list):
"""Upload metric information to Bigquery.
Args:
dataset_name: string, the name of bigquery dataset where the data will be
uploaded.
table_name: string, the name of bigquery table under the dataset where
the metric data will be uploaded. This is different from the
benchmark_run table.
run_id: string, a unique ID that will be attached to the data, usually
this is a UUID4 format. This should be the same as the benchmark run_id.
metric_json_list: list, a list of JSON object that record the metric info.
"""
for m in metric_json_list:
m["run_id"] = run_id
self._upload_json(dataset_name, table_name, metric_json_list)
def upload_benchmark_run_file(
self, dataset_name, table_name, run_id, run_json_file):
"""Upload benchmark run information to Bigquery from input json file.
Args:
dataset_name: string, the name of bigquery dataset where the data will be
uploaded.
table_name: string, the name of bigquery table under the dataset where
the data will be uploaded.
run_id: string, a unique ID that will be attached to the data, usually
this is a UUID4 format.
run_json_file: string, the file path that contains the run JSON data.
"""
with tf.gfile.GFile(run_json_file) as f:
benchmark_json = json.load(f)
self.upload_benchmark_run_json(
dataset_name, table_name, run_id, benchmark_json)
def upload_metric_file(
self, dataset_name, table_name, run_id, metric_json_file):
"""Upload metric information to Bigquery from input json file.
Args:
dataset_name: string, the name of bigquery dataset where the data will be
uploaded.
table_name: string, the name of bigquery table under the dataset where
the metric data will be uploaded. This is different from the
benchmark_run table.
run_id: string, a unique ID that will be attached to the data, usually
this is a UUID4 format. This should be the same as the benchmark run_id.
metric_json_file: string, the file path that contains the metric JSON
data.
"""
with tf.gfile.GFile(metric_json_file) as f:
metrics = []
for line in f:
metrics.append(json.loads(line.strip()))
self.upload_benchmark_metric_json(
dataset_name, table_name, run_id, metrics)
def _upload_json(self, dataset_name, table_name, json_list):
# Find the unique table reference based on dataset and table name, so that
# the data can be inserted to it.
table_ref = self._bq_client.dataset(dataset_name).table(table_name)
errors = self._bq_client.insert_rows_json(table_ref, json_list)
if errors:
tf.logging.error(
"Failed to upload benchmark info to bigquery: {}".format(errors))
def insert_run_status(self, dataset_name, table_name, run_id, run_status):
"""Insert the run status in to Bigquery run status table."""
query = ("INSERT {ds}.{tb} "
"(run_id, status) "
"VALUES('{rid}', '{status}')").format(
ds=dataset_name, tb=table_name, rid=run_id, status=run_status)
try:
self._bq_client.query(query=query).result()
except exceptions.GoogleCloudError as e:
tf.logging.error("Failed to insert run status: %s", e)
def update_run_status(self, dataset_name, table_name, run_id, run_status):
"""Update the run status in in Bigquery run status table."""
query = ("UPDATE {ds}.{tb} "
"SET status = '{status}' "
"WHERE run_id = '{rid}'").format(
ds=dataset_name, tb=table_name, status=run_status, rid=run_id)
try:
self._bq_client.query(query=query).result()
except exceptions.GoogleCloudError as e:
tf.logging.error("Failed to update run status: %s", e)
```
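A hedged sketch of the intended upload flow using the class above; the GCP project, dataset/table names, run id, and file paths are all placeholders, and the import path is assumed from the file location.
```python
# Minimal sketch: upload a benchmark run plus metrics with BigQueryUploader.
# Project, dataset, table, and file names are placeholders.
import uuid

from official.benchmark.benchmark_uploader import BigQueryUploader

uploader = BigQueryUploader(gcp_project='my-gcp-project')
run_id = str(uuid.uuid4())

uploader.upload_benchmark_run_file(
    'benchmark_dataset', 'benchmark_run', run_id, '/tmp/benchmark_run.json')
uploader.upload_metric_file(
    'benchmark_dataset', 'metric', run_id, '/tmp/metric.log')
uploader.insert_run_status(
    'benchmark_dataset', 'benchmark_run_status', run_id, 'running')
```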
#### File: adversarial_logit_pairing/datasets/dataset_factory.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from . import imagenet_input
from . import tiny_imagenet_input
def get_dataset(dataset_name, split, batch_size, image_size, is_training):
"""Returns dataset.
Args:
dataset_name: name of the dataset, "imagenet" or "tiny_imagenet".
split: name of the split, "train" or "validation".
batch_size: size of the minibatch.
    image_size: size of one side of the image. Output images will be
resized to square shape image_size*image_size.
is_training: if True then training preprocessing is done, otherwise eval
preprocessing is done.
Raises:
ValueError: if dataset_name is invalid.
Returns:
dataset: instance of tf.data.Dataset with the dataset.
num_examples: number of examples in given split of the dataset.
num_classes: number of classes in the dataset.
bounds: tuple with bounds of image values. All returned image pixels
are between bounds[0] and bounds[1].
"""
if dataset_name == 'tiny_imagenet':
dataset = tiny_imagenet_input.tiny_imagenet_input(
split, batch_size, image_size, is_training)
num_examples = tiny_imagenet_input.num_examples_per_epoch(split)
num_classes = 200
bounds = (-1, 1)
elif dataset_name == 'imagenet':
dataset = imagenet_input.imagenet_input(
split, batch_size, image_size, is_training)
num_examples = imagenet_input.num_examples_per_epoch(split)
num_classes = 1001
bounds = (-1, 1)
else:
raise ValueError('Invalid dataset %s' % dataset_name)
return dataset, num_examples, num_classes, bounds
```
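A hedged sketch of calling `get_dataset`; because the module uses relative imports it is meant to be used from inside its package, so the import below and the iterator usage are assumptions for illustration (TF 1.x style).
```python
# Minimal sketch: build the tiny_imagenet training input pipeline.
# Import path assumed; the dataset is expected to yield (image, label) batches.
from datasets import dataset_factory

dataset, num_examples, num_classes, bounds = dataset_factory.get_dataset(
    'tiny_imagenet', split='train', batch_size=32, image_size=64,
    is_training=True)
print(num_classes, bounds)   # 200 classes, pixel bounds (-1, 1) per the code above

images, labels = dataset.make_one_shot_iterator().get_next()  # TF 1.x iterator
```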
#### File: astronet/util/configdict_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
from research.astronet.astronet.util import configdict
class ConfigDictTest(absltest.TestCase):
def setUp(self):
super(ConfigDictTest, self).setUp()
self._config = configdict.ConfigDict({
"int": 1,
"float": 2.0,
"bool": True,
"str": "hello",
"nested": {
"int": 3,
},
"double_nested": {
"a": {
"int": 3,
},
"b": {
"float": 4.0,
}
}
})
def testAccess(self):
# Simple types.
self.assertEqual(1, self._config.int)
self.assertEqual(1, self._config["int"])
self.assertEqual(2.0, self._config.float)
self.assertEqual(2.0, self._config["float"])
self.assertTrue(self._config.bool)
self.assertTrue(self._config["bool"])
self.assertEqual("hello", self._config.str)
self.assertEqual("hello", self._config["str"])
# Single nested config.
self.assertEqual(3, self._config.nested.int)
self.assertEqual(3, self._config["nested"].int)
self.assertEqual(3, self._config.nested["int"])
self.assertEqual(3, self._config["nested"]["int"])
# Double nested config.
self.assertEqual(3, self._config["double_nested"].a.int)
self.assertEqual(3, self._config["double_nested"]["a"].int)
self.assertEqual(3, self._config["double_nested"].a["int"])
self.assertEqual(3, self._config["double_nested"]["a"]["int"])
self.assertEqual(4.0, self._config.double_nested.b.float)
self.assertEqual(4.0, self._config.double_nested["b"].float)
self.assertEqual(4.0, self._config.double_nested.b["float"])
self.assertEqual(4.0, self._config.double_nested["b"]["float"])
# Nonexistent parameters.
with self.assertRaises(AttributeError):
_ = self._config.nonexistent
with self.assertRaises(KeyError):
_ = self._config["nonexistent"]
  def testSetAttribute(self):
# Overwrite existing simple type.
self._config.int = 40
self.assertEqual(40, self._config.int)
# Overwrite existing nested simple type.
self._config.nested.int = 40
self.assertEqual(40, self._config.nested.int)
# Overwrite existing nested config.
self._config.double_nested.a = {"float": 50.0}
self.assertIsInstance(self._config.double_nested.a, configdict.ConfigDict)
self.assertEqual(50.0, self._config.double_nested.a.float)
self.assertNotIn("int", self._config.double_nested.a)
# Set new simple type.
self._config.int_2 = 10
self.assertEqual(10, self._config.int_2)
# Set new nested simple type.
self._config.nested.int_2 = 20
self.assertEqual(20, self._config.nested.int_2)
# Set new nested config.
self._config.double_nested.c = {"int": 30}
self.assertIsInstance(self._config.double_nested.c, configdict.ConfigDict)
self.assertEqual(30, self._config.double_nested.c.int)
def testSetItem(self):
# Overwrite existing simple type.
self._config["int"] = 40
self.assertEqual(40, self._config.int)
# Overwrite existing nested simple type.
self._config["nested"].int = 40
self.assertEqual(40, self._config.nested.int)
self._config.nested["int"] = 50
self.assertEqual(50, self._config.nested.int)
# Overwrite existing nested config.
self._config.double_nested["a"] = {"float": 50.0}
self.assertIsInstance(self._config.double_nested.a, configdict.ConfigDict)
self.assertEqual(50.0, self._config.double_nested.a.float)
self.assertNotIn("int", self._config.double_nested.a)
# Set new simple type.
self._config["int_2"] = 10
self.assertEqual(10, self._config.int_2)
# Set new nested simple type.
self._config.nested["int_2"] = 20
self.assertEqual(20, self._config.nested.int_2)
self._config.nested["int_3"] = 30
self.assertEqual(30, self._config.nested.int_3)
# Set new nested config.
self._config.double_nested["c"] = {"int": 30}
self.assertIsInstance(self._config.double_nested.c, configdict.ConfigDict)
self.assertEqual(30, self._config.double_nested.c.int)
def testDelete(self):
# Simple types.
self.assertEqual(1, self._config.int)
del self._config.int
with self.assertRaises(AttributeError):
_ = self._config.int
with self.assertRaises(KeyError):
_ = self._config["int"]
self.assertEqual(2.0, self._config["float"])
del self._config["float"]
with self.assertRaises(AttributeError):
_ = self._config.float
with self.assertRaises(KeyError):
_ = self._config["float"]
# Nested config.
self.assertEqual(3, self._config.nested.int)
del self._config.nested
with self.assertRaises(AttributeError):
_ = self._config.nested
with self.assertRaises(KeyError):
_ = self._config["nested"]
if __name__ == "__main__":
absltest.main()
```
#### File: astronet/util/config_util_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from research.astronet.astronet.util import config_util
class ConfigUtilTest(tf.test.TestCase):
def testUnflatten(self):
# Empty dict.
self.assertDictEqual(config_util.unflatten({}), {})
# Already flat dict.
self.assertDictEqual(
config_util.unflatten({
"a": 1,
"b": 2
}), {
"a": 1,
"b": 2
})
# Nested dict.
self.assertDictEqual(
config_util.unflatten({
"a": 1,
"b.c": 2,
"b.d.e": 3,
"b.d.f": 4,
}), {
"a": 1,
"b": {
"c": 2,
"d": {
"e": 3,
"f": 4,
}
}
})
if __name__ == "__main__":
tf.test.main()
```
#### File: brain_coder/common/schedules.py
```python
"""Schedule functions for controlling hparams over time."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from abc import ABCMeta
from abc import abstractmethod
import math
from research.brain_coder.common import config_lib # brain coder
class Schedule(object):
"""Schedule is a function which sets a hyperparameter's value over time.
  For example, a schedule can be used to decay an hparam, or oscillate it over
time.
This object is constructed with an instance of config_lib.Config (will be
specific to each class implementation). For example if this is a decay
schedule, the config may specify the rate of decay and decay start time. Then
the object instance is called like a function, mapping global step (an integer
counting how many calls to the train op have been made) to the hparam value.
Properties of a schedule function f(t):
0) Domain of t is the non-negative integers (t may be 0).
1) Range of f is the reals.
2) Schedule functions can assume that they will be called in time order. This
allows schedules to be stateful.
3) Schedule functions should be deterministic. Two schedule instances with the
same config must always give the same value for each t, and regardless of
what t's it was previously called on. Users may call f(t) on arbitrary
(positive) time jumps. Essentially, multiple schedule instances used in
replica training will behave the same.
4) Duplicate successive calls on the same time are allowed.
"""
__metaclass__ = ABCMeta
@abstractmethod
def __init__(self, config):
"""Construct this schedule with a config specific to each class impl.
Args:
config: An instance of config_lib.Config.
"""
pass
@abstractmethod
def __call__(self, global_step):
"""Map `global_step` to a value.
`global_step` is an integer counting how many calls to the train op have
been made across all replicas (hence why it is global). Implementations
may assume calls to be made in time order, i.e. `global_step` now >=
previous `global_step` values.
Args:
global_step: Non-negative integer.
Returns:
Hparam value at this step. A number.
"""
pass
class ConstSchedule(Schedule):
"""Constant function.
config:
const: Constant value at every step.
f(t) = const.
"""
def __init__(self, config):
super(ConstSchedule, self).__init__(config)
self.const = config.const
def __call__(self, global_step):
return self.const
class LinearDecaySchedule(Schedule):
"""Linear decay function.
config:
initial: Decay starts from this value.
final: Decay ends at this value.
start_time: Step when decay starts. Constant before it.
end_time: When decay ends. Constant after it.
f(t) is a linear function when start_time <= t <= end_time, with slope of
(final - initial) / (end_time - start_time). f(t) = initial
when t <= start_time. f(t) = final when t >= end_time.
If start_time == end_time, this becomes a step function.
"""
def __init__(self, config):
super(LinearDecaySchedule, self).__init__(config)
self.initial = config.initial
self.final = config.final
self.start_time = config.start_time
self.end_time = config.end_time
if self.end_time < self.start_time:
raise ValueError('start_time must be before end_time.')
# Linear interpolation.
self._time_diff = float(self.end_time - self.start_time)
self._diff = float(self.final - self.initial)
self._slope = (
self._diff / self._time_diff if self._time_diff > 0 else float('inf'))
def __call__(self, global_step):
if global_step <= self.start_time:
return self.initial
if global_step > self.end_time:
return self.final
return self.initial + (global_step - self.start_time) * self._slope
class ExponentialDecaySchedule(Schedule):
"""Exponential decay function.
See https://en.wikipedia.org/wiki/Exponential_decay.
Use this decay function to decay over orders of magnitude. For example, to
decay learning rate from 1e-2 to 1e-6. Exponential decay will decay the
exponent linearly.
config:
initial: Decay starts from this value.
final: Decay ends at this value.
start_time: Step when decay starts. Constant before it.
end_time: When decay ends. Constant after it.
f(t) is an exponential decay function when start_time <= t <= end_time. The
decay rate and amplitude are chosen so that f(t) = initial when
t = start_time, and f(t) = final when t = end_time. f(t) is constant for
t < start_time or t > end_time. initial and final must be positive values.
If start_time == end_time, this becomes a step function.
"""
def __init__(self, config):
super(ExponentialDecaySchedule, self).__init__(config)
self.initial = config.initial
self.final = config.final
self.start_time = config.start_time
self.end_time = config.end_time
if self.initial <= 0 or self.final <= 0:
raise ValueError('initial and final must be positive numbers.')
# Linear interpolation in log space.
self._linear_fn = LinearDecaySchedule(
config_lib.Config(
initial=math.log(self.initial),
final=math.log(self.final),
start_time=self.start_time,
end_time=self.end_time))
def __call__(self, global_step):
return math.exp(self._linear_fn(global_step))
class SmootherstepDecaySchedule(Schedule):
"""Smootherstep decay function.
A sigmoid-like transition from initial to final values. A smoother
transition than linear and exponential decays, hence the name.
See https://en.wikipedia.org/wiki/Smoothstep.
config:
initial: Decay starts from this value.
final: Decay ends at this value.
start_time: Step when decay starts. Constant before it.
end_time: When decay ends. Constant after it.
f(t) is fully defined here:
https://en.wikipedia.org/wiki/Smoothstep#Variations.
f(t) is smooth, i.e. its first derivative exists everywhere.
"""
def __init__(self, config):
super(SmootherstepDecaySchedule, self).__init__(config)
self.initial = config.initial
self.final = config.final
self.start_time = config.start_time
self.end_time = config.end_time
if self.end_time < self.start_time:
raise ValueError('start_time must be before end_time.')
self._time_diff = float(self.end_time - self.start_time)
self._diff = float(self.final - self.initial)
def __call__(self, global_step):
if global_step <= self.start_time:
return self.initial
if global_step > self.end_time:
return self.final
x = (global_step - self.start_time) / self._time_diff
# Smootherstep
return self.initial + x * x * x * (x * (x * 6 - 15) + 10) * self._diff
class HardOscillatorSchedule(Schedule):
"""Hard oscillator function.
config:
high: Max value of the oscillator. Value at constant plateaus.
low: Min value of the oscillator. Value at constant valleys.
start_time: Global step when oscillation starts. Constant before this.
period: Width of one oscillation, i.e. number of steps over which the
oscillation takes place.
transition_fraction: Fraction of the period spent transitioning between high
and low values. 50% of this time is spent rising, and 50% of this time
is spent falling. 50% of the remaining time is spent constant at the
high value, and 50% of the remaining time is spent constant at the low
value. transition_fraction = 1.0 means the entire period is spent
rising and falling. transition_fraction = 0.0 means no time is spent
rising and falling, i.e. the function jumps instantaneously between
high and low.
f(t) = high when t < start_time.
f(t) is periodic when t >= start_time, with f(t + period) = f(t).
f(t) is linear with positive slope when rising, and negative slope when
falling. At the start of the period t0, f(t0) = high and begins to descend.
At the middle of the period f is low and is constant until the ascension
begins. f then rises from low to high and is constant again until the period
repeats.
Note: when transition_fraction is 0, f starts the period low and ends high.
"""
def __init__(self, config):
super(HardOscillatorSchedule, self).__init__(config)
self.high = config.high
self.low = config.low
self.start_time = config.start_time
self.period = float(config.period)
self.transition_fraction = config.transition_fraction
self.half_transition_fraction = config.transition_fraction / 2.0
if self.transition_fraction < 0 or self.transition_fraction > 1.0:
raise ValueError('transition_fraction must be between 0 and 1.0')
if self.period <= 0:
raise ValueError('period must be positive')
self._slope = (
float(self.high - self.low) / self.half_transition_fraction
if self.half_transition_fraction > 0 else float('inf'))
def __call__(self, global_step):
if global_step < self.start_time:
return self.high
period_pos = ((global_step - self.start_time) / self.period) % 1.0
if period_pos >= 0.5:
# ascending
period_pos -= 0.5
if period_pos < self.half_transition_fraction:
return self.low + period_pos * self._slope
else:
return self.high
else:
# descending
if period_pos < self.half_transition_fraction:
return self.high - period_pos * self._slope
else:
return self.low
_NAME_TO_CONFIG = {
'const': ConstSchedule,
'linear_decay': LinearDecaySchedule,
'exp_decay': ExponentialDecaySchedule,
'smooth_decay': SmootherstepDecaySchedule,
'hard_osc': HardOscillatorSchedule,
}
def make_schedule(config):
"""Schedule factory.
Given `config` containing a `fn` property, a Schedule implementation is
instantiated with `config`. See `_NAME_TO_CONFIG` for `fn` options.
Args:
config: Config with a `fn` option that specifies which Schedule
implementation to use. `config` is passed into the constructor.
Returns:
A Schedule impl instance.
"""
schedule_class = _NAME_TO_CONFIG[config.fn]
return schedule_class(config)
```
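A brief usage sketch for the factory above. It assumes `config_lib.Config` accepts keyword arguments and exposes them as attributes, which is consistent with how `ExponentialDecaySchedule` builds its internal config; the import paths mirror the ones used in this file.
```python
from research.brain_coder.common import config_lib
from research.brain_coder.common import schedules

# Decay linearly from 1.0 to 0.0 between steps 100 and 200.
config = config_lib.Config(
    fn='linear_decay', initial=1.0, final=0.0, start_time=100, end_time=200)
schedule = schedules.make_schedule(config)

print(schedule(0))    # 1.0 (constant before start_time)
print(schedule(150))  # 0.5 (halfway through the decay window)
print(schedule(500))  # 0.0 (constant after end_time)
```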
#### File: brain_coder/single_task/run.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
r"""Run training.
Choose training algorithm and task(s) and follow these examples.
Run synchronous policy gradient training locally:
CONFIG="agent=c(algorithm='pg'),env=c(task='reverse')"
OUT_DIR="/tmp/bf_pg_local"
rm -rf $OUT_DIR
bazel run -c opt single_task:run -- \
--alsologtostderr \
--config="$CONFIG" \
--max_npe=0 \
--logdir="$OUT_DIR" \
--summary_interval=1 \
--model_v=0
learning/brain/tensorboard/tensorboard.sh --port 12345 --logdir "$OUT_DIR"
Run genetic algorithm locally:
CONFIG="agent=c(algorithm='ga'),env=c(task='reverse')"
OUT_DIR="/tmp/bf_ga_local"
rm -rf $OUT_DIR
bazel run -c opt single_task:run -- \
--alsologtostderr \
--config="$CONFIG" \
--max_npe=0 \
--logdir="$OUT_DIR"
Run uniform random search locally:
CONFIG="agent=c(algorithm='rand'),env=c(task='reverse')"
OUT_DIR="/tmp/bf_rand_local"
rm -rf $OUT_DIR
bazel run -c opt single_task:run -- \
--alsologtostderr \
--config="$CONFIG" \
--max_npe=0 \
--logdir="$OUT_DIR"
"""
from absl import app
from absl import flags
from absl import logging
from ..single_task import defaults # brain coder
from ..single_task import ga_train # brain coder
from ..single_task import pg_train # brain coder
FLAGS = flags.FLAGS
flags.DEFINE_string('config', '', 'Configuration.')
flags.DEFINE_string(
'logdir', None, 'Absolute path where to write results.')
flags.DEFINE_integer('task_id', 0, 'ID for this worker.')
flags.DEFINE_integer('num_workers', 1, 'How many workers there are.')
flags.DEFINE_integer(
'max_npe', 0,
'NPE = number of programs executed. Maximum number of programs to execute '
'in each run. Training will complete when this threshold is reached. Set '
'to 0 for unlimited training.')
flags.DEFINE_integer(
'num_repetitions', 1,
'Number of times the same experiment will be run (globally across all '
'workers). Each run is independent.')
flags.DEFINE_string(
'log_level', 'INFO',
'The threshold for what messages will be logged. One of DEBUG, INFO, WARN, '
'ERROR, or FATAL.')
# To register an algorithm:
# 1) Add dependency in the BUILD file to this build rule.
# 2) Import the algorithm's module at the top of this file.
# 3) Add a new entry in the following dict. The key is the algorithm name
# (used to select the algorithm in the config). The value is the module
# defining the expected functions for training and tuning. See the docstring
# for `get_namespace` for further details.
ALGORITHM_REGISTRATION = {
'pg': pg_train,
'ga': ga_train,
'rand': ga_train,
}
def get_namespace(config_string):
"""Get namespace for the selected algorithm.
Users who want to add additional algorithm types should modify this function.
The algorithm's namespace should contain the following functions:
run_training: Run the main training loop.
define_tuner_hparam_space: Return the hparam tuning space for the algo.
write_hparams_to_config: Helper for tuning. Write hparams chosen for tuning
to the Config object.
Look at pg_train.py and ga_train.py for function signatures and
implementations.
Args:
config_string: String representation of a Config object. This will get
parsed into a Config in order to determine what algorithm to use.
Returns:
algorithm_namespace: The module corresponding to the algorithm given in the
config.
config: The Config object resulting from parsing `config_string`.
Raises:
ValueError: If config.agent.algorithm is not one of the registered
algorithms.
"""
config = defaults.default_config_with_updates(config_string)
if config.agent.algorithm not in ALGORITHM_REGISTRATION:
raise ValueError('Unknown algorithm type "%s"' % (config.agent.algorithm,))
else:
return ALGORITHM_REGISTRATION[config.agent.algorithm], config
def main(argv):
  del argv  # Unused.
  logging.set_verbosity(FLAGS.log_level)
  flags.mark_flag_as_required('logdir')
  # Select the algorithm's namespace from the config and run its training
  # loop (see the `get_namespace` docstring for the expected functions).
  ns, _ = get_namespace(FLAGS.config)
  ns.run_training()
if __name__ == '__main__':
app.run(main)
```
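A short sketch of how `get_namespace` is used with a config string in the same format as the examples in the module docstring. Parsing is delegated to `defaults.default_config_with_updates`, so this assumes that module and its default config are importable; the package path is an assumption based on the import style used elsewhere in this corpus.
```python
from research.brain_coder.single_task import run

ns, config = run.get_namespace("agent=c(algorithm='ga'),env=c(task='reverse')")
# `ns` is the ga_train module (see ALGORITHM_REGISTRATION) and
# config.agent.algorithm == 'ga'; ns.run_training(...) would start training.
```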
#### File: cognitive_mapping_and_planning/datasets/factory.py
```python
r"""Wrapper for selecting the navigation environment that we want to train and
test on.
"""
import os
import glob
import logging
from research.cognitive_mapping_and_planning.render import swiftshader_renderer as renderer
from research.cognitive_mapping_and_planning.src import file_utils as fu
from research.cognitive_mapping_and_planning.src import utils as utils
def get_dataset(dataset_name):
dataset = None
if dataset_name == 'sbpd':
dataset = StanfordBuildingParserDataset(dataset_name)
else:
logging.fatal('Not one of sbpd')
return dataset
class Loader():
def get_data_dir(self):
pass
def get_meta_data(self, file_name, data_dir=None):
if data_dir is None:
data_dir = self.get_data_dir()
full_file_name = os.path.join(data_dir, 'meta', file_name)
assert (fu.exists(full_file_name)), \
'{:s} does not exist'.format(full_file_name)
ext = os.path.splitext(full_file_name)[1]
ls = None
if ext == '.txt':
ls = []
with fu.fopen(full_file_name, 'r') as f:
for l in f:
ls.append(l.rstrip())
elif ext == '.pkl':
ls = utils.load_variables(full_file_name)
return ls
def load_building(self, name, data_dir=None):
if data_dir is None:
data_dir = self.get_data_dir()
out = {'name': name, 'data_dir': data_dir,
'room_dimension_file': os.path.join(data_dir, 'room-dimension',
name + '.pkl'),
'class_map_folder': os.path.join(data_dir, 'class-maps')}
return out
def load_building_meshes(self, building):
dir_name = os.path.join(building['data_dir'], 'mesh', building['name'])
mesh_file_name = glob.glob1(dir_name, '*.obj')[0]
mesh_file_name_full = os.path.join(dir_name, mesh_file_name)
logging.error('Loading building from obj file: %s', mesh_file_name_full)
shape = renderer.Shape(mesh_file_name_full, load_materials=True,
name_prefix=building['name'] + '_')
return [shape]
class StanfordBuildingParserDataset(Loader):
def __init__(self, ver):
self.ver = ver
self.data_dir = None
def get_data_dir(self):
if self.data_dir is None:
self.data_dir = 'data/stanford_building_parser_dataset/'
return self.data_dir
def get_benchmark_sets(self):
return self._get_benchmark_sets()
def get_split(self, split_name):
if self.ver == 'sbpd':
return self._get_split(split_name)
else:
logging.fatal('Unknown version.')
@staticmethod
def _get_benchmark_sets():
sets = ['train1', 'val', 'test']
return sets
@staticmethod
def _get_split(split_name):
train = ['area1', 'area5a', 'area5b', 'area6']
train1 = ['area1']
val = ['area3']
test = ['area4']
sets = {'train': train, 'train1': train1, 'val': val, 'test': test,
'all': sorted(list(set(train + val + test)))}
return sets[split_name]
```
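A quick usage sketch for the loader above. The splits and the data directory are the values hard-coded in `StanfordBuildingParserDataset`; actually loading meshes or metadata additionally requires the dataset to be present on disk. The import path is assumed from the style used elsewhere in this corpus.
```python
from research.cognitive_mapping_and_planning.datasets import factory

dataset = factory.get_dataset('sbpd')
print(dataset.get_data_dir())        # data/stanford_building_parser_dataset/
print(dataset.get_benchmark_sets())  # ['train1', 'val', 'test']
print(dataset.get_split('train'))    # ['area1', 'area5a', 'area5b', 'area6']
```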
#### File: entropy_coder/lib/blocks_masked_conv2d.py
```python
import numpy as np
from six.moves import xrange
import tensorflow as tf
from research.compression.entropy_coder.lib import block_util
from research.compression.entropy_coder.lib import blocks_std
# pylint does not recognize block_base.BlockBase.__call__().
# pylint: disable=not-callable
class RasterScanConv2D(blocks_std.Conv2DBase):
"""Conv2D with no dependency on future pixels (in raster scan order).
For example, assuming a 5 x 5 kernel, the following spatial mask is applied to the kernel:
T T T T T
T T T T T
T T x F F
F F F F F
F F F F F
where 'T' marks pixels that are available when computing the convolution
for pixel 'x', and 'F' marks pixels that are not.
'x' itself is not available if strict_order is True; otherwise it is
available.
"""
def __init__(self, depth, filter_size, strides, padding,
strict_order=True,
bias=None, act=None, initializer=None, name=None):
super(RasterScanConv2D, self).__init__(
depth, filter_size, strides, padding, bias, act, name=name)
if (filter_size[0] % 2) != 1 or (filter_size[1] % 2) != 1:
raise ValueError('Kernel size should be odd.')
with self._BlockScope():
if initializer is None:
initializer = block_util.RsqrtInitializer(dims=(0, 1, 2))
self._initializer = initializer
self._strict_order = strict_order
def _CreateKernel(self, shape, dtype):
init = self._initializer(shape, dtype)
kernel = self.NewVar(init)
mask = np.ones(shape[:2], dtype=dtype.as_numpy_dtype)
center = shape[:2] // 2
mask[center[0] + 1:, :] = 0
if not self._strict_order:
mask[center[0], center[1] + 1:] = 0
else:
mask[center[0], center[1]:] = 0
mask = mask.reshape(mask.shape + (1, 1))
return tf.convert_to_tensor(mask, dtype) * kernel
class DepthOrderConv2D(blocks_std.Conv2DBase):
"""Conv2D with no dependency on higher depth dimensions.
More precisely, the output depth #n has only dependencies on input depths #k
for k < n (if strict_order is True) or for k <= n (if strict_order is False).
"""
def __init__(self, depth, filter_size, strides, padding,
strict_order=True,
bias=None, act=None, initializer=None, name=None):
super(DepthOrderConv2D, self).__init__(
depth, filter_size, strides, padding, bias, act, name=name)
with self._BlockScope():
if initializer is None:
initializer = block_util.RsqrtInitializer(dims=(0, 1, 2))
self._initializer = initializer
self._strict_order = strict_order
def _CreateKernel(self, shape, dtype):
init = self._initializer(shape, dtype)
kernel = self.NewVar(init)
mask = np.ones(shape[2:], dtype=dtype.as_numpy_dtype)
depth_output = shape[3]
for d in xrange(depth_output):
if self._strict_order:
mask[d:, d] = 0
else:
mask[d + 1:, d] = 0
mask = mask.reshape((1, 1) + mask.shape)
return tf.convert_to_tensor(mask, dtype) * kernel
class GroupRasterScanConv2D(blocks_std.Conv2DBase):
"""Conv2D with no dependency on future pixels (in raster scan order).
This version only introduces dependencies on previous pixels in raster scan
order. It can also introduce some dependencies on previous depth positions
of the current pixel (current pixel = center pixel of the kernel) in the
following way:
the depth dimension of the input is split into Ki groups of size
|input_group_size|, the output dimension is split into Ko groups of size
|output_group_size| (usually Ki == Ko). Each output group ko of the current
pixel position can only depend on previous input groups ki
(i.e. ki < ko if strict_order is True or ki <= ko if strict_order is False).
Notes:
- Block RasterScanConv2D is a special case of GroupRasterScanConv2D
where Ki == Ko == 1 (i.e. input_group_size == input_depth and
output_group_size == output_depth).
- For 1x1 convolution, block DepthOrderConv2D is a special case of
GroupRasterScanConv2D where input_group_size == 1 and
output_group_size == 1.
"""
def __init__(self, depth, filter_size, strides, padding,
strict_order=True,
input_group_size=1,
output_group_size=1,
bias=None, act=None, initializer=None, name=None):
super(GroupRasterScanConv2D, self).__init__(
depth, filter_size, strides, padding, bias, act, name=name)
if (filter_size[0] % 2) != 1 or (filter_size[1] % 2) != 1:
raise ValueError('Kernel size should be odd.')
with self._BlockScope():
if initializer is None:
initializer = block_util.RsqrtInitializer(dims=(0, 1, 2))
self._initializer = initializer
self._input_group_size = input_group_size
self._output_group_size = output_group_size
self._strict_order = strict_order
if depth % self._output_group_size != 0:
raise ValueError(
'Invalid depth group size: {} for depth {}'.format(
self._output_group_size, depth))
self._output_group_count = depth // self._output_group_size
def _CreateKernel(self, shape, dtype):
init = self._initializer(shape, dtype)
kernel = self.NewVar(init)
depth_input = shape[2]
if depth_input % self._input_group_size != 0:
raise ValueError(
'Invalid depth group size: {} for depth {}'.format(
self._input_group_size, depth_input))
input_group_count = depth_input // self._input_group_size
output_group_count = self._output_group_count
# Set the mask to 0 for future pixels in raster scan order.
center = shape[:2] // 2
mask = np.ones([shape[0], shape[1],
input_group_count, self._input_group_size,
output_group_count, self._output_group_size],
dtype=dtype.as_numpy_dtype)
mask[center[0] + 1:, :, :, :, :, :] = 0
mask[center[0], center[1] + 1:, :, :, :, :] = 0
# Adjust the mask for the current position (the center position).
depth_output = shape[3]
for d in xrange(output_group_count):
mask[center[0], center[1], d + 1:, :, d:d + 1, :] = 0
if self._strict_order:
mask[center[0], center[1], d, :, d:d + 1, :] = 0
mask = mask.reshape([shape[0], shape[1], depth_input, depth_output])
return tf.convert_to_tensor(mask, dtype) * kernel
class InFillingConv2D(blocks_std.Conv2DBase):
"""Conv2D with kernel having no dependency on the current pixel.
For example, assuming a 5 x 5 kernel, the following spatial mask is applied to the kernel:
T T T T T
T T T T T
T T x T T
T T T T T
T T T T T
where 'T' marks a pixel which is available when computing the convolution
for pixel 'x'. 'x' itself is not available.
"""
def __init__(self, depth, filter_size, strides, padding,
bias=None, act=None, initializer=None, name=None):
super(InFillingConv2D, self).__init__(
depth, filter_size, strides, padding, bias, act, name=name)
if (filter_size[0] % 2) != 1 or (filter_size[1] % 2) != 1:
raise ValueError('Kernel size should be odd.')
if filter_size[0] == 1 and filter_size[1] == 1:
raise ValueError('Kernel size should be larger than 1x1.')
with self._BlockScope():
if initializer is None:
initializer = block_util.RsqrtInitializer(dims=(0, 1, 2))
self._initializer = initializer
def _CreateKernel(self, shape, dtype):
init = self._initializer(shape, dtype)
kernel = self.NewVar(init)
mask = np.ones(shape[:2], dtype=dtype.as_numpy_dtype)
center = shape[:2] // 2
mask[center[0], center[1]] = 0
mask = mask.reshape(mask.shape + (1, 1))
return tf.convert_to_tensor(mask, dtype) * kernel
```
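To make the masking concrete, here is a small NumPy-only sketch that reproduces the spatial mask built by `RasterScanConv2D._CreateKernel` for a 5 x 5 kernel, matching the 'T'/'F' diagram in its docstring (illustrative; the real block multiplies this mask into the kernel variable):
```python
import numpy as np


def raster_scan_mask(kernel_height, kernel_width, strict_order=True):
  """Returns 1 where an input pixel may be used and 0 where it may not."""
  mask = np.ones((kernel_height, kernel_width), dtype=np.float32)
  center = (kernel_height // 2, kernel_width // 2)
  mask[center[0] + 1:, :] = 0            # Rows below the center pixel.
  if strict_order:
    mask[center[0], center[1]:] = 0      # Center pixel and everything to its right.
  else:
    mask[center[0], center[1] + 1:] = 0  # Only pixels to the right of the center.
  return mask


print(raster_scan_mask(5, 5))
# [[1. 1. 1. 1. 1.]
#  [1. 1. 1. 1. 1.]
#  [1. 1. 0. 0. 0.]
#  [0. 0. 0. 0. 0.]
#  [0. 0. 0. 0. 0.]]
```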
#### File: entropy_coder/lib/blocks_std_test.py
```python
from __future__ import division
from __future__ import unicode_literals
import math
import numpy as np
from six.moves import xrange
import tensorflow as tf
from research.compression.entropy_coder.lib import blocks_std
def _NumpyConv2D(x, f, strides, padding, rate=1):
assert strides[0] == 1 and strides[3] == 1, strides
if rate > 1:
f_shape = f.shape
expand_f = np.zeros([f_shape[0], ((f_shape[1] - 1) * rate + 1),
f_shape[2], f_shape[3]])
expand_f[:, [y * rate for y in range(f_shape[1])], :, :] = f
f = np.zeros([((f_shape[0] - 1) * rate + 1), expand_f.shape[1],
f_shape[2], f_shape[3]])
f[[y * rate for y in range(f_shape[0])], :, :, :] = expand_f
if padding != 'VALID':
assert x.shape[1] > 0 and x.shape[2] > 0, x.shape
# Compute the number of padded rows and cols.
# See Conv2D block comments for a math explanation.
remainder = ((x.shape[1] - 1) % strides[1], (x.shape[2] - 1) % strides[2])
pad_rows = f.shape[0] - remainder[0] - 1
pad_cols = f.shape[1] - remainder[1] - 1
pad = ((0, 0),
(pad_rows // 2, (pad_rows + 1) // 2),
(pad_cols // 2, (pad_cols + 1) // 2),
(0, 0))
# Pad the input using numpy.pad().
mode = None
if padding == 'SAME':
mode = str('constant')
if padding == 'REFLECT':
mode = str('reflect')
if padding == 'SYMMETRIC':
mode = str('symmetric')
x = np.pad(x, pad, mode=mode)
# Since x is now properly padded, proceed as if padding mode is VALID.
x_window = np.empty(
(x.shape[0],
int(math.ceil((x.shape[1] - f.shape[0] + 1) / strides[1])),
int(math.ceil((x.shape[2] - f.shape[1] + 1) / strides[2])),
np.prod(f.shape[:3])))
# The output at pixel location (i, j) is the result of linear transformation
# applied to the window whose top-left corner is at
# (i * row_stride, j * col_stride).
for i in xrange(x_window.shape[1]):
k = i * strides[1]
for j in xrange(x_window.shape[2]):
l = j * strides[2]
x_window[:, i, j, :] = x[:,
k:(k + f.shape[0]),
l:(l + f.shape[1]),
:].reshape((x_window.shape[0], -1))
y = np.tensordot(x_window, f.reshape((-1, f.shape[3])), axes=1)
return y
class BlocksStdTest(tf.test.TestCase):
def CheckUnary(self, y, op_type):
self.assertEqual(op_type, y.op.type)
self.assertEqual(1, len(y.op.inputs))
return y.op.inputs[0]
def CheckBinary(self, y, op_type):
self.assertEqual(op_type, y.op.type)
self.assertEqual(2, len(y.op.inputs))
return y.op.inputs
def testPassThrough(self):
p = blocks_std.PassThrough()
x = tf.placeholder(dtype=tf.float32, shape=[1])
self.assertIs(p(x), x)
def CheckBiasAdd(self, y, b):
x, u = self.CheckBinary(y, 'BiasAdd')
self.assertIs(u, b._bias.value())
self.assertEqual(x.dtype, u.dtype.base_dtype)
return x
def testBiasAdd(self):
b = blocks_std.BiasAdd()
x = tf.placeholder(dtype=tf.float32, shape=[4, 8])
y = b(x)
self.assertEqual(b._bias.get_shape(), x.get_shape()[-1:])
self.assertIs(x, self.CheckBiasAdd(y, b))
def testBiasRankTest(self):
b = blocks_std.BiasAdd()
x = tf.placeholder(dtype=tf.float32, shape=[10])
with self.assertRaises(ValueError):
b(x)
def CheckLinear(self, y, m):
x, w = self.CheckBinary(y, 'MatMul')
self.assertIs(w, m._matrix.value())
self.assertEqual(x.dtype, w.dtype.base_dtype)
return x
def testLinear(self):
m = blocks_std.Linear(10)
x = tf.placeholder(dtype=tf.float32, shape=[8, 9])
y = m(x)
self.assertEqual(m._matrix.get_shape(), [9, 10])
self.assertIs(x, self.CheckLinear(y, m))
def testLinearShared(self):
# Create a linear map which is applied twice on different inputs
# (i.e. the weights of the map are shared).
linear_map = blocks_std.Linear(6)
x1 = tf.random_normal(shape=[1, 5])
x2 = tf.random_normal(shape=[1, 5])
xs = x1 + x2
# Apply the transform with the same weights.
y1 = linear_map(x1)
y2 = linear_map(x2)
ys = linear_map(xs)
with self.test_session() as sess:
# Initialize all the variables of the graph.
tf.global_variables_initializer().run()
y1_res, y2_res, ys_res = sess.run([y1, y2, ys])
self.assertAllClose(y1_res + y2_res, ys_res)
def CheckNN(self, y, nn, act=None):
if act:
pre_act = self.CheckUnary(y, act)
else:
pre_act = y
if not isinstance(nn._bias, blocks_std.PassThrough):
pre_bias = self.CheckBiasAdd(pre_act, nn._bias)
else:
pre_bias = pre_act
if len(nn._matrices) > 1:
self.assertEqual('AddN', pre_bias.op.type)
pre_bias = pre_bias.op.inputs
else:
pre_bias = [pre_bias]
self.assertEqual(len(pre_bias), len(nn._matrices))
return [self.CheckLinear(u, m) for u, m in zip(pre_bias, nn._matrices)]
def testNNWithoutActWithoutBias(self):
nn = blocks_std.NN(10, act=None, bias=None)
x = tf.placeholder(dtype=tf.float32, shape=[5, 7])
y = nn(x)
self.assertIs(x, self.CheckNN(y, nn)[0])
def testNNWithoutBiasWithAct(self):
nn = blocks_std.NN(10, act=tf.nn.relu, bias=None)
x = tf.placeholder(dtype=tf.float32, shape=[5, 7])
y = nn(x)
self.assertIs(x, self.CheckNN(y, nn, 'Relu')[0])
def testNNWithBiasWithoutAct(self):
nn = blocks_std.NN(10, bias=blocks_std.Bias(0), act=None)
x = tf.placeholder(dtype=tf.float32, shape=[5, 7])
y = nn(x)
self.assertIs(x, self.CheckNN(y, nn)[0])
def testNNWithBiasWithAct(self):
nn = blocks_std.NN(10, bias=blocks_std.Bias(0), act=tf.square)
x = tf.placeholder(dtype=tf.float32, shape=[5, 7])
y = nn(x)
self.assertIs(x, self.CheckNN(y, nn, 'Square')[0])
def testNNMultipleInputs(self):
nn = blocks_std.NN(10, bias=blocks_std.Bias(0), act=tf.tanh)
x = [tf.placeholder(dtype=tf.float32, shape=[5, 7]),
tf.placeholder(dtype=tf.float32, shape=[5, 3]),
tf.placeholder(dtype=tf.float32, shape=[5, 5])]
y = nn(*x)
xs = self.CheckNN(y, nn, 'Tanh')
self.assertEqual(len(x), len(xs))
for u, v in zip(x, xs):
self.assertIs(u, v)
def testConv2DSAME(self):
np.random.seed(142536)
x_shape = [4, 16, 11, 5]
f_shape = [4, 3, 5, 6]
strides = [1, 2, 2, 1]
padding = 'SAME'
conv = blocks_std.Conv2D(depth=f_shape[-1],
filter_size=f_shape[0:2],
strides=strides[1:3],
padding=padding,
act=None,
bias=None)
x_value = np.random.normal(size=x_shape)
x = tf.convert_to_tensor(x_value, dtype=tf.float32)
y = conv(x)
with self.test_session():
tf.global_variables_initializer().run()
f_value = conv._kernel.eval()
y_value = y.eval()
y_expected = _NumpyConv2D(x_value, f_value,
strides=strides, padding=padding)
self.assertAllClose(y_expected, y_value)
def testConv2DValid(self):
np.random.seed(253647)
x_shape = [4, 11, 12, 5]
f_shape = [5, 2, 5, 5]
strides = [1, 2, 2, 1]
padding = 'VALID'
conv = blocks_std.Conv2D(depth=f_shape[-1],
filter_size=f_shape[0:2],
strides=strides[1:3],
padding=padding,
act=None,
bias=None)
x_value = np.random.normal(size=x_shape)
x = tf.convert_to_tensor(x_value, dtype=tf.float32)
y = conv(x)
with self.test_session():
tf.global_variables_initializer().run()
f_value = conv._kernel.eval()
y_value = y.eval()
y_expected = _NumpyConv2D(x_value, f_value,
strides=strides, padding=padding)
self.assertAllClose(y_expected, y_value)
def testConv2DSymmetric(self):
np.random.seed(364758)
x_shape = [4, 10, 12, 6]
f_shape = [3, 4, 6, 5]
strides = [1, 1, 1, 1]
padding = 'SYMMETRIC'
conv = blocks_std.Conv2D(depth=f_shape[-1],
filter_size=f_shape[0:2],
strides=strides[1:3],
padding=padding,
act=None,
bias=None)
x_value = np.random.normal(size=x_shape)
x = tf.convert_to_tensor(x_value, dtype=tf.float32)
y = conv(x)
with self.test_session():
tf.global_variables_initializer().run()
f_value = conv._kernel.eval()
y_value = y.eval()
y_expected = _NumpyConv2D(x_value, f_value,
strides=strides, padding=padding)
self.assertAllClose(y_expected, y_value)
def testConv2DReflect(self):
np.random.seed(768798)
x_shape = [4, 10, 12, 6]
f_shape = [3, 4, 6, 5]
strides = [1, 2, 2, 1]
padding = 'REFLECT'
conv = blocks_std.Conv2D(depth=f_shape[-1],
filter_size=f_shape[0:2],
strides=strides[1:3],
padding=padding,
act=None,
bias=None)
x_value = np.random.normal(size=x_shape)
x = tf.convert_to_tensor(x_value, dtype=tf.float32)
y = conv(x)
with self.test_session():
tf.global_variables_initializer().run()
f_value = conv._kernel.eval()
y_value = y.eval()
y_expected = _NumpyConv2D(x_value, f_value,
strides=strides, padding=padding)
self.assertAllClose(y_expected, y_value)
def testConv2DBias(self):
input_shape = [19, 14, 14, 64]
filter_shape = [3, 7, 64, 128]
strides = [1, 2, 2, 1]
output_shape = [19, 6, 4, 128]
conv = blocks_std.Conv2D(depth=filter_shape[-1],
filter_size=filter_shape[0:2],
strides=strides[1:3],
padding='VALID',
act=None,
bias=blocks_std.Bias(1))
x = tf.placeholder(dtype=tf.float32, shape=input_shape)
y = conv(x)
self.CheckBiasAdd(y, conv._bias)
self.assertEqual(output_shape, y.get_shape().as_list())
if __name__ == '__main__':
tf.test.main()
```
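The reference `_NumpyConv2D` above derives the padding amounts from the kernel size and stride before proceeding as if the mode were VALID. A worked instance of that arithmetic for the shapes used in `testConv2DSAME` (input 16 x 11, kernel 4 x 3, row/column stride 2):
```python
# pad = filter_size - ((input_size - 1) % stride) - 1, split as (pad // 2, (pad + 1) // 2).
in_rows, in_cols = 16, 11
f_rows, f_cols = 4, 3
stride = 2

pad_rows = f_rows - ((in_rows - 1) % stride) - 1   # 4 - 1 - 1 = 2
pad_cols = f_cols - ((in_cols - 1) % stride) - 1   # 3 - 0 - 1 = 2
print((pad_rows // 2, (pad_rows + 1) // 2))        # (1, 1) rows of padding (top, bottom)
print((pad_cols // 2, (pad_cols + 1) // 2))        # (1, 1) cols of padding (left, right)

# After padding, the output spatial size is ceil((padded - filter + 1) / stride):
# rows: ceil((18 - 4 + 1) / 2) = 8 = ceil(16 / 2)
# cols: ceil((13 - 3 + 1) / 2) = 6 = ceil(11 / 2)
# which is exactly what SAME padding is supposed to produce.
```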
#### File: cvt_text/model/shared_inputs.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
class Inputs(object):
def __init__(self, config):
self._config = config
self.keep_prob = tf.placeholder(tf.float32, name='keep_prob')
self.label_smoothing = tf.placeholder(tf.float32, name='label_smoothing')
self.lengths = tf.placeholder(tf.int32, shape=[None], name='lengths')
self.mask = tf.placeholder(tf.float32, [None, None], name='mask')
self.words = tf.placeholder(tf.int32, shape=[None, None], name='words')
self.chars = tf.placeholder(tf.int32, shape=[None, None, None],
name='chars')
def create_feed_dict(self, mb, is_training):
cvt = mb.task_name == 'unlabeled'
return {
self.keep_prob: 1.0 if not is_training else
(self._config.unlabeled_keep_prob if cvt else
self._config.labeled_keep_prob),
self.label_smoothing: self._config.label_smoothing
if (is_training and not cvt) else 0.0,
self.lengths: mb.lengths,
self.words: mb.words,
self.chars: mb.chars,
self.mask: mb.mask.astype('float32')
}
```
#### File: cvt_text/training/trainer.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import bisect
import time
import numpy as np
import tensorflow as tf
from research.cvt_text.base import utils
from research.cvt_text.model import multitask_model
from research.cvt_text.task_specific import task_definitions
class Trainer(object):
def __init__(self, config):
self._config = config
self.tasks = [task_definitions.get_task(self._config, task_name)
for task_name in self._config.task_names]
utils.log('Loading Pretrained Embeddings')
pretrained_embeddings = utils.load_cpickle(self._config.word_embeddings)
utils.log('Building Model')
self._model = multitask_model.Model(
self._config, pretrained_embeddings, self.tasks)
utils.log()
def train(self, sess, progress, summary_writer):
heading = lambda s: utils.heading(s, '(' + self._config.model_name + ')')
trained_on_sentences = 0
start_time = time.time()
unsupervised_loss_total, unsupervised_loss_count = 0, 0
supervised_loss_total, supervised_loss_count = 0, 0
for mb in self._get_training_mbs(progress.unlabeled_data_reader):
if mb.task_name != 'unlabeled':
loss = self._model.train_labeled(sess, mb)
supervised_loss_total += loss
supervised_loss_count += 1
if mb.task_name == 'unlabeled':
self._model.run_teacher(sess, mb)
loss = self._model.train_unlabeled(sess, mb)
unsupervised_loss_total += loss
unsupervised_loss_count += 1
mb.teacher_predictions.clear()
trained_on_sentences += mb.size
global_step = self._model.get_global_step(sess)
if global_step % self._config.print_every == 0:
utils.log('step {:} - '
'supervised loss: {:.2f} - '
'unsupervised loss: {:.2f} - '
'{:.1f} sentences per second'.format(
global_step,
supervised_loss_total / max(1, supervised_loss_count),
unsupervised_loss_total / max(1, unsupervised_loss_count),
trained_on_sentences / (time.time() - start_time)))
unsupervised_loss_total, unsupervised_loss_count = 0, 0
supervised_loss_total, supervised_loss_count = 0, 0
if global_step % self._config.eval_dev_every == 0:
heading('EVAL ON DEV')
self.evaluate_all_tasks(sess, summary_writer, progress.history)
progress.save_if_best_dev_model(sess, global_step)
utils.log()
if global_step % self._config.eval_train_every == 0:
heading('EVAL ON TRAIN')
self.evaluate_all_tasks(sess, summary_writer, progress.history, True)
utils.log()
if global_step % self._config.save_model_every == 0:
heading('CHECKPOINTING MODEL')
progress.write(sess, global_step)
utils.log()
def evaluate_all_tasks(self, sess, summary_writer, history, train_set=False):
for task in self.tasks:
results = self._evaluate_task(sess, task, summary_writer, train_set)
if history is not None:
results.append(('step', self._model.get_global_step(sess)))
history.append(results)
if history is not None:
utils.write_cpickle(history, self._config.history_file)
def _evaluate_task(self, sess, task, summary_writer, train_set):
scorer = task.get_scorer()
data = task.train_set if train_set else task.val_set
for i, mb in enumerate(data.get_minibatches(self._config.test_batch_size)):
loss, batch_preds = self._model.test(sess, mb)
scorer.update(mb.examples, batch_preds, loss)
results = scorer.get_results(task.name +
('_train_' if train_set else '_dev_'))
utils.log(task.name.upper() + ': ' + scorer.results_str())
write_summary(summary_writer, results,
global_step=self._model.get_global_step(sess))
return results
def _get_training_mbs(self, unlabeled_data_reader):
datasets = [task.train_set for task in self.tasks]
weights = [np.sqrt(dataset.size) for dataset in datasets]
thresholds = np.cumsum([w / np.sum(weights) for w in weights])
labeled_mbs = [dataset.endless_minibatches(self._config.train_batch_size)
for dataset in datasets]
unlabeled_mbs = unlabeled_data_reader.endless_minibatches()
while True:
dataset_ind = bisect.bisect(thresholds, np.random.random())
yield next(labeled_mbs[dataset_ind])
if self._config.is_semisup:
yield next(unlabeled_mbs)
def write_summary(writer, results, global_step):
for k, v in results:
if 'f1' in k or 'acc' in k or 'loss' in k:
writer.add_summary(tf.Summary(
value=[tf.Summary.Value(tag=k, simple_value=v)]), global_step)
writer.flush()
```
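The `_get_training_mbs` generator above interleaves tasks by drawing each labeled dataset with probability proportional to the square root of its size. A small self-contained sketch of that sampling scheme with hypothetical dataset sizes (illustrative only):
```python
import bisect

import numpy as np

sizes = [100, 400, 2500]               # Hypothetical dataset sizes.
weights = [np.sqrt(s) for s in sizes]  # 10, 20, 50
thresholds = np.cumsum([w / np.sum(weights) for w in weights])  # 0.125, 0.375, 1.0

rng = np.random.RandomState(0)
counts = [0, 0, 0]
for _ in range(10000):
  counts[bisect.bisect(thresholds, rng.random_sample())] += 1
print(counts)  # Roughly proportional to 10 : 20 : 50.
```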
#### File: fivo/models/vrnn_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from research.fivo.fivo.models import base
from research.fivo.fivo.test_utils import create_vrnn
class VrnnTest(tf.test.TestCase):
def test_vrnn_normal_emission(self):
self.run_vrnn(base.ConditionalNormalDistribution, [-4.509767, -3.242221])
def test_vrnn_bernoulli_emission(self):
self.run_vrnn(base.ConditionalBernoulliDistribution, [-2.63812733, -2.02216434])
def run_vrnn(self, generative_class, gt_log_p_x_given_z):
"""Tests the VRNN.
All test values are 'golden values' derived by running the code and copying
the output.
Args:
generative_class: The class of the generative distribution to use.
gt_log_p_x_given_z: The ground-truth value of log p(x|z).
"""
tf.set_random_seed(1234)
with self.test_session() as sess:
batch_size = 2
model, inputs, targets, _ = create_vrnn(generative_class=generative_class,
batch_size=batch_size,
data_lengths=(1, 1),
random_seed=1234)
zero_state = model.zero_state(batch_size=batch_size, dtype=tf.float32)
model.set_observations([inputs, targets], tf.convert_to_tensor([1, 1]))
model_out = model.propose_and_weight(zero_state, 0)
sess.run(tf.global_variables_initializer())
log_alpha, state = sess.run(model_out)
rnn_state, latent_state, rnn_out = state
self.assertAllClose(
rnn_state.c,
[[-0.15014534, 0.0143046, 0.00160489, -0.12899463],
[-0.25015137, 0.09377634, -0.05000039, -0.17123522]])
self.assertAllClose(
rnn_state.h,
[[-0.06842659, 0.00760155, 0.00096106, -0.05434214],
[-0.1109542, 0.0441804, -0.03121299, -0.07882939]]
)
self.assertAllClose(
latent_state,
[[0.025241, 0.122011, 1.066661, 0.316209, -0.25369, 0.108215,
-1.501128, -0.440111, -0.40447, -0.156649, 1.206028],
[0.066824, 0.519937, 0.610973, 0.977739, -0.121889, -0.223429,
-0.32687, -0.578763, -0.56965, 0.751886, 0.681606]]
)
self.assertAllClose(rnn_out, [[-0.068427, 0.007602, 0.000961, -0.054342],
[-0.110954, 0.04418, -0.031213, -0.078829]])
gt_log_q_z = [-8.0895052, -6.75819111]
gt_log_p_z = [-7.246827, -6.512877]
gt_log_alpha = (np.array(gt_log_p_z) +
np.array(gt_log_p_x_given_z) -
np.array(gt_log_q_z))
self.assertAllClose(log_alpha, gt_log_alpha)
def test_vrnn_with_tilt_normal_emission(self):
self.run_vrnn_with_tilt(base.ConditionalNormalDistribution, [-5.198263, -6.31686])
def test_vrnn_with_tilt_bernoulli_emission(self):
self.run_vrnn_with_tilt(base.ConditionalBernoulliDistribution, [-4.66985, -3.802245])
def run_vrnn_with_tilt(self, generative_class, gt_log_alpha):
"""Tests the VRNN with a tilting function.
All test values are 'golden values' derived by running the code and copying
the output.
Args:
generative_class: The class of the generative distribution to use.
gt_log_alpha: The ground-truth value of log alpha.
"""
tf.set_random_seed(1234)
with self.test_session() as sess:
batch_size = 2
model, inputs, targets, _ = create_vrnn(generative_class=generative_class,
batch_size=batch_size,
data_lengths=(3, 2),
random_seed=1234,
use_tilt=True)
zero_state = model.zero_state(batch_size=batch_size, dtype=tf.float32)
model.set_observations([inputs, targets], tf.convert_to_tensor([3, 2]))
model_out = model.propose_and_weight(zero_state, 0)
sess.run(tf.global_variables_initializer())
log_alpha, state = sess.run(model_out)
rnn_state, latent_state, rnn_out = state
self.assertAllClose(
rnn_state.c,
[[-0.15014534, 0.0143046, 0.00160489, -0.12899463],
[-0.25015137, 0.09377634, -0.05000039, -0.17123522]])
self.assertAllClose(
rnn_state.h,
[[-0.06842659, 0.00760155, 0.00096106, -0.05434214],
[-0.1109542, 0.0441804, -0.03121299, -0.07882939]]
)
self.assertAllClose(
latent_state,
[[0.025241, 0.122011, 1.066661, 0.316209, -0.25369, 0.108215,
-1.501128, -0.440111, -0.40447, -0.156649, 1.206028],
[0.066824, 0.519937, 0.610973, 0.977739, -0.121889, -0.223429,
-0.32687, -0.578763, -0.56965, 0.751886, 0.681606]]
)
self.assertAllClose(rnn_out, [[-0.068427, 0.007602, 0.000961, -0.054342],
[-0.110954, 0.04418, -0.031213, -0.078829]])
self.assertAllClose(log_alpha, gt_log_alpha)
if __name__ == "__main__":
tf.test.main()
```
#### File: fivo/fivo/nested_utils.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import tensorflow as tf
from tensorflow.python.util import nest
def map_nested(map_fn, nested):
"""Executes map_fn on every element in a (potentially) nested structure.
Args:
map_fn: A callable to execute on each element in 'nested'.
nested: A potentially nested combination of sequence objects. Sequence
objects include tuples, lists, namedtuples, and all subclasses of
collections.Sequence except strings. See nest.is_sequence for details.
For example [1, ('hello', 4.3)] is a nested structure containing elements
1, 'hello', and 4.3.
Returns:
out_structure: A potentially nested combination of sequence objects with the
same structure as the 'nested' input argument. out_structure
contains the result of applying map_fn to each element in 'nested'. For
example map_nested(lambda x: x+1, [1, (3, 4.3)]) returns [2, (4, 5.3)].
"""
out = map(map_fn, nest.flatten(nested))
return nest.pack_sequence_as(nested, out)
def tile_tensors(tensors, multiples):
"""Tiles a set of Tensors.
Args:
tensors: A potentially nested tuple or list of Tensors with rank
greater than or equal to the length of 'multiples'. The Tensors do not
need to have the same rank, but their rank must not be dynamic.
multiples: A python list of ints indicating how to tile each Tensor
in 'tensors'. Similar to the 'multiples' argument to tf.tile.
Returns:
tiled_tensors: A potentially nested tuple or list of Tensors with the same
structure as the 'tensors' input argument. Contains the result of
applying tf.tile to each Tensor in 'tensors'. When the rank of a Tensor
in 'tensors' is greater than the length of multiples, multiples is padded
at the end with 1s. For example when tiling a 4-dimensional Tensor with
multiples [3, 4], multiples would be padded to [3, 4, 1, 1] before tiling.
"""
def tile_fn(x):
return tf.tile(x, multiples + [1] * (x.shape.ndims - len(multiples)))
return map_nested(tile_fn, tensors)
def where_tensors(condition, x_tensors, y_tensors):
"""Performs a tf.where operation on a two sets of Tensors.
Args:
condition: The condition tensor to use for the where operation.
x_tensors: A potentially nested tuple or list of Tensors.
y_tensors: A potentially nested tuple or list of Tensors. Must have the
same structure as x_tensors.
Returns:
whered_tensors: A potentially nested tuple or list of Tensors with the
same structure as the 'x_tensors' input argument. Contains the result of
applying tf.where(condition, x, y) on each pair of elements in x_tensors
and y_tensors.
"""
flat_x = nest.flatten(x_tensors)
flat_y = nest.flatten(y_tensors)
result = [tf.where(condition, x, y) for x, y in
itertools.izip(flat_x, flat_y)]
return nest.pack_sequence_as(x_tensors, result)
def gather_tensors(tensors, indices):
"""Performs a tf.gather operation on a set of Tensors.
Args:
tensors: A potentially nested tuple or list of Tensors.
indices: The indices to use for the gather operation.
Returns:
gathered_tensors: A potentially nested tuple or list of Tensors with the
same structure as the 'tensors' input argument. Contains the result of
applying tf.gather(x, indices) on each element x in 'tensors'.
"""
return map_nested(lambda x: tf.gather(x, indices), tensors)
def tas_for_tensors(tensors, length, **kwargs):
"""Unstacks a set of Tensors into TensorArrays.
Args:
tensors: A potentially nested tuple or list of Tensors with length in the
first dimension greater than or equal to the 'length' input argument.
length: The desired length of the TensorArrays.
**kwargs: Keyword args for TensorArray constructor.
Returns:
tensorarrays: A potentially nested tuple or list of TensorArrays with the
same structure as 'tensors'. Contains the result of unstacking each Tensor
in 'tensors'.
"""
def map_fn(x):
ta = tf.TensorArray(x.dtype, length,
name=x.name.split(':')[0] + '_ta', **kwargs)
return ta.unstack(x[:length, :])
return map_nested(map_fn, tensors)
def read_tas(tas, index):
"""Performs a read operation on a set of TensorArrays.
Args:
tas: A potentially nested tuple or list of TensorArrays with length greater
than 'index'.
index: The location to read from.
Returns:
read_tensors: A potentially nested tuple or list of Tensors with the same
structure as the 'tas' input argument. Contains the result of
performing a read operation at 'index' on each TensorArray in 'tas'.
"""
return map_nested(lambda ta: ta.read(index), tas)
```
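A minimal pure-Python sketch of what `map_nested` does for plain lists and tuples (the real implementation defers to `tensorflow.python.util.nest`, which also handles namedtuples and other sequence types):
```python
def map_nested_sketch(map_fn, nested):
  """Applies map_fn to every leaf of a nested list/tuple structure."""
  if isinstance(nested, (list, tuple)):
    mapped = [map_nested_sketch(map_fn, x) for x in nested]
    return tuple(mapped) if isinstance(nested, tuple) else mapped
  return map_fn(nested)


print(map_nested_sketch(lambda x: x + 1, [1, (3, 4.3)]))  # [2, (4, 5.3)]
```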
#### File: fivo/fivo/smc.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import research.fivo.fivo.nested_utils as nested
def ess_criterion(log_weights, unused_t):
"""A criterion that resamples based on effective sample size."""
num_particles = tf.shape(log_weights)[0]
# Calculate the effective sample size.
ess_num = 2 * tf.reduce_logsumexp(log_weights, axis=0)
ess_denom = tf.reduce_logsumexp(2 * log_weights, axis=0)
log_ess = ess_num - ess_denom
return log_ess <= tf.log(tf.to_float(num_particles) / 2.0)
def never_resample_criterion(log_weights, unused_t):
"""A criterion that never resamples."""
batch_size = tf.shape(log_weights)[1]
return tf.cast(tf.zeros([batch_size]), tf.bool)
def always_resample_criterion(log_weights, unused_t):
"""A criterion resamples at every timestep."""
batch_size = tf.shape(log_weights)[1]
return tf.cast(tf.ones([batch_size]), tf.bool)
def multinomial_resampling(log_weights, states, num_particles, batch_size,
random_seed=None):
"""Resample states with multinomial resampling.
Args:
log_weights: A [num_particles, batch_size] Tensor representing a batch
of batch_size logits for num_particles-ary Categorical distribution.
states: A nested list of [batch_size*num_particles, data_size] Tensors that
will be resampled from the groups of every num_particles-th row.
num_particles: The number of particles/samples.
batch_size: The batch size.
random_seed: The random seed to pass to the resampling operations in
the particle filter. Mainly useful for testing.
Returns:
resampled_states: A nested list of [batch_size*num_particles, data_size]
Tensors resampled via multinomial sampling.
"""
# Calculate the ancestor indices via resampling. Because we maintain the
# log unnormalized weights, we pass the weights in as logits, allowing
# the distribution object to apply a softmax and normalize them.
resampling_parameters = tf.transpose(log_weights, perm=[1, 0])
resampling_dist = tf.contrib.distributions.Categorical(
logits=resampling_parameters)
ancestors = tf.stop_gradient(
resampling_dist.sample(sample_shape=num_particles, seed=random_seed))
# Because the batch is flattened, we must modify ancestor_inds to index the
# proper samples. The particles in the ith filter are distributed every
# batch_size rows in the batch, and offset i rows from the top. So, to
# correct the indices we multiply by the batch_size and add the proper offset.
# Crucially, when ancestor_inds is flattened the layout of the batch is
# maintained.
offset = tf.expand_dims(tf.range(batch_size), 0)
ancestor_inds = tf.reshape(ancestors * batch_size + offset, [-1])
resampled_states = nested.gather_tensors(states, ancestor_inds)
return resampled_states
def _blend_tensor(blending_weights, tensor, num_particles, batch_size):
"""Blend tensor according to the weights.
The first dimension of tensor is actually a 2d index compacted to a 1d
index and similarly for blended_tensor. So if we index these Tensors
by [(i, j), k], then
blended_tensor[(i, j), k] =
sum_l tensor[(l, j), k] * blending_weights[i, j, l].
Args:
blending_weights: [num_particles, batch_size, num_particles] weights where
the indices represent [sample index, batch index, blending weight index].
tensor: [num_particles * batch_size, state_dim] Tensor to be blended.
num_particles: The number of particles/samples.
batch_size: The batch size.
Returns:
blended_tensor: [num_particles*batch_size, state_dim] blended Tensor.
"""
# tensor is currently [num_particles * batch_size, state_dim], so we reshape
# it to [num_particles, batch_size, state_dim]. Then, transpose it to
# [batch_size, state_size, num_particles].
tensor = tf.transpose(
tf.reshape(tensor, [num_particles, batch_size, -1]), perm=[1, 2, 0])
blending_weights = tf.transpose(blending_weights, perm=[1, 2, 0])
# blending_weights is [batch index, blending weight index, sample index].
# Multiplying these gives a matrix of size [batch_size, state_size,
# num_particles].
tensor = tf.matmul(tensor, blending_weights)
# transpose the tensor to be [num_particles, batch_size, state_size]
# and then reshape it to match the original format.
tensor = tf.reshape(tf.transpose(tensor, perm=[2, 0, 1]),
[num_particles * batch_size, -1])
return tensor
def relaxed_resampling(log_weights, states, num_particles, batch_size,
temperature=0.5, random_seed=None):
"""Resample states with relaxed resampling.
Draw soft "ancestors" using the Gumbel-Softmax distribution.
Args:
log_weights: A [num_particles, batch_size] Tensor representing a batch
of batch_size logits for num_particles-ary Categorical distribution.
states: A nested list of [batch_size * num_particles, d] Tensors that will
be resampled from the groups of every num_particles-th row.
num_particles: The number of particles/samples.
batch_size: The batch size.
temperature: The temperature used for the relaxed one hot distribution.
random_seed: The random seed to pass to the resampling operations in
the particle filter. Mainly useful for testing.
Returns:
resampled_states: A nested list of [batch_size * num_particles, d]
Tensors resampled via relaxed (Gumbel-Softmax) resampling.
"""
# log_weights are [num_particles, batch_size], so we transpose to get a
# set of batch_size distributions over [0, num_particles).
resampling_parameters = tf.transpose(log_weights, perm=[1, 0])
resampling_dist = tf.contrib.distributions.RelaxedOneHotCategorical(
temperature,
logits=resampling_parameters)
# Sample num_particles samples from the distribution, resulting in a
# [num_particles, batch_size, num_particles] Tensor that represents a set of
# [num_particles, batch_size] blending weights. The dimensions represent
# [particle index, batch index, blending weight index].
ancestors = resampling_dist.sample(sample_shape=num_particles,
seed=random_seed)
def map_fn(tensor):
return _blend_tensor(ancestors, tensor, num_particles, batch_size)
resampled_states = nested.map_nested(map_fn, states)
return resampled_states
def smc(
transition_fn,
num_steps,
num_particles=1,
resampling_criterion=ess_criterion,
resampling_fn=multinomial_resampling,
loop_fn=None,
parallel_iterations=30,
swap_memory=True):
"""Run a sequential Monte Carlo (SMC) algorithm.
This method runs an SMC algorithm that evolves systems of particles
using the supplied transition function for the specified number of steps. The
particles are optionally resampled using resampling_fn when indicated by
resampling_criterion.
Args:
transition_fn: A callable that propagates a batch of particles one step.
Must accept as arguments a batch of particle states and the current
timestep. Must return the particle states one timestep in the future, the
incremental weights of each particle as a [num_samples*batch_size] float
Tensor, and optionally a set of arguments to pass to the loop_fn. If
the loop args are not provided, they will be set to None. Before the
first timestep transition_fn will be called with the arguments None, -1
and should return the initial particle states.
num_steps: A [batch_size] Tensor of ints representing the number of steps
to run each filter for.
num_particles: A scalar int, the number of particles to use in each filter.
resampling_criterion: The resampling criterion to use for this particle
filter. Must accept the current log weights and timestep and
return a boolean Tensor of shape [batch_size] indicating whether each
particle filter should resample. See ess_criterion and related functions
for examples. When resampling_criterion is never_resample_criterion,
resampling_fn is ignored and never called.
resampling_fn: A callable that performs the resampling operation. Must
accept as arguments the log weights, particle states, num_particles,
and batch_size and return the resampled particle states. See
multinomial_resampling and relaxed_resampling for examples.
loop_fn: A callable that performs operations on the weights and
particle states, useful for accumulating and processing state that
shouldn't be resampled. At each timestep after (possibly) resampling
loop_fn will be called with the previous loop_state, a set of arguments
produced by transition_fn called loop_args, the resampled particle states,
the current log weights as [num_particles, batch_size] float Tensor, a
[batch_size] float Tensor representing whether or not each filter
resampled, the current mask indicating which filters are active, and the
current timestep. It must return the next loop state. Before the first
timestep loop_fn will be called with the arguments None, None, None, None, None, None,
-1 and must return the initial loop state. The loop state can be a
possibly nested structure of Tensors and TensorArrays.
parallel_iterations: The number of parallel iterations to use for the
internal while loop. Note that values greater than 1 can introduce
non-determinism even when resampling is deterministic.
swap_memory: Whether GPU-CPU memory swapping should be enabled for the
internal while loop.
Returns:
log_z_hat: A Tensor of shape [batch_size] containing an estimate of the log
normalizing constant that converts between the unnormalized target
distribution (as defined by the weights) and the true target distribution.
log_weights: A Tensor of shape [max_num_steps, batch_size, num_particles]
containing the log weights at each timestep of the particle filter.
Will not be valid for timesteps past the supplied num_steps.
resampled: A float Tensor of shape [max_num_steps, batch_size] indicating
when the particle filters resampled. Will be 1.0 on timesteps when
resampling occurred and 0.0 on timesteps when it did not.
final_loop_state: The final state returned by loop_fn. If loop_fn is None
then 0 will be returned.
"""
# batch_size represents the number of particle filters running in parallel.
batch_size = tf.shape(num_steps)[0]
# Create a TensorArray where element t is the [num_particles*batch_size]
# sequence mask for timestep t.
max_num_steps = tf.reduce_max(num_steps)
seq_mask = tf.transpose(
tf.sequence_mask(num_steps, maxlen=max_num_steps, dtype=tf.float32),
perm=[1, 0])
seq_mask = tf.tile(seq_mask, [1, num_particles])
mask_ta = tf.TensorArray(seq_mask.dtype,
max_num_steps,
name='mask_ta')
mask_ta = mask_ta.unstack(seq_mask)
# Initialize the state.
t0 = tf.constant(0, tf.int32)
init_particle_state = transition_fn(None, -1)
def transition(*args):
transition_outs = transition_fn(*args)
if len(transition_outs) == 2:
return transition_outs + (None,)
else:
return transition_outs
if loop_fn is None:
loop_fn = lambda *args: 0
init_loop_state = loop_fn(None, None, None, None, None, None, -1)
init_states = (init_particle_state, init_loop_state)
ta_names = ['log_weights', 'resampled']
tas = [tf.TensorArray(tf.float32, max_num_steps, name='%s_ta' % n)
for n in ta_names]
log_weights_acc = tf.zeros([num_particles, batch_size], dtype=tf.float32)
log_z_hat_acc = tf.zeros([batch_size], dtype=tf.float32)
def while_predicate(t, *unused_args):
return t < max_num_steps
def while_step(t, state, tas, log_weights_acc, log_z_hat_acc):
"""Implements one timestep of the particle filter."""
particle_state, loop_state = state
cur_mask = nested.read_tas(mask_ta, t)
# Propagate the particles one step.
log_alpha, new_particle_state, loop_args = transition(particle_state, t)
# Update the current weights with the incremental weights.
log_alpha *= cur_mask
log_alpha = tf.reshape(log_alpha, [num_particles, batch_size])
log_weights_acc += log_alpha
should_resample = resampling_criterion(log_weights_acc, t)
if resampling_criterion == never_resample_criterion:
resampled = tf.to_float(should_resample)
else:
# Compute the states as if we did resample.
resampled_states = resampling_fn(
log_weights_acc,
new_particle_state,
num_particles,
batch_size)
# Decide whether or not we should resample; don't resample if we are past
# the end of a sequence.
should_resample = tf.logical_and(should_resample,
cur_mask[:batch_size] > 0.)
float_should_resample = tf.to_float(should_resample)
new_particle_state = nested.where_tensors(
tf.tile(should_resample, [num_particles]),
resampled_states,
new_particle_state)
resampled = float_should_resample
new_loop_state = loop_fn(loop_state, loop_args, new_particle_state,
log_weights_acc, resampled, cur_mask, t)
# Update log Z hat.
log_z_hat_update = tf.reduce_logsumexp(
log_weights_acc, axis=0) - tf.log(tf.to_float(num_particles))
# If it is the last timestep, always add the update.
log_z_hat_acc += tf.cond(t < max_num_steps - 1,
lambda: log_z_hat_update * resampled,
lambda: log_z_hat_update)
# Update the TensorArrays before we reset the weights so that we capture
# the incremental weights and not zeros.
ta_updates = [log_weights_acc, resampled]
new_tas = [ta.write(t, x) for ta, x in zip(tas, ta_updates)]
# For the particle filters that resampled, reset weights to zero.
log_weights_acc *= (1. - tf.tile(resampled[tf.newaxis, :],
[num_particles, 1]))
new_state = (new_particle_state, new_loop_state)
return t + 1, new_state, new_tas, log_weights_acc, log_z_hat_acc
_, final_state, tas, _, log_z_hat = tf.while_loop(
while_predicate,
while_step,
loop_vars=(t0, init_states, tas, log_weights_acc, log_z_hat_acc),
parallel_iterations=parallel_iterations,
swap_memory=swap_memory)
log_weights, resampled = [x.stack() for x in tas]
log_weights = tf.transpose(log_weights, perm=[0, 2, 1])
final_particle_state, final_loop_state = final_state
return (log_z_hat, log_weights, resampled,
final_particle_state, final_loop_state)
```
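The `ess_criterion` above computes log ESS = 2*logsumexp(w) - logsumexp(2w) and triggers resampling when the effective sample size falls to half the number of particles. A NumPy check of the same quantity for a single particle filter (illustrative; the real code operates on [num_particles, batch_size] Tensors):
```python
import numpy as np


def effective_sample_size(log_weights):
  """ESS of one particle filter, computed stably in log space."""
  m = np.max(log_weights)
  log_sum = m + np.log(np.sum(np.exp(log_weights - m)))
  log_sum_sq = 2 * m + np.log(np.sum(np.exp(2 * (log_weights - m))))
  return np.exp(2 * log_sum - log_sum_sq)


uniform = np.zeros(4)                          # Four equally weighted particles.
degenerate = np.array([0., -50., -50., -50.])  # One particle dominates.
print(effective_sample_size(uniform))     # ~4.0 -> no resampling (ESS > N/2).
print(effective_sample_size(degenerate))  # ~1.0 -> resample (ESS <= N/2 = 2).
```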
#### File: gan/cyclegan/data_provider.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
def normalize_image(image):
"""Rescale from range [0, 255] to [-1, 1]."""
return (tf.to_float(image) - 127.5) / 127.5
def undo_normalize_image(normalized_image):
"""Convert to a numpy array that can be read by PIL."""
# Convert from NHWC to HWC.
normalized_image = np.squeeze(normalized_image, axis=0)
return np.uint8(normalized_image * 127.5 + 127.5)
def _sample_patch(image, patch_size):
"""Crop image to square shape and resize it to `patch_size`.
Args:
image: A 3D `Tensor` of HWC format.
patch_size: A Python scalar. The output image size.
Returns:
A 3D `Tensor` of HWC format which has the shape of
[patch_size, patch_size, 3].
"""
image_shape = tf.shape(image)
height, width = image_shape[0], image_shape[1]
target_size = tf.minimum(height, width)
image = tf.image.resize_image_with_crop_or_pad(image, target_size,
target_size)
  # Resize on a 4-D batch of one image, then squeeze the batch dim back out.
image = tf.expand_dims(image, axis=0)
image = tf.image.resize_images(image, [patch_size, patch_size])
image = tf.squeeze(image, axis=0)
# Force image num_channels = 3
image = tf.tile(image, [1, 1, tf.maximum(1, 4 - tf.shape(image)[2])])
image = tf.slice(image, [0, 0, 0], [patch_size, patch_size, 3])
return image
def full_image_to_patch(image, patch_size):
image = normalize_image(image)
# Sample a patch of fixed size.
image_patch = _sample_patch(image, patch_size)
image_patch.shape.assert_is_compatible_with([patch_size, patch_size, 3])
return image_patch
def _provide_custom_dataset(image_file_pattern,
batch_size,
shuffle=True,
num_threads=1,
patch_size=128):
"""Provides batches of custom image data.
Args:
image_file_pattern: A string of glob pattern of image files.
batch_size: The number of images in each batch.
shuffle: Whether to shuffle the read images. Defaults to True.
num_threads: Number of prefetching threads. Defaults to 1.
    patch_size: Size of the patch to extract from the image. Defaults to 128.
Returns:
A float `Tensor` of shape [batch_size, patch_size, patch_size, 3]
representing a batch of images.
"""
filename_queue = tf.train.string_input_producer(
tf.train.match_filenames_once(image_file_pattern),
shuffle=shuffle,
capacity=5 * batch_size)
image_reader = tf.WholeFileReader()
_, image_bytes = image_reader.read(filename_queue)
image = tf.image.decode_image(image_bytes)
image_patch = full_image_to_patch(image, patch_size)
if shuffle:
return tf.train.shuffle_batch(
[image_patch],
batch_size=batch_size,
num_threads=num_threads,
capacity=5 * batch_size,
min_after_dequeue=batch_size)
else:
return tf.train.batch(
[image_patch],
batch_size=batch_size,
num_threads=1, # no threads so it's deterministic
capacity=5 * batch_size)
def provide_custom_datasets(image_file_patterns,
batch_size,
shuffle=True,
num_threads=1,
patch_size=128):
"""Provides multiple batches of custom image data.
Args:
image_file_patterns: A list of glob patterns of image files.
batch_size: The number of images in each batch.
shuffle: Whether to shuffle the read images. Defaults to True.
num_threads: Number of prefetching threads. Defaults to 1.
patch_size: Size of the patch to extract from the image. Defaults to 128.
Returns:
    A list of float `Tensor`s with the same length as `image_file_patterns`.
    Each `Tensor` in the list has a shape of
    [batch_size, patch_size, patch_size, 3] representing a batch of images.
Raises:
ValueError: If image_file_patterns is not a list or tuple.
"""
if not isinstance(image_file_patterns, (list, tuple)):
raise ValueError(
'`image_file_patterns` should be either list or tuple, but was {}.'.
format(type(image_file_patterns)))
custom_datasets = []
for pattern in image_file_patterns:
custom_datasets.append(
_provide_custom_dataset(
pattern,
batch_size=batch_size,
shuffle=shuffle,
num_threads=num_threads,
patch_size=patch_size))
return custom_datasets
```
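A hypothetical usage sketch for the provider above. The glob patterns are placeholders, and the session boilerplate (local-variable initialization for `tf.train.match_filenames_once`, queue runners for the `string_input_producer` pipeline) is standard TF 1.x queue handling rather than anything this module prescribes.
```python
import tensorflow as tf

# Hypothetical glob patterns; replace with real image locations.
images_x, images_y = provide_custom_datasets(
    ['/tmp/domain_a/*.jpg', '/tmp/domain_b/*.jpg'],
    batch_size=8,
    patch_size=128)

with tf.Session() as sess:
  # match_filenames_once creates a local variable, so initialize both sets.
  sess.run([tf.global_variables_initializer(),
            tf.local_variables_initializer()])
  coord = tf.train.Coordinator()
  threads = tf.train.start_queue_runners(sess=sess, coord=coord)
  batch_a, batch_b = sess.run([images_x, images_y])
  print(batch_a.shape, batch_b.shape)  # (8, 128, 128, 3) each
  coord.request_stop()
  coord.join(threads)
```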
#### File: gan/image_compression/eval.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
import tensorflow as tf
from research.gan.image_compression import data_provider
from research.gan.image_compression import networks
from research.gan.image_compression import summaries
FLAGS = tf.app.flags.FLAGS
flags = tf.app.flags
flags.DEFINE_string('master', '', 'Name of the TensorFlow master to use.')
flags.DEFINE_string('checkpoint_dir', '/tmp/compression/',
'Directory where the model was written to.')
flags.DEFINE_string('eval_dir', '/tmp/compression/',
'Directory where the results are saved to.')
flags.DEFINE_integer('max_number_of_evaluations', None,
'Number of times to run evaluation. If `None`, run '
'forever.')
flags.DEFINE_string('dataset_dir', 'testdata', 'Location of data.')
# Compression-specific flags.
flags.DEFINE_integer('batch_size', 32, 'The number of images in each batch.')
flags.DEFINE_integer('patch_size', 32, 'The size of the patches to train on.')
flags.DEFINE_integer('bits_per_patch', 1230,
'The number of bits to produce per patch.')
flags.DEFINE_integer('model_depth', 64,
'Number of filters for compression model')
def main(_, run_eval_loop=True):
with tf.name_scope('inputs'):
images = data_provider.provide_data(
'validation', FLAGS.batch_size, dataset_dir=FLAGS.dataset_dir,
patch_size=FLAGS.patch_size)
# In order for variables to load, use the same variable scope as in the
# train job.
with tf.variable_scope('generator'):
reconstructions, _, prebinary = networks.compression_model(
images,
num_bits=FLAGS.bits_per_patch,
depth=FLAGS.model_depth,
is_training=False)
summaries.add_reconstruction_summaries(images, reconstructions, prebinary)
# Visualize losses.
pixel_loss_per_example = tf.reduce_mean(
tf.abs(images - reconstructions), axis=[1, 2, 3])
pixel_loss = tf.reduce_mean(pixel_loss_per_example)
tf.summary.histogram('pixel_l1_loss_hist', pixel_loss_per_example)
tf.summary.scalar('pixel_l1_loss', pixel_loss)
# Create ops to write images to disk.
uint8_images = data_provider.float_image_to_uint8(images)
uint8_reconstructions = data_provider.float_image_to_uint8(reconstructions)
uint8_reshaped = summaries.stack_images(uint8_images, uint8_reconstructions)
image_write_ops = tf.write_file(
'%s/%s' % (FLAGS.eval_dir, 'compression.png'),
tf.image.encode_png(uint8_reshaped[0]))
# For unit testing, use `run_eval_loop=False`.
if not run_eval_loop: return
tf.contrib.training.evaluate_repeatedly(
FLAGS.checkpoint_dir,
master=FLAGS.master,
hooks=[tf.contrib.training.SummaryAtEndHook(FLAGS.eval_dir),
tf.contrib.training.StopAfterNEvalsHook(1)],
eval_ops=image_write_ops,
max_number_of_evaluations=FLAGS.max_number_of_evaluations)
if __name__ == '__main__':
  app.run(main)
```
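The `pixel_loss` computed above is just the mean absolute difference per example, averaged over the batch. A small NumPy sketch with made-up arrays (shapes and values are assumptions) shows the same reduction pattern.
```python
import numpy as np

images = np.random.uniform(-1., 1., size=(2, 32, 32, 3))
reconstructions = np.clip(images + np.random.normal(scale=0.1, size=images.shape), -1., 1.)

# Mean absolute error per example (reduce over H, W, C), then over the batch.
pixel_loss_per_example = np.mean(np.abs(images - reconstructions), axis=(1, 2, 3))
pixel_loss = np.mean(pixel_loss_per_example)
print(pixel_loss_per_example.shape, pixel_loss)  # (2,) and a scalar around 0.08
```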
#### File: gan/mnist/train.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from absl import flags
from absl import logging
import tensorflow as tf
from research.gan.mnist import data_provider
from research.gan.mnist import networks
from research.gan.mnist import util
tfgan = tf.contrib.gan
flags.DEFINE_integer('batch_size', 32, 'The number of images in each batch.')
flags.DEFINE_string('train_log_dir', '/tmp/mnist/',
'Directory where to write event logs.')
flags.DEFINE_string('dataset_dir', None, 'Location of data.')
flags.DEFINE_integer('max_number_of_steps', 20000,
'The maximum number of gradient steps.')
flags.DEFINE_string(
'gan_type', 'unconditional',
'Either `unconditional`, `conditional`, or `infogan`.')
flags.DEFINE_integer(
'grid_size', 5, 'Grid size for image visualization.')
flags.DEFINE_integer(
'noise_dims', 64, 'Dimensions of the generator noise vector.')
FLAGS = flags.FLAGS
def _learning_rate(gan_type):
# First is generator learning rate, second is discriminator learning rate.
return {
'unconditional': (1e-3, 1e-4),
'conditional': (1e-5, 1e-4),
'infogan': (0.001, 9e-5),
}[gan_type]
def main(_):
if not tf.gfile.Exists(FLAGS.train_log_dir):
tf.gfile.MakeDirs(FLAGS.train_log_dir)
# Force all input processing onto CPU in order to reserve the GPU for
# the forward inference and back-propagation.
with tf.name_scope('inputs'):
with tf.device('/cpu:0'):
images, one_hot_labels, _ = data_provider.provide_data(
'train', FLAGS.batch_size, FLAGS.dataset_dir, num_threads=4)
# Define the GANModel tuple. Optionally, condition the GAN on the label or
# use an InfoGAN to learn a latent representation.
if FLAGS.gan_type == 'unconditional':
gan_model = tfgan.gan_model(
generator_fn=networks.unconditional_generator,
discriminator_fn=networks.unconditional_discriminator,
real_data=images,
generator_inputs=tf.random_normal(
[FLAGS.batch_size, FLAGS.noise_dims]))
elif FLAGS.gan_type == 'conditional':
noise = tf.random_normal([FLAGS.batch_size, FLAGS.noise_dims])
gan_model = tfgan.gan_model(
generator_fn=networks.conditional_generator,
discriminator_fn=networks.conditional_discriminator,
real_data=images,
generator_inputs=(noise, one_hot_labels))
elif FLAGS.gan_type == 'infogan':
cat_dim, cont_dim = 10, 2
generator_fn = functools.partial(
networks.infogan_generator, categorical_dim=cat_dim)
discriminator_fn = functools.partial(
networks.infogan_discriminator, categorical_dim=cat_dim,
continuous_dim=cont_dim)
unstructured_inputs, structured_inputs = util.get_infogan_noise(
FLAGS.batch_size, cat_dim, cont_dim, FLAGS.noise_dims)
gan_model = tfgan.infogan_model(
generator_fn=generator_fn,
discriminator_fn=discriminator_fn,
real_data=images,
unstructured_generator_inputs=unstructured_inputs,
structured_generator_inputs=structured_inputs)
tfgan.eval.add_gan_model_image_summaries(gan_model, FLAGS.grid_size)
# Get the GANLoss tuple. You can pass a custom function, use one of the
# already-implemented losses from the losses library, or use the defaults.
with tf.name_scope('loss'):
mutual_information_penalty_weight = (1.0 if FLAGS.gan_type == 'infogan'
else 0.0)
gan_loss = tfgan.gan_loss(
gan_model,
gradient_penalty_weight=1.0,
mutual_information_penalty_weight=mutual_information_penalty_weight,
add_summaries=True)
tfgan.eval.add_regularization_loss_summaries(gan_model)
# Get the GANTrain ops using custom optimizers.
with tf.name_scope('train'):
gen_lr, dis_lr = _learning_rate(FLAGS.gan_type)
train_ops = tfgan.gan_train_ops(
gan_model,
gan_loss,
generator_optimizer=tf.train.AdamOptimizer(gen_lr, 0.5),
discriminator_optimizer=tf.train.AdamOptimizer(dis_lr, 0.5),
summarize_gradients=True,
aggregation_method=tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N)
# Run the alternating training loop. Skip it if no steps should be taken
# (used for graph construction tests).
status_message = tf.string_join(
['Starting train step: ',
tf.as_string(tf.train.get_or_create_global_step())],
name='status_message')
if FLAGS.max_number_of_steps == 0: return
tfgan.gan_train(
train_ops,
hooks=[tf.train.StopAtStepHook(num_steps=FLAGS.max_number_of_steps),
tf.train.LoggingTensorHook([status_message], every_n_iter=10)],
logdir=FLAGS.train_log_dir,
get_hooks_fn=tfgan.get_joint_train_hooks())
if __name__ == '__main__':
logging.set_verbosity(logging.INFO)
tf.app.run()
```
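The generator and discriminator deliberately train with different learning rates, keyed by `gan_type` through `_learning_rate`. The minimal sketch below only restates how those values feed the two Adam optimizers used in the train block (beta1 of 0.5); it is not an alternative training path.
```python
import tensorflow as tf

gen_lr, dis_lr = _learning_rate('infogan')   # (0.001, 9e-5)
generator_optimizer = tf.train.AdamOptimizer(gen_lr, beta1=0.5)
discriminator_optimizer = tf.train.AdamOptimizer(dis_lr, beta1=0.5)
```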
#### File: learned_optimizer/optimizer/coordinatewise_rnn.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
import tensorflow as tf
from research.learned_optimizer.optimizer import utils
from research.learned_optimizer.optimizer import trainable_optimizer as opt
# Default was 1e-3
tf.app.flags.DEFINE_float("crnn_rnn_readout_scale", 0.5,
"""The initialization scale for the RNN readouts.""")
tf.app.flags.DEFINE_float("crnn_default_decay_var_init", 2.2,
"""The default initializer value for any decay/
momentum style variables and constants.
sigmoid(2.2) ~ 0.9, sigmoid(-2.2) ~ 0.01.""")
FLAGS = tf.flags.FLAGS
class CoordinatewiseRNN(opt.TrainableOptimizer):
"""RNN that operates on each coordinate of the problem independently."""
def __init__(self,
cell_sizes,
cell_cls,
init_lr_range=(1., 1.),
dynamic_output_scale=True,
learnable_decay=True,
zero_init_lr_weights=False,
**kwargs):
"""Initializes the RNN per-parameter optimizer.
Args:
cell_sizes: List of hidden state sizes for each RNN cell in the network
cell_cls: tf.contrib.rnn class for specifying the RNN cell type
init_lr_range: the range in which to initialize the learning rates.
dynamic_output_scale: whether to learn weights that dynamically modulate
the output scale (default: True)
learnable_decay: whether to learn weights that dynamically modulate the
input scale via RMS style decay (default: True)
zero_init_lr_weights: whether to initialize the lr weights to zero
**kwargs: args passed to TrainableOptimizer's constructor
Raises:
ValueError: If the init lr range is not of length 2.
ValueError: If the init lr range is not a valid range (min > max).
"""
if len(init_lr_range) != 2:
raise ValueError(
"Initial LR range must be len 2, was {}".format(len(init_lr_range)))
if init_lr_range[0] > init_lr_range[1]:
raise ValueError("Initial LR range min is greater than max.")
self.init_lr_range = init_lr_range
self.zero_init_lr_weights = zero_init_lr_weights
self.reuse_vars = False
# create the RNN cell
with tf.variable_scope(opt.OPTIMIZER_SCOPE):
self.component_cells = [cell_cls(sz) for sz in cell_sizes]
self.cell = tf.contrib.rnn.MultiRNNCell(self.component_cells)
# random normal initialization scaled by the output size
scale_factor = FLAGS.crnn_rnn_readout_scale / math.sqrt(cell_sizes[-1])
scaled_init = tf.random_normal_initializer(0., scale_factor)
# weights for projecting the hidden state to a parameter update
self.update_weights = tf.get_variable("update_weights",
shape=(cell_sizes[-1], 1),
initializer=scaled_init)
self._initialize_decay(learnable_decay, (cell_sizes[-1], 1), scaled_init)
self._initialize_lr(dynamic_output_scale, (cell_sizes[-1], 1),
scaled_init)
state_size = sum([sum(state_size) for state_size in self.cell.state_size])
self._init_vector = tf.get_variable(
"init_vector", shape=[1, state_size],
initializer=tf.random_uniform_initializer(-1., 1.))
state_keys = ["rms", "rnn", "learning_rate", "decay"]
super(CoordinatewiseRNN, self).__init__("cRNN", state_keys, **kwargs)
def _initialize_decay(
self, learnable_decay, weights_tensor_shape, scaled_init):
"""Initializes the decay weights and bias variables or tensors.
Args:
learnable_decay: Whether to use learnable decay.
weights_tensor_shape: The shape the weight tensor should take.
scaled_init: The scaled initialization for the weights tensor.
"""
if learnable_decay:
# weights for projecting the hidden state to the RMS decay term
self.decay_weights = tf.get_variable("decay_weights",
shape=weights_tensor_shape,
initializer=scaled_init)
self.decay_bias = tf.get_variable(
"decay_bias", shape=(1,),
initializer=tf.constant_initializer(
FLAGS.crnn_default_decay_var_init))
else:
self.decay_weights = tf.zeros_like(self.update_weights)
self.decay_bias = tf.constant(FLAGS.crnn_default_decay_var_init)
def _initialize_lr(
self, dynamic_output_scale, weights_tensor_shape, scaled_init):
"""Initializes the learning rate weights and bias variables or tensors.
Args:
dynamic_output_scale: Whether to use a dynamic output scale.
weights_tensor_shape: The shape the weight tensor should take.
scaled_init: The scaled initialization for the weights tensor.
"""
if dynamic_output_scale:
zero_init = tf.constant_initializer(0.)
wt_init = zero_init if self.zero_init_lr_weights else scaled_init
self.lr_weights = tf.get_variable("learning_rate_weights",
shape=weights_tensor_shape,
initializer=wt_init)
self.lr_bias = tf.get_variable("learning_rate_bias", shape=(1,),
initializer=zero_init)
else:
self.lr_weights = tf.zeros_like(self.update_weights)
self.lr_bias = tf.zeros([1, 1])
def _initialize_state(self, var):
"""Return a dictionary mapping names of state variables to their values."""
vectorized_shape = [var.get_shape().num_elements(), 1]
min_lr = self.init_lr_range[0]
max_lr = self.init_lr_range[1]
if min_lr == max_lr:
init_lr = tf.constant(min_lr, shape=vectorized_shape)
else:
actual_vals = tf.random_uniform(vectorized_shape,
np.log(min_lr),
np.log(max_lr))
init_lr = tf.exp(actual_vals)
ones = tf.ones(vectorized_shape)
rnn_init = ones * self._init_vector
return {
"rms": tf.ones(vectorized_shape),
"learning_rate": init_lr,
"rnn": rnn_init,
"decay": tf.ones(vectorized_shape),
}
def _compute_update(self, param, grad, state):
"""Update parameters given the gradient and state.
Args:
param: tensor of parameters
grad: tensor of gradients with the same shape as param
state: a dictionary containing any state for the optimizer
Returns:
updated_param: updated parameters
updated_state: updated state variables in a dictionary
"""
with tf.variable_scope(opt.OPTIMIZER_SCOPE) as scope:
if self.reuse_vars:
scope.reuse_variables()
else:
self.reuse_vars = True
param_shape = tf.shape(param)
(grad_values, decay_state, rms_state, rnn_state, learning_rate_state,
grad_indices) = self._extract_gradients_and_internal_state(
grad, state, param_shape)
# Vectorize and scale the gradients.
grad_scaled, rms = utils.rms_scaling(grad_values, decay_state, rms_state)
# Apply the RNN update.
rnn_state_tuples = self._unpack_rnn_state_into_tuples(rnn_state)
rnn_output, rnn_state_tuples = self.cell(grad_scaled, rnn_state_tuples)
rnn_state = self._pack_tuples_into_rnn_state(rnn_state_tuples)
# Compute the update direction (a linear projection of the RNN output).
delta = utils.project(rnn_output, self.update_weights)
# The updated decay is an affine projection of the hidden state
decay = utils.project(rnn_output, self.decay_weights,
bias=self.decay_bias, activation=tf.nn.sigmoid)
# Compute the change in learning rate (an affine projection of the RNN
# state, passed through a 2x sigmoid, so the change is bounded).
learning_rate_change = 2. * utils.project(rnn_output, self.lr_weights,
bias=self.lr_bias,
activation=tf.nn.sigmoid)
# Update the learning rate.
new_learning_rate = learning_rate_change * learning_rate_state
# Apply the update to the parameters.
update = tf.reshape(new_learning_rate * delta, tf.shape(grad_values))
if isinstance(grad, tf.IndexedSlices):
update = utils.stack_tensor(update, grad_indices, param,
param_shape[:1])
rms = utils.update_slices(rms, grad_indices, state["rms"], param_shape)
new_learning_rate = utils.update_slices(new_learning_rate, grad_indices,
state["learning_rate"],
param_shape)
rnn_state = utils.update_slices(rnn_state, grad_indices, state["rnn"],
param_shape)
decay = utils.update_slices(decay, grad_indices, state["decay"],
param_shape)
new_param = param - update
# Collect the update and new state.
new_state = {
"rms": rms,
"learning_rate": new_learning_rate,
"rnn": rnn_state,
"decay": decay,
}
return new_param, new_state
def _extract_gradients_and_internal_state(self, grad, state, param_shape):
"""Extracts the gradients and relevant internal state.
If the gradient is sparse, extracts the appropriate slices from the state.
Args:
grad: The current gradient.
state: The current state.
param_shape: The shape of the parameter (used if gradient is sparse).
Returns:
grad_values: The gradient value tensor.
decay_state: The current decay state.
rms_state: The current rms state.
rnn_state: The current state of the internal rnns.
learning_rate_state: The current learning rate state.
grad_indices: The indices for the gradient tensor, if sparse.
None otherwise.
"""
if isinstance(grad, tf.IndexedSlices):
grad_indices, grad_values = utils.accumulate_sparse_gradients(grad)
decay_state = utils.slice_tensor(state["decay"], grad_indices,
param_shape)
rms_state = utils.slice_tensor(state["rms"], grad_indices, param_shape)
rnn_state = utils.slice_tensor(state["rnn"], grad_indices, param_shape)
learning_rate_state = utils.slice_tensor(state["learning_rate"],
grad_indices, param_shape)
decay_state.set_shape([None, 1])
rms_state.set_shape([None, 1])
else:
grad_values = grad
grad_indices = None
decay_state = state["decay"]
rms_state = state["rms"]
rnn_state = state["rnn"]
learning_rate_state = state["learning_rate"]
return (grad_values, decay_state, rms_state, rnn_state, learning_rate_state,
grad_indices)
def _unpack_rnn_state_into_tuples(self, rnn_state):
"""Creates state tuples from the rnn state vector."""
rnn_state_tuples = []
cur_state_pos = 0
for cell in self.component_cells:
total_state_size = sum(cell.state_size)
cur_state = tf.slice(rnn_state, [0, cur_state_pos],
[-1, total_state_size])
cur_state_tuple = tf.split(value=cur_state, num_or_size_splits=2,
axis=1)
rnn_state_tuples.append(cur_state_tuple)
cur_state_pos += total_state_size
return rnn_state_tuples
def _pack_tuples_into_rnn_state(self, rnn_state_tuples):
"""Creates a single state vector concatenated along column axis."""
rnn_state = None
for new_state_tuple in rnn_state_tuples:
new_c, new_h = new_state_tuple
if rnn_state is None:
rnn_state = tf.concat([new_c, new_h], axis=1)
else:
rnn_state = tf.concat([rnn_state, tf.concat([new_c, new_h], 1)], axis=1)
return rnn_state
```
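In `_compute_update` above, each coordinate's learning rate is rescaled multiplicatively by `2 * sigmoid(readout)`, so a single step can at most double it and can never make it negative. A standalone NumPy sketch of that bounding follows; the readout values are illustrative assumptions.
```python
import numpy as np

def sigmoid(x):
  return 1.0 / (1.0 + np.exp(-x))

learning_rate_state = np.full((3, 1), 1e-3)       # current per-coordinate learning rates
projection = np.array([[-4.0], [0.0], [4.0]])     # illustrative RNN readout values

learning_rate_change = 2.0 * sigmoid(projection)  # bounded in (0, 2)
new_learning_rate = learning_rate_change * learning_rate_state
print(new_learning_rate.ravel())  # approximately [3.6e-05, 1.0e-03, 1.96e-03]
```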
#### File: learning_unsupervised_learning/architectures/more_local_weight_update.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
import sonnet as snt
import tensorflow as tf
from research.learning_unsupervised_learning.architectures import common
from research.learning_unsupervised_learning import optimizers
from research.learning_unsupervised_learning import utils
from research.learning_unsupervised_learning import summary_utils
OptState = collections.namedtuple('OptState',
['variables', 'opt_state', 'index'])
BaseModelOutputs = collections.namedtuple(
'BaseModelOutputs', ['xs', 'zs', 'mods', 'batch', 'backward_mods'])
class GradChannelReadout(snt.AbstractModule):
"""Perform a linear readout and reshape from input 3 tensor."""
def __init__(self,
num_grad_channels,
device,
perm=(2, 0, 1),
name='GradChannelReadout'):
"""Args:
num_grad_channels: int
number of channels to readout to.
device: str or callable
        device to place weights.
perm: list or tuple
transpose applied.
"""
self.num_grad_channels = num_grad_channels
self.device = device
self.perm = perm
super(GradChannelReadout, self).__init__(name=name)
def _build(self, h):
with tf.device(self.device):
mod = snt.Linear(self.num_grad_channels)
ret = snt.BatchApply(mod)(h)
# return as [num_grad_channels] x [bs] x [num units]
return tf.transpose(ret, perm=self.perm)
def get_weight_stats(x, axis):
""" Compute weight statistics over the given axis.
Args:
x: tf.Tensor
a batch of activations.
axis: int
axis to perform statistics over.
Returns:
    list of tf.Tensor
      the computed statistics, each reshaped to [-1, 1, 1].
"""
if x is None:
return []
stats = []
l1 = tf.reduce_mean(tf.abs(x), axis=axis)
l2 = tf.sqrt(tf.reduce_mean(x ** 2, axis=axis) + 1e-6)
mean, var = tf.nn.moments(x, [axis])
stats.extend([l1, l2, mean, tf.sqrt(var + 1e-8)])
stats = [tf.reshape(s, [-1, 1, 1]) for s in stats]
return stats
class AddUnitBatchStatistics(snt.AbstractModule):
"""Compute some number of statistics over units and concat them on."""
def __init__(self, name='AddUnitBatchStatistics'):
super(AddUnitBatchStatistics, self).__init__(name=name)
def _build(self, x):
# [channel, bs, 1]
output = x
for d in [0, 1]:
stats = []
l1 = tf.reduce_mean(tf.abs(x), axis=d, keepdims=True)
l2 = tf.sqrt(tf.reduce_mean(x ** 2, axis=d, keepdims=True) + 1e-6)
mean, var = tf.nn.moments(x, [d], keepdims=True)
stats.extend([l1, l2, mean, tf.sqrt(var + 1e-8)])
to_add = tf.concat(stats, axis=2) # [channels/1, units/1, stats]
output += snt.BatchApply(snt.Linear(x.shape.as_list()[2]))(to_add)
return output
class ConcatUnitConv(snt.AbstractModule):
"""Do a small number of convolutions over units and concat / add them on."""
def __init__(self, add=True):
self.add = add
super(ConcatUnitConv, self).__init__(name='ConcatUnitConv')
def _build(self, x):
# x is [units, bs, 1]
net = tf.transpose(x, [1, 0, 2]) # now [bs x units x 1]
channels = x.shape.as_list()[2]
mod = snt.Conv1D(output_channels=channels, kernel_shape=[3])
net = mod(net)
net = snt.BatchNorm(axis=[0, 1])(net, is_training=False)
net = tf.nn.relu(net)
mod = snt.Conv1D(output_channels=channels, kernel_shape=[3])
net = mod(net)
net = snt.BatchNorm(axis=[0, 1])(net, is_training=False)
net = tf.nn.relu(net)
to_concat = tf.transpose(net, [1, 0, 2])
if self.add:
return x + to_concat
else:
return tf.concat([x, to_concat], 2)
class MoreLocalWeightUpdateProcess(snt.AbstractModule):
def __init__(
self,
remote_device,
local_device,
top_delta_size=64,
top_delta_layers=2,
compute_h_size=64,
compute_h_layers=1,
delta_dim=32,
num_grad_channels=4,
normalize_epsilon=1.,
):
self.local_device = local_device
self.remote_device = remote_device
self.top_delta_size = top_delta_size
self.top_delta_layers = top_delta_layers
self.compute_h_size = compute_h_size
self.compute_h_layers = compute_h_layers
self.delta_dim = delta_dim
self.num_grad_channels = num_grad_channels
    self.normalize_epsilon = normalize_epsilon
with tf.device(local_device):
self.opt = optimizers.UnrollableGradientDescentRollingOptimizer(
learning_rate=1e-4)
# lazily initialized for readouts
self.readout_mods = {}
super(MoreLocalWeightUpdateProcess,
self).__init__(name='MoreLocalWeightUpdateProcess')
with tf.device(remote_device):
self()
def normalize(self, change_w, normalize_epsilon=None):
if normalize_epsilon is None:
normalize_epsilon = self.normalize_epsilon
# normalize the weights per receptive-field, rather than per-matrix
var = tf.reduce_mean(tf.square(change_w), axis=0, keepdims=True)
change_w = (change_w) / tf.sqrt(normalize_epsilon + var)
return change_w
def _build(self):
pass
@snt.reuse_variables
def compute_top_delta(self, z):
""" parameterization of topD. This converts the top level activation
to an error signal.
Args:
z: tf.Tensor
batch of final layer post activations
Returns
delta: tf.Tensor
the error signal
"""
s_idx = 0
with tf.variable_scope('compute_top_delta'), tf.device(self.remote_device):
      # snt.Conv1D typically takes [BS, length, input_channels]; here we apply it
      # so that the convolution runs over the batch dimension.
act = tf.expand_dims(tf.transpose(z, [1, 0]), 2) # [channels, BS, 1]
mod = snt.Conv1D(output_channels=self.top_delta_size, kernel_shape=[5])
act = mod(act)
act = snt.BatchNorm(axis=[0, 1])(act, is_training=False)
act = tf.nn.relu(act)
bs = act.shape.as_list()[0]
act = tf.transpose(act, [2, 1, 0])
act = snt.Conv1D(output_channels=bs, kernel_shape=[3])(act)
act = snt.BatchNorm(axis=[0, 1])(act, is_training=False)
act = tf.nn.relu(act)
act = snt.Conv1D(output_channels=bs, kernel_shape=[3])(act)
act = snt.BatchNorm(axis=[0, 1])(act, is_training=False)
act = tf.nn.relu(act)
act = tf.transpose(act, [2, 1, 0])
prev_act = act
for i in range(self.top_delta_layers):
mod = snt.Conv1D(output_channels=self.top_delta_size, kernel_shape=[3])
act = mod(act)
act = snt.BatchNorm(axis=[0, 1])(act, is_training=False)
act = tf.nn.relu(act)
prev_act = act
mod = snt.Conv1D(output_channels=self.delta_dim, kernel_shape=[3])
act = mod(act)
# [bs, feature_channels, delta_channels]
act = tf.transpose(act, [1, 0, 2])
return act
@snt.reuse_variables
def compute_h(self,
x,
z,
d,
bias,
W_bot,
W_top,
compute_perc=1.0,
compute_units=None):
"""z = [BS, n_units] a = [BS, n_units] b = [BS, n_units] d = [BS, n_units, delta_channels]
"""
s_idx = 0
if compute_perc != 1.0:
assert compute_units is None
with tf.device(self.remote_device):
inp_feat = [x, z]
inp_feat = [tf.transpose(f, [1, 0]) for f in inp_feat]
units = x.shape.as_list()[1]
bs = x.shape.as_list()[0]
# add unit ID, to help the network differentiate units
id_theta = tf.linspace(0., (4) * np.pi, units)
assert bs is not None
id_theta_bs = tf.reshape(id_theta, [-1, 1]) * tf.ones([1, bs])
inp_feat += [tf.sin(id_theta_bs), tf.cos(id_theta_bs)]
# list of [units, BS, 1]
inp_feat = [tf.expand_dims(f, 2) for f in inp_feat]
d_trans = tf.transpose(d, [1, 0, 2])
if compute_perc != 1.0:
        compute_units = int(compute_perc * inp_feat[0].shape.as_list()[0])
# add weight matrix statistics, both from above and below
w_stats_bot = get_weight_stats(W_bot, 0)
w_stats_top = get_weight_stats(W_top, 1)
w_stats = w_stats_bot + w_stats_top
if W_bot is None or W_top is None:
# if it's an edge layer (top or bottom), just duplicate the stats for
# the weight matrix that does exist
w_stats = w_stats + w_stats
w_stats = [tf.ones([1, x.shape[0], 1]) * ww for ww in w_stats]
# w_stats is a list, with entries with shape UNITS x 1 x channels
if compute_units is None:
inp_feat_in = inp_feat
d_trans_in = d_trans
w_stats_in = w_stats
bias_in = tf.transpose(bias)
else:
# only run on a subset of the activations.
mask = tf.random_uniform(
minval=0,
maxval=1,
dtype=tf.float32,
shape=inp_feat[0].shape.as_list()[0:1])
_, ind = tf.nn.top_k(mask, k=compute_units)
ind = tf.reshape(ind, [-1, 1])
inp_feat_in = [tf.gather_nd(xx, ind) for xx in inp_feat]
w_stats_in = [tf.gather_nd(xx, ind) for xx in w_stats]
d_trans_in = tf.gather_nd(d_trans, ind)
bias_in = tf.gather_nd(tf.transpose(bias), ind)
w_stats_in = tf.concat(w_stats_in, 2)
w_stats_in_norm = w_stats_in * tf.rsqrt(
tf.reduce_mean(w_stats_in ** 2) + 1e-6)
act = tf.concat(inp_feat_in + [d_trans_in], 2)
act = snt.BatchNorm(axis=[0, 1])(act, is_training=True)
bias_dense = tf.reshape(bias_in, [-1, 1, 1]) * tf.ones([1, bs, 1])
act = tf.concat([w_stats_in_norm, bias_dense, act], 2)
mod = snt.Conv1D(output_channels=self.compute_h_size, kernel_shape=[3])
act = mod(act)
act = snt.BatchNorm(axis=[0, 1])(act, is_training=True)
act = tf.nn.relu(act)
act2 = ConcatUnitConv()(act)
act = act2
prev_act = act
for i in range(self.compute_h_layers):
mod = snt.Conv1D(output_channels=self.compute_h_size, kernel_shape=[3])
act = mod(act)
act = snt.BatchNorm(axis=[0, 1])(act, is_training=True)
act = tf.nn.relu(act)
act = ConcatUnitConv()(act)
prev_act = act
h = act
if compute_units is not None:
shape = inp_feat[0].shape.as_list()[:1] + h.shape.as_list()[1:]
h = tf.scatter_nd(ind, h, shape=shape)
h = tf.transpose(h, [1, 0, 2]) # [bs, units, channels]
return h
## wrappers to allow forward and backward to have different variables
@snt.reuse_variables
def merge_change_w_forward(self, change_w_terms, global_prefix='', prefix=''):
return self.merge_change_w(
change_w_terms, global_prefix=global_prefix, prefix=prefix)
@snt.reuse_variables
def merge_change_w_backward(self, change_w_terms, global_prefix='',
prefix=''):
return self.merge_change_w(
change_w_terms, global_prefix=global_prefix, prefix=prefix)
def merge_change_w(self, change_w_terms, global_prefix='', prefix=''):
with tf.device(
self.remote_device), tf.name_scope(global_prefix + '_merge_change_w'):
w_base = change_w_terms['w_base']
for kk in sorted(change_w_terms.keys()):
name = global_prefix + 'change_w_plane_%s' % kk
delta_w = change_w_terms[kk]
mean, var = tf.nn.moments(delta_w, [0, 1])
root_mean_square = tf.sqrt(tf.reduce_mean(delta_w ** 2) + 1e-6)
for kk in sorted(change_w_terms.keys()):
change_w_terms[kk] = self.normalize(change_w_terms[kk])
initializers = {
'w': tf.constant_initializer(0.1),
'b': tf.zeros_initializer()
}
mod = snt.Linear(
1,
name=global_prefix + '_weight_readout_coeffs',
initializers=initializers)
change_w_terms_list = [
change_w_terms[kk] for kk in sorted(change_w_terms.keys())
]
stack_terms = tf.stack(change_w_terms_list, axis=-1)
change_w = tf.squeeze(
snt.BatchApply(mod)(stack_terms), axis=-1) / len(change_w_terms)
# only allow perpendicular updates, or updates which grow length. don't
# allow length to decay towards zero.
ip = tf.reduce_mean(change_w * w_base)
# zero out any updates that shrink length
ip = tf.nn.relu(ip)
change_w -= w_base * ip
change_w /= tf.sqrt(len(change_w_terms) * 1.)
change_w = self.normalize(change_w)
# encourage the receptive field to not collapse to 0
change_w -= w_base / 7. # This is an arbitrary scale choice
return tf.identity(change_w)
@snt.reuse_variables
def bias_readout(self, h):
with tf.device(self.remote_device):
mod = snt.Linear(1, name='bias_readout')
ret = snt.BatchApply(mod)(h)
return tf.squeeze(ret, 2)
@snt.reuse_variables
def next_delta(self, z, h, d):
with tf.device(self.remote_device):
return d * tf.expand_dims(tf.nn.sigmoid(z), 2) + self.to_delta_size(h)
@utils.create_variables_in_class_scope
def get_readout_mod(self, name):
if name not in self.readout_mods:
self.readout_mods[name] = GradChannelReadout(
self.num_grad_channels, device=self.remote_device, name=name)
return self.readout_mods[name]
@utils.create_variables_in_class_scope
def low_rank_readout(self, name, h1, h2, psd=False):
BS = h1.shape.as_list()[0]
r_t = self.get_readout_mod(name + '_top')(h1)
if psd:
r_b = r_t
else:
r_b = self.get_readout_mod(name + '_bottom')(h2)
return tf.reduce_mean(tf.matmul(r_b, r_t, transpose_a=True), axis=0) / BS
@snt.reuse_variables
def to_delta_size(self, h):
with tf.device(self.remote_device):
mod = snt.Linear(self.delta_dim)
return snt.BatchApply(mod)(h)
@snt.reuse_variables
def initial_state(self, variables):
"""The inner optimization state.
Args:
variables: list of tf.Variable
list of variables to get the initial state of.
Returns:
opt_state: OptState
"""
with tf.device(self.local_device):
initial_opt_state = self.opt.get_state(variables)
return OptState(
variables=variables, opt_state=initial_opt_state, index=tf.constant(0))
@snt.reuse_variables
def compute_next_state(self, grads, learning_rate, cur_state,
cur_transformer):
summaries = []
with tf.device(self.local_device):
with tf.control_dependencies(summaries):
new_vars, new_state = self.opt.compute_updates(
cur_state.variables, grads, learning_rate, cur_state.opt_state)
pass
return OptState(
variables=tuple(new_vars),
opt_state=new_state,
index=cur_state.index + 1)
def assign_state(self, base_model, next_state):
var_ups = [
v.assign(nv) for v, nv in utils.eqzip(base_model.get_variables(),
next_state.variables)
]
opt_ups = self.opt.assign_state(next_state.opt_state)
return tf.group(opt_ups, *var_ups)
def local_variables(self):
return list(self.opt.get_variables())
def remote_variables(self):
train = list(
snt.get_variables_in_module(self, tf.GraphKeys.TRAINABLE_VARIABLES))
train += list(
snt.get_variables_in_module(self,
tf.GraphKeys.MOVING_AVERAGE_VARIABLES))
return train
class MoreLocalWeightUpdateWLearner(snt.AbstractModule):
"""The BaseModel that the UnsupervisedUpdateRule acts on.
"""
def __init__(self,
remote_device,
local_device,
inner_size=128,
output_size=32,
n_layers=4,
shuffle_input=True,
activation_fn=tf.nn.relu,
identical_updates=True,
**kwargs):
self.local_device = local_device
self.remote_device = remote_device
self.inner_size = inner_size
self.n_layers = n_layers
self.shuffle_input = shuffle_input
self.activation_fn = activation_fn
self.identical_updates = identical_updates
self.output_size = output_size
    if output_size is None:
self.output_size = inner_size
self.shuffle_ind = None
super(MoreLocalWeightUpdateWLearner, self).__init__(
name='LocalWeightUpdateWLearner', **kwargs)
@snt.reuse_variables
def get_shuffle_ind(self, size):
if self.shuffle_ind is None:
# put the shuffle in tf memory to make the eval jobs
# re-entrant.
shuffle_ind_val = np.random.permutation(size)
shuffle_ind = tf.get_variable(
name='shuffle_ind', dtype=tf.int64, initializer=shuffle_ind_val)
unshuffle_ind = tf.scatter_nd(
tf.reshape(shuffle_ind, [-1, 1]), tf.range(size), [size])
return shuffle_ind, unshuffle_ind
def _build(self, batch):
image = batch.image
x0 = snt.BatchFlatten()(image)
if self.shuffle_input:
size = x0.shape.as_list()[1]
shuffle_ind, unshuffle_ind = self.get_shuffle_ind(size)
x0 = tf.gather(x0, shuffle_ind, axis=1)
xs = [x0]
mods = []
zs = []
init = {}
for i in range(self.n_layers):
mod = common.LinearBatchNorm(
self.inner_size, activation_fn=self.activation_fn)
z, x = mod(xs[i])
xs.append(x)
zs.append(z)
mods.append(mod)
mod = common.LinearBatchNorm(
self.output_size, activation_fn=self.activation_fn)
z, x = mod(xs[-1])
mods.append(mod)
xs.append(x)
zs.append(z)
embedding_x = xs[-1]
# make a random set of backward mods
backward_mods = []
for i, (x, x_p1) in enumerate(zip(xs[0:-1], xs[1:])):
m = common.LinearBatchNorm(
x_p1.shape.as_list()[1], activation_fn=tf.identity)
_ = m(x)
backward_mods.append(m)
shape = image.shape.as_list()[1:4]
for mods_p, prefix in [(mods, 'forward'), (backward_mods, 'backward')]:
if self.shuffle_input:
unshuf_w = tf.gather(mods_p[0].w, unshuffle_ind, axis=0)
else:
unshuf_w = mods_p[0].w
img = summary_utils.first_layer_weight_image(unshuf_w, shape)
tf.summary.image(prefix + '_w0_receptive_field', img)
for i, m in enumerate(mods_p[0:]):
img = summary_utils.inner_layer_weight_image(m.w)
tf.summary.image(prefix + '_w%d' % (i + 1), img)
img = summary_utils.sorted_images(image, batch.label_onehot)
tf.summary.image('inputs', img)
# log out pre-activations and activations
for all_vis, base_name in [(xs, 'x'), (zs, 'z')]:
for i, x_vis in enumerate(all_vis):
img = summary_utils.activation_image(x_vis, batch.label_onehot)
tf.summary.image('%s%d' % (base_name, i), img)
embedding_x = tf.identity(embedding_x)
outputs = BaseModelOutputs(
xs=xs, zs=zs, mods=mods, batch=batch, backward_mods=backward_mods)
return embedding_x, outputs
def compute_next_h_d(self, meta_opt, w_bot, w_top, bias, x, z, d, backward_w):
""" Propogate error back down the network while computing hidden state.
"""
if z is None:
z = x
h = meta_opt.compute_h(x, z, d, bias, w_bot,
w_top) # [bs x 60 x h_channels]
# compute the next d
delta = meta_opt.next_delta(z, h, d)
if backward_w is not None:
def delta_matmul(w, delta):
        d = tf.transpose(delta, [0, 2, 1])  # [bs x delta_channels x n_units]
d = snt.BatchApply(lambda x: tf.matmul(x, w, transpose_b=True))(d)
d = tf.transpose(d, [0, 2, 1])
return d
# replace the "backward pass" with a random matrix.
d = delta_matmul(backward_w, delta) # [bs x 60 x delta_channels]
var = tf.reduce_mean(tf.square(d), [2], keepdims=True)
d = d * tf.rsqrt(1e-6 + var)
return h, d
def weight_change_for_layer(self, meta_opt, l_idx, w_base, b_base, upper_h,
lower_h, upper_x, lower_x, prefix, include_bias):
"""Compute the change in weights for each layer.
    This computes something roughly analogous to a gradient.
"""
reduce_upper_h = upper_h
reduce_lower_h = lower_h
BS = lower_x.shape.as_list()[0]
change_w_terms = dict()
# initial weight value normalized
# normalize the weights per receptive-field, rather than per-matrix
weight_scale = tf.rsqrt(
tf.reduce_mean(w_base ** 2, axis=0, keepdims=True) + 1e-6)
w_base *= weight_scale
change_w_terms['w_base'] = w_base
# this will act to decay larger weights towards zero
change_w_terms['large_decay'] = w_base ** 2 * tf.sign(w_base)
# term based on activations
ux0 = upper_x - tf.reduce_mean(upper_x, axis=0, keepdims=True)
uxs0 = ux0 * tf.rsqrt(tf.reduce_mean(ux0 ** 2, axis=0, keepdims=True) + 1e-6)
change_U = tf.matmul(uxs0, uxs0, transpose_a=True) / BS
change_U /= tf.sqrt(float(change_U.shape.as_list()[0]))
cw = tf.matmul(w_base, change_U)
cw_scale = tf.rsqrt(tf.reduce_mean(cw ** 2 + 1e-8))
cw *= cw_scale
change_w_terms['decorr_x'] = cw
# hebbian term
lx0 = lower_x - tf.reduce_mean(lower_x, axis=0, keepdims=True)
lxs0 = lx0 * tf.rsqrt(tf.reduce_mean(lx0 ** 2, axis=0, keepdims=True) + 1e-6)
cw = tf.matmul(lxs0, uxs0, transpose_a=True) / BS
change_w_terms['hebb'] = -cw
# 0th order term
w_term = meta_opt.low_rank_readout(prefix + 'weight_readout_0', upper_h,
lower_h)
change_w_terms['0_order'] = w_term
    # rbf term (weight update scaled by distance from 0)
w_term = meta_opt.low_rank_readout(prefix + 'weight_readout_rbf',
reduce_upper_h, reduce_lower_h)
change_w_terms['rbf'] = tf.exp(-w_base ** 2) * w_term
# 1st order term (weight dependent update to weights)
w_term = meta_opt.low_rank_readout(prefix + 'weight_readout_1',
reduce_upper_h, reduce_lower_h)
change_w_terms['1_order'] = w_base * w_term
# more terms based on single layer readouts.
for update_type in ['lin', 'sqr']:
for h_source, h_source_name in [(reduce_upper_h, 'upper'),
(reduce_lower_h, 'lower')]:
structures = ['symm']
if update_type == 'lin' and h_source_name == 'upper':
structures += ['psd']
for structure in structures:
name = update_type + '_' + h_source_name + '_' + structure
if structure == 'symm':
change_U = meta_opt.low_rank_readout(prefix + name, h_source,
h_source)
change_U = (change_U + tf.transpose(change_U)) / tf.sqrt(2.)
change_U = tf.matrix_set_diag(change_U,
tf.zeros(
[change_U.shape.as_list()[0]]))
elif structure == 'psd':
change_U = meta_opt.low_rank_readout(
prefix + name, h_source, None, psd=True)
else:
assert False
change_U /= tf.sqrt(float(change_U.shape.as_list()[0]))
if update_type == 'lin':
sign_multiplier = tf.ones_like(w_base)
w_base_l = w_base
elif update_type == 'sqr':
sign_multiplier = tf.sign(w_base)
w_base_l = tf.sqrt(1. + w_base ** 2) - 1.
if h_source_name == 'upper':
cw = tf.matmul(w_base_l, change_U) # [N^l-1 x N^l]
elif h_source_name == 'lower':
cw = tf.matmul(change_U, w_base_l)
change_w_terms[name] = cw * sign_multiplier
if prefix == 'forward':
change_w = meta_opt.merge_change_w_forward(
change_w_terms, global_prefix=prefix, prefix='l%d' % l_idx)
elif prefix == 'backward':
change_w = meta_opt.merge_change_w_backward(
change_w_terms, global_prefix=prefix, prefix='l%d' % l_idx)
else:
assert (False)
if not include_bias:
return change_w
change_b = tf.reduce_mean(meta_opt.bias_readout(upper_h), [0])
# force nonlinearities to be exercised -- biases can't all be increased without bound
change_b_mean = tf.reduce_mean(change_b)
offset = -tf.nn.relu(-change_b_mean)
change_b -= offset
var = tf.reduce_mean(tf.square(change_b), [0], keepdims=True)
change_b = (change_b) / tf.sqrt(0.5 + var)
return change_w, change_b
def compute_next_state(self, outputs, meta_opt, previous_state):
zs = outputs.zs
xs = outputs.xs
batch = outputs.batch
mods = outputs.mods
backward_mods = outputs.backward_mods
variables = self.get_variables()
rev_mods = mods[::-1]
rev_backward_mods = backward_mods[::-1]
rev_xs = xs[::-1]
rev_zs = zs[::-1] + [None]
to_top = xs[-1]
# variables that change in the loop
hs = []
d = meta_opt.compute_top_delta(to_top) # [bs x 32 x delta_channels]
iterator = utils.eqzip(rev_backward_mods + [None], rev_mods + [None],
[None] + rev_mods, rev_xs, rev_zs)
for (backward_mod, lower_mod, upper_mod, x, z) in iterator:
w_bot = None
      if lower_mod is not None:
w_bot = previous_state.variables[variables.index(lower_mod.w)]
w_top = None
      if upper_mod is not None:
w_top = previous_state.variables[variables.index(upper_mod.w)]
backward_w = None
if backward_mod is not None:
backward_w = previous_state.variables[variables.index(backward_mod.w)]
if lower_mod is not None:
bias = previous_state.variables[variables.index(lower_mod.b)]
else:
bias = tf.zeros([x.shape[1]])
h, d = self.compute_next_h_d(
meta_opt=meta_opt,
w_bot=w_bot,
w_top=w_top,
bias=bias,
backward_w=backward_w,
x=x,
z=z,
d=d)
hs.append(h)
w_forward_var_idx = [variables.index(mod.w) for mod in rev_mods]
w_backward_var_idx = [variables.index(mod.w) for mod in rev_backward_mods]
b_var_idx = [variables.index(mod.b) for mod in rev_mods]
# storage location for outputs of below loop
grads = [None for _ in previous_state.variables]
    # Override learning rate for perturbation variables.
    learning_rate = [None for _ in previous_state.variables]
    # This is a map -- no state is shared across loop iterations.
for l_idx, w_forward_idx, w_backward_idx, b_idx, upper_h, lower_h, lower_x, upper_x in utils.eqzip(
range(len(w_forward_var_idx)), w_forward_var_idx, w_backward_var_idx,
b_var_idx, hs[:-1], hs[1:], xs[::-1][1:], xs[::-1][:-1]):
b_base = previous_state.variables[b_idx]
change_w_forward, change_b = self.weight_change_for_layer(
meta_opt=meta_opt,
l_idx=l_idx,
w_base=previous_state.variables[w_forward_idx],
b_base=b_base,
upper_h=upper_h,
lower_h=lower_h,
upper_x=upper_x,
lower_x=lower_x,
prefix='forward',
include_bias=True)
if self.identical_updates:
change_w_backward = change_w_forward
else:
change_w_backward = self.weight_change_for_layer(
meta_opt=meta_opt,
l_idx=l_idx,
w_base=previous_state.variables[w_backward_idx],
b_base=b_base,
upper_h=upper_h,
lower_h=lower_h,
upper_x=upper_x,
lower_x=lower_x,
prefix='backward',
include_bias=False)
grads[w_forward_idx] = change_w_forward
grads[w_backward_idx] = change_w_backward
grads[b_idx] = change_b
cur_transformer = common.transformer_at_state(self,
previous_state.variables)
next_state = meta_opt.compute_next_state(
grads,
learning_rate=learning_rate,
cur_state=previous_state,
cur_transformer=lambda x: cur_transformer(x)[0])
return next_state
def initial_state(self, meta_opt):
return meta_opt.initial_state(self.get_variables())
```
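`MoreLocalWeightUpdateProcess.normalize` above rescales a weight update per receptive field (per column of the weight matrix) by the root mean square over its input axis, rather than normalizing the whole matrix at once. A NumPy sketch of that per-column rescaling, with an illustrative epsilon and update matrix:
```python
import numpy as np

def normalize(change_w, normalize_epsilon=1.0):
  # Per-receptive-field second moment, as in normalize() above.
  var = np.mean(np.square(change_w), axis=0, keepdims=True)
  return change_w / np.sqrt(normalize_epsilon + var)

change_w = np.array([[3.0, 0.1],
                     [4.0, 0.2]])  # illustrative update for a 2x2 weight matrix
print(normalize(change_w))  # the large first column is shrunk much more than the second
```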
#### File: maskgan/losses/losses.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
def discriminator_loss(predictions, labels, missing_tokens):
"""Discriminator loss based on predictions and labels.
Args:
predictions: Discriminator linear predictions Tensor of shape [batch_size,
sequence_length]
labels: Labels for predictions, Tensor of shape [batch_size,
sequence_length]
missing_tokens: Indicator for the missing tokens. Evaluate the loss only
on the tokens that were missing.
Returns:
loss: Scalar tf.float32 loss.
"""
loss = tf.losses.sigmoid_cross_entropy(labels,
predictions,
weights=missing_tokens)
loss = tf.Print(
loss, [loss, labels, missing_tokens],
message='loss, labels, missing_tokens',
summarize=25,
first_n=25)
return loss
def cross_entropy_loss_matrix(gen_labels, gen_logits):
"""Computes the cross entropy loss for G.
Args:
gen_labels: Labels for the correct token.
gen_logits: Generator logits.
Returns:
loss_matrix: Loss matrix of shape [batch_size, sequence_length].
"""
cross_entropy_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=gen_labels, logits=gen_logits)
return cross_entropy_loss
def GAN_loss_matrix(dis_predictions):
"""Computes the cross entropy loss for G.
Args:
dis_predictions: Discriminator predictions.
Returns:
loss_matrix: Loss matrix of shape [batch_size, sequence_length].
"""
eps = tf.constant(1e-7, tf.float32)
gan_loss_matrix = -tf.log(dis_predictions + eps)
return gan_loss_matrix
def generator_GAN_loss(predictions):
"""Generator GAN loss based on Discriminator predictions."""
return -tf.log(tf.reduce_mean(predictions))
def generator_blended_forward_loss(gen_logits, gen_labels, dis_predictions,
is_real_input):
"""Computes the masked-loss for G. This will be a blend of cross-entropy
loss where the true label is known and GAN loss where the true label has been
masked.
Args:
gen_logits: Generator logits.
gen_labels: Labels for the correct token.
dis_predictions: Discriminator predictions.
is_real_input: Tensor indicating whether the label is present.
Returns:
loss: Scalar tf.float32 total loss.
"""
cross_entropy_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=gen_labels, logits=gen_logits)
gan_loss = -tf.log(dis_predictions)
loss_matrix = tf.where(is_real_input, cross_entropy_loss, gan_loss)
return tf.reduce_mean(loss_matrix)
def wasserstein_generator_loss(gen_logits, gen_labels, dis_values,
is_real_input):
"""Computes the masked-loss for G. This will be a blend of cross-entropy
loss where the true label is known and GAN loss where the true label is
missing.
Args:
gen_logits: Generator logits.
gen_labels: Labels for the correct token.
dis_values: Discriminator values Tensor of shape [batch_size,
sequence_length].
is_real_input: Tensor indicating whether the label is present.
Returns:
loss: Scalar tf.float32 total loss.
"""
cross_entropy_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=gen_labels, logits=gen_logits)
# Maximize the dis_values (minimize the negative)
gan_loss = -dis_values
loss_matrix = tf.where(is_real_input, cross_entropy_loss, gan_loss)
loss = tf.reduce_mean(loss_matrix)
return loss
def wasserstein_discriminator_loss(real_values, fake_values):
"""Wasserstein discriminator loss.
Args:
real_values: Value given by the Wasserstein Discriminator to real data.
fake_values: Value given by the Wasserstein Discriminator to fake data.
Returns:
loss: Scalar tf.float32 loss.
"""
real_avg = tf.reduce_mean(real_values)
fake_avg = tf.reduce_mean(fake_values)
wasserstein_loss = real_avg - fake_avg
return wasserstein_loss
def wasserstein_discriminator_loss_intrabatch(values, is_real_input):
"""Wasserstein discriminator loss. This is an odd variant where the value
difference is between the real tokens and the fake tokens within a single
batch.
Args:
values: Value given by the Wasserstein Discriminator of shape [batch_size,
sequence_length] to an imputed batch (real and fake).
is_real_input: tf.bool Tensor of shape [batch_size, sequence_length]. If
true, it indicates that the label is known.
Returns:
wasserstein_loss: Scalar tf.float32 loss.
"""
zero_tensor = tf.constant(0., dtype=tf.float32, shape=[])
present = tf.cast(is_real_input, tf.float32)
missing = tf.cast(1 - present, tf.float32)
# Counts for real and fake tokens.
real_count = tf.reduce_sum(present)
fake_count = tf.reduce_sum(missing)
# Averages for real and fake token values.
  real = tf.multiply(values, present)
  fake = tf.multiply(values, missing)
real_avg = tf.reduce_sum(real) / real_count
fake_avg = tf.reduce_sum(fake) / fake_count
# If there are no real or fake entries in the batch, we assign an average
# value of zero.
real_avg = tf.where(tf.equal(real_count, 0), zero_tensor, real_avg)
fake_avg = tf.where(tf.equal(fake_count, 0), zero_tensor, fake_avg)
wasserstein_loss = real_avg - fake_avg
return wasserstein_loss
```
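`wasserstein_discriminator_loss_intrabatch` averages the critic values over the real positions and the fake positions of the same batch and takes the difference (the zero-count guard via `tf.where` is omitted here). A NumPy sketch with illustrative values:
```python
import numpy as np

values = np.array([[0.9, -0.3, 0.4],
                   [0.1, 0.8, -0.6]])            # illustrative critic values
is_real_input = np.array([[True, False, True],
                          [False, True, False]])

present = is_real_input.astype(np.float32)
missing = 1.0 - present
real_avg = np.sum(values * present) / np.sum(present)
fake_avg = np.sum(values * missing) / np.sum(missing)
print(real_avg - fake_avg)  # ~0.97: the critic scores real tokens higher than fakes
```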
#### File: maskgan/models/attention_utils.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.framework import function
__all__ = [
"prepare_attention", "attention_decoder_fn_train",
"attention_decoder_fn_inference"
]
def attention_decoder_fn_train(encoder_state,
attention_keys,
attention_values,
attention_score_fn,
attention_construct_fn,
name=None):
"""Attentional decoder function for `dynamic_rnn_decoder` during training.
The `attention_decoder_fn_train` is a training function for an
attention-based sequence-to-sequence model. It should be used when
`dynamic_rnn_decoder` is in the training mode.
The `attention_decoder_fn_train` is called with a set of the user arguments
and returns the `decoder_fn`, which can be passed to the
`dynamic_rnn_decoder`, such that
```
dynamic_fn_train = attention_decoder_fn_train(encoder_state)
outputs_train, state_train = dynamic_rnn_decoder(
decoder_fn=dynamic_fn_train, ...)
```
Further usage can be found in the `kernel_tests/seq2seq_test.py`.
Args:
encoder_state: The encoded state to initialize the `dynamic_rnn_decoder`.
attention_keys: to be compared with target states.
attention_values: to be used to construct context vectors.
attention_score_fn: to compute similarity between key and target states.
attention_construct_fn: to build attention states.
name: (default: `None`) NameScope for the decoder function;
defaults to "simple_decoder_fn_train"
Returns:
A decoder function with the required interface of `dynamic_rnn_decoder`
intended for training.
"""
with tf.name_scope(name, "attention_decoder_fn_train", [
encoder_state, attention_keys, attention_values, attention_score_fn,
attention_construct_fn
]):
pass
def decoder_fn(time, cell_state, cell_input, cell_output, context_state):
"""Decoder function used in the `dynamic_rnn_decoder` for training.
Args:
time: positive integer constant reflecting the current timestep.
cell_state: state of RNNCell.
cell_input: input provided by `dynamic_rnn_decoder`.
cell_output: output of RNNCell.
context_state: context state provided by `dynamic_rnn_decoder`.
Returns:
A tuple (done, next state, next input, emit output, next context state)
where:
done: `None`, which is used by the `dynamic_rnn_decoder` to indicate
that `sequence_lengths` in `dynamic_rnn_decoder` should be used.
next state: `cell_state`, this decoder function does not modify the
given state.
next input: `cell_input`, this decoder function does not modify the
given input. The input could be modified when applying e.g. attention.
emit output: `cell_output`, this decoder function does not modify the
given output.
next context state: `context_state`, this decoder function does not
modify the given context state. The context state could be modified when
applying e.g. beam search.
"""
with tf.name_scope(
name, "attention_decoder_fn_train",
[time, cell_state, cell_input, cell_output, context_state]):
if cell_state is None: # first call, return encoder_state
cell_state = encoder_state
# init attention
attention = _init_attention(encoder_state)
else:
# construct attention
attention = attention_construct_fn(cell_output, attention_keys,
attention_values)
cell_output = attention
# combine cell_input and attention
next_input = tf.concat([cell_input, attention], 1)
return (None, cell_state, next_input, cell_output, context_state)
return decoder_fn
def attention_decoder_fn_inference(output_fn,
encoder_state,
attention_keys,
attention_values,
attention_score_fn,
attention_construct_fn,
embeddings,
start_of_sequence_id,
end_of_sequence_id,
maximum_length,
num_decoder_symbols,
dtype=tf.int32,
name=None):
"""Attentional decoder function for `dynamic_rnn_decoder` during inference.
The `attention_decoder_fn_inference` is a simple inference function for a
sequence-to-sequence model. It should be used when `dynamic_rnn_decoder` is
in the inference mode.
The `attention_decoder_fn_inference` is called with user arguments
and returns the `decoder_fn`, which can be passed to the
`dynamic_rnn_decoder`, such that
```
dynamic_fn_inference = attention_decoder_fn_inference(...)
outputs_inference, state_inference = dynamic_rnn_decoder(
decoder_fn=dynamic_fn_inference, ...)
```
Further usage can be found in the `kernel_tests/seq2seq_test.py`.
Args:
output_fn: An output function to project your `cell_output` onto class
logits.
      An example of an output function:
      ```
      with tf.variable_scope("decoder") as varscope:
output_fn = lambda x: tf.contrib.layers.linear(x, num_decoder_symbols,
scope=varscope)
outputs_train, state_train = seq2seq.dynamic_rnn_decoder(...)
logits_train = output_fn(outputs_train)
varscope.reuse_variables()
logits_inference, state_inference = seq2seq.dynamic_rnn_decoder(
output_fn=output_fn, ...)
```
If `None` is supplied it will act as an identity function, which
might be wanted when using the RNNCell `OutputProjectionWrapper`.
encoder_state: The encoded state to initialize the `dynamic_rnn_decoder`.
attention_keys: to be compared with target states.
attention_values: to be used to construct context vectors.
attention_score_fn: to compute similarity between key and target states.
attention_construct_fn: to build attention states.
embeddings: The embeddings matrix used for the decoder sized
`[num_decoder_symbols, embedding_size]`.
start_of_sequence_id: The start of sequence ID in the decoder embeddings.
end_of_sequence_id: The end of sequence ID in the decoder embeddings.
    maximum_length: The maximum allowed number of time steps to decode.
num_decoder_symbols: The number of classes to decode at each time step.
dtype: (default: `tf.int32`) The default data type to use when
handling integer objects.
name: (default: `None`) NameScope for the decoder function;
defaults to "attention_decoder_fn_inference"
Returns:
A decoder function with the required interface of `dynamic_rnn_decoder`
intended for inference.
"""
with tf.name_scope(name, "attention_decoder_fn_inference", [
output_fn, encoder_state, attention_keys, attention_values,
attention_score_fn, attention_construct_fn, embeddings,
start_of_sequence_id, end_of_sequence_id, maximum_length,
num_decoder_symbols, dtype
]):
start_of_sequence_id = tf.convert_to_tensor(start_of_sequence_id, dtype)
end_of_sequence_id = tf.convert_to_tensor(end_of_sequence_id, dtype)
maximum_length = tf.convert_to_tensor(maximum_length, dtype)
num_decoder_symbols = tf.convert_to_tensor(num_decoder_symbols, dtype)
encoder_info = tf.contrib.framework.nest.flatten(encoder_state)[0]
batch_size = encoder_info.get_shape()[0].value
if output_fn is None:
output_fn = lambda x: x
if batch_size is None:
batch_size = tf.shape(encoder_info)[0]
def decoder_fn(time, cell_state, cell_input, cell_output, context_state):
"""Decoder function used in the `dynamic_rnn_decoder` for inference.
The main difference between this decoder function and the `decoder_fn` in
    `attention_decoder_fn_train` is how `next_cell_input` is calculated. In this
    decoder function we calculate the next input by applying an argmax across
the feature dimension of the output from the decoder. This is a
greedy-search approach. (Bahdanau et al., 2014) & (Sutskever et al., 2014)
use beam-search instead.
Args:
time: positive integer constant reflecting the current timestep.
cell_state: state of RNNCell.
cell_input: input provided by `dynamic_rnn_decoder`.
cell_output: output of RNNCell.
context_state: context state provided by `dynamic_rnn_decoder`.
Returns:
A tuple (done, next state, next input, emit output, next context state)
where:
done: A boolean vector to indicate which sentences have reached an
`end_of_sequence_id`. This is used for early stopping by the
`dynamic_rnn_decoder`. When `time>=maximum_length` a boolean vector with
all elements as `true` is returned.
next state: `cell_state`, this decoder function does not modify the
given state.
next input: The embedding from argmax of the `cell_output` is used as
`next_input`.
emit output: If `output_fn is None` the supplied `cell_output` is
returned, else the `output_fn` is used to update the `cell_output`
before calculating `next_input` and returning `cell_output`.
next context state: `context_state`, this decoder function does not
modify the given context state. The context state could be modified when
applying e.g. beam search.
Raises:
ValueError: if cell_input is not None.
"""
with tf.name_scope(
name, "attention_decoder_fn_inference",
[time, cell_state, cell_input, cell_output, context_state]):
if cell_input is not None:
raise ValueError(
"Expected cell_input to be None, but saw: %s" % cell_input)
if cell_output is None:
# invariant that this is time == 0
next_input_id = tf.ones([batch_size], dtype=dtype) * start_of_sequence_id
done = tf.zeros([batch_size], dtype=tf.bool)
cell_state = encoder_state
cell_output = tf.zeros([num_decoder_symbols], dtype=tf.float32)
cell_input = tf.gather(embeddings, next_input_id)
# init attention
attention = _init_attention(encoder_state)
else:
# construct attention
attention = attention_construct_fn(cell_output, attention_keys,
attention_values)
cell_output = attention
# argmax decoder
cell_output = output_fn(cell_output) # logits
next_input_id = tf.cast(tf.argmax(cell_output, 1), dtype=dtype)
done = tf.equal(next_input_id, end_of_sequence_id)
cell_input = tf.gather(embeddings, next_input_id)
# combine cell_input and attention
next_input = tf.concat([cell_input, attention], 1)
# if time > maxlen, return all true vector
done = tf.cond(tf.greater(time, maximum_length),
               lambda: tf.ones([batch_size], dtype=tf.bool),
               lambda: done)
return (done, cell_state, next_input, cell_output, context_state)
return decoder_fn
## Helper functions ##
def prepare_attention(attention_states, attention_option, num_units,
reuse=None):
"""Prepare keys/values/functions for attention.
Args:
attention_states: hidden states to attend over.
attention_option: how to compute attention, either "luong" or "bahdanau".
num_units: hidden state dimension.
reuse: whether to reuse variable scope.
Returns:
attention_keys: to be compared with target states.
attention_values: to be used to construct context vectors.
attention_score_fn: to compute similarity between key and target states.
attention_construct_fn: to build attention states.
"""
# Prepare attention keys / values from attention_states
with tf.variable_scope("attention_keys", reuse=reuse) as scope:
attention_keys = tf.contrib.layers.linear(
attention_states, num_units, biases_initializer=None, scope=scope)
attention_values = attention_states
# Attention score function
attention_score_fn = _create_attention_score_fn("attention_score", num_units,
attention_option, reuse)
# Attention construction function
attention_construct_fn = _create_attention_construct_fn(
"attention_construct", num_units, attention_score_fn, reuse)
return (attention_keys, attention_values, attention_score_fn,
attention_construct_fn)
def _init_attention(encoder_state):
"""Initialize attention. Handling both LSTM and GRU.
Args:
encoder_state: The encoded state to initialize the `dynamic_rnn_decoder`.
Returns:
attn: initial zero attention vector.
"""
# Multi- vs single-layer
# TODO(thangluong): is this the best way to check?
if isinstance(encoder_state, tuple):
top_state = encoder_state[-1]
else:
top_state = encoder_state
# LSTM vs GRU
if isinstance(top_state, tf.contrib.rnn.LSTMStateTuple):
attn = tf.zeros_like(top_state.h)
else:
attn = tf.zeros_like(top_state)
return attn
def _create_attention_construct_fn(name, num_units, attention_score_fn, reuse):
"""Function to compute attention vectors.
Args:
name: to label variables.
num_units: hidden state dimension.
attention_score_fn: to compute similarity between key and target states.
reuse: whether to reuse variable scope.
Returns:
attention_construct_fn: to build attention states.
"""
def construct_fn(attention_query, attention_keys, attention_values):
with tf.variable_scope(name, reuse=reuse) as scope:
context = attention_score_fn(attention_query, attention_keys,
attention_values)
concat_input = tf.concat([attention_query, context], 1)
attention = tf.contrib.layers.linear(
concat_input, num_units, biases_initializer=None, scope=scope)
return attention
return construct_fn
# keys: [batch_size, attention_length, attn_size]
# query: [batch_size, 1, attn_size]
# return weights [batch_size, attention_length]
@function.Defun(func_name="attn_add_fun", noinline=True)
def _attn_add_fun(v, keys, query):
return tf.reduce_sum(v * tf.tanh(keys + query), [2])
@function.Defun(func_name="attn_mul_fun", noinline=True)
def _attn_mul_fun(keys, query):
return tf.reduce_sum(keys * query, [2])
def _create_attention_score_fn(name,
num_units,
attention_option,
reuse,
dtype=tf.float32):
"""Different ways to compute attention scores.
Args:
name: to label variables.
num_units: hidden state dimension.
attention_option: how to compute attention, either "luong" or "bahdanau".
"bahdanau": additive (Bahdanau et al., ICLR'2015)
"luong": multiplicative (Luong et al., EMNLP'2015)
reuse: whether to reuse variable scope.
dtype: (default: `tf.float32`) data type to use.
Returns:
attention_score_fn: to compute similarity between key and target states.
"""
with tf.variable_scope(name, reuse=reuse):
if attention_option == "bahdanau":
query_w = tf.get_variable("attnW", [num_units, num_units], dtype=dtype)
score_v = tf.get_variable("attnV", [num_units], dtype=dtype)
def attention_score_fn(query, keys, values):
"""Put attention masks on attention_values using attention_keys and query.
Args:
query: A Tensor of shape [batch_size, num_units].
keys: A Tensor of shape [batch_size, attention_length, num_units].
values: A Tensor of shape [batch_size, attention_length, num_units].
Returns:
context_vector: A Tensor of shape [batch_size, num_units].
Raises:
ValueError: if attention_option is neither "luong" nor "bahdanau".
"""
if attention_option == "bahdanau":
# transform query
query = tf.matmul(query, query_w)
# reshape query: [batch_size, 1, num_units]
query = tf.reshape(query, [-1, 1, num_units])
# attn_fun
scores = _attn_add_fun(score_v, keys, query)
elif attention_option == "luong":
# reshape query: [batch_size, 1, num_units]
query = tf.reshape(query, [-1, 1, num_units])
# attn_fun
scores = _attn_mul_fun(keys, query)
else:
raise ValueError("Unknown attention option %s!" % attention_option)
# Compute alignment weights
# scores: [batch_size, length]
# alignments: [batch_size, length]
# TODO(thangluong): not normalize over padding positions.
alignments = tf.nn.softmax(scores)
# Now calculate the attention-weighted vector.
alignments = tf.expand_dims(alignments, 2)
context_vector = tf.reduce_sum(alignments * values, [1])
context_vector.set_shape([None, num_units])
return context_vector
return attention_score_fn
```
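For orientation, here is a minimal, hypothetical wiring of the helpers above end to end. It is not part of the original file: it assumes TensorFlow 1.x, that `prepare_attention` and `attention_decoder_fn_inference` are importable from this module, and made-up shapes and vocabulary ids.
```python
import tensorflow as tf

# Made-up sizes for illustration only.
batch_size, src_len, num_units, num_decoder_symbols = 32, 20, 128, 1000

attention_states = tf.placeholder(tf.float32, [batch_size, src_len, num_units])
encoder_state = tf.placeholder(tf.float32, [batch_size, num_units])  # e.g. final GRU state
embeddings = tf.get_variable("dec_embeddings", [num_decoder_symbols, num_units])

(attention_keys, attention_values, attention_score_fn,
 attention_construct_fn) = prepare_attention(attention_states, "luong", num_units)

output_fn = lambda x: tf.contrib.layers.linear(x, num_decoder_symbols,
                                               scope="decoder_output")

decoder_fn = attention_decoder_fn_inference(
    output_fn, encoder_state, attention_keys, attention_values,
    attention_score_fn, attention_construct_fn, embeddings,
    start_of_sequence_id=1, end_of_sequence_id=2, maximum_length=30,
    num_decoder_symbols=num_decoder_symbols)
# decoder_fn is then handed to dynamic_rnn_decoder(decoder_fn=decoder_fn, ...)
```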
#### File: research/minigo/dualnet_model.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
_BATCH_NORM_DECAY = 0.997
_BATCH_NORM_EPSILON = 1e-5
def _batch_norm(inputs, training, center=True, scale=True):
"""Performs a batch normalization using a standard set of parameters."""
return tf.layers.batch_normalization(
inputs=inputs, momentum=_BATCH_NORM_DECAY, epsilon=_BATCH_NORM_EPSILON,
center=center, scale=scale, fused=True, training=training)
def _conv2d(inputs, filters, kernel_size):
"""Performs 2D convolution with a standard set of parameters."""
return tf.layers.conv2d(
inputs=inputs, filters=filters, kernel_size=kernel_size,
padding='same')
def _conv_block(inputs, filters, kernel_size, training):
"""A convolutional block.
Args:
inputs: A tensor representing a batch of input features with shape
[BATCH_SIZE, board_size, board_size, features.NEW_FEATURES_PLANES].
filters: The number of filters for network layers in residual tower.
kernel_size: The kernel to be used in conv2d.
training: Either True or False, whether we are currently training the
model. Needed for batch norm.
Returns:
The output tensor of the convolutional block layer.
"""
conv = _conv2d(inputs, filters, kernel_size)
batchn = _batch_norm(conv, training)
output = tf.nn.relu(batchn)
return output
def _res_block(inputs, filters, kernel_size, training):
"""A residual block.
Args:
inputs: A tensor representing a batch of input features with shape
[BATCH_SIZE, board_size, board_size, features.NEW_FEATURES_PLANES].
filters: The number of filters for network layers in residual tower.
kernel_size: The kernel to be used in conv2d.
training: Either True or False, whether we are currently training the
model. Needed for batch norm.
Returns:
The output tensor of the residual block layer.
"""
initial_output = _conv_block(inputs, filters, kernel_size, training)
int_layer2_conv = _conv2d(initial_output, filters, kernel_size)
int_layer2_batchn = _batch_norm(int_layer2_conv, training)
output = tf.nn.relu(inputs + int_layer2_batchn)
return output
class Model(object):
"""Base class for building the DualNet Model."""
def __init__(self, num_filters, num_shared_layers, fc_width, board_size):
"""Initialize a model for computing the policy and value in RL.
Args:
num_filters: Number of filters (AlphaGoZero used 256). We use 128 by
default for a 19x19 go board, and 32 for 9x9 size.
num_shared_layers: Number of shared residual blocks. AGZ used both 19
and 39. Here we use 19 for 19x19 size and 9 for 9x9 size because it's
faster to train.
fc_width: Dimensionality of the fully connected linear layer.
board_size: A single integer for the board size.
"""
self.num_filters = num_filters
self.num_shared_layers = num_shared_layers
self.fc_width = fc_width
self.board_size = board_size
self.kernel_size = [3, 3] # kernel size is from AGZ paper
def __call__(self, inputs, training):
"""Add operations to classify a batch of input Go features.
Args:
inputs: A Tensor representing a batch of input Go features with shape
[BATCH_SIZE, board_size, board_size, features.NEW_FEATURES_PLANES]
training: A boolean. Set to True to add operations required only when
training the classifier.
Returns:
policy_logits: A vector of size self.board_size * self.board_size + 1
corresponding to the policy logit probabilities for all intersections
and the pass move.
value_logits: A scalar for the value logits output.
"""
initial_output = _conv_block(
inputs=inputs, filters=self.num_filters,
kernel_size=self.kernel_size, training=training)
# the shared stack
shared_output = initial_output
for _ in range(self.num_shared_layers):
shared_output = _res_block(
inputs=shared_output, filters=self.num_filters,
kernel_size=self.kernel_size, training=training)
# policy head
policy_conv2d = _conv2d(inputs=shared_output, filters=2, kernel_size=[1, 1])
policy_batchn = _batch_norm(inputs=policy_conv2d, training=training,
center=False, scale=False)
policy_relu = tf.nn.relu(policy_batchn)
policy_logits = tf.layers.dense(
tf.reshape(policy_relu, [-1, self.board_size * self.board_size * 2]),
self.board_size * self.board_size + 1)
# value head
value_conv2d = _conv2d(shared_output, filters=1, kernel_size=[1, 1])
value_batchn = _batch_norm(value_conv2d, training,
center=False, scale=False)
value_relu = tf.nn.relu(value_batchn)
value_fc_hidden = tf.nn.relu(tf.layers.dense(
tf.reshape(value_relu, [-1, self.board_size * self.board_size]),
self.fc_width))
value_logits = tf.reshape(tf.layers.dense(value_fc_hidden, 1), [-1])
return policy_logits, value_logits
def model_fn(features, labels, mode, params, config=None): # pylint: disable=unused-argument
"""DualNet model function.
Args:
features: tensor with shape
[BATCH_SIZE, self.board_size, self.board_size,
features.NEW_FEATURES_PLANES]
labels: dict from string to tensor with shape
'pi_tensor': [BATCH_SIZE, self.board_size * self.board_size + 1]
'value_tensor': [BATCH_SIZE]
mode: a tf.estimator.ModeKeys (batchnorm params update for TRAIN only)
params: an object of hyperparams
config: ignored; is required by Estimator API.
Returns:
EstimatorSpec parameterized according to the input params and the current
mode.
"""
model = Model(params.num_filters, params.num_shared_layers, params.fc_width,
params.board_size)
policy_logits, value_logits = model(
features, mode == tf.estimator.ModeKeys.TRAIN)
policy_output = tf.nn.softmax(policy_logits, name='policy_output')
value_output = tf.nn.tanh(value_logits, name='value_output')
# Calculate model loss. The loss function sums over the mean-squared error,
# the cross-entropy losses and the l2 regularization term.
# Cross-entropy of policy
policy_entropy = -tf.reduce_mean(tf.reduce_sum(
policy_output * tf.log(policy_output), axis=1))
policy_cost = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(
logits=policy_logits, labels=labels['pi_tensor']))
# Mean squared error
value_cost = tf.reduce_mean(
tf.square(value_output - labels['value_tensor']))
# L2 term
l2_cost = params.l2_strength * tf.add_n(
[tf.nn.l2_loss(v) for v in tf.trainable_variables()
if 'bias' not in v.name])
# The loss function
combined_cost = policy_cost + value_cost + l2_cost
# Get model train ops
global_step = tf.train.get_or_create_global_step()
boundaries = [int(1e6), int(2e6)]
values = [1e-2, 1e-3, 1e-4]
learning_rate = tf.train.piecewise_constant(
global_step, boundaries, values)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_op = tf.train.MomentumOptimizer(
learning_rate, params.momentum).minimize(
combined_cost, global_step=global_step)
# Create multiple tensors for logging purpose
metric_ops = {
'accuracy': tf.metrics.accuracy(labels=labels['pi_tensor'],
predictions=policy_output,
name='accuracy_op'),
'policy_cost': tf.metrics.mean(policy_cost),
'value_cost': tf.metrics.mean(value_cost),
'l2_cost': tf.metrics.mean(l2_cost),
'policy_entropy': tf.metrics.mean(policy_entropy),
'combined_cost': tf.metrics.mean(combined_cost),
}
for metric_name, metric_op in metric_ops.items():
tf.summary.scalar(metric_name, metric_op[1])
# Return tf.estimator.EstimatorSpec
return tf.estimator.EstimatorSpec(
mode=mode,
predictions={
'policy_output': policy_output,
'value_output': value_output,
},
loss=combined_cost,
train_op=train_op,
eval_metric_ops=metric_ops)
```
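A minimal graph-construction sketch for the class above (hypothetical, TF 1.x; the number of input feature planes is an assumption, the real value comes from `features.NEW_FEATURES_PLANES`):
```python
import tensorflow as tf

board_size = 9
num_feature_planes = 17  # assumption; the real value is features.NEW_FEATURES_PLANES

inputs = tf.placeholder(tf.float32,
                        [None, board_size, board_size, num_feature_planes])
model = Model(num_filters=32, num_shared_layers=9, fc_width=64,
              board_size=board_size)
policy_logits, value_logits = model(inputs, training=False)
# policy_logits: [batch, board_size * board_size + 1]; value_logits: [batch]
```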
#### File: research/neural_programmer/parameters.py
```python
import numpy as np
import tensorflow as tf
class Parameters:
def __init__(self, u):
self.utility = u
self.init_seed_counter = 0
self.word_init = {}
def parameters(self, utility):
params = {}
inits = []
embedding_dims = self.utility.FLAGS.embedding_dims
params["unit"] = tf.Variable(
self.RandomUniformInit([len(utility.operations_set), embedding_dims]))
params["word"] = tf.Variable(
self.RandomUniformInit([utility.FLAGS.vocab_size, embedding_dims]))
params["word_match_feature_column_name"] = tf.Variable(
self.RandomUniformInit([1]))
params["controller"] = tf.Variable(
self.RandomUniformInit([2 * embedding_dims, embedding_dims]))
params["column_controller"] = tf.Variable(
self.RandomUniformInit([2 * embedding_dims, embedding_dims]))
params["column_controller_prev"] = tf.Variable(
self.RandomUniformInit([embedding_dims, embedding_dims]))
params["controller_prev"] = tf.Variable(
self.RandomUniformInit([embedding_dims, embedding_dims]))
global_step = tf.Variable(1, name="global_step")
# Weights of the question and history RNN (or LSTM).
key_list = ["question_lstm"]
for key in key_list:
# Weights going from inputs to nodes.
for wgts in ["ix", "fx", "cx", "ox"]:
params[key + "_" + wgts] = tf.Variable(
self.RandomUniformInit([embedding_dims, embedding_dims]))
# Weights going from nodes to nodes.
for wgts in ["im", "fm", "cm", "om"]:
params[key + "_" + wgts] = tf.Variable(
self.RandomUniformInit([embedding_dims, embedding_dims]))
# Biases for the gates and cell
for bias in ["i", "f", "c", "o"]:
if (bias == "f"):
print("forget gate bias")
params[key + "_" + bias] = tf.Variable(
    tf.random_uniform([embedding_dims], 1.0, 1.1,
                      self.utility.tf_data_type[self.utility.FLAGS.data_type]))
else:
params[key + "_" + bias] = tf.Variable(
self.RandomUniformInit([embedding_dims]))
params["history_recurrent"] = tf.Variable(
self.RandomUniformInit([3 * embedding_dims, embedding_dims]))
params["history_recurrent_bias"] = tf.Variable(
self.RandomUniformInit([1, embedding_dims]))
params["break_conditional"] = tf.Variable(
self.RandomUniformInit([2 * embedding_dims, embedding_dims]))
init = tf.global_variables_initializer()
return params, global_step, init
def RandomUniformInit(self, shape):
"""Returns a RandomUniform Tensor between -param_init and param_init."""
param_seed = self.utility.FLAGS.param_seed
self.init_seed_counter += 1
return tf.random_uniform(
shape, -1.0 *
(np.float32(self.utility.FLAGS.param_init)
).astype(self.utility.np_data_type[self.utility.FLAGS.data_type]),
(np.float32(self.utility.FLAGS.param_init)
).astype(self.utility.np_data_type[self.utility.FLAGS.data_type]),
self.utility.tf_data_type[self.utility.FLAGS.data_type],
param_seed + self.init_seed_counter)
```
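The class above expects a `utility` object carrying FLAGS and dtype tables. Below is a hypothetical stand-in, only to show the required shape of that object; every name and value in it is an assumption, not part of the original code.
```python
import numpy as np
import tensorflow as tf

class _FakeFlags(object):  # stand-in for the real FLAGS (assumption)
  embedding_dims = 64
  vocab_size = 10000
  param_seed = 7
  param_init = 0.1
  data_type = "float"

class _FakeUtility(object):  # stand-in for the real utility object (assumption)
  FLAGS = _FakeFlags()
  operations_set = ["select", "count", "print"]  # made-up operation names
  tf_data_type = {"float": tf.float32, "double": tf.float64}
  np_data_type = {"float": np.float32, "double": np.float64}

utility = _FakeUtility()
params, global_step, init = Parameters(utility).parameters(utility)
with tf.Session() as sess:
  sess.run(init)  # all parameter variables are now initialized
```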
#### File: object_detection/metrics/oid_od_challenge_evaluation_utils.py
```python
r"""Converts data from CSV to the OpenImagesDetectionChallengeEvaluator format.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from research.object_detection.core import standard_fields
def build_groundtruth_boxes_dictionary(data, class_label_map):
"""Builds a groundtruth dictionary from groundtruth data in CSV file.
Args:
data: Pandas DataFrame with the groundtruth data for a single image.
class_label_map: Class labelmap from string label name to an integer.
Returns:
A dictionary with keys suitable for passing to
OpenImagesDetectionChallengeEvaluator.add_single_ground_truth_image_info:
standard_fields.InputDataFields.groundtruth_boxes: float32 numpy array
of shape [num_boxes, 4] containing `num_boxes` groundtruth boxes of
the format [ymin, xmin, ymax, xmax] in absolute image coordinates.
standard_fields.InputDataFields.groundtruth_classes: integer numpy array
of shape [num_boxes] containing 1-indexed groundtruth classes for the
boxes.
standard_fields.InputDataFields.verified_labels: integer 1D numpy array
containing all classes for which labels are verified.
standard_fields.InputDataFields.groundtruth_group_of: Optional length
M numpy boolean array denoting whether a groundtruth box contains a
group of instances.
"""
data_boxes = data[data.ConfidenceImageLabel.isnull()]
data_labels = data[data.XMin.isnull()]
return {
standard_fields.InputDataFields.groundtruth_boxes:
data_boxes[['YMin', 'XMin', 'YMax', 'XMax']].values,
standard_fields.InputDataFields.groundtruth_classes:
data_boxes['LabelName'].map(lambda x: class_label_map[x]).values,
standard_fields.InputDataFields.groundtruth_group_of:
data_boxes['IsGroupOf'].values.astype(int),
standard_fields.InputDataFields.groundtruth_image_classes:
data_labels['LabelName'].map(lambda x: class_label_map[x]).values,
}
def build_predictions_dictionary(data, class_label_map):
"""Builds a predictions dictionary from predictions data in CSV file.
Args:
data: Pandas DataFrame with the predictions data for a single image.
class_label_map: Class labelmap from string label name to an integer.
Returns:
Dictionary with keys suitable for passing to
OpenImagesDetectionChallengeEvaluator.add_single_detected_image_info:
standard_fields.DetectionResultFields.detection_boxes: float32 numpy
array of shape [num_boxes, 4] containing `num_boxes` detection boxes
of the format [ymin, xmin, ymax, xmax] in absolute image coordinates.
standard_fields.DetectionResultFields.detection_scores: float32 numpy
array of shape [num_boxes] containing detection scores for the boxes.
standard_fields.DetectionResultFields.detection_classes: integer numpy
array of shape [num_boxes] containing 1-indexed detection classes for
the boxes.
"""
return {
standard_fields.DetectionResultFields.detection_boxes:
data[['YMin', 'XMin', 'YMax', 'XMax']].values,
standard_fields.DetectionResultFields.detection_classes:
data['LabelName'].map(lambda x: class_label_map[x]).values,
standard_fields.DetectionResultFields.detection_scores:
data['Score'].values
}
```
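A hypothetical example of the expected input: a pandas DataFrame of predictions for one image plus a label map. The column names follow the Open Images CSV format referenced above; the ids, scores, and coordinates are made up.
```python
import pandas as pd

class_label_map = {'/m/01g317': 1, '/m/0199g': 2}  # made-up label map
predictions = pd.DataFrame({
    'LabelName': ['/m/01g317', '/m/0199g'],
    'Score': [0.9, 0.4],
    'XMin': [10.0, 30.0], 'XMax': [40.0, 80.0],
    'YMin': [20.0, 10.0], 'YMax': [60.0, 70.0],
})
pred_dict = build_predictions_dictionary(predictions, class_label_map)
# pred_dict can now be passed to
# OpenImagesDetectionChallengeEvaluator.add_single_detected_image_info.
```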
#### File: ptn/nets/ptn_vox_decoder.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
slim = tf.contrib.slim
@tf.contrib.framework.add_arg_scope
def conv3d_transpose(inputs,
num_outputs,
kernel_size,
stride=1,
padding='SAME',
activation_fn=tf.nn.relu,
weights_initializer=tf.contrib.layers.xavier_initializer(),
biases_initializer=tf.zeros_initializer(),
reuse=None,
trainable=True,
scope=None):
"""Wrapper for conv3d_transpose layer.
This function wraps tf.nn.conv3d_transpose with a basic non-linearity.
It creates a variable called `weights`, representing the kernel,
that is convolved with the input. A second variable called `biases`
is added to the result of the operation.
"""
with tf.variable_scope(
scope, 'Conv3d_transpose', [inputs], reuse=reuse):
dtype = inputs.dtype.base_dtype
kernel_d, kernel_h, kernel_w = kernel_size[0:3]
num_filters_in = inputs.get_shape()[4]
weights_shape = [kernel_d, kernel_h, kernel_w, num_outputs, num_filters_in]
weights = tf.get_variable('weights',
shape=weights_shape,
dtype=dtype,
initializer=weights_initializer,
trainable=trainable)
tf.contrib.framework.add_model_variable(weights)
input_shape = inputs.get_shape().as_list()
batch_size = input_shape[0]
depth = input_shape[1]
height = input_shape[2]
width = input_shape[3]
def get_deconv_dim(dim_size, stride_size):
# Only support padding='SAME'.
if isinstance(dim_size, tf.Tensor):
dim_size = tf.multiply(dim_size, stride_size)
elif dim_size is not None:
dim_size *= stride_size
return dim_size
out_depth = get_deconv_dim(depth, stride)
out_height = get_deconv_dim(height, stride)
out_width = get_deconv_dim(width, stride)
out_shape = [batch_size, out_depth, out_height, out_width, num_outputs]
outputs = tf.nn.conv3d_transpose(inputs, weights, out_shape,
[1, stride, stride, stride, 1],
padding=padding)
outputs.set_shape(out_shape)
if biases_initializer is not None:
biases = tf.get_variable('biases',
shape=[num_outputs, ],
dtype=dtype,
initializer=biases_initializer,
trainable=trainable)
tf.contrib.framework.add_model_variable(biases)
outputs = tf.nn.bias_add(outputs, biases)
if activation_fn:
outputs = activation_fn(outputs)
return outputs
def model(identities, params, is_training):
"""Model transforming embedding to voxels."""
del is_training # Unused
f_dim = params.f_dim
# Please refer to the original implementation: github.com/xcyan/nips16_PTN
# In TF replication, we use a slightly different architecture.
with slim.arg_scope(
[slim.fully_connected, conv3d_transpose],
weights_initializer=tf.truncated_normal_initializer(stddev=0.02, seed=1)):
h0 = slim.fully_connected(
identities, 4 * 4 * 4 * f_dim * 8, activation_fn=tf.nn.relu)
h1 = tf.reshape(h0, [-1, 4, 4, 4, f_dim * 8])
h1 = conv3d_transpose(
h1, f_dim * 4, [4, 4, 4], stride=2, activation_fn=tf.nn.relu)
h2 = conv3d_transpose(
h1, int(f_dim * 3 / 2), [5, 5, 5], stride=2, activation_fn=tf.nn.relu)
h3 = conv3d_transpose(
h2, 1, [6, 6, 6], stride=2, activation_fn=tf.nn.sigmoid)
return h3
```
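A hypothetical call of the decoder above (TF 1.x). The embedding size, batch size, and `f_dim` are made up, and the real `params` object comes from the PTN training flags:
```python
import collections
import tensorflow as tf

Params = collections.namedtuple('Params', ['f_dim'])  # stand-in for the flags object

identities = tf.placeholder(tf.float32, [8, 512])  # batch of shape embeddings
voxels = model(identities, Params(f_dim=64), is_training=False)
# voxels: [8, 32, 32, 32, 1] after three stride-2 transposed 3-D convolutions
```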
#### File: research/ptn/train_ptn.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import tensorflow as tf
from tensorflow import app
from research.ptn import model_ptn
flags = tf.app.flags
slim = tf.contrib.slim
flags.DEFINE_string('inp_dir',
'',
'Directory path containing the input data (tfrecords).')
flags.DEFINE_string(
'dataset_name', 'shapenet_chair',
'Dataset name that is to be used for training and evaluation.')
flags.DEFINE_integer('z_dim', 512, '')
flags.DEFINE_integer('f_dim', 64, '')
flags.DEFINE_integer('fc_dim', 1024, '')
flags.DEFINE_integer('num_views', 24, 'Num of viewpoints in the input data.')
flags.DEFINE_integer('image_size', 64,
'Input images dimension (pixels) - width & height.')
flags.DEFINE_integer('vox_size', 32, 'Voxel prediction dimension.')
flags.DEFINE_integer('step_size', 24, 'Steps to take in rotation to fetch viewpoints.')
flags.DEFINE_integer('batch_size', 6, 'Batch size while training.')
flags.DEFINE_float('focal_length', 0.866, 'Focal length parameter used in perspective projection.')
flags.DEFINE_float('focal_range', 1.732, 'Focal length parameter used in perspective projection.')
flags.DEFINE_string('encoder_name', 'ptn_encoder',
'Name of the encoder network being used.')
flags.DEFINE_string('decoder_name', 'ptn_vox_decoder',
'Name of the decoder network being used.')
flags.DEFINE_string('projector_name', 'perspective_projector',
'Name of the projector network being used.')
# Save options
flags.DEFINE_string('checkpoint_dir', '/tmp/ptn_train/',
'Directory path for saving trained models and other data.')
flags.DEFINE_string('model_name', 'ptn_finetune',
'Name of the model used in naming the TF job. Must be different for each run.')
flags.DEFINE_string('init_model', None,
'Checkpoint path of the model to initialize with.')
flags.DEFINE_integer('save_every', 1000,
'Average period of steps after which we save a model.')
# Optimization
flags.DEFINE_float('proj_weight', 10, 'Weighting factor for projection loss.')
flags.DEFINE_float('volume_weight', 0, 'Weighting factor for volume loss.')
flags.DEFINE_float('viewpoint_weight', 1, 'Weighting factor for viewpoint loss.')
flags.DEFINE_float('learning_rate', 0.0001, 'Learning rate.')
flags.DEFINE_float('weight_decay', 0.001, 'Weight decay parameter while training.')
flags.DEFINE_float('clip_gradient_norm', 0, 'Gradient clip norm; leave 0 for no gradient clipping.')
flags.DEFINE_integer('max_number_of_steps', 10000, 'Maximum number of steps for training.')
# Summary
flags.DEFINE_integer('save_summaries_secs', 15, 'Seconds interval for dumping TF summaries.')
flags.DEFINE_integer('save_interval_secs', 60 * 5, 'Seconds interval to save models.')
# Scheduling
flags.DEFINE_string('master', '', 'The address of the tensorflow master')
flags.DEFINE_bool('sync_replicas', False, 'Whether to sync gradients between replicas for optimizer.')
flags.DEFINE_integer('worker_replicas', 1, 'Number of worker replicas (train tasks).')
flags.DEFINE_integer('backup_workers', 0, 'Number of backup workers.')
flags.DEFINE_integer('ps_tasks', 0, 'Number of ps tasks.')
flags.DEFINE_integer('task', 0,
'Task identifier flag to be set for each task running in distributed manner. Task number 0 '
'will be chosen as the chief.')
FLAGS = flags.FLAGS
def main(_):
train_dir = os.path.join(FLAGS.checkpoint_dir, FLAGS.model_name, 'train')
save_image_dir = os.path.join(train_dir, 'images')
if not os.path.exists(train_dir):
os.makedirs(train_dir)
if not os.path.exists(save_image_dir):
os.makedirs(save_image_dir)
g = tf.Graph()
with g.as_default():
with tf.device(tf.train.replica_device_setter(FLAGS.ps_tasks)):
global_step = slim.get_or_create_global_step()
###########
## model ##
###########
model = model_ptn.model_PTN(FLAGS)
##########
## data ##
##########
train_data = model.get_inputs(
FLAGS.inp_dir,
FLAGS.dataset_name,
'train',
FLAGS.batch_size,
FLAGS.image_size,
FLAGS.vox_size,
is_training=True)
inputs = model.preprocess(train_data, FLAGS.step_size)
##############
## model_fn ##
##############
model_fn = model.get_model_fn(
is_training=True, reuse=False, run_projection=True)
outputs = model_fn(inputs)
##################
## train_scopes ##
##################
if FLAGS.init_model:
train_scopes = ['decoder']
init_scopes = ['encoder']
else:
train_scopes = ['encoder', 'decoder']
##########
## loss ##
##########
task_loss = model.get_loss(inputs, outputs)
regularization_loss = model.get_regularization_loss(train_scopes)
loss = task_loss + regularization_loss
###############
## optimizer ##
###############
optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)
if FLAGS.sync_replicas:
optimizer = tf.train.SyncReplicasOptimizer(
optimizer,
replicas_to_aggregate=FLAGS.worker_replicas - FLAGS.backup_workers,
total_num_replicas=FLAGS.worker_replicas)
##############
## train_op ##
##############
train_op = model.get_train_op_for_scope(loss, optimizer, train_scopes)
###########
## saver ##
###########
saver = tf.train.Saver(max_to_keep=np.minimum(5,
FLAGS.worker_replicas + 1))
if FLAGS.task == 0:
params = FLAGS
params.batch_size = params.num_views
params.step_size = 1
model.set_params(params)
val_data = model.get_inputs(
params.inp_dir,
params.dataset_name,
'val',
params.batch_size,
params.image_size,
params.vox_size,
is_training=False)
val_inputs = model.preprocess(val_data, params.step_size)
# Note: don't compute loss here
reused_model_fn = model.get_model_fn(is_training=False, reuse=True)
val_outputs = reused_model_fn(val_inputs)
with tf.device(tf.DeviceSpec(device_type='CPU')):
vis_input_images = val_inputs['images_1'] * 255.0
vis_gt_projs = (val_outputs['masks_1'] * (-1) + 1) * 255.0
vis_pred_projs = (val_outputs['projs_1'] * (-1) + 1) * 255.0
vis_gt_projs = tf.concat([vis_gt_projs] * 3, axis=3)
vis_pred_projs = tf.concat([vis_pred_projs] * 3, axis=3)
# rescale
new_size = [FLAGS.image_size] * 2
vis_gt_projs = tf.image.resize_nearest_neighbor(
vis_gt_projs, new_size)
vis_pred_projs = tf.image.resize_nearest_neighbor(
vis_pred_projs, new_size)
# flip
# vis_gt_projs = utils.image_flipud(vis_gt_projs)
# vis_pred_projs = utils.image_flipud(vis_pred_projs)
# vis_gt_projs is of shape [batch, height, width, channels]
write_disk_op = model.write_disk_grid(
global_step=global_step,
log_dir=save_image_dir,
input_images=vis_input_images,
gt_projs=vis_gt_projs,
pred_projs=vis_pred_projs,
input_voxels=val_inputs['voxels'],
output_voxels=val_outputs['voxels_1'])
with tf.control_dependencies([write_disk_op]):
train_op = tf.identity(train_op)
#############
## init_fn ##
#############
if FLAGS.init_model:
init_fn = model.get_init_fn(init_scopes)
else:
init_fn = None
##############
## training ##
##############
slim.learning.train(
train_op=train_op,
logdir=train_dir,
init_fn=init_fn,
master=FLAGS.master,
is_chief=(FLAGS.task == 0),
number_of_steps=FLAGS.max_number_of_steps,
saver=saver,
save_summaries_secs=FLAGS.save_summaries_secs,
save_interval_secs=FLAGS.save_interval_secs)
if __name__ == '__main__':
app.run()
```
#### File: sentiment_analysis/data/dataset.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from research.sentiment_analysis.data import imdb as imdb
DATASET_IMDB = "imdb"
def load(dataset, vocabulary_size, sentence_length):
"""Returns training and evaluation input.
Args:
dataset: Dataset to be trained and evaluated.
Currently only imdb is supported.
vocabulary_size: The number of the most frequent tokens
to be used from the corpus.
sentence_length: The number of words in each sentence.
Longer sentences get cut, shorter ones padded.
Raises:
ValueError: if the dataset value is not valid.
Returns:
A tuple of length 4, for training sentences, labels,
evaluation sentences, and evaluation labels,
each being a numpy array.
"""
if dataset == DATASET_IMDB:
return imdb.load(vocabulary_size, sentence_length)
else:
raise ValueError("unsupported dataset: " + dataset)
def get_num_class(dataset):
"""Returns an integer for the number of label classes.
Args:
dataset: Dataset to be trained and evaluated.
Currently only imdb is supported.
Raises:
ValueError: if the dataset value is not valid.
Returns:
int: The number of label classes.
"""
if dataset == DATASET_IMDB:
return imdb.NUM_CLASS
else:
raise ValueError("unsupported dataset: " + dataset)
```
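A hypothetical call of the helpers above (the vocabulary and sentence-length values are arbitrary):
```python
train_x, train_y, eval_x, eval_y = load(DATASET_IMDB,
                                        vocabulary_size=10000,
                                        sentence_length=200)
num_classes = get_num_class(DATASET_IMDB)  # returns imdb.NUM_CLASS
```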
#### File: sentiment_analysis/data/util.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
START_CHAR = 1
END_CHAR = 2
OOV_CHAR = 3
def pad_sentence(sentence, sentence_length):
"""Pad the given sentense at the end.
If the input is longer than sentence_length,
the remaining portion is dropped.
END_CHAR is used for the padding.
Args:
sentence: A numpy array of integers.
sentence_length: The length of the input after the padding.
Returns:
A numpy array of integers of the given length.
"""
sentence = sentence[:sentence_length]
if len(sentence) < sentence_length:
sentence = np.pad(sentence, (0, sentence_length - len(sentence)),
"constant", constant_values=(START_CHAR, END_CHAR))
return sentence
```
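A quick, hypothetical check of the padding behaviour:
```python
import numpy as np

sent = np.array([4, 7, 9])
print(pad_sentence(sent, 6))  # [4 7 9 2 2 2] -- padded with END_CHAR
print(pad_sentence(sent, 2))  # [4 7]         -- longer input is truncated
```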
#### File: research/seq2species/run_training.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
from absl import flags
import numpy as np
import tensorflow as tf
from google.protobuf import text_format
from research.seq2species import build_model
from research.seq2species import configuration
from research.seq2species import input as seq2species_input
from research.seq2species.protos import seq2label_pb2
from research.seq2species import seq2label_utils
# Define non-tunable parameters.
flags.DEFINE_integer('num_filters', 1, 'Number of filters for conv model')
flags.DEFINE_string('hparams', '',
'Comma-separated list of name=value hyperparameter '
"pairs ('hp1=value1,hp2=value2'). Unspecified "
'hyperparameters will be filled with defaults.')
flags.DEFINE_integer('batch_size', 512, 'Size of batches during training.')
flags.DEFINE_integer('min_train_steps', 1000,
'Minimum number of training steps to run.')
flags.DEFINE_float('max_task_loss', 10.0,
"Terminate trial if task loss doesn't fall below this "
'within --min_train_steps.')
flags.DEFINE_integer('n_print_progress_every', 1000,
'Print training progress every '
'--n_print_progress_every global steps.')
flags.DEFINE_list('targets', ['species'],
'Names of taxonomic ranks to use as training targets.')
flags.DEFINE_float(
'noise_rate', 0.0, 'Rate [0.0, 1.0] at which to inject '
'base-flipping noise into input read sequences.')
# Define paths to logs and data.
flags.DEFINE_list(
'train_files', [], 'Full paths to the TFRecords containing the '
'training examples.')
flags.DEFINE_string(
'metadata_path', '', 'Full path of the text proto containing configuration '
'information about the set of training examples.')
flags.DEFINE_string('logdir', '/tmp/seq2species',
'Directory to which to write logs.')
# Define supervisor/checkpointing options.
flags.DEFINE_integer('task', 0, 'Task ID of the replica running the training.')
flags.DEFINE_string('master', '', 'Name of the TF master to use.')
flags.DEFINE_integer(
'save_model_secs', 900, 'Rate at which to save model parameters. '
'Set to 0 to disable checkpointing.')
flags.DEFINE_integer('recovery_wait_secs', 30,
'Wait to recover model from checkpoint '
'before timing out.')
flags.DEFINE_integer('save_summaries_secs', 900,
'Rate at which to save Tensorboard summaries.')
flags.DEFINE_integer('ps_tasks', 0,
'Number of tasks in the ps job; 0 if no ps is used.')
FLAGS = flags.FLAGS
RANDOM_SEED = 42
def wait_until(time_sec):
"""Stalls execution until a given time.
Args:
time_sec: time, in seconds, until which to loop idly.
"""
while time.time() < time_sec:
pass
def update_measures(measures, new_measures, loss_val, max_loss=None):
"""Updates tracking of experimental measures and infeasibilty.
Args:
measures: dict; mapping from measure name to measure value.
new_measures: dict; mapping from measure name to new measure values.
loss_val: float; value of loss metric by which to determine feasibility.
max_loss: float; maximum value at which to consider the loss feasible.
Side Effects:
Updates the given mapping of measures and values based on the current
experimental metrics stored in new_measures, and determines current
feasibility of the experiment based on the provided loss value.
"""
max_loss = max_loss if max_loss else np.finfo('f').max
measures['is_infeasible'] = (
loss_val >= max_loss or not np.isfinite(loss_val))
measures.update(new_measures)
def run_training(model, hparams, training_dataset, logdir, batch_size):
"""Trains the given model on random mini-batches of reads.
Args:
model: ConvolutionalNet instance containing the model graph and operations.
hparams: tf.contrib.training.Hparams object containing the model's
hyperparamters; see configuration.py for hyperparameter definitions.
training_dataset: an `InputDataset` that can feed labelled examples.
logdir: string; full path of directory to which to save checkpoints.
batch_size: integer batch size.
Yields:
Tuple comprising a dictionary of experimental measures and the save path
for train checkpoints and summaries.
"""
input_params = dict(batch_size=batch_size)
features, labels = training_dataset.input_fn(input_params)
model.build_graph(features, labels, tf.estimator.ModeKeys.TRAIN, batch_size)
is_chief = FLAGS.task == 0
scaffold = tf.train.Scaffold(
saver=tf.train.Saver(
tf.global_variables(),
max_to_keep=5,
keep_checkpoint_every_n_hours=1.0),
init_op=tf.global_variables_initializer(),
summary_op=model.summary_op)
with tf.train.MonitoredTrainingSession(
master=FLAGS.master,
checkpoint_dir=logdir,
is_chief=is_chief,
scaffold=scaffold,
save_summaries_secs=FLAGS.save_summaries_secs,
save_checkpoint_secs=FLAGS.save_model_secs,
max_wait_secs=FLAGS.recovery_wait_secs) as sess:
global_step = sess.run(model.global_step)
print('Initialized model at global step ', global_step)
init_time = time.time()
measures = {'is_infeasible': False}
if is_chief:
model_info = seq2label_utils.construct_seq2label_model_info(
hparams, 'conv', FLAGS.targets, FLAGS.metadata_path, FLAGS.batch_size,
FLAGS.num_filters, FLAGS.noise_rate)
write_message(model_info, os.path.join(logdir, 'model_info.pbtxt'))
ops = [
model.accuracy, model.weighted_accuracy, model.total_loss,
model.global_step, model.train_op
]
while not sess.should_stop() and global_step < hparams.train_steps:
accuracy, weighted_accuracy, loss, global_step, _ = sess.run(ops)
def gather_measures():
"""Updates the measures dictionary from this batch."""
new_measures = {'train_loss': loss, 'global_step': global_step}
for target in FLAGS.targets:
new_measures.update({
('train_accuracy/%s' % target): accuracy[target],
('train_weighted_accuracy/%s' % target): weighted_accuracy[target]
})
update_measures(
measures, new_measures, loss, max_loss=FLAGS.max_task_loss)
# Periodically track measures according to current mini-batch performance.
# Log a message.
if global_step % FLAGS.n_print_progress_every == 0:
log_message = ('\tstep: %d (%d sec), loss: %f' %
(global_step, time.time() - init_time, loss))
for target in FLAGS.targets:
log_message += (', accuracy/%s: %f ' % (target, accuracy[target]))
log_message += (', weighted_accuracy/%s: %f ' %
(target, weighted_accuracy[target]))
print(log_message)
# Gather new measures and update the measures dictionary.
gather_measures()
yield measures, scaffold.saver.last_checkpoints[-1]
# Check for additional stopping criteria.
if not np.isfinite(loss) or (loss >= FLAGS.max_task_loss and
global_step > FLAGS.min_train_steps):
break
# Always yield once at the end.
gather_measures()
yield measures, scaffold.saver.last_checkpoints[-1]
def write_message(message, filename):
"""Writes contents of the given message to the given filename as a text proto.
Args:
message: the proto message to save.
filename: full path of file to which to save the text proto.
Side Effects:
Outputs a text proto file to the given filename.
"""
message_string = text_format.MessageToString(message)
with tf.gfile.GFile(filename, 'w') as f:
f.write(message_string)
def write_measures(measures, checkpoint_file, init_time):
"""Writes performance measures to file.
Args:
measures: dict; mapping from measure name to measure value.
checkpoint_file: string; full save path for checkpoints and summaries.
init_time: int; start time for work on the current experiment.
Side Effects:
Writes given dictionary of performance measures for the current experiment
to a 'measures.pbtxt' file in the checkpoint directory.
"""
# Save experiment measures.
print('global_step: ', measures['global_step'])
experiment_measures = seq2label_pb2.Seq2LabelExperimentMeasures(
checkpoint_path=checkpoint_file,
steps=measures['global_step'],
experiment_infeasible=measures['is_infeasible'],
wall_time=time.time() - init_time) # Inaccurate for restarts.
for name in measures:
value = measures.get(name)
if name not in ['is_infeasible', 'global_step']:
experiment_measures.measures.add(name=name, value=value)
measures_file = os.path.join(
os.path.dirname(checkpoint_file), 'measures.pbtxt')
write_message(experiment_measures, measures_file)
print('Wrote ', measures_file,
' containing the following experiment measures:\n', experiment_measures)
def main(unused_argv):
dataset_info = seq2species_input.load_dataset_info('')
init_time = time.time()
# Determine model hyperparameters.
hparams = configuration.parse_hparams(FLAGS.hparams, FLAGS.num_filters)
print('Current Hyperparameters:')
for hp_name, hp_val in hparams.values().items():
print('\t', hp_name, ': ', hp_val)
# Initialize the model graph.
print('Constructing TensorFlow Graph.')
tf.reset_default_graph()
input_dataset = seq2species_input.InputDataset.from_tfrecord_files(
FLAGS.train_files,
'train',
FLAGS.targets,
dataset_info,
noise_rate=FLAGS.noise_rate,
random_seed=RANDOM_SEED)
with tf.device(tf.train.replica_device_setter(FLAGS.ps_tasks)):
model = build_model.ConvolutionalNet(
hparams, dataset_info, targets=FLAGS.targets)
# Run the experiment.
measures, checkpoint_file = None, None
print('Starting model training.')
for cur_measures, cur_file in run_training(
model, hparams, input_dataset, FLAGS.logdir, batch_size=FLAGS.batch_size):
measures, checkpoint_file = cur_measures, cur_file
# Save experiment results.
write_measures(measures, checkpoint_file, init_time)
if __name__ == '__main__':
tf.app.run(main)
```
#### File: skip_thoughts/ops/input_ops.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import tensorflow as tf
# A SentenceBatch is a pair of Tensors:
# ids: Batch of input sentences represented as sequences of word ids: an int64
# Tensor with shape [batch_size, padded_length].
# mask: Boolean mask distinguishing real words (1) from padded words (0): an
# int32 Tensor with shape [batch_size, padded_length].
SentenceBatch = collections.namedtuple("SentenceBatch", ("ids", "mask"))
def parse_example_batch(serialized):
"""Parses a batch of tf.Example protos.
Args:
serialized: A 1-D string Tensor; a batch of serialized tf.Example protos.
Returns:
encode: A SentenceBatch of sentences to encode.
decode_pre: A SentenceBatch of "previous" sentences to decode.
decode_post: A SentenceBatch of "post" sentences to decode.
"""
features = tf.parse_example(
serialized,
features={
"encode": tf.VarLenFeature(dtype=tf.int64),
"decode_pre": tf.VarLenFeature(dtype=tf.int64),
"decode_post": tf.VarLenFeature(dtype=tf.int64),
})
def _sparse_to_batch(sparse):
ids = tf.sparse_tensor_to_dense(sparse) # Padding with zeroes.
mask = tf.sparse_to_dense(sparse.indices, sparse.dense_shape,
tf.ones_like(sparse.values, dtype=tf.int32))
return SentenceBatch(ids=ids, mask=mask)
output_names = ("encode", "decode_pre", "decode_post")
return tuple(_sparse_to_batch(features[x]) for x in output_names)
def prefetch_input_data(reader,
file_pattern,
shuffle,
capacity,
num_reader_threads=1):
"""Prefetches string values from disk into an input queue.
Args:
reader: Instance of tf.ReaderBase.
file_pattern: Comma-separated list of file patterns (e.g.
"/tmp/train_data-?????-of-00100", where '?' acts as a wildcard that
matches any character).
shuffle: Boolean; whether to randomly shuffle the input data.
capacity: Queue capacity (number of records).
num_reader_threads: Number of reader threads feeding into the queue.
Returns:
A Queue containing prefetched string values.
"""
data_files = []
for pattern in file_pattern.split(","):
data_files.extend(tf.gfile.Glob(pattern))
if not data_files:
tf.logging.fatal("Found no input files matching %s", file_pattern)
else:
tf.logging.info("Prefetching values from %d files matching %s",
len(data_files), file_pattern)
filename_queue = tf.train.string_input_producer(
data_files, shuffle=shuffle, capacity=16, name="filename_queue")
if shuffle:
min_after_dequeue = int(0.6 * capacity)
values_queue = tf.RandomShuffleQueue(
capacity=capacity,
min_after_dequeue=min_after_dequeue,
dtypes=[tf.string],
shapes=[[]],
name="random_input_queue")
else:
values_queue = tf.FIFOQueue(
capacity=capacity,
dtypes=[tf.string],
shapes=[[]],
name="fifo_input_queue")
enqueue_ops = []
for _ in range(num_reader_threads):
_, value = reader.read(filename_queue)
enqueue_ops.append(values_queue.enqueue([value]))
tf.train.queue_runner.add_queue_runner(
tf.train.queue_runner.QueueRunner(values_queue, enqueue_ops))
tf.summary.scalar("queue/%s/fraction_of_%d_full" % (values_queue.name,
capacity),
tf.cast(values_queue.size(), tf.float32) * (1.0 / capacity))
return values_queue
```
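A hypothetical wiring of the two functions above in a TF 1.x queue-based input pipeline (the file pattern and batch size are made up):
```python
import tensorflow as tf

reader = tf.TFRecordReader()
values_queue = prefetch_input_data(
    reader,
    file_pattern="/tmp/train_data-?????-of-00100",
    shuffle=True,
    capacity=2048,
    num_reader_threads=2)
serialized = values_queue.dequeue_many(128)  # batch of serialized tf.Example protos
encode, decode_pre, decode_post = parse_example_batch(serialized)
# encode.ids: int64 [128, padded_length]; encode.mask: int32 [128, padded_length]
```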
#### File: research/steve/nn.py
```python
from builtins import range
from builtins import object
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
import numpy as np
class FeedForwardNet(object):
"""Custom feed-forward network layer."""
def __init__(self, name, in_size, out_shape, layers=1, hidden_dim=32, final_nonlinearity=None,
get_uncertainty=False):
self.name = name
self.in_size = in_size
self.out_shape = out_shape
self.out_size = np.prod(out_shape)
self.layers = layers
self.hidden_dim = hidden_dim
self.final_nonlinearity = (lambda x: x) if final_nonlinearity is None else final_nonlinearity
self.get_uncertainty = get_uncertainty
self.weights = [None] * layers
self.biases = [None] * layers
self.params_list = []
with tf.variable_scope(name):
for layer_i in range(self.layers):
in_size = self.hidden_dim
out_size = self.hidden_dim
if layer_i == 0: in_size = self.in_size
if layer_i == self.layers - 1: out_size = self.out_size
self.weights[layer_i] = tf.get_variable("weights%d" % layer_i, [in_size, out_size],
initializer=tf.contrib.layers.xavier_initializer())
self.biases[layer_i] = tf.get_variable("bias%d" % layer_i, [1, out_size],
initializer=tf.constant_initializer(0.0))
self.params_list += [self.weights[layer_i], self.biases[layer_i]]
def __call__(self, x, stop_params_gradient=False, is_eval=True, ensemble_idxs=None, pre_expanded=None,
reduce_mode="none"):
original_shape = tf.shape(x)
h = tf.reshape(x, [-1, self.in_size])
for layer_i in range(self.layers):
nonlinearity = tf.nn.relu if layer_i + 1 < self.layers else self.final_nonlinearity
if stop_params_gradient:
h = nonlinearity(
tf.matmul(h, tf.stop_gradient(self.weights[layer_i])) + tf.stop_gradient(self.biases[layer_i]))
else:
h = nonlinearity(tf.matmul(h, self.weights[layer_i]) + self.biases[layer_i])
if len(self.out_shape) > 0:
h = tf.reshape(h, tf.concat([original_shape[:-1], tf.constant(self.out_shape)], -1))
else:
h = tf.reshape(h, original_shape[:-1])
if pre_expanded is None: pre_expanded = ensemble_idxs is not None
if reduce_mode == "none" and not pre_expanded and self.get_uncertainty:
if len(self.out_shape) > 0:
h = tf.expand_dims(h, -2)
else:
h = tf.expand_dims(h, -1)
return h
def l2_loss(self):
return tf.add_n([tf.reduce_sum(.5 * tf.square(mu)) for mu in self.params_list])
class BayesianDropoutFeedForwardNet(FeedForwardNet):
"""Custom feed-forward network layer, with dropout as a Bayesian approximation."""
def __init__(self, name, in_size, out_shape, layers=1, hidden_dim=32, final_nonlinearity=None,
get_uncertainty=False, keep_prob=.5, eval_sample_count=2, consistent_random_seed=False):
super(BayesianDropoutFeedForwardNet, self).__init__(name, in_size, out_shape, layers=layers,
hidden_dim=hidden_dim,
final_nonlinearity=final_nonlinearity,
get_uncertainty=get_uncertainty)
self.keep_prob = keep_prob
self.eval_sample_count = eval_sample_count
if eval_sample_count < 2: raise Exception("eval_sample_count must be at least 2 to estimate uncertainty")
self.dropout_seed = tf.random_uniform([layers], maxval=1e18, dtype=tf.int64) if consistent_random_seed else [
None] * layers
def __call__(self, x, stop_params_gradient=False, is_eval=True, pre_expanded=False, ensemble_idxs=None,
reduce_mode="none"):
if is_eval:
x = tf.tile(tf.expand_dims(x, 0),
tf.concat([tf.constant([self.eval_sample_count]), tf.ones_like(tf.shape(x))], 0))
original_shape = tf.shape(x)
h = tf.reshape(x, [-1, self.in_size])
for layer_i in range(self.layers):
nonlinearity = tf.nn.relu if layer_i + 1 < self.layers else self.final_nonlinearity
if layer_i > 0: h = tf.nn.dropout(h, keep_prob=self.keep_prob, seed=self.dropout_seed[layer_i])
if stop_params_gradient:
h = nonlinearity(
tf.matmul(h, tf.stop_gradient(self.weights[layer_i])) + tf.stop_gradient(self.biases[layer_i]))
else:
h = nonlinearity(tf.matmul(h, self.weights[layer_i]) + self.biases[layer_i])
if len(self.out_shape) > 0:
h = tf.reshape(h, tf.concat([original_shape[:-1], tf.constant(self.out_shape)], -1))
else:
h = tf.reshape(h, original_shape[:-1])
if is_eval:
h, uncertainty = tf.nn.moments(h, 0)
if self.get_uncertainty:
return h, uncertainty
else:
return h
else:
return h
class EnsembleFeedForwardNet(FeedForwardNet):
"""Custom feed-forward network layer with an ensemble."""
def __init__(self, name, in_size, out_shape, layers=1, hidden_dim=32, final_nonlinearity=None,
get_uncertainty=False, ensemble_size=2, train_sample_count=2, eval_sample_count=2):
super().__init__(name, in_size, out_shape, layers, hidden_dim, final_nonlinearity, get_uncertainty)
if train_sample_count > ensemble_size: raise Exception("train_sample_count cannot be larger than ensemble size")
if eval_sample_count > ensemble_size: raise Exception("eval_sample_count cannot be larger than ensemble size")
self.name = name
self.in_size = in_size
self.out_shape = out_shape
self.out_size = np.prod(out_shape)
self.layers = layers
self.hidden_dim = hidden_dim
self.final_nonlinearity = (lambda x: x) if final_nonlinearity is None else final_nonlinearity
self.get_uncertainty = get_uncertainty
self.ensemble_size = ensemble_size
self.train_sample_count = train_sample_count
self.eval_sample_count = eval_sample_count
self.weights = [None] * layers
self.biases = [None] * layers
self.params_list = []
with tf.variable_scope(name):
for layer_i in range(self.layers):
in_size = self.hidden_dim
out_size = self.hidden_dim
if layer_i == 0: in_size = self.in_size
if layer_i == self.layers - 1: out_size = self.out_size
self.weights[layer_i] = tf.get_variable("weights%d" % layer_i, [ensemble_size, in_size, out_size],
initializer=tf.contrib.layers.xavier_initializer())
self.biases[layer_i] = tf.get_variable("bias%d" % layer_i, [ensemble_size, out_size],
initializer=tf.constant_initializer(0.0))
self.params_list += [self.weights[layer_i], self.biases[layer_i]]
def __call__(self, x, stop_params_gradient=False, is_eval=True, ensemble_idxs=None, pre_expanded=None,
reduce_mode="none"):
if pre_expanded is None: pre_expanded = ensemble_idxs is not None
if ensemble_idxs is None:
ensemble_idxs = tf.random_shuffle(tf.range(self.ensemble_size))
ensemble_sample_n = self.eval_sample_count if is_eval else self.train_sample_count
ensemble_idxs = ensemble_idxs[:ensemble_sample_n]
else:
ensemble_sample_n = tf.shape(ensemble_idxs)[0]
weights = [tf.gather(w, ensemble_idxs, axis=0) for w in self.weights]
biases = [tf.expand_dims(tf.gather(b, ensemble_idxs, axis=0), 0) for b in self.biases]
original_shape = tf.shape(x)
if pre_expanded:
h = tf.reshape(x, [-1, ensemble_sample_n, self.in_size])
else:
h = tf.tile(tf.reshape(x, [-1, 1, self.in_size]), [1, ensemble_sample_n, 1])
for layer_i in range(self.layers):
nonlinearity = tf.nn.relu if layer_i + 1 < self.layers else self.final_nonlinearity
if stop_params_gradient:
h = nonlinearity(tf.einsum('bri,rij->brj', h, tf.stop_gradient(weights[layer_i])) + tf.stop_gradient(
biases[layer_i]))
else:
h = nonlinearity(tf.einsum('bri,rij->brj', h, weights[layer_i]) + biases[layer_i])
if pre_expanded:
if len(self.out_shape) > 0:
h = tf.reshape(h, tf.concat([original_shape[:-1], tf.constant(self.out_shape)], -1))
else:
h = tf.reshape(h, original_shape[:-1])
else:
if len(self.out_shape) > 0:
h = tf.reshape(h, tf.concat(
[original_shape[:-1], tf.constant([ensemble_sample_n]), tf.constant(self.out_shape)], -1))
else:
h = tf.reshape(h, tf.concat([original_shape[:-1], tf.constant([ensemble_sample_n])], -1))
if reduce_mode == "none":
pass
elif reduce_mode == "random":
if len(self.out_shape) > 0:
h = tf.reduce_sum(h * tf.reshape(
tf.one_hot(tf.random_uniform([tf.shape(h)[0]], 0, ensemble_sample_n, dtype=tf.int64),
ensemble_sample_n), tf.concat(
[tf.shape(h)[:1], tf.ones_like(tf.shape(h)[1:-2]), tf.constant([ensemble_sample_n]),
tf.constant([1])], 0)), -2)
else:
h = tf.reduce_sum(h * tf.reshape(
tf.one_hot(tf.random_uniform([tf.shape(h)[0]], 0, ensemble_sample_n, dtype=tf.int64),
ensemble_sample_n),
tf.concat([tf.shape(h)[:1], tf.ones_like(tf.shape(h)[1:-1]), tf.constant([ensemble_sample_n])], 0)),
-1)
elif reduce_mode == "mean":
if len(self.out_shape) > 0:
h = tf.reduce_mean(h, -2)
else:
h = tf.reduce_mean(h, -1)
else:
raise Exception("use a valid reduce mode: none, random, or mean")
return h
class ReparamNormal(object):
"""Wrapper to make a feedforward network that outputs both mu and logsigma,
for use in the reparameterization trick."""
def __init__(self, base_net, name, in_size, out_shape, layers=2, hidden_dim=32, final_nonlinearity=None,
ls_start_bias=0.0, final_net=FeedForwardNet, logsigma_min=-5., logsigma_max=2., **kwargs):
assert layers > 1
self.main_encoder = base_net(name + "_base", in_size, [hidden_dim], layers, hidden_dim,
final_nonlinearity=tf.nn.relu, **kwargs)
self.mu = final_net(name + "_mu", hidden_dim, out_shape, layers=1, final_nonlinearity=final_nonlinearity,
**kwargs)
self.logsigma = final_net(name + "_logsigma", hidden_dim, out_shape, layers=1, final_nonlinearity=None,
**kwargs)
self.ls_start_bias = ls_start_bias
self.params_list = self.main_encoder.params_list + self.mu.params_list + self.logsigma.params_list
self.logsigma_min = logsigma_min
self.logsigma_max = logsigma_max
def __call__(self, x):
encoded = self.main_encoder(x)
mu = self.mu(encoded)
logsigma = tf.clip_by_value(self.logsigma(encoded) + self.ls_start_bias, self.logsigma_min, self.logsigma_max)
return mu, logsigma
def l2_loss(self):
return self.main_encoder.l2_loss() + self.mu.l2_loss() + self.logsigma.l2_loss()
```
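A small, hypothetical usage sketch of the layers above (TF 1.x; all sizes are made up):
```python
import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 10])

# Plain two-layer MLP mapping 10 features to a 3-dimensional output.
net = FeedForwardNet("policy_net", in_size=10, out_shape=[3], layers=2,
                     hidden_dim=32, final_nonlinearity=tf.nn.tanh)
y = net(x)  # shape [batch, 3]

# Ensemble of 4 networks; average the sampled members' outputs at eval time.
ens = EnsembleFeedForwardNet("q_net", in_size=10, out_shape=[1], layers=3,
                             hidden_dim=64, ensemble_size=4,
                             train_sample_count=2, eval_sample_count=4)
q = ens(x, is_eval=True, reduce_mode="mean")  # shape [batch, 1]

regularizer = net.l2_loss() + ens.l2_loss()
```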
#### File: research/steve/toy_demo.py
```python
from __future__ import division
from __future__ import print_function
from builtins import range
from past.utils import old_div
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import scipy
import matplotlib.pyplot as plt
import seaborn as sns
### Hyperparameters
NONTERMINAL_STATE_COUNT = 100
NOISE_AMOUNT = 0.1
TRAIN_STEPS = 10000
Q_ENSEMBLE_SIZE = 8
MODEL_ENSEMBLE_SIZE = 8
HORIZON = 5
TRIAL_N = 10
### Helper functions
initial_state = 0
terminal_state = NONTERMINAL_STATE_COUNT + 1
nonterminal_state_count = NONTERMINAL_STATE_COUNT
state_count = NONTERMINAL_STATE_COUNT + 1
final_reward = NONTERMINAL_STATE_COUNT
colors = sns.color_palette('husl', 4)
plt.rcParams["figure.figsize"] = (6, 5)
def step(state):
if state == terminal_state:
next_state = terminal_state
else:
next_state = state + 1
if state == terminal_state:
reward = 0
elif state + 1 == terminal_state:
reward = final_reward
else:
reward = -1
return next_state, reward
def noisy_step(state):
if state == terminal_state:
next_state = terminal_state
elif np.random.random([]) < NOISE_AMOUNT:
next_state = np.random.randint(0, state_count)
else:
next_state = state + 1
if state == terminal_state:
reward = 0
elif state + 1 == terminal_state:
reward = final_reward
else:
reward = -1
return next_state, reward
def get_error(Q):
losses = np.square(np.arange(state_count) - Q[:-1])
return np.mean(losses)
def downsample(array, factor):
pad_size = np.ceil(old_div(float(array.size), factor)) * factor - array.size
array_padded = np.append(array, np.zeros([pad_size.astype(np.int64)]) * np.NaN)
return scipy.nanmean(array_padded.reshape(-1, factor), axis=1)
######################
### Main experiments
######################
# Basic Q
if True:
print("Running basic Q-learning.")
trial_results = []
for run_i in range(TRIAL_N):
print("Trial %d" % run_i)
Q = np.random.randint(0, state_count, [state_count + 1]).astype(np.float64)
Q[state_count] = 0
losses = []
for step_i in range(TRAIN_STEPS):
state = np.random.randint(0, state_count)
next_state, reward = step(state)
Q[state] = reward + Q[next_state]
losses.append(get_error(Q))
trial_results.append(losses)
print("...complete.\n")
result = np.stack(trial_results, axis=1)
means = np.mean(result, axis=1)
stdevs = np.std(result, axis=1)
plt.plot(means, label="Basic Q-learning", color=colors[0])
plt.fill_between(np.arange(TRAIN_STEPS), means - stdevs, means + stdevs, alpha=.2, color=colors[0])
with open('Toy-v1/baseline.csv', 'w') as f:
data = []
for frame_i in range(result.shape[0]):
for loss in result[frame_i]:
data.append("%f,%f,%f,%f" % (frame_i, frame_i, frame_i, loss))
f.write("\n".join(data))
# Ensemble Q
if True:
print("Running ensemble Q-learning.")
trial_results = []
for run_i in range(TRIAL_N):
print("Trial %d" % run_i)
Q = np.random.randint(0, state_count, [Q_ENSEMBLE_SIZE, state_count + 1]).astype(np.float64)
Q[:, state_count] = 0
losses = []
for step_i in range(TRAIN_STEPS):
for q_ensemble_i in range(Q_ENSEMBLE_SIZE):
state = np.random.randint(0, state_count)
next_state, reward = step(state)
Q[q_ensemble_i, state] = reward + np.mean(Q[:, next_state])
losses.append(get_error(np.mean(Q, axis=0)))
trial_results.append(losses)
print("...complete.\n")
result = np.stack(trial_results, axis=1)
means = np.mean(result, axis=1)
stdevs = np.std(result, axis=1)
plt.plot(means, label="Ensemble Q-learning", color=colors[1])
plt.fill_between(np.arange(TRAIN_STEPS), means - stdevs, means + stdevs, alpha=.2, color=colors[1])
# Ensemble MVE-Oracle
if True:
print("Running ensemble oracle MVE.")
trial_results = []
for run_i in range(TRIAL_N):
print("Trial %d" % run_i)
Q = np.random.randint(0, state_count, [Q_ENSEMBLE_SIZE, state_count + 1]).astype(np.float64)
Q[:, state_count] = 0
losses = []
for step_i in range(TRAIN_STEPS):
for q_ensemble_i in range(Q_ENSEMBLE_SIZE):
state = np.random.randint(0, state_count)
next_state, reward = step(state)
# MVE rollout
target = reward
for _ in range(HORIZON):
next_state, reward = step(next_state)
target += reward
target += np.mean(Q[:, next_state])
Q[q_ensemble_i, state] = target
losses.append(get_error(np.mean(Q, axis=0)))
trial_results.append(losses)
print("...complete.\n")
result = np.stack(trial_results, axis=1)
means = np.mean(result, axis=1)
stdevs = np.std(result, axis=1)
plt.plot(means, label="MVE-oracle", color=colors[2])
plt.fill_between(np.arange(TRAIN_STEPS), means - stdevs, means + stdevs, alpha=.2, color=colors[2])
with open('Toy-v1/mve_oracle.csv', 'w') as f:
data = []
for frame_i in range(result.shape[0]):
for loss in result[frame_i]:
data.append("%f,%f,%f,%f" % (frame_i, frame_i, frame_i, loss))
f.write("\n".join(data))
# Ensemble MVE-Noisy
if True:
print("Running ensemble noisy MVE.")
trial_results = []
for run_i in range(TRIAL_N):
print("Trial %d" % run_i)
Q = np.random.randint(0, state_count, [Q_ENSEMBLE_SIZE, state_count + 1]).astype(np.float64)
Q[:, state_count] = 0
losses = []
for step_i in range(TRAIN_STEPS):
for q_ensemble_i in range(Q_ENSEMBLE_SIZE):
state = np.random.randint(0, state_count)
next_state, reward = step(state)
# MVE rollout
targets = []
first_next_state, first_reward = next_state, reward
for model_ensemble_i in range(MODEL_ENSEMBLE_SIZE):
next_state, reward = first_next_state, first_reward
target = reward
for _ in range(HORIZON):
next_state, reward = noisy_step(next_state)
target += reward
target += np.mean(Q[:, next_state])
targets.append(target)
Q[q_ensemble_i, state] = np.mean(targets)
losses.append(get_error(np.mean(Q, axis=0)))
trial_results.append(losses)
print("...complete.\n")
result = np.stack(trial_results, axis=1)
means = np.mean(result, axis=1)
stdevs = np.std(result, axis=1)
plt.plot(means, label="MVE-noisy", color=colors[2], linestyle='dotted')
plt.fill_between(np.arange(TRAIN_STEPS), means - stdevs, means + stdevs, alpha=.2, color=colors[2])
with open('Toy-v1/mve_noisy.csv', 'w') as f:
data = []
for frame_i in range(result.shape[0]):
for loss in result[frame_i]:
data.append("%f,%f,%f,%f" % (frame_i, frame_i, frame_i, loss))
f.write("\n".join(data))
# STEVE-Oracle
if True:
print("Running ensemble oracle STEVE.")
trial_results = []
oracle_q_estimate_errors = []
oracle_mve_estimate_errors = []
oracle_steve_estimate_errors = []
oracle_opt_estimate_errors = []
for run_i in range(TRIAL_N):
print("Trial %d" % run_i)
Q = np.random.randint(0, state_count, [Q_ENSEMBLE_SIZE, state_count + 1]).astype(np.float64)
Q[:, state_count] = 0
losses = []
q_estimate_errors = []
mve_estimate_errors = []
steve_estimate_errors = []
opt_estimate_errors = []
steve_beat_freq = []
for step_i in range(TRAIN_STEPS):
_q_estimate_errors = []
_mve_estimate_errors = []
_steve_estimate_errors = []
_opt_estimate_errors = []
_steve_beat_freq = []
for q_ensemble_i in range(Q_ENSEMBLE_SIZE):
state = np.random.randint(0, state_count)
next_state, reward = step(state)
# STEVE rollout
Q_est_mat = np.zeros([HORIZON + 1, Q_ENSEMBLE_SIZE])
reward_est_mat = np.zeros([HORIZON + 1, 1])
first_next_state, first_reward = next_state, reward
next_state, reward = first_next_state, first_reward
Q_est_mat[0, :] = Q[:, next_state]
reward_est_mat[0, 0] = reward
for timestep_i in range(1, HORIZON + 1):
next_state, reward = step(next_state)
Q_est_mat[timestep_i, :] = Q[:, next_state]
reward_est_mat[timestep_i, 0] = reward
all_targets = Q_est_mat + np.cumsum(reward_est_mat, axis=0)
# STEVE weight calculation
estimates = np.mean(all_targets, axis=1)
confidences = old_div(1., (np.var(all_targets, axis=1) + 1e-8))
coefficients = old_div(confidences, np.sum(confidences))
target = np.sum(estimates * coefficients)
Q[q_ensemble_i, state] = target
true_target = state + 1. if state != terminal_state else 0.
_q_estimate_errors.append(np.square(estimates[0] - true_target))
_mve_estimate_errors.append(np.square(estimates[-1] - true_target))
_steve_estimate_errors.append(np.square(np.sum(estimates * coefficients) - true_target))
_opt_estimate_errors.append(np.min(np.square(estimates - true_target)))
losses.append(get_error(np.mean(Q, axis=0)))
q_estimate_errors.append(np.mean(_q_estimate_errors))
mve_estimate_errors.append(np.mean(_mve_estimate_errors))
steve_estimate_errors.append(np.mean(_steve_estimate_errors))
opt_estimate_errors.append(np.mean(_opt_estimate_errors))
trial_results.append(losses)
oracle_q_estimate_errors.append(q_estimate_errors)
oracle_mve_estimate_errors.append(mve_estimate_errors)
oracle_steve_estimate_errors.append(steve_estimate_errors)
oracle_opt_estimate_errors.append(opt_estimate_errors)
print("...complete.\n")
result = np.stack(trial_results, axis=1)
means = np.mean(result, axis=1)
stdevs = np.std(result, axis=1)
plt.plot(means, label="STEVE-oracle", color=colors[3])
plt.fill_between(np.arange(TRAIN_STEPS), means - stdevs, means + stdevs, alpha=.2, color=colors[3])
with open('Toy-v1/steve_oracle.csv', 'w') as f:
data = []
for frame_i in range(result.shape[0]):
for loss in result[frame_i]:
data.append("%f,%f,%f,%f" % (frame_i, frame_i, frame_i, loss))
f.write("\n".join(data))
# STEVE-Noisy
if True:
print("Running ensemble noisy STEVE.")
trial_results = []
noisy_q_estimate_errors = []
noisy_mve_estimate_errors = []
noisy_steve_estimate_errors = []
noisy_opt_estimate_errors = []
noisy_steve_beat_freq = []
for run_i in range(TRIAL_N):
print("Trial %d" % run_i)
Q = np.random.randint(0, state_count, [Q_ENSEMBLE_SIZE, state_count + 1]).astype(np.float64)
Q[:, state_count] = 0
losses = []
q_estimate_errors = []
mve_estimate_errors = []
steve_estimate_errors = []
opt_estimate_errors = []
steve_beat_freq = []
for step_i in range(TRAIN_STEPS):
_q_estimate_errors = []
_mve_estimate_errors = []
_steve_estimate_errors = []
_opt_estimate_errors = []
_steve_beat_freq = []
for q_ensemble_i in range(Q_ENSEMBLE_SIZE):
state = np.random.randint(0, state_count)
next_state, reward = step(state)
# STEVE rollout
Q_est_mat = np.zeros([HORIZON + 1, MODEL_ENSEMBLE_SIZE, Q_ENSEMBLE_SIZE])
reward_est_mat = np.zeros([HORIZON + 1, MODEL_ENSEMBLE_SIZE, 1])
first_next_state, first_reward = next_state, reward
for model_ensemble_i in range(MODEL_ENSEMBLE_SIZE):
next_state, reward = first_next_state, first_reward
Q_est_mat[0, model_ensemble_i, :] = Q[:, next_state]
reward_est_mat[0, model_ensemble_i, 0] = reward
for timestep_i in range(1, HORIZON + 1):
next_state, reward = noisy_step(next_state)
Q_est_mat[timestep_i, model_ensemble_i, :] = Q[:, next_state]
reward_est_mat[timestep_i, model_ensemble_i, 0] = reward
all_targets = Q_est_mat + np.cumsum(reward_est_mat, axis=0)
# STEVE weight calculation
all_targets = np.reshape(all_targets, [HORIZON + 1, MODEL_ENSEMBLE_SIZE * Q_ENSEMBLE_SIZE])
estimates = np.mean(all_targets, axis=1)
confidences = old_div(1., (np.var(all_targets, axis=1) + 1e-8))
coefficients = old_div(confidences, np.sum(confidences))
target = np.sum(estimates * coefficients)
# target = estimates[0]
Q[q_ensemble_i, state] = target
true_target = state + 1. if state != terminal_state else 0.
_q_estimate_errors.append(np.square(estimates[0] - true_target))
_mve_estimate_errors.append(np.square(estimates[-1] - true_target))
_steve_estimate_errors.append(np.square(np.sum(estimates * coefficients) - true_target))
_opt_estimate_errors.append(np.min(np.square(estimates - true_target)))
_steve_beat_freq.append(float(np.square(estimates[0] - true_target) > np.square(target - true_target)))
losses.append(get_error(np.mean(Q, axis=0)))
q_estimate_errors.append(np.mean(_q_estimate_errors))
mve_estimate_errors.append(np.mean(_mve_estimate_errors))
steve_estimate_errors.append(np.mean(_steve_estimate_errors))
opt_estimate_errors.append(np.mean(_opt_estimate_errors))
steve_beat_freq.append(np.mean(_steve_beat_freq))
trial_results.append(losses)
noisy_q_estimate_errors.append(q_estimate_errors)
noisy_mve_estimate_errors.append(mve_estimate_errors)
noisy_steve_estimate_errors.append(steve_estimate_errors)
noisy_opt_estimate_errors.append(opt_estimate_errors)
noisy_steve_beat_freq.append(steve_beat_freq)
print("...complete.\n")
result = np.stack(trial_results, axis=1)
means = np.mean(result, axis=1)
stdevs = np.std(result, axis=1)
plt.plot(means, label="STEVE-noisy", color=colors[3], linestyle='dotted')
plt.fill_between(np.arange(TRAIN_STEPS), means - stdevs, means + stdevs, alpha=.2, color=colors[3])
with open('Toy-v1/steve_noisy.csv', 'w') as f:
data = []
for frame_i in range(result.shape[0]):
for loss in result[frame_i]:
data.append("%f,%f,%f,%f" % (frame_i, frame_i, frame_i, loss))
f.write("\n".join(data))
# ### Display results
# plt.title("Comparison of convergence rates")
# plt.legend()
# plt.savefig("comparison.pdf")
# plt.show()
#
# ### Display secondary results - error comparison
# DOWNSAMPLE = 50
# colors = sns.color_palette('husl', 8)
# for i, (error_curve, label) in enumerate([
# (oracle_q_estimate_errors, "Oracle Q error"),
# (oracle_mve_estimate_errors, "Oracle MVE error"),
# (oracle_steve_estimate_errors, "Oracle STEVE error"),
# # (oracle_opt_estimate_errors, "Oracle minimum single-estimate error"),
# ]):
# result = np.stack(error_curve, axis=1)
# means = downsample(np.mean(result, axis=1), DOWNSAMPLE)
# stdevs = downsample(np.std(result, axis=1), DOWNSAMPLE)
# plt.plot(means, label=label, color=colors[i])
# plt.fill_between(np.arange(means.shape[0]), means - stdevs, means + stdevs, alpha=.2, color=colors[i])
#
# plt.title("Comparison of errors for oracle dynamics")
# plt.legend()
# plt.show()
#
# for i, (error_curve, label) in enumerate([
# (noisy_q_estimate_errors, "Noisy Q error"),
# (noisy_mve_estimate_errors, "Noisy MVE error"),
# (noisy_steve_estimate_errors, "Noisy STEVE error"),
# # (noisy_opt_estimate_errors, "Noisy minimum single-estimate error"),
# # (trial_steve_beat_freq, "STEVE beat freq"),
# ]):
# result = np.stack(error_curve, axis=1)
# means = downsample(np.mean(result, axis=1), DOWNSAMPLE)
# stdevs = downsample(np.std(result, axis=1), DOWNSAMPLE)
# plt.plot(means, label=label, color=colors[i])
# plt.fill_between(np.arange(means.shape[0]), means - stdevs, means + stdevs, alpha=.2, color=colors[i])
#
# plt.title("Comparison of errors for noisy dynamics")
# plt.legend()
# plt.show()
```
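The core of the STEVE updates above is the inverse-variance weighting of the per-horizon targets: each rollout length contributes an estimate, and the horizons whose ensemble members agree (low variance) receive most of the weight. A standalone NumPy sketch of just that weighting step, with made-up numbers:

```python
import numpy as np

all_targets = np.array([[10.1,  9.9, 10.0],    # horizon 0: members agree
                        [12.0,  8.0, 10.0]])   # horizon 1: members disagree
estimates    = np.mean(all_targets, axis=1)
confidences  = 1.0 / (np.var(all_targets, axis=1) + 1e-8)
coefficients = confidences / np.sum(confidences)
target = np.sum(estimates * coefficients)
print(coefficients)  # ~[0.998, 0.002]: the low-variance horizon dominates
print(target)        # ~10.0
```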
#### File: research/steve/worldmodel.py
```python
import tensorflow as tf
import numpy as np
from research.steve import nn
from research.steve.learner import CoreModel
class DeterministicWorldModel(CoreModel):
"""
A simple feed-forward neural network world model, with an option for an ensemble.
"""
@property
def saveid(self):
return "worldmodel"
def create_params(self, env_config, learner_config):
self.obs_dim = np.prod(env_config["obs_dims"])
self.action_dim = env_config["action_dim"]
self.reward_scale = env_config["reward_scale"]
self.discount = env_config["discount"]
self.aux_hidden_dim = self.learner_config["aux_hidden_dim"]
self.transition_hidden_dim = self.learner_config["transition_hidden_dim"]
self.bayesian_config = self.learner_config["bayesian"]
with tf.variable_scope(self.name):
if self.bayesian_config:
                self.transition_predictor = nn.EnsembleFeedForwardNet(
                    'transition_predictor', self.obs_dim + self.action_dim, [self.obs_dim],
                    layers=8, hidden_dim=self.transition_hidden_dim, get_uncertainty=True,
                    ensemble_size=self.bayesian_config["transition"]["ensemble_size"],
                    train_sample_count=self.bayesian_config["transition"]["train_sample_count"],
                    eval_sample_count=self.bayesian_config["transition"]["eval_sample_count"])
                self.done_predictor = nn.EnsembleFeedForwardNet(
                    'done_predictor', self.obs_dim + self.obs_dim + self.action_dim, [],
                    layers=4, hidden_dim=self.aux_hidden_dim, get_uncertainty=True,
                    ensemble_size=self.bayesian_config["transition"]["ensemble_size"],
                    train_sample_count=self.bayesian_config["transition"]["train_sample_count"],
                    eval_sample_count=self.bayesian_config["transition"]["eval_sample_count"])
                self.reward_predictor = nn.EnsembleFeedForwardNet(
                    'reward_predictor', self.obs_dim + self.obs_dim + self.action_dim, [],
                    layers=4, hidden_dim=self.aux_hidden_dim, get_uncertainty=True,
                    ensemble_size=self.bayesian_config["reward"]["ensemble_size"],
                    train_sample_count=self.bayesian_config["reward"]["train_sample_count"],
                    eval_sample_count=self.bayesian_config["reward"]["eval_sample_count"])
else:
self.transition_predictor = nn.FeedForwardNet('transition_predictor', self.obs_dim + self.action_dim,
[self.obs_dim], layers=8,
hidden_dim=self.transition_hidden_dim,
get_uncertainty=True)
self.done_predictor = nn.FeedForwardNet('done_predictor', self.obs_dim + self.obs_dim + self.action_dim,
[], layers=4, hidden_dim=self.aux_hidden_dim,
get_uncertainty=True)
self.reward_predictor = nn.FeedForwardNet('reward_predictor',
self.obs_dim + self.obs_dim + self.action_dim, [], layers=4,
hidden_dim=self.aux_hidden_dim, get_uncertainty=True)
def get_ensemble_idx_info(self):
if self.bayesian_config is not False:
ensemble_idxs = tf.random_shuffle(tf.range(self.transition_predictor.ensemble_size))
transition_ensemble_sample_n = self.transition_predictor.eval_sample_count
reward_ensemble_sample_n = self.reward_predictor.eval_sample_count
ensemble_idxs = ensemble_idxs[:transition_ensemble_sample_n]
return ensemble_idxs, transition_ensemble_sample_n, reward_ensemble_sample_n
else:
return None, 1, 1
def build_training_graph(self, obs, next_obs, actions, rewards, dones, data_size):
info = tf.concat([obs, actions], -1)
predicted_next_obs = self.transition_predictor(info, is_eval=False, reduce_mode="random") + obs
next_info = tf.concat([next_obs, info], -1)
predicted_dones = self.done_predictor(next_info, is_eval=False, reduce_mode="random")
predicted_rewards = self.reward_predictor(next_info, is_eval=False, reduce_mode="random")
done_losses = tf.nn.sigmoid_cross_entropy_with_logits(labels=dones, logits=predicted_dones)
reward_losses = .5 * tf.square(rewards - predicted_rewards)
next_obs_losses = .5 * tf.reduce_sum(tf.square(next_obs - predicted_next_obs), -1)
done_loss = tf.reduce_mean(done_losses)
reward_loss = tf.reduce_mean(reward_losses)
next_obs_loss = tf.reduce_mean(next_obs_losses)
reg_loss = .0001 * (self.done_predictor.l2_loss() +
self.reward_predictor.l2_loss() +
self.transition_predictor.l2_loss())
total_loss = done_loss + reward_loss + next_obs_loss + reg_loss
inspect = (total_loss, done_loss, reward_loss, next_obs_loss, reg_loss)
return total_loss, inspect
def init_extra_info(self, obs):
return tf.zeros_like(obs)
def transition(self, obs, action, extra_info, ensemble_idxs=None, pre_expanded=None):
info = tf.concat([obs, action], -1)
next_obs_delta = self.transition_predictor(info, reduce_mode="none", ensemble_idxs=ensemble_idxs,
pre_expanded=pre_expanded)
if ensemble_idxs is None:
next_obs = tf.expand_dims(obs, -2) + next_obs_delta
next_info = tf.concat([next_obs, tf.expand_dims(info, -2)], -1)
else:
next_obs = obs + next_obs_delta
next_info = tf.concat([next_obs, info], -1)
done = tf.nn.sigmoid(
self.done_predictor(next_info, reduce_mode="none", ensemble_idxs=ensemble_idxs, pre_expanded=True))
extra_info = tf.zeros_like(obs)
return next_obs, done, extra_info
def get_rewards(self, obs, action, next_obs):
next_info = tf.concat([next_obs, obs, action], -1)
reward = self.reward_predictor(next_info, reduce_mode="none")
return reward
```
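A minimal NumPy analogue (an illustration, not repository code) of the subsampling done in `get_ensemble_idx_info`: shuffle the ensemble member indices and keep `eval_sample_count` of them for the rollout.

```python
import numpy as np

ensemble_size = 8
eval_sample_count = 4
ensemble_idxs = np.random.permutation(ensemble_size)[:eval_sample_count]
print(ensemble_idxs)  # e.g. [5 0 7 2]: which ensemble members get evaluated
```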
#### File: tcn/estimators/svtcn_estimator.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from research.tcn import data_providers
from research.tcn import model as model_module
from research.tcn.estimators import base_estimator
from research.tcn.estimators import svtcn_loss
import tensorflow as tf
class SVTCNEstimator(base_estimator.BaseEstimator):
"""Single-view TCN Estimator base class."""
def __init__(self, config, logdir):
super(SVTCNEstimator, self).__init__(config, logdir)
def construct_input_fn(self, records, is_training):
"""See base class."""
config = self._config
num_views = config.data.num_views
num_parallel_calls = config.data.num_parallel_calls
sequence_prefetch_size = config.data.sequence_prefetch_size
batch_prefetch_size = config.data.batch_prefetch_size
def input_fn():
"""Provides input to SVTCN models."""
(images_preprocessed,
images_raw,
timesteps) = data_providers.singleview_tcn_provider(
file_list=records,
preprocess_fn=self.preprocess_data,
num_views=num_views,
is_training=is_training,
batch_size=self._batch_size,
num_parallel_calls=num_parallel_calls,
sequence_prefetch_size=sequence_prefetch_size,
batch_prefetch_size=batch_prefetch_size)
if config.logging.summary.image_summaries and is_training:
tf.summary.image('training/svtcn_images', images_raw)
features = {'batch_preprocessed': images_preprocessed}
return (features, timesteps)
return input_fn
def forward(self, images, is_training, reuse=False):
"""See base class."""
embedder_strategy = self._config.embedder_strategy
embedder = model_module.get_embedder(
embedder_strategy,
self._config,
images,
is_training=is_training, reuse=reuse)
embeddings = embedder.construct_embedding()
if is_training:
self.variables_to_train = embedder.get_trainable_variables()
self.pretrained_init_fn = embedder.init_fn
return embeddings
class SVTCNTripletEstimator(SVTCNEstimator):
"""Single-View TCN with semihard triplet loss."""
def __init__(self, config, logdir):
super(SVTCNTripletEstimator, self).__init__(config, logdir)
def define_loss(self, embeddings, timesteps, is_training):
"""See base class."""
pos_radius = self._config.svtcn.pos_radius
neg_radius = self._config.svtcn.neg_radius
margin = self._config.triplet_semihard.margin
loss = svtcn_loss.singleview_tcn_loss(
embeddings, timesteps, pos_radius, neg_radius, margin=margin)
self._loss = loss
if is_training:
tf.summary.scalar('training/svtcn_loss', loss)
return loss
def define_eval_metric_ops(self):
"""See base class."""
return {'validation/svtcn_loss': tf.metrics.mean(self._loss)}
```
#### File: tcn/utils/luatables.py
```python
class T(object):
"""Class for emulating lua tables."""
def __init__(self, *args, **kwargs):
if len(args) > 1 or (len(args) == 1 and len(kwargs) > 0):
errmsg = '''constructor only allows a single dict as a positional
argument or keyword arguments'''
raise ValueError(errmsg)
if len(args) == 1 and isinstance(args[0], dict):
self.__dict__.update(args[0])
else:
self.__dict__.update(kwargs)
def __repr__(self):
fmt = ', '.join('%s=%s' for i in range(len(self.__dict__)))
kwargstr = fmt % tuple(
x for tup in self.__dict__.items() for x in [str(tup[0]), repr(tup[1])])
return 'T(' + kwargstr + ')'
def __getitem__(self, key):
return self.__dict__[key]
def __setitem__(self, key, val):
self.__dict__[key] = val
def __delitem__(self, key):
del self.__dict__[key]
def __iter__(self):
return iter(self.__dict__)
def __len__(self):
return len(self.__dict__)
def keys(self): # Needed for dict(T( ... )) to work.
return self.__dict__.keys()
def iteritems(self):
return [(k, self.__dict__.get(k)) for k in self.__dict__]
```
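A short usage sketch of `T` (illustrative only, assuming the class above is in scope): attribute access and item access share the same underlying `__dict__`, so a `T` instance can be handed to anything expecting a plain mapping via `dict(t)`.

```python
t = T(lr=0.001, layers=2)   # keyword-style construction
t.batch_size = 32           # attribute access...
t['margin'] = 0.2           # ...and item access hit the same storage
print(len(t))               # 4
print(dict(t))              # {'lr': 0.001, 'layers': 2, 'batch_size': 32, 'margin': 0.2}
```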
#### File: research/vid2depth/train.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import random
import time
from absl import app
from absl import flags
from absl import logging
from research.vid2depth import model
import numpy as np
import tensorflow as tf
from research.vid2depth import util
gfile = tf.gfile
HOME_DIR = os.path.expanduser('~')
DEFAULT_DATA_DIR = os.path.join(HOME_DIR, 'vid2depth/data/kitti_raw_eigen')
DEFAULT_CHECKPOINT_DIR = os.path.join(HOME_DIR, 'vid2depth/checkpoints')
flags.DEFINE_string('data_dir', DEFAULT_DATA_DIR, 'Preprocessed data.')
flags.DEFINE_float('learning_rate', 0.0002, 'Adam learning rate.')
flags.DEFINE_float('beta1', 0.9, 'Adam momentum.')
flags.DEFINE_float('reconstr_weight', 0.85, 'Frame reconstruction loss weight.')
flags.DEFINE_float('smooth_weight', 0.05, 'Smoothness loss weight.')
flags.DEFINE_float('ssim_weight', 0.15, 'SSIM loss weight.')
flags.DEFINE_float('icp_weight', 0.0, 'ICP loss weight.')
flags.DEFINE_integer('batch_size', 4, 'The size of a sample batch')
flags.DEFINE_integer('img_height', 128, 'Input frame height.')
flags.DEFINE_integer('img_width', 416, 'Input frame width.')
# Note: Training time grows linearly with sequence length. Use 2 or 3.
flags.DEFINE_integer('seq_length', 3, 'Number of frames in sequence.')
flags.DEFINE_string('pretrained_ckpt', None, 'Path to checkpoint with '
'pretrained weights. Do not include .data* extension.')
flags.DEFINE_string('checkpoint_dir', DEFAULT_CHECKPOINT_DIR,
'Directory to save model checkpoints.')
flags.DEFINE_integer('train_steps', 200000, 'Number of training steps.')
flags.DEFINE_integer('summary_freq', 100, 'Save summaries every N steps.')
flags.DEFINE_bool('legacy_mode', False, 'Whether to limit losses to using only '
'the middle frame in sequence as the target frame.')
FLAGS = flags.FLAGS
# Maximum number of checkpoints to keep.
MAX_TO_KEEP = 100
def main(_):
# Fixed seed for repeatability
seed = 8964
tf.set_random_seed(seed)
np.random.seed(seed)
random.seed(seed)
if FLAGS.legacy_mode and FLAGS.seq_length < 3:
raise ValueError('Legacy mode supports sequence length > 2 only.')
if not gfile.Exists(FLAGS.checkpoint_dir):
gfile.MakeDirs(FLAGS.checkpoint_dir)
train_model = model.Model(data_dir=FLAGS.data_dir,
is_training=True,
learning_rate=FLAGS.learning_rate,
beta1=FLAGS.beta1,
reconstr_weight=FLAGS.reconstr_weight,
smooth_weight=FLAGS.smooth_weight,
ssim_weight=FLAGS.ssim_weight,
icp_weight=FLAGS.icp_weight,
batch_size=FLAGS.batch_size,
img_height=FLAGS.img_height,
img_width=FLAGS.img_width,
seq_length=FLAGS.seq_length,
legacy_mode=FLAGS.legacy_mode)
train(train_model, FLAGS.pretrained_ckpt, FLAGS.checkpoint_dir,
FLAGS.train_steps, FLAGS.summary_freq)
def train(train_model, pretrained_ckpt, checkpoint_dir, train_steps,
summary_freq):
"""Train model."""
if pretrained_ckpt is not None:
vars_to_restore = util.get_vars_to_restore(pretrained_ckpt)
pretrain_restorer = tf.train.Saver(vars_to_restore)
vars_to_save = util.get_vars_to_restore()
saver = tf.train.Saver(vars_to_save + [train_model.global_step],
max_to_keep=MAX_TO_KEEP)
sv = tf.train.Supervisor(logdir=checkpoint_dir, save_summaries_secs=0,
saver=None)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with sv.managed_session(config=config) as sess:
if pretrained_ckpt is not None:
logging.info('Restoring pretrained weights from %s', pretrained_ckpt)
pretrain_restorer.restore(sess, pretrained_ckpt)
logging.info('Attempting to resume training from %s...', checkpoint_dir)
checkpoint = tf.train.latest_checkpoint(checkpoint_dir)
logging.info('Last checkpoint found: %s', checkpoint)
if checkpoint:
saver.restore(sess, checkpoint)
logging.info('Training...')
start_time = time.time()
last_summary_time = time.time()
steps_per_epoch = train_model.reader.steps_per_epoch
step = 1
while step <= train_steps:
fetches = {
'train': train_model.train_op,
'global_step': train_model.global_step,
'incr_global_step': train_model.incr_global_step
}
if step % summary_freq == 0:
fetches['loss'] = train_model.total_loss
fetches['summary'] = sv.summary_op
results = sess.run(fetches)
global_step = results['global_step']
if step % summary_freq == 0:
sv.summary_writer.add_summary(results['summary'], global_step)
train_epoch = math.ceil(global_step / steps_per_epoch)
train_step = global_step - (train_epoch - 1) * steps_per_epoch
this_cycle = time.time() - last_summary_time
last_summary_time += this_cycle
logging.info(
'Epoch: [%2d] [%5d/%5d] time: %4.2fs (%ds total) loss: %.3f',
train_epoch, train_step, steps_per_epoch, this_cycle,
time.time() - start_time, results['loss'])
if step % steps_per_epoch == 0:
logging.info('[*] Saving checkpoint to %s...', checkpoint_dir)
saver.save(sess, os.path.join(checkpoint_dir, 'model'),
global_step=global_step)
# Setting step to global_step allows for training for a total of
# train_steps even if the program is restarted during training.
step = global_step + 1
if __name__ == '__main__':
app.run(main)
```
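The logging block in the training loop derives the epoch number and the within-epoch step from the global step. A standalone arithmetic check of that bookkeeping (plain Python, the numbers are made up):

```python
import math

steps_per_epoch = 100
global_step = 250
train_epoch = int(math.ceil(global_step / float(steps_per_epoch)))   # 3
train_step = global_step - (train_epoch - 1) * steps_per_epoch       # 50
print("Epoch: [%2d] [%5d/%5d]" % (train_epoch, train_step, steps_per_epoch))
```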
#### File: research/vid2depth/util.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import locale
import os
from absl import logging
import numpy as np
import tensorflow as tf
def get_seq_middle(seq_length):
"""Returns relative index for the middle frame in sequence."""
half_offset = int((seq_length - 1) / 2)
return seq_length - 1 - half_offset
def info(obj):
"""Return info on shape and dtype of a numpy array or TensorFlow tensor."""
if obj is None:
return 'None.'
elif isinstance(obj, list):
if obj:
return 'List of %d... %s' % (len(obj), info(obj[0]))
else:
return 'Empty list.'
elif isinstance(obj, tuple):
if obj:
return 'Tuple of %d... %s' % (len(obj), info(obj[0]))
else:
return 'Empty tuple.'
else:
if is_a_numpy_array(obj):
return 'Array with shape: %s, dtype: %s' % (obj.shape, obj.dtype)
else:
return str(obj)
def is_a_numpy_array(obj):
"""Returns true if obj is a numpy array."""
return type(obj).__module__ == np.__name__
def count_parameters(also_print=True):
"""Cound the number of parameters in the model.
Args:
also_print: Boolean. If True also print the numbers.
Returns:
The total number of parameters.
"""
total = 0
if also_print:
logging.info('Model Parameters:')
for v in get_vars_to_restore():
shape = v.get_shape()
if also_print:
logging.info('%s %s: %s', v.op.name, shape,
format_number(shape.num_elements()))
total += shape.num_elements()
if also_print:
logging.info('Total: %s', format_number(total))
return total
def get_vars_to_restore(ckpt=None):
"""Returns list of variables that should be saved/restored.
Args:
ckpt: Path to existing checkpoint. If present, returns only the subset of
variables that exist in given checkpoint.
Returns:
List of all variables that need to be saved/restored.
"""
model_vars = tf.trainable_variables()
# Add batchnorm variables.
bn_vars = [v for v in tf.global_variables()
if 'moving_mean' in v.op.name or 'moving_variance' in v.op.name]
model_vars.extend(bn_vars)
model_vars = sorted(model_vars, key=lambda x: x.op.name)
if ckpt is not None:
ckpt_var_names = tf.contrib.framework.list_variables(ckpt)
ckpt_var_names = [name for (name, unused_shape) in ckpt_var_names]
for v in model_vars:
if v.op.name not in ckpt_var_names:
logging.warning('Missing var %s in checkpoint: %s', v.op.name,
os.path.basename(ckpt))
model_vars = [v for v in model_vars if v.op.name in ckpt_var_names]
return model_vars
def format_number(n):
"""Formats number with thousands commas."""
locale.setlocale(locale.LC_ALL, 'en_US')
return locale.format('%d', n, grouping=True)
def read_text_lines(filepath):
with open(filepath, 'r') as f:
lines = f.readlines()
lines = [l.rstrip() for l in lines]
return lines
```
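`get_seq_middle` picks the target frame index used by `legacy_mode`; re-deriving its arithmetic standalone shows how it behaves for small sequence lengths:

```python
def seq_middle(seq_length):
    # mirrors get_seq_middle above
    half_offset = int((seq_length - 1) / 2)
    return seq_length - 1 - half_offset

for n in (2, 3, 5):
    print("{} -> {}".format(n, seq_middle(n)))  # 2 -> 1, 3 -> 1, 5 -> 2
```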
|
{
"source": "jdavidberger/myhdl_lib",
"score": 3
}
|
#### File: myhdl_lib/examples/pipeline_control_simple.py
```python
from myhdl import *
from myhdl_lib.pipeline_control import pipeline_control
import myhdl_lib.simulation as sim
def twos_complement(rst, clk, rx_rdy, rx_vld, rx_dat, tx_rdy, tx_vld, tx_dat):
''' Two's complement conversion of a binary number
Input handshake & data
rx_rdy - (o) Ready
rx_vld - (i) Valid
rx_dat - (i) Data
Output handshake & data
tx_rdy - (i) Ready
tx_vld - (o) Valid
tx_dat - (o) Data
Implementation: 3-stage pipeline
stage 0: registers input data
stage 1: inverts data coming from stage 0 and registers the inverted data
stage 2: increments data coming from stage 1 and registers the incremented data
Each stage is implemented as a separate process controlled by a central pipeline control unit via an enable signal
The pipeline control unit manages the handshake and synchronizes the operation of the stages
'''
DATA_WIDTH = len(rx_dat)
NUM_STAGES = 3
stage_en = Signal(intbv(0)[NUM_STAGES:])
pipe_ctrl = pipeline_control( rst = rst,
clk = clk,
rx_vld = rx_vld,
rx_rdy = rx_rdy,
tx_vld = tx_vld,
tx_rdy = tx_rdy,
stage_enable = stage_en)
s0_dat = Signal(intbv(0)[DATA_WIDTH:])
@always_seq(clk.posedge, reset=rst)
def stage_0():
''' Register input data'''
if (stage_en[0]):
s0_dat.next = rx_dat
s1_dat = Signal(intbv(0)[DATA_WIDTH:])
@always_seq(clk.posedge, reset=rst)
def stage_1():
''' Invert data'''
if (stage_en[1]):
s1_dat.next = ~s0_dat
s2_dat = Signal(intbv(0)[DATA_WIDTH:])
@always_seq(clk.posedge, reset=rst)
def stage_2():
''' Add one to data'''
if (stage_en[2]):
s2_dat.next = s1_dat + 1
@always_comb
def comb():
tx_dat.next = s2_dat.signed()
return instances()
class Driver():
''' Drives input handshake interface '''
def __init__(self, rst, clk, rdy, vld, dat):
self.rst = rst
self.clk = clk
self.rdy = rdy
self.vld = vld
self.dat = dat
def write(self, dat):
while self.rst:
yield self.clk.posedge
self.vld.next = 1
self.dat.next = dat
yield self.clk.posedge
while not self.rdy:
yield self.clk.posedge
self.vld.next = 0
self.dat.next = 0
class Capture():
''' Captures output handshake interface '''
def __init__(self, rst, clk, rdy, vld, dat):
self.rst = rst
self.clk = clk
self.rdy = rdy
self.vld = vld
self.dat = dat
self.d = None
def read(self):
while self.rst:
yield self.clk.posedge
self.rdy.next = 1
yield self.clk.posedge
while not self.vld:
yield self.clk.posedge
self.rdy.next = 0
self.d = int(self.dat)
def pipeline_control_simple():
clk = sim.Clock(val=0, period=10, units="ns")
rst = sim.ResetSync(clk=clk, val=0, active=1)
rx_rdy, rx_vld, tx_rdy, tx_vld = [Signal(bool(0)) for _ in range(4)]
rx_dat = Signal(intbv(0)[7:0])
tx_dat = Signal(intbv(0, min=-128, max=1))
clkgen = clk.gen()
dut = twos_complement(rst, clk, rx_rdy, rx_vld, rx_dat, tx_rdy, tx_vld, tx_dat)
# dut = traceSignals(twos_complement, rst, clk, rx_rdy, rx_vld, rx_dat, tx_rdy, tx_vld, tx_dat)
drv = Driver(rst, clk, rx_rdy, rx_vld, rx_dat)
cap = Capture(rst, clk, tx_rdy, tx_vld, tx_dat)
data_in = []
data_out = []
def stim(GAP=0):
''' Stimulates the pipeline input '''
@instance
def _stim():
yield rst.pulse(10)
yield clk.posedge
for i in range(1, 10):
yield drv.write(i)
data_in.append(i)
for _ in range(GAP):
yield clk.posedge
for _ in range(10):
yield clk.posedge
raise StopSimulation
return _stim
def drain(GAP=0):
''' Drains the pipeline output '''
@instance
def _drain():
yield clk.posedge
while True:
yield cap.read()
data_out.append(cap.d)
for _ in range(GAP):
yield clk.posedge
return _drain
# You can play with the gap size at the input and at the output to see how the pipeline responds (see time diagrams)
Simulation(clkgen, dut, stim(GAP=0), drain(GAP=0)).run()
print "data_in ({}): {}".format(len(data_in), data_in)
print "data_out ({}): {}".format(len(data_out), data_out)
data_out_expected = [-d for d in data_in]
assert cmp(data_out_expected, data_out)==0, "expected: data_out ({}): {}".format(len(data_out_expected), data_out_expected)
if __name__ == '__main__':
pipeline_control_simple()
```
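For reference, the data path of the three stages above can be checked with plain Python arithmetic on a 7-bit input (matching `rx_dat`); reading the final 7-bit pattern as a signed value, as `tx_dat` does, yields the negated input. A small worked example, independent of MyHDL:

```python
DATA_WIDTH = 7
x = 5
s0 = x                                    # stage 0: register the input
s1 = (~s0) & ((1 << DATA_WIDTH) - 1)      # stage 1: bitwise invert -> 0b1111010 = 122
s2 = (s1 + 1) & ((1 << DATA_WIDTH) - 1)   # stage 2: add one        -> 0b1111011 = 123
# interpret the 7-bit pattern as a signed value (what tx_dat sees)
signed = s2 - (1 << DATA_WIDTH) if s2 >= (1 << (DATA_WIDTH - 1)) else s2
print(signed)                             # -5
```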
#### File: myhdl_lib/myhdl_lib/arbiter.py
```python
from myhdl import *
from myhdl_lib.utils import assign
def arbiter(rst, clk, req_vec, gnt_vec=None, gnt_idx=None, gnt_vld=None, gnt_rdy=None, ARBITER_TYPE="priority"):
''' Wrapper that provides common interface to all arbiters '''
if ARBITER_TYPE == "priority":
_arb = arbiter_priority(req_vec, gnt_vec, gnt_idx, gnt_vld)
elif (ARBITER_TYPE == "roundrobin"):
_arb = arbiter_roundrobin(rst, clk, req_vec, gnt_vec, gnt_idx, gnt_vld, gnt_rdy)
else:
assert "Arbiter: Unknown arbiter type: {}".format(ARBITER_TYPE)
return _arb
def arbiter_priority(req_vec, gnt_vec=None, gnt_idx=None, gnt_vld=None):
""" Static priority arbiter: grants the request with highest priority, which is the lower index
req_vec - (i) vector of request signals, req_vec[0] is with the highest priority
gnt_vec - (o) optional, vector of grants, one grant per request, only one grant can be active at at time
gnt_idx - (o) optional, grant index, index of the granted request
gnt_vld - (o) optional, grant valid, indicate that there is a granted request
"""
REQ_NUM = len(req_vec)
gnt_vec_s = Signal(intbv(0)[REQ_NUM:])
gnt_idx_s = Signal(intbv(0, min=0, max=REQ_NUM))
gnt_vld_s = Signal(bool(0))
@always_comb
    def priority_encoder():
gnt_vec_s.next = 0
gnt_idx_s.next = 0
gnt_vld_s.next = 0
for i in range(REQ_NUM):
if ( req_vec[i]==1 ):
gnt_vec_s.next[i] = 1
gnt_idx_s.next = i
gnt_vld_s.next = 1
break
if gnt_vec!=None: _vec = assign(gnt_vec, gnt_vec_s)
if gnt_idx!=None: _idx = assign(gnt_idx, gnt_idx_s)
if gnt_vld!=None: _vld = assign(gnt_vld, gnt_vld_s)
return instances()
def arbiter_roundrobin(rst, clk, req_vec, gnt_vec=None, gnt_idx=None, gnt_vld=None, gnt_rdy=None):
""" Round Robin arbiter: finds the active request with highest priority and presents its index on the gnt_idx output.
req_vec - (i) vector of request signals, priority changes dynamically
            gnt_vec - (o) optional, vector of grants, one grant per request, only one grant can be active at a time
gnt_idx - (o) optional, grant index, index of the granted request
gnt_vld - (o) optional, grant valid, indicate that there is a granted request
gnt_rdy - (i) grant ready, indicates that the current grant is consumed and priority should be updated.
The priority is updated only if there is a valid grant when gnt_rdy is activated.
When priority is updated, the currently granted req_vec[gnt_idx] gets the lowest priority and
req_vec[gnt_idx+1] gets the highest priority.
                        gnt_rdy should be activated in the same clock cycle in which the granted output is used
"""
REQ_NUM = len(req_vec)
ptr = Signal(intbv(0, min=0, max=REQ_NUM))
gnt_vec_s = Signal(intbv(0)[REQ_NUM:])
gnt_idx_s = Signal(intbv(0, min=0, max=REQ_NUM))
gnt_vld_s = Signal(bool(0))
@always(clk.posedge)
def ptr_proc():
if (rst):
ptr.next = REQ_NUM-1
elif (gnt_rdy and gnt_vld_s):
ptr.next = gnt_idx_s
@always_comb
def roundrobin_encoder():
gnt_vec_s.next = 0
gnt_idx_s.next = 0
gnt_vld_s.next = 0
for i in range(REQ_NUM):
if (i>ptr):
if ( req_vec[i]==1 ):
gnt_vec_s.next[i] = 1
gnt_idx_s.next = i
gnt_vld_s.next = 1
return
for i in range(REQ_NUM):
if ( req_vec[i]==1 ):
gnt_vec_s.next[i] = 1
gnt_idx_s.next = i
gnt_vld_s.next = 1
return
if gnt_vec!=None: _vec = assign(gnt_vec, gnt_vec_s)
if gnt_idx!=None: _idx = assign(gnt_idx, gnt_idx_s)
if gnt_vld!=None: _vld = assign(gnt_vld, gnt_vld_s)
return instances()
if __name__ == '__main__':
pass
```
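A behavioral (non-synthesizable) Python sketch of the round-robin scan above may help: requests at indices above the pointer are searched first, then the remaining ones, and the pointer moves to the granted index once the grant is consumed. After reset the pointer sits at `REQ_NUM-1`, so index 0 initially has the highest priority. Illustrative only:

```python
def roundrobin_grant(req_vec, ptr):
    n = len(req_vec)
    for i in list(range(ptr + 1, n)) + list(range(0, ptr + 1)):
        if req_vec[i]:
            return i
    return None   # no request active: no grant (gnt_vld stays low)

ptr = 3   # reset value for a 4-input arbiter (REQ_NUM - 1)
for req in ([0, 1, 1, 0], [0, 1, 1, 0], [1, 0, 0, 0]):
    gnt = roundrobin_grant(req, ptr)
    print("{} -> grant {}".format(req, gnt))
    if gnt is not None:
        ptr = gnt   # models the ptr update on gnt_rdy & gnt_vld
# [0, 1, 1, 0] -> grant 1, then grant 2, then grant 0
```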
#### File: myhdl_lib/myhdl_lib/sfifo_beh.py
```python
class sfifo_beh:
''' Speculative FIFO: behavioral model '''
COMMIT = 0
DISCARD = 1
DISCARD_COMMIT = 2
def __init__(self, depth, afull_th=None, aempty_th=None):
self.depth = depth
self.data = [] # Data (written, committed, not read )
self.data_sw = [] # Speculatively written data
self.data_sr = [] # Speculatively read data
self.afull_th = afull_th if (afull_th != None) else depth//2
self.aempty_th = aempty_th if (aempty_th != None) else depth//2
self.ovf = False
self.udf = False
self.count_max = 0
def reset(self):
self.__init__(self.depth, self.afull_th, self.aempty_th)
def isFull(self):
return self.getCount() == self.depth
def isEmpty(self):
return len(self.data) == 0
def isAFull(self):
return (self.depth - self.getCount()) <= self.afull_th
def isAEmpty(self):
return len(self.data) <= self.aempty_th
def isOvf(self):
return self.ovf
def isUdf(self):
return self.udf
def getCount(self):
return len(self.data_sr) + len(self.data) + len(self.data_sw)
def getCountMax(self):
return self.count_max
def getDout(self):
return self.data[0]
def _rcmd(self, cmd):
if (cmd == self.DISCARD or cmd == self.DISCARD_COMMIT):
self.data = self.data_sr + self.data
self.data_sr = []
elif (cmd == self.COMMIT):
self.data_sr = []
def _wcmd(self, cmd):
if (cmd == self.DISCARD or cmd == self.DISCARD_COMMIT):
self.data_sw = []
elif (cmd == self.COMMIT):
self.data.extend(self.data_sw)
self.data_sw = []
def command(self, wcmd=None, rcmd=None):
self._wcmd(wcmd)
self._rcmd(rcmd)
def write(self, val, wcmd=None, rcmd=None):
if (not self.isFull()):
self.data_sw.append(val)
else:
self.ovf = True
self._wcmd(wcmd)
self._rcmd(rcmd)
if (self.getCount() > self.count_max):
self.count_max = self.getCount()
def read(self, wcmd=None, rcmd=None):
x = None
if (not self.isEmpty()):
x = self.data.pop(0)
self.data_sr.append(x)
else:
self.udf = True
self._wcmd(wcmd)
self._rcmd(rcmd)
return x
def write_read(self, val, wcmd=None, rcmd=None):
full = self.isFull()
empty = self.isEmpty()
if (not full):
self.data_sw.append(val)
else:
self.ovf = True
self._wcmd(wcmd)
x = None
if (not empty):
x = self.data.pop(0)
self.data_sr.append(x)
else:
self.udf = True
self._rcmd(rcmd)
if (self.getCount() > self.count_max):
self.count_max = self.getCount()
return x
def status(self):
return "FULL = {:}, EMPTY = {:}, COUNT = {:3}, AFULL = {:}, AEMPTY = {:}, OVF = {:}, UDF = {:}, COUNT_MAX = {:}, DOUT = {:}".format(int(self.isFull()), int(self.isEmpty()), int(self.getCount()), int(self.isAFull()), int(self.isAEmpty()), int(self.isOvf()), int(self.isUdf()), self.getCountMax(), self.data[0] if len(self.data)>0 else None)
return "FULL = {:}, EMPTY = {:}, COUNT = {:3}, AFULL = {:}, AEMPTY = {:}, OVF = {:}, UDF = {:}, COUNT_MAX = {:}, DOUT = {:}".format(int(self.isFull()), int(self.isEmpty()), int(self.count()), int(self.isAFull()), int(self.isAEmpty()), int(self.isOvf()), int(self.isUdf()), self.maxCount(), self.data[0] if len(self.data)>0 else None)
```
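An illustrative use of the behavioral model above (assuming `sfifo_beh` is in scope): speculative writes become visible to the reader only after a write COMMIT, and a write DISCARD drops them again.

```python
f = sfifo_beh(depth=4)
f.write(1)
f.write(2)
print(f.isEmpty())            # True: the two writes are still speculative
f.command(wcmd=f.COMMIT)
print(f.getDout())            # 1: committed data is now readable
f.write(3, wcmd=f.DISCARD)    # speculative write dropped immediately
print(f.getCount())           # 2
print(f.read(rcmd=f.COMMIT))  # 1
print(f.getCount())           # 1
```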
#### File: myhdl_lib/simulation/clock.py
```python
from myhdl import SignalType, instance, delay
# This code is a shameless copy of <NAME>'s _clock class: https://github.com/cfelton/minnesota/blob/master/mn/system/_clock.py
class Clock(SignalType):
''' Clock signal equipped with clock generator '''
_frequency_units = {"Hz":1, "kHz":1e3, "Mhz":1e6, "GHz":1e9}
_periode_units = {"sec":1,"s":1, "ms":1e-3, "us":1e-6, "ns":1e-9}
def __init__(self, val, frequency=None, period=10, units="ns", ratio_high_to_low=0.5):
''' Sets clock parameters
val - initial clock value
frequency - clock frequency
period - clock period, used if clock frequency is not defined
units - units in which frequency or period is defined
allowed frequency units: "Hz", "kHz", "Mhz", "GHz"
allowed period units: "sec","s", "ms":, "us", "ns"
                ratio_high_to_low - clock signal ratio, 0 < ratio_high_to_low < 1
'''
if frequency is not None:
assert Clock._frequency_units.has_key(units), "Unknown frequency units {}. Allowed values: {}".format(units, Clock._frequency_units.keys())
self._frequency_Hz = frequency*Clock._frequency_units[units]
            self._period_sec = 1.0/self._frequency_Hz
else:
assert Clock._periode_units.has_key(units), "Unknown time units {}. Allowed values: {}".format(units, Clock._periode_units.keys())
self._period_sec = period*Clock._periode_units[units]
            self._frequency_Hz = 1.0/self._period_sec
self._periode_ns = self._period_sec*1e9
assert (0 < ratio_high_to_low), "Expected 0 < ratio_high_to_low, got ratio_high_to_low={}".format(ratio_high_to_low)
assert (ratio_high_to_low < 1), "Expected ratio_high_to_low < 1, got ratio_high_to_low={}".format(ratio_high_to_low)
self._periode_high_ns = long(self._periode_ns * ratio_high_to_low)
self._periode_low_ns = long(self._periode_ns - self._periode_high_ns)
self._periode_ns = self._periode_high_ns + self._periode_low_ns
self.periode_sec = self._periode_ns*1e-9
self._frequency_Hz = 1e9/self._periode_ns
SignalType.__init__(self, bool(val))
@property
def frequency(self):
return self._frequency_Hz
@property
def period(self):
return self._period_sec
def gen(self):
@instance
def clkgen():
if self.val:
self.next = True
yield delay(self._periode_high_ns)
while True:
self.next = False
yield delay(self._periode_low_ns)
self.next = True
yield delay(self._periode_high_ns)
return clkgen
if __name__ == '__main__':
pass
```
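A minimal usage sketch (assuming the `Clock` class above and MyHDL are available), mirroring how the test benches in this library construct their clocks:

```python
clk = Clock(val=0, period=10, units="ns")   # 10 ns period, i.e. 100 MHz
print(clk.frequency)    # 100000000.0 (Hz)
print(clk.period)       # 1e-08 (seconds)
clkgen = clk.gen()      # clock-generator instance to hand to Simulation(...)
```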
#### File: myhdl_lib/myhdl_lib/utils.py
```python
from myhdl import *
def assign(a,b):
''' Combinatorial assignment: a = b '''
@always_comb
def _assign():
a.next = b
return _assign
def byteorder(bv_di, bv_do, REVERSE=True):
""" Reverses the byte order of an input bit-vector
bv_di - (i) input bit-vector
bv_do - (o) output bit-vector
"""
BITS = len(bv_di)
assert (BITS % 8)==0, "byteorder: expects len(bv_in)=8*x, but len(bv_in)={}".format(BITS)
if REVERSE:
BYTES = BITS//8
y = ConcatSignal(*[bv_di(8*(b+1),8*b) for b in range(BYTES)])
@always_comb
def _reverse():
bv_do.next = y
else:
@always_comb
def _pass():
bv_do.next = bv_di
return instances()
if __name__ == '__main__':
pass
```
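`byteorder` reverses the byte order of a bit-vector: byte 0 (bits 7:0) of the input becomes the most significant byte of the output. A plain-Python sketch of the same computation on a 32-bit value:

```python
def reverse_bytes(value, bits=32):
    # byte b of the input (bits 8*b+7 .. 8*b) ends up at the top of the output
    assert bits % 8 == 0
    out = 0
    for b in range(bits // 8):
        out = (out << 8) | ((value >> (8 * b)) & 0xFF)
    return out

print(hex(reverse_bytes(0x11223344)))  # 0x44332211
```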
#### File: myhdl_lib/test/test_demux.py
```python
import unittest
from myhdl import *
from myhdl_lib.mux import demux
import myhdl_lib.simulation as sim
import random
class TestDemux(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.simulators = ["myhdl", "icarus"]
def testDemux5(self):
''' DEMUX: 5 outputs '''
DMAX = 100
NUM_OUTPUTS = 5
def demux_top(sel, di, do_0, do_1, do_2, do_3, do_4):
''' Needed when demux is co-simulated as top level'''
ls_do = [Signal(intbv(0,min=0,max=DMAX)) for _ in range(NUM_OUTPUTS)]
@always_comb
def _assign():
do_0.next = ls_do[0]
do_1.next = ls_do[1]
do_2.next = ls_do[2]
do_3.next = ls_do[3]
do_4.next = ls_do[4]
inst = demux(sel=sel, di=di, ls_do=ls_do)
return instances()
sel = Signal(intbv(0,min=0,max=NUM_OUTPUTS))
di = Signal(intbv(0,min=0,max=DMAX))
ls_do = [Signal(intbv(0,min=0,max=DMAX)) for _ in range(NUM_OUTPUTS)]
argl = {"sel":sel, "di":di, "do_0":ls_do[0], "do_1":ls_do[1], "do_2":ls_do[2], "do_3":ls_do[3], "do_4":ls_do[4]}
def stim():
@instance
def _inst():
sel.next = 0
for i in range(NUM_OUTPUTS):
di.next = 0
sel.next = 0
yield delay(10)
for _ in range(10):
d = [random.randint(0,DMAX-1) for _ in range(NUM_OUTPUTS)]
for s in range(NUM_OUTPUTS):
di.next = d[s]
sel.next = s
yield delay(10)
for i in range(NUM_OUTPUTS):
if s == i:
assert d[s] == ls_do[i], "Mux output {} (sel={}): expected {}, detected {}".format(i, sel, d[s], ls_do[i])
else:
                                assert 0 == ls_do[i], "Mux output {} (sel={}): expected {}, detected {}".format(i, sel, 0, ls_do[i])
yield delay(10)
raise StopSimulation
return _inst
getDut = sim.DUTer()
for s in self.simulators:
getDut.selectSimulator(s)
dut=getDut(demux_top, **argl)
stm = stim()
Simulation( dut, stm).run()
del dut, stm
def testDemux2(self):
''' DEMUX: 2 outputs, boolean Select '''
DMAX = 100
NUM_OUTPUTS = 2
def demux_top(sel, di, do_0, do_1):
''' Needed when demux is co-simulated as top level'''
ls_do = [Signal(intbv(0,min=0,max=DMAX)) for _ in range(NUM_OUTPUTS)]
@always_comb
def _assign():
do_0.next = ls_do[0]
do_1.next = ls_do[1]
inst = demux(sel=sel, di=di, ls_do=ls_do)
return instances()
sel = Signal(bool(0))
di = Signal(intbv(0,min=0,max=DMAX))
ls_do = [Signal(intbv(0,min=0,max=DMAX)) for _ in range(NUM_OUTPUTS)]
argl = {"sel":sel, "di":di, "do_0":ls_do[0], "do_1":ls_do[1]}
def stim():
@instance
def _inst():
sel.next = 0
for i in range(NUM_OUTPUTS):
di.next = 0
sel.next = 0
yield delay(10)
for _ in range(10):
d = [random.randint(0,DMAX-1) for _ in range(NUM_OUTPUTS)]
for s in range(NUM_OUTPUTS):
di.next = d[s]
sel.next = s
yield delay(10)
for i in range(NUM_OUTPUTS):
if s == i:
assert d[s] == ls_do[i], "Mux output {} (sel={}): expected {}, detected {}".format(i, sel, d[s], ls_do[i])
else:
assert 0 == ls_do[i], "Mux output {} (sel={}): expected {}, detected {}".format(i, sel, 0, ls_do[i])
yield delay(10)
raise StopSimulation
return _inst
getDut = sim.DUTer()
for s in self.simulators:
getDut.selectSimulator(s)
dut=getDut(demux_top, **argl)
stm = stim()
Simulation( dut, stm).run()
del dut, stm
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
```
#### File: myhdl_lib/test/test_fifo_speculative.py
```python
import unittest
import itertools
from myhdl import *
from myhdl_lib.fifo_speculative import fifo_speculative
from myhdl_lib import sfifo_beh
import myhdl_lib.simulation as sim
class TestSFifo(unittest.TestCase):
COMMIT = 0
DISCARD = 1
DISCARD_COMMIT = 2
@classmethod
def setUpClass(cls):
cls.simulators = ["myhdl", "icarus"]
def setUp(self):
DATA_RANGE_MIN = 0
DATA_RANGE_MAX = 128
DEPTH_MAX = 101
self.sfifo_model = None
self.full = Signal(bool(0))
self.we = Signal(bool(0))
self.din = Signal(intbv(0, min=DATA_RANGE_MIN, max=DATA_RANGE_MAX))
self.empty = Signal(bool(0))
self.re = Signal(bool(0))
self.dout = Signal(intbv(0, min=DATA_RANGE_MIN, max=DATA_RANGE_MAX))
self.wr_commit = Signal(bool(0))
self.wr_discard = Signal(bool(0))
self.rd_commit = Signal(bool(0))
self.rd_discard = Signal(bool(0))
self.afull = Signal(bool(0))
self.aempty = Signal(bool(0))
self.afull_th = Signal(intbv(0, min=0, max=DEPTH_MAX))
self.aempty_th = Signal(intbv(0, min=0, max=DEPTH_MAX))
self.count = Signal(intbv(0, min=0, max=DEPTH_MAX))
self.count_max = Signal(intbv(0, min=0, max=DEPTH_MAX))
self.ovf = Signal(bool(0))
self.udf = Signal(bool(0))
self.clk = sim.Clock(val=0, period=10, units="ns")
self.rst = sim.ResetSync(clk=self.clk, val=0, active=1)
self.clkgen = self.clk.gen()
def tearDown(self):
pass
def reset(self):
yield self.rst.pulse(5)
def fifo_command(self, wcmd=None, rcmd=None):
self.sfifo_model.command(wcmd, rcmd)
self.wr_commit.next = 1 if (wcmd == self.COMMIT or wcmd == self.DISCARD_COMMIT) else 0
self.wr_discard.next = 1 if (wcmd == self.DISCARD or wcmd == self.DISCARD_COMMIT) else 0
self.rd_commit.next = 1 if (rcmd == self.COMMIT or rcmd == self.DISCARD_COMMIT) else 0
self.rd_discard.next = 1 if (rcmd == self.DISCARD or rcmd == self.DISCARD_COMMIT) else 0
yield self.clk.posedge
self.wr_commit.next = 0
self.wr_discard.next = 0
self.rd_commit.next = 0
self.rd_discard.next = 0
def fifo_write(self, val, wcmd=None, rcmd=None):
self.sfifo_model.write(val, wcmd, rcmd)
self.we.next = 1
self.din.next = val
self.wr_commit.next = 1 if (wcmd == self.COMMIT or wcmd == self.DISCARD_COMMIT) else 0
self.wr_discard.next = 1 if (wcmd == self.DISCARD or wcmd == self.DISCARD_COMMIT) else 0
self.rd_commit.next = 1 if (rcmd == self.COMMIT or rcmd == self.DISCARD_COMMIT) else 0
self.rd_discard.next = 1 if (rcmd == self.DISCARD or rcmd == self.DISCARD_COMMIT) else 0
yield self.clk.posedge
self.we.next = 0
self.din.next = 0
self.wr_commit.next = 0
self.wr_discard.next = 0
self.rd_commit.next = 0
self.rd_discard.next = 0
def fifo_read(self, wcmd=None, rcmd=None):
self.sfifo_model.read(wcmd, rcmd)
self.re.next = 1
self.wr_commit.next = 1 if (wcmd == self.COMMIT or wcmd == self.DISCARD_COMMIT) else 0
self.wr_discard.next = 1 if (wcmd == self.DISCARD or wcmd == self.DISCARD_COMMIT) else 0
self.rd_commit.next = 1 if (rcmd == self.COMMIT or rcmd == self.DISCARD_COMMIT) else 0
self.rd_discard.next = 1 if (rcmd == self.DISCARD or rcmd == self.DISCARD_COMMIT) else 0
yield self.clk.posedge
self.re.next = 0
self.wr_commit.next = 0
self.wr_discard.next = 0
self.rd_commit.next = 0
self.rd_discard.next = 0
def fifo_write_read(self, val, wcmd=None, rcmd=None):
self.sfifo_model.write_read(val, wcmd, rcmd)
self.we.next = 1
self.din.next = val
self.re.next = 1
self.wr_commit.next = 1 if (wcmd == self.COMMIT or wcmd == self.DISCARD_COMMIT) else 0
self.wr_discard.next = 1 if (wcmd == self.DISCARD or wcmd == self.DISCARD_COMMIT) else 0
self.rd_commit.next = 1 if (rcmd == self.COMMIT or rcmd == self.DISCARD_COMMIT) else 0
self.rd_discard.next = 1 if (rcmd == self.DISCARD or rcmd == self.DISCARD_COMMIT) else 0
yield self.clk.posedge
self.re.next = 0
self.din.next = 0
self.we.next = 0
self.wr_commit.next = 0
self.wr_discard.next = 0
self.rd_commit.next = 0
self.rd_discard.next = 0
def fifo_state_check(self):
yield delay(1)
m = self.sfifo_model
assert m.isFull()==self.full, "Full: expected={}, detected={}".format(m.isFull(), self.full)
assert m.isEmpty()==self.empty, "Empty: expected={}, detected={}".format(m.isEmpty(), self.empty)
assert m.isAFull()==self.afull, "AFull: expected={}, detected={}".format(m.isAFull(), self.afull)
assert m.isAEmpty()==self.aempty, "AEmpty: expected={}, detected={}".format(m.isAEmpty(), self.aempty)
assert m.getCount()==self.count, "Count: expected={}, detected={}".format(m.getCount(), self.count)
assert m.getCountMax()==self.count_max, "CountMax: expected={}, detected={}".format(m.getCountMax(), self.count_max)
assert m.isOvf()==self.ovf, "Overflow: expected={}, detected={}".format(m.isOvf(), self.ovf)
assert m.isUdf()==self.udf, "Underflow: expected={}, detected={}".format(m.isUdf(), self.udf)
if (not m.isEmpty() and not self.empty):
assert m.getDout()==self.dout,"Dout: expected={}, detected={}".format(m.getDout(), self.dout)
def testWriteThenReadCommitAtEveryOperation(self):
''' SFIFO: Write then Read, Commit at every operation '''
DEPTH = [2, 4, 7, 8, 9, 10]
def stim(DEPTH):
@instance
def _inst():
data_in = itertools.cycle(xrange(128))
yield self.reset()
yield self.fifo_state_check()
for i in range(1,DEPTH+1):
# Fill up to i
for _ in range(1,i+1):
yield self.fifo_write(data_in.next(), wcmd=self.COMMIT)
yield self.fifo_state_check()
# Drain down to 0
for _ in range(1,i+1):
yield self.fifo_read(rcmd=self.COMMIT)
yield self.fifo_state_check()
yield self.clk.posedge
raise StopSimulation
return _inst
getDut = sim.DUTer()
for s in self.simulators:
getDut.selectSimulator(s)
for dpt in DEPTH:
self.sfifo_model = sfifo_beh.sfifo_beh(dpt)
dut = getDut( fifo_speculative,
rst = self.rst,
clk = self.clk,
full = self.full,
we = self.we,
din = self.din,
empty = self.empty,
re = self.re,
dout = self.dout,
wr_commit = self.wr_commit,
wr_discard = self.wr_discard,
rd_commit = self.rd_commit,
rd_discard = self.rd_discard,
afull = self.afull,
aempty = self.aempty,
count = self.count,
afull_th = None,
aempty_th = None,
ovf = self.ovf,
udf = self.udf,
count_max = self.count_max,
depth = dpt,
width = None)
stm = stim(dpt)
Simulation(self.clkgen, dut, stm).run()
del dut, stm
def testWriteThenReadCommitAtTheLastOperation(self):
''' SFIFO: Write then Read, Commit at the last operation '''
DEPTH = [2, 4, 7, 8, 9, 10]
def stim(DEPTH):
@instance
def _inst():
data_in = itertools.cycle(xrange(128))
yield self.reset()
yield self.fifo_state_check()
for i in range(1,DEPTH+1):
# Fill up to i
for w in range(1,i+1):
if (w != i):
yield self.fifo_write(data_in.next());
yield self.fifo_state_check()
else:
yield self.fifo_write(data_in.next(), wcmd=self.COMMIT)
yield self.fifo_state_check()
# Drain down to 0
for r in range(1,i+1):
if (r != i):
yield self.fifo_read();
yield self.fifo_state_check()
else:
yield self.fifo_read(rcmd=self.COMMIT)
yield self.fifo_state_check()
yield self.clk.posedge
raise StopSimulation
return _inst
getDut = sim.DUTer()
for s in self.simulators:
getDut.selectSimulator(s)
for dpt in DEPTH:
self.sfifo_model = sfifo_beh.sfifo_beh(dpt)
dut = getDut( fifo_speculative,
rst = self.rst,
clk = self.clk,
full = self.full,
we = self.we,
din = self.din,
empty = self.empty,
re = self.re,
dout = self.dout,
wr_commit = self.wr_commit,
wr_discard = self.wr_discard,
rd_commit = self.rd_commit,
rd_discard = self.rd_discard,
afull = self.afull,
aempty = self.aempty,
count = self.count,
afull_th = None,
aempty_th = None,
ovf = self.ovf,
udf = self.udf,
count_max = self.count_max,
depth = dpt,
width = None)
stm = stim(dpt)
Simulation(self.clkgen, dut, stm).run()
del dut, stm
def testWriteThenReadCommitAfterTheLastOperation(self):
''' SFIFO: Write then Read, Commit after the last operation '''
DEPTH = [2, 4, 7, 8, 9, 10]
def stim(DEPTH):
@instance
def _inst():
data_in = itertools.cycle(xrange(128))
yield self.reset()
yield self.fifo_state_check()
for i in range(1,DEPTH+1):
# Fill up to i
for _ in range(1,i+1):
yield self.fifo_write(data_in.next());
yield self.fifo_state_check()
                    yield self.fifo_command(wcmd=self.COMMIT)
yield self.fifo_state_check()
# Drain down to 0
for _ in range(1,i+1):
yield self.fifo_read();
yield self.fifo_state_check()
                    yield self.fifo_command(rcmd=self.COMMIT)
yield self.fifo_state_check()
yield self.clk.posedge
raise StopSimulation
return _inst
getDut = sim.DUTer()
for s in self.simulators:
getDut.selectSimulator(s)
for dpt in DEPTH:
self.sfifo_model = sfifo_beh.sfifo_beh(dpt)
dut = getDut( fifo_speculative,
rst = self.rst,
clk = self.clk,
full = self.full,
we = self.we,
din = self.din,
empty = self.empty,
re = self.re,
dout = self.dout,
wr_commit = self.wr_commit,
wr_discard = self.wr_discard,
rd_commit = self.rd_commit,
rd_discard = self.rd_discard,
afull = self.afull,
aempty = self.aempty,
count = self.count,
afull_th = None,
aempty_th = None,
ovf = self.ovf,
udf = self.udf,
count_max = self.count_max,
depth = dpt,
width = None)
stm = stim(dpt)
Simulation(self.clkgen, dut, stm).run()
del dut, stm
def testWriteAndReadCommitAtEveryOperation(self):
''' SFIFO: Write and Read simultaneously, Commit at every operation '''
DEPTH = [2, 4, 7, 8, 9, 10]
def stim(DEPTH):
@instance
def _inst():
data_in = itertools.cycle(xrange(128))
yield self.reset()
yield self.fifo_state_check()
yield self.fifo_write(data_in.next(), wcmd=self.COMMIT)
yield self.fifo_state_check()
for i in range(2,DEPTH+1):
for _ in range(1,i+1):
yield self.fifo_write_read(data_in.next(), wcmd=self.COMMIT, rcmd=self.COMMIT)
yield self.fifo_state_check()
                yield self.fifo_read(rcmd=self.COMMIT)
yield self.fifo_state_check()
yield self.clk.posedge
raise StopSimulation
return _inst
getDut = sim.DUTer()
for s in self.simulators:
getDut.selectSimulator(s)
for dpt in DEPTH:
self.sfifo_model = sfifo_beh.sfifo_beh(dpt)
dut = getDut( fifo_speculative,
rst = self.rst,
clk = self.clk,
full = self.full,
we = self.we,
din = self.din,
empty = self.empty,
re = self.re,
dout = self.dout,
wr_commit = self.wr_commit,
wr_discard = self.wr_discard,
rd_commit = self.rd_commit,
rd_discard = self.rd_discard,
afull = self.afull,
aempty = self.aempty,
count = self.count,
afull_th = None,
aempty_th = None,
ovf = self.ovf,
udf = self.udf,
count_max = self.count_max,
depth = dpt,
width = None)
stm = stim(dpt)
Simulation(self.clkgen, dut, stm).run()
del dut, stm
def testWriteAndReadCommitAtTheLastOperation(self):
''' SFIFO: Write and Read simultaneously, Commit at the last operation '''
DEPTH = [2, 4, 7, 8, 9, 10]
def stim(DEPTH):
@instance
def _inst():
data_in = itertools.cycle(xrange(128))
yield self.reset()
yield self.fifo_state_check()
yield self.fifo_write(data_in.next(), wcmd=self.COMMIT);
yield self.fifo_state_check()
for i in range(2,DEPTH+1):
for w in range(1,i+1):
if (w != i):
yield self.fifo_write_read(data_in.next());
yield self.fifo_state_check()
else:
yield self.fifo_write_read(data_in.next(), wcmd=self.COMMIT, rcmd=self.COMMIT)
yield self.fifo_state_check()
yield self.fifo_read(data_in.next(), rcmd=self.COMMIT);
yield self.fifo_state_check()
yield self.clk.posedge
raise StopSimulation
return _inst
getDut = sim.DUTer()
for s in self.simulators:
getDut.selectSimulator(s)
for dpt in DEPTH:
self.sfifo_model = sfifo_beh.sfifo_beh(dpt)
dut = getDut( fifo_speculative,
rst = self.rst,
clk = self.clk,
full = self.full,
we = self.we,
din = self.din,
empty = self.empty,
re = self.re,
dout = self.dout,
wr_commit = self.wr_commit,
wr_discard = self.wr_discard,
rd_commit = self.rd_commit,
rd_discard = self.rd_discard,
afull = self.afull,
aempty = self.aempty,
count = self.count,
afull_th = None,
aempty_th = None,
ovf = self.ovf,
udf = self.udf,
count_max = self.count_max,
depth = dpt,
width = None)
stm = stim(dpt)
Simulation(self.clkgen, dut, stm).run()
del dut, stm
def testWriteAndReadCommitAfterTheLastOperation(self):
''' SFIFO: Write and Read simultaneously, Commit after the last operation '''
DEPTH = [2, 4, 7, 8, 9, 10]
def stim(DEPTH):
@instance
def _inst():
data_in = itertools.cycle(xrange(128))
yield self.reset()
yield self.fifo_state_check()
yield self.fifo_write(data_in.next(), wcmd=self.COMMIT);
yield self.fifo_state_check()
for i in range(1,DEPTH+1):
# Fill up to i
for _ in range(1,i+1):
yield self.fifo_write(data_in.next());
yield self.fifo_state_check()
self.fifo_command(wcmd=self.COMMIT)
yield self.fifo_state_check()
# Drain down to 0
for _ in range(1,i+1):
yield self.fifo_read();
yield self.fifo_state_check()
self.fifo_command(rcmd=self.COMMIT)
yield self.fifo_state_check()
yield self.fifo_read(rcmd=self.COMMIT);
yield self.fifo_state_check()
yield self.clk.posedge
raise StopSimulation
return _inst
getDut = sim.DUTer()
for s in self.simulators:
getDut.selectSimulator(s)
for dpt in DEPTH:
self.sfifo_model = sfifo_beh.sfifo_beh(dpt)
dut = getDut( fifo_speculative,
rst = self.rst,
clk = self.clk,
full = self.full,
we = self.we,
din = self.din,
empty = self.empty,
re = self.re,
dout = self.dout,
wr_commit = self.wr_commit,
wr_discard = self.wr_discard,
rd_commit = self.rd_commit,
rd_discard = self.rd_discard,
afull = self.afull,
aempty = self.aempty,
count = self.count,
afull_th = None,
aempty_th = None,
ovf = self.ovf,
udf = self.udf,
count_max = self.count_max,
depth = dpt,
width = None)
stm = stim(dpt)
Simulation(self.clkgen, dut, stm).run()
del dut, stm
def testWriteThenReadDiscardAtEveryOperation(self):
''' SFIFO: Write then Read, Discard at every operation '''
DEPTH = [2, 4, 7, 8, 9, 10]
def stim(DEPTH):
@instance
def _inst():
data_in = itertools.cycle(xrange(128))
yield self.reset()
yield self.fifo_state_check()
for i in range(1,DEPTH+1):
# Fill up to i and Discard
for _ in range(1,i+1):
yield self.fifo_write(data_in.next(), wcmd=self.DISCARD)
yield self.fifo_state_check()
# Fill up to i and Commit
for _ in range(1,i+1):
yield self.fifo_write(data_in.next(), wcmd=self.COMMIT)
yield self.fifo_state_check()
# Drain down to 0 and Discard
for _ in range(1,i+1):
yield self.fifo_read(rcmd=self.DISCARD)
yield self.fifo_state_check()
# Drain down to 0 and Commit
for _ in range(1,i+1):
yield self.fifo_read(rcmd=self.COMMIT)
yield self.fifo_state_check()
yield self.clk.posedge
raise StopSimulation
return _inst
getDut = sim.DUTer()
for s in self.simulators:
getDut.selectSimulator(s)
for dpt in DEPTH:
self.sfifo_model = sfifo_beh.sfifo_beh(dpt)
dut = getDut( fifo_speculative,
rst = self.rst,
clk = self.clk,
full = self.full,
we = self.we,
din = self.din,
empty = self.empty,
re = self.re,
dout = self.dout,
wr_commit = self.wr_commit,
wr_discard = self.wr_discard,
rd_commit = self.rd_commit,
rd_discard = self.rd_discard,
afull = self.afull,
aempty = self.aempty,
count = self.count,
afull_th = None,
aempty_th = None,
ovf = self.ovf,
udf = self.udf,
count_max = self.count_max,
depth = dpt,
width = None)
stm = stim(dpt)
Simulation(self.clkgen, dut, stm).run()
del dut, stm
def testWriteThenReadDiscardAtTheLastOperation(self):
''' SFIFO: Write then Read, Discard at the last operation '''
DEPTH = [2, 4, 7, 8, 9, 10]
def stim(DEPTH):
@instance
def _inst():
data_in = itertools.cycle(xrange(128))
yield self.reset()
yield self.fifo_state_check()
for i in range(1,DEPTH+1):
# Fill up to i and Discard
for w in range(1,i+1):
if (w != i):
yield self.fifo_write(data_in.next());
yield self.fifo_state_check()
else:
yield self.fifo_write(data_in.next(), wcmd=self.DISCARD)
yield self.fifo_state_check()
# Fill up to i and Commit
for w in range(1,i+1):
if (w != i):
yield self.fifo_write(data_in.next());
yield self.fifo_state_check()
else:
yield self.fifo_write(data_in.next(), wcmd=self.COMMIT)
yield self.fifo_state_check()
# Drain down to 0 and Discard
for r in range(1,i+1):
if (r != i):
yield self.fifo_read();
yield self.fifo_state_check()
else:
yield self.fifo_read(rcmd=self.DISCARD)
yield self.fifo_state_check()
# Drain down to 0 and Commit
for r in range(1,i+1):
if (r != i):
yield self.fifo_read();
yield self.fifo_state_check()
else:
yield self.fifo_read(rcmd=self.COMMIT)
yield self.fifo_state_check()
yield self.clk.posedge
raise StopSimulation
return _inst
getDut = sim.DUTer()
for s in self.simulators:
getDut.selectSimulator(s)
for dpt in DEPTH:
self.sfifo_model = sfifo_beh.sfifo_beh(dpt)
dut = getDut( fifo_speculative,
rst = self.rst,
clk = self.clk,
full = self.full,
we = self.we,
din = self.din,
empty = self.empty,
re = self.re,
dout = self.dout,
wr_commit = self.wr_commit,
wr_discard = self.wr_discard,
rd_commit = self.rd_commit,
rd_discard = self.rd_discard,
afull = self.afull,
aempty = self.aempty,
count = self.count,
afull_th = None,
aempty_th = None,
ovf = self.ovf,
udf = self.udf,
count_max = self.count_max,
depth = dpt,
width = None)
stm = stim(dpt)
Simulation(self.clkgen, dut, stm).run()
del dut, stm
def testWriteThenReadDiscardAfterTheLastOperation(self):
''' SFIFO: Write then Read, Discard after the last operation '''
DEPTH = [2, 4, 7, 8, 9, 10]
def stim(DEPTH):
@instance
def _inst():
data_in = itertools.cycle(xrange(128))
yield self.reset()
yield self.fifo_state_check()
for i in range(1,DEPTH+1):
# Fill up to i and Discard
for _ in range(1,i+1):
yield self.fifo_write(data_in.next());
yield self.fifo_state_check()
self.fifo_command(wcmd=self.DISCARD)
yield self.fifo_state_check()
# Fill up to i and Commit
for _ in range(1,i+1):
yield self.fifo_write(data_in.next());
yield self.fifo_state_check()
self.fifo_command(wcmd=self.COMMIT)
yield self.fifo_state_check()
# Drain down to 0 and Discard
for _ in range(1,i+1):
yield self.fifo_read();
yield self.fifo_state_check()
self.fifo_command(rcmd=self.DISCARD)
yield self.fifo_state_check()
# Drain down to 0 and Commit
for _ in range(1,i+1):
yield self.fifo_read();
yield self.fifo_state_check()
self.fifo_command(rcmd=self.COMMIT)
yield self.fifo_state_check()
yield self.clk.posedge
raise StopSimulation
return _inst
getDut = sim.DUTer()
for s in self.simulators:
getDut.selectSimulator(s)
for dpt in DEPTH:
self.sfifo_model = sfifo_beh.sfifo_beh(dpt)
dut = getDut( fifo_speculative,
rst = self.rst,
clk = self.clk,
full = self.full,
we = self.we,
din = self.din,
empty = self.empty,
re = self.re,
dout = self.dout,
wr_commit = self.wr_commit,
wr_discard = self.wr_discard,
rd_commit = self.rd_commit,
rd_discard = self.rd_discard,
afull = self.afull,
aempty = self.aempty,
count = self.count,
afull_th = None,
aempty_th = None,
ovf = self.ovf,
udf = self.udf,
count_max = self.count_max,
depth = dpt,
width = None)
stm = stim(dpt)
Simulation(self.clkgen, dut, stm).run()
del dut, stm
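# Editor's note: the tests above probe the speculative FIFO's transactional
# behaviour as far as it can be inferred from the stimuli. Writes and reads
# remain provisional until wr_commit/rd_commit is asserted, and a discard
# appears to roll the corresponding pointer back to the last committed state,
# which is why each "Discard" pass refills or re-reads the same span of data
# immediately afterwards.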
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testSFifo']
unittest.main()
```
#### File: myhdl_lib/test/test_ls_demux.py
```python
import unittest
from myhdl import *
from myhdl_lib.mux import ls_demux
import myhdl_lib.simulation as sim
import random
class TestLsDemux(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.simulators = ["myhdl", "icarus"]
def testLsDemux5(self):
''' LS_DEMUX: 5 outputs '''
DMAX = 100
NUM_OUTPUTS = 5
def ls_demux_top(sel,
di_0, di_1, di_2,
do_00, do_01, do_02,
do_10, do_11, do_12,
do_20, do_21, do_22,
do_30, do_31, do_32,
do_40, do_41, do_42):
''' Needed when ls_demux is co-simulated as top level'''
ls_di_0 = Signal(bool(0))
ls_di_1 = Signal(intbv(0,min=0,max=DMAX))
ls_di_2 = Signal(intbv(0,min=-DMAX,max=DMAX))
lsls_do_00 = Signal(bool(0))
lsls_do_01 = Signal(intbv(0,min=0,max=DMAX))
lsls_do_02 = Signal(intbv(0,min=-DMAX,max=DMAX))
lsls_do_10 = Signal(bool(0))
lsls_do_11 = Signal(intbv(0,min=0,max=DMAX))
lsls_do_12 = Signal(intbv(0,min=-DMAX,max=DMAX))
lsls_do_20 = Signal(bool(0))
lsls_do_21 = Signal(intbv(0,min=0,max=DMAX))
lsls_do_22 = Signal(intbv(0,min=-DMAX,max=DMAX))
lsls_do_30 = Signal(bool(0))
lsls_do_31 = Signal(intbv(0,min=0,max=DMAX))
lsls_do_32 = Signal(intbv(0,min=-DMAX,max=DMAX))
lsls_do_40 = Signal(bool(0))
lsls_do_41 = Signal(intbv(0,min=0,max=DMAX))
lsls_do_42 = Signal(intbv(0,min=-DMAX,max=DMAX))
@always_comb
def _assign():
ls_di_0.next = di_0
ls_di_1.next = di_1
ls_di_2.next = di_2
do_00.next = lsls_do_00
do_01.next = lsls_do_01
do_02.next = lsls_do_02
do_10.next = lsls_do_10
do_11.next = lsls_do_11
do_12.next = lsls_do_12
do_20.next = lsls_do_20
do_21.next = lsls_do_21
do_22.next = lsls_do_22
do_30.next = lsls_do_30
do_31.next = lsls_do_31
do_32.next = lsls_do_32
do_40.next = lsls_do_40
do_41.next = lsls_do_41
do_42.next = lsls_do_42
lsls_do = [[lsls_do_00, lsls_do_01, lsls_do_02],
[lsls_do_10, lsls_do_11, lsls_do_12],
[lsls_do_20, lsls_do_21, lsls_do_22],
[lsls_do_30, lsls_do_31, lsls_do_32],
[lsls_do_40, lsls_do_41, lsls_do_42]]
ls_di = [ls_di_0, ls_di_1, ls_di_2]
_ls_demux = ls_demux(sel=sel, ls_di=ls_di, lsls_do=lsls_do)
return instances()
sel = Signal(intbv(0,min=0,max=NUM_OUTPUTS))
ls_di = [Signal(bool(0)),
Signal(intbv(0,min=0,max=DMAX)),
Signal(intbv(0,min=-DMAX,max=DMAX))]
lsls_do = [[Signal(bool(0)),
Signal(intbv(0,min=0,max=DMAX)),
Signal(intbv(0,min=-DMAX,max=DMAX))] for _ in range(NUM_OUTPUTS)]
argl = {"sel":sel,
"di_0":ls_di[0],"di_1":ls_di[1],"di_2":ls_di[2],
"do_00":lsls_do[0][0], "do_01":lsls_do[0][1], "do_02":lsls_do[0][2],
"do_10":lsls_do[1][0], "do_11":lsls_do[1][1], "do_12":lsls_do[1][2],
"do_20":lsls_do[2][0], "do_21":lsls_do[2][1], "do_22":lsls_do[2][2],
"do_30":lsls_do[3][0], "do_31":lsls_do[3][1], "do_32":lsls_do[3][2],
"do_40":lsls_do[4][0], "do_41":lsls_do[4][1], "do_42":lsls_do[4][2]}
def stim():
@instance
def _inst():
sel.next = 0
for i in range(3):
ls_di[i].next = 0
yield delay(10)
for _ in range(2):
d = [[random.randint(0,1),
random.randint(0,DMAX-1),
random.randint(-DMAX,DMAX-1)] for _ in range(NUM_OUTPUTS)]
for s in range(NUM_OUTPUTS):
for j in range(3):
ls_di[j].next = d[s][j]
sel.next = s
yield delay(10)
for o in range(NUM_OUTPUTS):
if s == o:
assert d[s][0] == lsls_do[o][0], "Demux output {} (sel={}): expected {}, detected {}".format(o, sel, d[s], [int(x) for x in lsls_do[o]])
assert d[s][1] == lsls_do[o][1], "Demux output {} (sel={}): expected {}, detected {}".format(o, sel, d[s], [int(x) for x in lsls_do[o]])
assert d[s][2] == lsls_do[o][2], "Demux output {} (sel={}): expected {}, detected {}".format(o, sel, d[s], [int(x) for x in lsls_do[o]])
else:
assert 0 == lsls_do[o][0], "Demux output {} (sel={}): expected {}, detected {}".format(o, sel, 3*[0], [int(x) for x in lsls_do[o]])
assert 0 == lsls_do[o][1], "Demux output {} (sel={}): expected {}, detected {}".format(o, sel, 3*[0], [int(x) for x in lsls_do[o]])
assert 0 == lsls_do[o][2], "Demux output {} (sel={}): expected {}, detected {}".format(o, sel, 3*[0], [int(x) for x in lsls_do[o]])
yield delay(10)
raise StopSimulation
return _inst
getDut = sim.DUTer()
for s in self.simulators:
getDut.selectSimulator(s)
dut=getDut(ls_demux_top, **argl)
stm = stim()
Simulation( dut, stm).run()
del dut, stm
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
```
|
{
"source": "jdavidberger/vive_poses_ros",
"score": 2
}
|
#### File: jdavidberger/vive_poses_ros/VivePosesRos.py
```python
import openvr
import rospy
import sys
import tf
from geometry_msgs.msg import PoseStamped
class VivePosesRosPublisher:
def __init__(self, topic_prefix):
self.vr_system = openvr.init(openvr.VRApplication_Utility)
self.topic_prefix = topic_prefix
self.publishers = {};
def publisher(self, name):
if name in self.publishers:
return self.publishers[name]
self.publishers.update({name: rospy.Publisher(self.topic_prefix + name, PoseStamped, queue_size=1000) })
return self.publishers[name]
def idx_to_name(self, idx):
if idx == 0:
return "HMD"
return "obj" + str(idx)
def run(self):
seq=0
device_poses = (openvr.TrackedDevicePose_t * openvr.k_unMaxTrackedDeviceCount)()
rate = rospy.Rate(500)
while not rospy.is_shutdown():
self.vr_system.getDeviceToAbsoluteTrackingPose(openvr.TrackingUniverseRawAndUncalibrated, 0, openvr.k_unMaxTrackedDeviceCount, device_poses)
rate.sleep()
for i in range(0, len(device_poses)):
pose = device_poses[i]
if pose.bPoseIsValid:
pose_msg = PoseStamped()
pose_msg.header.seq = seq
pose_msg.header.stamp = rospy.Time.now()
pose_msg.header.frame_id = "openvr_world"
mat=pose.mDeviceToAbsoluteTracking
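# OpenVR reports the pose as a 3x4 row-major matrix (rotation plus translation);
# pad it with a [0, 0, 0, 1] row to form the 4x4 homogeneous transform that
# tf.transformations.quaternion_from_matrix expects.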
mat44 = [
[ mat[0][0], mat[0][1], mat[0][2], mat[0][3] ],
[ mat[1][0], mat[1][1], mat[1][2], mat[1][3] ],
[ mat[2][0], mat[2][1], mat[2][2], mat[2][3] ],
[ 0, 0, 0, 1 ]
]
pose_msg.pose.position.x = mat[0][3]
pose_msg.pose.position.y = mat[1][3]
pose_msg.pose.position.z = mat[2][3]
q=tf.transformations.quaternion_from_matrix(mat44)
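# tf.transformations returns quaternions in [x, y, z, w] order, hence the
# index mapping below.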
pose_msg.pose.orientation.w = q[3]
pose_msg.pose.orientation.x = q[0]
pose_msg.pose.orientation.y = q[1]
pose_msg.pose.orientation.z = q[2]
seq = seq + 1
self.publisher(self.idx_to_name(i)).publish(pose_msg)
if __name__ == '__main__':
rospy.init_node("VivePosesRos", anonymous=True)
topic_prefix = ''
if len(sys.argv) > 1:
topic_prefix = sys.argv[1]
publisher = VivePosesRosPublisher(topic_prefix);
publisher.run()
pass
```
|
{
"source": "JDavidGuzman/sam-python-rdsdata",
"score": 2
}
|
#### File: sam-python-rdsdata/get_course/app.py
```python
import json
import os
import boto3
# Connect to the database
rdsData = boto3.client('rds-data')
parameters = boto3.client('ssm')
stage = os.environ['STAGE']
cluster_arn = os.environ['CLUSTERARN']
secret_arn = os.environ['SECRETARN']
dbname_param = parameters.get_parameter(Name='/school-vue-db-serverless/'+stage+'/database/name')
def lambda_handler(event, context):
school_id = event['pathParameters']['school_id']
course_id = event['pathParameters']['course_id']
sql1 = f'SELECT Teacher_ID, Firstname, Lastname, Email FROM teachers WHERE School_ID={school_id} AND Level="{course_id}"'
sql2 = f'SELECT Student_ID, Firstname, Lastname, Email FROM students WHERE School_ID={school_id} AND Level="{course_id}"'
teacher = rdsData.execute_statement(
resourceArn = cluster_arn,
secretArn = secret_arn,
database = dbname_param['Parameter']['Value'],
sql = sql1)
students = rdsData.execute_statement(
resourceArn = cluster_arn,
secretArn = secret_arn,
database = dbname_param['Parameter']['Value'],
sql = sql2)
return {
"statusCode": 200,
"headers": {"Access-Control-Allow-Origin": "*"},
"body": json.dumps({
"teacher": teacher['records'],
"students": students['records']
})
}
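# Editor's note (hedged sketch, not part of the original handler): the handler
# above interpolates path parameters straight into the SQL string, which is
# open to SQL injection. The RDS Data API also accepts bound parameters via
# named ":placeholders" and a `parameters` list; the helper below sketches the
# teachers query that way. The function name is hypothetical and nothing calls it.
def get_course_parameterized(school_id, course_id):
    """Hypothetical helper: the teachers query rewritten with bound parameters."""
    sql = ('SELECT Teacher_ID, Firstname, Lastname, Email FROM teachers '
           'WHERE School_ID = :school_id AND Level = :course_id')
    return rdsData.execute_statement(
        resourceArn=cluster_arn,
        secretArn=secret_arn,
        database=dbname_param['Parameter']['Value'],
        sql=sql,
        parameters=[
            {'name': 'school_id', 'value': {'longValue': int(school_id)}},
            {'name': 'course_id', 'value': {'stringValue': course_id}},
        ])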
```
|
{
"source": "jdavidheiser/amundsen",
"score": 2
}
|
#### File: models/dashboard/dashboard_chart.py
```python
import logging
from typing import (
Any, Iterator, Optional, Union,
)
from amundsen_rds.models import RDSModel
from amundsen_rds.models.dashboard import DashboardChart as RDSDashboardChart
from databuilder.models.dashboard.dashboard_query import DashboardQuery
from databuilder.models.graph_node import GraphNode
from databuilder.models.graph_relationship import GraphRelationship
from databuilder.models.graph_serializable import GraphSerializable
from databuilder.models.table_serializable import TableSerializable
LOGGER = logging.getLogger(__name__)
class DashboardChart(GraphSerializable, TableSerializable):
"""
A model that encapsulates a Dashboard's charts
"""
DASHBOARD_CHART_LABEL = 'Chart'
DASHBOARD_CHART_KEY_FORMAT = '{product}_dashboard://{cluster}.{dashboard_group_id}/' \
'{dashboard_id}/query/{query_id}/chart/{chart_id}'
CHART_RELATION_TYPE = 'HAS_CHART'
CHART_REVERSE_RELATION_TYPE = 'CHART_OF'
def __init__(self,
dashboard_group_id: Optional[str],
dashboard_id: Optional[str],
query_id: str,
chart_id: str,
chart_name: Optional[str] = None,
chart_type: Optional[str] = None,
chart_url: Optional[str] = None,
product: Optional[str] = '',
cluster: str = 'gold',
**kwargs: Any
) -> None:
self._dashboard_group_id = dashboard_group_id
self._dashboard_id = dashboard_id
self._query_id = query_id
self._chart_id = chart_id if chart_id else chart_name
self._chart_name = chart_name
self._chart_type = chart_type
self._chart_url = chart_url
self._product = product
self._cluster = cluster
self._node_iterator = self._create_node_iterator()
self._relation_iterator = self._create_relation_iterator()
self._record_iterator = self._create_record_iterator()
def create_next_node(self) -> Union[GraphNode, None]:
try:
return next(self._node_iterator)
except StopIteration:
return None
def _create_node_iterator(self) -> Iterator[GraphNode]:
node_attributes = {
'id': self._chart_id
}
if self._chart_name:
node_attributes['name'] = self._chart_name
if self._chart_type:
node_attributes['type'] = self._chart_type
if self._chart_url:
node_attributes['url'] = self._chart_url
node = GraphNode(
key=self._get_chart_node_key(),
label=DashboardChart.DASHBOARD_CHART_LABEL,
attributes=node_attributes
)
yield node
def create_next_relation(self) -> Union[GraphRelationship, None]:
try:
return next(self._relation_iterator)
except StopIteration:
return None
def _create_relation_iterator(self) -> Iterator[GraphRelationship]:
relationship = GraphRelationship(
start_label=DashboardQuery.DASHBOARD_QUERY_LABEL,
start_key=DashboardQuery.DASHBOARD_QUERY_KEY_FORMAT.format(
product=self._product,
cluster=self._cluster,
dashboard_group_id=self._dashboard_group_id,
dashboard_id=self._dashboard_id,
query_id=self._query_id
),
end_label=DashboardChart.DASHBOARD_CHART_LABEL,
end_key=self._get_chart_node_key(),
type=DashboardChart.CHART_RELATION_TYPE,
reverse_type=DashboardChart.CHART_REVERSE_RELATION_TYPE,
attributes={}
)
yield relationship
def _get_chart_node_key(self) -> str:
return DashboardChart.DASHBOARD_CHART_KEY_FORMAT.format(
product=self._product,
cluster=self._cluster,
dashboard_group_id=self._dashboard_group_id,
dashboard_id=self._dashboard_id,
query_id=self._query_id,
chart_id=self._chart_id
)
def create_next_record(self) -> Union[RDSModel, None]:
try:
return next(self._record_iterator)
except StopIteration:
return None
def _create_record_iterator(self) -> Iterator[RDSModel]:
record = RDSDashboardChart(
rk=self._get_chart_node_key(),
id=self._chart_id,
query_rk=DashboardQuery.DASHBOARD_QUERY_KEY_FORMAT.format(
product=self._product,
cluster=self._cluster,
dashboard_group_id=self._dashboard_group_id,
dashboard_id=self._dashboard_id,
query_id=self._query_id
)
)
if self._chart_name:
record.name = self._chart_name
if self._chart_type:
record.type = self._chart_type
if self._chart_url:
record.url = self._chart_url
yield record
def __repr__(self) -> str:
return f'DashboardChart({self._dashboard_group_id!r}, {self._dashboard_id!r}, ' \
f'{self._query_id!r}, {self._chart_id!r}, {self._chart_name!r}, {self._chart_type!r}, ' \
f'{self._chart_url!r}, {self._product!r}, {self._cluster!r})'
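# Editor's sketch (hedged, not part of the original module): shows how the
# chart key is assembled from the dashboard, query and chart identifiers.
# Every literal value below is made up for illustration.
if __name__ == '__main__':
    _example = DashboardChart(
        dashboard_group_id='analytics',
        dashboard_id='daily_kpis',
        query_id='q1',
        chart_id='c1',
        chart_name='Revenue by day',
        chart_type='line',
        chart_url='https://example.com/chart/c1',
        product='mode',
    )
    # Expected key: mode_dashboard://gold.analytics/daily_kpis/query/q1/chart/c1
    print(_example._get_chart_node_key())
    print(_example)  # uses __repr__ defined above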
```
#### File: databuilder/transformer/base_transformer.py
```python
import abc
from typing import (
Any, Iterable, Iterator, List, Optional,
)
from pyhocon import ConfigTree
from databuilder import Scoped
class Transformer(Scoped):
"""
A transformer transforms a record
"""
@abc.abstractmethod
def init(self, conf: ConfigTree) -> None:
pass
@abc.abstractmethod
def transform(self, record: Any) -> Any:
pass
class NoopTransformer(Transformer):
"""
A no-op transformer
"""
def init(self, conf: ConfigTree) -> None:
pass
def transform(self, record: Any) -> Any:
return record
def get_scope(self) -> str:
pass
class ChainedTransformer(Transformer):
"""
A chained transformer that iterates transformers and transforms a record.
Transformers implemented using generator functions can yield multiple records,
which all get passed to the next transformer.
Returning None from a transformer filters the record out.
"""
def __init__(self,
transformers: Iterable[Transformer],
is_init_transformers: Optional[bool] = False) -> None:
self.transformers = transformers
self.is_init_transformers = is_init_transformers
def init(self, conf: ConfigTree) -> None:
if self.is_init_transformers:
for transformer in self.transformers:
transformer.init(Scoped.get_scoped_conf(conf, transformer.get_scope()))
def transform(self, record: Any) -> Any:
records = [record]
for t in self.transformers:
new_records: List[Any] = []
for r in records:
result = t.transform(r)
# Get all records if the transformer returns an Iterator.
if isinstance(result, Iterator):
new_records += list(result)
# Filter the record if it is None
elif result is not None:
new_records.append(result)
records = new_records
yield from records
def get_scope(self) -> str:
return 'transformer.chained'
def close(self) -> None:
for t in self.transformers:
t.close()
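# Editor's sketch (hedged, not part of the original module): a minimal
# demonstration of ChainedTransformer's record handling. A transformer that
# returns None filters the record out; every other result is passed to the
# next transformer in the chain. Both transformer classes are hypothetical.
if __name__ == '__main__':
    class DropEmpty(Transformer):
        def init(self, conf: ConfigTree) -> None:
            pass

        def transform(self, record: Any) -> Any:
            return record or None  # a falsy record becomes None and is dropped

        def get_scope(self) -> str:
            return 'transformer.drop_empty'

    class Upper(Transformer):
        def init(self, conf: ConfigTree) -> None:
            pass

        def transform(self, record: Any) -> Any:
            return record.upper()

        def get_scope(self) -> str:
            return 'transformer.upper'

    chain = ChainedTransformer(transformers=[DropEmpty(), Upper()])
    print(list(chain.transform('abc')))  # ['ABC']
    print(list(chain.transform('')))     # [] (record dropped by DropEmpty)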
```
#### File: unit/extractor/test_feast_extractor.py
```python
import json
import re
import unittest
from feast.entity import Entity
from feast.feature_table import FeatureTable
from mock import MagicMock, call
from pyhocon import ConfigFactory
from databuilder import Scoped
from databuilder.extractor.feast_extractor import FeastExtractor
from databuilder.models.table_metadata import (
ColumnMetadata, DescriptionMetadata, TableMetadata,
)
class TestFeastExtractor(unittest.TestCase):
def test_no_feature_tables_registered(self) -> None:
self._init_extractor()
self.extractor._client.list_projects.return_value = ["default"]
self.assertIsNone(self.extractor.extract())
def test_every_project_is_scanned(self) -> None:
self._init_extractor()
self.extractor._client.list_projects.return_value = ["default", "dev", "prod"]
list_feature_tables_mock = self.extractor._client.list_feature_tables
list_feature_tables_mock.return_value = []
self.assertIsNone(self.extractor.extract())
list_feature_tables_mock.assert_has_calls(
[
call(project="default"),
call(project="dev"),
call(project="prod"),
]
)
def test_feature_table_extraction(self) -> None:
self._init_extractor(programmatic_description_enabled=False)
self.extractor._client.list_projects.return_value = ["default"]
self._mock_feature_table()
table = self.extractor.extract()
self.extractor._client.get_entity.assert_called_with(
"driver_id", project="default"
)
expected = TableMetadata(
database="feast",
cluster="unittest-feast-instance",
schema="default",
name="driver_trips",
description=None,
columns=[
ColumnMetadata(
"driver_id", "Internal identifier of the driver", "INT64", 0
),
ColumnMetadata("trips_today", None, "INT32", 1),
],
)
self.assertEqual(expected.__repr__(), table.__repr__())
self.assertIsNone(self.extractor.extract())
def test_feature_table_extraction_with_description_batch(self) -> None:
self._init_extractor(programmatic_description_enabled=True)
self.extractor._client.list_projects.return_value = ["default"]
self._mock_feature_table(labels={"label1": "value1"})
feature_table_definition = self.extractor.extract()
assert isinstance(feature_table_definition, TableMetadata)
description = self.extractor.extract()
assert isinstance(description, TableMetadata)
expected = DescriptionMetadata(
TestFeastExtractor._strip_margin(
"""* Created at **2020-01-01 00:00:00**
|* Labels:
| * label1: **value1**
|"""
),
"feature_table_details",
)
self.assertEqual(expected.__repr__(), description.description.__repr__())
batch_source = self.extractor.extract()
assert isinstance(batch_source, TableMetadata)
expected = DescriptionMetadata(
TestFeastExtractor._strip_margin(
"""```
|fileOptions:
| fileFormat:
| parquetFormat: {}
| fileUrl: file:///some/location
|type: BATCH_FILE
|```"""
),
"batch_source",
)
self.assertEqual(expected.__repr__(), batch_source.description.__repr__())
self.assertIsNone(self.extractor.extract())
def test_feature_table_extraction_with_description_stream(self) -> None:
self._init_extractor(programmatic_description_enabled=True)
self.extractor._client.list_projects.return_value = ["default"]
self._mock_feature_table(add_stream_source=True)
feature_table_definition = self.extractor.extract()
assert isinstance(feature_table_definition, TableMetadata)
description = self.extractor.extract()
assert isinstance(description, TableMetadata)
expected = DescriptionMetadata(
TestFeastExtractor._strip_margin(
"""* Created at **2020-01-01 00:00:00**
|"""
),
"feature_table_details",
)
self.assertEqual(expected.__repr__(), description.description.__repr__())
batch_source = self.extractor.extract()
assert isinstance(batch_source, TableMetadata)
expected = DescriptionMetadata(
TestFeastExtractor._strip_margin(
"""```
|fileOptions:
| fileFormat:
| parquetFormat: {}
| fileUrl: file:///some/location
|type: BATCH_FILE
|```"""
),
"batch_source",
)
self.assertEqual(expected.__repr__(), batch_source.description.__repr__())
stream_source = self.extractor.extract()
assert isinstance(stream_source, TableMetadata)
expected = DescriptionMetadata(
TestFeastExtractor._strip_margin(
"""```
|createdTimestampColumn: datetime
|eventTimestampColumn: datetime
|kafkaOptions:
| bootstrapServers: broker1
| messageFormat:
| avroFormat:
| schemaJson: '{"type": "record", "name": "DriverTrips", "fields": [{"name": "driver_id",
| "type": "long"}, {"name": "trips_today", "type": "int"}, {"name": "datetime",
| "type": {"type": "long", "logicalType": "timestamp-micros"}}]}'
| topic: driver_trips
|type: STREAM_KAFKA
|```"""
),
"stream_source",
)
self.assertEqual(expected.__repr__(), stream_source.description.__repr__())
self.assertIsNone(self.extractor.extract())
def _init_extractor(self, programmatic_description_enabled: bool = True) -> None:
conf = {
f'extractor.feast.{FeastExtractor.FEAST_ENDPOINT_CONFIG_KEY}': 'feast-core.example.com:6565',
f'extractor.feast.{FeastExtractor.FEAST_SERVICE_CONFIG_KEY}': 'unittest-feast-instance',
f'extractor.feast.{FeastExtractor.DESCRIBE_FEATURE_TABLES}': programmatic_description_enabled,
}
self.extractor = FeastExtractor()
self.extractor.init(
Scoped.get_scoped_conf(
conf=ConfigFactory.from_dict(conf), scope=self.extractor.get_scope()
)
)
self.extractor._client = MagicMock(return_value=None)
@staticmethod
def _strip_margin(text: str) -> str:
return re.sub("\n[ \t]*\\|", "\n", text)
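# Editor's note: _strip_margin mirrors Scala-style stripMargin; it removes the
# leading whitespace and '|' from each continuation line, so "a\n    |b"
# becomes "a\nb", keeping the expected multi-line strings in the tests above readable.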
def _mock_feature_table(
self, labels: dict = {}, add_stream_source: bool = False
) -> None:
table_spec = {
"name": "driver_trips",
"entities": ["driver_id"],
"features": [{"name": "trips_today", "valueType": "INT32"}],
"labels": labels,
"batchSource": {
"type": "BATCH_FILE",
"fileOptions": {
"fileFormat": {"parquetFormat": {}},
"fileUrl": "file:///some/location",
},
},
}
if add_stream_source:
avro_schema_json = json.dumps(
{
"type": "record",
"name": "DriverTrips",
"fields": [
{"name": "driver_id", "type": "long"},
{"name": "trips_today", "type": "int"},
{
"name": "datetime",
"type": {"type": "long", "logicalType": "timestamp-micros"},
},
],
}
)
table_spec["streamSource"] = {
"type": "STREAM_KAFKA",
"eventTimestampColumn": "datetime",
"createdTimestampColumn": "datetime",
"kafkaOptions": {
"bootstrapServers": "broker1",
"topic": "driver_trips",
"messageFormat": {
"avroFormat": {
"schemaJson": avro_schema_json,
}
},
},
}
self.extractor._client.list_feature_tables.return_value = [
FeatureTable.from_dict(
{
"spec": table_spec,
"meta": {"createdTimestamp": "2020-01-01T00:00:00Z"},
}
)
]
self.extractor._client.get_entity.return_value = Entity.from_dict(
{
"spec": {
"name": "driver_id",
"valueType": "INT64",
"description": "Internal identifier of the driver",
}
}
)
if __name__ == "__main__":
unittest.main()
```
#### File: unit/extractor/test_pandas_profiling_column_stats_extractor.py
```python
import unittest
from typing import Any
from mock import MagicMock
from pyhocon import ConfigFactory
from databuilder import Scoped
from databuilder.extractor.pandas_profiling_column_stats_extractor import PandasProfilingColumnStatsExtractor
from databuilder.models.table_stats import TableColumnStats
class TestPandasProfilingColumnStatsExtractor(unittest.TestCase):
report_data = {
'analysis': {
'date_start': '2021-05-17 10:10:15.142044'
},
'variables': {
'column_1': {
'mean': 5.120,
'max': 15.23456
},
'column_2': {
'mean': 10
}
}
}
def setUp(self) -> None:
config = {'extractor.pandas_profiling.file_path': None}
config = ConfigFactory.from_dict({**config, **self._common_params()})
self.config = config
@staticmethod
def _common_params() -> Any:
return {'extractor.pandas_profiling.table_name': 'table_name',
'extractor.pandas_profiling.schema_name': 'schema_name',
'extractor.pandas_profiling.database_name': 'database_name',
'extractor.pandas_profiling.cluster_name': 'cluster_name'}
def _get_extractor(self) -> Any:
extractor = PandasProfilingColumnStatsExtractor()
extractor.init(Scoped.get_scoped_conf(conf=self.config, scope=extractor.get_scope()))
return extractor
def test_extractor(self) -> None:
extractor = self._get_extractor()
extractor._load_report = MagicMock(return_value=self.report_data)
common = {
'db': self._common_params().get('extractor.pandas_profiling.database_name'),
'schema': self._common_params().get('extractor.pandas_profiling.schema_name'),
'table_name': self._common_params().get('extractor.pandas_profiling.table_name'),
'cluster': self._common_params().get('extractor.pandas_profiling.cluster_name'),
'start_epoch': '1621246215',
'end_epoch': '0'
}
compare_params = {'table', 'schema', 'db', 'col_name', 'start_epoch',
'end_epoch', 'cluster', 'stat_type', 'stat_val'}
expected = [
{x: spec[x] for x in compare_params if x in spec} for spec in
[
TableColumnStats(**{**dict(stat_name='Mean', stat_val='5.12', col_name='column_1'), **common}).__dict__,
TableColumnStats(
**{**dict(stat_name='Maximum', stat_val='15.235', col_name='column_1'), **common}).__dict__,
TableColumnStats(**{**dict(stat_name='Mean', stat_val='10.0', col_name='column_2'), **common}).__dict__,
]
]
result = []
while True:
stat = extractor.extract()
if stat:
result.append(stat)
else:
break
result_spec = [{x: spec.__dict__[x] for x in compare_params if x in spec.__dict__} for spec in result]
for r in result:
self.assertIsInstance(r, TableColumnStats)
self.assertListEqual(expected, result_spec)
```
#### File: amundsen_application/models/issue_results.py
```python
from amundsen_application.models.data_issue import DataIssue
from typing import List, Dict
class IssueResults:
def __init__(self,
issues: List[DataIssue],
total: int,
all_issues_url: str) -> None:
"""
Returns an object representing results from an issue tracker.
:param issues: Issues in the issue tracker matching the requested table
:param total: How many issues in all are associated with this table
:param all_issues_url: URL to all issues in the issue tracker
"""
self.issues = issues
self.total = total
self.all_issues_url = all_issues_url
def serialize(self) -> Dict:
return {'issues': [issue.serialize() for issue in self.issues],
'total': self.total,
'all_issues_url': self.all_issues_url}
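# Editor's sketch (hedged, not part of the original module): serialize()
# flattens the result set into the JSON shape returned to the frontend; with
# no issues it still reports the total and the tracker URL. The URL below is
# a placeholder.
if __name__ == '__main__':
    _empty = IssueResults(issues=[], total=0, all_issues_url='https://tracker.example.com/issues')
    print(_empty.serialize())
    # {'issues': [], 'total': 0, 'all_issues_url': 'https://tracker.example.com/issues'}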
```
#### File: metadata_service/api/tag.py
```python
from http import HTTPStatus
from typing import Any, Iterable, Mapping, Tuple, Union
from flasgger import swag_from
from flask import current_app as app
from flask_restful import Resource, fields, marshal
from metadata_service.entity.resource_type import ResourceType
from metadata_service.exception import NotFoundException
from metadata_service.proxy import get_proxy_client
from metadata_service.proxy.base_proxy import BaseProxy
tag_fields = {
'tag_name': fields.String,
'tag_count': fields.Integer
}
tag_usage_fields = {
'tag_usages': fields.List(fields.Nested(tag_fields))
}
BADGE_TYPE = 'badge'
class TagAPI(Resource):
def __init__(self) -> None:
self.client = get_proxy_client()
super(TagAPI, self).__init__()
@swag_from('swagger_doc/tag/tag_get.yml')
def get(self) -> Iterable[Union[Mapping, int, None]]:
"""
API to fetch all the existing tags with usage.
"""
tag_usages = self.client.get_tags()
return marshal({'tag_usages': tag_usages}, tag_usage_fields), HTTPStatus.OK
class TagCommon:
def __init__(self, client: BaseProxy) -> None:
self.client = client
def put(self, id: str, resource_type: ResourceType,
tag: str, tag_type: str = 'default') -> Tuple[Any, HTTPStatus]:
"""
Method to add a tag to an existing resource.
:param id:
:param resource_type:
:param tag:
:param tag_type:
:return:
"""
whitelist_badges = app.config.get('WHITELIST_BADGES', [])
if tag_type == BADGE_TYPE:
return \
{'message': 'Badges should be added using /badges/, tag_type=badge no longer valid'}, \
HTTPStatus.NOT_ACCEPTABLE
else:
for badge in whitelist_badges:
if tag == badge.badge_name:
return \
{'message': 'The tag {} for id {} with type {} and resource_type {} '
'is not added successfully as tag '
'for it is reserved for badge'.format(tag,
id,
tag_type,
resource_type.name)}, \
HTTPStatus.CONFLICT
try:
self.client.add_tag(id=id,
tag=tag,
tag_type=tag_type,
resource_type=resource_type)
return {'message': 'The tag {} for id {} with type {} and resource_type {} '
'is added successfully'.format(tag,
id,
tag_type,
resource_type.name)}, HTTPStatus.OK
except NotFoundException:
return \
{'message': 'The tag {} for table_uri {} with type {} and resource_type {} '
'is not added successfully'.format(tag,
id,
tag_type,
resource_type.name)}, \
HTTPStatus.NOT_FOUND
def delete(self, id: str, tag: str,
resource_type: ResourceType, tag_type: str = 'default') -> Tuple[Any, HTTPStatus]:
"""
Method to remove an association between a given tag and a resource.
:param id:
:param resource_type:
:param tag:
:param tag_type:
:return:
"""
try:
self.client.delete_tag(id=id,
tag=tag,
tag_type=tag_type,
resource_type=resource_type)
return {'message': 'The tag {} for id {} with type {} and resource_type {} '
'is deleted successfully'.format(tag,
id,
tag_type,
resource_type.name)}, HTTPStatus.OK
except NotFoundException:
return \
{'message': 'The tag {} for id {} with type {} and resource_type {} '
'is not deleted successfully'.format(tag,
id,
tag_type,
resource_type.name)}, \
HTTPStatus.NOT_FOUND
```
#### File: api/feature/test_feature_tag_api.py
```python
from http import HTTPStatus
from metadata_service.entity.resource_type import ResourceType
from metadata_service.exception import NotFoundException
from tests.unit.api.feature.feature_test_case import FeatureTestCase
FEATURE_URI = 'test_feature_uri'
TAG = 'underage_wizards'
class TestFeatureTagAPI(FeatureTestCase):
def test_should_update_tag(self) -> None:
response = self.app.test_client().put(f'/feature/{FEATURE_URI}/tag/{TAG}')
self.assertEqual(response.status_code, HTTPStatus.OK)
self.mock_proxy.add_tag.assert_called_with(id=FEATURE_URI,
tag=TAG,
tag_type='default',
resource_type=ResourceType.Feature)
def test_should_fail_to_update_tag_when_feature_not_found(self) -> None:
self.mock_proxy.add_tag.side_effect = NotFoundException(message='cannot find feature')
response = self.app.test_client().put(f'/feature/{FEATURE_URI}/tag/{TAG}')
self.assertEqual(response.status_code, HTTPStatus.NOT_FOUND)
def test_should_delete_tag(self) -> None:
response = self.app.test_client().delete(f'/feature/{FEATURE_URI}/tag/{TAG}')
self.assertEqual(response.status_code, HTTPStatus.OK)
self.mock_proxy.delete_tag.assert_called_with(id=FEATURE_URI,
tag=TAG,
tag_type='default',
resource_type=ResourceType.Feature)
def test_should_fail_to_delete_tag_when_feature_not_found(self) -> None:
self.mock_proxy.delete_tag.side_effect = NotFoundException(message='cannot find feature')
response = self.app.test_client().delete(f'/feature/{FEATURE_URI}/tag/{TAG}')
self.assertEqual(response.status_code, HTTPStatus.NOT_FOUND)
```
#### File: search_service/models/table.py
```python
import time
from typing import (
List, Optional, Set,
)
import attr
from marshmallow3_annotations.ext.attrs import AttrsSchema
from search_service.models.tag import Tag
from .base import Base
@attr.s(auto_attribs=True, kw_only=True)
class Table(Base):
"""
This represents the part of a table stored in the search proxy
"""
id: str
database: str
cluster: str
schema: str
name: str
key: str
display_name: Optional[str] = None
tags: Optional[List[Tag]] = None
badges: Optional[List[Tag]] = None
description: Optional[str] = None
last_updated_timestamp: int = int(time.time())
# The following properties are lightly-transformed properties from the normal table object:
column_names: Optional[List[str]] = None
column_descriptions: Optional[List[str]] = None
programmatic_descriptions: Optional[List[str]] = None
# The following are search-only properties:
total_usage: int = 0
schema_description: Optional[str] = attr.ib(default=None)
def get_id(self) -> str:
return self.id
def get_attrs_dict(self) -> dict:
attrs_dict = self.__dict__.copy()
if self.tags is not None:
attrs_dict['tags'] = [str(tag) for tag in self.tags]
else:
attrs_dict['tags'] = None
if self.badges is not None:
attrs_dict['badges'] = [str(badge) for badge in self.badges]
else:
attrs_dict['badges'] = None
return attrs_dict
@classmethod
def get_attrs(cls) -> Set:
return {
'id',
'name',
'key',
'description',
'cluster',
'database',
'schema',
'column_names',
'tags',
'badges',
'last_updated_timestamp',
'display_name',
'programmatic_descriptions',
'total_usage',
'schema_description'
}
@staticmethod
def get_type() -> str:
return 'table'
class TableSchema(AttrsSchema):
class Meta:
target = Table
register_as_scheme = True
@attr.s(auto_attribs=True, kw_only=True)
class SearchTableResult:
total_results: int = attr.ib()
results: List[Table] = attr.ib(factory=list)
class SearchTableResultSchema(AttrsSchema):
class Meta:
target = SearchTableResult
register_as_scheme = True
```
#### File: unit/proxy/test_elasticsearch.py
```python
import unittest
from typing import ( # noqa: F401
Any, Iterable, List,
)
from unittest.mock import MagicMock, patch
from search_service import create_app
from search_service.api.table import TABLE_INDEX
from search_service.api.user import USER_INDEX
from search_service.models.dashboard import Dashboard
from search_service.models.search_result import SearchResult
from search_service.models.table import Table
from search_service.models.tag import Tag
from search_service.models.user import User
from search_service.proxy import get_proxy_client
from search_service.proxy.elasticsearch import ElasticsearchProxy
class MockSearchResult:
def __init__(self, *,
name: str,
key: str,
description: str,
cluster: str,
database: str,
schema: str,
column_names: Iterable[str],
tags: Iterable[Tag],
badges: Iterable[Tag],
last_updated_timestamp: int,
programmatic_descriptions: List[str] = None) -> None:
self.name = name
self.key = key
self.description = description
self.cluster = cluster
self.database = database
self.schema = schema
self.column_names = column_names
self.tags = tags
self.badges = badges
self.last_updated_timestamp = last_updated_timestamp
self.programmatic_descriptions = programmatic_descriptions
class MockUserSearchResult:
def __init__(self, *,
first_name: str,
last_name: str,
full_name: str,
team_name: str,
email: str,
manager_email: str,
github_username: str,
is_active: bool,
employee_type: str,
role_name: str,
new_attr: str) -> None:
self.full_name = full_name
self.first_name = first_name
self.last_name = last_name
self.team_name = team_name
self.email = email
self.manager_email = manager_email
self.github_username = github_username
self.is_active = is_active
self.employee_type = employee_type
self.new_attr = new_attr
self.role_name = role_name
class Response:
def __init__(self,
result: Any):
self._d_ = result
class TableResponse:
def __init__(self,
result: Any):
self._d_ = result
self.meta = {'id': result['key']}
class UserResponse:
def __init__(self,
result: Any):
self._d_ = result
self.meta = {'id': result['email']}
class TestElasticsearchProxy(unittest.TestCase):
def setUp(self) -> None:
self.app = create_app(config_module_class='search_service.config.LocalConfig')
self.app_context = self.app.app_context()
self.app_context.push()
mock_elasticsearch_client = MagicMock()
self.es_proxy = ElasticsearchProxy(client=mock_elasticsearch_client)
self.mock_badge = Tag(tag_name='name')
self.mock_tag = Tag(tag_name='match')
self.mock_empty_badge = [] # type: List[Tag]
self.mock_empty_tag = [] # type: List[Tag]
self.mock_result1 = MockSearchResult(name='test_table',
key='test_key',
description='test_description',
cluster='gold',
database='test_db',
schema='test_schema',
column_names=['test_col1', 'test_col2'],
tags=self.mock_empty_tag,
badges=self.mock_empty_badge,
last_updated_timestamp=1527283287,
programmatic_descriptions=[])
self.mock_result2 = MockSearchResult(name='test_table2',
key='test_key2',
description='test_description2',
cluster='gold',
database='test_db2',
schema='test_schema2',
column_names=['test_col1', 'test_col2'],
tags=self.mock_empty_tag,
badges=self.mock_empty_badge,
last_updated_timestamp=1527283287)
self.mock_result3 = Table(id='test_key3',
name='test_table3',
key='test_key3',
description='test_description3',
cluster='gold',
database='test_db3',
schema='test_schema3',
column_names=['test_col1', 'test_col2'],
tags=[self.mock_tag],
badges=[self.mock_badge],
last_updated_timestamp=1527283287)
self.mock_result4 = MockUserSearchResult(full_name='<NAME>',
first_name='First',
last_name='Last',
team_name='Test team',
email='<EMAIL>',
github_username='ghub',
manager_email='<EMAIL>',
is_active=True,
employee_type='FTE',
role_name='swe',
new_attr='aaa')
self.mock_dashboard_result = Dashboard(id='mode_dashboard',
uri='dashboard_uri',
cluster='gold',
group_name='mode_dashboard_group',
group_url='mode_dashboard_group_url',
product='mode',
name='mode_dashboard',
url='mode_dashboard_url',
description='test_dashboard',
last_successful_run_timestamp=1000)
def test_setup_client(self) -> None:
self.es_proxy = ElasticsearchProxy(
host="http://0.0.0.0:9200",
user="elastic",
password="<PASSWORD>"
)
a = self.es_proxy.elasticsearch
for client in [a, a.cat, a.cluster, a.indices, a.ingest, a.nodes, a.snapshot, a.tasks]:
self.assertEqual(client.transport.hosts[0]['host'], "0.0.0.0")
self.assertEqual(client.transport.hosts[0]['port'], 9200)
@patch('search_service.proxy.elasticsearch.Elasticsearch', autospec=True)
def test_setup_client_with_username_and_password(self, elasticsearch_mock: MagicMock) -> None:
self.es_proxy = ElasticsearchProxy(
host='http://unit-test-host',
user='unit-test-user',
password='<PASSWORD>'
)
elasticsearch_mock.assert_called_once()
elasticsearch_mock.assert_called_once_with(
'http://unit-test-host',
http_auth=('<PASSWORD>-user', '<PASSWORD>')
)
@patch('search_service.proxy.elasticsearch.Elasticsearch', autospec=True)
def test_setup_client_without_username(self, elasticsearch_mock: MagicMock) -> None:
self.es_proxy = ElasticsearchProxy(
host='http://unit-test-host',
user=''
)
elasticsearch_mock.assert_called_once()
elasticsearch_mock.assert_called_once_with('http://unit-test-host', http_auth=None)
@patch('search_service.proxy._proxy_client', None)
def test_setup_config(self) -> None:
es: Any = get_proxy_client()
a = es.elasticsearch
for client in [a, a.cat, a.cluster, a.indices, a.ingest, a.nodes, a.snapshot, a.tasks]:
self.assertEqual(client.transport.hosts[0]['host'], "0.0.0.0")
self.assertEqual(client.transport.hosts[0]['port'], 9200)
@patch('elasticsearch_dsl.Search.execute')
def test_search_with_empty_query_string(self, mock_search: MagicMock) -> None:
expected = SearchResult(total_results=0, results=[])
result = self.es_proxy.fetch_table_search_results(query_term='')
# check the output was empty list
self.assertDictEqual(vars(result), vars(expected),
"Received non-empty search results!")
# ensure elasticsearch_dsl Search endpoint was not called
# assert_not_called doesn't work. See here: http://engineroom.trackmaven.com/blog/mocking-mistakes/
self.assertTrue(mock_search.call_count == 0)
@patch('elasticsearch_dsl.Search.execute')
def test_search_with_empty_result(self,
mock_search: MagicMock) -> None:
mock_results = MagicMock()
mock_results.hits.total = 0
mock_search.return_value = mock_results
expected = SearchResult(total_results=0, results=[])
result = self.es_proxy.fetch_table_search_results(query_term='test_query_term')
self.assertDictEqual(vars(result), vars(expected),
"Received non-empty search results!")
@patch('elasticsearch_dsl.Search.execute')
def test_search_with_one_table_result(self,
mock_search: MagicMock) -> None:
mock_results = MagicMock()
mock_results.hits.total = 1
mock_results.__iter__.return_value = [TableResponse(result=vars(self.mock_result1))]
mock_search.return_value = mock_results
expected = SearchResult(total_results=1,
results=[Table(id='test_key',
name='test_table',
key='test_key',
description='test_description',
cluster='gold',
database='test_db',
schema='test_schema',
column_names=['test_col1', 'test_col2'],
tags=[],
badges=self.mock_empty_badge,
last_updated_timestamp=1527283287,
programmatic_descriptions=[])])
resp = self.es_proxy.fetch_table_search_results(query_term='test_query_term')
self.assertEqual(resp.total_results, expected.total_results,
"search result is not of length 1")
self.assertIsInstance(resp.results[0],
Table,
"Search result received is not of 'Table' type!")
self.assertDictEqual(vars(resp.results[0]), vars(expected.results[0]),
"Search Result doesn't match with expected result!")
@patch('elasticsearch_dsl.Search.execute')
def test_search_with_multiple_result(self,
mock_search: MagicMock) -> None:
mock_results = MagicMock()
mock_results.hits.total = 2
mock_results.__iter__.return_value = [TableResponse(result=vars(self.mock_result1)),
TableResponse(result=vars(self.mock_result2))]
mock_search.return_value = mock_results
expected = SearchResult(total_results=2,
results=[Table(id='test_key',
name='test_table',
key='test_key',
description='test_description',
cluster='gold',
database='test_db',
schema='test_schema',
column_names=['test_col1', 'test_col2'],
tags=[],
badges=self.mock_empty_badge,
last_updated_timestamp=1527283287,
programmatic_descriptions=[]),
Table(id='test_key2',
name='test_table2',
key='test_key2',
description='test_description2',
cluster='gold',
database='test_db2',
schema='test_schema2',
column_names=['test_col1', 'test_col2'],
tags=[],
badges=self.mock_empty_badge,
last_updated_timestamp=1527283287)])
resp = self.es_proxy.fetch_table_search_results(query_term='test_query_term')
self.assertEqual(resp.total_results, expected.total_results,
"search result is not of length 2")
for i in range(2):
self.assertIsInstance(resp.results[i],
Table,
"Search result received is not of 'Table' type!")
self.assertDictEqual(vars(resp.results[i]),
vars(expected.results[i]),
"Search result doesn't match with expected result!")
@patch('elasticsearch_dsl.Search.execute')
def test_search_table_filter(self, mock_search: MagicMock) -> None:
mock_results = MagicMock()
mock_results.hits.total = 1
mock_results.__iter__.return_value = [TableResponse(result=vars(self.mock_result1))]
mock_search.return_value = mock_results
expected = SearchResult(total_results=1,
results=[Table(id='test_key',
name='test_table',
key='test_key',
description='test_description',
cluster='gold',
database='test_db',
schema='test_schema',
column_names=['test_col1', 'test_col2'],
tags=self.mock_empty_tag,
badges=self.mock_empty_badge,
last_updated_timestamp=1527283287,
programmatic_descriptions=[])])
search_request = {
'type': 'AND',
'filters': {
'database': ['hive', 'bigquery'],
'schema': ['test-schema1', 'test-schema2'],
'table': ['*amundsen*'],
'column': ['*ds*'],
'tag': ['test-tag'],
}
}
resp = self.es_proxy.fetch_search_results_with_filter(search_request=search_request, query_term='test')
self.assertEqual(resp.total_results, expected.total_results)
self.assertIsInstance(resp.results[0], Table)
self.assertDictEqual(vars(resp.results[0]), vars(expected.results[0]))
def test_search_table_filter_return_no_results_if_no_search_request(self) -> None:
resp = self.es_proxy.fetch_search_results_with_filter(search_request=None, query_term='test')
self.assertEqual(resp.total_results, 0)
self.assertEqual(resp.results, [])
def test_search_table_filter_return_no_results_if_dsl_conversion_error(self) -> None:
search_request = {
'type': 'AND',
'filters': {}
}
with patch.object(self.es_proxy, 'convert_query_json_to_query_dsl') as mock:
mock.side_effect = MagicMock(side_effect=Exception('Test'))
resp = self.es_proxy.fetch_search_results_with_filter(search_request=search_request,
query_term='test')
self.assertEqual(resp.total_results, 0)
self.assertEqual(resp.results, [])
def test_get_model_by_index_table(self) -> None:
self.assertEqual(self.es_proxy.get_model_by_index(TABLE_INDEX), Table)
def test_get_model_by_index_user(self) -> None:
self.assertEqual(self.es_proxy.get_model_by_index(USER_INDEX), User)
def test_get_model_by_index_raise_exception(self) -> None:
self.assertRaises(Exception, self.es_proxy.convert_query_json_to_query_dsl, 'some_fake_index')
def test_parse_filters_return_results(self) -> None:
filter_list = {
'database': ['hive', 'bigquery'],
'schema': ['test-schema1', 'test-schema2'],
'table': ['*amundsen*'],
'column': ['*ds*'],
'tag': ['test-tag'],
}
expected_result = "database.raw:(hive OR bigquery) " \
"AND schema.raw:(test-schema1 OR test-schema2) " \
"AND name.raw:(*amundsen*) " \
"AND column_names.raw:(*ds*) " \
"AND tags:(test-tag)"
self.assertEqual(self.es_proxy.parse_filters(filter_list,
index=TABLE_INDEX), expected_result)
def test_parse_filters_return_no_results(self) -> None:
filter_list = {
'unsupported_category': ['fake']
}
self.assertEqual(self.es_proxy.parse_filters(filter_list,
index=TABLE_INDEX), '')
def test_validate_wrong_filters_values(self) -> None:
search_request = {
"type": "AND",
"filters": {
"schema": ["test_schema:test_schema"],
"table": ["test/table"]
},
"query_term": "",
"page_index": 0
}
self.assertEqual(self.es_proxy.validate_filter_values(search_request), False)
def test_validate_accepted_filters_values(self) -> None:
search_request = {
"type": "AND",
"filters": {
"schema": ["test_schema"],
"table": ["test_table"]
},
"query_term": "a",
"page_index": 0
}
self.assertEqual(self.es_proxy.validate_filter_values(search_request), True)
def test_parse_query_term(self) -> None:
term = 'test'
expected_result = "(name:(*test*) OR name:(test) OR schema:(*test*) OR " \
"schema:(test) OR description:(*test*) OR description:(test) OR " \
"column_names:(*test*) OR column_names:(test) OR " \
"column_descriptions:(*test*) OR column_descriptions:(test))"
self.assertEqual(self.es_proxy.parse_query_term(term,
index=TABLE_INDEX), expected_result)
def test_convert_query_json_to_query_dsl_term_and_filters(self) -> None:
term = 'test'
test_filters = {
'database': ['hive', 'bigquery'],
'schema': ['test-schema1', 'test-schema2'],
'table': ['*amundsen*'],
'column': ['*ds*'],
'tag': ['test-tag'],
}
search_request = {
'type': 'AND',
'filters': test_filters
}
expected_result = self.es_proxy.parse_filters(test_filters, index=TABLE_INDEX) + " AND " + \
self.es_proxy.parse_query_term(term, index=TABLE_INDEX)
ret_result = self.es_proxy.convert_query_json_to_query_dsl(search_request=search_request,
query_term=term,
index=TABLE_INDEX)
self.assertEqual(ret_result, expected_result)
def test_convert_query_json_to_query_dsl_no_term(self) -> None:
term = ''
test_filters = {
'database': ['hive', 'bigquery'],
}
search_request = {
'type': 'AND',
'filters': test_filters
}
expected_result = self.es_proxy.parse_filters(test_filters,
index=TABLE_INDEX)
ret_result = self.es_proxy.convert_query_json_to_query_dsl(search_request=search_request,
query_term=term,
index=TABLE_INDEX)
self.assertEqual(ret_result, expected_result)
def test_convert_query_json_to_query_dsl_no_filters(self) -> None:
term = 'test'
search_request = {
'type': 'AND',
'filters': {}
}
expected_result = self.es_proxy.parse_query_term(term,
index=TABLE_INDEX)
ret_result = self.es_proxy.convert_query_json_to_query_dsl(search_request=search_request,
query_term=term,
index=TABLE_INDEX)
self.assertEqual(ret_result, expected_result)
def test_convert_query_json_to_query_dsl_raise_exception_no_term_or_filters(self) -> None:
term = ''
search_request = {
'type': 'AND',
'filters': {}
}
self.assertRaises(Exception, self.es_proxy.convert_query_json_to_query_dsl, search_request, term)
@patch('elasticsearch_dsl.Search.execute')
def test_search_with_one_user_result(self,
mock_search: MagicMock) -> None:
mock_results = MagicMock()
mock_results.hits.total = 1
mock_results.__iter__.return_value = [UserResponse(result=vars(self.mock_result4))]
mock_search.return_value = mock_results
expected = SearchResult(total_results=1,
results=[User(id='<EMAIL>',
full_name='First Last',
first_name='First',
last_name='Last',
team_name='Test team',
email='<EMAIL>',
github_username='ghub',
manager_email='<EMAIL>',
is_active=True,
role_name='swe',
employee_type='FTE')])
resp = self.es_proxy.fetch_user_search_results(query_term='test_query_term',
index='user_search_index')
self.assertEqual(resp.total_results, expected.total_results,
"search result is not of length 1")
self.assertIsInstance(resp.results[0],
User,
"Search result received is not of 'Table' type!")
self.assertDictEqual(vars(resp.results[0]), vars(expected.results[0]),
"Search Result doesn't match with expected result!")
def test_create_document_with_no_data(self) -> None:
expected = ''
result = self.es_proxy.create_document(data=None, index='table_search_index')
print('result: {}'.format(result))
self.assertEqual(expected, result)
@patch('uuid.uuid4')
def test_create_document(self, mock_uuid: MagicMock) -> None:
mock_elasticsearch = self.es_proxy.elasticsearch
new_index_name = 'tester_index_name'
mock_uuid.return_value = new_index_name
mock_elasticsearch.indices.get_alias.return_value = dict([(new_index_name, {})])
start_data = [
Table(id='snowflake://blue.test_schema/bank_accounts', cluster='blue', column_names=['1', '2'],
database='snowflake', schema='test_schema', description='A table for something',
key='snowflake://blue.test_schema/bank_accounts',
last_updated_timestamp=0, name='bank_accounts', tags=[], badges=self.mock_empty_badge,
column_descriptions=['desc'], schema_description='schema description 1'),
Table(id='snowflake://blue.test_schema/bitcoin_wallets', cluster='blue', column_names=['5', '6'],
database='snowflake', schema='test_schema', description='A table for lots of things!',
key='snowflake://blue.test_schema/bitcoin_wallets',
last_updated_timestamp=0, name='bitcoin_wallets', tags=[], badges=self.mock_empty_badge,
schema_description='schema description 2', programmatic_descriptions=["test"])
]
expected_data = [
{
'index': {
'_index': new_index_name,
'_type': 'table',
'_id': 'snowflake://blue.test_schema/bank_accounts'
}
},
{
'id': 'snowflake://blue.test_schema/bank_accounts',
'cluster': 'blue',
'column_names': ['1', '2'],
'column_descriptions': ['desc'],
'database': 'snowflake',
'schema': 'test_schema',
'description': 'A table for something',
'display_name': None,
'key': 'snowflake://blue.test_schema/bank_accounts',
'last_updated_timestamp': 0,
'name': 'bank_accounts',
'tags': [],
'badges': [],
'total_usage': 0,
'programmatic_descriptions': None,
'schema_description': 'schema description 1',
},
{
'index': {
'_index': new_index_name,
'_type': 'table',
'_id': 'snowflake://blue.test_schema/bitcoin_wallets'
}
},
{
'id': 'snowflake://blue.test_schema/bitcoin_wallets',
'cluster': 'blue',
'column_names': ['5', '6'],
'column_descriptions': None,
'database': 'snowflake',
'schema': 'test_schema',
'description': 'A table for lots of things!',
'display_name': None,
'key': 'snowflake://blue.test_schema/bitcoin_wallets',
'last_updated_timestamp': 0,
'name': 'bitcoin_wallets',
'tags': [],
'badges': [],
'total_usage': 0,
'schema_description': 'schema description 2',
'programmatic_descriptions': ["test"]
}
]
mock_elasticsearch.bulk.return_value = {'errors': False}
expected_alias = 'table_search_index'
result = self.es_proxy.create_document(data=start_data, index=expected_alias)
self.assertEqual(expected_alias, result)
mock_elasticsearch.bulk.assert_called_with(expected_data)
def test_update_document_with_no_data(self) -> None:
expected = ''
result = self.es_proxy.update_document(data=None, index='table_search_index')
self.assertEqual(expected, result)
@patch('uuid.uuid4')
def test_update_document(self, mock_uuid: MagicMock) -> None:
mock_elasticsearch = self.es_proxy.elasticsearch
new_index_name = 'tester_index_name'
mock_elasticsearch.indices.get_alias.return_value = dict([(new_index_name, {})])
mock_uuid.return_value = new_index_name
table_key = 'snowflake://blue.test_schema/bitcoin_wallets'
expected_alias = 'table_search_index'
data = [
Table(id=table_key, cluster='blue', column_names=['5', '6'], database='snowflake',
schema='test_schema', description='A table for lots of things!',
key=table_key, last_updated_timestamp=0, name='bitcoin_wallets',
tags=[], column_descriptions=['hello'], badges=self.mock_empty_badge,
schema_description='schema description 1')
]
expected_data = [
{
'update': {
'_index': new_index_name,
'_type': 'table',
'_id': table_key
}
},
{
'doc': {
'id': table_key,
'cluster': 'blue',
'column_names': ['5', '6'],
'column_descriptions': ['hello'],
'database': 'snowflake',
'schema': 'test_schema',
'description': 'A table for lots of things!',
'display_name': None,
'key': table_key,
'last_updated_timestamp': 0,
'name': 'bitcoin_wallets',
'tags': [],
'badges': [],
'total_usage': 0,
'programmatic_descriptions': None,
'schema_description': 'schema description 1',
}
}
]
result = self.es_proxy.update_document(data=data, index=expected_alias)
self.assertEqual(expected_alias, result)
mock_elasticsearch.bulk.assert_called_with(expected_data)
@patch('uuid.uuid4')
def test_delete_table_document(self, mock_uuid: MagicMock) -> None:
mock_elasticsearch = self.es_proxy.elasticsearch
new_index_name = 'tester_index_name'
mock_uuid.return_value = new_index_name
mock_elasticsearch.indices.get_alias.return_value = dict([(new_index_name, {})])
expected_alias = 'table_search_index'
data = ['id1', 'id2']
expected_data = [
{'delete': {'_index': new_index_name, '_id': 'id1', '_type': 'table'}},
{'delete': {'_index': new_index_name, '_id': 'id2', '_type': 'table'}}
]
result = self.es_proxy.delete_document(data=data, index=expected_alias)
self.assertEqual(expected_alias, result)
mock_elasticsearch.bulk.assert_called_with(expected_data)
@patch('uuid.uuid4')
def test_delete_user_document(self, mock_uuid: MagicMock) -> None:
mock_elasticsearch = self.es_proxy.elasticsearch
new_index_name = 'tester_index_name'
mock_uuid.return_value = new_index_name
mock_elasticsearch.indices.get_alias.return_value = dict([(new_index_name, {})])
expected_alias = 'user_search_index'
data = ['id1', 'id2']
expected_data = [
{'delete': {'_index': new_index_name, '_id': 'id1', '_type': 'user'}},
{'delete': {'_index': new_index_name, '_id': 'id2', '_type': 'user'}}
]
result = self.es_proxy.delete_document(data=data, index=expected_alias)
self.assertEqual(expected_alias, result)
mock_elasticsearch.bulk.assert_called_with(expected_data)
def test_get_instance_string(self) -> None:
result = self.es_proxy._get_instance('column', 'value')
self.assertEqual('value', result)
def test_get_instance_tag(self) -> None:
result = self.es_proxy._get_instance('tags', ['value'])
tags = [Tag(tag_name='value')]
self.assertEqual(tags, result)
def test_get_instance_badge(self) -> None:
result = self.es_proxy._get_instance('badges', ['badge1'])
badges = [Tag(tag_name='badge1')]
self.assertEqual(badges, result)
@patch('search_service.proxy.elasticsearch.ElasticsearchProxy._search_helper')
def test_fetch_dashboard_search_results(self,
mock_search: MagicMock) -> None:
self.mock_dashboard_result = Dashboard(id='mode_dashboard',
uri='dashboard_uri',
cluster='gold',
group_name='mode_dashboard_group',
group_url='mode_dashboard_group_url',
product='mode',
name='mode_dashboard',
url='mode_dashboard_url',
description='test_dashboard',
last_successful_run_timestamp=1000)
mock_search.return_value = SearchResult(total_results=1,
results=[self.mock_dashboard_result])
expected = SearchResult(total_results=1,
results=[Dashboard(id='mode_dashboard',
uri='dashboard_uri',
cluster='gold',
group_name='mode_dashboard_group',
group_url='mode_dashboard_group_url',
product='mode',
name='mode_dashboard',
url='mode_dashboard_url',
description='test_dashboard',
last_successful_run_timestamp=1000)])
resp = self.es_proxy.fetch_dashboard_search_results(query_term='test_query_term',
page_index=0,
index='dashboard_search_index')
self.assertEqual(resp.total_results, expected.total_results)
self.assertDictEqual(vars(resp.results[0]),
vars(expected.results[0]),
"Search result doesn't match with expected result!")
```
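The assertions above lean on Elasticsearch's bulk request format, in which every document body is preceded by an action line naming the target index, type, and id. A minimal sketch of that pairing (the helper name and sample id are illustrative, not part of the test suite):
```python
# Hedged sketch: pair each document with an 'index' action line, mirroring the
# expected_data lists asserted in the tests above. Names here are assumptions.
def build_bulk_payload(index_name, documents, doc_type='table'):
    payload = []
    for doc in documents:
        payload.append({'index': {'_index': index_name,
                                  '_type': doc_type,
                                  '_id': doc['id']}})
        payload.append(doc)
    return payload

sample = build_bulk_payload('tester_index_name',
                            [{'id': 'snowflake://blue.test_schema/bank_accounts'}])
```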
|
{
"source": "jdavidls/aiomongodb",
"score": 3
}
|
#### File: aiomongodb/testsuite/basictest.py
```python
import asyncio, bson, collections, aiomongodb
async def main():
c = aiomongodb.Client()
await c.connect()
db = c.database('test')
query = db._cmd.find_one({'listCommands': 1})
async for item in query:
for command, info in item.commands.items():
print(command)
print(' ', info)
#doc = {'listCommandss':1}
#response = await c.OP_QUERY(b'test.$cmd', bson.BSON.encode(doc), 1)
#response = bson.BSON.decode(response.payload, as_class=collections.OrderedDict)
#print(response)
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
```
|
{
"source": "jdavidls/scrap",
"score": 3
}
|
#### File: scrap/scrap/__main__.py
```python
import sys, asyncio, aiohttp, async_timeout, collections, csv
from lxml import html, etree
from tld import get_tld
from urllib.parse import urlparse
USER_AGENT = 'Mozilla/5.0 (iPad; CPU OS 9_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13B143 Safari/601.1'
'''
analyze the html5 doctype
drop tabs
'''
columns =['url', 'origin', 'doctype', 'tableCount', 'error']
Row = collections.namedtuple('Row', columns)
headRow = Row(*columns)
def readFileSet(filename):
with open(filename, 'r') as f:
return set([x.strip() for x in f.readlines()])
blacklist = readFileSet('./data/blacklist.txt')
class Scrapper:
def __init__(self, loop, timeout=10):
self.loop = loop
self.timeout = timeout
self.headers = {
'User-Agent': USER_AGENT
}
async def get(self, url):
with async_timeout.timeout(self.timeout):
async with aiohttp.ClientSession(loop=self.loop, headers=self.headers) as session:
try:
async with session.get(url) as response:
## check status code
if response.status != 200:
print(url, 'response', response.status, ':', response.reason)
return
else:
try:
text = await response.text()
except Exception as e:
print(url, 'has a unicode error')
return e
try:
return html.fromstring(text)
except Exception as e:
print(url, 'has an XML/HTML parsing error')
return e
except Exception as e:
print(url, 'has an HTTP/SSL error')
return e
async def google(scrapper, keywords, pages=50):
url = '/search?filter=0&query='+keywords
for n in range(pages):
print('GOOGLE SEARCH FOR', keywords, 'PAGE', n)
html = await scrapper.get('https://www.google.com'+url)
if isinstance(html, Exception) or html is None:
print('Error loading google page', url)
break
organicLinks = html.xpath('//h3[@class="r"]//a/@href')
for link in organicLinks:
yield link, 'organic'
# next page
url = html.xpath('//a[@id="pnnext"]/@href')
if not url: break
url = url[0]
#sleep
await asyncio.sleep(10) # INDENTATION FIX
async def bing(scrapper, keywords, pages=50):
url = '/search?q='+keywords
for n in range(pages):
html = await scrapper.get('https://www.bing.com'+url)
if isinstance(html, Exception) or html is None:
print('Error loading bing page', url)
continue
organicLinks = html.xpath('//h3[@class="r"]//a/@href')
for link in organicLinks:
yield link, 'organic'
# next page
url = html.xpath('//a[@id="pnnext"]/@href')
if not url: break
url = url[0]
async def searchLoop(loop, searchEngine, keywords):
scrapper = Scrapper(loop)
pages = set()
async for link, origin in searchEngine(scrapper, keywords):
urlparts = urlparse(link)
link = '{url.scheme}://{url.netloc}'.format(url=urlparts)
tld = get_tld(link)
if tld in pages or tld in blacklist: continue
pages.add(tld)
print('Scanning', tld)
#print('scanning', link)
page = await scrapper.get(link)
if isinstance(page, Exception):
yield Row(url=link, origin=origin, doctype=None, tableCount=None, error=str(page))
else:
# count the number of tables
docType = page.getroottree().docinfo.doctype
tableCount = len(page.xpath('//table'))
yield Row(url=link, origin=origin, doctype=docType, tableCount=tableCount, error=None)
async def search(loop, keywords):
outputFilename = './data/' + keywords + '.csv'
with open(outputFilename, 'w', newline='') as csvFile:
csvWriter = csv.writer(csvFile)
csvWriter.writerow(headRow)
async for row in searchLoop(loop, google, keywords):
csvWriter.writerow(row)
loop = asyncio.get_event_loop()
loop.run_until_complete(search(loop, '+'.join(sys.argv[1:])))
loop.close()
```
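The `Scrapper` class above can also be exercised on its own, outside the Google/Bing loops; a minimal sketch (the URL is a placeholder and the class is assumed to be in scope):
```python
# Hedged sketch reusing the Scrapper class defined above; fetches a single page
# and counts its <table> elements, which is what searchLoop records per site.
import asyncio

async def demo(loop, url='https://example.com'):
    scrapper = Scrapper(loop)
    page = await scrapper.get(url)
    if page is not None and not isinstance(page, Exception):
        print('tables found:', len(page.xpath('//table')))

loop = asyncio.get_event_loop()
loop.run_until_complete(demo(loop))
loop.close()
```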
|
{
"source": "jdavidrcamacho/artgpn",
"score": 3
}
|
#### File: artgpn/artgpn/node.py
```python
import numpy as np
#because it makes my life easier down the line
pi, exp, sine, cosine, sqrt = np.pi, np.exp, np.sin, np.cos, np.sqrt
class nodeFunction(object):
"""
Definition of the node function kernels of our network. By default, and
because it simplifies my life, all kernels include a white noise term
"""
def __init__(self, *args):
"""
Puts all kernel arguments in an array pars
"""
self.pars = np.array(args, dtype=float)
def __call__(self, r, t1 = None, t2=None):
"""
r = t - t'
Not sure if this is a good approach, since it will make our life harder
when defining certain non-stationary kernels, e.g linear kernel.
"""
print('call r', r.shape)
raise NotImplementedError
def __repr__(self):
"""
Representation of each kernel instance
"""
return "{0}({1})".format(self.__class__.__name__,
", ".join(map(str, self.pars)))
##### Constant #################################################################
class Constant(nodeFunction):
"""
This kernel returns its constant argument c with white noise
Parameters:
c = constant
wn = white noise amplitude
"""
def __init__(self, c):
super(Constant, self).__init__(c)
self.c = c
self.type = 'non-stationary and anisotropic'
self.derivatives = 1 #number of derivatives in this kernel
self.params_size = 1 #number of hyperparameters
def __call__(self, r):
return self.c * np.ones_like(r)
##### White Noise ##############################################################
class WhiteNoise(nodeFunction):
"""
Definition of the white noise kernel.
Parameters
wn = white noise amplitude
"""
def __init__(self, wn):
super(WhiteNoise, self).__init__(wn)
self.wn = wn
self.type = 'stationary'
self.derivatives = 1 #number of derivatives in this kernel
self.params_size = 1 #number of hyperparameters
def __call__(self, r):
if r[0,:].shape == r[:,0].shape:
return self.wn**2 * np.diag(np.diag(np.ones_like(r)))
else:
return np.zeros_like(r)
##### Squared exponential ######################################################
class SquaredExponential(nodeFunction):
"""
Squared Exponential kernel, also known as radial basis function or RBF
kernel in other works.
Parameters:
ell = length-scale
wn = white noise
"""
def __init__(self, ell):
super(SquaredExponential, self).__init__(ell)
self.ell = ell
self.type = 'stationary and anisotropic'
self.derivatives = 1 #number of derivatives in this kernel
self.params_size = 1 #number of hyperparameters
def __call__(self, r):
return exp(-0.5 * r**2 / self.ell**2)
##### Periodic #################################################################
class Periodic(nodeFunction):
"""
Definition of the periodic kernel.
Parameters:
ell = length scale
P = period
wn = white noise
"""
def __init__(self, P, ell):
super(Periodic, self).__init__(P, ell)
self.P = P
self.ell = ell
self.type = 'non-stationary and isotropic'
self.derivatives = 2 #number of derivatives in this kernel
self.params_size = 2 #number of hyperparameters
def __call__(self, r):
return exp( -2 * sine(pi*np.abs(r)/self.P)**2 / self.ell**2)
##### Quasi Periodic ###########################################################
class QuasiPeriodic(nodeFunction):
"""
This kernel is the product between the exponential sine squared kernel
and the squared exponential kernel, commonly known as the quasi-periodic
kernel.
Parameters:
ell_e = evolutionary time scale
P = kernel periodicity
ell_p = length scale of the periodic component
wn = white noise
"""
def __init__(self, ell_e, P, ell_p):
super(QuasiPeriodic, self).__init__(ell_e, P, ell_p)
self.ell_e = ell_e
self.P = P
self.ell_p = ell_p
self.type = 'non-stationary and anisotropic'
self.derivatives = 3 #number of derivatives in this kernel
self.params_size = 3 #number of hyperparameters
def __call__(self, r):
return exp(- 2*sine(pi*np.abs(r)/self.P)**2 \
/self.ell_p**2 - r**2/(2*self.ell_e**2))
##### Rational Quadratic #######################################################
class RationalQuadratic(nodeFunction):
"""
Definition of the rational quadratic kernel.
Parameters:
alpha = weight of large and small scale variations
ell = characteristic length scale to define the kernel "smoothness"
wn = white noise amplitude
"""
def __init__(self, alpha, ell):
super(RationalQuadratic, self).__init__(alpha, ell)
self.alpha = alpha
self.ell = ell
self.type = 'stationary and anisotropic'
self.derivatives = 2 #number of derivatives in this kernel
self.params_size = 2 #number of hyperparameters
def __call__(self, r):
return 1 / (1+ r**2/ (2*self.alpha*self.ell**2))**self.alpha
##### RQP kernel ###############################################################
class RQP(nodeFunction):
"""
Definition of the product between the exponential sine squared kernel
and the rational quadratic kernel that we called RQP kernel.
If I am thinking this correctly then this kernel should tend to the
QuasiPeriodic kernel as alpha increases, although I am not sure if it's true.
Parameters:
ell_e and ell_p = aperiodic and periodic length scales
alpha = alpha of the rational quadratic kernel
P = periodic repetitions of the kernel
wn = white noise amplitude
"""
def __init__(self, alpha, ell_e, P, ell_p):
super(RQP, self).__init__(alpha, ell_e, P, ell_p)
self.alpha = alpha
self.ell_e = ell_e
self.P = P
self.ell_p = ell_p
self.type = 'non-stationary and anisotropic'
self.derivatives = 4 #number of derivatives in this kernel
self.params_size = 4 #number of hyperparameters
def __call__(self, r):
a = exp(- 2*sine(pi*np.abs(r)/self.P)**2 / self.ell_p**2)
b = (1+ r**2/ (2*self.alpha*self.ell_e**2))#**self.alpha
return a / (np.sign(b) * (np.abs(b)) ** self.alpha)
##### Cosine ###################################################################
class Cosine(nodeFunction):
"""
Definition of the cosine kernel.
Parameters:
P = period
wn = white noise amplitude
"""
def __init__(self, P):
super(Cosine, self).__init__(P)
self.P = P
self.type = 'non-stationary and isotropic'
self.derivatives = 1 #number of derivatives in this kernel
self.params_size = 1 #number of hyperparameters
def __call__(self, r):
return cosine(2*pi*np.abs(r) / self.P)
##### Exponential ##############################################################
class Exponential(nodeFunction):
"""
Definition of the exponential kernel.
Parameters:
ell = characteristic length scale
wn = white noise amplitude
"""
def __init__(self, ell):
super(Exponential, self).__init__(ell)
self.ell = ell
self.type = 'stationary and isotropic'
self.derivatives = 1 #number of derivatives in this kernel
self.params_size = 1 #number of hyperparameters
def __call__(self, r):
return exp(- np.abs(r)/self.ell)
##### Matern 3/2 ###############################################################
class Matern32(nodeFunction):
"""
Definition of the Matern 3/2 kernel. This kernel arises when setting
v=3/2 in the matern family of kernels
Parameters:
ell = characteristic length scale
wn = white noise amplitude
"""
def __init__(self, ell):
super(Matern32, self).__init__(ell)
self.ell = ell
self.type = 'stationary and isotropic'
self.derivatives = 1 #number of derivatives in this kernel
self.params_size = 1 #number of hyperparameters
def __call__(self, r):
return (1.0 + np.sqrt(3.0)*np.abs(r)/self.ell) \
*np.exp(-np.sqrt(3.0)*np.abs(r) / self.ell)
#### Matern 5/2 ################################################################
class Matern52(nodeFunction):
"""
Definition of the Matern 5/2 kernel. This kernel arises when setting
v=5/2 in the matern family of kernels
Parameters:
ell = characteristic length scale
wn = white noise amplitude
"""
def __init__(self, ell):
super(Matern52, self).__init__(ell)
self.ell = ell
self.type = 'stationary and isotropic'
self.derivatives = 2 #number of derivatives in this kernel
self.params_size = 2 #number of hyperparameters
def __call__(self, r):
return (1.0 + ( 3*np.sqrt(5)*self.ell*np.abs(r) \
+5*np.abs(r)**2)/(3*self.ell**2) ) \
*exp(-np.sqrt(5.0)*np.abs(r)/self.ell)
#### Linear ####################################################################
class Linear(nodeFunction):
"""
Definition of the Linear kernel.
c = constant
wn = white noise amplitude
"""
def __init__(self, c):
super(Linear, self).__init__(c)
self.c = c
self.type = 'non-stationary and anisotropic'
self.derivatives = 1 #number of derivatives in this kernel
self.params_size = 1 #number of hyperparameters
def __call__(self, r, t1, t2):
return (t1 - self.c) * (t2 - self.c)
##### Gamma-exponential ########################################################
class GammaExp(nodeFunction):
"""
Definition of the gamma-exponential kernel
gamma = shape parameter ( 0 < gamma <= 2)
ell = length scale
wn = white noise amplitude
"""
def __init__(self, gamma, ell):
super(GammaExp, self).__init__(gamma, ell)
self.gamma = gamma
self.ell = ell
self.type = 'non-stationary and anisotropic'
self.derivatives = 2 #number of derivatives in this kernel
self.params_size = 2 #number of hyperparameters
def __call__(self, r):
return exp( - (np.abs(r)/self.ell) ** self.gamma)
##### Polynomial ###############################################################
class Polynomial(nodeFunction):
"""
Definition of the polynomial kernel
a = real value > 0
b = real value >= 0
c = integer value
wn = white noise amplitude
"""
def __init__(self, a, b, c):
super(Polynomial, self).__init__(a, b, c)
self.a = a
self.b = b
self.c = c
def __call__(self, r, t1, t2):
return (self.a * t1 * t2 + self.b)**self.c
##### END
```
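As a quick sanity check of the node kernels above, they can be evaluated on a matrix of time lags; the hyperparameter values below are placeholders, not recommendations:
```python
# Hedged usage sketch for the node kernels defined above (values illustrative).
import numpy as np

t = np.linspace(0, 10, 5)
r = t[:, None] - t[None, :]                 # pairwise lags t - t'
node = QuasiPeriodic(ell_e=20.0, P=3.0, ell_p=0.5)
K = node(r)                                 # 5x5 matrix of kernel values
print(K.shape, K[0, 0])                     # diagonal entries equal exp(0) = 1
```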
#### File: artgpn/artgpn/weight.py
```python
import numpy as np
#because it makes my life easier down the line
pi, exp, sine, cosine, sqrt = np.pi, np.exp, np.sin, np.cos, np.sqrt
class weightFunction(object):
"""
Definition of the weight function kernels of our network.
Kernels not fully implemented yet:
Matern32, and Matern52
"""
def __init__(self, *args):
"""
Puts all kernel arguments in an array pars
"""
self.pars = np.array(args, dtype=float)
def __call__(self, r, t1 = None, t2=None):
"""
r = t - t'
Not sure if this is a good approach, since it will make our life harder
when defining certain non-stationary kernels, e.g linear kernel.
"""
raise NotImplementedError
def __repr__(self):
"""
Representation of each kernel instance
"""
return "{0}({1})".format(self.__class__.__name__,
", ".join(map(str, self.pars)))
#Not working yet!
# def __minus__(self, b):
# return Minus(self, b)
# def __rminus__(self, b):
# return self.__minus__(b)
#
#
#class _operator(weightFunction):
# """
# To allow operations between two kernels
# """
# def __init__(self, k1):
# self.k1 = k1
#
# @property
# def pars(self):
# return np.append(self.k1.pars)
#
#
#class Minus(_operator):
# """
# To allow a "minus" linear kernel
# """
# def __repr__(self):
# return "-{0}".format(self.k1)
#
# def __call__(self, r):
# return -self.k1(r)
##### Constant #################################################################
class Constant(weightFunction):
"""
This kernel returns its constant argument c
Parameters:
c = constant
"""
def __init__(self, c):
super(Constant, self).__init__(c)
self.c = c
self.type = 'non-stationary and anisotropic'
self.derivatives = 1 #number of derivatives in this kernel
self.params_size = 1 #number of hyperparameters
def __call__(self, r):
return self.c**2 * np.ones_like(r)
class dConstant_dc(Constant):
"""
Log-derivative in order to c
"""
def __init__(self, c):
super(dConstant_dc, self).__init__(c)
self.c = c
def __call__(self, r):
return 2*self.c * np.ones_like(r)
##### White Noise ##############################################################
class WhiteNoise(weightFunction):
"""
Definition of the white noise kernel.
Parameters
wn = white noise amplitude
"""
def __init__(self, wn):
super(WhiteNoise, self).__init__(wn)
self.wn = wn
self.type = 'stationary'
self.derivatives = 1 #number of derivatives in this kernel
self.params_size = 1 #number of hyperparameters
def __call__(self, r):
if r[0,:].shape == r[:,0].shape:
return self.wn**2 * np.diag(np.diag(np.ones_like(r)))
else:
return np.zeros_like(r)
class dWhiteNoise_dwn(WhiteNoise):
"""
Log-derivative in order to the amplitude
"""
def __init__(self, wn):
super(dWhiteNoise_dwn, self).__init__(wn)
self.wn = wn
def __call__(self, r):
return 2 * self.wn**2 * np.diag(np.diag(np.ones_like(r)))
##### Squared exponential ######################################################
class SquaredExponential(weightFunction):
"""
Squared Exponential kernel, also known as radial basis function or RBF
kernel in other works.
Parameters:
weight = weight/amplitude of the kernel
ell = length-scale
"""
def __init__(self, weight, ell):
super(SquaredExponential, self).__init__(weight, ell)
self.weight = weight
self.ell = ell
self.type = 'stationary and anisotropic'
self.derivatives = 2 #number of derivatives in this kernel
self.params_size = 2 #number of hyperparameters
def __call__(self, r):
return self.weight**2 * exp(-0.5 * r**2 / self.ell**2)
class dSquaredExponential_dweight(SquaredExponential):
"""
Log-derivative in order to the weight
"""
def __init__(self, weight, ell):
super(dSquaredExponential_dweight, self).__init__(weight, ell)
self.weight = weight
self.ell = ell
def __call__(self, r):
return 2 * self.weight**2 * exp(-0.5 * r**2 / self.ell**2)
class dSquaredExponential_dell(SquaredExponential):
"""
Log-derivative in order to the ell
"""
def __init__(self, weight, ell):
super(dSquaredExponential_dell, self).__init__(weight, ell)
self.weight = weight
self.ell = ell
def __call__(self, r):
return (r**2 * self.weight**2 / self.ell**2) \
* exp(-0.5 * r**2 / self.ell**2)
##### Periodic #################################################################
class Periodic(weightFunction):
"""
Definition of the periodic kernel.
Parameters:
weight = weight/amplitude of the kernel
ell = length scale
P = period
"""
def __init__(self, weight, P, ell):
super(Periodic, self).__init__(weight, P, ell)
self.weight = weight
self.P = P
self.ell = ell
self.type = 'non-stationary and isotropic'
self.derivatives = 3 #number of derivatives in this kernel
self.params_size = 3 #number of hyperparameters
def __call__(self, r):
return self.weight**2 * exp( -2 * sine(pi*np.abs(r)/self.P)**2 /self.ell**2)
class dPeriodic_dweight(Periodic):
"""
Log-derivative in order to the weight
"""
def __init__(self, weight, P, ell):
super(dPeriodic_dweight, self).__init__(weight, P, ell)
self.weight = weight
self.P = P
self.ell = ell
def __call__(self, r):
return 2 * self.weight**2 * exp(-2 * sine(pi * np.abs(r) / self.P)**2 \
/ self.ell**2)
class dPeriodic_dell(Periodic):
"""
Log-derivative in order to ell
"""
def __init__(self, weight, P, ell,):
super(dPeriodic_dell, self).__init__(weight, P, ell)
self.weight = weight
self.P = P
self.ell = ell
def __call__(self, r):
return (4* self.weight**2 * sine(pi * np.abs(r) / self.P)**2 \
*exp(-2 * sine(pi * np.abs(r) / self.P)**2 \
/ self.ell**2)) / self.ell**2
class dPeriodic_dP(Periodic):
"""
Log-derivative in order to P
"""
def __init__(self, weight, ell, P):
super(dPeriodic_dP, self).__init__(weight, P, ell)
self.weight = weight
self.P = P
self.ell = ell
def __call__(self, r):
return (4 * pi * r * self.weight**2 \
* cosine(pi*np.abs(r) / self.P) *sine(pi*np.abs(r) / self.P) \
* exp(-2 * sine(pi*np.abs(r) / self.P)**2 / self.ell**2)) \
/ (self.ell**2 * self.P)
##### Quasi Periodic ###########################################################
class QuasiPeriodic(weightFunction):
"""
This kernel is the product between the exponential sine squared kernel
and the squared exponential kernel, commonly known as the quasi-periodic
kernel.
Parameters:
weight = weight/amplitude of the kernel
ell_e = evolutionary time scale
ell_p = length scale of the Periodic component
P = kernel Periodicity
"""
def __init__(self, weight, ell_e, P, ell_p):
super(QuasiPeriodic, self).__init__(weight, ell_e, P, ell_p)
self.weight = weight
self.ell_e = ell_e
self.P = P
self.ell_p = ell_p
self.type = 'non-stationary and anisotropic'
self.derivatives = 4 #number of derivatives in this kernel
self.params_size = 4 #number of hyperparameters
def __call__(self, r):
return self.weight**2 *exp(- 2*sine(pi*np.abs(r)/self.P)**2 \
/self.ell_p**2 - r**2/(2*self.ell_e**2))
class dQuasiPeriodic_dweight(QuasiPeriodic):
"""
Log-derivative in order to the weight
"""
def __init__(self, weight, ell_e, P, ell_p):
super(dQuasiPeriodic_dweight, self).__init__(weight, ell_e, P, ell_p)
self.weight = weight
self.ell_e = ell_e
self.P = P
self.ell_p = ell_p
def __call__(self, r):
return 2 * self.weight**2 *exp(-2 * sine(pi*np.abs(r)/self.P)**2 \
/self.ell_p**2 - r**2/(2*self.ell_e**2))
class dQuasiPeriodic_delle(QuasiPeriodic):
"""
Log-derivative in order to ell_e
"""
def __init__(self, weight, ell_e, P, ell_p):
super(dQuasiPeriodic_delle, self).__init__(weight, ell_e, P, ell_p)
self.weight = weight
self.ell_e = ell_e
self.P = P
self.ell_p = ell_p
def __call__(self, r):
return (r**2 * self.weight**2 / self.ell_e**2) \
*exp(-2 * sine(pi*np.abs(r)/self.P)**2 \
/self.ell_p**2 - r**2/(2*self.ell_e**2))
class dQuasiPeriodic_dP(QuasiPeriodic):
"""
Log-derivative in order to P
"""
def __init__(self, weight, ell_e, P, ell_p):
super(dQuasiPeriodic_dP, self).__init__(weight, ell_e, P, ell_p)
self.weight = weight
self.ell_e = ell_e
self.P = P
self.ell_p = ell_p
def __call__(self, r):
return 4 * pi * r * self.weight**2 \
* cosine(pi*np.abs(r)/self.P) * sine(pi*np.abs(r)/self.P) \
* exp(-2 * sine(pi * np.abs(r)/self.P)**2 \
/self.ell_p**2 - r**2/(2*self.ell_e**2)) \
/ (self.ell_p**2 * self.P)
class dQuasiPeriodic_dellp(QuasiPeriodic):
"""
Log-derivative in order to ell_p
"""
def __init__(self, weight, ell_e, P, ell_p):
super(dQuasiPeriodic_dellp, self).__init__(weight, ell_e, P, ell_p)
self.weight = weight
self.ell_e = ell_e
self.P = P
self.ell_p = ell_p
def __call__(self, r):
return 4 * self.weight**2 * sine(pi*r/self.P)**2 \
* exp(-2 * sine(pi*np.abs(r)/self.P)**2 \
/self.ell_p**2 - r**2/(2*self.ell_e**2)) / self.ell_p**2
##### Rational Quadratic #######################################################
class RationalQuadratic(weightFunction):
"""
Definition of the rational quadratic kernel.
Parameters:
weight = weight/amplitude of the kernel
alpha = weight of large and small scale variations
ell = characteristic length scale to define the kernel "smoothness"
"""
def __init__(self, weight, alpha, ell):
super(RationalQuadratic, self).__init__(weight, alpha, ell)
self.weight = weight
self.alpha = alpha
self.ell = ell
self.type = 'stationary and anisotropic'
self.derivatives = 3 #number of derivatives in this kernel
self.params_size = 3 #number of hyperparameters
def __call__(self, r):
return self.weight**2 / (1+ r**2/ (2*self.alpha*self.ell**2))**self.alpha
class dRationalQuadratic_dweight(RationalQuadratic):
"""
Log-derivative in order to the weight
"""
def __init__(self, weight, alpha, ell):
super(dRationalQuadratic_dweight, self).__init__(weight, alpha, ell)
self.weight = weight
self.alpha = alpha
self.ell = ell
def __call__(self, r):
return 2 * self.weight**2 \
/ (1+ r**2/ (2*self.alpha*self.ell**2))**self.alpha
class dRationalQuadratic_dalpha(RationalQuadratic):
"""
Log-derivative in order to alpha
"""
def __init__(self, weight, alpha, ell):
super(dRationalQuadratic_dalpha, self).__init__(weight, alpha, ell)
self.weight = weight
self.alpha = alpha
self.ell = ell
def __call__(self, r):
return ((r**2/(2*self.alpha*self.ell**2*(r**2/(2*self.alpha*self.ell**2)+1))\
- np.log(r**2/(2*self.alpha*self.ell**2)+1)) \
* self.weight**2 * self.alpha) \
/ (1+r**2/(2*self.alpha*self.ell**2))**self.alpha
class dRationalQuadratic_dell(RationalQuadratic):
"""
Log-derivative in order to ell
"""
def __init__(self, weight, alpha, ell):
super(dRationalQuadratic_dell, self).__init__(weight, alpha, ell)
self.weight = weight
self.alpha = alpha
self.ell = ell
def __call__(self, r):
return r**2 * (1+r**2/(2*self.alpha*self.ell**2))**(-1-self.alpha) \
* self.weight**2 / self.ell**2
##### RQP kernel ###############################################################
class RQP(weightFunction):
"""
Definition of the product between the exponential sine squared kernel
and the rational quadratic kernel that we called RQP kernel.
If I am thinking this correctly then this kernel should tend to the
QuasiPeriodic kernel as alpha increases, although I am not sure if we can
say that it tends to the QuasiPeriodic kernel as alpha tends to infinity.
Parameters:
weight = weight/amplitude of the kernel
ell_e and ell_p = aperiodic and periodic length scales
alpha = alpha of the rational quadratic kernel
P = periodic repetitions of the kernel
"""
def __init__(self, weight, alpha, ell_e, P, ell_p):
super(RQP, self).__init__(weight, alpha, ell_e, P, ell_p)
self.weight = weight
self.alpha = alpha
self.ell_e = ell_e
self.P = P
self.ell_p = ell_p
self.type = 'non-stationary and anisotropic'
self.derivatives = 5 #number of derivatives in this kernel
self.params_size = 5 #number of hyperparameters
def __call__(self, r):
return self.weight**2 * exp(- 2*sine(pi*np.abs(r)/self.P)**2 \
/ self.ell_p**2) \
/(1+ r**2/ (2*self.alpha*self.ell_e**2))**self.alpha
class dRQP_dweight(RQP):
"""
Log-derivative in order to the weight
"""
def __init__(self, weight, alpha, ell_e, P, ell_p):
super(dRQP_dweight, self).__init__(weight, alpha, ell_e, P, ell_p)
self.weight = weight
self.alpha = alpha
self.ell_e = ell_e
self.P = P
self.ell_p = ell_p
def __call__(self, r):
return 2 * self.weight**2 * exp(- 2*sine(pi*np.abs(r)/self.P)**2 \
/ self.ell_p**2) \
/(1+ r**2/ (2*self.alpha*self.ell_e**2))**self.alpha
class dRQP_dalpha(RQP):
"""
Log-derivative in order to alpha
"""
def __init__(self, weight, alpha, ell_e, P, ell_p):
super(dRQP_dalpha, self).__init__(weight, alpha, ell_e, P, ell_p)
self.weight = weight
self.alpha = alpha
self.ell_e = ell_e
self.P = P
self.ell_p = ell_p
def __call__(self, r):
return self.alpha * ((r**2 / (2*self.alpha \
*self.ell_e**2*(r**2/(2*self.alpha*self.ell_e**2)+1)) \
-np.log(r**2/(2*self.alpha*self.ell_e**2)+1)) \
*self.weight**2*exp(-2*sine(pi*np.abs(r)/self.P)**2/self.ell_p**2)) \
/(1+r**2/(2*self.alpha*self.ell_e**2))**self.alpha
class dRQP_delle(RQP):
"""
Log-derivative in order to ell_e
"""
def __init__(self, weight, alpha, ell_e, P, ell_p):
super(dRQP_delle, self).__init__(weight, alpha, ell_e, P, ell_p)
self.weight = weight
self.alpha = alpha
self.ell_e = ell_e
self.P = P
self.ell_p = ell_p
def __call__(self, r):
return (r**2*(1+r**2/(2*self.alpha*self.ell_e**2))**(-1-self.alpha) \
*self.weight**2 \
*exp(-2*sine(pi*np.abs(r)/self.P)**2/self.ell_p**2))/self.ell_e**2
class dRQP_dP(RQP):
"""
Log-derivative in order to P
"""
def __init__(self, weight, alpha, ell_e, P, ell_p):
super(dRQP_dP, self).__init__(weight, alpha, ell_e, P, ell_p)
self.weight = weight
self.alpha = alpha
self.ell_e = ell_e
self.P = P
self.ell_p = ell_p
def __call__(self, r):
return (4*pi*r*self.weight**2*cosine(pi*np.abs(r)/self.P)*sine(pi*np.abs(r)/self.P) \
*exp(-2*sine(pi*np.abs(r)/self.P)**2/self.ell_p**2)) \
/(self.ell_p**2*(1+r**2/(2*self.alpha*self.ell_e**2))**self.alpha*self.P)
class dRQP_dellp(RQP):
"""
Log-derivative in order to ell_p
"""
def __init__(self, weight, alpha, ell_e, P, ell_p):
super(dRQP_dellp, self).__init__(weight, alpha, ell_e, P, ell_p)
self.weight = weight
self.alpha = alpha
self.ell_e = ell_e
self.P = P
self.ell_p = ell_p
def __call__(self, r):
return (4*self.weight**2*sine(pi*np.abs(r)/self.P)**2 \
*exp(-2*sine(pi*np.abs(r)/self.P)**2/self.ell_p**2)) \
/(self.ell_p**2*(1+r**2/(2*self.alpha*self.ell_e**2))**self.alpha)
##### Cosine ###################################################################
class Cosine(weightFunction):
"""
Definition of the cosine kernel.
Parameters:
weight = weight/amplitude of the kernel
P = period
"""
def __init__(self, weight, P):
super(Cosine, self).__init__(weight, P)
self.weight = weight
self.P = P
self.type = 'non-stationary and isotropic'
self.derivatives = 2 #number of derivatives in this kernel
self.params_size = 2 #number of hyperparameters
def __call__(self, r):
return self.weight**2 * cosine(2*pi*np.abs(r) / self.P)
class dCosine_dweight(Cosine):
"""
Log-derivative in order to the weight
"""
def __init__(self, weight, P):
super(dCosine_dweight, self).__init__(weight, P)
self.weight = weight
self.P = P
def __call__(self, r):
return 2 * self.weight**2 * cosine(2*pi*np.abs(r) / self.P)
class dCosine_dP(Cosine):
"""
Log-derivative in order to P
"""
def __init__(self, weight, P):
super(dCosine_dP, self).__init__(weight, P)
self.weight = weight
self.P = P
def __call__(self, r):
return self.weight**2 * r * pi * sine(2*pi*np.abs(r) / self.P) / self.P
##### Laplacian ##############################################################
class Laplacian(weightFunction):
"""
Definition of the Laplacian kernel.
Parameters:
weight = weight/amplitude of the kernel
ell = characteristic length scale
"""
def __init__(self, weight, ell):
super(Laplacian, self).__init__(weight, ell)
self.weight = weight
self.ell = ell
self.type = 'stationary and isotropic'
self.derivatives = 2 #number of derivatives in this kernel
self.params_size = 2 #number of hyperparameters
def __call__(self, r):
return self.weight**2 * exp(- np.abs(r)/self.ell)
class dLaplacian_dweight(Laplacian):
"""
Log-derivative in order to the weight
"""
def __init__(self, weight, ell):
super(dLaplacian_dweight, self).__init__(weight, ell)
self.weight = weight
self.ell = ell
def __call__(self, r):
return 2 * self.weight**2 * exp(- np.abs(r)/self.ell)
class dLaplacian_dell(Laplacian):
"""
Log-derivative in order to ell
"""
def __init__(self, weight, ell):
super(dLaplacian_dell, self).__init__(weight, ell)
self.weight = weight
self.ell = ell
def __call__(self, r):
return self.weight**2 * np.abs(r) * exp(- np.abs(r)/self.ell) / self.ell
##### Exponential ##############################################################
class Exponential(weightFunction):
"""
Definition of the exponential kernel. This kernel arises when
setting v=1/2 in the matern family of kernels
Parameters:
weight = weight/amplitude of the kernel
ell = characteristic length scale
"""
def __init__(self, weight, ell):
super(Exponential, self).__init__(weight, ell)
self.weight = weight
self.ell = ell
self.type = 'stationary and isotropic'
self.derivatives = 2 #number of derivatives in this kernel
self.params_size = 2 #number of hyperparameters
def __call__(self, r):
return self.weight**2 * exp(- np.abs(r)/self.ell)
class dExponential_dweight(Exponential):
"""
Log-derivative in order to the weight
"""
def __init__(self, weight, ell):
super(dExponential_dweight, self).__init__(weight, ell)
self.weight = weight
self.ell = ell
def __call__(self, r):
raise NotImplementedError
class dExpoential_dell(Exponential):
"""
Log-derivative in order to ell
"""
def __init__(self, weight, ell):
super(dExpoential_dell, self).__init__(weight, ell)
self.weight = weight
self.ell = ell
def __call__(self, r):
raise NotImplementedError
##### Matern 3/2 ###############################################################
class Matern32(weightFunction):
"""
Definition of the Matern 3/2 kernel. This kernel arises when setting
v=3/2 in the matern family of kernels
Parameters:
weight = weight/amplitude of the kernel
ell = characteristic length scale
"""
def __init__(self, weight, ell):
super(Matern32, self).__init__(weight, ell)
self.weight = weight
self.ell = ell
self.type = 'stationary and isotropic'
self.derivatives = 2 #number of derivatives in this kernel
self.params_size = 2 #number of hyperparameters
def __call__(self, r):
return self.weight**2 *(1.0 + np.sqrt(3.0)*np.abs(r)/self.ell) \
*np.exp(-np.sqrt(3.0)*np.abs(r) / self.ell)
class dMatern32_dweight(Matern32):
"""
Log-derivative in order to the weight
"""
def __init__(self, weight, ell):
super(dMatern32_dweight, self).__init__(weight, ell)
self.weight = weight
self.ell = ell
def __call__(self, r):
return 2 * self.weight**2 *(1.0 + np.sqrt(3.0)*np.abs(r)/self.ell) \
*np.exp(-np.sqrt(3.0)*np.abs(r) / self.ell)
class dMatern32_dell(Matern32):
"""
Log-derivative in order to ell
"""
def __init__(self, weight, ell):
super(dMatern32_dell, self).__init__(weight, ell)
self.weight = weight
self.ell = ell
def __call__(self, r):
return (sqrt(3) * r * (1+ (sqrt(3) * r) / self.ell) \
*exp(-(sqrt(3)*r) / self.ell) * self.weight**2) / self.ell \
-(sqrt(3) * r * exp(-(sqrt(3)*r) / self.ell)*self.weight**2)/self.ell
#### Matern 5/2 ################################################################
class Matern52(weightFunction):
"""
Definition of the Matern 5/2 kernel. This kernel arises when setting
v=5/2 in the matern family of kernels
Parameters:
weight = weight/amplitude of the kernel
ell = characteristic length scale
"""
def __init__(self, weight, ell):
super(Matern52, self).__init__(weight, ell)
self.weight = weight
self.ell = ell
self.type = 'stationary and isotropic'
self.derivatives = 2 #number of derivatives in this kernel
self.params_size = 2 #number of hyperparameters
def __call__(self, r):
return self.weight**2 * (1.0 + ( 3*np.sqrt(5)*self.ell*np.abs(r) \
+5*np.abs(r)**2)/(3*self.ell**2) ) \
*exp(-np.sqrt(5.0)*np.abs(r)/self.ell)
class dMatern52_dweight(Matern52):
"""
Log-derivative in order to the weight
"""
def __init__(self, weight, ell):
super(dMatern52_dweight, self).__init__(weight, ell)
self.weight = weight
self.ell = ell
def __call__(self, r):
return 2 * self.weight**2 * (1.0 + ( 3*np.sqrt(5)*self.ell*np.abs(r) \
+5*np.abs(r)**2)/(3*self.ell**2) ) \
*exp(-np.sqrt(5.0)*np.abs(r)/self.ell)
class dMatern52_dell(Matern52):
"""
Log-derivative in order to ell
"""
def __init__(self, weight, ell):
super(dMatern52_dell, self).__init__(weight, ell)
self.weight = weight
self.ell = ell
def __call__(self, r):
return self.ell * ((sqrt(5)*r*(1+(sqrt(5)*r) \
/self.ell+(5*r**2)/(3*self.ell**2)) \
*exp(-(sqrt(5)*r)/self.ell)*self.weight**2) \
/self.ell**2 +(-(sqrt(5)*r)/self.ell**2-(10*r**2) \
/(3*self.ell**3)) \
*exp(-(sqrt(5)*r)/self.ell)*self.weight**2)
#### Linear ####################################################################
class Linear(weightFunction):
"""
Definition of the Linear kernel.
weight = weight/amplitude of the kernel
c = constant
"""
def __init__(self, weight, c):
super(Linear, self).__init__(weight, c)
self.weight = weight
self.c = c
self.type = 'non-stationary and anisotropic'
self.derivatives = 2 #number of derivatives in this kernel
self.params_size = 2 #number of hyperparameters
def __call__(self, r, t1, t2):
return self.weight**2 * (t1 - self.c) * (t2 - self.c)
class dLinear_dweight(Linear):
"""
Log-derivative in order to the weight
"""
def __init__(self, weight, c):
super(dLinear_dweight, self).__init__(weight, c)
self.weight = weight
self.c = c
def __call__(self, r, t1, t2):
return 2 * self.weight**2 * (t1 - self.c) * (t2 - self.c)
class dLinear_dc(Linear):
"""
Log-derivative in order to c
"""
def __init__(self, weight, c):
super(dLinear_dc, self).__init__(weight, c)
self.weight = weight
self.c = c
def __call__(self, r, t1, t2):
return self.c * (-t1 - t2 + 2*self.c) * self.weight**2
##### Gamma-exponential ########################################################
class GammaExp(weightFunction):
"""
Definition of the gamma-exponential kernel
weight = weight/amplitude
gamma = shape parameter ( 0 < gamma <= 2)
ell = length scale
"""
def __init__(self, weight, gamma, ell):
super(GammaExp, self).__init__(weight, gamma, ell)
self.weight = weight
self.gamma = gamma
self.ell = ell
self.type = 'non-stationary and anisotropic'
self.derivatives = 3 #number of derivatives in this kernel
self.params_size = 3 #number of hyperparameters
def __call__(self, r):
return self.weight**2 * exp( -(np.abs(r)/self.ell)**self.gamma)
class dGammaExp_dweight(GammaExp):
"""
Log-derivative in order to the weight
"""
def __init__(self, weight, gamma, ell):
super(dGammaExp_dweight, self).__init__(weight, gamma, ell)
self.weight = weight
self.gamma = gamma
self.ell = ell
def __call__(self, r):
return 2 * self.weight**2 * exp( -(np.abs(r)/self.ell)**self.gamma)
class dGammaExp_dgamma(GammaExp):
"""
Log-derivative in order to gamma
"""
def __init__(self, weight, gamma, ell):
super(dGammaExp_dgamma, self).__init__(weight, gamma, ell)
self.weight = weight
self.gamma = gamma
self.ell = ell
def __call__(self, r):
return -self.weight**2 * self.gamma * (np.abs(r)/self.ell)**self.gamma \
*np.log(np.abs(r)/self.ell) * exp(-(np.abs(r)/self.ell)**self.gamma)
class dGammaExp_dell(GammaExp):
"""
Log-derivative in order to ell
"""
def __init__(self, weight, gamma, ell):
super(dGammaExp_dell, self).__init__(weight, gamma, ell)
self.weight = weight
self.gamma = gamma
self.ell = ell
def __call__(self, r):
return self.weight**2 * (np.abs(r)/self.ell)**self.gamma \
* self.gamma*exp(-(np.abs(r)/self.ell)**self.gamma)
##### Polynomial ###############################################################
class Polynomial(weightFunction):
"""
Definition of the polynomial kernel
weight = weight/amplitude
a = real value > 0
b = real value >= 0
c = integer value
"""
def __init__(self, weight, a, b, c):
super(Polynomial, self).__init__(weight, a, b, c)
self.weight = weight
self.a = a
self.b = b
self.c = c
def __call__(self, r, t1, t2):
return self.weight**2 * (self.a * t1 * t2 + self.b)**self.c
class dPolynomial_dweight(Polynomial):
"""
Log-derivative in order to the weight
"""
def __init__(self, weight, a, b, c):
super(dPolynomial_dweight, self).__init__(weight, a, b, c)
self.weight = weight
self.a = a
self.b = b
self.c = c
def __call__(self, r, t1, t2):
return 2 * self.weight**2 * (self.a * t1 * t2 + self.b)**self.c
class dPolynomial_da(Polynomial):
"""
Log-derivative in order to a
"""
def __init__(self, weight, a, b, c):
super(dPolynomial_da, self).__init__(weight, a, b, c)
self.weight = weight
self.a = a
self.b = b
self.c = c
def __call__(self, r, t1, t2):
return self.weight**2 * self.a * self.c * t1 * t2 \
* (self.b + self.a * t1 * t2)**(self.c-1)
class dPolynomial_db(Polynomial):
"""
Log-derivative in order to b
"""
def __init__(self, weight, a, b, c):
super(dPolynomial_db, self).__init__(weight, a, b, c)
self.weight = weight
self.a = a
self.b = b
self.c = c
def __call__(self, r, t1, t2):
return self.weight**2 * self.c * self.b \
* (self.b +self.a * t1 * t2)**(self.c-1)
class dPolynomial_dc(Polynomial):
"""
Log-derivative in order to c
"""
def __init__(self, weight, a, b, c):
super(dPolynomial_dc, self).__init__(weight, a, b, c)
self.weight = weight
self.a = a
self.b = b
self.c = c
def __call__(self, r, t1, t2):
return self.weight**2 * self.c * (self.b + self.a * t1 * t2)**self.c \
* np.log(self.a * t1 * t2 + self.b)
##### END
```
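Since the d*_d* classes above are log-derivatives (derivatives with respect to the logarithm of a hyperparameter), they can be checked against a central finite difference of the base kernel taken in log space; a minimal sketch, assuming the classes above are in scope and with an arbitrary grid and step size:
```python
# Hedged sketch: compare dSquaredExponential_dell (a log-derivative) with a
# central difference of SquaredExponential in log(ell). Values are illustrative.
import numpy as np

r = np.linspace(-3, 3, 7)
w, ell, h = 1.3, 0.8, 1e-5
analytic = dSquaredExponential_dell(w, ell)(r)
numeric = (SquaredExponential(w, ell * np.exp(h))(r)
           - SquaredExponential(w, ell * np.exp(-h))(r)) / (2 * h)
print(np.allclose(analytic, numeric, atol=1e-6))    # expected: True
```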
#### File: examples/Sun/priors.py
```python
import numpy as np
time,rv,rverr,bis,biserr,fwhm,fwhmerr= np.loadtxt("sun50points.txt",
skiprows = 1, unpack = True,
usecols = (0,1,2,7,8,9,10))
val1, val1err, val2,val2err, val3, val3err = rv, rverr, bis, biserr, fwhm, fwhmerr
##### Setting priors #####
from scipy import stats
from loguniform import ModifiedLogUniform
stats.loguniform = stats.reciprocal
#node function
neta1 = stats.loguniform(0.1, 2*val1.ptp())
neta2 = stats.loguniform(np.ediff1d(time).mean(), time.ptp())
neta3 = stats.uniform(10, 50 -10)
neta4 = stats.loguniform(0.1, 5)
#weight function
weta1_1 = stats.loguniform(0.1, 2*val1.ptp())
weta2_1 = stats.loguniform(np.ediff1d(time).mean(), 10*time.ptp())
weta1_2 = stats.loguniform(0.1, 2*val2.ptp())
weta2_2 = stats.loguniform(np.ediff1d(time).mean(), 10*time.ptp())
weta1_3 = stats.loguniform(0.1, 2*val3.ptp())
weta2_3 = stats.loguniform(np.ediff1d(time).mean(), 10*time.ptp())
#Mean function
#(1/pi)*(1/(1+slope*slope))
slope1 = stats.norm(0, 1)
offset1 = stats.uniform(val1.min(), val1.max() -val1.min())
slope2 = stats.norm(0, 1)
offset2 = stats.uniform(val2.min(), val2.max() -val2.min())
slope3 = stats.norm(0, 1)
offset3 = stats.uniform(val3.min(), val3.max() -val3.min())
#Jitter
jitt1 = ModifiedLogUniform(0.1, 2*val1.ptp())
jitt2 = ModifiedLogUniform(0.1, 2*val2.ptp())
jitt3 = ModifiedLogUniform(0.1, 2*val3.ptp())
def priors():
return np.array([neta1, neta2, neta3, neta4,
weta1_1, weta2_1, weta1_2, weta2_2, weta1_3, weta2_3,
slope1, offset1, slope2, offset2, slope3, offset3,
jitt1, jitt2, jitt3])
```
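One typical way to consume these priors is to draw an initial hyperparameter vector from them; a minimal sketch, assuming `ModifiedLogUniform` exposes the same `.rvs()` interface as the frozen scipy distributions:
```python
# Hedged sketch: sample one value from every prior returned by priors().
# The seed is only for reproducibility and is not part of the original script.
import numpy as np

np.random.seed(42)
p0 = np.array([dist.rvs() for dist in priors()])
print(p0.shape)   # (19,) -- one draw per hyperparameter in the priors() array
```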
|
{
"source": "jdavidrcamacho/gpyrn",
"score": 3
}
|
#### File: gpyrn/gpyrn/covfunc.py
```python
from gpyrn.meanfunc import array_input
import numpy as np
class covFunction():
"""
A base class for covariance functions (kernels) used for nodes and weights
in the GPRN.
"""
def __init__(self, *args):
""" Puts all kernel arguments in an array pars """
self.pars = np.array(args, dtype=float)
# self.pars[self.pars > 1e50] = 1e50
def __call__(self, r, t1=None, t2=None):
"""
r = t - t'
Not sure if this is a good approach, since it will make our life harder
when defining certain non-stationary kernels, e.g linear kernel.
"""
raise NotImplementedError
def __repr__(self):
""" Representation of each kernel instance """
if hasattr(self, '_param_names'):
pars = ', '.join(
[f'{p}={v}' for p, v in zip(self._param_names, self.pars)])
else:
pars = ', '.join(map(str, self.pars))
return f"{self.__class__.__name__}({pars})"
def get_parameters(self):
return self.pars
@array_input
def set_parameters(self, p):
msg = f'too few parameters for kernel {self.__class__.__name__}'
assert len(p) >= self.pars.size, msg
if len(p) > self.pars.size:
p = list(p)
self.pars = np.array(p[:self.pars.size], dtype=float)
for _ in range(self.pars.size):
p.pop(0)
return np.array(p)
else:
self.pars = p
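# Illustrative note (not from the original file): for a kernel whose pars has
# size 2, set_parameters([1.0, 2.0, 3.0]) stores [1.0, 2.0] in self.pars and
# returns array([3.0]), so a single flat hyperparameter vector can be split
# across several kernels by calling set_parameters on each in turn.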
def __add__(self, b):
return Sum(self, b)
def __radd__(self, b):
return self.__add__(b)
def __mul__(self, b):
return Multiplication(self, b)
def __rmul__(self, b):
return self.__mul__(b)
class _operator(covFunction):
""" To allow operations between two kernels """
def __init__(self, k1, k2):
self.k1 = k1
self.k2 = k2
self.kerneltype = 'complex'
self.pars = np.r_[self.k1.pars, self.k2.pars]
class Sum(_operator):
""" To allow the sum of kernels """
def __call__(self, r):
return self.k1(r) + self.k2(r)
def __repr__(self):
return "{0} + {1}".format(self.k1, self.k2)
class Multiplication(_operator):
""" To allow the multiplication of kernels """
def __call__(self, r):
return self.k1(r) * self.k2(r)
def __repr__(self):
return "{0} * {1}".format(self.k1, self.k2)
##### Constant #################################################################
class Constant(covFunction):
"""
This kernel returns the square of its constant argument c
Parameters
----------
c: float
Constant
"""
_param_names = 'c',
_tag = 'C'
def __init__(self, c):
super(Constant, self).__init__(c)
def __call__(self, r):
c = self.pars[0]
return np.full_like(r, c**2)
##### White Noise ##############################################################
class WhiteNoise(covFunction):
"""
Definition of the white noise kernel.
Parameters
----------
wn: float
White noise amplitude
"""
_param_names = 'wn',
_tag = 'WN'
def __init__(self, wn):
super(WhiteNoise, self).__init__(wn)
def __call__(self, r):
wn = self.pars[0]
if r.ndim == 2 and r[0, :].shape == r[:, 0].shape:
return wn**2 * np.diag(np.diag(np.ones_like(r)))
return np.full_like(r, wn**2)
##### Squared exponential ######################################################
class SquaredExponential(covFunction):
"""
Squared Exponential kernel, also known as radial basis function or RBF
kernel in other works.
Parameters
----------
theta: float
Amplitude
ell: float
Length-scale
"""
_param_names = 'theta', 'ell'
_tag = 'SE'
def __init__(self, theta, ell):
super(SquaredExponential, self).__init__(theta, ell)
def __call__(self, r):
return self.pars[0]**2 * np.exp(-0.5 * r**2 / self.pars[1]**2)
##### Periodic #################################################################
class Periodic(covFunction):
"""
Definition of the periodic kernel.
Parameters
----------
theta: float
Amplitude
P: float
Period
lp: float
Length scale
"""
_param_names = 'theta', 'P', 'lp'
_tag = 'P'
def __init__(self, theta, P, lp):
super(Periodic, self).__init__(theta, P, lp)
def __call__(self, r):
θ, P, lp = self.pars
return θ**2 * np.exp(-2 * np.sin(np.pi * np.abs(r) / P)**2 / lp**2)
##### Quasi Periodic ###########################################################
class QuasiPeriodic(covFunction):
"""
This kernel is the product between the exponential sine squared kernel
and the squared exponential kernel, commonly known as the quasi-periodic
kernel
Parameters
----------
theta: float
Amplitude
le: float
Evolutionary time scale
P: float
Kernel periodicity
lp: float
Length scale of the periodic component
"""
_param_names = 'theta', 'le', 'P', 'lp'
_tag = 'QP'
def __init__(self, theta, le, P, lp):
super(QuasiPeriodic, self).__init__(theta, le, P, lp)
def __call__(self, r):
θ, le, P, lp = self.pars
return θ**2 * np.exp(-2 * np.sin(np.pi * np.abs(r) / P)**2 / lp**2 - \
r**2 / (2 * le**2))
##### Rational Quadratic #######################################################
class RationalQuadratic(covFunction):
"""
Definition of the rational quadratic kernel.
Parameters
----------
theta: float
Amplitude of the kernel
alpha: float
Amplitude of large and small scale variations
ell: float
Characteristic length scale to define the kernel "smoothness"
"""
_param_names = 'theta', 'alpha', 'ell'
_tag = 'RQ'
def __init__(self, theta, alpha, ell):
super(RationalQuadratic, self).__init__(theta, alpha, ell)
def __call__(self, r):
θ, α, ell = self.pars
return θ**2 * (1 + 0.5 * r**2 / (α * ell**2))**(-α)
##### RQP kernel ###############################################################
class RQP(covFunction):
"""
Definition of the product between the exponential sine squared kernel and
the rational quadratic kernel that we called RQP kernel. If I am thinking
this correctly then this kernel should tend to the QuasiPeriodic kernel as
alpha increases, although I am not sure if we can say that it tends to the
QuasiPeriodic kernel as alpha tends to infinity.
Parameters
----------
theta: float
Amplitude
alpha: float
Alpha of the rational quadratic kernel
ell_e: float
Aperiodic length scale
P: float
Periodic repetitions of the kernel
ell_p: float
Periodic length scale
"""
_param_names = 'theta', 'alpha', 'ell_e', 'ell_p', 'P'
_tag = 'RQP'
def __init__(self, theta, alpha, ell_e, P, ell_p):
super(RQP, self).__init__(theta, alpha, ell_e, P, ell_p)
def __call__(self, r):
θ, α, ℓe, P, ℓp = self.pars
return θ**2 * np.exp(-2 * np.sin(np.pi * np.abs(r) / P)**2 /
ℓp**2) * (1 + r**2 / (2 * α * ℓe**2))**(-α)
##### Cosine ###################################################################
class COSINE(covFunction):
"""
Definition of the cosine kernel
Parameters
----------
theta: float
Amplitude
P: float
Period
"""
_param_names = 'theta', 'P'
_tag = 'COS'
def __init__(self, theta, P):
super(COSINE, self).__init__(theta, P)
def __call__(self, r):
return self.pars[0]**2 * np.cos(2 * np.pi * np.abs(r) / self.pars[1])
##### Laplacian ##############################################################
class Laplacian(covFunction):
"""
Definition of the Laplacian kernel
Parameters
----------
theta: float
Amplitude
ell: float
Characteristic length scale
"""
_param_names = 'theta', 'ell'
_tag = 'LAP'
def __init__(self, theta, ell):
super(Laplacian, self).__init__(theta, ell)
def __call__(self, r):
return self.pars[0]**2 * np.exp(-np.abs(r) / self.pars[1])
##### Exponential ##############################################################
class Exponential(covFunction):
"""
Definition of the exponential kernel
Parameters
----------
theta: float
Amplitude
ell: float
Characteristic length scale
"""
_param_names = 'theta', 'ell'
_tag = 'EXP'
def __init__(self, theta, ell):
super(Exponential, self).__init__(theta, ell)
def __call__(self, r):
return self.pars[0]**2 * np.exp(-np.abs(r) / self.pars[1])
##### Matern 3/2 ###############################################################
class Matern32(covFunction):
"""
Definition of the Matern 3/2 kernel. This kernel arises when setting
v=3/2 in the matern family of kernels
Parameters
----------
theta: float
Amplitude
ell: float
Characteristic length scale
"""
_param_names = 'theta', 'ell'
_tag = 'M32'
def __init__(self, theta, ell):
super(Matern32, self).__init__(theta, ell)
def __call__(self, r):
return self.pars[0]**2 * (
1.0 + np.sqrt(3.0) * np.abs(r) / self.pars[1]) * np.exp(
-np.sqrt(3.0) * np.abs(r) / self.pars[1])
#### Matern 5/2 ################################################################
class Matern52(covFunction):
"""
Definition of the Matern 5/2 kernel. This kernel arises when setting v=5/2
in the matern family of kernels
Parameters
----------
theta: float
Amplitude
ell: float
Characteristic length scale
"""
_param_names = 'theta', 'ell'
_tag = 'M52'
def __init__(self, theta, ell):
super(Matern52, self).__init__(theta, ell)
def __call__(self, r):
return self.pars[0]**2 * (
1.0 +
(3 * np.sqrt(5) * self.pars[1] * np.abs(r) + 5 * np.abs(r)**2) /
(3 * self.pars[1]**2)) * np.exp(
-np.sqrt(5.0) * np.abs(r) / self.pars[1])
#### Linear ####################################################################
class Linear(covFunction):
"""
Definition of the Linear kernel
Parameters
----------
theta: float
Amplitude (should we even have an amplitude???)
c: float
Constant
"""
def __init__(self, theta, c):
super(Linear, self).__init__(theta, c)
self.tag = 'LIN'
self.theta = theta
self.c = c
def __call__(self, r, t1, t2):
return (t1 - self.pars[1]) * (t2 - self.pars[1])
##### Gamma-exponential ########################################################
class GammaExp(covFunction):
"""
Definition of the gamma-exponential kernel
Parameters
----------
theta: float
Amplitude
gamma: float
Shape parameter ( 0 < gamma <= 2)
l: float
Length scale
Returns
-------
"""
def __init__(self, theta, gamma, l):
super(GammaExp, self).__init__(theta, gamma, l)
self.tag = 'GammaExp'
self.theta = theta
self.gamma = gamma
self.l = l
def __call__(self, r):
return self.pars[0]**2 *np.exp(-(np.abs(r)/self.pars[2])**self.pars[1])
##### Polynomial ###############################################################
class Polynomial(covFunction):
"""
Definition of the polynomial kernel
Parameters
----------
theta: float
Amplitude ???
a: float
Real value > 0
b: float
Real value >= 0
c: int
Integer value
wn: float
White noise amplitude
Returns
-------
"""
def __init__(self, theta, a, b, c):
super(Polynomial, self).__init__(theta, a, b, c)
self.tag = 'POLY'
self.theta = theta
self.a = a
self.b = b
self.c = c
def __call__(self, r, t1, t2):
return (self.pars[1] * t1 * t2 + self.pars[2])**self.pars[3]
##### Piecewise ################################################################
class Piecewise(covFunction):
"""
WARNING: EXPERIMENTAL KERNEL
Parameters
----------
"""
def __init__(self, eta):
super(Piecewise, self).__init__(eta)
self.eta = eta
self.type = 'unknown'
def __call__(self, r):
r = r/(0.5*self.pars[0])
piecewise = (3*np.abs(r) +1) * (1 - np.abs(r))**3
piecewise = np.where(np.abs(r)>1, 0, piecewise)
return piecewise
### END
```
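Because `covFunction` overloads `+` and `*`, the kernels in this file compose directly through the `Sum` and `Multiplication` operators; a minimal sketch with placeholder hyperparameters:
```python
# Hedged sketch: build a composite covariance function and evaluate it on a
# vector of lags. Hyperparameter values are illustrative only.
import numpy as np

r = np.linspace(-5, 5, 11)
k = SquaredExponential(theta=1.0, ell=2.0) * Periodic(theta=1.0, P=3.0, lp=0.7) \
    + WhiteNoise(wn=0.1)
print(k)             # SquaredExponential(...) * Periodic(...) + WhiteNoise(...)
print(k(r).shape)    # (11,)
```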
|
{
"source": "jdavidrcamacho/mini-frame",
"score": 3
}
|
#### File: mini-frame/miniframe/kernels_autograd.py
```python
import autograd.numpy as np
from autograd import elementwise_grad as egrad
class kernel(object):
""" Definition the base kernel class """
is_kernel = True
def __init__(self, *args):
""" Puts all kernel arguments in an array pars """
self.pars = np.array(args)
def __call__(self, r):
""" r = t - t' """
raise NotImplementedError
def __add__(self, b):
if not hasattr(b, "is_kernel"):
return Sum(Constant(c=float(b)), self)
return Sum(self, b)
def __radd__(self, b):
return self.__add__(b)
def __mul__(self, b):
if not hasattr(b, "is_kernel"):
return Product(Constant(c=float(b)), self)
return Product(self, b)
def __rmul__(self, b):
return self.__mul__(b)
def __repr__(self):
""" Representation of each kernel instance """
return "{0}({1})".format(self.__class__.__name__,
", ".join(map(str, self.pars)))
class _operator(kernel):
""" To allow operations between two kernels """
def __init__(self, k1, k2):
self.k1 = k1
self.k2 = k2
@property
def pars(self):
return np.append(self.k1.pars, self.k2.pars)
class Sum(_operator):
""" Sum of two kernels """
def __repr__(self):
return "{0} + {1}".format(self.k1, self.k2)
def __call__(self, r):
return self.k1(r) + self.k2(r)
class Product(_operator):
""" Product of two kernels """
def __repr__(self):
return "{0} * {1}".format(self.k1, self.k2)
def __call__(self, r):
return self.k1(r) * self.k2(r)
class Constant(kernel):
""" This kernel returns its constant argument c """
def __init__(self, c):
super(Constant, self).__init__(c)
self.c = c
def __call__(self, r):
return self.c * np.ones_like(r)
return0 = lambda self, r: np.zeros_like(r)
d_t1 = return0
d_t2 = return0
dd_t = return0
ddd_t1 = return0
ddd_t2 = return0
dddd_t = return0
class SquaredExponential(kernel):
"""
Squared Exponential kernel, also known as radial basis function
(RBF kernel) in other works.
Parameters:
ell = length-scale, lambda in the paper
"""
def __init__(self, ell):
super(SquaredExponential, self).__init__(ell)
self.ell = ell
def __call__(self, r):
return np.exp( -0.5 * (r**2)/(self.ell**2) )
def d_t1(self, r):
""" derivative with respect to t1 (or t, or ti in the paper) """
return egrad(self.__call__)(r)
def _d_t1_check(self, r):
""" hand-coded derivative to check """
f1 = r
f2 = self.ell**2
return -f1/f2 *np.exp(-0.5*f1*f1/f2)
def d_t2(self, r):
""" derivative with respect to t2 (or t', or tj in the paper) """
return -egrad(self.__call__)(r)
def _d_t2_check(self, r):
""" hand-coded derivative to check """
f1 = r
f2 = self.ell**2
return f1/f2 *np.exp(-0.5*f1*f1/f2)
# now comes the magic...
def dd_t(self, r):
""" second derivative with respect to t1 and t2 """
# same as ddSE_dt2dt1
return egrad(self.d_t2)(r)
def ddd_t1(self, r):
""" third derivative with respect to t1 """
# same as dddSE_dt2ddt1
return egrad(self.dd_t)(r)
def ddd_t2(self, r):
""" third derivative with respect to t2 """
# same as dddSE_ddt2dt1
return -egrad(self.dd_t)(r)
def dddd_t(self, r):
""" fourth derivative with respect to t1 and t2 """
# same as ddddSE_ddt2ddt1
return egrad(self.ddd_t2)(r)
class QuasiPeriodic(kernel):
"""
This kernel is the product between the exponential sine squared kernel
and the squared exponential kernel. It is known as the quasi-periodic kernel.
Equation 27 in the paper.
Parameters:
ell_p = length scale of the periodic component
ell_e = evolutionary time scale
period
"""
def __init__(self, ell_p, ell_e, period):
super(QuasiPeriodic, self).__init__(ell_p, ell_e, period)
self.ell_p = ell_p
self.ell_e = ell_e
self.period = period
def __call__(self, r):
s = np.sin(np.pi*r/self.period)**2
return np.exp( - 2*s/(self.ell_p**2) - (r*r)/(2*self.ell_e**2))
def d_t1(self, r):
""" derivative with respect to t1 (or t, or ti in the paper) """
# same as dQP_dt1
return egrad(self.__call__)(r)
def d_t2(self, r):
""" derivative with respect to t2 (or t', or tj in the paper) """
# same as dQP_dt2
return -egrad(self.__call__)(r)
def dd_t(self, r):
""" second derivative with respect to t1 and t2 """
# same as ddQP_dt2dt1
return egrad(self.d_t2)(r)
def ddd_t1(self, r):
""" third derivative with respect to t1 """
# same as dddQP_dt2ddt1
return egrad(self.dd_t)(r)
def ddd_t2(self, r):
""" third derivative with respect to t2 """
# same as dddQP_ddt2dt1
return -egrad(self.dd_t)(r)
def dddd_t(self, r):
""" fourth derivative with respect to t1 and t2 """
# same as ddddQP_ddt2ddt1
return egrad(self.ddd_t2)(r)
```
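A short, hedged sketch of how these autograd-backed kernels might be combined and checked; the import path simply mirrors the file location above and the parameter values are arbitrary:

```python
import numpy as np
from miniframe.kernels_autograd import SquaredExponential, QuasiPeriodic, Constant

r = np.linspace(-5.0, 5.0, 201)            # time lags r = t - t'
k = 2.0 * SquaredExponential(ell=1.5) + Constant(c=0.1)
print(k)                                   # composed expression via __repr__
qp = QuasiPeriodic(ell_p=0.5, ell_e=10.0, period=3.0)
print(qp(r)[:3])

# The autograd derivative agrees with the hand-coded check method.
se = SquaredExponential(ell=1.5)
print(np.allclose(se.d_t1(r), se._d_t1_check(r)))
```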
|
{
"source": "jdavidrcamacho/PyTank",
"score": 3
}
|
#### File: PyTank/pytank/classes.py
```python
import pygame
import numpy as np
class bgColor(object):
def __init__(self, color):
self.background = color
class tank(object):
def __init__(self, x, y, size, screen):
self.xinit, self.yinit, self.screen = x, y, screen
self.tankRADIUS = size
self.tankGUN = 2*size
self.tankTRACK = size
self.tankCOLOR = pygame.Color('black')
self.xfinit, self.yfinit = x+size, y+size
def showTank(self, showCOLOR = False, angle=False):
if showCOLOR:
pass
else:
showCOLOR = self.tankCOLOR
pygame.draw.circle(self.screen, showCOLOR,
(self.xinit, self.yinit), self.tankRADIUS)
pygame.draw.line(self.screen, showCOLOR,
[self.xinit, self.yinit],
[self.xinit+self.tankGUN,
self.yinit], 5)
pygame.draw.line(self.screen, showCOLOR,
[self.xinit-self.tankRADIUS-1, self.yinit-self.tankRADIUS-1],
[self.xinit+self.tankRADIUS-1, self.yinit-self.tankRADIUS-1],
10)
pygame.draw.line(self.screen, showCOLOR,
[self.xinit+self.tankRADIUS-1, self.yinit+self.tankRADIUS-1],
[self.xinit-self.tankRADIUS-1, self.yinit+self.tankRADIUS-1],
10)
if angle:
pass
# self.yinit = int(self.yinit+angle)
# pygame.draw.line(self.screen, showCOLOR,
# [self.xinit, self.yinit],
#
# [self.xinit+self.tankGUN,
# self.yinit], 5)
def updateTank(self, bgCOLOR, pressed = False, mouse = False):
if pressed:
self.showTank(bgCOLOR)
            if pressed == 100:   # pygame key code for 'd': move right
                self.xinit = self.xinit+5
            if pressed == 97:    # pygame key code for 'a': move left
                self.xinit = self.xinit-5
self.showTank(self.tankCOLOR, self.xinit)
if mouse:
self.showTank(bgCOLOR)
            angle = np.arctan((self.yinit-mouse)/(self.xinit+self.tankRADIUS))  # gun angle towards the mouse, in radians
angle = angle*180/np.pi
self.showTank(self.tankCOLOR, angle)
else:
self.showTank(bgCOLOR)
self.showTank(self.tankCOLOR)
```
|
{
"source": "jdavidrcamacho/SWEET-Cat",
"score": 4
}
|
#### File: jdavidrcamacho/SWEET-Cat/newline_clear.py
```python
import argparse
def _parse():
""" Calculate the surface gravity from M and R """
p = argparse.ArgumentParser(description = 'Remove new line from last line of file.')
p.add_argument('filein', help = 'file to edit last line', type = str)
return p.parse_args()
def remove_newline_last_line(filein):
""" Remove new line from last line of file """
f = open(filein, 'r')
d = f.read()
f.close()
d = d[:-1]
open(filein, 'w').write(d)
return
def main():
args = _parse()
remove_newline_last_line(args.filein)
if __name__ == '__main__':
main()
```
#### File: jdavidrcamacho/SWEET-Cat/ParallaxSpec.py
```python
import numpy as np
def bolcor(teff):
"""
Calculate the bolometric correction, given the temperature
"""
lteff = np.log10(teff)
bcflow=np.nan
if lteff < 3.7:
bcflow = -0.190537291496456e+05 + 0.155144866764412e+05 * lteff -\
0.421278819301717e+04 * (lteff * lteff) +\
0.381476328422343e+03 * (lteff*lteff*lteff)
    elif lteff >= 3.7 and lteff < 3.9:
bcflow = -0.370510203809015e+05 + 0.385672629965804e+05 * lteff -\
0.150651486316025e+05 * (lteff * lteff) +\
0.261724637119416e+04 * (lteff*lteff*lteff) -\
0.170623810323864e+03 * (lteff * lteff * lteff * lteff)
else:
bcflow = -0.118115450538963e+06 + 0.137145973583929e+06 * lteff -\
0.636233812100225e+05 * (lteff * lteff) +\
0.147412923562646e+05 * (lteff * lteff * lteff) -\
0.170587278406872e+04 * (lteff * lteff * lteff * lteff) +\
0.788731721804990e+02 * (lteff * lteff * lteff * lteff * lteff)
return bcflow
def parallax(teff,eteff, logg,elogg,vmag,evmag, mass,emass, Av,eAv):
"""
Calculate the parallax, given the mass Santos 2004
"""
if eteff=='NULL' or elogg=='NULL' or evmag=='NULL' or emass=='NULL' or eAv=='NULL':
bcflow = bolcor(teff)
return 10.**((logg - 4.44 - np.log(mass)/np.log(10.) - 4.*np.log(teff) / np.log(10.) + \
4.*np.log(5777.)/np.log(10.) - 0.4*(vmag + bcflow-Av) - 0.11) * 0.5) * 1000, np.nan
ntrials = 10000
randomteff = float(teff) + float(eteff)*np.random.randn(ntrials)
randomlogg = float(logg) + float(elogg)*np.random.randn(ntrials)
randommass = abs(mass + emass*np.random.randn(ntrials))
randomvmag = float(vmag) + float(evmag)*np.random.randn(ntrials)
randomAv = Av + eAv*np.random.randn(ntrials)
par=[]
for i in range(ntrials):
bcflow = bolcor(randomteff[i])
par.append(10.**((randomlogg[i] - 4.44 - np.log(randommass[i])/np.log(10.) \
- 4.*np.log(randomteff[i]) / np.log(10.) + 4.*np.log(5777.)/np.log(10.) - 0.4*\
(randomvmag[i] + bcflow-randomAv[i]) - 0.11) * 0.5) * 1000)
sig=np.sqrt(np.sum((par - np.mean(par))**2) / (ntrials - 1))
return np.mean(par),sig
```
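The parallax routine propagates the observational uncertainties by Monte Carlo, drawing Gaussian samples for each input and returning the mean and scatter of the resulting parallaxes. A minimal sketch with made-up, roughly solar values (the import path mirrors the file location; every number is a placeholder):

```python
from ParallaxSpec import parallax

# Placeholder inputs: Teff [K], logg, V magnitude, mass [Msun], extinction Av,
# each with its uncertainty.
pi_mean, pi_sigma = parallax(teff=5777, eteff=50, logg=4.44, elogg=0.05,
                             vmag=8.0, evmag=0.01, mass=1.0, emass=0.05,
                             Av=0.0, eAv=0.01)
print(pi_mean, pi_sigma)
```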
|
{
"source": "j-davies-astro/tangos",
"score": 2
}
|
#### File: tangos/core/creator.py
```python
from __future__ import absolute_import
from __future__ import print_function
from sqlalchemy import Column, Integer, DateTime, Text
from sqlalchemy.orm import Session
from sqlalchemy import event
from . import Base, get_default_session
_current_creator = None
class Creator(Base):
__tablename__ = 'creators'
id = Column(Integer, primary_key=True)
command_line = Column(Text)
dtime = Column(DateTime)
host = Column(Text)
username = Column(Text)
cwd = Column(Text)
def __repr__(self):
return "<Creator " + self.username + " on " + self.host + " @ " + self.dtime.strftime("%d/%m/%y %H:%M") + " via " + self.command_line.split(" ")[0].split("/")[-1] + ">"
def __init__(self, argv=None):
import socket
import getpass
import datetime
import os
        if argv is None:
import sys
argv = sys.argv
self.command_line = " ".join(argv)
self.host = socket.gethostname()
self.username = getpass.getuser()
self.dtime = datetime.datetime.now()
self.cwd = os.getcwd()
def print_info(self):
run = self
print("*" * 60)
print("Run ID = ", self.id)
print("Command line = ", self.command_line)
print("Host = ", self.host)
print("Username = ", self.username)
print("Time = ", self.dtime.strftime("%d/%m/%y %H:%M"))
if len(run.simulations) > 0:
print(">>> ", len(run.simulations), "simulations")
if run.timesteps.count() > 0:
print(">>> ", (run.timesteps).count(), "timesteps")
if run.halos.count() > 0:
print(">>> ", run.halos.count(), "halos")
if run.halolinks.count() > 0:
print(">>> ", run.halolinks.count(), "halolinks")
if run.properties.count() > 0:
print(">>> ", run.properties.count(), "halo properties")
if run.simproperties.count() > 0:
print(">>> ", run.simproperties.count(), "simulation properties")
def get_creator(session=None):
"""Get a Creator object for this process, for the specified session.
If session is None, return the object for the default session."""
global _current_creator
_ensure_current_creator_is_valid()
if session is None:
session = get_default_session()
if session is get_default_session():
return _current_creator
else:
return session.query(Creator).filter_by(id=_current_creator.id).first()
def _ensure_current_creator_is_valid():
from sqlalchemy import inspect
global _current_creator
default_session = get_default_session()
if _current_creator is None:
_current_creator = Creator()
default_session.add(_current_creator)
default_session.commit()
else:
current_creator_session = Session.object_session(_current_creator)
if current_creator_session is not default_session:
if not inspect(_current_creator).persistent:
current_creator_session.commit()
with default_session.no_autoflush:
_current_creator = default_session.query(Creator).filter_by(id=_current_creator.id).first()
assert inspect(_current_creator).persistent
def get_creator_id():
return get_creator().id
def set_creator(creator):
"""Set the Creator object to be used in all writes during the lifetime of the current process.
A Creator object is normally constructed automatically, but this function allows all future writes to be
associated with a different Creator. This is mainly used by MPI runs to give the illusion that all data was
written by one process."""
global _current_creator
_current_creator = creator
```
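A small, hedged sketch of the Creator bookkeeping above: constructing a Creator snapshots who ran what, where and when, and set_creator makes later writes in this process use it. The argv list is illustrative and the import mirrors the file location.

```python
from tangos.core.creator import Creator, set_creator

c = Creator(argv=["tangos", "write", "dummy_property"])   # illustrative command line
print(c.command_line, c.username, c.host, c.dtime)
set_creator(c)   # subsequent writes in this process are attributed to c
```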
#### File: tangos/core/dictionary.py
```python
from __future__ import absolute_import
import sqlalchemy, sqlalchemy.exc
from sqlalchemy import Column, Integer, String
from . import Base, get_default_session
_dict_id = {} # maps dictionary text -> database ID
_dict_obj = {} # maps session, dictionary text -> database object
class DictionaryItem(Base):
__tablename__ = 'dictionary'
id = Column(Integer, primary_key=True)
text = Column(String(128), unique=True)
def __repr__(self):
return "<DictionaryItem " + self.text + ">"
def __init__(self, text):
self.text = text
def providing_class(self, handler):
from .. import properties
return properties.providing_class(self.text, handler)
raise_exception = object()
def get_dict_id(text, default=raise_exception, session=None, allow_query=True):
"""Get a DictionaryItem id for text (possibly cached). Raises KeyError if
no dictionary object exists for the specified text, unless a default is provided
in which case the default value is returned instead."""
from . import Session
if session is None:
session = Session()
_dict_id = _get_dict_cache_for_session(get_default_session())
close_session=True
else:
_dict_id = _get_dict_cache_for_session(session)
close_session=False
try:
return _dict_id[text]
except KeyError:
if allow_query:
try:
obj = session.query(DictionaryItem).filter_by(text=text).first()
except:
if default is raise_exception:
raise
else:
return default
finally:
if close_session:
session.close()
else:
obj = None
if obj is None:
if default is raise_exception:
raise
else:
return default
_dict_id[text] = obj.id
return obj.id
def get_or_create_dictionary_item(session, name):
"""This tries to get the DictionaryItem corresponding to name from
the database. If it doesn't exist, it creates a pending
object. Note that this must be called *while the database is
locked under the specified session* to prevent duplicate items
being created"""
if session not in _dict_obj:
_dict_obj[session] = {}
# try to get it from the cache
obj = _dict_obj[session].get(name, None)
if obj is not None:
return obj
# try to get it from the db
obj = session.query(DictionaryItem).filter_by(text=name).first()
if obj is None:
# try to create it
try:
obj = DictionaryItem(name)
session.add(obj)
#session.commit()
except sqlalchemy.exc.IntegrityError:
session.rollback()
obj = session.query(DictionaryItem).filter_by(text=name).first()
if obj is None:
raise # can't get it from the DB, can't create it from the DB... who knows...
_dict_obj[session][name] = obj
return obj
def _get_dict_cache_for_session(session):
session_dict = _dict_id.get(session, None)
if session_dict is None:
session_dict = {}
for dict_item in session.query(DictionaryItem):
session_dict[dict_item.text] = dict_item.id
_dict_id[session] = session_dict
return session_dict
def get_lexicon(session):
"""Get a list of all strings known in the dictionary table"""
dict_cache = _get_dict_cache_for_session(session)
return dict_cache.keys()
```
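As a hedged usage sketch, mirroring the test suite's pattern of working against an in-memory testing database; "dummy_property" is an illustrative dictionary entry and the imports follow the file locations:

```python
import tangos as db
from tangos import testing
from tangos.core.dictionary import get_or_create_dictionary_item, get_lexicon

testing.init_blank_db_for_testing()
session = db.core.get_default_session()
item = get_or_create_dictionary_item(session, "dummy_property")
session.commit()
print(item.id, item.text)
print(sorted(get_lexicon(session)))
```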
#### File: tangos/core/halo.py
```python
from __future__ import absolute_import
import weakref
import numpy as np
from sqlalchemy import Column, Integer, ForeignKey, orm, types
from sqlalchemy.orm import relationship, backref, Session
from sqlalchemy.ext.declarative import declared_attr
from . import extraction_patterns
from . import Base
from . import creator
from .dictionary import get_dict_id, get_or_create_dictionary_item
from .timestep import TimeStep
import six
class UnsignedInteger(types.TypeDecorator):
"""Stores an unsigned int64 as a signed int64"""
impl = types.Integer
cache_ok = True
def process_bind_param(self, value, dialect):
return np.uint64(value).astype(np.int64)
def process_result_value(self, value, dialect):
if value is not None:
if type(value) is bytes:
return np.frombuffer(value, dtype=np.uint64)[0]
else:
return np.int64(value).astype(np.uint64)
else:
return None
class SimulationObjectBase(Base):
__tablename__= "halos"
id = Column(Integer, primary_key=True) #the unique ID value of the database object created for this halo
halo_number = Column(Integer) #by default this will be the halo's rank in terms of particle count
finder_id = Column(UnsignedInteger) #raw halo ID from the halo catalog
finder_offset = Column(Integer) #index of halo within halo catalog, primary identifier used when reading catalog/simulation data
timestep_id = Column(Integer, ForeignKey('timesteps.id'))
timestep = relationship(TimeStep, backref=backref(
'objects', order_by=halo_number, cascade_backrefs=False, lazy='dynamic'), cascade='')
NDM = Column(Integer)
NStar = Column(Integer)
NGas = Column(Integer)
creator = relationship(creator.Creator, backref=backref(
'halos', cascade_backrefs=False, lazy='dynamic'), cascade='save-update')
creator_id = Column(Integer, ForeignKey('creators.id'))
object_typecode = Column(Integer, nullable=False,
name='halo_type') # name for backwards compatibility
tag = "abstract_base_class_for_halos_etc" # tag will be halo, bh, tracker etc. Don't use the base class!
__mapper_args__ = {
'polymorphic_identity':-1,
'polymorphic_on':object_typecode
}
@classmethod
def _all_subclasses(cls):
for c in cls.__subclasses__():
yield c
for c_sub in c._all_subclasses():
yield c_sub
@staticmethod
def class_from_tag(match_tag):
match_tag = match_tag.lower()
for c in SimulationObjectBase._all_subclasses():
if match_tag == c.tag.lower():
return c
raise ValueError("Unknown object type %r"%match_tag)
@staticmethod
def object_typecode_from_tag(match_tag):
if isinstance(match_tag, six.string_types):
return SimulationObjectBase.class_from_tag(match_tag).__mapper_args__['polymorphic_identity']
else:
return match_tag
@staticmethod
def object_typetag_from_code(typecode):
for c in SimulationObjectBase._all_subclasses():
if c.__mapper_args__['polymorphic_identity'] == typecode:
return c.tag
raise ValueError("Unknown object typecode %d",typecode)
def __init__(self, timestep, halo_number, finder_id, finder_offset, NDM, NStar, NGas, object_typecode=None):
self.timestep = timestep
self.halo_number = int(halo_number)
self.finder_id = int(finder_id)
self.finder_offset = int(finder_offset)
self.NDM = int(NDM)
self.NStar = int(NStar)
self.NGas = int(NGas)
if object_typecode is not None:
self.object_typecode = int(object_typecode)
self.init_on_load()
self.creator = creator.get_creator(Session.object_session(timestep))
@orm.reconstructor
def init_on_load(self):
self._dict_is_complete = False
self._d = {}
def __repr__(self):
return "<%s %r | NDM=%d Nstar=%d Ngas=%d>"%(self.__class__.__name__, self.path, self.NDM, self. NStar, self.NGas)
@property
def basename(self):
return self.tag+"_"+str(self.halo_number)
@property
def path(self):
return self.timestep.path+"/"+self.basename
@property
def handler(self):
if not hasattr(self, "_handler"):
self._handler = self.timestep.simulation.get_output_handler()
return self._handler
@property
def handler_class(self):
if not hasattr(self, "_handler_class"):
self._handler_class = self.timestep.simulation.output_handler_class
return self._handler_class
def load(self, mode=None):
"""Load the data for this halo, if it is present on this computer's filesystem.
:param mode: the load mode to pass to the relevant input handler. For example with the pynbody input
handler this can be None or 'partial' (in a normal session) and, when running inside an MPI session,
'server' or 'server-partial'. See https://pynbody.github.io/tangos/mpi.html.
"""
halo_number = self.halo_number
if not hasattr(self, "finder_id"):
finder_id = self.halo_number # backward compatibility
else:
finder_id = self.finder_id
if not hasattr(self, "finder_offset"):
finder_offset = finder_id
else:
finder_offset = self.finder_offset
return self.handler.load_object(self.timestep.extension, finder_id, finder_offset, object_typetag=self.tag, mode=mode)
def calculate(self, calculation, return_description=False):
"""Use the live-calculation system to calculate a user-specified function of the stored data.
See the data exploration tutorials at https://pynbody.github.io/tangos/data_exploration.html
for an introduction to the system.
:param calculation: the calculation (or a string representation of it to be parsed)
:param return_description: if True, return both the value and the PropertyCalculation class describing it.
:returns: The result of the calculation, or a tuple containing the result and the description if
return_description is True.
"""
from .. import live_calculation
calculation = live_calculation.parser.parse_property_name_if_required(calculation)
(value,), description = calculation.values_sanitized_and_description([self], Session.object_session(self))
if len(value)==1:
retval = value[0]
else:
retval = value
if return_description:
return retval, description
else:
return retval
def __getitem__(self, key):
"""Highest-level method for retrieving data or link"""
return self.get_data(key)
def get(self, key, default=None):
"""Highest-level method for retrieving data or link, with default return value if no data is found
under the specified key"""
try:
return self.get_data(key)
except KeyError:
return default
def get_data(self, key, raw=False, always_return_array=False):
"""High-level data access to the property or linked halo named by key.
:param key: string with the name of the property or link
:param raw: if True, get the raw data rather than attempt to reassemble the data into a science-ready form
:param always_return_array: if True, always return the data as a list of values, even if there is only one
stored property with the specified name
"""
if raw:
getters=[extraction_patterns.HaloPropertyRawValueGetter()]
else:
getters=[extraction_patterns.HaloPropertyValueGetter()]
getters+=[extraction_patterns.HaloLinkTargetGetter()]
return_data = self.get_objects(key, getters)
if (not always_return_array) and len(return_data) == 1:
return_data = return_data[0]
return return_data
def get_objects(self, key, getters=None):
"""Get objects belonging to this halo named by the specified key.
Compared to get_data, this allows access to the underlying HaloProperty or HaloLink objects, or to perform
custom processing of the data by specifying particular extraction patterns to getters. For more information,
see halo_data_extraction_patterns."""
if getters is None:
getters = [extraction_patterns.HaloPropertyGetter(), extraction_patterns.HaloLinkGetter()]
from . import Session
session = Session.object_session(self)
key_id = get_dict_id(key, session=session)
ret_values = []
for g in getters:
ret_values += g.get(self, key_id, session)
if len(ret_values) == 0:
raise KeyError("No such property %r" % key)
return ret_values
def get_description(self, key, getters=None):
"""Get a description of a named property or link, in the form of an object capable of calculating it.
This can be helpful to extract meta-data such as the size of an image or steps of an array."""
if getters is None:
getters = [extraction_patterns.HaloPropertyGetter(), extraction_patterns.HaloLinkGetter()]
object = self.get_objects(key, getters)[0]
return object.description
def __setitem__(self, key, obj):
if isinstance(obj, SimulationObjectBase):
self._setitem_one_halo(key, obj)
elif hasattr(obj, '__len__') and all([isinstance(x,SimulationObjectBase) for x in obj]):
self._setitem_multiple_halos(key, obj)
else:
self._setitem_property(key, obj)
def _setitem_property(self, key, obj):
from . import Session
from .halo_data import HaloProperty
session = Session.object_session(self)
key = get_or_create_dictionary_item(session, key)
X = self.properties.filter_by(name_id=key.id).first()
if X is not None:
X.data = obj
X.creator = creator.get_creator(session)
else:
X = HaloProperty(self, key, obj)
X.creator = creator.get_creator(session)
session.add(X)
session.commit()
def _setitem_one_halo(self, key, obj):
from . import Session, HaloLink
session = Session.object_session(self)
key = get_or_create_dictionary_item(session, key)
X = self.links.filter_by(halo_from_id=self.id, relation_id=key.id).first()
if X is None:
X = HaloLink(self, obj, key)
X.creator = creator.get_creator(session)
session.add(X)
else:
X.halo_to = obj
X.creator = creator.get_creator(session)
session.commit()
def _setitem_multiple_halos(self, key, obj):
from . import Session
from .halo_data import HaloLink
session = Session.object_session(self)
key = get_or_create_dictionary_item(session, key)
self.links.filter_by(halo_from_id=self.id, relation_id=key.id).delete()
links = [HaloLink(self, halo_to, key) for halo_to in obj]
session.add_all(links)
session.commit()
def keys(self, getters = None):
if getters is None:
getters = [extraction_patterns.HaloPropertyGetter(), extraction_patterns.HaloLinkGetter()]
from . import Session
names = []
session = Session.object_session(self)
for g in getters:
names+=g.keys(self, session)
return names
def __contains__(self, item):
return item in list(self.keys())
@property
def earliest(self):
if not hasattr(self, '_earliest'):
from .. import relation_finding
strategy = relation_finding.MultiHopMajorProgenitorsStrategy(self,order_by=['time_asc'],include_startpoint=True)
self._earliest=strategy.first()
return self._earliest
@property
def latest(self):
if not hasattr(self, '_latest'):
from .. import relation_finding
strategy = relation_finding.MultiHopMajorDescendantsStrategy(self,order_by=['time_desc'],include_startpoint=True)
self._latest=strategy.first()
return self._latest
def plot(self, name, *args, **kwargs):
from . import Session
name_id = get_dict_id(name, Session.object_session(self))
data = self.properties.filter_by(name_id=name_id).first()
return data.plot(*args, **kwargs)
def calculate_for_descendants(self, *plist, **kwargs):
"""Run the specified calculations on this halo and its descendants
Each argument is a string (or an instance of live_calculation.Calculation), following the syntax
described in live_calculation.md.
*kwargs*:
:param nmax: The maximum number of descendants to consider (default 1000)
:param strategy: The class to use to find the descendants (default relation_finding.MultiHopMajorDescendantsStrategy)
"""
from .. import live_calculation
from .. import relation_finding
from .. import temporary_halolist as thl
from . import Session
from .. import query as db_query
nmax = kwargs.get('nmax',1000)
strategy = kwargs.get('strategy', relation_finding.MultiHopMajorDescendantsStrategy)
strategy_kwargs = kwargs.get('strategy_kwargs', {})
if isinstance(plist[0], live_calculation.Calculation):
property_description = plist[0]
else:
property_description = live_calculation.parser.parse_property_names(*plist)
# must be performed in its own session as we intentionally load in a lot of
# objects with incomplete lazy-loaded properties
session = Session()
try:
with strategy(db_query.get_halo(self.id, session), nhops_max=nmax,
include_startpoint=True, **strategy_kwargs).temp_table() as tt:
raw_query = thl.halo_query(tt)
query = property_description.supplement_halo_query(raw_query)
results = query.all()
return property_description.values_sanitized(results, Session.object_session(self))
finally:
session.close()
def calculate_for_progenitors(self, *plist, **kwargs):
"""Run the specified calculations on the progenitors of this halo
For more information see calculate_for_descendants.
"""
from .. import relation_finding
kwargs['strategy'] = relation_finding.MultiHopMajorProgenitorsStrategy
return self.calculate_for_descendants(*plist, **kwargs)
def reverse_property_cascade(self, *args, **kwargs):
"""The old alias for calculate_for_progenitors, retained for compatibility"""
return self.calculate_for_progenitors(*args, **kwargs)
def property_cascade(self, *args, **kwargs):
"""The old alias for calculate_for_descendants, retained for compatibility"""
return self.calculate_for_descendants(*args, **kwargs)
@property
def next(self):
if not hasattr(self, '_next'):
from .. import relation_finding
strategy = relation_finding.HopMajorDescendantStrategy(self)
self._next=strategy.first()
return self._next
@property
def previous(self):
if not hasattr(self, '_previous'):
from .. import relation_finding
strategy = relation_finding.HopMajorProgenitorStrategy(self)
self._previous=strategy.first()
return self._previous
def short(self):
return "<Halo " + str(self.halo_number) + " of ...>"
class Halo(SimulationObjectBase):
__mapper_args__ = {
'polymorphic_identity': 0
}
tag = "halo"
class Tracker(SimulationObjectBase):
__mapper_args__ = {
'polymorphic_identity':3
}
tag = "tracker"
def __init__(self, timestep, halo_number):
super(Tracker, self).__init__(timestep, halo_number, halo_number, halo_number, 0,0,0,
self.__mapper_args__['polymorphic_identity'])
@property
def tracker(self):
return self.timestep.simulation.trackers.filter_by(halo_number=self.halo_number).first()
def load(self, mode=None):
handler = self.timestep.simulation.get_output_handler()
return handler.load_tracked_region(self.timestep.extension, self.tracker, mode=mode)
class BH(Tracker):
__mapper_args__ = {
'polymorphic_identity': 1
}
tag = "BH"
class Group(SimulationObjectBase):
__mapper_args__ = {
'polymorphic_identity':2
}
tag = "group"
def __init__(self, *args):
super(Group, self).__init__(*args)
self.object_typecode = 2
class PhantomHalo(SimulationObjectBase):
__mapper_args__ = {
'polymorphic_identity': 4
}
tag = "phantom"
def __init__(self, timestep, halo_number, finder_id):
super(PhantomHalo, self).__init__(timestep, halo_number, finder_id, finder_id, 0,0,0,
self.__mapper_args__['polymorphic_identity'])
TimeStep.halos = orm.relationship(Halo, lazy='dynamic',viewonly=True,
order_by=SimulationObjectBase.halo_number)
TimeStep.trackers = orm.relationship(Tracker,lazy='dynamic',viewonly=True,
order_by=Tracker.halo_number)
TimeStep.bhs = orm.relationship(BH, lazy='dynamic',viewonly=True,
order_by=BH.halo_number)
TimeStep.groups = orm.relationship(Group, lazy='dynamic',viewonly=True,
order_by=Group.halo_number)
TimeStep.phantoms = orm.relationship(PhantomHalo, lazy='dynamic',viewonly=True,
order_by=PhantomHalo.halo_number)
```
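A hedged sketch of the high-level access layer defined above; the database path and the property name ("Mvir") are placeholders for whatever is stored in a given database:

```python
import tangos

halo = tangos.get_halo("my_sim/step.100/halo_1")
mvir = halo["Mvir"]                                 # __getitem__ -> get_data
raw_mvir = halo.get_data("Mvir", raw=True)          # skip any reassembly step
history, = halo.calculate_for_progenitors("Mvir")   # one array per requested property
previous = halo.previous                            # major progenitor, previous step
print(mvir, history[:3], previous)
```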
#### File: tangos/relation_finding/multi_source.py
```python
from __future__ import absolute_import
from .. import core
from ..util import consistent_collection
from .multi_hop import MultiHopStrategy
from .one_hop import HopStrategy
from six.moves import range
from six.moves import zip
import sqlalchemy
from sqlalchemy import orm
class MultiSourceMultiHopStrategy(MultiHopStrategy):
"""A variant of MultiHopStrategy that finds halos corresponding to multiple start points.
Note that the behaviour is necessarily somewhat different to the other classes which start from a single
halo. Specifically, a target *must* be specified, and the direction of the hops to follow is inferred
from the nature of the target.
Additionally, as soon as any halo is "matched" in the target, the entire query is stopped. In other words,
this class assumes that the number of hops is the same to reach all target halos."""
def __init__(self, halos_from, target, **kwargs):
"""Construct a strategy for finding Halos via multiple "hops" along HaloLinks from multiple start-points
:param halos_from: a list of all halos to start from.
:param target: a TimeStep or Simulation object to target.
:param one_match_per_input: if True (default), return one halo per starting point in order.
The returned halo in each case should be the one with the
highest weight link (i.e. the major progenitor or similar)
if False, *all* linked halos are returned and the caller has to figure out
which one belongs to which starting halo, e.g. by calling sources()
Other parameters are passed onto an underlying MultiHopStrategy. However note that the order_by parameter
has no effect unless one_match_per_input is False.
"""
directed = kwargs.get("directed", self._infer_direction(halos_from, target))
kwargs["directed"] = directed
kwargs["target"] = target
self._return_only_highest_weights = kwargs.pop('one_match_per_input', True)
# For 'backwards' or 'forwards' searches (basically major progenitors or descendants), keep only the
# strongest link at each _step_ rather than waiting to the end to select the highest weight.
# This makes sure one never "hops" from one branch to another (see
# test_hop_strategy.test_major_progenitor_from_minor_progenitor for an example that exposes
# this former bug). The actual implementation of the per-step restriction is in the override to
# _supplement_halolink_query_with_filter, below.
self._keep_only_highest_weights_per_hop = (directed == "forwards" or directed == "backwards")
self._keep_only_highest_weights_per_hop&=self._return_only_highest_weights | (target is None)
super(MultiSourceMultiHopStrategy, self).__init__(halos_from[0], **kwargs)
self._all_halo_from = halos_from
def _infer_direction(self, halos_from, target):
if isinstance(target, core.simulation.Simulation):
return "across"
elif isinstance(target, core.timestep.TimeStep):
collected_halos = consistent_collection.ConsistentCollection(halos_from)
if collected_halos.timestep.simulation_id!=target.simulation_id:
return "across"
elif collected_halos.timestep.time_gyr<target.time_gyr:
return "forwards"
else:
return "backwards"
def _seed_temp_table(self):
insert_dictionaries = []
for i,halo_from in enumerate(self._all_halo_from):
insert_dictionaries.append({'halo_from_id': halo_from.id, 'halo_to_id': halo_from.id, 'weight': 1.0,
'nhops': 0, 'source_id': i})
self._connection.execute(self._table.insert(), insert_dictionaries)
def _generate_next_level_prelim_links(self, from_nhops=0):
if self._should_halt():
return 0
else:
return super(MultiSourceMultiHopStrategy, self)._generate_next_level_prelim_links(from_nhops)
def _supplement_halolink_query_with_filter(self, query, table=None):
query = super(MultiSourceMultiHopStrategy, self)._supplement_halolink_query_with_filter(query,table)
if self._keep_only_highest_weights_per_hop:
query = self._extract_max_weight_rows_from_query(query, table)
return query
def _extract_max_weight_rows_from_query(self, query, table):
from ..util.sql_argmax import argmax
return argmax(query, table.c.weight,
[table.c.halo_from_id, table.c.source_id])
def _should_halt(self):
# should halt if there are some results available. We can tell this with the parent classes's
# _generate_query. (Our own _generate_query always returns one result per source_id.)
return super()._generate_query(True).count()>0
def _order_by_clause(self, halo_alias, timestep_alias):
if self._return_only_highest_weights:
return [] # _return_only_highest weights is already ordered
else:
return [self._link_orm_class.source_id] \
+ super()._order_by_clause(halo_alias, timestep_alias)
def all(self):
results = self._get_query_all()
if self._return_only_highest_weights:
# query results include a source_id column which we now wish to ignore (see
# _query_ordered below for more information)
return [x[1].halo_to if x[1] is not None else None
for x in results]
else:
return [x.halo_to for x in results]
def sources(self):
"""Returns the offset in the original list that generated each result returned by all().
For example, if the class is constructed for two halos, but the results have two results for
the first halo, sources() will return [0,0,1]."""
results = self._get_query_all()
if self._return_only_highest_weights:
return [x[0] for x in results]
else:
return [x.source_id for x in results]
def _generate_query(self, halo_ids_only):
if self._return_only_highest_weights:
# need to find the highest weight result for each source_id
subquery = self._extract_max_weight_rows_from_query(super()._generate_query(halo_ids_only=False),
self._table).subquery()
# we now need to restructure the results such that they appear in the same order as the input
# halo list, and a NULL result is returned for 'missing' halos. This way, the result is
# guaranteed to be in 1-1 correspondence with the input.
#
# We do this by joining to the initial seeds in the temp table
subquery_orm_alias = orm.aliased(self._link_orm_class, subquery)
source_ids = orm.aliased(self._link_orm_class)
if halo_ids_only:
# if going straight into a temptable, we only want to see the halo_to_id
query = self.session.query(subquery_orm_alias.halo_to_id).select_from(source_ids)
else:
# if going back to SQLAlchemy ORM, we explicitly include the source_id, otherwise the sqlalchemy dedup
                # process removes duplicate rows (e.g. if there are several null results, only the
# first will be returned!) resulting in a query result that is too short and unrecoverable
# errors in functions that rely on getting back a 1-1 mapping
query = self.session.query(source_ids.source_id,subquery_orm_alias).\
select_from(source_ids)
query = query.filter(source_ids.nhops==0).order_by(source_ids.source_id)
query = query.outerjoin(subquery_orm_alias,
source_ids.source_id==subquery_orm_alias.source_id)
if not halo_ids_only:
query = query.options(orm.joinedload(subquery_orm_alias.halo_to))
else:
query = super()._generate_query(halo_ids_only)
return query
class MultiSourceAllMajorProgenitorsStrategy(MultiSourceMultiHopStrategy):
def __init__(self, halos_from, **kwargs):
super().__init__(halos_from, None, one_match_per_input=False, directed='backwards',
include_startpoint=True)
def _should_halt(self):
return False
class MultiSourceAllMajorDescendantsStrategy(MultiSourceMultiHopStrategy):
def __init__(self, halos_from, **kwargs):
super().__init__(halos_from, None, one_match_per_input=False, directed='forwards',
include_startpoint=True)
def _should_halt(self):
return False
```
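A hedged sketch of matching several halos at once to a target timestep; the database paths are placeholders and the import follows the file location. With the default one_match_per_input=True, the returned list lines up one-to-one with the input halos, with None where no link was found:

```python
import tangos
from tangos.relation_finding.multi_source import MultiSourceMultiHopStrategy

halos = tangos.get_timestep("my_sim/step.100").halos.all()
target = tangos.get_timestep("my_sim/step.200")
matches = MultiSourceMultiHopStrategy(halos, target).all()
for h, m in zip(halos, matches):
    print(h, "->", m)
```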
#### File: tangos/relation_finding/one_hop.py
```python
from __future__ import absolute_import
import sqlalchemy
import sqlalchemy.exc
import sqlalchemy.orm
import sqlalchemy.orm.dynamic
import sqlalchemy.orm.query
from sqlalchemy.orm import Session, contains_eager
from .. import core, temporary_halolist
class HopStrategy(object):
"""HopStrategy and its descendants define methods helpful for finding related halos, e.g. progenitors/descendants,
or corresponding halos in other simulation runs"""
def __init__(self, halo_from, target=None, order_by=None):
"""Construct a HopStrategy starting from the specified halo"""
assert isinstance(halo_from, core.halo.SimulationObjectBase)
self.session = Session.object_session(halo_from)
self.halo_from = halo_from
self._initialise_order_by(order_by)
self._link_orm_class = core.halo_data.HaloLink
self._target = target
self._all = None
def _target_timestep(self, query, ts):
"""Only return those hops which reach the specified timestep"""
if ts is None:
query = query.filter(0 == 1)
else:
query = query.join(self._link_orm_class.halo_to).filter(core.halo.SimulationObjectBase.timestep_id == ts.id)
return query
def _target_simulation(self, query, sim):
"""Only return those hops which reach the specified simulation"""
query = query.join(self._link_orm_class.halo_to).join(core.SimulationObjectBase.timestep).filter(
core.timestep.TimeStep.simulation_id == sim.id)
return query
def _filter_query_for_target(self, query, db_obj):
"""Only return those hops which reach the specifid simulation or timestep"""
if db_obj is None:
return query
elif isinstance(db_obj, core.timestep.TimeStep):
return self._target_timestep(query, db_obj)
elif isinstance(db_obj, core.simulation.Simulation):
return self._target_simulation(query, db_obj)
else:
raise ValueError("Unknown target type")
def _initialise_order_by(self, names):
"""Specify an ordering for the output hop suggestions.
Accepted names are:
- 'weight' - the weight of the link, ascending (default). In the case of MultiHopStrategy, this is the
product of the weights along the path found.
- 'time_asc' - the time of the snapshot, ascending order
- 'time_desc' - the time of the snapshot, descending order
- 'halo_number_asc' - the halo number, ascending
- 'halo_number_desc' - the halo number, descending
- 'nhops' - the number of hops taken to reach the halo (MultiHopStrategy only)
Multiple names can be given to order by more than one property.
"""
if names is None:
names = ['weight']
elif isinstance(names, str):
names = [names]
self._order_by_names = [x.lower() for x in names]
def count(self):
"""Return the number of hops matching the conditions"""
return len(self._get_query_all())
def _execute_query(self):
try:
query = self._filter_query_for_target(self.halo_from.links, self._target)
results = self._order_query(query).all()
except sqlalchemy.exc.ResourceClosedError:
results = []
results = [x for x in results if x is not None]
self._all = results
def _get_query_all(self):
if self._all is None:
self._execute_query()
return self._all
def temp_table(self):
# N.B. this could be made more efficient
ids_list = [x.id if hasattr(x,'id') else None for x in self.all() ]
return temporary_halolist.temporary_halolist_table(self.session, ids_list)
def all(self):
"""Return all possible hops matching the conditions"""
return [x.halo_to for x in self._get_query_all()]
def weights(self):
"""Return the weights for the possible hops"""
return [x.weight for x in self._get_query_all()]
def all_and_weights(self):
"""Return all possible hops matching the conditions, along with
the weights"""
all = self._get_query_all()
weights = [x.weight for x in all]
halos = [x.halo_to for x in all]
return halos, weights
def first(self):
"""Return the suggested hop."""
link = self._get_query_all()
if len(link) == 0:
return None
else:
return link[0].halo_to
def _order_by_clause(self, halo_alias, timestep_alias):
return [self._generate_order_arg_from_name(name, halo_alias, timestep_alias) for name in self._order_by_names]
def _generate_order_arg_from_name(self, name, halo_alias, timestep_alias):
if name == 'weight':
return self._link_orm_class.weight.desc()
elif name == 'time_asc':
return timestep_alias.time_gyr
elif name == 'time_desc':
return timestep_alias.time_gyr.desc()
elif name == 'halo_number_asc':
return halo_alias.halo_number
elif name == 'halo_number_desc':
return halo_alias.halo_number.desc()
else:
raise ValueError("Unknown ordering method %r" % name)
def _ordering_requires_join(self):
return 'time_asc' in self._order_by_names \
or 'time_desc' in self._order_by_names \
or 'halo_number_asc' in self._order_by_names \
or 'halo_number_desc' in self._order_by_names
def _order_query(self, query):
assert isinstance(query, sqlalchemy.orm.query.Query)
timestep_alias = None
halo_alias = None
if self._ordering_requires_join():
timestep_alias = sqlalchemy.orm.aliased(core.timestep.TimeStep)
halo_alias = sqlalchemy.orm.aliased(core.halo.SimulationObjectBase)
query = query.join(halo_alias, self._link_orm_class.halo_to_id == halo_alias.id)\
.join(timestep_alias)\
.options(
contains_eager(self._link_orm_class.halo_to.of_type(halo_alias))\
.contains_eager(halo_alias.timestep.of_type(timestep_alias))
)
query = query.order_by(*self._order_by_clause(halo_alias, timestep_alias))
return query
class HopMajorDescendantStrategy(HopStrategy):
"""A hop strategy that suggests the major descendant for a halo"""
def __init__(self, halo_from):
target_ts = halo_from.timestep.next
if target_ts:
super(HopMajorDescendantStrategy, self).__init__(halo_from, target=target_ts)
else:
self._all = []
class HopMajorProgenitorStrategy(HopStrategy):
"""A hop strategy that suggests the major progenitor for a halo"""
def __init__(self, halo_from):
target_ts = halo_from.timestep.previous
if target_ts:
super(HopMajorProgenitorStrategy, self).__init__(halo_from, target=target_ts)
else:
self._all = []
```
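A hedged sketch of the one-hop strategies above; the database path is a placeholder and the imports follow the file location:

```python
import tangos
from tangos.relation_finding.one_hop import HopStrategy, HopMajorProgenitorStrategy

halo = tangos.get_halo("my_sim/step.200/halo_1")
# All linked halos at the previous timestep, heaviest link first.
candidates, weights = HopStrategy(halo, target=halo.timestep.previous,
                                  order_by="weight").all_and_weights()
major_progenitor = HopMajorProgenitorStrategy(halo).first()
print(major_progenitor, weights[:1])
```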
#### File: tangos/scripts/__init__.py
```python
from . import manager, writer, timelink, crosslink
from .. import parallel_tasks
import tangos.tools
import sys
import os
def add_generic_tool(subparse, class_, command, help):
this_subparser = subparse.add_parser(command, help=help)
class_.add_parser_arguments(this_subparser)
def run(options):
obj = class_()
obj.process_options(options)
parallel_tasks.launch(obj.run_calculation_loop, 2, [])
this_subparser.set_defaults(func=run)
def add_serve_tool(subparse):
def serve(options):
from pkg_resources import load_entry_point
ini_file = options.config
if os.path.exists(ini_file):
ini_path = ini_file
else:
ini_path = os.path.join(__path__[0],"web",ini_file)
sys.argv = ["",ini_path]
sys.exit(
load_entry_point('pyramid','console_scripts','pserve')()
)
web_subparser = subparse.add_parser("serve", help="Start a web server (shortcut to Pyramid's pserve)")
web_subparser.add_argument('config', action='store', nargs="?",
help="The name of the pserve configuration file; either a path or production.ini/development.ini to use tangos' suggested configurations",
default="production.ini")
web_subparser.set_defaults(func=serve)
def add_commands(subparse):
tangos.tools.GenericTangosTool.add_tools(subparse)
add_serve_tool(subparse)
def main(argv=None):
parser, subparse = manager.get_argument_parser_and_subparsers()
add_commands(subparse)
args = parser.parse_args(argv)
from .. import core
core.process_options(args)
args.func(args)
```
#### File: tangos/tests/test_blocking_session.py
```python
from __future__ import absolute_import
from __future__ import print_function
from nose.tools import assert_raises
import tangos as db
import tangos.blocking
import tangos.core.halo
import tangos.core.simulation
import tangos.core.timestep
import tangos.parallel_tasks as pt
from tangos import log, testing
import time
import os
import sqlalchemy.exc
import contextlib
import tangos
from tangos.config import testing_db_backend
from nose.plugins.skip import SkipTest
def setup():
pt.use("multiprocessing")
testing.init_blank_db_for_testing(timeout=0.1, verbose=False)
session = db.core.get_default_session()
sim = tangos.core.simulation.Simulation("sim")
session.add(sim)
ts1 = tangos.core.timestep.TimeStep(sim, "ts1")
session.add(ts1)
halo_1 = tangos.core.halo.Halo(ts1, 1, 1, 1, 0, 0, 0, 0)
session.add_all([halo_1])
session.commit()
def teardown():
tangos.core.close_db()
try:
os.remove("test.db")
except OSError:
pass
def _multiprocess_block():
session = db.core.get_default_session()
ts = tangos.get_timestep("sim/ts1")
tangos.core.creator.get_creator(session) # this stops a warning being issued if the creator object is created during creation of Halo
new_halo = tangos.core.halo.Halo(ts, 5, 5, 5, 0, 0, 0, 0)
session.add(new_halo)
session.flush()
time.sleep(1.0)
session.commit()
def _multiprocess_test():
session = db.core.get_default_session()
time.sleep(0.5)
ts = tangos.get_timestep("sim/ts1")
tangos.core.creator.get_creator(session) # this stops a warning being issued if the creator object is created during creation of Halo
new_halo = tangos.core.halo.Halo(ts, 6, 6, 6, 0, 0, 0, 0)
session.add(new_halo)
session.commit()
def _perform_test(use_blocking=True):
if pt.backend.rank()==1:
db.init_db("sqlite:///test_dbs/test_blocking_session.db", timeout=0.1, verbose=False)
pt.barrier()
else:
pt.barrier()
db.init_db("sqlite:///test_dbs/test_blocking_session.db", timeout=0.1, verbose=False)
if use_blocking:
db.blocking.make_engine_blocking()
if pt.backend.rank()==1:
_multiprocess_block()
elif pt.backend.rank()==2:
_multiprocess_test()
@contextlib.contextmanager
def _suppress_exception_report():
import tangos.parallel_tasks.backends.multiprocessing as backend
backend._print_exceptions = False # to prevent confusing error appearing in stdout
yield
backend._print_exceptions = True
def test_non_blocking_exception():
if testing_db_backend != "sqlite":
raise SkipTest("This test is only relevant for sqlite databases")
with _suppress_exception_report():
with assert_raises(sqlalchemy.exc.OperationalError):
with log.LogCapturer():
pt.launch(_perform_test,3, (False,))
db.core.get_default_session().rollback()
def test_blocking_avoids_exception():
if testing_db_backend != "sqlite":
raise SkipTest("This test is only relevant for sqlite databases")
assert tangos.get_halo("sim/ts1/6") is None
db.core.get_default_session().commit()
with log.LogCapturer():
pt.launch(_perform_test,3, (True,))
assert tangos.get_halo("sim/ts1/6") is not None
```
#### File: tangos/tests/test_db_writer.py
```python
from __future__ import absolute_import
import tangos as db
import tangos.config
import os
from tangos.tools import add_simulation
from tangos.tools import property_writer
from tangos.input_handlers import output_testing
from tangos import parallel_tasks, log, testing
from tangos import properties
from tangos.util import proxy_object
from numpy import testing as npt
from nose import with_setup
def setup_func():
parallel_tasks.use('null')
testing.init_blank_db_for_testing()
db.config.base = os.path.join(os.path.dirname(__file__), "test_simulations")
manager = add_simulation.SimulationAdderUpdater(output_testing.TestInputHandler("dummy_sim_1"))
with log.LogCapturer():
manager.scan_simulation_and_add_all_descendants()
def teardown_func():
db.core.close_db()
class DummyProperty(properties.PropertyCalculation):
names = "dummy_property",
requires_particle_data = True
def requires_property(self):
return []
def calculate(self, data, entry):
return data.time*data.halo,
class DummyProperty2(properties.PropertyCalculation):
"""Used by test_property_deleter"""
names = "another_dummy_property",
requires_particle_data = True
def requires_property(self):
return []
def calculate(self, data, entry):
return data.time*data.halo+1,
class DummyPropertyCausingException(properties.PropertyCalculation):
names = "dummy_property_with_exception",
requires_particle_data = True
def calculate(self, data, entry):
raise RuntimeError("Test of exception handling")
class DummyPropertyWithReconstruction(properties.PropertyCalculation):
names = "dummy_property_with_reconstruction",
    requires_particle_data = False
callback = None
def calculate(self, data, entry):
return 1.0,
def reassemble(self, property_name):
if self.callback:
self.callback() # hook to allow us to know reassemble has been called
return 2.0
class DummyPropertyAccessingSimulationProperty(properties.PropertyCalculation):
names = "dummy_property_accessing_simulation_property",
requires_particle_data = False
def preloop(self, sim_data, db_timestep):
self._num_queries = 0
def calculate(self, data, entry):
with tangos.testing.SqlExecutionTracker() as ctr:
result = self.get_simulation_property("dummy_sim_property", None)
assert result == '42'
self._num_queries += ctr.count_statements_containing("simulationproperties")
assert self._num_queries<=1 # don't want to see simulationproperties queried more than once
check_null_result = object()
result2 = self.get_simulation_property("nonexistent_sim_property",check_null_result)
assert result2 is check_null_result
# store the value 1 to indicate that everything above passed (assertion errors will be
# caught by the db_writer so wouldn't directly result in a failure)
return 1,
def run_writer_with_args(*args):
stored_log = log.LogCapturer()
writer = property_writer.PropertyWriter()
writer.parse_command_line(args)
with stored_log:
writer.run_calculation_loop()
return stored_log.get_output()
@with_setup(setup_func, teardown_func)
def test_basic_writing():
run_writer_with_args("dummy_property")
_assert_properties_as_expected()
@with_setup(setup_func, teardown_func)
def test_parallel_writing():
parallel_tasks.use('multiprocessing')
try:
parallel_tasks.launch(run_writer_with_args, 3, ["dummy_property"])
finally:
parallel_tasks.use('null')
_assert_properties_as_expected()
def _assert_properties_as_expected():
assert db.get_halo("dummy_sim_1/step.1/1")['dummy_property'] == 1.0
assert db.get_halo("dummy_sim_1/step.1/2")['dummy_property'] == 2.0
assert db.get_halo("dummy_sim_1/step.2/1")['dummy_property'] == 2.0
@with_setup(setup_func, teardown_func)
def test_error_ignoring():
log = run_writer_with_args("dummy_property", "dummy_property_with_exception")
assert "Uncaught exception during property calculation" in log
assert db.get_halo("dummy_sim_1/step.1/1")['dummy_property'] == 1.0
assert db.get_halo("dummy_sim_1/step.1/2")['dummy_property'] == 2.0
assert db.get_halo("dummy_sim_1/step.2/1")['dummy_property'] == 2.0
assert 'dummy_property' in list(db.get_halo("dummy_sim_1/step.1/1").keys())
assert 'dummy_property_with_exception' not in list(db.get_halo("dummy_sim_1/step.1/1").keys())
class DummyRegionProperty(properties.PropertyCalculation):
names = "dummy_region_property",
def requires_property(self):
return "dummy_property",
def region_specification(self, db_data):
assert 'dummy_property' in db_data
return slice(1,5)
def calculate(self, data, entry):
assert data.message=="Test string"[1:5]
return 100.0,
@with_setup(setup_func, teardown_func)
def test_region_property():
run_writer_with_args("dummy_property","dummy_region_property")
_assert_properties_as_expected()
assert db.get_halo("dummy_sim_1/step.2/1")['dummy_region_property']==100.0
@with_setup(setup_func, teardown_func)
def test_no_duplication():
run_writer_with_args("dummy_property")
assert db.get_default_session().query(db.core.HaloProperty).count()==15
run_writer_with_args("dummy_property") # should not create duplicates
assert db.get_default_session().query(db.core.HaloProperty).count() == 15
run_writer_with_args("dummy_property", "--force") # should create duplicates
assert db.get_default_session().query(db.core.HaloProperty).count() == 30
class DummyLink(DummyProperty):
names = "dummy_link"
def calculate(self, data, entry):
return proxy_object.IncompleteProxyObjectFromFinderId(1,'halo')
class DummyPropertyRequiringLink(DummyProperty):
names = "dummy_property_requiring_link",
def requires_property(self):
return ["dummy_link"]
@with_setup(setup_func, teardown_func)
def test_link_property():
run_writer_with_args("dummy_link")
assert db.get_default_session().query(db.core.HaloLink).count() == 15
db.testing.assert_halolists_equal([db.get_halo(2)['dummy_link']], [db.get_halo(1)])
@with_setup(setup_func, teardown_func)
def test_link_dependency():
run_writer_with_args("dummy_property_requiring_link")
assert db.get_default_session().query(db.core.HaloProperty).count() == 0
run_writer_with_args("dummy_link")
run_writer_with_args("dummy_property_requiring_link")
assert db.get_default_session().query(db.core.HaloProperty).count() == 15
@with_setup(setup_func, teardown_func)
def test_writer_sees_raw_properties():
# regression test for issue #121
run_writer_with_args("dummy_property_with_reconstruction")
assert db.get_halo(2)['dummy_property_with_reconstruction']==2.0
assert db.get_halo(2).calculate('raw(dummy_property_with_reconstruction)')==1.0
def raise_exception(obj):
raise RuntimeError("reconstruct has been called")
DummyPropertyWithReconstruction.callback = raise_exception
run_writer_with_args("dummy_property_with_reconstruction") # should not try to reconstruct the existing data stream
@with_setup(setup_func, teardown_func)
def test_writer_handles_sim_properties():
"""Test for issue where simulation properties could be queried from within a calculation.
This could lead to unexpected database locks. Tangos 1.3 provides a safe route to doing this.
    The test ensures that the results are cached to prevent hammering the database.
    However, it does not directly test that the parallel_tasks locking mechanism is called,
which is hard. Ideally this test would therefore be completed at some point..."""
parallel_tasks.use('multiprocessing')
try:
parallel_tasks.launch(run_writer_with_args, 3, ["dummy_property_accessing_simulation_property"])
finally:
parallel_tasks.use('null')
for i in range(1,3):
ts = db.get_timestep("dummy_sim_1/step.%d"%i)
x, = ts.calculate_all("dummy_property_accessing_simulation_property")
npt.assert_equal(x,[1]*ts.halos.count())
```
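For orientation, the helpers defined in this test module can also be driven directly; a minimal sketch reusing only in-module names (the expected value 2.0 follows the assertions above):

```python
setup_func()                                  # dummy simulation in a blank test db
run_writer_with_args("dummy_property")        # write the property defined above
print(db.get_halo("dummy_sim_1/step.1/2")["dummy_property"])   # -> 2.0
teardown_func()
```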
#### File: tangos/tests/test_halo_setitem.py
```python
from __future__ import absolute_import
import tangos as db
import tangos.testing
import tangos
from six.moves import range
import numpy as np
import tangos.testing.simulation_generator
def setup():
tangos.testing.init_blank_db_for_testing()
generator = tangos.testing.simulation_generator.TestSimulationGenerator()
for i in range(3):
generator.add_timestep()
generator.add_objects_to_timestep(3)
def teardown():
tangos.core.close_db()
def test_setitem():
tangos.get_halo("sim/ts1/1")['bla'] = 23
db.core.get_default_session().commit()
assert tangos.get_halo("sim/ts1/1")['bla'] == 23
def test_set_another_item():
tangos.get_halo("sim/ts1/2")['bla'] = 42
db.core.get_default_session().commit()
assert tangos.get_halo("sim/ts1/2")['bla'] == 42
def test_update_item():
assert tangos.get_halo("sim/ts1/1")['bla'] == 23
tangos.get_halo("sim/ts1/1")['bla'] = 96
db.core.get_default_session().commit()
assert tangos.get_halo("sim/ts1/1")['bla'] == 96
def test_set_large_item():
"Test inserting arrays with size up to a few MiB"
for length in [2**i for i in range(8, 20)]:
value = np.random.rand(length)
tangos.get_halo("sim/ts1/2")['this_is_large'] = value
db.core.get_default_session().commit()
np.testing.assert_array_equal(
tangos.get_halo("sim/ts1/2")['this_is_large'],
value
)
```
#### File: tangos/tests/test_yt.py
```python
from __future__ import absolute_import
import tangos as db
import tangos.input_handlers.yt as yt_outputs
import tangos.tools.add_simulation as add
from tangos import config
from tangos import log, testing
import os
import numpy.testing as npt
import tangos
def setup():
global output_manager
testing.init_blank_db_for_testing()
db.config.base = os.path.join(os.path.dirname(__file__), "test_simulations")
output_manager = yt_outputs.YtChangaAHFInputHandler("test_tipsy_yt")
add.SimulationAdderUpdater(output_manager).scan_simulation_and_add_all_descendants()
def teardown():
tangos.core.close_db()
def test_handler():
assert isinstance(db.get_simulation("test_tipsy_yt").get_output_handler(), yt_outputs.YtChangaAHFInputHandler)
def test_timestep():
ts = db.get_timestep("test_tipsy_yt/tiny.000640")
npt.assert_allclose(ts.time_gyr, 2.173594670375)
npt.assert_allclose(ts.redshift, 2.96382819878)
def test_halos():
ts = db.get_timestep("test_tipsy_yt/tiny.000640")
assert ts.halos.count()==9
def test_load():
yt_obj = db.get_halo("test_tipsy_yt/tiny.000640/halo_2").load()
import yt.data_objects.data_containers
assert isinstance(yt_obj, yt.data_objects.data_containers.YTDataContainer)
```
|
{
"source": "jdavies-st/asdf-transform-schemas",
"score": 2
}
|
#### File: asdf-transform-schemas/tests/test_extension.py
```python
from asdf.extension import default_extensions, AsdfExtensionList
from asdf import generic_io
import yaml
from asdf_transform_schemas.extension import AsdfTransformSchemasExtension
def test_extension_registered():
assert any(
isinstance(e, AsdfTransformSchemasExtension)
for e in default_extensions.extensions
)
def test_resolver():
extension_list = AsdfExtensionList([AsdfTransformSchemasExtension()])
schema_id = "http://asdf-format.org/schemas/transform/add-2.0.0"
url = extension_list.resolver(schema_id)
with generic_io.get_file(url) as f:
schema = yaml.safe_load(f.read())
assert schema["id"] == schema_id
```
|
{
"source": "jdavies-st/glue-jupyter",
"score": 2
}
|
#### File: bqplot/image/viewer.py
```python
import bqplot
from glue.viewers.image.state import ImageViewerState
from glue.viewers.image.composite_array import CompositeArray
from ...link import on_change
from ..common.viewer import BqplotBaseView
from ..scatter.layer_artist import BqplotScatterLayerArtist
from .layer_artist import BqplotImageLayerArtist, BqplotImageSubsetLayerArtist
from .frb_mark import FRBImage
from glue_jupyter.common.state_widgets.layer_scatter import ScatterLayerStateWidget
from glue_jupyter.common.state_widgets.layer_image import (ImageLayerStateWidget,
ImageSubsetLayerStateWidget)
from glue_jupyter.common.state_widgets.viewer_image import ImageViewerStateWidget
__all__ = ['BqplotImageView']
class BqplotImageView(BqplotBaseView):
allow_duplicate_data = False
allow_duplicate_subset = False
large_data_size = 2e7
_layer_style_widget_cls = {BqplotImageLayerArtist: ImageLayerStateWidget,
BqplotImageSubsetLayerArtist: ImageSubsetLayerStateWidget,
BqplotScatterLayerArtist: ScatterLayerStateWidget}
_state_cls = ImageViewerState
_options_cls = ImageViewerStateWidget
tools = ['bqplot:panzoom', 'bqplot:rectangle', 'bqplot:circle']
def __init__(self, session):
super(BqplotImageView, self).__init__(session)
on_change([(self.state, 'aspect')])(self._sync_figure_aspect)
self._sync_figure_aspect()
self._composite = CompositeArray()
self._composite_image = FRBImage(self, self._composite)
self.figure.marks = list(self.figure.marks) + [self._composite_image]
self.state.add_callback('reference_data', self._reset_limits)
self.state.add_callback('x_att', self._reset_limits)
self.state.add_callback('y_att', self._reset_limits)
def _reset_limits(self, *args):
self.state.reset_limits()
def _sync_figure_aspect(self):
with self.figure.hold_trait_notifications():
if self.state.aspect == 'equal':
self.figure.max_aspect_ratio = 1
self.figure.min_aspect_ratio = 1
else:
self.figure.min_aspect_ratio = bqplot.Figure.min_aspect_ratio.default_value
self.figure.max_aspect_ratio = bqplot.Figure.max_aspect_ratio.default_value
def get_data_layer_artist(self, layer=None, layer_state=None):
if layer.ndim == 1:
cls = BqplotScatterLayerArtist
else:
cls = BqplotImageLayerArtist
return self.get_layer_artist(cls, layer=layer, layer_state=layer_state)
def get_subset_layer_artist(self, layer=None, layer_state=None):
if layer.ndim == 1:
cls = BqplotScatterLayerArtist
else:
cls = BqplotImageSubsetLayerArtist
return self.get_layer_artist(cls, layer=layer, layer_state=layer_state)
```
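The viewer above selects a scatter or image layer artist based on the dimensionality of each layer and keeps the bqplot figure's aspect ratio in sync with the viewer state. A minimal usage sketch follows; it assumes the usual glue-jupyter entry points (``jglue`` and ``app.imshow``), and the data names are purely illustrative.
```python
# Sketch: creating a BqplotImageView through the glue-jupyter application API.
import numpy as np
from glue.core import Data
import glue_jupyter as gj

app = gj.jglue()  # start a glue-jupyter application
image = Data(label='image', values=np.random.random((128, 128)))
app.data_collection.append(image)

viewer = app.imshow(data=image)   # 2D data -> BqplotImageLayerArtist
viewer.state.aspect = 'equal'     # pins the figure min/max aspect ratio to 1
```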
|
{
"source": "jdavies-st/gwcs",
"score": 2
}
|
#### File: gwcs/gwcs/api.py
```python
from astropy.wcs.wcsapi import BaseHighLevelWCS, BaseLowLevelWCS
from astropy.modeling import separable
import astropy.units as u
from . import utils
from . import coordinate_frames as cf
__all__ = ["GWCSAPIMixin"]
class GWCSAPIMixin(BaseHighLevelWCS, BaseLowLevelWCS):
"""
A mix-in class that is intended to be inherited by the
:class:`~gwcs.wcs.WCS` class and provides the low- and high-level
WCS API described in the astropy APE 14
(https://doi.org/10.5281/zenodo.1188875).
"""
# Low Level APE 14 API
@property
def pixel_n_dim(self):
"""
The number of axes in the pixel coordinate system.
"""
if self.input_frame is None:
return self.forward_transform.n_inputs
return self.input_frame.naxes
@property
def world_n_dim(self):
"""
The number of axes in the world coordinate system.
"""
if self.output_frame is None:
return self.forward_transform.n_outputs
return self.output_frame.naxes
@property
def world_axis_physical_types(self):
"""
An iterable of strings describing the physical type for each world axis.
These should be names from the VO UCD1+ controlled Vocabulary
(http://www.ivoa.net/documents/latest/UCDlist.html). If no matching UCD
type exists, this can instead be ``"custom:xxx"``, where ``xxx`` is an
arbitrary string. Alternatively, if the physical type is
unknown/undefined, an element can be `None`.
"""
# A CompositeFrame orders the output correctly based on axes_order.
if isinstance(self.output_frame, cf.CompositeFrame):
return self.output_frame.axis_physical_types
# If we don't have a CompositeFrame, where this is taken care of for us,
# we need to make sure we re-order the output to match the transform.
# The underlying frames don't reorder themselves because axes_order is global.
return tuple(self.output_frame.axis_physical_types[i] for i in self.output_frame.axes_order)
@property
def world_axis_units(self):
"""
An iterable of strings giving the units of the world coordinates for each
axis.
The strings should follow the `IVOA VOUnit standard
<http://ivoa.net/documents/VOUnits/>`_ (though as noted in the VOUnit
specification document, units that do not follow this standard are still
allowed, but just not recommended).
"""
return tuple(unit.to_string(format='vounit') for unit in self.output_frame.unit)
def _remove_quantity_output(self, result, frame):
if self.forward_transform.uses_quantity:
if self.output_frame.naxes == 1:
result = [result]
result = tuple(r.to_value(unit) for r, unit in zip(result, frame.unit))
# If we only have one output axis, we shouldn't return a tuple.
if self.output_frame.naxes == 1 and isinstance(result, tuple):
return result[0]
return result
def _add_units_input(self, arrays, transform, frame):
if transform.uses_quantity:
return tuple(u.Quantity(array, unit) for array, unit in zip(arrays, frame.unit))
return arrays
def pixel_to_world_values(self, *pixel_arrays):
"""
Convert pixel coordinates to world coordinates.
This method takes ``pixel_n_dim`` scalars or arrays as input, and pixel
coordinates should be zero-based. Returns ``world_n_dim`` scalars or
arrays in units given by ``world_axis_units``. Note that pixel
coordinates are assumed to be 0 at the center of the first pixel in each
dimension. If a pixel is in a region where the WCS is not defined, NaN
can be returned. The coordinates should be specified in the ``(x, y)``
order, where for an image, ``x`` is the horizontal coordinate and ``y``
is the vertical coordinate.
"""
pixel_arrays = self._add_units_input(pixel_arrays, self.forward_transform, self.input_frame)
result = self(*pixel_arrays, with_units=False)
return self._remove_quantity_output(result, self.output_frame)
def array_index_to_world_values(self, *index_arrays):
"""
Convert array indices to world coordinates.
This is the same as `~BaseLowLevelWCS.pixel_to_world_values` except that
the indices should be given in ``(i, j)`` order, where for an image
``i`` is the row and ``j`` is the column (i.e. the opposite order to
`~BaseLowLevelWCS.pixel_to_world_values`).
"""
index_arrays = self._add_units_input(index_arrays[::-1], self.forward_transform, self.input_frame)
result = self(*index_arrays, with_units=False)
return self._remove_quantity_output(result, self.output_frame)
def world_to_pixel_values(self, *world_arrays):
"""
Convert world coordinates to pixel coordinates.
This method takes ``world_n_dim`` scalars or arrays as input in units
given by ``world_axis_units``. Returns ``pixel_n_dim`` scalars or
arrays. Note that pixel coordinates are assumed to be 0 at the center of
the first pixel in each dimension. If a world coordinate does not have a
matching pixel coordinate, NaN can be returned. The coordinates should
be returned in the ``(x, y)`` order, where for an image, ``x`` is the
horizontal coordinate and ``y`` is the vertical coordinate.
"""
world_arrays = self._add_units_input(world_arrays, self.backward_transform, self.output_frame)
result = self.invert(*world_arrays, with_units=False)
return self._remove_quantity_output(result, self.input_frame)
def world_to_array_index_values(self, *world_arrays):
"""
Convert world coordinates to array indices.
This is the same as `~BaseLowLevelWCS.world_to_pixel_values` except that
the indices should be returned in ``(i, j)`` order, where for an image
``i`` is the row and ``j`` is the column (i.e. the opposite order to
`~BaseLowLevelWCS.pixel_to_world_values`). The indices should be
returned as rounded integers.
"""
world_arrays = self._add_units_input(world_arrays, self.backward_transform, self.output_frame)
result = self.invert(*world_arrays, with_units=False)
if self.pixel_n_dim != 1:
result = result[::-1]
return self._remove_quantity_output(result, self.input_frame)
@property
def array_shape(self):
"""
The shape of the data that the WCS applies to as a tuple of
length `~BaseLowLevelWCS.pixel_n_dim`.
If the WCS is valid in the context of a dataset with a particular
shape, then this property can be used to store the shape of the
data. This can be used for example if implementing slicing of WCS
objects. This is an optional property, and it should return `None`
if a shape is not known or relevant.
The shape should be given in ``(row, column)`` order (the convention
for arrays in Python).
"""
return self._array_shape
@array_shape.setter
def array_shape(self, value):
self._array_shape = value
@property
def pixel_bounds(self):
"""
The bounds (in pixel coordinates) inside which the WCS is defined,
as a list with `~BaseLowLevelWCS.pixel_n_dim` ``(min, max)`` tuples.
The bounds should be given in ``[(xmin, xmax), (ymin, ymax)]``
order. WCS solutions are sometimes only guaranteed to be accurate
within a certain range of pixel values, for example when defining a
WCS that includes fitted distortions. This is an optional property,
and it should return `None` if a shape is not known or relevant.
"""
bounding_box = self.bounding_box
if bounding_box is None:
return bounding_box
if self.pixel_n_dim == 1 and len(bounding_box) == 2:
bounding_box = (bounding_box,)
# Iterate over the bounding box and convert from quantity if required.
bounding_box = list(bounding_box)
for i, bb_axes in enumerate(bounding_box):
bb = []
for lim in bb_axes:
if isinstance(lim, u.Quantity):
lim = lim.value
bb.append(lim)
bounding_box[i] = tuple(bb)
return tuple(bounding_box)
@property
def pixel_shape(self):
"""
The shape of the data that the WCS applies to as a tuple of length
``pixel_n_dim`` in ``(x, y)`` order (where for an image, ``x`` is
the horizontal coordinate and ``y`` is the vertical coordinate)
(optional).
If the WCS is valid in the context of a dataset with a particular
shape, then this property can be used to store the shape of the
data. This can be used for example if implementing slicing of WCS
objects. This is an optional property, and it should return `None`
if a shape is neither known nor relevant.
"""
return self._pixel_shape
@pixel_shape.setter
def pixel_shape(self, value):
if value is None:
self._pixel_shape = None
return
wcs_naxes = self.input_frame.naxes
if len(value) != wcs_naxes:
raise ValueError("The number of data axes, "
"{}, does not equal the "
"shape {}.".format(wcs_naxes, len(value)))
self._pixel_shape = tuple(value)
@property
def axis_correlation_matrix(self):
"""
Returns an (`~BaseLowLevelWCS.world_n_dim`,
`~BaseLowLevelWCS.pixel_n_dim`) matrix that indicates using booleans
whether a given world coordinate depends on a given pixel coordinate.
This defaults to a matrix where all elements are `True` in the absence of
any further information. For completely independent axes, the diagonal
would be `True` and all other entries `False`.
"""
return separable.separability_matrix(self.forward_transform)
@property
def serialized_classes(self):
"""
Indicates whether Python objects are given in serialized form or as
actual Python objects.
"""
return False
@property
def world_axis_object_classes(self):
return self.output_frame._world_axis_object_classes
@property
def world_axis_object_components(self):
return self.output_frame._world_axis_object_components
# High level APE 14 API
@property
def low_level_wcs(self):
"""
Returns a reference to the underlying low-level WCS object.
"""
return self
def _sanitize_pixel_inputs(self, *pixel_arrays):
pixels = []
if self.forward_transform.uses_quantity:
for i, pixel in enumerate(pixel_arrays):
if not isinstance(pixel, u.Quantity):
pixel = u.Quantity(value=pixel, unit=self.input_frame.unit[i])
pixels.append(pixel)
else:
for i, pixel in enumerate(pixel_arrays):
if isinstance(pixel, u.Quantity):
if pixel.unit != self.input_frame.unit[i]:
raise ValueError('Quantity input does not match the '
'input_frame unit.')
pixel = pixel.value
pixels.append(pixel)
return pixels
def pixel_to_world(self, *pixel_arrays):
"""
Convert pixel values to world coordinates.
"""
pixels = self._sanitize_pixel_inputs(*pixel_arrays)
return self(*pixels, with_units=True)
def array_index_to_world(self, *index_arrays):
"""
Convert array indices to world coordinates (represented by Astropy
objects).
"""
pixel_arrays = index_arrays[::-1]
pixels = self._sanitize_pixel_inputs(*pixel_arrays)
return self(*pixels, with_units=True)
def world_to_pixel(self, *world_objects):
"""
Convert world coordinates to pixel values.
"""
result = self.invert(*world_objects, with_units=True)
if not utils.isnumerical(result[0]):
result = [i.value for i in result]
if self.input_frame.naxes == 1:
return result[0]
return result
def world_to_array_index(self, *world_objects):
"""
Convert world coordinates (represented by Astropy objects) to array
indices.
"""
result = self.invert(*world_objects, with_units=True)[::-1]
return tuple([utils._toindex(r) for r in result])
@property
def pixel_axis_names(self):
"""
An iterable of strings describing the name for each pixel axis.
"""
if self.input_frame is not None:
return self.input_frame.axes_names
return tuple([''] * self.pixel_n_dim)
@property
def world_axis_names(self):
"""
An iterable of strings describing the name for each world axis.
"""
if self.output_frame is not None:
return self.output_frame.axes_names
return tuple([''] * self.world_n_dim)
```
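GWCSAPIMixin above supplies both the low-level (value based) and high-level (Astropy object based) halves of the APE 14 interface. The sketch below exercises a few of those entry points on the example NIRCam WCS shipped with the gwcs test data, the same ASDF file used in the ``numerical_inverse`` doctest in ``gwcs/wcs.py`` further down; treat it as an illustrative sketch rather than part of the repository.
```python
# Sketch: using the APE 14 API implemented by GWCSAPIMixin.
import asdf
import numpy as np
from astropy.utils.data import get_pkg_data_filename

filename = get_pkg_data_filename('data/nircamwcs.asdf', package='gwcs.tests')
w = asdf.open(filename).tree['wcs']

print(w.pixel_n_dim, w.world_n_dim)   # number of pixel and world axes
print(w.world_axis_units)             # e.g. ('deg', 'deg')

# Low-level API: plain values, pixel coordinates in (x, y) order.
ra, dec = w.pixel_to_world_values(np.array([10.0, 20.0]), np.array([5.0, 5.0]))

# Array-index variant: same transform, indices given in (row, column) order.
ra2, dec2 = w.array_index_to_world_values([5, 5], [10, 20])

# High-level API: returns and accepts Astropy objects such as SkyCoord.
sky = w.pixel_to_world(10.0, 5.0)
x, y = w.world_to_pixel(sky)
```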
#### File: gwcs/tags/geometry_models.py
```python
from asdf import yamlutil
from ..gwcs_types import GWCSTransformType
from .. geometry import (ToDirectionCosines, FromDirectionCosines,
SphericalToCartesian, CartesianToSpherical)
__all__ = ['DirectionCosinesType', 'SphericalCartesianType']
class DirectionCosinesType(GWCSTransformType):
name = "direction_cosines"
types = [ToDirectionCosines, FromDirectionCosines]
version = "1.1.0"
@classmethod
def from_tree_transform(cls, node, ctx):
transform_type = node['transform_type']
if transform_type == 'to_direction_cosines':
return ToDirectionCosines()
elif transform_type == 'from_direction_cosines':
return FromDirectionCosines()
else:
raise TypeError(f"Unknown model_type {transform_type}")
@classmethod
def to_tree_transform(cls, model, ctx):
if isinstance(model, FromDirectionCosines):
transform_type = 'from_direction_cosines'
elif isinstance(model, ToDirectionCosines):
transform_type = 'to_direction_cosines'
else:
raise TypeError(f"Model of type {model.__class__} is not supported.")
node = {'transform_type': transform_type}
return yamlutil.custom_tree_to_tagged_tree(node, ctx)
class SphericalCartesianType(GWCSTransformType):
name = "spherical_cartesian"
types = [SphericalToCartesian, CartesianToSpherical]
version = "1.1.0"
@classmethod
def from_tree_transform(cls, node, ctx):
transform_type = node['transform_type']
wrap_lon_at = node['wrap_lon_at']
if transform_type == 'spherical_to_cartesian':
return SphericalToCartesian(wrap_lon_at=wrap_lon_at)
elif transform_type == 'cartesian_to_spherical':
return CartesianToSpherical(wrap_lon_at=wrap_lon_at)
else:
raise TypeError(f"Unknown model_type {transform_type}")
@classmethod
def to_tree_transform(cls, model, ctx):
if isinstance(model, SphericalToCartesian):
transform_type = 'spherical_to_cartesian'
elif isinstance(model, CartesianToSpherical):
transform_type = 'cartesian_to_spherical'
else:
raise TypeError(f"Model of type {model.__class__} is not supported.")
node = {
'transform_type': transform_type,
'wrap_lon_at': model.wrap_lon_at
}
return yamlutil.custom_tree_to_tagged_tree(node, ctx)
```
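Both tag classes above reduce a model to a small YAML node keyed by ``transform_type`` (plus ``wrap_lon_at`` for the spherical/Cartesian pair) and rebuild the model from that node on read. A round-trip sketch, assuming gwcs is installed so its ASDF extension is registered automatically:
```python
# Sketch: round-tripping a geometry model through an ASDF file.
import asdf
from gwcs.geometry import SphericalToCartesian

tree = {'model': SphericalToCartesian(wrap_lon_at=180)}
asdf.AsdfFile(tree).write_to('spherical.asdf')

with asdf.open('spherical.asdf') as af:
    model = af.tree['model']
    # The tag stored transform_type='spherical_to_cartesian' and wrap_lon_at=180,
    # so the deserialized object is an equivalent SphericalToCartesian instance.
    print(type(model).__name__, model.wrap_lon_at)
```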
#### File: tags/tests/test_selector.py
```python
import numpy as np
from astropy.modeling.models import Mapping, Shift, Scale, Polynomial2D
from ... import selector
from asdf.tests import helpers
from ...tests.test_region import create_scalar_mapper
from ...extension import GWCSExtension
def test_regions_selector(tmpdir):
m1 = Mapping([0, 1, 1]) | Shift(1) & Shift(2) & Shift(3)
m2 = Mapping([0, 1, 1]) | Scale(2) & Scale(3) & Scale(3)
sel = {1: m1, 2: m2}
a = np.zeros((5, 6), dtype=np.int32)
a[:, 1:3] = 1
a[:, 4:5] = 2
mask = selector.LabelMapperArray(a)
rs = selector.RegionsSelector(inputs=('x', 'y'), outputs=('ra', 'dec', 'lam'),
selector=sel, label_mapper=mask)
tree = {'model': rs}
helpers.assert_roundtrip_tree(tree, tmpdir, extensions=GWCSExtension())
def test_LabelMapperArray_str(tmpdir):
a = np.array([["label1", "", "label2"],
["label1", "", ""],
["label1", "label2", "label2"]])
mask = selector.LabelMapperArray(a)
tree = {'model': mask}
helpers.assert_roundtrip_tree(tree, tmpdir, extensions=GWCSExtension())
def test_labelMapperArray_int(tmpdir):
a = np.array([[1, 0, 2],
[1, 0, 0],
[1, 2, 2]])
mask = selector.LabelMapperArray(a)
tree = {'model': mask}
helpers.assert_roundtrip_tree(tree, tmpdir, extensions=GWCSExtension())
def test_LabelMapperDict(tmpdir):
dmapper = create_scalar_mapper()
sel = selector.LabelMapperDict(('x', 'y'), dmapper,
inputs_mapping=Mapping((0,), n_inputs=2), atol=1e-3)
tree = {'model': sel}
helpers.assert_roundtrip_tree(tree, tmpdir, extensions=GWCSExtension())
def test_LabelMapperRange(tmpdir):
m = []
for i in np.arange(9) * .1:
c0_0, c1_0, c0_1, c1_1 = np.ones((4,)) * i
m.append(Polynomial2D(2, c0_0=c0_0,
c1_0=c1_0, c0_1=c0_1, c1_1=c1_1))
keys = np.array([[4.88, 5.64],
[5.75, 6.5],
[6.67, 7.47],
[7.7, 8.63],
[8.83, 9.96],
[10.19, 11.49],
[11.77, 13.28],
[13.33, 15.34],
[15.56, 18.09]])
rmapper = {}
for k, v in zip(keys, m):
rmapper[tuple(k)] = v
sel = selector.LabelMapperRange(('x', 'y'), rmapper,
inputs_mapping=Mapping((0,), n_inputs=2))
tree = {'model': sel}
helpers.assert_roundtrip_tree(tree, tmpdir, extensions=GWCSExtension())
```
#### File: gwcs/gwcs/wcs.py
```python
import functools
import itertools
import warnings
import numpy as np
import numpy.linalg as npla
from scipy import optimize
from astropy.modeling.core import Model # , fix_inputs
from astropy.modeling import utils as mutils
from astropy.modeling.models import (Shift, Polynomial2D, Sky2Pix_TAN,
RotateCelestial2Native, Mapping)
from astropy.modeling.fitting import LinearLSQFitter
import astropy.io.fits as fits
from .api import GWCSAPIMixin
from . import coordinate_frames as cf
from .utils import CoordinateFrameError
from . import utils
from .wcstools import grid_from_bounding_box
try:
from astropy.modeling.core import fix_inputs
HAS_FIX_INPUTS = True
except ImportError:
HAS_FIX_INPUTS = False
__all__ = ['WCS', 'NoConvergence']
_ITER_INV_KWARGS = ['tolerance', 'maxiter', 'adaptive', 'detect_divergence', 'quiet']
class NoConvergence(Exception):
"""
An error class used to report non-convergence and/or divergence
of numerical methods. It is used to report errors in the
iterative solution used by
:py:meth:`~gwcs.wcs.WCS.numerical_inverse`.
Attributes
----------
best_solution : `numpy.ndarray`
Best solution achieved by the numerical method.
accuracy : `numpy.ndarray`
Estimate of the accuracy of the ``best_solution``.
niter : `int`
Number of iterations performed by the numerical method
to compute ``best_solution``.
divergent : None, `numpy.ndarray`
Indices of the points in ``best_solution`` array
for which the solution appears to be divergent. If the
solution does not diverge, ``divergent`` will be set to `None`.
slow_conv : None, `numpy.ndarray`
Indices of the solutions in ``best_solution`` array
for which the solution failed to converge within the
specified maximum number of iterations. If there are no
non-converging solutions (i.e., if the required accuracy
has been achieved for all input data points)
then ``slow_conv`` will be set to `None`.
"""
def __init__(self, *args, best_solution=None, accuracy=None, niter=None,
divergent=None, slow_conv=None):
super().__init__(*args)
self.best_solution = best_solution
self.accuracy = accuracy
self.niter = niter
self.divergent = divergent
self.slow_conv = slow_conv
class WCS(GWCSAPIMixin):
"""
Basic WCS class.
Parameters
----------
forward_transform : `~astropy.modeling.Model` or a list
The transform between ``input_frame`` and ``output_frame``.
A list of (frame, transform) tuples where ``frame`` is the starting frame and
``transform`` is the transform from this frame to the next one or ``output_frame``.
The last tuple is (frame, None), where None indicates the end of the pipeline.
input_frame : str, `~gwcs.coordinate_frames.CoordinateFrame`
A coordinates object or a string name.
output_frame : str, `~gwcs.coordinate_frames.CoordinateFrame`
A coordinates object or a string name.
name : str
a name for this WCS
"""
def __init__(self, forward_transform=None, input_frame='detector', output_frame=None,
name=""):
#self.low_level_wcs = self
self._approx_inverse = None
self._available_frames = []
self._pipeline = []
self._name = name
self._array_shape = None
self._initialize_wcs(forward_transform, input_frame, output_frame)
self._pixel_shape = None
self._pipeline = [Step(*step) for step in self._pipeline]
def _initialize_wcs(self, forward_transform, input_frame, output_frame):
if forward_transform is not None:
if isinstance(forward_transform, Model):
if output_frame is None:
raise CoordinateFrameError("An output_frame must be specified"
"if forward_transform is a model.")
_input_frame, inp_frame_obj = self._get_frame_name(input_frame)
_output_frame, outp_frame_obj = self._get_frame_name(output_frame)
super(WCS, self).__setattr__(_input_frame, inp_frame_obj)
super(WCS, self).__setattr__(_output_frame, outp_frame_obj)
self._pipeline = [(input_frame, forward_transform.copy()),
(output_frame, None)]
elif isinstance(forward_transform, list):
for item in forward_transform:
name, frame_obj = self._get_frame_name(item[0])
super(WCS, self).__setattr__(name, frame_obj)
#self._pipeline.append((name, item[1]))
self._pipeline = forward_transform
else:
raise TypeError("Expected forward_transform to be a model or a "
"(frame, transform) list, got {0}".format(
type(forward_transform)))
else:
# Initialize a WCS without a forward_transform - allows building a WCS programmatically.
if output_frame is None:
raise CoordinateFrameError("An output_frame must be specified"
"if forward_transform is None.")
_input_frame, inp_frame_obj = self._get_frame_name(input_frame)
_output_frame, outp_frame_obj = self._get_frame_name(output_frame)
super(WCS, self).__setattr__(_input_frame, inp_frame_obj)
super(WCS, self).__setattr__(_output_frame, outp_frame_obj)
self._pipeline = [(_input_frame, None),
(_output_frame, None)]
def get_transform(self, from_frame, to_frame):
"""
Return a transform between two coordinate frames.
Parameters
----------
from_frame : str or `~gwcs.coordinate_frame.CoordinateFrame`
Initial coordinate frame name of object.
to_frame : str, or instance of `~gwcs.coordinate_frames.CoordinateFrame`
End coordinate frame name or object.
Returns
-------
transform : `~astropy.modeling.Model`
Transform between two frames.
"""
if not self._pipeline:
return None
try:
from_ind = self._get_frame_index(from_frame)
except ValueError:
raise CoordinateFrameError("Frame {0} is not in the available "
"frames".format(from_frame))
try:
to_ind = self._get_frame_index(to_frame)
except ValueError:
raise CoordinateFrameError("Frame {0} is not in the available frames".format(to_frame))
if to_ind < from_ind:
#transforms = np.array(self._pipeline[to_ind: from_ind], dtype="object")[:, 1].tolist()
transforms = [step.transform for step in self._pipeline[to_ind: from_ind]]
transforms = [tr.inverse for tr in transforms[::-1]]
elif to_ind == from_ind:
return None
else:
#transforms = np.array(self._pipeline[from_ind: to_ind], dtype="object")[:, 1].copy()
transforms = [step.transform for step in self._pipeline[from_ind: to_ind]]
return functools.reduce(lambda x, y: x | y, transforms)
def set_transform(self, from_frame, to_frame, transform):
"""
Set/replace the transform between two coordinate frames.
Parameters
----------
from_frame : str or `~gwcs.coordinate_frame.CoordinateFrame`
Initial coordinate frame.
to_frame : str, or instance of `~gwcs.coordinate_frames.CoordinateFrame`
End coordinate frame.
transform : `~astropy.modeling.Model`
Transform between ``from_frame`` and ``to_frame``.
"""
from_name, from_obj = self._get_frame_name(from_frame)
to_name, to_obj = self._get_frame_name(to_frame)
if not self._pipeline:
if from_name != self._input_frame:
raise CoordinateFrameError(
"Expected 'from_frame' to be {0}".format(self._input_frame))
if to_frame != self._output_frame:
raise CoordinateFrameError(
"Expected 'to_frame' to be {0}".format(self._output_frame))
try:
from_ind = self._get_frame_index(from_name)
except ValueError:
raise CoordinateFrameError("Frame {0} is not in the available frames".format(from_name))
try:
to_ind = self._get_frame_index(to_name)
except ValueError:
raise CoordinateFrameError("Frame {0} is not in the available frames".format(to_name))
if from_ind + 1 != to_ind:
raise ValueError("Frames {0} and {1} are not in sequence".format(from_name, to_name))
self._pipeline[from_ind].transform = transform
@property
def forward_transform(self):
"""
Return the total forward transform - from input to output coordinate frame.
"""
if self._pipeline:
#return functools.reduce(lambda x, y: x | y, [step[1] for step in self._pipeline[: -1]])
return functools.reduce(lambda x, y: x | y, [step.transform for step in self._pipeline[:-1]])
else:
return None
@property
def backward_transform(self):
"""
Return the total backward transform if available - from output to input coordinate system.
Raises
------
NotImplementedError :
An analytical inverse does not exist.
"""
try:
backward = self.forward_transform.inverse
except NotImplementedError as err:
raise NotImplementedError("Could not construct backward transform. \n{0}".format(err))
return backward
def _get_frame_index(self, frame):
"""
Return the index in the pipeline where this frame is located.
"""
if isinstance(frame, cf.CoordinateFrame):
frame = frame.name
#frame_names = [getattr(item[0], "name", item[0]) for item in self._pipeline]
frame_names = [step.frame if isinstance(step.frame, str) else step.frame.name for step in self._pipeline]
return frame_names.index(frame)
def _get_frame_name(self, frame):
"""
Return the name of the frame and a ``CoordinateFrame`` object.
Parameters
----------
frame : str, `~gwcs.coordinate_frames.CoordinateFrame`
Coordinate frame.
Returns
-------
name : str
The name of the frame
frame_obj : `~gwcs.coordinate_frames.CoordinateFrame`
Frame instance or None (if `frame` is str)
"""
if isinstance(frame, str):
name = frame
frame_obj = None
else:
name = frame.name
frame_obj = frame
return name, frame_obj
def __call__(self, *args, **kwargs):
"""
Executes the forward transform.
args : float or array-like
Inputs in the input coordinate system, separate inputs
for each dimension.
with_units : bool
If ``True`` returns a `~astropy.coordinates.SkyCoord` or
`~astropy.units.Quantity` object, by using the units of
the output coordinate frame.
Optional, default=False.
with_bounding_box : bool, optional
If True(default) values in the result which correspond to
any of the inputs being outside the bounding_box are set
to ``fill_value``.
fill_value : float, optional
Output value for inputs outside the bounding_box
(default is np.nan).
"""
transform = self.forward_transform
if transform is None:
raise NotImplementedError("WCS.forward_transform is not implemented.")
with_units = kwargs.pop("with_units", False)
if 'with_bounding_box' not in kwargs:
kwargs['with_bounding_box'] = True
if 'fill_value' not in kwargs:
kwargs['fill_value'] = np.nan
if self.bounding_box is not None:
# Currently compound models do not attempt to combine individual model
# bounding boxes. Get the forward transform and assign the bounding_box to it
# before evaluating it. The order Model.bounding_box is reversed.
axes_ind = self._get_axes_indices()
if transform.n_inputs > 1:
transform.bounding_box = [self.bounding_box[ind] for ind in axes_ind][::-1]
else:
transform.bounding_box = self.bounding_box
result = transform(*args, **kwargs)
if with_units:
if self.output_frame.naxes == 1:
result = self.output_frame.coordinates(result)
else:
result = self.output_frame.coordinates(*result)
return result
def in_image(self, *args, **kwargs):
"""
This method tests if one or more of the input world coordinates are
contained within the image of the forward transformation, i.e. that they
map back into the domain of definition of the forward transformation.
In practical terms, this function tests
that the input world coordinate(s) can be converted to the input frame
and that they fall within the forward transformation's ``bounding_box``
when defined.
Parameters
----------
args : float, array like, `~astropy.coordinates.SkyCoord` or
`~astropy.units.Unit`
Coordinates to be inverted.
kwargs : dict
keyword arguments to be passed either to ``backward_transform``
(when defined) or to the iterative invert method.
Returns
-------
result : bool, numpy.ndarray
A single boolean value or an array of boolean values with `True`
indicating that the WCS footprint contains the coordinate
and `False` if input is outside the footprint.
"""
kwargs['with_bounding_box'] = True
kwargs['fill_value'] = np.nan
coords = self.invert(*args, **kwargs)
result = np.isfinite(coords)
if self.input_frame.naxes > 1:
result = np.all(result, axis=0)
if self.bounding_box is None or not np.any(result):
return result
if self.input_frame.naxes == 1:
x1, x2 = self.bounding_box
if len(np.shape(args[0])) > 0:
result[result] = (coords[result] >= x1) & (coords[result] <= x2)
elif result:
result = (coords >= x1) and (coords <= x2)
else:
if len(np.shape(args[0])) > 0:
for c, (x1, x2) in zip(coords, self.bounding_box):
result[result] = (c[result] >= x1) & (c[result] <= x2)
elif result:
result = all([(c >= x1) and (c <= x2) for c, (x1, x2) in zip(coords, self.bounding_box)])
return result
def invert(self, *args, **kwargs):
"""
Invert coordinates from output frame to input frame using analytical or
user-supplied inverse. When neither analytical nor user-supplied
inverses are defined, a numerical solution will be attempted using
:py:meth:`numerical_inverse`.
.. note::
Currently numerical inverse is implemented only for 2D imaging WCS.
Parameters
----------
args : float, array like, `~astropy.coordinates.SkyCoord` or `~astropy.units.Unit`
Coordinates to be inverted. The number of arguments must be equal
to the number of world coordinates given by ``world_n_dim``.
with_bounding_box : bool, optional
If `True` (default) values in the result which correspond to any
of the inputs being outside the bounding_box are set to
``fill_value``.
fill_value : float, optional
Output value for inputs outside the bounding_box (default is ``np.nan``).
with_units : bool, optional
If ``True`` returns a `~astropy.coordinates.SkyCoord` or
`~astropy.units.Quantity` object, by using the units of
the output coordinate frame. Default is `False`.
Other Parameters
----------------
kwargs : dict
Keyword arguments to be passed to :py:meth:`numerical_inverse`
(when defined) or to the iterative invert method.
Returns
-------
result : tuple
Returns a tuple of scalar or array values for each axis.
"""
with_units = kwargs.pop('with_units', False)
if not utils.isnumerical(args[0]):
args = self.output_frame.coordinate_to_quantity(*args)
if self.output_frame.naxes == 1:
args = [args]
try:
if not self.backward_transform.uses_quantity:
args = utils.get_values(self.output_frame.unit, *args)
except (NotImplementedError, KeyError):
args = utils.get_values(self.output_frame.unit, *args)
if 'with_bounding_box' not in kwargs:
kwargs['with_bounding_box'] = True
if 'fill_value' not in kwargs:
kwargs['fill_value'] = np.nan
try:
# remove iterative inverse-specific keyword arguments:
akwargs = {k: v for k, v in kwargs.items() if k not in _ITER_INV_KWARGS}
result = self.backward_transform(*args, **akwargs)
except (NotImplementedError, KeyError):
result = self.numerical_inverse(*args, **kwargs, with_units=with_units)
if with_units and self.input_frame:
if self.input_frame.naxes == 1:
return self.input_frame.coordinates(result)
else:
return self.input_frame.coordinates(*result)
else:
return result
def numerical_inverse(self, *args, **kwargs):
"""
Invert coordinates from output frame to input frame using numerical
inverse.
.. note::
Currently numerical inverse is implemented only for 2D imaging WCS.
.. note::
This method uses a combination of vectorized fixed-point
iterations and `scipy.optimize.root`. The latter is used
for input coordinates for which the vectorized algorithm diverges.
Parameters
----------
args : float, array like, `~astropy.coordinates.SkyCoord` or `~astropy.units.Unit`
Coordinates to be inverted. The number of arguments must be equal
to the number of world coordinates given by ``world_n_dim``.
with_bounding_box : bool, optional
If `True` (default) values in the result which correspond to any
of the inputs being outside the bounding_box are set to
``fill_value``.
fill_value : float, optional
Output value for inputs outside the bounding_box (default is ``np.nan``).
with_units : bool, optional
If ``True`` returns a `~astropy.coordinates.SkyCoord` or
`~astropy.units.Quantity` object, by using the units of
the output coordinate frame. Default is `False`.
tolerance : float, optional
*Absolute tolerance* of solution. Iteration terminates when the
iterative solver estimates that the "true solution" is
within this many pixels of the current estimate, more
specifically, when the correction to the solution found
during the previous iteration is smaller
(in the sense of the L2 norm) than ``tolerance``.
Default ``tolerance`` is 1.0e-5.
maxiter : int, optional
Maximum number of iterations allowed to reach a solution.
Default is 50.
quiet : bool, optional
Do not throw :py:class:`NoConvergence` exceptions when
the method does not converge to a solution with the
required accuracy within a specified number of maximum
iterations set by ``maxiter`` parameter. Instead,
simply return the found solution. Default is `True`.
Other Parameters
----------------
adaptive : bool, optional
Specifies whether to adaptively select only points that
did not converge to a solution within the required
accuracy for the next iteration. Default (`True`) is recommended.
.. note::
The :py:meth:`numerical_inverse` uses a vectorized
implementation of the method of consecutive
approximations (see ``Notes`` section below) in which it
iterates over *all* input points *regardless of convergence* until
the required accuracy has been reached for *all* input
points. In some cases it may be possible that
*almost all* points have reached the required accuracy
but there are only a few of input data points for
which additional iterations may be needed (this
depends mostly on the characteristics of the geometric
distortions for a given instrument). In this situation
it may be advantageous to set ``adaptive`` = `True` in
which case :py:meth:`numerical_inverse` will continue
iterating *only* over the points that have not yet
converged to the required accuracy.
.. note::
When ``detect_divergence`` is `True`,
:py:meth:`numerical_inverse` will automatically switch
to the adaptive algorithm once divergence has been
detected.
detect_divergence : bool, optional
Specifies whether to perform a more detailed analysis
of the convergence to a solution. Normally
:py:meth:`numerical_inverse` may not achieve the required
accuracy if either the ``tolerance`` or ``maxiter`` arguments
are too low. However, it may happen that for some
geometric distortions the conditions of convergence for
the method of consecutive approximations used by
:py:meth:`numerical_inverse` may not be satisfied, in which
case consecutive approximations to the solution will
diverge regardless of the ``tolerance`` or ``maxiter``
settings.
When ``detect_divergence`` is `False`, these divergent
points will be detected as not having achieved the
required accuracy (without further details). In addition,
if ``adaptive`` is `False` then the algorithm will not
know that the solution (for specific points) is diverging
and will continue iterating and trying to "improve"
diverging solutions. This may result in ``NaN`` or
``Inf`` values in the return results (in addition to a
performance penalties). Even when ``detect_divergence``
is `False`, :py:meth:`numerical_inverse`, at the end of the
iterative process, will identify invalid results
(``NaN`` or ``Inf``) as "diverging" solutions and will
raise :py:class:`NoConvergence` unless the ``quiet``
parameter is set to `True`.
When ``detect_divergence`` is `True` (default),
:py:meth:`numerical_inverse` will detect points for which
current correction to the coordinates is larger than
the correction applied during the previous iteration
**if** the requested accuracy **has not yet been
achieved**. In this case, if ``adaptive`` is `True`,
these points will be excluded from further iterations and
if ``adaptive`` is `False`, :py:meth:`numerical_inverse` will
automatically switch to the adaptive algorithm. Thus, the
reported divergent solution will be the latest converging
solution computed immediately *before* divergence
has been detected.
.. note::
When accuracy has been achieved, small increases in
current corrections may be possible due to rounding
errors (when ``adaptive`` is `False`) and such
increases will be ignored.
.. note::
Based on our testing using JWST NIRCAM images, setting
``detect_divergence`` to `True` will incur about 5-10%
performance penalty with the larger penalty
corresponding to ``adaptive`` set to `True`.
Because the benefits of enabling this
feature outweigh the small performance penalty,
especially when ``adaptive`` = `False`, it is
recommended to set ``detect_divergence`` to `True`,
unless extensive testing of the distortion models for
images from specific instruments show a good stability
of the numerical method for a wide range of
coordinates (even outside the image itself).
.. note::
Indices of the diverging inverse solutions will be
reported in the ``divergent`` attribute of the
raised :py:class:`NoConvergence` exception object.
Returns
-------
result : tuple
Returns a tuple of scalar or array values for each axis.
Raises
------
NoConvergence
The iterative method did not converge to a
solution to the required accuracy within a specified
number of maximum iterations set by the ``maxiter``
parameter. To turn off this exception, set ``quiet`` to
`True`. Indices of the points for which the requested
accuracy was not achieved (if any) will be listed in the
``slow_conv`` attribute of the
raised :py:class:`NoConvergence` exception object.
See :py:class:`NoConvergence` documentation for
more details.
NotImplementedError
Numerical inverse has not been implemented for this WCS.
ValueError
Invalid argument values.
Examples
--------
>>> from astropy.utils.data import get_pkg_data_filename
>>> from gwcs import NoConvergence
>>> import asdf
>>> import numpy as np
>>> filename = get_pkg_data_filename('data/nircamwcs.asdf', package='gwcs.tests')
>>> w = asdf.open(filename).tree['wcs']
>>> ra, dec = w([1,2,3], [1,1,1])
>>> print(ra) # doctest: +FLOAT_CMP
[5.927628 5.92757069 5.92751337]
>>> print(dec) # doctest: +FLOAT_CMP
[-72.01341247 -72.01341273 -72.013413 ]
>>> x, y = w.numerical_inverse(ra, dec)
>>> print(x) # doctest: +FLOAT_CMP
[1.00000005 2.00000005 3.00000006]
>>> print(y) # doctest: +FLOAT_CMP
[1.00000004 0.99999979 1.00000015]
>>> x, y = w.numerical_inverse(ra, dec, maxiter=3, tolerance=1.0e-10, quiet=False)
Traceback (most recent call last):
...
gwcs.wcs.NoConvergence: 'WCS.numerical_inverse' failed to converge to the
requested accuracy after 3 iterations.
>>> w.numerical_inverse(
... *w([1, 300000, 3], [2, 1000000, 5], with_bounding_box=False),
... adaptive=False,
... detect_divergence=True,
... quiet=False,
... with_bounding_box=False
... )
Traceback (most recent call last):
...
gwcs.wcs.NoConvergence: 'WCS.numerical_inverse' failed to converge to the
requested accuracy. After 4 iterations, the solution is diverging at
least for one input point.
>>> # Now try to use some diverging data:
>>> divradec = w([1, 300000, 3], [2, 1000000, 5], with_bounding_box=False)
>>> print(divradec) # doctest: +FLOAT_CMP
(array([ 5.92762673, 148.21600848, 5.92750827]),
array([-72.01339464, -7.80968079, -72.01334172]))
>>> try: # doctest: +SKIP
... x, y = w.numerical_inverse(*divradec, maxiter=20,
... tolerance=1.0e-4, adaptive=True,
... detect_divergence=True,
... quiet=False)
... except NoConvergence as e:
... print(f"Indices of diverging points: {e.divergent}")
... print(f"Indices of poorly converging points: {e.slow_conv}")
... print(f"Best solution:\\n{e.best_solution}")
... print(f"Achieved accuracy:\\n{e.accuracy}")
Indices of diverging points: None
Indices of poorly converging points: [1]
Best solution:
[[1.00000040e+00 1.99999841e+00]
[6.33507833e+17 3.40118820e+17]
[3.00000038e+00 4.99999841e+00]]
Achieved accuracy:
[[2.75925982e-05 1.18471543e-05]
[3.65405005e+04 1.31364188e+04]
[2.76552923e-05 1.14789013e-05]]
"""
tolerance = kwargs.get('tolerance', 1e-5)
maxiter = kwargs.get('maxiter', 50)
adaptive = kwargs.get('adaptive', True)
detect_divergence = kwargs.get('detect_divergence', True)
quiet = kwargs.get('quiet', True)
with_bounding_box = kwargs.get('with_bounding_box', True)
fill_value = kwargs.get('fill_value', np.nan)
with_units = kwargs.pop('with_units', False)
if not utils.isnumerical(args[0]):
args = self.output_frame.coordinate_to_quantity(*args)
if self.output_frame.naxes == 1:
args = [args]
args = utils.get_values(self.output_frame.unit, *args)
args_shape = np.shape(args)
nargs = args_shape[0]
arg_dim = len(args_shape) - 1
if nargs != self.world_n_dim:
raise ValueError("Number of input coordinates is different from "
"the number of defined world coordinates in the "
f"WCS ({self.world_n_dim:d})")
if self.world_n_dim != self.pixel_n_dim:
raise NotImplementedError(
"Support for iterative inverse for transformations with "
"different number of inputs and outputs was not implemented."
)
# initial guess:
if nargs == 2 and self._approx_inverse is None:
self._calc_approx_inv(max_inv_pix_error=5, inv_degree=None)
if self._approx_inverse is None:
if self.bounding_box is None:
x0 = np.ones(self.pixel_n_dim)
else:
x0 = np.mean(self.bounding_box, axis=-1)
if arg_dim == 0:
argsi = args
if nargs == 2 and self._approx_inverse is not None:
x0 = self._approx_inverse(*argsi)
if not np.all(np.isfinite(x0)):
return [np.array(np.nan) for _ in range(nargs)]
result = tuple(self._vectorized_fixed_point(
x0, argsi,
tolerance=tolerance,
maxiter=maxiter,
adaptive=adaptive,
detect_divergence=detect_divergence,
quiet=quiet,
with_bounding_box=with_bounding_box,
fill_value=fill_value
).T.ravel().tolist())
else:
arg_shape = args_shape[1:]
nelem = np.prod(arg_shape)
args = np.reshape(args, (nargs, nelem))
if self._approx_inverse is None:
x0 = np.full((nelem, nargs), x0)
else:
x0 = np.array(self._approx_inverse(*args)).T
result = self._vectorized_fixed_point(
x0, args.T,
tolerance=tolerance,
maxiter=maxiter,
adaptive=adaptive,
detect_divergence=detect_divergence,
quiet=quiet,
with_bounding_box=with_bounding_box,
fill_value=fill_value
).T
result = tuple(np.reshape(result, args_shape))
if with_units and self.input_frame:
if self.input_frame.naxes == 1:
return self.input_frame.coordinates(result)
else:
return self.input_frame.coordinates(*result)
else:
return result
def _vectorized_fixed_point(self, pix0, world, tolerance, maxiter,
adaptive, detect_divergence, quiet,
with_bounding_box, fill_value):
# ############################################################
# # INITIALIZE ITERATIVE PROCESS: ##
# ############################################################
# make a copy of the initial approximation
pix0 = np.atleast_2d(np.array(pix0)) # 0-order solution
pix = np.array(pix0)
world0 = np.atleast_2d(np.array(world))
world = np.array(world0)
# estimate pixel scale using approximate algorithm
# from https://trs.jpl.nasa.gov/handle/2014/40409
if self.bounding_box is None:
crpix = np.ones(self.pixel_n_dim)
else:
crpix = np.mean(self.bounding_box, axis=-1)
l1, phi1 = np.deg2rad(self.__call__(*(crpix - 0.5)))
l2, phi2 = np.deg2rad(self.__call__(*(crpix + [-0.5, 0.5])))
l3, phi3 = np.deg2rad(self.__call__(*(crpix + 0.5)))
l4, phi4 = np.deg2rad(self.__call__(*(crpix + [0.5, -0.5])))
area = np.abs(0.5 * ((l4 - l2) * np.sin(phi1) +
(l1 - l3) * np.sin(phi2) +
(l2 - l4) * np.sin(phi3) +
(l3 - l1) * np.sin(phi4)))
inv_pscale = 1 / np.rad2deg(np.sqrt(area))
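# 1 / pscale converts the wrapped world-coordinate residuals computed in
# f() below (in degrees) into approximately pixel-sized corrections, so the
# fixed-point update stays well scaled independently of the image pixel scale.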
# form equation:
def f(x):
w = np.array(self.__call__(*(x.T), with_bounding_box=False)).T
dw = np.mod(np.subtract(w, world) - 180.0, 360.0) - 180.0
return np.add(inv_pscale * dw, x)
def froot(x):
return np.mod(np.subtract(self.__call__(*x, with_bounding_box=False), worldi) - 180.0, 360.0) - 180.0
# compute correction:
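# The correction combines two consecutive fixed-point evaluations,
# p1 = f(pix) and p2 = f(p1), in an Aitken delta-squared style update:
# (p1 - pix)**2 / (p2 - 2*p1 + pix); where that denominator vanishes,
# the plain residual pix - p2 is used instead.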
def correction(pix):
p1 = f(pix)
p2 = f(p1)
d = p2 - 2.0 * p1 + pix
idx = np.where(d != 0)
corr = pix - p2
corr[idx] = np.square(p1[idx] - pix[idx]) / d[idx]
return corr
# initial iteration:
dpix = correction(pix)
# Update initial solution:
pix -= dpix
# Norm (L2) squared of the correction:
dn = np.sum(dpix * dpix, axis=1)
dnprev = dn.copy() # if adaptive else dn
tol2 = tolerance**2
# Prepare for iterative process
k = 1
ind = None
inddiv = None
# Turn off numpy runtime warnings for 'invalid' and 'over':
old_invalid = np.geterr()['invalid']
old_over = np.geterr()['over']
np.seterr(invalid='ignore', over='ignore')
# ############################################################
# # NON-ADAPTIVE ITERATIONS: ##
# ############################################################
if not adaptive:
# Fixed-point iterations:
while (np.nanmax(dn) >= tol2 and k < maxiter):
# Find correction to the previous solution:
dpix = correction(pix)
# Compute norm (L2) squared of the correction:
dn = np.sum(dpix * dpix, axis=1)
# Check for divergence (we do this in two stages
# to optimize performance for the most common
# scenario when successive approximations converge):
if detect_divergence:
divergent = (dn >= dnprev)
if np.any(divergent):
# Find solutions that have not yet converged:
slowconv = (dn >= tol2)
inddiv, = np.where(divergent & slowconv)
if inddiv.shape[0] > 0:
# Update indices of elements that
# still need correction:
conv = (dn < dnprev)
iconv = np.where(conv)
# Apply correction:
dpixgood = dpix[iconv]
pix[iconv] -= dpixgood
dpix[iconv] = dpixgood
# For the next iteration choose
# non-divergent points that have not yet
# converged to the requested accuracy:
ind, = np.where(slowconv & conv)
world = world[ind]
dnprev[ind] = dn[ind]
k += 1
# Switch to adaptive iterations:
adaptive = True
break
# Save current correction magnitudes for later:
dnprev = dn
# Apply correction:
pix -= dpix
k += 1
# ############################################################
# # ADAPTIVE ITERATIONS: ##
# ############################################################
if adaptive:
if ind is None:
ind, = np.where(np.isfinite(pix).all(axis=1))
world = world[ind]
# "Adaptive" fixed-point iterations:
while (ind.shape[0] > 0 and k < maxiter):
# Find correction to the previous solution:
dpixnew = correction(pix[ind])
# Compute norm (L2) of the correction:
dnnew = np.sum(np.square(dpixnew), axis=1)
# Bookkeeping of corrections:
dnprev[ind] = dn[ind].copy()
dn[ind] = dnnew
if detect_divergence:
# Find indices of pixels that are converging:
conv = np.logical_or(dnnew < dnprev[ind], dnnew < tol2)
if not np.all(conv):
conv = np.ones_like(dnnew, dtype=bool)
iconv = np.where(conv)
iiconv = ind[iconv]
# Apply correction:
dpixgood = dpixnew[iconv]
pix[iiconv] -= dpixgood
dpix[iiconv] = dpixgood
# Find indices of solutions that have not yet
# converged to the requested accuracy
# AND that do not diverge:
subind, = np.where((dnnew >= tol2) & conv)
else:
# Apply correction:
pix[ind] -= dpixnew
dpix[ind] = dpixnew
# Find indices of solutions that have not yet
# converged to the requested accuracy:
subind, = np.where(dnnew >= tol2)
# Choose solutions that need more iterations:
ind = ind[subind]
world = world[subind]
k += 1
# ############################################################
# # FINAL DETECTION OF INVALID, DIVERGING, ##
# # AND FAILED-TO-CONVERGE POINTS ##
# ############################################################
# Identify diverging and/or invalid points:
invalid = ((~np.all(np.isfinite(pix), axis=1)) &
(np.all(np.isfinite(world0), axis=1)))
# When detect_divergence is False, dnprev is outdated
# (it is the norm of the very first correction).
# Still better than nothing...
inddiv, = np.where(((dn >= tol2) & (dn >= dnprev)) | invalid)
if inddiv.shape[0] == 0:
inddiv = None
# If there are divergent points, attempt to find a solution using
# scipy's 'hybr' method:
if detect_divergence and inddiv is not None and inddiv.size:
bad = []
for idx in inddiv:
worldi = world0[idx]
result = optimize.root(
froot,
pix0[idx],
method='hybr',
tol=tolerance / (np.linalg.norm(pix0[idx]) + 1),
options={'maxfev': 2 * maxiter}
)
if result['success']:
pix[idx, :] = result['x']
invalid[idx] = False
else:
bad.append(idx)
if bad:
inddiv = np.array(bad, dtype=int)
else:
inddiv = None
# Identify points that did not converge within 'maxiter'
# iterations:
if k >= maxiter:
ind, = np.where((dn >= tol2) & (dn < dnprev) & (~invalid))
if ind.shape[0] == 0:
ind = None
else:
ind = None
# Restore previous numpy error settings:
np.seterr(invalid=old_invalid, over=old_over)
# ############################################################
# # RAISE EXCEPTION IF DIVERGING OR TOO SLOWLY CONVERGING ##
# # DATA POINTS HAVE BEEN DETECTED: ##
# ############################################################
if (ind is not None or inddiv is not None) and not quiet:
if inddiv is None:
raise NoConvergence(
"'WCS.numerical_inverse' failed to "
"converge to the requested accuracy after {:d} "
"iterations.".format(k), best_solution=pix,
accuracy=np.abs(dpix), niter=k,
slow_conv=ind, divergent=None)
else:
raise NoConvergence(
"'WCS.numerical_inverse' failed to "
"converge to the requested accuracy.\n"
"After {:d} iterations, the solution is diverging "
"at least for one input point."
.format(k), best_solution=pix,
accuracy=np.abs(dpix), niter=k,
slow_conv=ind, divergent=inddiv)
if with_bounding_box and self.bounding_box is not None:
# find points outside the bounding box and replace their values
# with fill_value
valid = np.logical_not(invalid)
in_bb = np.ones_like(invalid, dtype=np.bool_)
for c, (x1, x2) in zip(pix[valid].T, self.bounding_box):
in_bb[valid] &= (c >= x1) & (c <= x2)
pix[np.logical_not(in_bb)] = fill_value
return pix
def transform(self, from_frame, to_frame, *args, **kwargs):
"""
Transform positions between two frames.
Parameters
----------
from_frame : str or `~gwcs.coordinate_frames.CoordinateFrame`
Initial coordinate frame.
to_frame : str, or instance of `~gwcs.coordinate_frames.CoordinateFrame`
Coordinate frame into which to transform.
args : float or array-like
Inputs in ``from_frame``, separate inputs for each dimension.
output_with_units : bool
If ``True`` - returns a `~astropy.coordinates.SkyCoord` or
`~astropy.units.Quantity` object.
with_bounding_box : bool, optional
If True(default) values in the result which correspond to any of the inputs being
outside the bounding_box are set to ``fill_value``.
fill_value : float, optional
Output value for inputs outside the bounding_box (default is np.nan).
"""
transform = self.get_transform(from_frame, to_frame)
if not utils.isnumerical(args[0]):
inp_frame = getattr(self, from_frame)
args = inp_frame.coordinate_to_quantity(*args)
if not transform.uses_quantity:
args = utils.get_values(inp_frame.unit, *args)
with_units = kwargs.pop("with_units", False)
if 'with_bounding_box' not in kwargs:
kwargs['with_bounding_box'] = True
if 'fill_value' not in kwargs:
kwargs['fill_value'] = np.nan
result = transform(*args, **kwargs)
if with_units:
to_frame_name, to_frame_obj = self._get_frame_name(to_frame)
if to_frame_obj is not None:
if to_frame_obj.naxes == 1:
result = to_frame_obj.coordinates(result)
else:
result = to_frame_obj.coordinates(*result)
else:
raise TypeError("Coordinate objects could not be created because"
"frame {0} is not defined.".format(to_frame_name))
return result
@property
def available_frames(self):
"""
List all frames in this WCS object.
Returns
-------
available_frames : dict
{frame_name: frame_object or None}
"""
if self._pipeline:
#return [getattr(frame[0], "name", frame[0]) for frame in self._pipeline]
return [step.frame if isinstance(step.frame, str) else step.frame.name for step in self._pipeline ]
else:
return None
def insert_transform(self, frame, transform, after=False):
"""
Insert a transform before (default) or after a coordinate frame.
Append (or prepend) a transform to the transform connected to frame.
Parameters
----------
frame : str or `~gwcs.coordinate_frame.CoordinateFrame`
Coordinate frame which sets the point of insertion.
transform : `~astropy.modeling.Model`
New transform to be inserted in the pipeline
after : bool
If True, the new transform is inserted in the pipeline
immediately after ``frame``.
"""
name, _ = self._get_frame_name(frame)
frame_ind = self._get_frame_index(name)
if not after:
current_transform = self._pipeline[frame_ind - 1].transform
self._pipeline[frame_ind - 1].transform = current_transform | transform
else:
current_transform = self._pipeline[frame_ind].transform
self._pipeline[frame_ind].transform = transform | current_transform
def insert_frame(self, input_frame, transform, output_frame):
"""
Insert a new frame into an existing pipeline. This frame must be
anchored to a frame already in the pipeline by a transform. This
existing frame is identified solely by its name, although an entire
`~gwcs.coordinate_frame.CoordinateFrame` can be passed (e.g., the
`input_frame` or `output_frame` attribute). This frame is never
modified.
Parameters
----------
input_frame : str or `~gwcs.coordinate_frame.CoordinateFrame`
Coordinate frame at start of new transform
transform : `~astropy.modeling.Model`
New transform to be inserted in the pipeline
output_frame : str or `~gwcs.coordinate_frame.CoordinateFrame`
Coordinate frame at end of new transform
"""
input_name, input_frame_obj = self._get_frame_name(input_frame)
output_name, output_frame_obj = self._get_frame_name(output_frame)
try:
input_index = self._get_frame_index(input_frame)
except ValueError:
input_index = None
if input_frame_obj is None:
raise ValueError(f"New coordinate frame {input_name} must "
"be defined")
try:
output_index = self._get_frame_index(output_frame)
except ValueError:
output_index = None
if output_frame_obj is None:
raise ValueError(f"New coordinate frame {output_name} must "
"be defined")
new_frames = [input_index, output_index].count(None)
if new_frames == 0:
raise ValueError("Could not insert frame as both frames "
f"{input_name} and {output_name} already exist")
elif new_frames == 2:
raise ValueError("Could not insert frame as neither frame "
f"{input_name} nor {output_name} exists")
if input_index is None:
self._pipeline = (self._pipeline[:output_index] +
[Step(input_frame_obj, transform)] +
self._pipeline[output_index:])
super(WCS, self).__setattr__(input_name, input_frame_obj)
else:
split_step = self._pipeline[input_index]
self._pipeline = (self._pipeline[:input_index] +
[Step(split_step.frame, transform),
Step(output_frame_obj, split_step.transform)] +
self._pipeline[input_index + 1:])
super(WCS, self).__setattr__(output_name, output_frame_obj)
@property
def unit(self):
"""The unit of the coordinates in the output coordinate system."""
if self._pipeline:
try:
#return getattr(self, self._pipeline[-1][0].name).unit
return self._pipeline[-1].frame.unit
except AttributeError:
return None
else:
return None
@property
def output_frame(self):
"""Return the output coordinate frame."""
if self._pipeline:
frame = self._pipeline[-1].frame
if not isinstance(frame, str):
frame = frame.name
return getattr(self, frame)
else:
return None
@property
def input_frame(self):
"""Return the input coordinate frame."""
if self._pipeline:
frame = self._pipeline[0].frame
if not isinstance(frame, str):
frame = frame.name
return getattr(self, frame)
else:
return None
@property
def name(self):
"""Return the name for this WCS."""
return self._name
@name.setter
def name(self, value):
"""Set the name for the WCS."""
self._name = value
@property
def pipeline(self):
"""Return the pipeline structure."""
return self._pipeline
@property
def bounding_box(self):
"""
Return the range of acceptable values for each input axis.
The order of the axes is `~gwcs.coordinate_frames.CoordinateFrame.axes_order`.
"""
frames = self.available_frames
transform_0 = self.get_transform(frames[0], frames[1])
try:
bb = transform_0.bounding_box
except NotImplementedError:
return None
if transform_0.n_inputs == 1:
return bb
try:
axes_order = self.input_frame.axes_order
except AttributeError:
axes_order = np.arange(transform_0.n_inputs)
# Model.bounding_box is in python order, need to reverse it first.
return tuple(bb[::-1][i] for i in axes_order)
@bounding_box.setter
def bounding_box(self, value):
"""
Set the range of acceptable values for each input axis.
The order of the axes is `~gwcs.coordinate_frames.CoordinateFrame.axes_order`.
For two inputs and ``axes_order=(0, 1)`` the bounding box is ((xlow, xhigh), (ylow, yhigh)).
Parameters
----------
value : tuple or None
Tuple of tuples with ``(low, high)`` values for the range.
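Examples
--------
A minimal sketch, assuming ``w`` is a WCS with two pixel inputs and the
default ``axes_order``:
>>> w.bounding_box = ((0, 2047), (0, 2047))  # doctest: +SKIP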
"""
frames = self.available_frames
transform_0 = self.get_transform(frames[0], frames[1])
if value is None:
transform_0.bounding_box = value
else:
try:
# Make sure the dimensions of the new bbox are correct.
mutils._BoundingBox.validate(transform_0, value)
except Exception:
raise
# get the sorted order of axes' indices
axes_ind = self._get_axes_indices()
if transform_0.n_inputs == 1:
transform_0.bounding_box = value
else:
# The axes in bounding_box in modeling follow python order
#transform_0.bounding_box = np.array(value)[axes_ind][::-1]
transform_0.bounding_box = [value[ind] for ind in axes_ind][::-1]
self.set_transform(frames[0], frames[1], transform_0)
def _get_axes_indices(self):
try:
axes_ind = np.argsort(self.input_frame.axes_order)
except AttributeError:
# the case of a frame being a string
axes_ind = np.arange(self.forward_transform.n_inputs)
return axes_ind
def __str__(self):
from astropy.table import Table
#col1 = [item[0] for item in self._pipeline]
col1 = [step.frame for step in self._pipeline]
col2 = []
for item in self._pipeline[: -1]:
#model = item[1]
model = item.transform
if model.name is not None:
col2.append(model.name)
else:
col2.append(model.__class__.__name__)
col2.append(None)
t = Table([col1, col2], names=['From', 'Transform'])
return str(t)
def __repr__(self):
fmt = "<WCS(output_frame={0}, input_frame={1}, forward_transform={2})>".format(
self.output_frame, self.input_frame, self.forward_transform)
return fmt
def footprint(self, bounding_box=None, center=False, axis_type="all"):
"""
Return the footprint in world coordinates.
Parameters
----------
bounding_box : tuple of floats, optional
Pairs of ``(start, stop)`` pixel values for each axis. If None,
``self.bounding_box`` is used.
center : bool
If `True` use the center of the pixel, otherwise use the corner.
axis_type : str
A supported ``output_frame.axes_type`` or "all" (default).
One of ['spatial', 'spectral', 'temporal'] or a custom type.
Returns
-------
coord : ndarray
Array of coordinates in the output_frame mapping
corners to the output frame. For spatial coordinates the order
is clockwise, starting from the bottom left corner.
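Examples
--------
Hedged sketch; ``w`` is assumed to be a WCS with a defined bounding box
whose output frame includes spatial axes:
>>> corners = w.footprint()  # all axes  # doctest: +SKIP
>>> sky = w.footprint(center=True, axis_type='spatial')  # doctest: +SKIP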
"""
def _order_clockwise(v):
return np.asarray([[v[0][0], v[1][0]], [v[0][0], v[1][1]],
[v[0][1], v[1][1]], [v[0][1], v[1][0]]]).T
if bounding_box is None:
if self.bounding_box is None:
raise TypeError("Need a valid bounding_box to compute the footprint.")
bb = self.bounding_box
else:
bb = bounding_box
all_spatial = all([t.lower() == "spatial" for t in self.output_frame.axes_type])
if all_spatial:
vertices = _order_clockwise(bb)
else:
vertices = np.array(list(itertools.product(*bb))).T
if center:
vertices = utils._toindex(vertices)
result = np.asarray(self.__call__(*vertices, **{'with_bounding_box': False}))
axis_type = axis_type.lower()
if axis_type == 'spatial' and all_spatial:
return result.T
if axis_type != "all":
axtyp_ind = np.array([t.lower() for t in self.output_frame.axes_type]) == axis_type
if not axtyp_ind.any():
raise ValueError('This WCS does not have axis of type "{}".'.format(axis_type))
result = np.asarray([(r.min(), r.max()) for r in result[axtyp_ind]])
if axis_type == "spatial":
result = _order_clockwise(result)
else:
result.sort()
result = np.squeeze(result)
return result.T
def fix_inputs(self, fixed):
"""
Return a new unique WCS by fixing inputs to constant values.
Parameters
----------
fixed : dict
Keyword arguments with fixed values corresponding to `self.selector`.
Returns
-------
new_wcs : `WCS`
A new unique WCS corresponding to the values in `fixed`.
Examples
--------
>>> w = WCS(pipeline, selector={"spectral_order": [1, 2]}) # doctest: +SKIP
>>> new_wcs = w.fix_inputs({"spectral_order": 2}) # doctest: +SKIP
>>> new_wcs.inputs # doctest: +SKIP
("x", "y")
"""
if not HAS_FIX_INPUTS:
raise ImportError('"fix_inputs" needs astropy version >= 4.0.')
new_pipeline = []
step0 = self.pipeline[0]
new_transform = fix_inputs(step0[1], fixed)
new_pipeline.append((step0[0], new_transform))
new_pipeline.extend(self.pipeline[1:])
return self.__class__(new_pipeline)
def to_fits_sip(self, bounding_box=None, max_pix_error=0.25, degree=None,
max_inv_pix_error=0.25, inv_degree=None,
npoints=32, crpix=None, verbose=False):
"""
Construct a SIP-based approximation to the WCS in the form of a FITS
header. This assumes a tangent projection.
By default the fit attempts to achieve roughly 0.25 pixel accuracy over
the whole image.
Parameters
----------
bounding_box : tuple, optional
A pair of tuples, each consisting of two numbers
Represents the range of pixel values in both dimensions
((xmin, xmax), (ymin, ymax))
max_pix_error : float, optional
Maximum allowed error over the domain of the pixel array. This
error is the equivalent pixel error that corresponds to the maximum
error in the output coordinate resulting from the fit based on
a nominal plate scale.
degree : int, iterable, None, optional
Degree of the SIP polynomial. Default value `None` indicates that
all allowed degree values (``[1...9]``) will be considered and
the lowest degree that meets accuracy requirements set by
``max_pix_error`` will be returned. Alternatively, ``degree`` can be
an iterable containing allowed values for the SIP polynomial degree.
This option is similar to default `None` but it allows the caller to
restrict the range of allowed SIP degrees used for fitting.
Finally, ``degree`` can be an integer indicating the exact SIP degree
to be fit to the WCS transformation. In this case
``max_pix_error`` is ignored.
max_inv_pix_error : float, optional
Maximum allowed inverse error over the domain of the pixel array
in pixel units. If None, no inverse is generated.
inv_degree : int, iterable, None, optional
Degree of the inverse SIP polynomial. Default value `None` indicates
that all allowed degree values (``[1...9]``) will be considered and
the lowest degree that meets accuracy requirements set by
``max_inv_pix_error`` will be returned. Alternatively, ``inv_degree``
can be an iterable containing allowed values for the SIP polynomial
degree. This option is similar to default `None` but it allows the
caller to restrict the range of allowed SIP degrees used for fitting.
Finally, ``inv_degree`` can be an integer indicating the exact SIP
degree to be fit to the WCS transformation. In this case
``max_inv_pix_error`` is ignored.
npoints : int, optional
The number of points in each dimension to sample the bounding box
for use in the SIP fit. The minimum number of points is 8.
crpix : list of float, None, optional
Coordinates (1-based) of the reference point for the new FITS WCS.
When not provided, i.e., when set to `None` (default) the reference
pixel will be chosen near the center of the bounding box.
verbose : bool, optional
Print progress of fits.
Returns
-------
FITS header with all SIP WCS keywords
Raises
------
ValueError
If the WCS is not 2D, an exception will be raised. If the specified accuracy
(both forward and inverse, both rms and maximum) is not achieved an exception
will be raised.
Notes
-----
Use of this requires a judicious choice of required accuracies. Attempts to use
higher degrees (~7 or higher) will typically fail due to floating point problems
that arise with high powers.
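Examples
--------
Illustrative only; ``w`` is assumed to be a 2D celestial WCS with a
defined bounding box, and the argument values are arbitrary:
>>> hdr = w.to_fits_sip(max_pix_error=0.1, npoints=64)  # doctest: +SKIP
>>> hdr['CTYPE1']  # doctest: +SKIP
'RA---TAN-SIP'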
"""
if not isinstance(self.output_frame, cf.CelestialFrame):
raise ValueError(
"The to_fits_sip method only works with celestial frame transforms")
if npoints < 8:
raise ValueError("Number of sampling points is too small. 'npoints' must be >= 8.")
transform = self.forward_transform
# Determine reference points.
if bounding_box is None and self.bounding_box is None:
raise ValueError("A bounding_box is needed to proceed.")
if bounding_box is None:
bounding_box = self.bounding_box
(xmin, xmax), (ymin, ymax) = bounding_box
if crpix is None:
crpix1 = round((xmax + xmin) / 2, 1)
crpix2 = round((ymax + ymin) / 2, 1)
else:
crpix1 = crpix[0] - 1
crpix2 = crpix[1] - 1
# check that the bounding box has some reasonable size:
if (xmax - xmin) < 1 or (ymax - ymin) < 1:
raise ValueError("Bounding box is too small for fitting a SIP polynomial")
crval1, crval2 = transform(crpix1, crpix2)
hdr = fits.Header()
hdr['naxis'] = 2
hdr['naxis1'] = int(xmax) + 1
hdr['naxis2'] = int(ymax) + 1
hdr['ctype1'] = 'RA---TAN-SIP'
hdr['ctype2'] = 'DEC--TAN-SIP'
hdr['CRPIX1'] = crpix1 + 1
hdr['CRPIX2'] = crpix2 + 1
hdr['CRVAL1'] = crval1
hdr['CRVAL2'] = crval2
hdr['cd1_1'] = 1 # Placeholders for FITS card order, all will change.
hdr['cd1_2'] = 0
hdr['cd2_1'] = 0
hdr['cd2_2'] = 1
# Now rotate to native system and deproject. Recall that transform
# expects pixels in the original coordinate system, but the SIP
# transform is relative to crpix coordinates, thus the initial shift.
ntransform = ((Shift(crpix1) & Shift(crpix2)) | transform
| RotateCelestial2Native(crval1, crval2, 180)
| Sky2Pix_TAN())
# standard sampling:
u, v = _make_sampling_grid(npoints, bounding_box, crpix=[crpix1, crpix2])
undist_x, undist_y = ntransform(u, v)
# Double sampling to check if sampling is sufficient.
ud, vd = _make_sampling_grid(2 * npoints, bounding_box, crpix=[crpix1, crpix2])
undist_xd, undist_yd = ntransform(ud, vd)
# Determine approximate pixel scale in order to compute error threshold
# from the specified pixel error. Computed at the center of the array.
x0, y0 = ntransform(0, 0)
xx, xy = ntransform(1, 0)
yx, yy = ntransform(0, 1)
pixarea = np.abs((xx - x0) * (yy - y0) - (xy - y0) * (yx - x0))
plate_scale = np.sqrt(pixarea)
max_error = max_pix_error * plate_scale
# The fitting section.
fit_poly_x, fit_poly_y, max_resid = _fit_2D_poly(
ntransform, npoints,
degree, max_error,
u, v, undist_x, undist_y,
ud, vd, undist_xd, undist_yd,
verbose=verbose
)
# The following is necessary to put the fit into the SIP formalism.
cdmat, sip_poly_x, sip_poly_y = _reform_poly_coefficients(fit_poly_x, fit_poly_y)
# cdmat = np.array([[fit_poly_x.c1_0.value, fit_poly_x.c0_1.value],
# [fit_poly_y.c1_0.value, fit_poly_y.c0_1.value]])
det = cdmat[0][0] * cdmat[1][1] - cdmat[0][1] * cdmat[1][0]
U = ( cdmat[1][1] * undist_x - cdmat[0][1] * undist_y) / det
V = (-cdmat[1][0] * undist_x + cdmat[0][0] * undist_y) / det
detd = cdmat[0][0] * cdmat[1][1] - cdmat[0][1] * cdmat[1][0]
Ud = ( cdmat[1][1] * undist_xd - cdmat[0][1] * undist_yd) / detd
Vd = (-cdmat[1][0] * undist_xd + cdmat[0][0] * undist_yd) / detd
if max_inv_pix_error:
fit_inv_poly_u, fit_inv_poly_v, max_inv_resid = _fit_2D_poly(ntransform,
npoints, inv_degree,
max_inv_pix_error,
U, V, u-U, v-V,
Ud, Vd, ud-Ud, vd-Vd,
verbose=verbose)
pdegree = fit_poly_x.degree
if pdegree > 1:
hdr['a_order'] = pdegree
hdr['b_order'] = pdegree
_store_2D_coefficients(hdr, sip_poly_x, 'A')
_store_2D_coefficients(hdr, sip_poly_y, 'B')
hdr['sipmxerr'] = (max_resid * plate_scale, 'Max diff from GWCS (equiv pix).')
if max_inv_pix_error:
hdr['sipiverr'] = (max_inv_resid, 'Max diff for inverse (pixels)')
_store_2D_coefficients(hdr, fit_inv_poly_u, 'AP', keeplinear=True)
_store_2D_coefficients(hdr, fit_inv_poly_v, 'BP', keeplinear=True)
if max_inv_pix_error:
ipdegree = fit_inv_poly_u.degree
hdr['ap_order'] = ipdegree
hdr['bp_order'] = ipdegree
else:
hdr['ctype1'] = 'RA---TAN'
hdr['ctype2'] = 'DEC--TAN'
hdr['cd1_1'] = cdmat[0][0]
hdr['cd1_2'] = cdmat[0][1]
hdr['cd2_1'] = cdmat[1][0]
hdr['cd2_2'] = cdmat[1][1]
return hdr
def to_fits_tab(self, bounding_box=None, bin_ext_name='WCS-TABLE',
coord_col_name='coordinates', sampling=1):
"""
Construct a FITS WCS ``-TAB``-based approximation to the WCS
in the form of a FITS header and a binary table extension. For the
description of the FITS WCS ``-TAB`` convention, see
"Representations of spectral coordinates in FITS" in
`Greisen, E. W. et al. A&A 446 (2) 747-771 (2006)
<https://doi.org/10.1051/0004-6361:20053818>`_ .
Parameters
----------
bounding_box : tuple, optional
Specifies the range of acceptable values for each input axis.
The order of the axes is
`~gwcs.coordinate_frames.CoordinateFrame.axes_order`.
For two image axes ``bounding_box`` is of the form
``((xmin, xmax), (ymin, ymax))``.
bin_ext_name : str, optional
Extension name for the `~astropy.io.fits.BinTableHDU` extension.
coord_col_name : str, optional
Field name of the coordinate array in the structured array
stored in `~astropy.io.fits.BinTableHDU` data. This corresponds to
``TTYPEi`` field in the FITS header of the binary table extension.
sampling : float, tuple, optional
The target "density" of grid nodes per pixel to be used when
creating the coordinate array for the ``-TAB`` FITS WCS convention.
It is equal to ``1/step`` where ``step`` is the distance between
grid nodes in pixels. ``sampling`` can be specified as a single
number to be used for all axes or as a `tuple` of numbers
that specify the sampling for each image axis.
Returns
-------
hdr : `~astropy.io.fits.Header`
Header with WCS-TAB information associated (to be used) with image
data.
bin_table : `~astropy.io.fits.BinTableHDU`
Binary table extension containing the coordinate array.
Raises
------
ValueError
When ``bounding_box`` is not defined either through the input
``bounding_box`` parameter or this object's ``bounding_box``
property.
ValueError
When ``sampling`` is a `tuple` of length larger than 1 that
does not match the number of image axes.
RuntimeError
If the number of image axes (`~gwcs.WCS.pixel_n_dim`) is larger
than the number of world axes (`~gwcs.WCS.world_n_dim`).
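Examples
--------
Illustrative only; ``w`` is assumed to be a WCS with a defined
bounding box:
>>> hdr, bin_tab = w.to_fits_tab(sampling=0.5)  # doctest: +SKIP
>>> hdr['CTYPE1'], hdr['PS1_0'], hdr['PS1_1']  # doctest: +SKIP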
"""
if bounding_box is None:
if self.bounding_box is None:
raise ValueError(
"Need a valid bounding_box to compute the footprint."
)
bounding_box = self.bounding_box
else:
# validate user-supplied bounding box:
frames = self.available_frames
transform_0 = self.get_transform(frames[0], frames[1])
mutils._BoundingBox.validate(transform_0, bounding_box)
if self.pixel_n_dim > self.world_n_dim:
raise RuntimeError(
"The case when the number of input axes is larger than the "
"number of output axes is not supported."
)
try:
sampling = np.broadcast_to(sampling, (self.pixel_n_dim, ))
except ValueError:
raise ValueError("Number of sampling values either must be 1 "
"or it must match the number of pixel axes.")
# 1D grid coordinates:
gcrds = []
cdelt = []
for (xmin, xmax), s in zip(bounding_box, sampling):
npix = max(2, 1 + int(np.ceil(abs((xmax - xmin) / s))))
gcrds.append(np.linspace(xmin, xmax, npix))
cdelt.append((npix - 1) / (xmax - xmin) if xmin != xmax else 1)
# n-dim coordinate arrays:
coord = np.stack(
self(*np.meshgrid(*gcrds[::-1], indexing='ij')[::-1]),
axis=-1
)
# create header with WCS info:
hdr = fits.Header()
for k in range(self.world_n_dim):
k1 = k + 1
ct = cf.get_ctype_from_ucd(self.world_axis_physical_types[k])
if len(ct) > 4:
raise ValueError("Axis type name too long.")
hdr['CTYPE{:d}'.format(k1)] = ct + (4 - len(ct)) * '-' + '-TAB'
hdr['CUNIT{:d}'.format(k1)] = self.world_axis_units[k]
hdr['PS{:d}_0'.format(k1)] = bin_ext_name
hdr['PS{:d}_1'.format(k1)] = coord_col_name
hdr['PV{:d}_3'.format(k1)] = k1
hdr['CRVAL{:d}'.format(k1)] = 1
if k < self.pixel_n_dim:
hdr['CRPIX{:d}'.format(k1)] = gcrds[k][0] + 1
hdr['PC{0:d}_{0:d}'.format(k1)] = 1.0
hdr['CDELT{:d}'.format(k1)] = cdelt[k]
else:
hdr['CRPIX{:d}'.format(k1)] = 1
coord = coord[None, :]
# structured array (data) for binary table HDU:
arr = np.array(
[(coord, )],
dtype=[
(coord_col_name, np.float64, coord.shape),
]
)
# create binary table HDU:
bin_tab = fits.BinTableHDU(arr)
bin_tab.header['EXTNAME'] = bin_ext_name
return hdr, bin_tab
def _calc_approx_inv(self, max_inv_pix_error=5, inv_degree=None, npoints=16):
"""
Compute a polynomial fit for the inverse transformation to be used as
an initial approximation/guess for the iterative solution.
"""
self._approx_inverse = None
try:
# try to use analytic inverse if available:
self._approx_inverse = functools.partial(self.backward_transform,
with_bounding_box=False)
return
except (NotImplementedError, KeyError):
pass
if not isinstance(self.output_frame, cf.CelestialFrame):
# The _calc_approx_inv method only works with celestial frame transforms
return
# Determine reference points.
if self.bounding_box is None:
# A bounding_box is needed to proceed.
return
crpix = np.mean(self.bounding_box, axis=1)
crval1, crval2 = self.forward_transform(*crpix)
# Rotate to native system and deproject. Set center of the projection
# transformation to the middle of the bounding box ("image") in order
# to minimize projection effects across the entire image,
# thus the initial shift.
ntransform = ((Shift(crpix[0]) & Shift(crpix[1])) | self.forward_transform
| RotateCelestial2Native(crval1, crval2, 180)
| Sky2Pix_TAN())
# standard sampling:
u, v = _make_sampling_grid(npoints, self.bounding_box, crpix=crpix)
undist_x, undist_y = ntransform(u, v)
# Double sampling to check if sampling is sufficient.
ud, vd = _make_sampling_grid(2 * npoints, self.bounding_box, crpix=crpix)
undist_xd, undist_yd = ntransform(ud, vd)
fit_inv_poly_u, fit_inv_poly_v, max_inv_resid = _fit_2D_poly(
ntransform,
npoints, None,
max_inv_pix_error,
undist_x, undist_y, u, v,
undist_xd, undist_yd, ud, vd,
verbose=True
)
self._approx_inverse = (RotateCelestial2Native(crval1, crval2, 180) |
Sky2Pix_TAN() | Mapping((0, 1, 0, 1)) |
(fit_inv_poly_u & fit_inv_poly_v) |
(Shift(crpix[0]) & Shift(crpix[1])))
def _fit_2D_poly(ntransform, npoints, degree, max_error,
xin, yin, xout, yout,
xind, yind, xoutd, youtd,
verbose=False):
"""
Fit a pair of ordinary 2D polynomials to the supplied transform.
"""
llsqfitter = LinearLSQFitter()
# Build the list of SIP degrees to try: all allowed degrees when `degree` is None, otherwise the specified value(s)
if degree is None:
deglist = range(1, 10)
elif hasattr(degree, '__iter__'):
deglist = sorted(map(int, degree))
if set(deglist).difference(range(1, 10)):
raise ValueError("Allowed values for SIP degree are [1...9]")
else:
degree = int(degree)
if degree < 1 or degree > 9:
raise ValueError("Allowed values for SIP degree are [1...9]")
deglist = [degree]
prev_max_error = float(np.inf)
if verbose:
print(f'maximum_specified_error: {max_error}')
for deg in deglist:
poly_x = Polynomial2D(degree=deg)
poly_y = Polynomial2D(degree=deg)
fit_poly_x = llsqfitter(poly_x, xin, yin, xout)
fit_poly_y = llsqfitter(poly_y, xin, yin, yout)
max_resid = _compute_distance_residual(xout, yout,
fit_poly_x(xin, yin),
fit_poly_y(xin, yin))
if max_resid > prev_max_error:
raise RuntimeError('Failed to achieve required error tolerance')
if verbose:
print(f'Degree = {deg}, max_resid = {max_resid}')
if max_resid < max_error:
# Check to see if double sampling meets error requirement.
max_resid = _compute_distance_residual(xoutd, youtd,
fit_poly_x(xind, yind),
fit_poly_y(xind, yind))
if verbose:
print(f'Double sampling check: maximum residual={max_resid}')
if max_resid < max_error:
if verbose:
print('terminating condition met')
break
return fit_poly_x, fit_poly_y, max_resid
def _make_sampling_grid(npoints, bounding_box, crpix):
step = np.subtract.reduce(bounding_box, axis=1) / (1.0 - npoints)
crpix = np.asanyarray(crpix)[:, None, None]
return grid_from_bounding_box(bounding_box, step=step, center=False) - crpix
def _compute_distance_residual(undist_x, undist_y, fit_poly_x, fit_poly_y):
"""
Compute the distance residuals and return the maximum value.
"""
dist = np.sqrt((undist_x - fit_poly_x)**2 + (undist_y - fit_poly_y)**2)
max_resid = dist.max()
return max_resid
def _reform_poly_coefficients(fit_poly_x, fit_poly_y):
"""
The fit polynomials must be recombined to align with the SIP decomposition.
The result is the f(u,v) and g(u,v) polynomials, and the CD matrix.
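Notes
-----
Sketch of the relation being applied: the direct fit gives
``x = fit_poly_x(u, v)`` and ``y = fit_poly_y(u, v)``, while the SIP
convention writes ``(x, y) = CD @ (u + f(u, v), v + g(u, v))``. The
linear coefficients of the fit therefore form the CD matrix, and each
pair of higher-order fit coefficients is mapped through ``inv(CD)`` to
obtain the SIP coefficients, as done in the loop below.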
"""
# Extract values for CD matrix and recombining
c11 = fit_poly_x.c1_0.value
c12 = fit_poly_x.c0_1.value
c21 = fit_poly_y.c1_0.value
c22 = fit_poly_y.c0_1.value
sip_poly_x = fit_poly_x.copy()
sip_poly_y = fit_poly_y.copy()
# Force low order coefficients to be 0 as defined in SIP
sip_poly_x.c0_0 = 0
sip_poly_y.c0_0 = 0
sip_poly_x.c1_0 = 0
sip_poly_x.c0_1 = 0
sip_poly_y.c1_0 = 0
sip_poly_y.c0_1 = 0
cdmat = ((c11, c12), (c21, c22))
invcdmat = npla.inv(np.array(cdmat))
degree = fit_poly_x.degree
# Now loop through all remaining coefficients
for i in range(0, degree + 1):
for j in range(0, degree + 1):
if (i + j > 1) and (i + j < degree + 1):
old_x = getattr(fit_poly_x, f'c{i}_{j}').value
old_y = getattr(fit_poly_y, f'c{i}_{j}').value
newcoeff = np.dot(invcdmat, np.array([[old_x], [old_y]]))
setattr(sip_poly_x, f'c{i}_{j}', newcoeff[0, 0])
setattr(sip_poly_y, f'c{i}_{j}', newcoeff[1, 0])
return cdmat, sip_poly_x, sip_poly_y
def _store_2D_coefficients(hdr, poly_model, coeff_prefix, keeplinear=False):
"""
Write the polynomial model coefficients to the header.
"""
mindeg = int(not keeplinear)
degree = poly_model.degree
for i in range(0, degree + 1):
for j in range(0, degree + 1):
if (i + j) > mindeg and (i + j < degree + 1):
hdr[f'{coeff_prefix}_{i}_{j}'] = getattr(poly_model, f'c{i}_{j}').value
class Step:
"""
Represents a ``step`` in the WCS pipeline.
Parameters
----------
frame : `~gwcs.coordinate_frames.CoordinateFrame`
A gwcs coordinate frame object.
transform : `~astropy.modeling.core.Model` or None
A transform from this step's frame to the next step's frame.
The transform of the last step should be ``None``.
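Examples
--------
A minimal sketch; ``detector_frame`` and ``distortion`` are assumed to be
an existing coordinate frame (named ``'detector'``) and an astropy model:
>>> step = Step(detector_frame, distortion)  # doctest: +SKIP
>>> step.frame_name  # doctest: +SKIP
'detector'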
"""
def __init__(self, frame, transform=None):
self.frame = frame
self.transform = transform
@property
def frame(self):
return self._frame
@frame.setter
def frame(self, val):
if not isinstance(val, (cf.CoordinateFrame, str)):
raise TypeError('"frame" should be an instance of CoordinateFrame or a string.')
self._frame = val
@property
def transform(self):
return self._transform
@transform.setter
def transform(self, val):
if val is not None and not isinstance(val, Model):
raise TypeError('"transform" should be an instance of astropy.modeling.Model.')
self._transform = val
@property
def frame_name(self):
if isinstance(self.frame, str):
return self.frame
return self.frame.name
def __getitem__(self, ind):
warnings.warn("Indexing a WCS.pipeline step is deprecated. "
"Use the `frame` and `transform` attributes instead.", DeprecationWarning)
if ind not in (0, 1):
raise IndexError("Allowed inices are 0 (frame) and 1 (transform).")
if ind == 0:
return self.frame
return self.transform
def __str__(self):
return f"{self.frame_name}\t {getattr(self.transform, 'name', 'None') or self.transform.__class__.__name__}"
def __repr__(self):
return f"Step(frame={self.frame_name}, \
transform={getattr(self.transform, 'name', 'None') or self.transform.__class__.__name__})"
```
|
{
"source": "jdavies-st/jwst_reffiles",
"score": 2
}
|
#### File: jwst_reffiles/bad_pixel_mask/bad_pixel_mask.py
```python
import copy
import datetime
import os
from astropy.io import fits
from jwst.datamodels import MaskModel, util
import numpy as np
from jwst_reffiles.bad_pixel_mask import badpix_from_flats
from jwst_reffiles.dark_current import badpix_from_darks
# Flat field-related header keywords
dead_search_kw = 'BPFDEAD'
low_qe_search_kw = 'BPFLOWQE'
dead_search_type_kw = 'BPFSCHTP'
mean_sig_threshold_kw = 'BPFSIGMA'
norm_method_kw = 'BPFNORM'
smooth_box_width_kw = 'BPFSMOTH'
smoothing_type_kw = 'BPFSMTYP'
dead_sig_thresh_kw = 'BPFDEDSG'
dead_zero_sig_frac_kw = 'BPFZEROF'
dead_flux_check_kw = 'BPFFXCHK'
#dead_flux_file_kw = 'BPFFXFIL'
max_dead_sig_kw = 'BPFMXDED'
manual_flag_kw = 'BPFMANFL'
flat_do_not_use_kw = 'BPFDONOT'
max_low_qe_kw = 'BPFMXLQE'
max_open_adj_kw = 'BPFMXOAD'
# Dark current-related header keywords
bad_from_dark_kw = 'BPDSERCH'
dark_clip_sigma_kw = 'BPDCLPSG'
dark_clip_iters_kw = 'BPDCLPIT'
dark_noisy_thresh_kw = 'BPDNSETH'
max_sat_frac_kw = 'BPDMXSAT'
jump_limit_kw = 'BPDMXJMP'
jump_ratio_thresh_kw = 'BPDJPRAT'
cutoff_frac_kw = 'BPDCUTFC'
pedestal_sig_thresh_kw = 'BPDPEDTH'
rc_frac_thresh_kw = 'BPDRCTH'
low_ped_frac_kw = 'BPDLOPFC'
high_cr_frac_kw = 'BPDCRFC'
dark_do_not_use_kw = 'BPDDONOT'
flag_mapping_kw = 'BPDMAPS'
def bad_pixels(flat_slope_files=None, dead_search=True, low_qe_and_open_search=True,
dead_search_type='sigma_rate', flat_mean_sigma_threshold=3, flat_mean_normalization_method='smoothed',
smoothing_box_width=15, smoothing_type='Box2D', dead_sigma_threshold=5., max_dead_norm_signal=None,
run_dead_flux_check=False, dead_flux_check_files=None, flux_check=45000, max_low_qe_norm_signal=0.5,
max_open_adj_norm_signal=1.05, manual_flag_file='default', flat_do_not_use=[],
dark_slope_files=None, dark_uncal_files=None, dark_jump_files=None, dark_fitopt_files=None,
dark_stdev_clipping_sigma=5., dark_max_clipping_iters=5,
dark_noisy_threshold=5, max_saturated_fraction=0.5, max_jump_limit=10, jump_ratio_threshold=5,
early_cutoff_fraction=0.25, pedestal_sigma_threshold=5, rc_fraction_threshold=0.8, low_pedestal_fraction=0.8,
high_cr_fraction=0.8,
flag_values={'hot': ['HOT'], 'rc': ['RC'], 'low_pedestal': ['OTHER_BAD_PIXEL'], 'high_cr': ["TELEGRAPH"]},
dark_do_not_use=['hot', 'rc', 'low_pedestal', 'high_cr'], plot=False,
output_file=None, author='jwst_reffiles', description='A bad pix mask',
pedigree='GROUND', useafter='2019-04-01 00:00:00', history='', quality_check=False):
"""
Wrapper that calls the two modules for finding bad pixels from input flat
field files, and bad pixels from dark current files.
Parameters
----------
flat_slope_files : list
List of flat field slope files to be used for the dead pixel search.
If None, the search is skipped.
dead_search : bool
Whether or not to search for DEAD pixels using the flat field files
low_qe_and_open_search : bool
Whether or not to search for LOW_QE, OPEN, and ADJ_OPEN pixels using
the flat field files
dead_search_type : str
Type of search to use when looking for dead pixels. Options are:
``sigma_rate``: Using a normalized signal rate image, dead pixels
are defined as those with a rate smaller than
``dead_sigma_threshold`` standard deviations below
the mean.
``absolute_rate``: Using a normalized signal rate image, dead pixels
are defined as those with a rate less than
``max_dead_norm_signal``.
flat_mean_sigma_threshold : float
Number of standard deviations to use when sigma-clipping to
calculate the mean slope image or the mean across the detector
flat_mean_normalization_method : str
Specify how the mean image is normalized prior to searching for
bad pixels. Options are:
'smoothed': Mean image will be smoothed using a
``smoothing_box_width`` x ``smoothing_box_width``
box kernel. The mean image is then normalized by
this smoothed image.
'none': No normalization is done. Mean slope image is used as is
'mean': Mean image is normalized by its sigma-clipped mean
'fit2d': Mean image will be normalized by a fit of a 2-D surface to
the mean image. The degree of the fit is controlled by the
``fit_degree`` parameter
smoothing_box_width : float
Width in pixels of the box kernel to use to compute the smoothed
mean image
smoothing_type : string
Type of smoothing to do: ``Box2D`` or ``median`` filtering
smoothing_sigma : float
Number of standard deviations to use when smoothing in a box defined
by smoothing_box_width.
dead_sigma_threshold : float
Number of standard deviations below the mean at which a pixel is
considered dead.
max_dead_norm_signal : float
Maximum normalized signal rate of a pixel that is considered dead
run_dead_flux_check : bool
Whether or not to check for dead pixels using an absolute flux value
dead_flux_check_files : list
List of ramp (uncalibrated) files to use to check the average flux of
the last 4 groups. If None, the uncalibrated files are not read in
and no flux check is done.
flux_check : float
Tolerance on average signal in last 4 groups. If dead_flux_check_files is
a list of uncalibrated files, then the average of the last four groups
for all the integrations is determined. If this average > flux_check
then this pixel is not a dead pixel.
max_low_qe_norm_signal : float
The maximum normalized signal a pixel can have and be considered
low QE.
max_open_adj_norm_signal : float
The maximum normalized signal a pixel adjacent to a low QE pixel can have
in order for the low QE pixel to be reclassified as OPEN
manual_flag_file : str
Name of the ascii file containing a list of pixels to be added manually
to the output bad pixel mask file. Default is 'default', in which case
the file contained in the ``bad_pixel_mask`` directory of the repo will
be used.
flat_do_not_use : list
List of bad pixel types where the DO_NOT_USE flag should also be
applied (e.g. ['DEAD', 'LOW_QE'])
dark_slope_files : list
List of dark current slope files to be used for the noisy pixel search.
If None, the search is skipped.
dark_uncal_files : list
List of uncalibrated dark current ramp files. These should correspond
1-to-1 with the files listed in ``dark_slope_files``. If None,
the code assumes the files are in the same location as the slope
files and have names ending in uncal.fits
dark_jump_files : list
List of dark current ramp files output from the jump step of the pipeline.
These should correspond 1-to-1 with the files listed in ``dark_slope_files``.
If None, the code assumes the files are in the same location as the slope
files and have names ending in jump.fits
dark_fitopt_files : list
List of optional output files produced by the ramp-fitting step of the
pipeline. These should correspond 1-to-1 with the files listed in
``dark_slope_files``. If None, the code assumes the files are in the
same location as the slope files and have names ending in fitopt.fits
dark_stdev_clipping_sigma : int
Number of sigma to use when sigma-clipping the 2D array of
standard deviation values. The sigma-clipped mean and standard
deviation are used to locate noisy pixels.
dark_max_clipping_iters : int
Maximum number of iterations to use when sigma clipping to find
the mean and standard deviation values that are used when
locating noisy pixels.
dark_noisy_threshold : int
Number of sigma above the mean noise (associated with the slope)
to use as a threshold for identifying noisy pixels.
max_saturated_fraction : float
When identifying pixels that are fully saturated (in all groups
of an integration), this is the fraction of integrations within
which a pixel must be fully saturated before flagging it as HOT
max_jump_limit : int
The maximum number of jumps a pixel can have in an integration
before it is flagged as a ``high jump`` pixel (which may be
flagged as noisy later)
jump_ratio_threshold : int
Cutoff for the ratio of jumps early in the ramp to jumps later in
the ramp. Pixels with a ratio greater than this value (and which
also have a high total number of jumps) will be flagged as
potential (I)RC pixels.
early_cutoff_fraction : float
Fraction of the integration to use when comparing the jump rate
early in the integration to that across the entire integration.
Must be <= 0.5
pedestal_sigma_threshold : int
Used when searching for RC pixels via the pedestal image. Pixels
with pedestal values more than ``pedestal_sigma_threshold`` above
the mean are flagged as potential RC pixels
rc_fraction_threshold : float
Used when searching for RC pixels. This is the fraction of input
files within which the pixel must be identified as an RC pixel
before it will be flagged as a permanent RC pixel
low_pedestal_fraction : float
This is the fraction of input files within which a pixel must be
identified as a low pedestal pixel before it will be flagged as
a permanent low pedestal pixel
high_cr_fraction : float
This is the fraction of input files within which a pixel must be
flagged as having a high number of jumps before it will be flagged
as permanently noisy
flag_values : dict
This dictionary maps the types of bad pixels searched for to the
flag mnemonics to use when creating the bad pixel file. Keys are
the types of bad pixels searched for, and values are lists that
include mnemonics recognized by the jwst calibration pipeline
e.g. {'hot': ['HOT'], 'rc': ['RC'], 'low_pedestal': ['OTHER_BAD_PIXEL'], 'high_cr': ["TELEGRAPH"]}
dark_do_not_use : list
List of bad pixel types to be flagged as DO_NOT_USE
e.g. ['hot', 'rc', 'low_pedestal', 'high_cr']
plot : bool
If True, produce and save intermediate results from noisy pixel search
output_file : str
Name of the CRDS-formatted bad pixel reference file to save the final
bad pixel map into
author : str
CRDS-required name of the reference file author, to be placed in the
reference file header
description : str
CRDS-required description of the reference file, to be placed in the
reference file header
pedigree : str
CRDS-required pedigree of the data used to create the reference file
useafter : str
CRDS-required date of the earliest data with which this reference file
should be used. (e.g. '2019-04-01 00:00:00')
history : str
Text to be added to the HISTORY section of the output bad pixel file
quality_check : bool
If True, the pipeline is run using the output reference file to be
sure the pipeline doesn't crash
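Examples
--------
Illustrative call only; the file names are hypothetical:
>>> from jwst_reffiles.bad_pixel_mask.bad_pixel_mask import bad_pixels  # doctest: +SKIP
>>> bad_pixels(flat_slope_files=['flat1_rate.fits', 'flat2_rate.fits'],
...            dark_slope_files=['dark1_rate.fits', 'dark2_rate.fits'],
...            output_file='nrca1_badpix_mask.fits')  # doctest: +SKIP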
"""
instrument = None
detector = None
all_files = []
history = [history]
hdu = fits.PrimaryHDU()
if flat_slope_files is not None:
all_files = copy.deepcopy(flat_slope_files)
instrument, detector = instrument_info(flat_slope_files[0])
# Get output filenames
if output_file is None:
output_file = create_output_filename(instrument, detector)
flat_output_file = output_file.replace('.fits', '_from_flats.fits')
# Get bad pixels from the flats
flatmask = badpix_from_flats.find_bad_pix(flat_slope_files, dead_search=dead_search,
low_qe_and_open_search=low_qe_and_open_search,
dead_search_type=dead_search_type,
sigma_threshold=flat_mean_sigma_threshold,
normalization_method=flat_mean_normalization_method,
smoothing_type=smoothing_type,
smoothing_box_width=smoothing_box_width,
dead_sigma_threshold=dead_sigma_threshold,
#dead_zero_signal_fraction=dead_zero_signal_fraction,
run_dead_flux_check=run_dead_flux_check,
dead_flux_check_files=dead_flux_check_files,
max_dead_norm_signal=max_dead_norm_signal,
manual_flag_file=manual_flag_file,
max_low_qe_norm_signal=max_low_qe_norm_signal,
max_open_adj_norm_signal=max_open_adj_norm_signal,
do_not_use=flat_do_not_use,
output_file=flat_output_file,
author=author,
description=description,
pedigree=pedigree,
useafter=useafter,
history=history[0],
quality_check=quality_check)
# Convert the do not use list to a string to add to the header
if len(flat_do_not_use) > 0:
flat_do_not_use_string = ', '.join(flat_do_not_use)
else:
flat_do_not_use_string = 'None'
flat_do_not_use_string = '{} {}'.format('Bad pixel types from flat to which DO_NOT_USE is applied: ', flat_do_not_use_string)
# Add the do not use string to the list of history entries to add,
# since it may end up being longer than 8 characters
history.append(flat_do_not_use_string)
# Define the non-standard fits header keywords by placing them in a
# fits HDU List
hdu.header[dead_search_kw] = dead_search
hdu.header[low_qe_search_kw] = low_qe_and_open_search
hdu.header[dead_search_type_kw] = dead_search_type
hdu.header[mean_sig_threshold_kw] = flat_mean_sigma_threshold
hdu.header[norm_method_kw] = flat_mean_normalization_method
hdu.header[smooth_box_width_kw] = smoothing_box_width
hdu.header[dead_sig_thresh_kw] = dead_sigma_threshold
#hdu.header[dead_zero_sig_frac_kw] = dead_zero_signal_fraction
hdu.header[dead_flux_check_kw] = run_dead_flux_check
#hdu.header[dead_flux_file_kw] = dead_flux_check_files
hdu.header[max_dead_sig_kw] = max_dead_norm_signal
hdu.header[manual_flag_kw] = manual_flag_file
hdu.header[max_low_qe_kw] = max_low_qe_norm_signal
hdu.header[max_open_adj_kw] = max_open_adj_norm_signal
else:
flatmask = 0
hdu.header[dead_search_kw] = False
hdu.header[low_qe_search_kw] = False
if dark_slope_files is not None:
if len(all_files) == 0:
all_files = copy.deepcopy(dark_slope_files)
instrument, detector = instrument_info(dark_slope_files[0])
else:
all_files = all_files + dark_slope_files
# Get output filenames
if output_file is None:
output_file = create_output_filename(instrument, detector)
dark_output_file = output_file.replace('.fits', '_from_darks.fits')
# Get bad pixels from the darks
darkmask = badpix_from_darks.find_bad_pix(dark_slope_files, uncal_filenames=dark_uncal_files,
jump_filenames=dark_jump_files,
fitopt_filenames=dark_fitopt_files,
clipping_sigma=dark_stdev_clipping_sigma,
max_clipping_iters=dark_max_clipping_iters,
noisy_threshold=dark_noisy_threshold,
max_saturated_fraction=max_saturated_fraction,
max_jump_limit=max_jump_limit,
jump_ratio_threshold=jump_ratio_threshold,
early_cutoff_fraction=early_cutoff_fraction,
pedestal_sigma_threshold=pedestal_sigma_threshold,
rc_fraction_threshold=rc_fraction_threshold,
low_pedestal_fraction=low_pedestal_fraction,
high_cr_fraction=high_cr_fraction,
flag_values=flag_values,
do_not_use=dark_do_not_use,
outfile=dark_output_file, plot=False)
# Convert the do not use list to a string to add to the header
if len(dark_do_not_use) > 0:
dark_do_not_use_string = ', '.join(dark_do_not_use)
else:
dark_do_not_use_string = 'None'
dark_do_not_use_string = '{} {}'.format('Bad pixel types from dark to which DO_NOT_USE is applied: ', dark_do_not_use_string)
# Add the do not use string to the list of history entries to add,
# since it may end up being longer than 8 characters
history.append(dark_do_not_use_string)
# Convert the bad pixel type mapping into a string so it can be
# added to the output header
if len(flag_values) > 0:
mapping_str = ''
for key in flag_values:
substr = '{}: {}, '.format(key, flag_values[key])
mapping_str = mapping_str + substr
else:
mapping_str = 'None'
mapping_str = '{} {}'.format('Mapping of jwst_reffiles bad pixel types to jwst cal bad pixel flags: ', mapping_str)
# Add the do not use string to the list of history entries to add,
# since it may end up being longer than 8 characters
history.append(mapping_str)
# Define the non-standard fits header keywords by placing them in a
# fits HDU List
hdu.header[bad_from_dark_kw] = True
hdu.header[dark_clip_sigma_kw] = dark_stdev_clipping_sigma
hdu.header[dark_clip_iters_kw] = dark_max_clipping_iters
hdu.header[dark_noisy_thresh_kw] = dark_noisy_threshold
hdu.header[max_sat_frac_kw] = max_saturated_fraction
hdu.header[jump_limit_kw] = max_jump_limit
hdu.header[jump_ratio_thresh_kw] = jump_ratio_threshold
hdu.header[cutoff_frac_kw] = early_cutoff_fraction
hdu.header[pedestal_sig_thresh_kw] = pedestal_sigma_threshold
hdu.header[rc_frac_thresh_kw] = rc_fraction_threshold
hdu.header[low_ped_frac_kw] = low_pedestal_fraction
hdu.header[high_cr_frac_kw] = high_cr_fraction
else:
darkmask = 0.
hdu.header[bad_from_dark_kw] = False
# Combine the two masks
final_mask = flatmask + darkmask
# Save mask in reference file
hdu_list = fits.HDUList([hdu])
save_final_map(final_mask, instrument.upper(), detector.upper(), hdu_list, all_files, author, description,
pedigree, useafter, history, output_file)
def create_output_filename(inst_name, det_name):
"""Create a default output filename for the bad pixel mask given
instrument and detector names
Parameters
----------
inst_name : str
Instrument name
det_name : str
Detector name
Returns
-------
outfile : str
Default bad pixel mask filename
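Examples
--------
Illustrative only; the timestamp and working directory will differ:
>>> create_output_filename('nircam', 'nrca1')  # doctest: +SKIP
'/current/working/dir/nircam_nrca1_2021-01-01 12:34:56.789012_badpix_mask.fits'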
"""
# Add in timestamp as a way to prevent overwriting past runs
current_time = datetime.datetime.now()
# Use the current working directory
outfile = '{}_{}_{}_badpix_mask.fits'.format(inst_name, det_name, current_time)
outfile = os.path.join(os.getcwd(), outfile)
return outfile
def instrument_info(filename):
"""Get the instrument and detector name from the header of the
input file
Parameters
----------
filename : str
Name of fits file
Returns
-------
inst : str
Instrument name
det : str
Detector name
"""
with fits.open(filename) as hdulist:
try:
inst = hdulist[0].header['INSTRUME'].lower()
except KeyError:
raise KeyError("ERROR: expecting instrument name in main header of {}".format(filename))
try:
det = hdulist[0].header['DETECTOR'].lower()
except KeyError:
raise KeyError("ERROR: expecting detector name in main header of {}".format(filename))
return inst, det
def save_final_map(bad_pix_map, instrument, detector, hdulist, files, author, description, pedigree, useafter,
history_text, outfile):
"""Save a bad pixel map into a CRDS-formatted reference file
Parameters
----------
bad_pix_map : numpy.ndarray
2D bad pixel array
instrument : str
Name of instrument associated with the bad pixel array
detector : str
Name of detector associated with the bad pixel array
hdulist : astropy.fits.HDUList
HDUList containing "extra" fits keywords
files : list
List of files used to create ``bad_pix_map``
author : str
Author of the bad pixel mask reference file
description : str
CRDS description to use in the final bad pixel file
pedigree : str
CRDS pedigree to use in the final bad pixel file
useafter : str
CRDS useafter string for the bad pixel file
history_text : list
List of strings to add as HISTORY entries to the bad pixel file
outfile : str
Name of the output bad pixel file
"""
yd, xd = bad_pix_map.shape
# Initialize the MaskModel using the hdu_list, so the new keywords will
# be populated
model = MaskModel(hdulist)
model.dq = bad_pix_map
# Create dq_def data
dq_def = badpix_from_flats.create_dqdef()
model.dq_def = dq_def
model.meta.reftype = 'MASK'
model.meta.subarray.name = 'FULL'
model.meta.subarray.xstart = 1
model.meta.subarray.xsize = xd
model.meta.subarray.ystart = 1
model.meta.subarray.ysize = yd
model.meta.instrument.name = instrument.upper()
model.meta.instrument.detector = detector
# Get the fast and slow axis directions from one of the input files
fastaxis, slowaxis = badpix_from_flats.get_fastaxis(files[0])
model.meta.subarray.fastaxis = fastaxis
model.meta.subarray.slowaxis = slowaxis
model.meta.author = author
model.meta.description = description
model.meta.pedigree = pedigree
model.meta.useafter = useafter
# Add information about parameters used
# Parameters from badpix_from_flats
package_note = ('This file was created using the bad_pixel_mask.py module within the '
'jwst_reffiles package.')
software_dict = {'name': 'jwst_reffiles.bad_pixel_mask.bad_pixel_mask.py', 'author': 'STScI',
'homepage': 'https://github.com/spacetelescope/jwst_reffiles',
'version': '0.0.0'}
entry = util.create_history_entry(package_note, software=software_dict)
model.history.append(entry)
model.history.append(util.create_history_entry('Parameter values and descriptions:'))
dead_search_descrip = ('dead_search: Boolean, whether or not to run the dead pixel search '
'using flat field files. The value is stored in the {} keyword.'.format(dead_search_kw))
model.history.append(util.create_history_entry(dead_search_descrip))
low_qe_search_descrip = ('low_qe_and_open_search: Boolean, whether or not to run the low QE '
'and open pixel search using flat field files. The value is stored in the {} '
'keyword.'.format(low_qe_search_kw))
model.history.append(util.create_history_entry(low_qe_search_descrip))
dead_type_descrip = ('dead_search_type: Method used to identify dead pixels. The value is stored in the '
'{} keyword.'.format(dead_search_type_kw))
model.history.append(util.create_history_entry(dead_type_descrip))
sigma_descrip = ('flat_mean_sigma_threshold: Number of standard deviations to use when sigma-clipping to '
'calculate the mean slope image or the mean across the detector. The value '
'used is stored in the {} keyword.'.format(mean_sig_threshold_kw))
model.history.append(util.create_history_entry(sigma_descrip))
norm_descrip = ('flat_mean_normalization_method: Specify how the mean image is normalized prior to searching '
'for bad pixels. The value used is stored in the {} keyword.'.format(norm_method_kw))
model.history.append(util.create_history_entry(norm_descrip))
smooth_descrip = ('smoothing_box_width: Width in pixels of the box kernel to use to compute the '
'smoothed mean image. The value used is stored in the {} keyword.'.format(smooth_box_width_kw))
model.history.append(util.create_history_entry(smooth_descrip))
smooth_type_descrip = ('smoothing_type: Type of smoothing to do: Box2D or median filtering. The value used '
'is stored in the {} keyword.'.format(smoothing_type_kw))
model.history.append(util.create_history_entry(smooth_type_descrip))
dead_sig_descrip = ('Number of standard deviations below the mean at which a pixel is considered dead. '
'The value used is stored in the {} keyword.'.format(dead_sig_thresh_kw))
model.history.append(util.create_history_entry(dead_sig_descrip))
max_dead_descrip = ('Maximum normalized signal rate of a pixel that is considered dead. The value '
'used is stored in the {} keyword.'.format(max_dead_sig_kw))
model.history.append(util.create_history_entry(max_dead_descrip))
run_dead_flux_descrip = ('run_dead_flux_check: Boolean, if True, search for pixels erroneously flagged '
'as dead because they are saturated in all groups. The value used is stored '
'in the {} keyword.'.format(dead_flux_check_kw))
model.history.append(util.create_history_entry(run_dead_flux_descrip))
dead_flux_limit_descrip = ('Signal limit in raw data above which the pixel is considered not dead. The '
'value used is stored in the {} keyword.'.format(max_dead_sig_kw))
model.history.append(util.create_history_entry(dead_flux_limit_descrip))
max_low_qe_descrip = ('The maximum normalized signal a pixel can have and be considered low QE. The '
'value used is stored in the {} keyword.'.format(max_low_qe_kw))
model.history.append(util.create_history_entry(max_low_qe_descrip))
max_open_adj_descrip = ('The maximum normalized signal a pixel adjacent to a low QE pixel can have '
'in order for the low QE pixel to be reclassified as OPEN. The value used '
'is stored in the {} keyword.'.format(max_open_adj_kw))
model.history.append(util.create_history_entry(max_open_adj_descrip))
flat_do_not_use_descrip = ('List of bad pixel types (from flats) where the DO_NOT_USE flag is also applied. '
'The values used are stored in the {} keyword.'.format(flat_do_not_use_kw))
model.history.append(util.create_history_entry(flat_do_not_use_descrip))
manual_file_descrip = ('Name of the ascii file containing a list of pixels to be added manually. The '
'value used is stored in the {} keyword.'.format(manual_flag_kw))
model.history.append(util.create_history_entry(manual_file_descrip))
# Parameters from badpix_from_darks
bad_from_dark_descrip = ('badpix_from_dark: Boolean, whether or not the bad pixel from dark search '
'has been run. The value is stored in the {} keyword.'.format(bad_from_dark_kw))
model.history.append(util.create_history_entry(bad_from_dark_descrip))
dark_clip_sig_descrip = ('Number of sigma to use when sigma-clipping 2D stdev image. The value used '
'is stored in the {} keyword.'.format(dark_clip_sigma_kw))
model.history.append(util.create_history_entry(dark_clip_sig_descrip))
dark_clip_iter_descrip = ('Max number of iterations to use when sigma clipping mean and stdev values. '
'The value used is stored in the {} keyword.'.format(dark_clip_iters_kw))
model.history.append(util.create_history_entry(dark_clip_iter_descrip))
dark_noisy_thresh_descrip = ('Number of sigma above mean noise for noisy pix threshold. The value '
'used is stored in the {} keyword.'.format(dark_noisy_thresh_kw))
model.history.append(util.create_history_entry(dark_noisy_thresh_descrip))
max_sat_frac_descrip = ('Fraction of integrations within which a pixel must be fully saturated before '
'flagging it as HOT. The value used is stored in the {} keyword.'.format(max_sat_frac_kw))
model.history.append(util.create_history_entry(max_sat_frac_descrip))
jump_limit_descrip = ('Maximum number of jumps a pixel can have in an integration before it is flagged as a '
'"high jump" pixel. The value used is stored in the {} keyword.'.format(jump_limit_kw))
model.history.append(util.create_history_entry(jump_limit_descrip))
jump_ratio_descrip = ('Cutoff for the ratio of jumps early in the ramp to jumps later in the ramp when '
'looking for RC pixels. The value used is stored in the {} keyword.'.format(jump_ratio_thresh_kw))
model.history.append(util.create_history_entry(jump_ratio_descrip))
cutoff_frac_descrip = ('Fraction of the integration to use when comparing the jump rate early in the integration to '
'that across the entire integration. The value used is stored in the {} keyword.'.format(cutoff_frac_kw))
model.history.append(util.create_history_entry(cutoff_frac_descrip))
ped_sigma_descrip = ('Pixels with pedestal values more than this limit above the mean are flagged as RC. '
'The value used is stored in the {} keyword.'.format(pedestal_sig_thresh_kw))
model.history.append(util.create_history_entry(ped_sigma_descrip))
rc_thresh_descrip = ('Fraction of input files within which a pixel must be identified as an RC pixel before '
'it will be flagged as a permanent RC pixel. The value used is stored in the {} '
'keyword.'.format(rc_frac_thresh_kw))
model.history.append(util.create_history_entry(rc_thresh_descrip))
low_ped_descrip = ('Fraction of input files within which a pixel must be identified as a low pedestal '
'pixel before it will be flagged as a permanent low pedestal pixel. The value used '
'is stored in the {} keyword.'.format(low_ped_frac_kw))
model.history.append(util.create_history_entry(low_ped_descrip))
high_cr_descrip = ('Fraction of input files within which a pixel must be flagged as having a high number '
'of jumps before it will be flagged as permanently noisy. The value used '
'is stored in the {} keyword.'.format(high_cr_frac_kw))
model.history.append(util.create_history_entry(high_cr_descrip))
dark_do_not_use_descrip = ('List of bad pixel types (from darks) where the DO_NOT_USE flag is also applied. '
'The values used are stored in the {} keyword.'.format(dark_do_not_use_kw))
model.history.append(util.create_history_entry(dark_do_not_use_descrip))
# Add the list of input files used to create the map
model.history.append('DATA USED:')
for file in files:
totlen = len(file)
div = np.arange(0, totlen, 60)
for val in div:
if totlen > (val+60):
model.history.append(util.create_history_entry(file[val:val+60]))
else:
model.history.append(util.create_history_entry(file[val:]))
# Add the do not use lists, pixel flag mappings, and user-provided
# history text
for history_entry in history_text:
if history_entry != '':
model.history.append(util.create_history_entry(history_entry))
model.save(outfile, overwrite=True)
print('Final bad pixel mask reference file saved to: {}'.format(outfile))
```
|
{
"source": "jdavies-st/pytest-astropy-header",
"score": 2
}
|
#### File: pytest-astropy-header/tests/test_display.py
```python
import pytest
import numpy
NUMPY_VERSION = numpy.__version__
pytest_plugins = ['pytester']
def extract_package_version_lines(output):
lines = []
in_section = False
for line in output.splitlines():
if line.strip() == 'Package versions:':
in_section = True
elif in_section:
if line.strip() == "":
break
else:
lines.append(line)
return lines
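# Sketch of the pytest header block the helper above expects (values are
# illustrative only); everything between the 'Package versions:' line and
# the next blank line is returned:
#
#   Package versions:
#   Numpy: 1.21.0
#   Scipy: 1.7.0
#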
def test_default(testdir, capsys):
testdir.inline_run()
out, err = capsys.readouterr()
assert 'Package versions:' not in out
@pytest.mark.parametrize('method', ['cli', 'ini', 'conftest'])
def test_enabled(testdir, capsys, method):
if method == 'cli':
testdir.inline_run("--astropy-header")
elif method == 'ini':
testdir.makeini("""
[pytest]
astropy_header = yes
""")
testdir.inline_run()
elif method == 'conftest':
testdir.makeconftest("""
def pytest_configure(config):
config.option.astropy_header = True
""")
testdir.inline_run()
out, err = capsys.readouterr()
lines = extract_package_version_lines(out)
assert len(lines) == 6
assert lines[0].startswith('Numpy: ')
assert lines[1].startswith('Scipy: ')
assert lines[2].startswith('Matplotlib: ')
assert lines[3].startswith('h5py: ')
assert lines[4].startswith('Pandas: ')
assert lines[5].startswith('astropy-helpers: ')
@pytest.mark.parametrize('method', ['ini', 'conftest'])
def test_explicit_disable(testdir, capsys, method):
if method == 'ini':
testdir.makeini("""
[pytest]
astropy_header = no
""")
testdir.inline_run()
elif method == 'conftest':
testdir.makeconftest("""
def pytest_configure(config):
config.option.astropy_header = False
""")
testdir.inline_run()
out, err = capsys.readouterr()
assert 'Package versions:' not in out
@pytest.mark.parametrize('method', ['cli', 'ini', 'ini_list', 'conftest'])
def test_override_package_single(testdir, capsys, method):
if method == 'cli':
testdir.inline_run("--astropy-header", "--astropy-header-packages=numpy")
elif method == 'ini':
testdir.makeini("""
[pytest]
astropy_header = yes
astropy_header_packages = numpy
""")
testdir.inline_run()
elif method == 'ini_list':
testdir.makeini("""
[pytest]
astropy_header = yes
astropy_header_packages =
numpy
""")
testdir.inline_run()
elif method == 'conftest':
testdir.makeconftest("""
def pytest_configure(config):
config.option.astropy_header = True
config.option.astropy_header_packages = ['numpy']
""")
testdir.inline_run()
out, err = capsys.readouterr()
lines = extract_package_version_lines(out)
assert len(lines) == 2
assert lines[0] == 'numpy: {NUMPY_VERSION}'.format(NUMPY_VERSION=NUMPY_VERSION)
assert lines[1].startswith('astropy-helpers: ')
@pytest.mark.parametrize('method', ['cli', 'ini', 'ini_list', 'conftest'])
def test_override_package_multiple(testdir, capsys, method):
if method == 'cli':
testdir.inline_run("--astropy-header", "--astropy-header-packages=numpy,pandas")
elif method == 'ini':
testdir.makeini("""
[pytest]
astropy_header = yes
astropy_header_packages = numpy, pandas
""")
testdir.inline_run()
elif method == 'ini_list':
testdir.makeini("""
[pytest]
astropy_header = yes
astropy_header_packages =
numpy
pandas
""")
testdir.inline_run()
elif method == 'conftest':
testdir.makeconftest("""
def pytest_configure(config):
config.option.astropy_header = True
config.option.astropy_header_packages = ['numpy', 'pandas']
""")
testdir.inline_run()
out, err = capsys.readouterr()
print(out)
lines = extract_package_version_lines(out)
assert len(lines) == 3
assert lines[0] == 'numpy: {NUMPY_VERSION}'.format(NUMPY_VERSION=NUMPY_VERSION)
assert lines[1].startswith('pandas')
assert lines[2].startswith('astropy-helpers: ')
@pytest.mark.parametrize('method', ['cli', 'ini', 'ini_list', 'conftest'])
def test_nonexistent(testdir, capsys, method):
if method == 'cli':
testdir.inline_run("--astropy-header", "--astropy-header-packages=apackagethatdoesnotexist")
elif method == 'ini':
testdir.makeini("""
[pytest]
astropy_header = yes
astropy_header_packages = apackagethatdoesnotexist
""")
testdir.inline_run()
elif method == 'ini_list':
testdir.makeini("""
[pytest]
astropy_header = yes
astropy_header_packages =
apackagethatdoesnotexist
""")
testdir.inline_run()
elif method == 'conftest':
testdir.makeconftest("""
def pytest_configure(config):
config.option.astropy_header = True
config.option.astropy_header_packages = ['apackagethatdoesnotexist']
""")
testdir.inline_run()
out, err = capsys.readouterr()
lines = extract_package_version_lines(out)
assert len(lines) == 2
assert lines[0] == 'apackagethatdoesnotexist: not available'
assert lines[1].startswith('astropy-helpers: ')
def test_modify_in_conftest(testdir, capsys):
testdir.makeconftest("""
from pytest_astropy.display import PYTEST_HEADER_MODULES, TESTED_VERSIONS
def pytest_configure(config):
config.option.astropy_header = True
PYTEST_HEADER_MODULES.pop('Pandas')
PYTEST_HEADER_MODULES['scikit-image'] = 'skimage'
TESTED_VERSIONS['fakepackage'] = '1.0.2'
""")
testdir.inline_run()
out, err = capsys.readouterr()
lines = extract_package_version_lines(out)
assert len(lines) == 6
assert lines[0].startswith('Numpy: ')
assert lines[1].startswith('Scipy: ')
assert lines[2].startswith('Matplotlib: ')
assert lines[3].startswith('h5py: ')
assert lines[4].startswith('scikit-image: ')
assert lines[5].startswith('astropy-helpers: ')
assert 'Running tests with fakepackage version 1.0.2' in out
```
|
{
"source": "jdaviscooke/goodtables-py",
"score": 2
}
|
#### File: goodtables/checks/blank_row.py
```python
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from ..registry import check
from ..error import Error
# Module API
@check('blank-row')
def blank_row(cells):
errors = []
if not list(filter(lambda cell: cell.get('value'), cells)):
# Add error
error = Error('blank-row', row_number=cells[0].get('row-number'))
errors.append(error)
# Clear cells
del cells[:]
return errors
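# Illustrative sketch of the check's behaviour (the cell dicts are
# simplified stand-ins for the richer cells goodtables builds internally):
#
# cells = [{'value': '', 'row-number': 3}, {'value': None, 'row-number': 3}]
# errors = blank_row(cells)
# -> one 'blank-row' error for row 3, and `cells` has been cleared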
```
#### File: goodtables/checks/extra_header.py
```python
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from copy import copy
from tableschema import Schema
from ..registry import check
from ..error import Error
# Module API
@check('extra-header')
class ExtraHeader(object):
# Public
def __init__(self, infer_fields=False, **options):
self.__infer_fields = infer_fields
def check_headers(self, cells, sample):
errors = []
for cell in copy(cells):
# Skip if cell has field
if 'field' in cell:
continue
# Infer field
if self.__infer_fields:
column_sample = []
for row in sample:
value = None
if len(row) >= cell['column-number']:
value = row[cell['column-number'] - 1]
column_sample.append([value])
schema = Schema()
schema.infer(column_sample, headers=[cell.get('header')])
cell['field'] = schema.fields[0]
# Add error/remove column
else:
error = Error('extra-header', cell)
errors.append(error)
cells.remove(cell)
return errors
```
#### File: goodtables/checks/minimum_length_constraint.py
```python
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from ..registry import check
from .constraints_checks import create_check_constraint
# Module API
@check('minimum-length-constraint')
def minimum_length_constraint(cells):
check_constraint = create_check_constraint('minimum-length-constraint', 'minLength')
return check_constraint(cells)
```
#### File: tests/checks/test_duplicate_row.py
```python
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from goodtables.checks.duplicate_row import DuplicateRow
import goodtables.cells
# Check
def test_check_duplicate_row(log):
row1 = [
goodtables.cells.create_cell('name', 'value1', row_number=1, column_number=1)
]
row2 = [
goodtables.cells.create_cell('name', 'value2', row_number=2, column_number=1)
]
duplicate_row = DuplicateRow()
errors = duplicate_row.check_row(row1)
errors += duplicate_row.check_row(row2)
assert log(errors) == []
assert len(row1) == 1
assert len(row2) == 1
def test_check_duplicate_row_problem(log):
row1 = [
goodtables.cells.create_cell('name', 'value', row_number=1, column_number=1)
]
row2 = [
goodtables.cells.create_cell('name', 'value', row_number=2, column_number=1)
]
duplicate_row = DuplicateRow()
errors = duplicate_row.check_row(row1)
errors += duplicate_row.check_row(row2)
assert log(errors) == [
(2, None, 'duplicate-row'),
]
assert len(row1) == 1
assert len(row2) == 0
```
#### File: tests/checks/test_extra_header.py
```python
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from tableschema import Field
from goodtables.checks.extra_header import ExtraHeader
import goodtables.cells
# Check
def test_check_extra_header(log):
cells = [
goodtables.cells.create_cell('name1', field=Field({'name': 'name1'}), column_number=1),
goodtables.cells.create_cell('name2', field=Field({'name': 'name2'}), column_number=2),
]
sample = []
extra_header = ExtraHeader()
errors = extra_header.check_headers(cells, sample=sample)
assert log(errors) == []
assert len(cells) == 2
def test_check_extra_header_infer(log):
cells = [
goodtables.cells.create_cell('name1', field=Field({'name': 'name1'}), column_number=1),
goodtables.cells.create_cell('name2', column_number=2),
]
sample = []
extra_header = ExtraHeader(infer_fields=True)
errors = extra_header.check_headers(cells, sample=sample)
assert log(errors) == []
assert len(cells) == 2
assert cells[1]['field'].name == 'name2'
def test_check_extra_header_infer_with_data(log):
cells = [
goodtables.cells.create_cell('name1', field=Field({'name': 'name1'}), column_number=1),
goodtables.cells.create_cell('name2', column_number=2),
]
sample = [
['123', 'abc'],
['456', 'def'],
['789', 'ghi'],
]
extra_header = ExtraHeader(infer_fields=True)
errors = extra_header.check_headers(cells, sample=sample)
assert log(errors) == []
assert len(cells) == 2
assert cells[1]['field'].name == 'name2'
assert cells[1]['field'].type == 'string'
def test_check_extra_header_infer_with_empty_data(log):
cells = [
goodtables.cells.create_cell('name1', field=Field({'name': 'name1'}), column_number=1),
goodtables.cells.create_cell('name2', column_number=2),
]
sample = [
['123', ''],
['456', ''],
['789', ''],
]
extra_header = ExtraHeader(infer_fields=True)
errors = extra_header.check_headers(cells, sample=sample)
assert log(errors) == []
assert len(cells) == 2
assert cells[1]['field'].name == 'name2'
assert cells[1]['field'].type == 'string'
def test_check_extra_header_problem(log):
cells = [
goodtables.cells.create_cell('name1', field=Field({'name': 'name1'}), column_number=1),
goodtables.cells.create_cell('name2', column_number=2),
]
sample = []
extra_header = ExtraHeader()
errors = extra_header.check_headers(cells, sample=sample)
assert log(errors) == [
(None, 2, 'extra-header'),
]
assert len(cells) == 1
```
#### File: tests/checks/test_pattern_constraint.py
```python
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from goodtables.checks.pattern_constraint import pattern_constraint
# Check
def test_check_pattern_constraint(log):
cells = []
errors = pattern_constraint(cells)
assert log(errors) == []
assert len(cells) == 0
```
#### File: contrib/checks/test_sequential_value.py
```python
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from goodtables import validate
# Validate
def test_check_sequential_value(log):
source = [
['row', 'index2', 'index3'],
[2, 1, 1],
[3, 2, 3],
[4, 3, 5],
[5, 5, 6],
[6],
]
report = validate(source, checks=[
{'sequential-value': {'column': 2}},
{'sequential-value': {'column': 'index3'}},
])
assert log(report) == [
(1, 3, 3, 'sequential-value'),
(1, 4, 3, 'sequential-value'),
(1, 5, 2, 'sequential-value'),
(1, 6, 2, 'sequential-value'),
(1, 6, 3, 'sequential-value'),
]
def test_check_sequential_value_non_existent_column(log):
source = [
['row', 'name'],
[2, 'Alex'],
]
report = validate(source, checks=[
{'sequential-value': {'column': 3}},
{'sequential-value': {'column': 'non-existent'}},
])
assert log(report) == [
(1, 2, None, 'sequential-value'),
(1, 2, None, 'sequential-value'),
]
```
|
{
"source": "jdawgzim/hdl_build",
"score": 3
}
|
#### File: hdl_build/build/find_dependencies.py
```python
import os
import sys
import re
import argparse
# Package use pattern
package_use_re = re.compile(r'(\w+)::([\*\w]+)')
# Include use pattern
include_use_re = re.compile(r'`include\s+["<]([\w/\.\d]+)[">]')
# Module instance pattern, assuming parentheses contents removed
module_instance_re = re.compile(r'''
(\w+)\s+ # module_identifier
(?:\#\s*\(\)\s*)? # optional parameters
(\w+)\s* # instance name
\(\)\s* # port connections
(?=;) # statement end, don't consume
''', re.DOTALL | re.VERBOSE)
# These can fail with weird comments (like nested), but should be good enough
comment_line_re = re.compile(r'//.*')
comment_block_re = re.compile(r'/\*.*?\*/', re.DOTALL)
# Match literal quoted strings
quote_re = re.compile(r'".*?"')
# Enforce space before "#" in modules
add_space_re = re.compile(r'#\s*\(')
def de_parentheses(text):
pstack = 0
bstack = 0
result = ""
last_close = 0
for i, c in enumerate(text):
if c == '(':
if not pstack and not bstack:
result += text[last_close:i+1]
pstack += 1
elif c == '[':
if not bstack and not pstack:
result += text[last_close:i]
bstack += 1
elif c == ')' and pstack:
last_close = i
pstack -= 1
elif c == ']' and bstack:
last_close = i+1
bstack -= 1
result += text[last_close:]
return result
keywords = [
'accept_on', 'alias', 'always', 'always_comb',
'always_ff', 'always_latch', 'and', 'assert', 'assign', 'assume',
'automatic', 'before', 'begin', 'bind', 'bins', 'binsof', 'bit',
'break', 'buf', 'bufif0', 'bufif1', 'byte', 'case', 'casex', 'casez',
'cell', 'chandle', 'checker', 'class', 'clocking', 'cmos', 'config',
'const', 'constraint', 'context', 'continue', 'cover', 'covergroup',
'coverpoint', 'cross', 'deassign', 'default', 'defparam', 'design',
'disable', 'dist', 'do', 'edge', 'else', 'end', 'endcase',
'endchecker', 'endclass', 'endclocking', 'endconfig', 'endfunction',
'endgenerate', 'endgroup', 'endinterface', 'endmodule', 'endpackage',
'endprimitive', 'endprogram', 'endproperty', 'endspecify',
'endsequence', 'endtable', 'endtask', 'enum', 'event', 'eventually',
'expect', 'export', 'extends', 'extern', 'final', 'first_match',
'for', 'force', 'foreach', 'forever', 'fork', 'forkjoin', 'function',
'generate', 'genvar', 'global', 'highz0', 'highz1', 'if', 'iff',
'ifnone', 'ignore_bins', 'illegal_bins', 'implements', 'implies',
'import', 'incdir', 'include', 'initial', 'inout', 'input', 'inside',
'instance', 'int', 'integer', 'interconnect', 'interface',
'intersect', 'join', 'join_any', 'join_none', 'large', 'let',
'liblist', 'library', 'local', 'localparam', 'logic', 'longint',
'macromodule', 'matches', 'medium', 'modport', 'module', 'nand',
'negedge', 'nettype', 'new', 'nexttime', 'nmos', 'nor',
'noshowcancelled', 'not', 'notif0', 'notif1', 'null', 'or', 'output',
'package', 'packed', 'parameter', 'pmos', 'posedge', 'primitive',
'priority', 'program', 'property', 'protected', 'pull0', 'pull1',
'pulldown', 'pullup', 'pulsestyle_ondetect', 'pulsestyle_onevent',
'pure', 'rand', 'randc', 'randcase', 'randsequence', 'rcmos', 'real',
'realtime', 'ref', 'reg', 'reject_on', 'release', 'repeat',
'restrict', 'return', 'rnmos', 'rpmos', 'rtran', 'rtranif0',
'rtranif1', 's_always', 's_eventually', 's_nexttime', 's_until',
's_until_with', 'scalared', 'sequence', 'shortint', 'shortreal',
'showcancelled', 'signed', 'small', 'soft', 'solve', 'specify',
'specparam', 'static', 'string', 'strong', 'strong0', 'strong1',
'struct', 'super', 'supply0', 'supply1', 'sync_accept_on',
'sync_reject_on', 'table', 'tagged', 'task', 'this', 'throughout',
'time', 'timeprecision', 'timeunit', 'tran', 'tranif0', 'tranif1',
'tri', 'tri0', 'tri1', 'triand', 'trior', 'trireg', 'type', 'typedef',
'union', 'unique', 'unique0', 'unsigned', 'until', 'until_with',
'untyped', 'use', 'uwire', 'var', 'vectored', 'virtual', 'void',
'wait', 'wait_order', 'wand', 'weak', 'weak0', 'weak1', 'while',
'wildcard', 'wire', 'with', 'within', 'wor', 'xnor', 'xor'
]
def find_deps(path, name, text, args):
''' Process module contents to determine a list of dependencies
path = repository relative path to file
name = module name
text = file contents
args = arg parser object, looking for args.debug'''
#print("Find deps args:", path, name, args)
includes = []
packages = []
instances = []
# Remove characters or comments
text = comment_line_re.sub('', text)
text = comment_block_re.sub('', text)
# Get includes
include_search = include_use_re.findall(text)
if include_search:
for include_path in include_search:
includes.append(os.path.basename(include_path))
# Remove quoted characters (must be preserved for includes)
text = quote_re.sub('', text)
# Get packages
package_search = package_use_re.findall(text)
if package_search:
for (pkg_name, fname) in package_search:
packages.append(pkg_name)
# Get instances -- clean up code for instance search first
clean_text = add_space_re.sub(' #(', text)
clean_text = de_parentheses(clean_text)
instance_search = module_instance_re.findall(clean_text)
if instance_search:
for (mod_name, inst_name) in instance_search:
if mod_name not in keywords:
instances.append(mod_name)
dep_set = {obj for obj in includes + packages + instances if obj != name}
return list(dep_set)
def main(args):
name = args.name
path = args.path
text = open(path, 'r').read()
deps = find_deps(path, name, text, args)
print(f"{name} dependencies:")
deps.sort()
for mod in deps:
print(f"\t{mod}")
if __name__ == '__main__':
argp = argparse.ArgumentParser(
description='Parse file and create list of dependencies')
argp.add_argument('path', help="path of module to analyze")
argp.add_argument('name', help="name of module to analyze")
argp.add_argument('-d', '--debug', action='store_true', help='print debug')
args = argp.parse_args()
main(args)
```
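A self-contained usage sketch for find_deps, assuming the functions above are importable (or the snippet is appended to this module); the SystemVerilog fragment and every module, package, and include name in it are invented for illustration.
```python
# Hedged usage sketch: run find_deps on an inline, made-up SystemVerilog fragment.
from types import SimpleNamespace

sv_text = '''
`include "defs.svh"
module top (input clk);
  my_pkg::cfg_t cfg;          // package reference, should yield my_pkg
  fifo #(.DEPTH(8)) u_fifo (  // module instance, should yield fifo
    .clk(clk)
  );
endmodule
'''

deps = find_deps('rtl/top.sv', 'top', sv_text, SimpleNamespace(debug=False))
print(sorted(deps))  # expected, given the rules above: ['defs.svh', 'fifo', 'my_pkg']
```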
|
{
"source": "jdawsonash/elasticsearch-py",
"score": 2
}
|
#### File: elasticsearch-py/utils/generate-api.py
```python
import contextlib
import io
import json
import os
import re
import shutil
import sys
import tempfile
import zipfile
from functools import lru_cache
from itertools import chain
from pathlib import Path
import black
import unasync
import urllib3
from click.testing import CliRunner
from jinja2 import Environment, FileSystemLoader, TemplateNotFound
http = urllib3.PoolManager()
# line to look for in the original source file
SEPARATOR = " # AUTO-GENERATED-API-DEFINITIONS #"
# global substitutions for python keywords
SUBSTITUTIONS = {"type": "doc_type", "from": "from_"}
# api path(s)
BRANCH_NAME = "master"
CODE_ROOT = Path(__file__).absolute().parent.parent
GLOBAL_QUERY_PARAMS = {
"pretty": "Optional[bool]",
"human": "Optional[bool]",
"error_trace": "Optional[bool]",
"format": "Optional[str]",
"filter_path": "Optional[Union[str, Collection[str]]]",
"request_timeout": "Optional[Union[int, float]]",
"ignore": "Optional[Union[int, Collection[int]]]",
"opaque_id": "Optional[str]",
"http_auth": "Optional[Union[str, Tuple[str, str]]]",
"api_key": "Optional[Union[str, Tuple[str, str]]]",
}
jinja_env = Environment(
loader=FileSystemLoader([CODE_ROOT / "utils" / "templates"]),
trim_blocks=True,
lstrip_blocks=True,
)
def blacken(filename):
runner = CliRunner()
result = runner.invoke(black.main, [str(filename)])
assert result.exit_code == 0, result.output
@lru_cache()
def is_valid_url(url):
return 200 <= http.request("HEAD", url).status < 400
class Module:
def __init__(self, namespace, is_pyi=False):
self.namespace = namespace
self.is_pyi = is_pyi
self._apis = []
self.parse_orig()
if not is_pyi:
self.pyi = Module(namespace, is_pyi=True)
self.pyi.orders = self.orders[:]
def add(self, api):
self._apis.append(api)
def parse_orig(self):
self.orders = []
self.header = "class C:"
if os.path.exists(self.filepath):
with open(self.filepath) as f:
content = f.read()
header_lines = []
for line in content.split("\n"):
header_lines.append(line)
if line == SEPARATOR:
break
# no separator found
else:
header_lines = []
for line in content.split("\n"):
header_lines.append(line)
if line.startswith("class"):
break
self.header = "\n".join(header_lines)
self.orders = re.findall(
r"\n (?:async )?def ([a-z_]+)\(", content, re.MULTILINE
)
def _position(self, api):
try:
return self.orders.index(api.name)
except ValueError:
return len(self.orders)
def sort(self):
self._apis.sort(key=self._position)
def dump(self):
self.sort()
with open(self.filepath, "w") as f:
f.write(self.header)
for api in self._apis:
f.write(api.to_python())
if not self.is_pyi:
self.pyi.dump()
@property
def filepath(self):
return (
CODE_ROOT
/ f"elasticsearch/_async/client/{self.namespace}.py{'i' if self.is_pyi else ''}"
)
class API:
def __init__(self, namespace, name, definition, is_pyi=False):
self.namespace = namespace
self.name = name
self.is_pyi = is_pyi
# overwrite the dict to maintain key order
definition["params"] = {
SUBSTITUTIONS.get(p, p): v for p, v in definition.get("params", {}).items()
}
self._def = definition
self.description = ""
self.doc_url = ""
self.stability = self._def.get("stability", "stable")
if isinstance(definition["documentation"], str):
self.doc_url = definition["documentation"]
else:
# set as attribute so it may be overridden by Module.add
self.description = (
definition["documentation"].get("description", "").strip()
)
self.doc_url = definition["documentation"].get("url", "")
# Filter out bad URL refs like 'TODO'
# and serve all docs over HTTPS.
if self.doc_url:
if not self.doc_url.startswith("http"):
self.doc_url = ""
if self.doc_url.startswith("http://"):
self.doc_url = self.doc_url.replace("http://", "https://")
# Try setting doc refs like 'current' and 'master' to our branches ref.
if BRANCH_NAME is not None:
revised_url = re.sub(
"/elasticsearch/reference/[^/]+/",
f"/elasticsearch/reference/{BRANCH_NAME}/",
self.doc_url,
)
if is_valid_url(revised_url):
self.doc_url = revised_url
else:
print(f"URL {revised_url!r}, falling back on {self.doc_url!r}")
@property
def all_parts(self):
parts = {}
for url in self._def["url"]["paths"]:
parts.update(url.get("parts", {}))
for p in parts:
parts[p]["required"] = all(
p in url.get("parts", {}) for url in self._def["url"]["paths"]
)
parts[p]["type"] = "Any"
for k, sub in SUBSTITUTIONS.items():
if k in parts:
parts[sub] = parts.pop(k)
dynamic, components = self.url_parts
def ind(item):
try:
return components.index(item[0])
except ValueError:
return len(components)
parts = dict(sorted(parts.items(), key=ind))
return parts
@property
def params(self):
parts = self.all_parts
params = self._def.get("params", {})
return chain(
((p, parts[p]) for p in parts if parts[p]["required"]),
(("body", self.body),) if self.body else (),
(
(p, parts[p])
for p in parts
if not parts[p]["required"] and p not in params
),
sorted(params.items(), key=lambda x: (x[0] not in parts, x[0])),
)
@property
def body(self):
b = self._def.get("body", {})
if b:
b.setdefault("required", False)
return b
@property
def query_params(self):
return (
k
for k in sorted(self._def.get("params", {}).keys())
if k not in self.all_parts
)
@property
def all_func_params(self):
"""Parameters that will be in the '@query_params' decorator list
and parameters that will be in the function signature.
This doesn't include the global query parameters defined in GLOBAL_QUERY_PARAMS.
"""
params = list(self._def.get("params", {}).keys())
for url in self._def["url"]["paths"]:
params.extend(url.get("parts", {}).keys())
if self.body:
params.append("body")
return params
@property
def path(self):
return max(
(path for path in self._def["url"]["paths"]),
key=lambda p: len(re.findall(r"\{([^}]+)\}", p["path"])),
)
@property
def method(self):
# To adhere to the HTTP RFC we shouldn't send
# bodies in GET requests.
default_method = self.path["methods"][0]
if self.body and default_method == "GET" and "POST" in self.path["methods"]:
return "POST"
return default_method
@property
def url_parts(self):
path = self.path["path"]
dynamic = "{" in path
if not dynamic:
return dynamic, path
parts = []
for part in path.split("/"):
if not part:
continue
if part[0] == "{":
part = part[1:-1]
parts.append(SUBSTITUTIONS.get(part, part))
else:
parts.append(f"'{part}'")
return dynamic, parts
@property
def required_parts(self):
parts = self.all_parts
required = [p for p in parts if parts[p]["required"]]
if self.body.get("required"):
required.append("body")
return required
def to_python(self):
if self.is_pyi:
t = jinja_env.get_template("base_pyi")
else:
try:
t = jinja_env.get_template(f"overrides/{self.namespace}/{self.name}")
except TemplateNotFound:
t = jinja_env.get_template("base")
return t.render(
api=self,
substitutions={v: k for k, v in SUBSTITUTIONS.items()},
global_query_params=GLOBAL_QUERY_PARAMS,
)
@contextlib.contextmanager
def download_artifact(version):
# Download the list of all artifacts for a version
# and find the latest build URL for 'rest-resources-zip-*.zip'
resp = http.request(
"GET", f"https://artifacts-api.elastic.co/v1/versions/{version}"
)
packages = json.loads(resp.data)["version"]["builds"][0]["projects"][
"elasticsearch"
]["packages"]
for package in packages:
if re.match(r"^rest-resources-zip-.*\.zip$", package):
zip_url = packages[package]["url"]
break
else:
raise RuntimeError(
"Could not find the package 'rest-resources-zip-*.zip' in build"
)
# Download the .jar file and unzip only the API
# .json files into a temporary directory
resp = http.request("GET", zip_url)
tmp = Path(tempfile.mkdtemp())
zip = zipfile.ZipFile(io.BytesIO(resp.data))
for name in zip.namelist():
if not name.endswith(".json") or name == "schema.json":
continue
# Compatibility APIs/tests should be skipped
if "/compatApi" in name or "/compatTest" in name:
continue
with (tmp / name.replace("rest-api-spec/api/", "")).open("wb") as f:
f.write(zip.read(name))
yield tmp
shutil.rmtree(tmp)
def read_modules(version):
modules = {}
with download_artifact(version) as path:
for f in sorted(os.listdir(path)):
name, ext = f.rsplit(".", 1)
if ext != "json" or name == "_common":
continue
with open(path / f) as api_def:
api = json.load(api_def)[name]
namespace = "__init__"
if "." in name:
namespace, name = name.rsplit(".", 1)
# The data_frame API has been changed to transform.
if namespace == "data_frame_transform_deprecated":
continue
if namespace not in modules:
modules[namespace] = Module(namespace)
modules[namespace].add(API(namespace, name, api))
modules[namespace].pyi.add(API(namespace, name, api, is_pyi=True))
return modules
def dump_modules(modules):
for mod in modules.values():
mod.dump()
# Unasync all the generated async code
additional_replacements = {
# We want to rewrite to 'Transport' instead of 'SyncTransport', etc
"AsyncTransport": "Transport",
"AsyncElasticsearch": "Elasticsearch",
# We don't want to rewrite this class
"AsyncSearchClient": "AsyncSearchClient",
}
rules = [
unasync.Rule(
fromdir="/elasticsearch/_async/client/",
todir="/elasticsearch/_sync/client/",
additional_replacements=additional_replacements,
),
]
filepaths = []
for root, _, filenames in os.walk(CODE_ROOT / "elasticsearch/_async"):
for filename in filenames:
if (
filename.rpartition(".")[-1]
in (
"py",
"pyi",
)
and not filename.startswith("utils.py")
):
filepaths.append(os.path.join(root, filename))
unasync.unasync_files(filepaths, rules)
blacken(CODE_ROOT / "elasticsearch")
if __name__ == "__main__":
version = sys.argv[1]
dump_modules(read_modules(version))
```
|
{
"source": "jdayllon/leiserbik",
"score": 2
}
|
#### File: leiserbik/twitter/capturer.py
```python
import arrow
from pypeln import asyncio_task as aio
from pypeln import thread as th
from leiserbik import *
from leiserbik.async_http import fetch_all
from leiserbik.twitter.core import __generate_search_url_by_range, \
_get_page_branches, list_no_dupes, not_in_list, \
_read_statuses, _get_branch_walk
from leiserbik.twitter.query import TwitterQuery
def query(tq: TwitterQuery):
cur_query = tq.query(with_dates=False)
logger.debug(f"Obtainer Twitter Query Object with query 🔎 {cur_query}")
return rawquery(cur_query, tq.start_date, tq.end_date)
def iter_query(tq: TwitterQuery):
cur_query = tq.query(with_dates=False)
logger.debug(f"Obtainer Twitter Query Object with query 🔎 {cur_query}")
return iter_rawquery(cur_query, tq.end_date)
def rawquery(query: str,
start_date: str = arrow.get().format(SHORT_DATE_FORMAT),
end_date: str = arrow.get().shift(days=-15).format(SHORT_DATE_FORMAT)):
logger.debug("Converting dates from string")
init_date = arrow.get(start_date)
finish_date = arrow.get(end_date)
logger.info("🐦 Scrapping with:[%s] From 🗓️:[%s] ➡️ To 🗓️:[%s]" % (query, init_date.format('YYYY-MM-DD'),
finish_date.format('YYYY-MM-DD')))
# Create day urls
urls = __generate_search_url_by_range(query, init_date, finish_date)
stage_results = fetch_all(urls)
stage_results = aio.flat_map(_get_page_branches, stage_results, workers=MAX_WORKERS)
stage_results = th.flat_map(_get_branch_walk, stage_results, workers=MAX_WORKERS)
stage_results = th.flat_map(_read_statuses, stage_results, workers=MAX_WORKERS)
# results = list_no_dupes(stage_results)
results = list(stage_results)
logger.info(f"💬 Captured {len(results)}")
return results
def iter_rawquery(query: str, end_date: str = arrow.get().shift(days=-15).format(SHORT_DATE_FORMAT)):
# if we are iterating, start_date is "now"
start_date: str = arrow.get().format(SHORT_DATE_FORMAT)
# First call get everything until now
all_status_until_now = list_no_dupes(rawquery(query, start_date, end_date))
yield all_status_until_now
while True:
cur_date = arrow.get().format(SHORT_DATE_FORMAT)
cur_statuses = rawquery(query, cur_date, cur_date)
cur_new_statuses = not_in_list(all_status_until_now, cur_statuses)
logger.info(f"Found: {len(cur_statuses)} 💬")
all_status_until_now += cur_new_statuses
yield cur_new_statuses
```
#### File: leiserbik/twitter/core.py
```python
import copy
import json
import time
import urllib
import arrow
from arrow import Arrow
from bs4 import BeautifulSoup, Tag
from ratelimit import limits, sleep_and_retry
from requests import Session
from scalpl import Cut
from leiserbik import *
from leiserbik.borg import Kakfa
from leiserbik.twitter.query import TwitterQueryStatus
def not_in_list(l1, l2):
if l1 is None:
l1 = []
if l2 is None:
l2 = []
if l1 == [] and l2 != []:
return list_no_dupes(l2)
elif l1 != [] and l2 == []:
return list_no_dupes(l1)
elif l1 == [] and l2 == []:
return []
else:
# return list(set(l2) - set(l1))
return union_lists_no_dupes(l2, l1)
def __generate_search_url_by_day(query: str, date: Arrow):
"""
Returns a string with a url to ask twitter for a query in a day
:param query:str twitter advanced query string
:param date: date to query
:return: url for date
"""
search_url = '%s since:%s until:%s' % (query, date.format('YYYY-MM-DD'), date.shift(days=1).format('YYYY-MM-DD'))
search_url = 'https://mobile.twitter.com/search?q=' + urllib.parse.quote_plus(search_url)
logger.debug(f"Generated url: {search_url}")
return search_url
def _session_get_requests(url: str):
return __session_get_request(requests.Session(), url)
def __session_get_request(session: Session, url: str):
# session.headers.update({'User-Agent': GENERATED_USER_AGENT})
if 'HTTPS_PROXY' in globals():
session.proxies = {"http": '127.0.0.1:5566', "https": '127.0.0.1:5566'}
return session.get(url)
else:
session.proxies = {"http": None, "https": None}
return __session_get_rated_requests(**locals())
@sleep_and_retry
@limits(calls=50, period=60)
def __session_get_rated_requests(session: Session, url: str):
logger.trace(f"👮Rate limited GET request: {url}")
try:
response = session.get(url)
return response
except KeyboardInterrupt:
raise
except:
logger.warning(f"🚨 Fail on GET request - Retry on 30s: {url}")
time.sleep(10)
return session.get(url)
def __session_post_request(session: Session, url):
session.headers.update({'User-Agent': GENERATED_USER_AGENT})
if 'HTTPS_PROXY' in globals():
session.proxies = {"http": ROTATE_HTTP_PROXY, "https": ROTATE_HTTPS_PROXY}
return session.post(url)
else:
session.proxies = {"http": None, "https": None}
return __session_post_rated_requests(**locals())
@sleep_and_retry
@limits(calls=50, period=60)
def __session_post_rated_requests(session: Session, url: str):
logger.trace(f"👮Rate limited POST request: {url}")
return session.post(url)
def __get_statuses(decoded_content):
# return [f"https://mobile.twitter.com{path}" for path in REGEX_STATUS_LINK.findall(decoded_content)]
def _get_base_status(id: int):
status = Cut()
status['@data_source'] = 'https://mobile.twitter.com'
status['id'] = id
status['id_str'] = str(id)
status['@updated_at'] = arrow.utcnow().format(LONG_DATETIME_PATTERN) + "Z"
return status.data
statuses = []
statuses_links = list_no_dupes(REGEX_STATUS_LINK_VALUES.findall(decoded_content))
for x in statuses_links:
try:
if type(x) is list:
for y in x:
statuses += [_get_base_status(int(y))]
else:
statuses += [_get_base_status(int(x))]
except KeyboardInterrupt:
raise
except:
logger.warning(f"⚠️ Converting to integer: {x}")
return statuses
def __get_next_page(decoded_content, session, REGEX_COMPILED_PATTERN):
next_pages = [f"https://mobile.twitter.com{path}" for path in REGEX_COMPILED_PATTERN.findall(decoded_content)]
if len(next_pages) == 1:
logger.debug(f"Requesting: {next_pages[0]}")
res = __session_get_request(session, next_pages[0])
logger.debug(f"Request: {next_pages[0]} |{res.status_code}|")
if res.status_code == 200:
return res.content.decode('utf-8')
elif res.status_code == 429:
logger.warning(f"Request Rate Limit Exception: {next_pages[0]}")
time.sleep(30)
return __get_next_page(decoded_content, session, REGEX_COMPILED_PATTERN)
else:
return None
return None
def __generate_search_url_by_range(query: str, init_date: Arrow, finish_date: Arrow):
urls = []
cur_date = init_date
while cur_date >= finish_date:
cur_url = __generate_search_url_by_day(query, cur_date)
urls += [cur_url]
cur_date = cur_date.shift(days=-1)
return urls
def _get_page_branches(content):
def get_query_from_content(decode_content):
results = REGEX_GET_QUERY.findall(decode_content)
if len(results) == 1:
return results[0]
else:
return []
# twqstatus = TwitterQueryStatus()
try:
cur_decoded_content = content.decode('utf-8')
session = requests.Session()
except KeyboardInterrupt:
raise
except:
return []
data = []
branches = 1
query_from_content = get_query_from_content(cur_decoded_content)
while True:
# cur_statuses = __get_statuses(cur_decoded_content)
# new_statuses = not_in_list(twqstatus.get(query_from_content), cur_statuses)
data += [(cur_decoded_content, copy.deepcopy(session), branches, query_from_content)]
# cur_decoded_content = get_next_branch(cur_decoded_content , session)
cur_decoded_content = __get_next_page(cur_decoded_content, session, REGEX_UPDATE_LINK)
if cur_decoded_content is None:
break
else:
branches += 1
logger.debug(f"New Branch |{query_from_content}|, total branches ⌥: {branches}")
return data
def _get_user_statuses(user, max_id=0):
session = requests.Session()
branch = 0
query_from_content = user
if max_id > 0:
res = __session_get_request(session, f"https://mobile.twitter.com/{user}?max_id={max_id}")
else:
res = __session_get_request(session, f"https://mobile.twitter.com/{user}")
# res = session.get(f"https://mobile.twitter.com/{user}")
statuses = []
logger.debug(f"Requests: {res.url} |{res.status_code}|")
if res.status_code == 200:
cur_content = res.content.decode('utf-8')
# logger.info(cur_content)
else:
return statuses
# Do while emulation
while True:
cur_statuses = __get_statuses(cur_content)
if len(cur_statuses) == 0:
logger.debug(f"Statuses 💬 not Found 😅 |{user}|")
# nojs_post_url =REGEX_NOJS_ROUTER.findall(cur_content)[0].split('"')[0]
# logger.debug(f"POST Requests detected: {nojs_post_url}")
# cur_content = __session_post_request(session,nojs_post_url)
# if cur_content is None and type(cur_content) is bytes:
# cur_content = cur_content.decode('utf-8')
# logger.info(cur_content)
# cur_statuses_check = __get_statuses(cur_content)
# if len(cur_statuses_check) == 0:
# return statuses
return statuses
else:
statuses = list(set(cur_statuses + statuses))
logger.debug(f"Current content statuses found: {len(statuses)} 💬 |{user}|")
cur_content = __get_next_page(cur_content, session, REGEX_USER_NEXT_LINK)
if cur_content is None:
return statuses
def _get_branch_walk(params):
decoded_content = params[0]
session = params[1]
branch = params[2]
query_from_content = params[3]
contents = []
cur_content = decoded_content
twqstatus = TwitterQueryStatus()
# Do while emulation
while True:
contents += [cur_content]
cur_statuses = __get_statuses(cur_content)
try:
new_statuses = not_in_list(twqstatus.get(query_from_content), cur_statuses)
# except KeyboardInterrupt:
# raise
except:
new_statuses = []
logger.exception("🚨Error on content parameters, probably partial page download")
if len(cur_statuses) == 0:
logger.debug(f"💬 No more statuses found 😅 |{query_from_content} -- Branch: {branch}|")
return contents
elif len(new_statuses) == 0:
logger.debug(f"💬 No new statuses found 😅 |{query_from_content} -- Branch: {branch}|")
return contents
else:
logger.info(f"💬 {len(new_statuses)} new statuses found 👍 |{query_from_content} -- Branch: {branch}|")
twqstatus.append(query_from_content, cur_statuses)
cur_content = __get_next_page(cur_content, session, REGEX_NEXT_LINK)
if cur_content is None:
return contents
def _get_branch_statuses(params):
decoded_content = params[0]
session = params[1]
branch = params[2]
query_from_content = params[3]
statuses = []
cur_content = decoded_content
# Do while emulation
while True:
cur_statuses = __get_statuses(cur_content)
if len(cur_statuses) == 0:
logger.debug(f"Statuses 💬 not Found 😅 |{query_from_content} -- Branch: {branch}|")
return statuses
else:
statuses = list(set(cur_statuses + statuses))
logger.debug(
f"Current content statuses found: {len(statuses)} 💬 |{query_from_content} -- Branch: {branch}|")
cur_content = __get_next_page(cur_content, session, REGEX_NEXT_LINK)
if cur_content is None:
return statuses
def _get_status(id: int, session: Session = requests.Session()):
res = __session_get_request(session, f"https://mobile.twitter.com/twitter/status/{id}")
if res.status_code == 200:
return _read_statuses(res.content.decode('utf-8'))
elif res.status_code == 429:
time.sleep(10)
return _get_status(id, session)
else:
return None
def _read_statuses(content: str):
statuses_data = []
soup = BeautifulSoup(content, "html.parser")
statuses = soup.find_all('table', {"class": "tweet"})
if len(statuses) == 0:
statuses = soup.find_all('div', {"class": "js-tweet-text-container"})
for cur_tweet in statuses:
cur_statuses_data = __read_status(cur_tweet)
statuses_data += [cur_statuses_data]
return statuses_data
def _update_status_stats(status: dict):
"""Get from desktop web version stats about current status (RT and FAVs)
Arguments:
status {dict} -- Current status in scalpl object
Returns:
dict -- Updated stats status
"""
try:
# ! TODO Implement a better version in parallel or async
cur_retweets, cur_favs = __update_status_stats(status['id'])
status['retweet_count'] = cur_retweets
status['favorite_count'] = cur_favs
except KeyboardInterrupt:
raise
except:
logger.warning(f"🚨 Fail getting RT and Favs from 🐦: {status['id']}")
return status
def __update_status_stats(id: int, session: Session = requests.Session()):
res = __session_get_request(session, f"https://twitter.com/twitter/status/{id}")
# Getting web standart version
if res.status_code == 200:
soup = BeautifulSoup(res.content.decode('utf-8'), "html.parser")
retweet_ele = soup.find('li', {"class": "js-stat-retweets"})
fav_ele = soup.find('li', {"class": "js-stat-favorites"})
if retweet_ele is not None:
cur_retweets = int(retweet_ele.find('a').find('strong').get_text())
else:
cur_retweets = 0
if fav_ele is not None:
cur_favorites = fav_ele.find('a').find('strong').get_text()
else:
cur_favorites = 0
logger.info(f"🔁 Retweets for {id} : {cur_retweets}")
logger.info(f"❤️ Favs for {id} : {cur_favorites}")
return cur_retweets, cur_favorites
elif res.status_code == 429:
time.sleep(10)
return __update_status_stats(id, session)
else:
return None
def __read_status(soup):
status = Cut()
if 'tombstone-tweet' in soup['class']:
# Dead twitter account reference
return status
cur_tweet_data = soup.find('div', {"class": "tweet-text"})
status['@data_source'] = 'https://mobile.twitter.com'
status['id'] = int(cur_tweet_data['data-id'])
status['id_str'] = str(status['id'])
status['@updated_at'] = arrow.utcnow().format(LONG_DATETIME_PATTERN) + "Z"
status['user'] = {}
try:
status['user.screen_name'] = soup.find('div', {"class": "username"}).get_text().replace('\n', '').strip()[
1:] # Omits @
except KeyboardInterrupt:
raise
except:
logger.warning(f"🚨 Fail getting screen_name from 🐦: {status['id']}")
status['user.screen_name'] = soup['href'].split('/')[1]
try:
status['user.name'] = soup.find('strong', {"class": "fullname"}).get_text()
except KeyboardInterrupt:
raise
except:
logger.warning(f"🚨 Fail getting fullname from 🐦: {status['id']}")
try:
cur_tweet_mentions = soup.find('a', {"class": "twitter-atreply"})
status['user_mentions'] = []
if cur_tweet_mentions is not None:
if type(cur_tweet_mentions) is Tag:
cur_tweet_mentions = [cur_tweet_mentions]
for cur_mention in cur_tweet_mentions:
# Example info
# {
# "id": 3001809246,
# "id_str": "3001809246",
# "name": "<NAME>",
# "screen_name": "rafamorenorojas"
# },
#
status['user_mentions'] += [{
'id': cur_mention['data-mentioned-user-id'],
'id_str': str(cur_mention['data-mentioned-user-id']),
'screen_name': cur_mention.get_text()[1:] # Omit @
}]
except KeyboardInterrupt:
raise
except:
logger.warning(f"🚨 Fail getting user_mentions from 🐦: {status['id']}")
try:
cur_tweet_text = cur_tweet_data.find('div', {"class": "dir-ltr"})
if cur_tweet_text is None:
cur_tweet_text = cur_tweet_data.get_text().lstrip()
else:
cur_tweet_text = cur_tweet_text.get_text().lstrip()
status['full_text'] = cur_tweet_text
except KeyboardInterrupt:
raise
except:
logger.warning(f"🚨 Fail getting full_text from 🐦: {status['id']}")
try:
cur_tweet_date = soup.find('td', {"class": "timestamp"}).find('a').get_text()
if "h" in cur_tweet_date and len(cur_tweet_date) < 4:
hours = int(re.findall(r"([0-9]{0,2})\s?h", cur_tweet_date)[0])
cur_tweet_date = arrow.get().shift(hours=-hours).format(LONG_DATETIME_PATTERN)
elif "m" in cur_tweet_date and len(cur_tweet_date) < 4:
minutes = int(re.findall(r"([0-9]{0,2})\s?m", cur_tweet_date)[0])
cur_tweet_date = arrow.get().shift(minutes=-minutes).format(LONG_DATETIME_PATTERN)
elif "s" in cur_tweet_date and len(cur_tweet_date) < 4:
seconds = int(re.findall(r"([0-9]{0,2})\s?s", cur_tweet_date)[0])
cur_tweet_date = arrow.get().shift(seconds=-seconds).format(LONG_DATETIME_PATTERN)
elif len(cur_tweet_date) < 9:
# On current year tweets doesn't show a year in text
cur_tweet_date += arrow.get().format(" YY")
cur_tweet_date = arrow.get(cur_tweet_date, "MMM D YY").format(LONG_DATETIME_PATTERN)
else:
cur_tweet_date = arrow.get(cur_tweet_date, "D MMM YY").format(LONG_DATETIME_PATTERN)
status['created_at'] = cur_tweet_date.format(LONG_DATETIME_PATTERN) + "Z"
except KeyboardInterrupt:
raise
except:
logger.warning(f"🚨 Fail getting created_at from 🐦: {status['id']}")
try:
cur_tweet_hashtags = soup.find('a', {"class": "twitter-hashtag"})
status['hashtags'] = []
if cur_tweet_hashtags is not None:
if type(cur_tweet_hashtags) is Tag:
cur_tweet_hashtags = [cur_tweet_hashtags]
for cur_hashtag in cur_tweet_hashtags:
# "hashtags": [
# {
# "text": "Gastronom\u00eda"
# },
# {
# "text": "Andaluc\u00eda"
# }
# ],
status['hashtags'] += [{
'text': cur_hashtag.get_text()[1:] # Omits '#'
}]
except KeyboardInterrupt:
raise
except:
logger.warning(f"🚨 Fail getting hashtags from 🐦: {status['id']}")
try:
cur_tweet_urls = soup.find('a', {"class": "twitter_external_link"})
status['urls'] = []
if cur_tweet_urls is not None:
if type(cur_tweet_urls) is Tag:
cur_tweet_urls = [cur_tweet_urls]
for cur_url in cur_tweet_urls:
# "urls": [
# {
# "expanded_url": "https://sevilla.abc.es/gurme//reportajes-bares-y-restaurantes-cordoba/cordoba/rafael-moreno-rojas-director-la-catedra-gastronomia-andalucia-objetivo-darnos-conocer-cordoba/",
# "url": "https://t.co/5Qiiv6KR9w"
# }
# ],
status['urls'] += [{
'url': cur_url['href'],
'expanded_url': cur_url['data-expanded-url'] if 'data-expanded-url' in cur_url else None,
}]
except KeyboardInterrupt:
raise
except:
logger.warning(f"🚨 Fail getting external urls from 🐦: {status['id']}")
# return status.data
# return json.dumps(status.data, indent=4)
return status.data
def _send_kafka(cur_dict: dict, topic=None):
logger.info("Going to send kafka")
kafka = Kakfa()
cur_json = json.dumps(cur_dict, indent=4)
try:
if topic is None:
logger.debug(f"📧 Sending to Kafka [{kafka.topic}]: {cur_json} - {cur_dict['id_str']}")
future_requests = kafka.producer.send(kafka.topic, f'{cur_json}'.encode(), key=cur_dict['id_str'].encode())
else:
logger.debug(f"📧 Sending to Kafka [{topic}]: {cur_json} - {cur_dict['id_str']}")
future_requests = kafka.producer.send(topic, f'{cur_json}'.encode(), key=cur_dict['id_str'].encode())
future_response = future_requests.get(timeout=10)
except:
logger.exception(f"🚨 Error sending to Kafka: {cur_dict['id_str']}")
return cur_dict
```
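The relative-timestamp handling inside __read_status is the easiest part of this parser to get wrong, so here is a small self-contained sketch of the same rules using only arrow and re; the pattern constant and sample inputs are illustrative stand-ins, not the module's real values.
```python
# Hedged sketch of the mobile-Twitter timestamp rules mirrored from __read_status.
import re
import arrow

DATETIME_PATTERN = 'YYYY-MM-DDTHH:mm:ss'  # assumption: stands in for LONG_DATETIME_PATTERN

def parse_mobile_timestamp(raw: str) -> str:
    if 'h' in raw and len(raw) < 4:
        hours = int(re.findall(r"([0-9]{0,2})\s?h", raw)[0])
        stamp = arrow.get().shift(hours=-hours)
    elif 'm' in raw and len(raw) < 4:
        minutes = int(re.findall(r"([0-9]{0,2})\s?m", raw)[0])
        stamp = arrow.get().shift(minutes=-minutes)
    elif 's' in raw and len(raw) < 4:
        seconds = int(re.findall(r"([0-9]{0,2})\s?s", raw)[0])
        stamp = arrow.get().shift(seconds=-seconds)
    elif len(raw) < 9:
        # Current-year tweets omit the year, e.g. "Mar 3".
        stamp = arrow.get(raw + arrow.get().format(" YY"), "MMM D YY")
    else:
        stamp = arrow.get(raw, "D MMM YY")
    return stamp.format(DATETIME_PATTERN) + "Z"

for raw in ("5h", "12m", "30s", "Mar 3", "12 Mar 19"):
    print(raw, "->", parse_mobile_timestamp(raw))
```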
#### File: leiserbik/tests/test_leiserbik.py
```python
import pytest
from click.testing import CliRunner
from leiserbik import watcher
from leiserbik import cli
#<EMAIL>
def test_user_by_id():
"""Test of user status by id
"""
# import requests
# return requests.get('https://github.com/audreyr/cookiecutter-pypackage')
statuses = watcher.user_by_id('jday11on',14992604202)
assert len(statuses) == 1
def test_command_line_interface():
"""Test the CLI."""
runner = CliRunner()
result = runner.invoke(cli.main)
help_result = runner.invoke(cli.main, ['--help'])
assert '--help' in help_result.output
assert 'Show this message and exit.' in help_result.output
```
|
{
"source": "jdayllon/tile-changer",
"score": 2
}
|
#### File: tile-changer/app/main.py
```python
from flask import Flask, Response, request, send_file
from werkzeug.wsgi import FileWrapper
from flask_caching import Cache
import requests
from loguru import logger
import os
import PIL.Image
from io import BytesIO
import uuid
import logging
####
def env_var_load(name, default_value):
if name in os.environ:
res = type(default_value)(os.environ[name])
logger.info(f"ENV '{name}' Found: {res}")
return res
else:
logger.warning(f"ENV '{name}' NOT Found: {default_value}")
return default_value
# Initial env prepare
DEBUG = env_var_load("DEBUG",True)
CACHE_TYPE = env_var_load("CACHE_TYPE",'simple')
CACHE_DEFAULT_TIMEOUT = env_var_load("CACHE_DEFAULT_TIMEOUT",300)
TARGET_HOST = env_var_load('TARGET_HOST','')
config = {
"DEBUG": DEBUG, # some Flask specific configs
"CACHE_TYPE": CACHE_TYPE, # Flask-Caching related configs
"CACHE_DEFAULT_TIMEOUT": CACHE_DEFAULT_TIMEOUT
}
if CACHE_TYPE == 'redis':
config['CACHE_REDIS_HOST'] = env_var_load("CACHE_REDIS_HOST", 'localhost')
config['CACHE_REDIS_PORT'] = env_var_load("CACHE_REDIS_PORT", 6379)
config['CACHE_REDIS_PASSWORD'] = env_var_load("CACHE_REDIS_PASSWORD",'')
config['CACHE_REDIS_DB'] = env_var_load("CACHE_REDIS_DB",'')
app = Flask(__name__)
app.config.from_mapping(config)
cache = Cache(app)
####
# Partially disable logging from the requests library
logging.getLogger("requests").setLevel(logging.WARNING)
# Based on https://stackoverflow.com/a/10170635
# https://www.pythonanywhere.com/forums/topic/13570/
def serve_pil_image(pil_img, mimetype):
img_io = BytesIO()
pil_img.save(img_io, mimetype.split("/")[1].upper())
img_io.seek(0)
# The original snippet from GitHub uses Flask's send_file method,
# but it caused pickle problems under uWSGI, so send_file was swapped
# for werkzeug's FileWrapper plus a plain Flask Response
# --> return send_file(img_io, mimetype=mimetype)
w = FileWrapper(img_io)
return Response(w, mimetype=mimetype, direct_passthrough=True)
@app.route('/', defaults={'path': ''})
@app.route('/<path:path>')
@cache.cached(timeout=24*60*60, query_string=True)
def proxy(path):
current_process = uuid.uuid1()
if isinstance(request.query_string, str):
target_url = f"{TARGET_HOST}{path}?{request.query_string}"
else:
target_url = f"{TARGET_HOST}{path}?{request.query_string.decode()}"
logger.info(f"|{current_process}| Requesting : {target_url}")
res = requests.get(target_url)
logger.info(f"|{current_process}| Requested: {res.status_code}")
original_mimetype = res.headers['Content-type'].split(";")[0]
original_type_of_res = original_mimetype.split("/")[0]
logger.info(f"|{current_process}| Response mimetype: {original_mimetype}")
if original_type_of_res != "image" or "VND.MICROSOFT.ICON" in original_mimetype.upper():
logger.warning(f"|{current_process}| Content non-supported")
return (f'Non-Supported Content Type: {original_mimetype}', 204)
image_stream = BytesIO(res.content)
image = PIL.Image.open(image_stream)
image_bw = image.convert('LA')
logger.info(f"|{current_process}| Image transformed, sending...")
return serve_pil_image(image_bw, original_mimetype)
if __name__ == '__main__':
app.run(host='0.0.0.0', port=8080)
```
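A minimal sketch of serve_pil_image outside of an actual request, assuming the module above imports cleanly (Flask, flask_caching, Pillow and friends installed); the image is generated in memory, so no upstream tile server is involved.
```python
# Hedged usage sketch: wrap a tiny in-memory image the same way the proxy does.
import PIL.Image

demo = PIL.Image.new('RGB', (4, 4), color=(200, 50, 50)).convert('LA')
resp = serve_pil_image(demo, 'image/png')
print(resp.mimetype)     # expected: 'image/png'
print(resp.status_code)  # expected: 200
```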
|
{
"source": "jdaymude/AttractionRepulsionModel",
"score": 3
}
|
#### File: jdaymude/AttractionRepulsionModel/arm.py
```python
import math
import numpy as np
from tqdm import trange
def arm(N=100, D=1, E=[0.1], T=0.25, R=0.25, K=math.inf, S=500000, P=0, \
shock=(None, None), init='norm', seed=None, silent=False):
"""
Execute a simulation of the Attraction-Repulsion Model.
Inputs:
N (int): number of agents
D (int): number of ideological dimensions
E ([float]): list of exposures
T (float): tolerance
R (float): responsiveness
K (float): steepness of stochastic attraction-repulsion
S (int): number of steps to simulate
P (float): self-interest probability
shock ((float, float)): external shock step and strength
init (str): 'norm' for Gaussian normal initialization, 'emp' for empirical
seed (int): random seed
silent (bool): True to suppress the progress bar on the command line
Returns (init_config, config, history):
init_config: N x D array of initial agent ideological positions
config: N x D array of agent ideological positions after S steps
history: S x (D + 2) array detailing interaction history
"""
# Initialize the random number generation.
rng = np.random.default_rng(seed)
# Initialize the agent population and their initial ideological positions.
if init == 'norm':
if D == 1:
config = np.zeros(N)
for i in np.arange(N):
while True:
config[i] = rng.normal(0.5, 0.2)
if 0 <= config[i] and config[i] <= 1:
break
config = config.reshape(-1, 1)
else: # Higher dimensions.
means, covs = 0.5 + np.zeros(D), 0.04 * np.eye(D)
config = np.zeros((N, D))
for i in np.arange(N):
while True:
config[i] = rng.multivariate_normal(means, covs)
clip = np.maximum(np.zeros(D), np.minimum(np.ones(D), config[i]))
if np.allclose(config[i], clip):
break
else: # Empirical initialization.
assert D == 1, 'ERROR: CCES 2020 data is 1-dimensional'
with open('CCES_2020_dist.npy', 'rb') as f:
emp = np.load(f)
vals, probs = emp[0], emp[1]
config = rng.choice(vals, N, p=probs) + (0.005 * rng.random(N) - 0.0025)
config = config.reshape(-1, 1)
init_config = np.copy(config)
# Create an S x (D + 2) array to store the interaction history. Each step i
# records the active agent [i][0], the passive agent [i][1], and the active
# agent's new position [i][2:].
history = np.zeros((S, D + 2))
# Simulate the desired number of pairwise interactions.
for step in trange(S, desc='Simulating interactions', disable=silent):
# Perform the external shock intervention, if specified.
shock_step, shock_strength = shock
if shock_step is not None:
assert D == 1, 'ERROR: External shock requires 1D'
if step >= shock_step and step < shock_step + N:
i = step - shock_step
config[i] = np.minimum(np.ones(D), config[i] + shock_strength)
history[step] = np.concatenate(([i], [i], config[i]))
continue
# Choose the active agent u.a.r.
i = rng.integers(N)
# Perform self-interest intervention, if specified.
if P > 0 and rng.random() < P:
config[i] = config[i] + R * (init_config[i] - config[i])
history[step] = np.concatenate(([i], [i], config[i]))
continue
# Interaction Rule: interact with probability (1/2)^delta, where delta
# is the decay based on the agents' distance, scaled by the exposures
# for each dimension.
j = rng.choice(np.delete(np.arange(N), i))
delta = math.sqrt(sum([(config[i][k] - config[j][k])**2 / \
E[k]**2 for k in range(D)]))
if rng.random() <= math.pow(0.5, delta):
dist = np.linalg.norm(config[i] - config[j])
if K == math.inf:
# The Attraction-Repulsion rule of opinion change.
if dist <= T:
# Attraction: agent i moves toward agent j.
config[i] = config[i] + R * (config[j] - config[i])
else:
# Repulsion: agent i moves away from agent j.
config[i] = config[i] - R * (config[j] - config[i])
elif dist > 0:
# The Stochastic Attraction-Repulsion rule of opinion change.
rep_prob = 1/(1 + np.power((D**0.5/dist - 1)/(D**0.5/T - 1), K))
if rng.random() >= rep_prob:
# Attraction: agent i moves toward agent j.
config[i] = config[i] + R * (config[j] - config[i])
else:
# Repulsion: agent i moves away from agent j.
config[i] = config[i] - R * (config[j] - config[i])
# Clip to the limits of ideological space.
config[i] = np.maximum(np.zeros(D), np.minimum(np.ones(D), config[i]))
history[step] = np.concatenate(([i], [j], config[i]))
else: # No interaction.
history[step] = np.concatenate(([i], [i], config[i]))
return init_config, config, history
```
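A quick, hedged usage sketch of arm with a deliberately tiny population and step count so it runs in seconds; the parameter values are illustrative only and not meant to reproduce any published result.
```python
# Hedged usage sketch: a tiny ARM run, just to show the returned shapes and summary stats.
init_config, config, history = arm(N=20, D=1, E=[0.1], T=0.25, R=0.25,
                                   S=2000, seed=42, silent=True)
print("initial mean/std:", init_config.mean(), init_config.std())
print("final   mean/std:", config.mean(), config.std())
print("history shape (steps x [active, passive, position]):", history.shape)
```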
|
{
"source": "jdayton3/ch-pipeline",
"score": 2
}
|
#### File: jdayton3/ch-pipeline/remove_unplaced_multiallelic.py
```python
import gzip
import re
import os
import time
from sys import argv
import concurrent.futures
startTime = time.time()
char = '\n' + ('*' * 70) + '\n'
#Input file or list of files
inputFile = argv[1]
#Create a list of file(s) that need to have unplaced and multiallelic sites removed
fileSet = set()
if inputFile.endswith(".gz"):
fileSet.add(inputFile)
elif inputFile.endswith(".txt"):
with open(inputFile) as sampleFile:
for sample in sampleFile:
sample = sample.rstrip("\n")
fileSet.add(sample)
elif inputFile.endswith(".tsv"):
with open(inputFile) as sampleFile:
header = sampleFile.readline()
headerList = header.rstrip().split("\t")
fileNameIndex = headerList.index("file_name")
familyIdIndex = headerList.index("family_id")
for sample in sampleFile:
sampleData = sample.rstrip("\n").split("\t")
fileName = sampleData[fileNameIndex]
sampleFamilyId = sampleData[familyIdIndex]
shortName = re.findall(r"([\w\-/]+)\.?.*\.?.*\.gz", fileName)[0]
individualFileName = "{}_test/{}_liftover.vcf.gz".format(sampleFamilyId, shortName)
trioFileName = "{}_test/{}_liftover.vcf.gz".format(sampleFamilyId, sampleFamilyId)
fileSet.add(individualFileName)
fileSet.add(trioFileName)
#Remove Unplaced sites and multiallelic sites
chrToKeep = {"chr1", "chr2", "chr3", "chr4", "chr5", "chr6", "chr7", "chr8", "chr9", "chr10", "chr11", "chr12", "chr13",\
"chr14", "chr15", "chr16", "chr17", "chr18", "chr19", "chr20", "chr21", "chr22", "chrX", "chrY"}
filesToRemoveDuplicates = []
def removeSites(file):
fileName = re.findall(r"([\w\-/]+)_liftover\.?.*\.?.*\.gz", file)[0]
outputName = "{}_no_ambiguous_sites.vcf".format(fileName)
with gzip.open(file, "rt") as inputFile, open(outputName, "wt") as outFile:
for line in inputFile:
if line.startswith("#") and "##contig=<ID=" not in line:
outFile.write(line)
elif line.startswith("#") and "##contig=<ID=" in line:
splitLine = line.split(",")
chr = splitLine[0].replace("##contig=<ID=", "")
if chr in chrToKeep:
outFile.write(line)
else:
splitLine = line.split("\t")
if splitLine[0] in chrToKeep and "," not in splitLine[4]:
outFile.write(line)
os.system("bgzip {}".format(outputName))
return("{}.gz".format(outputName))
with concurrent.futures.ProcessPoolExecutor(max_workers=24) as executor:
fileName = executor.map(removeSites, fileSet)
for file in fileName:
filesToRemoveDuplicates.append(file)
#Output message and time complete
timeElapsedMinutes = round((time.time()-startTime) / 60, 2)
timeElapsedHours = round(timeElapsedMinutes / 60, 2)
print('{}Unplaced and multiallelic sites have been removed. Time elapsed: {} minutes ({} hours){}'.format(char, timeElapsedMinutes, timeElapsedHours, char))
#Remove all duplicate sites
def removeDuplicates(file):
fileName = re.findall(r"([\w\-/]+)_no_ambiguous_sites\.?.*\.?.*\.gz", file)[0]
outputName = "{}_liftover_parsed.vcf".format(fileName)
duplicateFile = "{}_removedDuplicates.vcf".format(fileName)
lines = [["NA"], ["NA"], ["NA"], ["NA"], ["NA"], ["NA"], ["NA"], ["NA"], ["NA"], ["NA"]]
dupList = []
with gzip.open(file, "rt") as inputFile:
for line in inputFile:
if not line.startswith("#"):
line = line.split("\t")
line = line[0:2] + line[3:5]
if line not in lines:
lines.append(line)
lines = lines[-10:]
else:
dupList.append(line)
with gzip.open(file, "rt") as inputFile, open(outputName, "wt") as outFile, open(duplicateFile, "w") as duplicates:
for line in inputFile:
if not line.startswith("#"):
splitLine = line.split("\t")
splitLine = splitLine[0:2] + splitLine[3:5]
if splitLine not in dupList:
outFile.write(line)
else:
duplicates.write(line)
else:
outFile.write(line)
duplicates.write(line)
os.system("bgzip {}".format(outputName))
with concurrent.futures.ProcessPoolExecutor(max_workers=24) as executor:
executor.map(removeDuplicates, filesToRemoveDuplicates)
timeElapsedMinutes = round((time.time()-startTime) / 60, 2)
timeElapsedHours = round(timeElapsedMinutes / 60, 2)
print('{}Duplicate sites removed. Time elapsed: {} minutes ({} hours){}'.format(char, timeElapsedMinutes, timeElapsedHours, char))
```
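For reference, a minimal sketch of the per-record rule removeSites applies to data lines, run on a few invented VCF records; the real script works on bgzipped files and also rewrites the header, which is omitted here.
```python
# Hedged sketch of the removeSites data-line filter on made-up VCF records.
CHR_TO_KEEP = {"chr1", "chr2", "chrX"}  # abbreviated stand-in for chrToKeep

records = [
    "chr1\t100\trs1\tA\tG\t.\t.\t.",             # kept: placed contig, biallelic ALT
    "chr1\t200\trs2\tA\tG,T\t.\t.\t.",           # dropped: multiallelic ALT
    "chrUn_KI270302v1\t5\trs3\tC\tT\t.\t.\t.",   # dropped: unplaced contig
]
kept = [r for r in records
        if r.split("\t")[0] in CHR_TO_KEEP and "," not in r.split("\t")[4]]
print(len(kept))  # expected: 1
```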
|
{
"source": "jdayton3/dpu-utils",
"score": 3
}
|
#### File: dpu_utils/mlutils/chartensorizer.py
```python
import numpy as np
from typing import Optional
__all__ = ['CharTensorizer']
class CharTensorizer:
"""Tensorize strings into characters"""
def __init__(self, max_num_chars: Optional[int], lower_case_all: bool, include_space: bool):
self.__max_num_chars = max_num_chars
self.__lower_case_all = lower_case_all
self.__ALPHABET = "abcdefghijklmnopqrstuvwxyz0123456789,;.!?:'\"/\\|_@#$%^&*~`+-=<>()[]{}"
if not self.__lower_case_all:
self.__ALPHABET = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + self.__ALPHABET
if include_space:
self.__ALPHABET += ' '
self.__ALPHABET_DICT = {char: idx + 2 for (idx, char) in enumerate(self.__ALPHABET)} # "0" is PAD, "1" is UNK
self.__ALPHABET_DICT['PAD'] = 0
self.__ALPHABET_DICT['UNK'] = 1
@property
def max_char_length(self)-> Optional[int]:
return self.__max_num_chars
def num_chars_in_vocabulary(self)-> int:
return len(self.__ALPHABET)
def __get_char_idx(self, character: str) -> int:
idx = self.__ALPHABET_DICT.get(character)
if idx is not None:
return idx
return self.__ALPHABET_DICT['UNK']
def get_word_from_ids(self, ids: np.ndarray) -> str:
# Ids are offset by the PAD/UNK slots: chars start at 2, 1 is UNK, 0 (PAD) is skipped.
return ''.join(self.__ALPHABET[i - 2] if i >= 2 else ('<UNK>' if i == 1 else '') for i in ids)
def tensorize_str(self, input: str)-> np.ndarray:
if self.__lower_case_all:
input = input.lower()
def char_iterator():
for i, c in enumerate(input):
if self.__max_num_chars is not None and i >= self.__max_num_chars:
break
yield self.__get_char_idx(c)
if self.__max_num_chars is not None and len(input) < self.__max_num_chars:
pad_id = self.__get_char_idx('PAD')
yield from (pad_id for _ in range(self.__max_num_chars - len(input)))
return np.fromiter(char_iterator(), dtype=np.uint8)
```
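A short usage sketch for CharTensorizer, assuming dpu_utils (or at least the class above) is importable; the input string is arbitrary, and the round-trip relies on the PAD/UNK id offsets noted in get_word_from_ids.
```python
# Hedged usage sketch: tensorize a short string and map the ids back to text.
from dpu_utils.mlutils.chartensorizer import CharTensorizer  # assumption: path as in the header above

tensorizer = CharTensorizer(max_num_chars=8, lower_case_all=True, include_space=True)

ids = tensorizer.tensorize_str("Hi, y'all")   # 9 characters, truncated to 8 ids
print(ids.shape)                               # expected: (8,)
print(tensorizer.get_word_from_ids(ids))       # expected: "hi, y'al"
```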
#### File: dpu_utils/tfutils/unsortedsegmentops.py
```python
import tensorflow as tf
def unsorted_segment_logsumexp(scores, segment_ids, num_segments):
"""Perform an unsorted segment safe logsumexp."""
# Note: if a segment is empty, the smallest value for the score will be returned,
# which yields the correct behavior
max_per_segment = tf.unsorted_segment_max(data=scores,
segment_ids=segment_ids,
num_segments=num_segments)
scattered_log_maxes = tf.gather(params=max_per_segment,
indices=segment_ids)
recentered_scores = scores - scattered_log_maxes
exped_recentered_scores = tf.exp(recentered_scores)
per_segment_sums = tf.unsorted_segment_sum(exped_recentered_scores, segment_ids, num_segments)
per_segment_logs = tf.log(per_segment_sums)
return per_segment_logs + max_per_segment
def unsorted_segment_log_softmax(logits, segment_ids, num_segments):
"""Perform an unsorted segment safe log_softmax."""
# Note: if a segment is empty, the smallest value for the score will be returned,
# which yields the correct behavior
max_per_segment = tf.unsorted_segment_max(data=logits,
segment_ids=segment_ids,
num_segments=num_segments)
scattered_maxes = tf.gather(params=max_per_segment,
indices=segment_ids)
recentered_scores = logits - scattered_maxes
exped_recentered_scores = tf.exp(recentered_scores)
per_segment_sums = tf.unsorted_segment_sum(exped_recentered_scores, segment_ids, num_segments)
per_segment_normalization_consts = tf.log(per_segment_sums)
log_probs = recentered_scores - tf.gather(params=per_segment_normalization_consts, indices=segment_ids)
return log_probs
```
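These helpers target the TensorFlow 1.x API (tf.unsorted_segment_max, tf.log), so rather than a TF session example, here is a small NumPy reference computation of what unsorted_segment_logsumexp should return; the values are illustrative and handy for a unit-test style comparison.
```python
# Hedged NumPy reference: per-segment logsumexp computed directly.
import numpy as np

scores = np.array([1.0, 2.0, 0.5, 3.0, -1.0])
segment_ids = np.array([0, 0, 1, 1, 1])
num_segments = 2

expected = np.array([np.log(np.sum(np.exp(scores[segment_ids == s])))
                     for s in range(num_segments)])
print(expected)  # what unsorted_segment_logsumexp(scores, segment_ids, 2) should produce
```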
#### File: dpu_utils/utils/debughelper.py
```python
import sys
import traceback
import pdb
from typing import Callable
__all__= ['run_and_debug']
def run_and_debug(func: Callable[[], None], enable_debugging: bool)-> None:
try:
func()
except:
if enable_debugging:
_, value, tb = sys.exc_info()
traceback.print_exc()
pdb.post_mortem(tb)
else:
raise
```
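A minimal usage sketch for run_and_debug; do_work is a placeholder for a real entry point, and enable_debugging would normally come from a --debug flag.
```python
# Hedged usage sketch: failures drop into pdb post-mortem when debugging is enabled.
from dpu_utils.utils.debughelper import run_and_debug  # assumption: path as in the header above

def do_work():
    print("working")

run_and_debug(do_work, enable_debugging=True)
```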
|
{
"source": "jdayton3/Geney",
"score": 2
}
|
#### File: Geney/server/failsafe-app.py
```python
from flask_failsafe import failsafe
# this file is purely for development, so you can still reload the python server when it's running inside a docker container
@failsafe
def create_app():
from app import app
return app
if __name__ == '__main__':
create_app().run(debug=True, host='0.0.0.0', port=8889)
```
|
{
"source": "Jdban/orchestra",
"score": 2
}
|
#### File: orchestra/dags/dv360_create_sdf_advertisers_report_dag.py
```python
import sys
sys.path.append("..")
from datetime import datetime
from datetime import timedelta
from airflow import DAG
from airflow import models
from operators.dv360 import dv360_create_query_by_json_operator
import json
def yesterday():
return datetime.today() - timedelta(days=1)
default_args = {
"owner": "airflow",
"start_date": yesterday(),
"email_on_failure": False,
"email_on_retry": False,
"retries": 1,
"retry_delay": timedelta(seconds=10),
}
conn_id = "gmp_reporting"
dag_name = "dv360_create_sdf_advertisers_report_dag"
output_var = "dv360_sdf_advertisers_report_id"
body = {
"kind": "doubleclickbidmanager#query",
"metadata": {
"title": "Advertiser IDs",
"dataRange": "LAST_30_DAYS",
"format": "CSV",
"running": False,
"reportCount": 0,
"googleCloudStoragePathForLatestReport": "",
"latestReportRunTimeMs": "0",
"googleDrivePathForLatestReport": "",
"sendNotification": False
},
"params": {
"type": "TYPE_GENERAL",
"groupBys": ["FILTER_ADVERTISER", "FILTER_PARTNER"],
"filters": [],
"metrics": ["METRIC_IMPRESSIONS"],
"includeInviteData": True
},
"schedule": {
"frequency": "DAILY",
"endTimeMs": "1861873200000",
"nextRunMinuteOfDay": 0,
"nextRunTimezoneCode": "Europe/London"
},
"timezoneCode": "Europe/London"
}
partner_ids = models.Variable.get("partner_ids").split(",")
# Add partner ID filters using partner_id variable
for partner_id in partner_ids:
body.get("params").get("filters").append({
"type": "FILTER_PARTNER",
"value": partner_id
})
body = json.dumps(body)
dag = DAG(
dag_name, catchup=False, default_args=default_args, schedule_interval=None)
create_query_task = dv360_create_query_by_json_operator.DV360CreateQueryOperator(
task_id="create_dv360_report",
conn_id=conn_id,
depends_on_past=False,
body=body,
output_var=output_var,
dag=dag)
```
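The only dynamic part of the request body is the filter list built from the `partner_ids` Airflow variable; in isolation, that transformation looks like the sketch below (the partner IDs are made up).
```python
import json

body = {"params": {"filters": []}}
partner_ids = "12345,67890".split(",")  # stand-in for models.Variable.get("partner_ids")
for partner_id in partner_ids:
    body["params"]["filters"].append({"type": "FILTER_PARTNER", "value": partner_id})
print(json.dumps(body, indent=2))
```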
#### File: orchestra/utils/download_and_transform_erf.py
```python
import json
import os
from random import randint
import tempfile
import time
from hooks.gcs_hook import GoogleCloudStorageHook
def json_to_jsonlines(json_file):
"""Naive Implentation for json to jsonlines.
Args:
json_file: A string, the file name of the json file.
Returns:
The input json file as new line delimited json.
"""
with open(json_file) as f:
data = json.load(f)
return '\n'.join([json.dumps(d) for d in data])
def download_and_transform_erf(self, partner_id=None):
"""Load and Transform ERF files to Newline Delimeted JSON.
Then upload this file to the project GCS.
Args:
self: The operator this is being used in.
partner_id: A string of the DCM id of the partner.
Returns:
    filename: The name of the converted newline-delimited JSON file uploaded to GCS.
"""
if partner_id:
self.erf_bucket = 'gdbm-%s' % partner_id
else:
self.erf_bucket = 'gdbm-public'
gcs_hook = GoogleCloudStorageHook(google_cloud_storage_conn_id=self.conn_id)
entity_read_file = tempfile.NamedTemporaryFile(delete=False)
gcs_hook.download(self.erf_bucket, self.erf_object, entity_read_file.name)
temp_file = None
# Creating temp file. Not using the delete-on-close functionality
# as opening the file for reading while still open for writing
  # will not work on all platforms
# https://docs.python.org/2/library/tempfile.html#tempfile.NamedTemporaryFile
try:
temp_file = tempfile.NamedTemporaryFile(delete=False)
temp_file.writelines(json_to_jsonlines(entity_read_file.name))
temp_file.close()
# Random here used as a nonce for writing multiple files at once.
filename = '%s_%s_%d.json' % (randint(1, 1000000), self.entity_type,
time.time() * 1e+9)
gcs_hook.upload(self.gcs_bucket, filename, temp_file.name)
finally:
if temp_file:
temp_file.close()
os.unlink(temp_file.name)
return filename
```
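A quick check of `json_to_jsonlines` on a throwaway file; the import path and the records are assumptions for illustration only.
```python
import json
import tempfile
from utils.download_and_transform_erf import json_to_jsonlines  # assumed import path

records = [{"id": 1, "name": "a"}, {"id": 2, "name": "b"}]
with tempfile.NamedTemporaryFile('w', suffix='.json', delete=False) as f:
    json.dump(records, f)

print(json_to_jsonlines(f.name))
# {"id": 1, "name": "a"}
# {"id": 2, "name": "b"}
```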
|
{
"source": "jdbartee/crafting_interpreters",
"score": 3
}
|
#### File: crafting_interpreters/src/ast_printer.py
```python
from expressions import Binary, Unary, Literal, Grouping
import tokens
class AstPrinter:
def to_string(self, expr):
s = expr.accept(self)
return s
def sexp(self, car, *cdr):
s = f"({car}"
for expr in cdr:
s += " "
s += self.to_string(expr)
s += ")"
return s
def visit_binary_expr(self, expr: Binary):
return self.sexp(expr.operator.lexeme, expr.left, expr.right)
def visit_grouping_expr(self, expr: Grouping):
return self.sexp("group", expr.expression)
def visit_literal_expr(self, expr: Literal):
return str(expr.value)
def visit_unary_expr(self, expr: Unary):
return self.sexp(expr.operator.lexeme, expr.right)
if __name__ == "__main__":
expression = Binary(
left=Unary(
operator=tokens.Token(tokens.MINUS, "-", "-", 1),
right=Literal(value="123")),
operator=tokens.Token(tokens.STAR, "*", "*", 1),
right=Grouping(
expression=Literal("45.67")))
print(AstPrinter().to_string(expression))
```
#### File: crafting_interpreters/src/environment.py
```python
from errors import LoxRuntimeError
class Environment:
def __init__(self, parent=None):
self.parent: Environment = parent
self.values = {}
def ancestor(self, distance):
e = self
for i in range(0, distance):
e = e.parent
return e
def define(self, name, value):
self.values[name.lexeme] = value
def get(self, name, fallback=True):
if name.lexeme in self.values:
return self.values[name.lexeme]
if fallback and self.parent is not None:
return self.parent.get(name)
raise LoxRuntimeError(name, f"Undefined Variable '{name.lexeme}'.")
def get_at(self, distance, name):
return self.ancestor(distance).get(name, fallback=False)
def assign(self, name, value, fallback=True):
if name.lexeme in self.values:
self.values[name.lexeme] = value
return value
if fallback and self.parent is not None:
return self.parent.assign(name, value)
raise LoxRuntimeError(name, f"Undefined Variable '{name.lexeme}'.")
def assign_at(self, distance, name, value):
self.ancestor(distance).assign(name, value, fallback=False)
```
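A minimal sketch of how the Environment chain resolves names, using a stand-in token class (the real interpreter passes tokens.Token instances; Environment only reads the lexeme attribute):
```python
from environment import Environment  # same import interpreter.py uses below

class _Tok:
    """Stand-in for tokens.Token; Environment only uses .lexeme."""
    def __init__(self, lexeme):
        self.lexeme = lexeme

globals_env = Environment()
block_env = Environment(parent=globals_env)

globals_env.define(_Tok("x"), 1)
block_env.define(_Tok("x"), 2)

print(block_env.get(_Tok("x")))        # 2: innermost scope wins
print(block_env.get_at(1, _Tok("x")))  # 1: walk one scope up, no fallback
```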
#### File: crafting_interpreters/src/interpreter.py
```python
import tokens
from environment import Environment
from errors import LoxRuntimeError, Return
import statements as Stmt
import expressions as Expr
from lox_function import LoxFunction
from lox_class import LoxClass, LoxInstance
class CLOCK:
@staticmethod
def arity():
return 0
@staticmethod
def call(interpreter, arguments):
import datetime
return float(datetime.datetime.now().timestamp())
@staticmethod
def to_string():
return "<native fn clock>"
class PRINT:
@staticmethod
def arity():
return 1
@staticmethod
def call(interpreter, arguments):
value = arguments[0]
if hasattr(value, "to_string") and callable(value.to_string):
value = value.to_string()
print(value)
return None
    @staticmethod
    def to_string():
        return "<native fn print>"
class Interpreter:
def __init__(self, report):
self.report = report
self.global_env = Environment()
self.environment = self.global_env
self.locals = {}
self.global_env.define(tokens.Token(tokens.IDENTIFIER, "clock", "clock", -1), CLOCK)
self.global_env.define(tokens.Token(tokens.IDENTIFIER, "print", "print", -1), PRINT)
def interpret(self, stmts: [Stmt.Stmt]):
try:
for stmt in stmts:
self.execute(stmt)
except LoxRuntimeError as ir:
self.report(ir)
def execute(self, stmt: Stmt.Stmt):
return stmt.accept(self)
def evaluate(self, expr: Expr.Expr):
value = expr.accept(self)
return value
def lookup_variable(self, name: tokens.Token, expression: Expr):
distance = self.locals.get(expression)
value = None
if distance is not None:
value = self.environment.get_at(distance, name)
else:
value = self.global_env.get(name)
return value
def visit_block_stmt(self, stmt: Stmt.Block):
return self.execute_block(
stmt.statements,
Environment(self.environment))
def execute_block(self, stmts: [Stmt], env: Environment):
prev_environment = self.environment
try:
self.environment = env
for stmt in stmts:
self.execute(stmt)
finally:
self.environment = prev_environment
return None
def visit_class_stmt(self, stmt: Stmt.Class):
superclass = None
if stmt.superclass is not None:
superclass = self.evaluate(stmt.superclass)
if not isinstance(superclass, LoxClass):
raise LoxRuntimeError(stmt.superclass.name, "Superclass must be a class.")
self.environment.define(stmt.name, None)
if stmt.superclass is not None:
self.environment = Environment(self.environment)
self.environment.define(tokens.Token(tokens.SUPER, 'super', 'super', -1), superclass)
methods = {}
for method in stmt.methods:
function = LoxFunction(method, self.environment, is_initializer=method.name.lexeme == "init")
methods[method.name.lexeme] = function
klass = LoxClass(stmt.name, superclass, methods)
if superclass is not None:
self.environment = self.environment.parent
self.environment.assign(stmt.name, klass)
return None
def resolve(self, expr: Expr, depth: int):
self.locals[expr] = depth
def visit_print_stmt(self, stmt: Stmt.Print):
value = self.evaluate(stmt.expression)
if hasattr(value, "to_string") and callable(value.to_string):
value = value.to_string()
print(value)
return None
def visit_expression_stmt(self, stmt: Stmt.Expression):
self.evaluate(stmt.expression)
return None
def visit_if_stmt(self, stmt: Stmt.If):
if self.is_truthy(self.evaluate(stmt.condition)):
self.execute(stmt.then_branch)
elif stmt.else_branch is not None:
self.execute(stmt.else_branch)
return None
def visit_var_stmt(self, stmt: Stmt.Var):
value = None
if stmt.initializer is not None:
value = self.evaluate(stmt.initializer)
self.environment.define(stmt.name, value)
def visit_while_stmt(self, stmt: Stmt.While):
while self.is_truthy(self.evaluate(stmt.condition)):
self.execute(stmt.body)
return None
def visit_function_stmt(self, stmt: Stmt.Function):
function = LoxFunction(stmt, self.environment)
self.environment.define(stmt.name, function)
return None
def visit_return_stmt(self, stmt: Stmt.Return):
value = None
if stmt.value is not None:
value = self.evaluate(stmt.value)
raise Return(value)
def visit_binary_expr(self, expr: Expr.Binary):
left = self.evaluate(expr.left)
right = self.evaluate(expr.right)
if expr.operator.token_type == tokens.MINUS:
self.check_numeric_operands(expr.operator, left, right)
return left - right
if expr.operator.token_type == tokens.PLUS:
if type(left) != type(right):
raise LoxRuntimeError(
expr.operator,
"Operands must both be either strings or numbers")
if type(left) is not str and type(left) is not float:
raise LoxRuntimeError(
expr.operator,
"Operands must both be either strings or numbers")
return left + right
if expr.operator.token_type == tokens.STAR:
self.check_numeric_operands(expr.operator, left, right)
return left * right
if expr.operator.token_type == tokens.SLASH:
self.check_numeric_operands(expr.operator, left, right)
return left / right
if expr.operator.token_type == tokens.GREATER:
self.check_numeric_operands(expr.operator, left, right)
return left > right
if expr.operator.token_type == tokens.GREATER_EQUAL:
self.check_numeric_operands(expr.operator, left, right)
return left >= right
if expr.operator.token_type == tokens.LESS:
self.check_numeric_operands(expr.operator, left, right)
return left < right
if expr.operator.token_type == tokens.LESS_EQUAL:
self.check_numeric_operands(expr.operator, left, right)
return left <= right
if expr.operator.token_type == tokens.EQUAL_EQUAL:
return self.is_equal(left, right)
if expr.operator.token_type == tokens.BANG_EQUAL:
return not self.is_equal(left, right)
def visit_grouping_expr(self, expr: Expr.Grouping):
return self.evaluate(expr.expression)
def visit_literal_expr(self, expr: Expr.Literal):
return expr.value
def visit_unary_expr(self, expr: Expr.Unary):
right = self.evaluate(expr.right)
if expr.operator.token_type == tokens.MINUS:
self.check_numeric_operands(expr.operator, right)
return -right
if expr.operator.token_type == tokens.BANG:
return not self.is_truthy(right)
return None
def visit_assign_expr(self, expr: Expr.Assign):
value = self.evaluate(expr.value)
distance = self.locals.get(expr)
if distance is not None:
            self.environment.assign_at(distance, expr.name, value)
        else:
            self.global_env.assign(expr.name, value)
return value
def visit_variable_expr(self, expr: Expr.Variable):
value = self.lookup_variable(expr.name, expr)
return value
def visit_logical_expr(self, expr: Expr.Logical):
left = self.evaluate(expr.left)
if expr.operator.token_type == tokens.OR:
if self.is_truthy(left):
return left
else:
if not self.is_truthy(left):
return left
return self.evaluate(expr.right)
def visit_call_expr(self, expr: Expr.Call):
callee = self.evaluate(expr.callee)
arguments = []
for arg in expr.arguments:
arguments.append(self.evaluate(arg))
if not hasattr(callee, "call") or not callable(callee.call):
raise LoxRuntimeError(expr.paren, "Can only call functions and classes.")
if len(arguments) != callee.arity():
raise LoxRuntimeError(expr.paren, "Wrong number of arguments.")
return callee.call(self, arguments)
def visit_get_expr(self, expr: Expr.Get):
obj = self.evaluate(expr.object)
if isinstance(obj, LoxInstance):
return obj.get(expr.name)
raise LoxRuntimeError(expr.name, "Only instances have properties")
def visit_set_expr(self, expr: Expr.Set):
obj = self.evaluate(expr.object)
if not isinstance(obj, LoxInstance):
raise LoxRuntimeError(expr.name, "Only instances have properties")
value = self.evaluate(expr.value)
        obj.set(expr.name, value)
        return value
def visit_this_expr(self, expr: Expr.This):
return self.lookup_variable(expr.keyword, expr)
def visit_super_expr(self, expr: Expr.Super):
distance = self.locals.get(expr)
superclass = self.environment.get_at(distance, expr.keyword)
obj = self.environment.get_at(distance - 1, tokens.Token(tokens.THIS, 'this', 'this', -1))
method = superclass.find_method(expr.method.lexeme)
if method is None:
raise LoxRuntimeError(expr.method, f"Undefined property {expr.method.lexeme}.")
return method.bind(obj)
def is_truthy(self, value):
return value is not None and value is not False
def is_equal(self, left, right):
return left == right
def check_numeric_operands(self, operator, *operands):
for operand in operands:
if type(operand) is not float:
raise LoxRuntimeError(operator, "Operand must be a number.")
```
#### File: crafting_interpreters/src/lox_class.py
```python
from errors import LoxRuntimeError
from lox_function import LoxFunction
from tokens import Token
class LoxClass:
def __init__(self, name: Token, superclass, methods):
self.name = name
self.superclass = superclass
self.methods = methods
def to_string(self):
return self.name.lexeme
def call(self, interpreter, arguments):
instance = LoxInstance(self)
initializer = self.find_method('init')
if initializer is not None:
initializer.bind(instance).call(interpreter, arguments)
return instance
def arity(self):
initializer = self.find_method('init')
if initializer is not None:
return initializer.arity()
return 0
def find_method(self, name):
method = self.methods.get(name)
if method is None and self.superclass is not None:
return self.superclass.find_method(name)
return method
class LoxInstance:
def __init__(self, klass: LoxClass):
self.klass = klass
self.fields = {}
def to_string(self):
return f"{self.klass.to_string()} instance"
def get(self, name: Token):
if name.lexeme in self.fields:
return self.fields[name.lexeme]
method = self.klass.find_method(name.lexeme)
if method is not None:
return method.bind(self)
raise LoxRuntimeError(name, f"Undefined property {name.lexeme}.")
def set(self, name, value):
self.fields[name.lexeme] = value
```
#### File: crafting_interpreters/src/lox_function.py
```python
import statements as Stmt
from environment import Environment
from errors import Return
from tokens import Token, THIS
this_token = Token(THIS, "this", "this", -1)
class LoxFunction:
def __init__(self, declaration: Stmt.Function, closure: Environment, is_initializer=False):
self.closure = closure
self.declaration = declaration
self.is_initializer = is_initializer
def call(self, interpreter, arguments):
env = Environment(self.closure)
for i, param in enumerate(self.declaration.params):
arg = arguments[i]
env.define(param, arg)
try:
interpreter.execute_block(self.declaration.body, env)
except Return as return_value:
if self.is_initializer:
return self.closure.get_at(0, this_token)
return return_value.value
if self.is_initializer: return self.closure.get_at(0, this_token)
return None
def arity(self):
return len(self.declaration.params)
def to_string(self):
return f"<fn {self.declaration.name.lexeme}>"
def bind(self, instance):
env = Environment(self.closure)
env.define(this_token, instance)
return LoxFunction(self.declaration, env, is_initializer=self.is_initializer)
```
|
{
"source": "jd-bartlett96/PeriPy",
"score": 2
}
|
#### File: peripy/examples/run.py
```python
import argparse
import ast
import os
import sys
import subprocess
HERE = os.path.dirname(__file__) or '.'
def _exec_file(filename):
ns = {'__name__': '__main__', '__file__': filename}
co = compile(open(filename, 'rb').read(), filename, 'exec')
exec(co, ns)
def _extract_full_doc(filename):
p = ast.parse(open(filename, 'rb').read())
return ast.get_docstring(p)
def _extract_short_doc(dirname, fname):
return open(os.path.join(dirname, fname)).readline()[3:].strip()
def _get_module(fname):
start = fname
parts = ['peripy.examples']
while os.path.dirname(start) != '':
dirname, start = os.path.split(start)
parts.append(dirname)
return '.'.join(parts + [start[:-3]])
def example_info(module, filename):
"""Print example information."""
print("Information for example: %s" % module)
print(_extract_full_doc(filename))
def get_all_examples():
"""Get all of the examples."""
basedir = HERE
examples = []
_ignore = [['run.py'],
['example3'],
['example3', 'profiling'],
['example3', 'damage models'],
['example3', 'SS'],
['example3', 'profiling', 'txt'],
['example4']]
ignore = [os.path.abspath(os.path.join(basedir, *pth))
for pth in _ignore]
for dirpath, dirs, files in os.walk(basedir):
rel_dir = os.path.relpath(dirpath, basedir)
if rel_dir == '.':
rel_dir = ''
py_files = [x for x in files
if x.endswith('.py') and not x.startswith('_')]
data = []
for f in py_files:
path = os.path.join(rel_dir, f)
full_path = os.path.join(basedir, path)
dirname = os.path.dirname(full_path)
full_dirname = os.path.join(basedir, dirname)
if ((os.path.abspath(full_path) in ignore)
or (os.path.abspath(full_dirname) in ignore)):
continue
module = _get_module(path)
doc = _extract_short_doc(dirpath, f)
data.append((module, doc))
examples.extend(data)
return examples
def get_input(prompt):
"""Get input (python 3 required)."""
return input(prompt)
def get_path(module):
"""Return the path to the module filename given the module."""
x = module[len('peripy.examples.'):].split('.')
x[-1] = x[-1] + '.py'
return os.path.join(HERE, *x)
def guess_correct_module(example):
"""
Given some form of the example name guess and return a reasonable module.
Examples
--------
>>> guess_correct_module('example1')
'peripy.examples.example1.example'
>>> guess_correct_module('peripy.examples.example1')
'peripy.examples.example1.example'
>>> guess_correct_module('examples.example1')
'peripy.examples.example1.example'
>>> guess_correct_module('example1/example.py')
'peripy.examples.example1.example'
>>> guess_correct_module('example1/example')
'peripy.examples.example1.example'
"""
if example.endswith('.py'):
example = example[:-3]
if not example.endswith('example'):
example = example + '.example'
example = example.replace('/', '.')
if example.startswith('examples.'):
module = 'peripy.' + example
print(module, '1')
elif not example.startswith('peripy.examples.'):
module = 'peripy.examples.' + example
print(module, '2')
else:
module = example
print(module, '3')
return module
def cat_example(module):
"""Cat example."""
filename = get_path(module)
print("# File: %s" % filename)
print(open(filename).read())
def list_examples(examples):
"""List example."""
for idx, (module, doc) in enumerate(examples):
print("%d. %s" % (idx + 1, module[len('peripy.examples.'):]))
print(" %s" % doc)
def run_command(module, args):
"""Run command."""
print("Running example %s.\n" % module)
filename = get_path(module)
if '-h' not in args and '--help' not in args:
example_info(module, filename)
subprocess.call(
["python",
filename]
+
args)
def main(argv=None):
"""Run a PeriPy example."""
if argv is None:
argv = sys.argv[1:]
examples = get_all_examples()
parser = argparse.ArgumentParser(
prog="run", description=__doc__.split("\n\n")[0], add_help=False
)
parser.add_argument(
"-h", "--help", action="store_true", default=False, dest="help",
help="show this help message and exit"
)
parser.add_argument(
"-l", "--list", action="store_true", default=False, dest="list",
help="List examples"
)
parser.add_argument(
"--cat", action="store_true", default=False, dest="cat",
help="Show/cat the example code on stdout"
)
parser.add_argument(
"args", type=str, nargs="?",
help='''optional example name (for example both cavity or
peripy.examples.cavity will work) and arguments to the example.'''
)
if len(argv) > 0 and argv[0] in ['-h', '--help']:
parser.print_help()
sys.exit()
options, extra = parser.parse_known_args(argv)
if options.list:
return list_examples(examples)
if options.cat:
module = guess_correct_module(options.args)
return cat_example(module)
if len(argv) > 0:
module = guess_correct_module(argv[0])
run_command(module, argv[1:])
else:
list_examples(examples)
try:
ans = int(get_input("Enter example number you wish to run: "))
except ValueError:
ans = 0
if ans < 1 or ans > len(examples):
print("Invalid example number, exiting!")
sys.exit()
args = str(get_input(
"Enter additional arguments (leave blank to skip): "
))
module, doc = examples[ans - 1]
print("-" * 80)
run_command(module, args.split())
if __name__ == '__main__':
main()
```
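Beyond the interactive menu, the path-guessing helpers can be exercised directly; a small sketch under the assumption that the module is importable as peripy.examples.run (the resolved path depends on where the examples are installed):
```python
from peripy.examples.run import guess_correct_module, get_path  # assumed import path

module = guess_correct_module('example1/example.py')
# also prints 'peripy.examples.example1.example 2' because of the debug print inside
print(get_path(module))  # e.g. ./example1/example.py relative to the examples directory
```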
#### File: peripy/test/test_cl_euler.py
```python
from .conftest import context_available
from ..cl import get_context
import numpy as np
import pyopencl as cl
from pyopencl import mem_flags as mf
import pathlib
import pytest
@pytest.fixture(scope="module")
def context():
"""Create a context using the default platform, prefer GPU."""
return get_context()
@context_available
@pytest.fixture(scope="module")
def queue(context):
"""Create a CL command queue."""
return cl.CommandQueue(context)
@context_available
@pytest.fixture(scope="module")
def program(context):
"""Create a program object for the Euler integrator."""
kernel_source = open(
pathlib.Path(__file__).parent.absolute() /
"../cl/euler.cl").read()
return cl.Program(context, kernel_source).build()
class TestUpdateDisplacement:
"""Test the displacement update."""
@context_available
def test_update_displacement(self, context, queue, program):
"""Test basic displacement update."""
u = np.zeros(3)
nnodes = 1
force = np.array([1.0, 2.0, 3.0], dtype=np.float64)
bc_types = np.array([0, 0, 0], dtype=np.intc)
bc_values = np.array([0, 0, 0], dtype=np.float64)
displacement_bc_scale = 0
dt = 1
# Set buffers
# Read only
bc_types_d = cl.Buffer(
context, mf.READ_ONLY | mf.COPY_HOST_PTR,
hostbuf=bc_types)
bc_values_d = cl.Buffer(
context, mf.READ_ONLY | mf.COPY_HOST_PTR,
hostbuf=bc_values)
force_d = cl.Buffer(
context, mf.READ_ONLY | mf.COPY_HOST_PTR,
hostbuf=force)
# Read write
u_d = cl.Buffer(
context, mf.READ_WRITE | mf.COPY_HOST_PTR,
hostbuf=u)
# Build kernels
update_displacement_kernel = program.update_displacement
update_displacement_kernel(
queue, (3 * nnodes,), None,
force_d, u_d, bc_types_d, bc_values_d,
np.float64(displacement_bc_scale), np.float64(dt))
cl.enqueue_copy(queue, u, u_d)
assert np.all(u == force)
@context_available
def test_update_displacement2(self, context, queue, program):
"""Test displacement update."""
u = np.zeros(3)
nnodes = 1
force = np.array([1.0, 2.0, 3.0], dtype=np.float64)
bc_types = np.array([0, 0, 0], dtype=np.intc)
bc_values = np.array([0, 0, 0], dtype=np.float64)
displacement_bc_scale = 0
dt = 2.0
# Set buffers
# Read only
bc_types_d = cl.Buffer(
context, mf.READ_ONLY | mf.COPY_HOST_PTR,
hostbuf=bc_types)
bc_values_d = cl.Buffer(
context, mf.READ_ONLY | mf.COPY_HOST_PTR,
hostbuf=bc_values)
force_d = cl.Buffer(
context, mf.READ_ONLY | mf.COPY_HOST_PTR,
hostbuf=force)
# Read write
u_d = cl.Buffer(
context, mf.READ_WRITE | mf.COPY_HOST_PTR,
hostbuf=u)
# Build kernels
update_displacement_kernel = program.update_displacement
update_displacement_kernel(
queue, (3 * nnodes,), None,
force_d, u_d, bc_types_d, bc_values_d,
np.float64(displacement_bc_scale), np.float64(dt))
cl.enqueue_copy(queue, u, u_d)
assert np.all(u == 2.0*force)
@context_available
def test_update_displacement3(self, context, queue, program):
"""Test displacement update with displacement boundary conditions."""
u = np.zeros(3)
nnodes = 1
force = np.array([1.0, 2.0, 3.0], dtype=np.float64)
bc_types = np.array([1, 1, 0], dtype=np.intc)
bc_values = np.array([0.0, 0.0, 0.0], dtype=np.float64)
displacement_bc_scale = 1.0
dt = 2.0
# Set buffers
# Read only
bc_types_d = cl.Buffer(
context, mf.READ_ONLY | mf.COPY_HOST_PTR,
hostbuf=bc_types)
bc_values_d = cl.Buffer(
context, mf.READ_ONLY | mf.COPY_HOST_PTR,
hostbuf=bc_values)
force_d = cl.Buffer(
context, mf.READ_ONLY | mf.COPY_HOST_PTR,
hostbuf=force)
# Read write
u_d = cl.Buffer(
context, mf.READ_WRITE | mf.COPY_HOST_PTR,
hostbuf=u)
# Build kernels
update_displacement_kernel = program.update_displacement
update_displacement_kernel(
queue, (3 * nnodes,), None,
force_d, u_d, bc_types_d, bc_values_d,
np.float64(displacement_bc_scale), np.float64(dt))
cl.enqueue_copy(queue, u, u_d)
u_expected = np.array([0.0, 0.0, 6.0])
assert np.all(u == u_expected)
@context_available
def test_update_displacement4(self, context, queue, program):
"""Test displacement update with displacement B.C. scale."""
u = np.zeros(3)
nnodes = 1
force = np.array([1.0, 2.0, 3.0], dtype=np.float64)
bc_types = np.array([1, 1, 0], dtype=np.intc)
bc_values = np.array([2.0, 2.0, 0.0], dtype=np.float64)
displacement_bc_scale = 0.5
dt = 2.0
# Set buffers
# Read only
bc_types_d = cl.Buffer(
context, mf.READ_ONLY | mf.COPY_HOST_PTR,
hostbuf=bc_types)
bc_values_d = cl.Buffer(
context, mf.READ_ONLY | mf.COPY_HOST_PTR,
hostbuf=bc_values)
force_d = cl.Buffer(
context, mf.READ_ONLY | mf.COPY_HOST_PTR,
hostbuf=force)
# Read write
u_d = cl.Buffer(
context, mf.READ_WRITE | mf.COPY_HOST_PTR,
hostbuf=u)
# Build kernels
update_displacement_kernel = program.update_displacement
update_displacement_kernel(
queue, (3 * nnodes,), None,
force_d, u_d, bc_types_d, bc_values_d,
np.float64(displacement_bc_scale), np.float64(dt))
cl.enqueue_copy(queue, u, u_d)
u_expected = np.array([1.0, 1.0, 6.0])
assert np.all(u == u_expected)
```
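The expected values in these tests are consistent with an explicit Euler update in which free degrees of freedom receive u += dt * force while constrained ones are set to scale * bc_value. The NumPy sketch below is an inference from the assertions above, not the contents of euler.cl.
```python
import numpy as np

def euler_update(u, force, bc_types, bc_values, scale, dt):
    """Explicit Euler step with prescribed displacement boundary conditions."""
    u = u + dt * force                               # free degrees of freedom
    constrained = bc_types != 0
    u[constrained] = scale * bc_values[constrained]  # prescribed degrees of freedom
    return u

u = euler_update(np.zeros(3), np.array([1.0, 2.0, 3.0]),
                 np.array([1, 1, 0]), np.array([2.0, 2.0, 0.0]),
                 scale=0.5, dt=2.0)
print(u)  # [1. 1. 6.], matching test_update_displacement4
```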
#### File: peripy/test/test_neighbour_list.py
```python
from .conftest import context_available
from peripy.create_crack import (create_crack)
from ..integrators import Euler, EulerCL
from ..model import Model
import numpy as np
from scipy.spatial.distance import cdist
import pytest
@pytest.fixture()
def basic_model_3d(data_path, simple_displacement_boundary):
"""Create a basic 3D model object."""
mesh_file = data_path / "example_mesh_3d.vtk"
euler = Euler(dt=1e-3)
model = Model(mesh_file, integrator=euler, horizon=0.1,
critical_stretch=0.05,
bond_stiffness=18.0 * 0.05 / (np.pi * 0.1**4),
dimensions=3,
is_displacement_boundary=simple_displacement_boundary)
return model, euler
@pytest.fixture()
def basic_model_3d_cl(data_path, simple_displacement_boundary):
"""Create a basic 3D model object using an OpenCL integrator."""
mesh_file = data_path / "example_mesh_3d.vtk"
euler = EulerCL(dt=1e-3)
model = Model(mesh_file, integrator=euler, horizon=0.1,
critical_stretch=0.05,
bond_stiffness=18.0 * 0.05 / (np.pi * 0.1**4),
dimensions=3,
is_displacement_boundary=simple_displacement_boundary)
return model, euler
def test_family(basic_model_3d):
"""Test family function."""
model, integrator = basic_model_3d
r = np.random.random((100, 3))
horizon = 0.2
(family_actual,
*_) = model._set_neighbour_list(
r, horizon, 100)
family_expected = np.sum(cdist(r, r) < horizon, axis=0) - 1
assert np.all(family_actual == family_expected)
class TestNeighbourList:
"""Test neighbour list function."""
def test_neighbour_list_cython(self, basic_model_3d):
"""Test cython version of the neighbour list function."""
model, integrator = basic_model_3d
r = np.array([
[0.0, 0.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[2.0, 0.0, 0.0]
])
(_,
nlist_actual,
n_neigh_actual,
max_neighbours_actual) = model._set_neighbour_list(r, 1.1, 4)
nl_expected = np.array([
[1, 2],
[0, 3],
[0, 0],
[1, 0]
])
n_neigh_expected = np.array([2, 2, 1, 1])
assert np.all(nlist_actual == nl_expected)
assert np.all(n_neigh_actual == n_neigh_expected)
assert max_neighbours_actual == 2
@context_available
def test_neighbour_list_cl1(self, basic_model_3d_cl):
"""Test OpenCL version of the neighbourlist function."""
model, integrator = basic_model_3d_cl
r = np.array([
[0.0, 0.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[2.0, 0.0, 0.0]
])
(_,
nlist_actual,
n_neigh_actual,
max_neighbours_actual) = model._set_neighbour_list(
r, 1.1, 4, context=integrator.context)
nl_expected = np.array([
[1, 2],
[0, 3],
[0, -1],
[1, -1]
])
n_neigh_expected = np.array([2, 2, 1, 1])
assert np.all(nlist_actual == nl_expected)
assert np.all(n_neigh_actual == n_neigh_expected)
assert max_neighbours_actual == 2
@context_available
def test_neighbour_list_cl2(self, basic_model_3d_cl):
"""Test OpenCL version of the neighbourlist function."""
model, integrator = basic_model_3d_cl
r = np.array([
[0.0, 0.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[1.0, 0.0, 0.0]
])
(_,
nlist_actual,
n_neigh_actual,
max_neighbours_actual) = model._set_neighbour_list(
r, 1.1, 4, context=integrator.context)
nl_expected = np.array([
[1, 2, 3, -1],
[0, 3, -1, -1],
[0, -1, -1, -1],
[0, 1, -1, -1]
])
n_neigh_expected = np.array([3, 2, 1, 2])
assert np.all(nlist_actual == nl_expected)
assert np.all(n_neigh_actual == n_neigh_expected)
assert max_neighbours_actual == 4
def test_create_crack_cython(self, basic_model_3d):
"""Test crack creations function."""
model, integrator = basic_model_3d
r = np.array([
[0.0, 0.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[2.0, 0.0, 0.0],
[0.0, 0.0, 1.0],
])
horizon = 1.1
(_,
nlist_actual,
n_neigh_actual,
max_neighbours_actual) = model._set_neighbour_list(r, horizon, 5)
nl_expected = np.array([
[1, 2, 4],
[0, 3, 0],
[0, 0, 0],
[1, 0, 0],
[0, 0, 0]
])
n_neigh_expected = np.array([3, 2, 1, 1, 1])
assert np.all(nlist_actual == nl_expected)
assert np.all(n_neigh_actual == n_neigh_expected)
crack = np.array([(0, 2), (1, 3)], dtype=np.int32)
create_crack(crack, nlist_actual, n_neigh_actual)
nl_expected = np.array([
[1, 4, -1],
[0, -1, 0],
[-1, 0, 0],
[-1, 0, 0],
[0, 0, 0]
])
n_neigh_expected = np.array([2, 1, 0, 0, 1])
assert np.all(nlist_actual == nl_expected)
assert np.all(n_neigh_actual == n_neigh_expected)
(_,
nlist_actual,
n_neigh_actual,
max_neighbours_actual) = model._set_neighbour_list(
r, horizon, 5, initial_crack=crack)
assert np.all(nlist_actual == nl_expected)
assert np.all(n_neigh_actual == n_neigh_expected)
@context_available
def test_create_crack_cl(self, basic_model_3d_cl):
"""Test crack creations function."""
model, integrator = basic_model_3d_cl
r = np.array([
[0.0, 0.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[2.0, 0.0, 0.0],
[0.0, 0.0, 1.0],
])
horizon = 1.1
(_,
nlist_actual,
n_neigh_actual,
max_neighbours_actual) = model._set_neighbour_list(
r, horizon, 5, context=integrator.context)
nl_expected = np.array([
[1, 2, 4, -1],
[0, 3, -1, -1],
[0, -1, -1, -1],
[1, -1, -1, -1],
[0, -1, -1, -1]
])
n_neigh_expected = np.array([3, 2, 1, 1, 1])
assert np.all(nlist_actual == nl_expected)
assert np.all(n_neigh_actual == n_neigh_expected)
crack = np.array([(0, 2), (1, 3)], dtype=np.int32)
create_crack(crack, nlist_actual, n_neigh_actual)
nl_expected = np.array([
[1, 4, -1, -1],
[0, -1, -1, -1],
[-1, -1, -1, -1],
[-1, -1, -1, -1],
[0, -1, -1, -1]
])
n_neigh_expected = np.array([2, 1, 0, 0, 1])
assert np.all(nlist_actual == nl_expected)
assert np.all(n_neigh_actual == n_neigh_expected)
(_,
nlist_actual,
n_neigh_actual,
max_neighbours_actual) = model._set_neighbour_list(
r, horizon, 5, initial_crack=crack, context=integrator.context)
assert np.all(nlist_actual == nl_expected)
assert np.all(n_neigh_actual == n_neigh_expected)
```
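The padded nlist arrays in the expectations above (rows padded with -1) can be reproduced with a brute-force cdist construction. The sketch below pads to the true maximum neighbour count, whereas the OpenCL path evidently rounds max_neighbours up (4 rather than 3 in test_neighbour_list_cl2); it is an illustration, not the Model._set_neighbour_list implementation.
```python
import numpy as np
from scipy.spatial.distance import cdist

def naive_neighbour_list(r, horizon):
    """Brute-force neighbour list padded with -1, excluding each node itself."""
    d = cdist(r, r)
    neighbours = []
    for i, row in enumerate(d):
        idx = np.where(row < horizon)[0]
        neighbours.append(idx[idx != i])
    n_neigh = np.array([len(n) for n in neighbours])
    nlist = -np.ones((len(r), n_neigh.max()), dtype=np.intc)
    for i, n in enumerate(neighbours):
        nlist[i, :len(n)] = n
    return nlist, n_neigh

r = np.array([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.], [2., 0., 0.]])
print(naive_neighbour_list(r, 1.1))  # rows [1, 2], [0, 3], [0, -1], [1, -1]
```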
|
{
"source": "JDBetteridge/FiredrakeMLMC",
"score": 2
}
|
#### File: examples/Old_Files/example-v5-2.py
```python
from firedrake import *
from randomgen import RandomGenerator, MT19937
import json
import matplotlib.pyplot as plt
import time
from MLMCv5 import MLMC_Solver, MLMC_Problem
rg = RandomGenerator(MT19937(12345))
def samp(lvl_f, lvl_c):
start = time.time()
rand = 20*rg.random_sample()
samp_c = None
#print(lvl_f.mesh())
x_f, y_f = SpatialCoordinate(lvl_f.mesh())
base_f = exp(-(((x_f-0.5)**2)/2) - (((y_f-0.5)**2)/2))
samp_f = Function(lvl_f)
samp_f.interpolate(Constant(rand)*base_f)
#print(type(samp_f))
    if lvl_c is not None:
x_c, y_c = SpatialCoordinate(lvl_c.mesh())
base_c = exp(-(((x_c-0.5)**2)/2) - (((y_c-0.5)**2)/2))
samp_c = Function(lvl_c)
samp_c.interpolate(Constant(rand)*base_c)
print("samp time: {}".format(time.time() - start))
return samp_f, samp_c
def lvl_maker(level_f, level_c):
coarse_mesh = UnitSquareMesh(10, 10)
hierarchy = MeshHierarchy(coarse_mesh, level_f, 1)
if level_c < 0:
return FunctionSpace(hierarchy[level_f], "Lagrange", 4), None
else:
return FunctionSpace(hierarchy[level_f], "Lagrange", 4), \
FunctionSpace(hierarchy[level_c], "Lagrange", 4)
class problemClass:
"""
Needs to take an integer initialisation argument to define the level (0 - L)
Needs to have a .solve() method which takes a sample as an argument and returns
a scalar solution
"""
def __init__(self, level_obj):
self._V = level_obj
self._sample = Function(self._V)
self._uh = Function(self._V)
self._vs = self.initialise_problem()
def solve(self, sample):
#print(self._V.mesh())
self._sample.assign(sample)
self._vs.solve()
return assemble(Constant(0.5) * dot(self._uh, self._uh) * dx)
# HELPER
def initialise_problem(self):
u = TrialFunction(self._V)
v = TestFunction(self._V)
a = (dot(grad(v), grad(u)) + v * u) * dx
bcs = DirichletBC(self._V, 0, (1,2,3,4))
f = self._sample
L = f * v * dx
vp = LinearVariationalProblem(a, L, self._uh, bcs=bcs)
return LinearVariationalSolver(vp, solver_parameters={'ksp_type': 'cg'})
def general_test():
# Levels and repetitions
levels = 3
repetitions = [100, 50, 10]
MLMCprob = MLMC_Problem(problemClass, samp, lvl_maker)
MLMCsolv = MLMC_Solver(MLMCprob, levels, repetitions)
estimate = MLMCsolv.solve()
print(estimate)
evaluate_result(estimate)
def evaluate_result(result):
with open("10_int.json") as handle:
e_10 = json.load(handle)
with open("100_int.json") as handle:
e_100 = json.load(handle)
with open("1000_int.json") as handle:
e_1000 = json.load(handle)
with open("10000_int.json") as handle:
e_10000 = json.load(handle)
with open("20000_int.json") as handle:
e_20000 = json.load(handle)
d_10 = result - e_10
d_100 = result - e_100
d_1000 = result - e_1000
d_10000 = result - e_10000
d_20000 = result - e_20000
print("% difference from 10 sample MC: ",(d_10*100)/result)
print("% difference from 100 sample MC: ",(d_100*100)/result)
print("% difference from 1000 sample MC: ",(d_1000*100)/result)
print("% difference from 10000 sample MC: ",(d_10000*100)/result)
print("% difference from 20000 sample MC: ",(d_20000*100)/result)
convergence_tests(result)
def convergence_tests(param = None):
"""
Function which compares result to 10,000 sample MC
"""
with open("20000_list.json") as handle:
results = json.load(handle)
res2 = [sum(results[:i+1])/(i+1) for i in range(len(results))]
#print(res2[0], results[0])
fig, axes = plt.subplots()
axes.plot([i for i in range(20000)], res2, 'r')
    if param is not None:
plt.axhline(y=param, color='b')
#axes.hist(solutions, bins = 40, color = 'blue', edgecolor = 'black')
plt.show()
if __name__ == '__main__':
general_test()
```
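MLMCv5 is not included here, but the quantity MLMC_Solver estimates is the standard telescoping sum E[P_L] ≈ E[P_0] + Σ_l E[P_l − P_{l−1}], with many cheap coarse samples and few expensive fine ones. Below is a generic, Firedrake-free sketch of that estimator; its interface only loosely mirrors MLMC_Problem and the toy problem is invented for illustration.
```python
import random

def mlmc_estimate(problem_factory, sampler, levels, repetitions):
    """Telescoping MLMC estimator: sum over levels of mean(P_fine - P_coarse)."""
    estimate = 0.0
    for lvl in range(levels):
        diffs = []
        for _ in range(repetitions[lvl]):
            samp_f, samp_c = sampler(lvl, lvl - 1)
            p_f = problem_factory(lvl).solve(samp_f)
            p_c = problem_factory(lvl - 1).solve(samp_c) if lvl > 0 else 0.0
            diffs.append(p_f - p_c)
        estimate += sum(diffs) / len(diffs)
    return estimate

class ToyProblem:
    """Toy 'solver' whose output converges geometrically with the level."""
    def __init__(self, lvl):
        self.lvl = lvl
    def solve(self, sample):
        return sample * (1.0 - 2.0 ** -(self.lvl + 1))

same_sample = lambda f, c: (random.random(),) * 2  # correlated fine/coarse sample
print(mlmc_estimate(ToyProblem, same_sample, 3, [1000, 200, 50]))  # ~0.4375
```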
|
{
"source": "JDBetteridge/nbfancy",
"score": 3
}
|
#### File: nbfancy/nbfancy/nbfancy_tools.py
```python
import os
import csv
import pkg_resources
import re
import traceback
import nbformat as nf
import nbconvert as nc
from urllib.parse import quote as urlquote
def isdir(path):
'''
Checks whether given path is a directory
'''
if not os.path.isdir(path):
        raise OSError('"' + path + '"' + ' is not a directory')
else:
return path
def try_config(configdir, filename):
'''
Tries to read specified config, else uses global config
returns file handle to requested file
'''
resource_package = 'nbfancy'
config_path = '/config' # Do not use os.path.join()
if not os.path.isdir(configdir):
configdir = pkg_resources.resource_filename(resource_package, config_path)
try:
filepath = os.path.join(configdir, filename)
filehandle = open(filepath, 'r')
except FileNotFoundError:
configdir = pkg_resources.resource_filename(resource_package, config_path)
filepath = os.path.join(configdir, filename)
filehandle = open(filepath, 'r')
return filehandle
def read_header(configdir):
'''
Reads header from config directory
'''
# Open file and extract text in second cell
with try_config(configdir, 'header.ipynb') as fh:
notebook = nf.read(fh, nf.NO_CONVERT)
template = notebook['cells'][1]
return template
def read_footer(configdir):
'''
Reads footer from config directory
'''
# Open file and extract text in second cell
with try_config(configdir, 'footer.ipynb') as fh:
notebook = nf.read(fh, nf.NO_CONVERT)
template = notebook['cells'][1]
return template
def read_box_template(configdir):
'''
Reads box template from given file handle
'''
filehandle = try_config(configdir, 'box.ipynb')
# File is already open
# Open file and extract text in second cell
notebook = nf.read(filehandle, nf.NO_CONVERT)
box = notebook['cells'][1]
template = box['source']
# Replace known values with placeholders
template = template.replace('pale-green', '{bg-colour}')
template = template.replace('green', '{fg-colour}')
template = template.replace('fa-star', '{symbol}')
template = template.replace('TITLE', '{title}')
template = template.replace('BODY', '{body}')
return template
def colour2fgbg(colour):
'''
Pairs foreground colour with background colour
'''
colour = colour.lower()
colour_list = ['red', 'orange', 'yellow', 'green', 'blue', 'purple']
colour_list += ['brown', 'black', 'grey', 'gray', 'white']
assert colour in colour_list
fg = colour
if fg == 'red':
bg = 'pale-red'
elif fg == 'orange':
bg = 'sand'
elif fg == 'yellow':
bg = 'pale-yellow'
elif fg == 'green':
bg = 'pale-green'
elif fg == 'blue':
bg = 'pale-blue'
elif fg == 'purple':
bg = 'pale-red'
elif fg == 'brown':
bg = 'khaki'
elif fg == 'black':
bg = 'gray'
elif (fg == 'gray') or (fg == 'grey'):
fg = 'gray'
bg = 'light-gray'
elif fg == 'white':
bg = 'white'
return fg, bg
def read_box_colour_config(configdir):
'''
Create a dict of configurations for each keyword in filename
Lines starting with # are ignored as are blank lines
'''
config = dict()
def isTF(val):
'''
Return true or false if val is boolean
'''
true_words = ['true', 't', '1']
false_words = ['false', 'f', '0']
test_val = val.strip().lower()
if test_val in true_words:
test_val = True
elif test_val in false_words:
test_val = False
return test_val
with try_config(configdir, 'keywords.cfg') as fh:
no_comments = filter(lambda line: len(line)>3 and line.lstrip()[0]!='#' , fh)
reader = csv.DictReader(no_comments)
for row in reader:
key = row.pop('Keyword')
row_dict = {key.strip().lower() : isTF(row[key]) for key in row}
row_dict['fg-colour'], row_dict['bg-colour'] = colour2fgbg(row_dict['colour'])
config[key.strip().lower()] = row_dict
return config
def box_title(line, config):
'''
Creates title for box.
Returns html formattted title, index and which keyword was found
'''
keywords = config.keys()
# Search for keyword (lowercase) in first line and set that as the key
for word in keywords:
if word in line.lower().split(':')[0]:
key = word
# Recover paramters from keyword
keep_keyword = config[key]['keep_keyword']
hidden = config[key]['hide']
# Whether to print keyword in title
if keep_keyword:
title = line.lstrip('#')
else:
subtitle = line.split(':')
title = ':'.join(subtitle[1:])
# Safe version of title for links
safetitle = title.replace(' ', '-')
safetitle = safetitle.replace('`', '')
index = urlquote(safetitle, safe='?!$\\') + '%0A'
    # Mark up title, in case markdown syntax is used
htmltitle = nc.filters.markdown2html(title)
htmltitle = htmltitle.replace('<p>', '')
htmltitle = htmltitle.replace('</p>', '')
#link = './' + solnfilename.split('/')[-1] + '#' + index
return htmltitle, index, key
def recursion_detector(f):
'''
Detects whether a given function is calling itself
'''
def decorated_f(*args, **kwargs):
stack = traceback.extract_stack()
if len([1 for line in stack if line[2] == f.__name__]) > 0:
print('Warning: Nested environments detected, this is actively discouraged!')
return f(*args, **kwargs)
return decorated_f
@recursion_detector
def box_body(body, config, template, solnfilename, link=None, multicell=None):
'''
Creates body of the box
'''
# If an empty link to a solution is found, populate it with link
# that was generated by the title (for single cell)
if len(body) > 0 and '[solution]()' in body[-1].lower():
k = body[-1].lower().find('[solution]()')
solution_phrase = body[-1][k:k+13]
new_solution_phrase = '\n\n' + solution_phrase.replace('()','({link})')
new_solution_phrase = new_solution_phrase.format(link=link)
body[-1] = body[-1].replace(solution_phrase, new_solution_phrase)
body = '\n'.join(body)
# Apply markup
htmlbody = nc.filters.markdown2html(body)
if multicell is not None:
# Bit of recursion
#print('Warning nested cell environments')
rendered, soln = notebook2rendered(multicell, config, template, solnfilename)
# Export to html to include in cell
html_exp = nc.HTMLExporter()
html_exp.template_file = 'basic'
temphtml, resources = html_exp.from_notebook_node(rendered)
# Remove multiple newlines
temphtml = re.sub(r'(\n\s*)+\n', '\n', temphtml)
# Add boxy thing
temphtml = temphtml.replace('class="input_area"',
'class="output_area" style="background-color:#F7F7F7;border:1px solid #CFCFCF"')
# If an empty link to a solution is found, populate it with link
# that was generated by the title (for multicell)
if '<a href="">solution</a>' in temphtml.lower():
k = temphtml.lower().find('<a href="">solution</a>')
solution_phrase = temphtml[k:k+24]
new_solution_phrase = solution_phrase.replace('href=""','href="{link}"')
new_solution_phrase = new_solution_phrase.format(link=link)
temphtml = temphtml.replace(solution_phrase, new_solution_phrase)
htmlbody += temphtml
# Escape symbols
htmlbody = htmlbody.replace('*', '*')
#htmlbody = htmlbody.replace('_', '_')
# Format tables
htmlbody = htmlbody.replace('<table>', '<table class="w3-table w3-striped w3-hoverable">')
htmlbody = htmlbody.replace('<thead>', '<thead class="w3-black">')
# Be sure to remove final newline
if len(htmlbody) > 0 and htmlbody[-1] == '\n':
htmlbody = htmlbody[:-1]
return htmlbody
def notebook2rendered(plain, config, template, solnfilename):
'''
Converts notebook JSON to rendered notebook JSON for output
'''
# List all the markdown cells
celllist = plain['cells']
markdownlist = [c for c in celllist if c['cell_type']=='markdown']
solnb = None
# For each markdown cell check for keywords and format according to
# the cell template and config files
end = -1
for c in markdownlist:
line = c['source'].split('\n')
# Check for a colon in the first line
if line[0].find(':') < 0:
continue
# Check for a keyword if a colon is found
temp_line = line[0].split(':')
if any(keyword in temp_line[0].lower().strip('# ') for keyword in config.keys()):
htmltitle, index, key = box_title(line[0], config)
# Recover paramters from keyword
hidden = config[key]['hide']
# Multicell procedure
if key + '+' in temp_line[0].lower().strip('# '):
start = celllist.index(c) + 1
end = None
# Find end cell
for subcell in celllist[start:]:
if subcell['cell_type'] == 'markdown':
lastline = subcell['source'].split('\n')
temp_lastline = lastline[-1].split(':')
if key in temp_lastline[-1].lower().strip():
end = celllist.index(subcell) + 1
lastline[-1] = ':'.join(temp_lastline[:-1]).strip()
subcell['source'] = '\n'.join(lastline)
break
else:
# If no end cell found print warning
try:
print('Warning in file', infile, ':')
print('\tNo end tag found for', key + '+', 'environment in cell', start)
except NameError:
print('Warning in temporary file:')
print('\tNo end tag found for', key + '+', 'environment in cell', start)
print('\tCheck you haven\'t nested environments')
# Move multicells to new notebook for processing
multicell = celllist[start:end]
for subcell in multicell:
celllist.remove(subcell)
multicellnb = nf.v4.new_notebook()
multicellnb['metadata'] = plain['metadata']
multicellnb['cells'] = multicell
else:
# If we aren't in a multicell environment
# we don't need the additional notebook
multicellnb = None
# If hidden move cell to new notebook
if hidden:
# Make a new notebook if it doesn't exist already
if solnb is None:
solnb = nf.v4.new_notebook()
solnb['metadata'] = plain['metadata']
solnb['cells'].append(nf.v4.new_markdown_cell(source='# Solutions'))
solnb['cells'].append(nf.v4.new_markdown_cell(source=''))
# REDEFINE c
solnb['cells'][-1] = c.copy()
plain['cells'].remove(c)
c = solnb['cells'][-1]
htmlbody = box_body(line[1:], config, template, solnfilename, multicell=multicellnb)
else:
link = './' + solnfilename.split('/')[-1] + '#' + index
htmlbody = box_body(line[1:], config, template, solnfilename, link=link, multicell=multicellnb)
values = config[key].copy()
values['index'] = index
values['title'] = htmltitle
values['body'] = htmlbody
c['source'] = template.format_map(values)
return plain, solnb
def notebook2HTML(filename):
'''
Converts notebook file to a html string
'''
html_exp = nc.HTMLExporter()
html, resources = html_exp.from_filename(filename)
# SED rules:
# Replace '../folders' in links with './folders'
# for folders images, data, code
html = html.replace('../images', './images')
html = html.replace('../data', './data')
html = html.replace('../code', './code')
# Replace '.ipynb' in links with '.html'
# the '"' ensures this (hopefully) only happens in links
html = html.replace('.ipynb"', '.html"')
html = html.replace('.ipynb#', '.html#')
# Horrible hack because <code> environment doesn't seem to work with CSS sheet
# For plaintext blocks
html = html.replace('<pre><code>', '<pre><code style="">')
# For inline highlighting
html = html.replace('<code>', '<code style="background-color:#F7F7F7;border:1px solid #CFCFCF">')
# Another hack since \n is converted to [space] in links
html = html.replace('%0A"','%20"')
# Add the favicon
html = html.replace('<head><meta charset="utf-8" />',
'<head><meta charset="utf-8" />\n<link rel="icon" type="image/png" href="css/favicon.png"/>')
return html
def notebook2slides(filename):
'''
Converts notebook file to a slide show
'''
slides_exp = nc.SlidesExporter()
slides_exp.reveal_scroll = True # Doesn't work?
slides, resources = slides_exp.from_filename(filename)
# Custom CSS is in the directory above slides
slides = slides.replace('href="custom.css"', 'href="../custom.css"')
# Replace '.ipynb' in links with '.html'
# the '"' ensures this (hopefully) only happens in links
slides = slides.replace('.ipynb"', '.html"')
slides = slides.replace('.ipynb#', '.html#')
# Horrible hack because <code> environment doesn't seem to work with CSS sheet
# For plaintext blocks
slides = slides.replace('<pre><code>', '<pre><code style="">')
# For inline highlighting
slides = slides.replace('<code>', '<code style="background-color:#F7F7F7;border:1px solid #CFCFCF">')
# Another hack since \n is converted to [space] in links
slides = slides.replace('%0A"','%20"')
# Add the favicon
slides = slides.replace('<head><meta charset="utf-8" />',
'<head><meta charset="utf-8" />\n<link rel="icon" type="image/png" href="css/favicon.png"/>')
return slides
def directory_contents(directory):
'''
Returns directory notebook contents
split into lessons and solutions
'''
# Store contents of directory as list
contents = os.listdir(directory)
contents.sort()
try:
# Remove checkpoints folder from list
contents.remove('.ipynb_checkpoints')
except ValueError:
pass
# Removes everything that isn't a notebook ending with .ipynb
contents = [f for f in contents if '.ipynb' in f]
    # Remove solution files from contents and store in a separate list
soln_contents = [f for f in contents if '-soln' in f]
contents = [f for f in contents if '-soln' not in f]
return contents, soln_contents
def navigation_triple(directory, inputfile):
'''
Given a directory and file determines which file is
- previous lesson
- schedule
- next lesson
and returns these files as a dict
'''
contents, _ = directory_contents(directory)
contents.append(contents[0])
current = inputfile.split('/')[-1]
# Exceptional case if you're making a new solution document
if '-soln' in current:
current = current.replace('-soln','')
index = contents.index(current)
outdir = './'
triple = { 'previous' : outdir+contents[index-1],
'index' : outdir+contents[0],
'next' : outdir+contents[index+1] }
return triple
```
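colour2fgbg has no external dependencies, so it is easy to sanity-check on its own; the import path below is an assumption based on the file location above.
```python
from nbfancy.nbfancy_tools import colour2fgbg  # assumed import path

print(colour2fgbg('Blue'))   # ('blue', 'pale-blue')
print(colour2fgbg('grey'))   # ('gray', 'light-gray')
print(colour2fgbg('brown'))  # ('brown', 'khaki')
```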
|
{
"source": "jdbit/django-community",
"score": 2
}
|
#### File: django-community/bookmarks/models.py
```python
from django.db import models
from community.models import User, Post
class Bookmark(models.Model):
user = models.ForeignKey(User, verbose_name="User",
on_delete=models.CASCADE)
post = models.ForeignKey(Post, verbose_name="Post",
on_delete=models.CASCADE, null=True)
def __str__(self):
return self.user.username
```
#### File: django-community/voting/views.py
```python
import json
from django.http import HttpResponse
from django.views import View
from django.contrib.contenttypes.models import ContentType
from .models import LikeDislike
class VotesView(View):
model = None
vote_type = None
def post(self, request, id):
if not request.user.is_authenticated:
return HttpResponse(
json.dumps({
"result": "access_denied"
}),
content_type="application/json"
)
obj = self.model.objects.get(id=id)
try:
likedislike = LikeDislike.objects.get(content_type=ContentType.objects.get_for_model(obj), object_id=obj.id, user=request.user)
if likedislike.vote is not self.vote_type:
likedislike.vote = self.vote_type
likedislike.save(update_fields=['vote'])
result = True
else:
likedislike.delete()
result = False
except LikeDislike.DoesNotExist:
obj.votes.create(user=request.user, vote=self.vote_type)
result = True
obj.upvotes = obj.votes.likes().count()
obj.downvotes = obj.votes.dislikes().count()
obj.save()
return HttpResponse(
json.dumps({
"result": result,
"action": self.vote_type,
"related_id": id,
"like_count": obj.votes.likes().count(),
"dislike_count": obj.votes.dislikes().count(),
"sum_rating": obj.votes.sum_rating()
}),
content_type="application/json"
)
```
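VotesView is parametrised entirely through as_view, so the URL configuration decides which model and vote type each endpoint uses. A rough urls.py sketch follows; the LIKE/DISLIKE constants and URL names are assumptions about the rest of the project, not code from it.
```python
# urls.py sketch -- constants and names are assumed, not taken from this repo
from django.urls import path

from community.models import Post
from voting.models import LikeDislike
from voting.views import VotesView

urlpatterns = [
    path('posts/<int:id>/like/',
         VotesView.as_view(model=Post, vote_type=LikeDislike.LIKE),
         name='post_like'),
    path('posts/<int:id>/dislike/',
         VotesView.as_view(model=Post, vote_type=LikeDislike.DISLIKE),
         name='post_dislike'),
]
```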
|
{
"source": "JDBLambert/Personal_Projects",
"score": 3
}
|
#### File: Personal_Projects/Chat Room/client2.py
```python
import socket
from time import sleep
import threading
peer_address = "127.0.0.1"
class Server(threading.Thread):
def run(self):
print("Starting server")
ip = "127.0.0.1"
listening_port = 10002
addr = (ip, listening_port)
self.listening_server = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
self.listening_server.bind(addr)
self.listening_server.listen(1)
(listening_socket, sending_addr) = self.listening_server.accept()
print("Server started")
print(f"connection established with {sending_addr}")
        while(1):
            chunk = listening_socket.recv(4096).decode()
            if not chunk:
                # An empty read means the peer closed the connection
                break
            print(f"{sending_addr}>>{chunk}")
class Client(threading.Thread):
def run(self):
print("Starting client")
peer_address = "127.0.0.1"
sending_port = 10001
self.sending_client = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
while(1):
try:
self.sending_client.connect((peer_address, sending_port))
print("Connection established")
break
except:
print("Failed. Trying again soon...")
sleep(1)
print("Client Started")
while(1):
sending_data = input(">>").encode()
self.sending_client.sendall(sending_data)
if __name__ == "__main__":
srv = Server()
srv.daemon = True
srv.start()
sleep(1)
cli = Client()
cli.start()
```
|
{
"source": "jdblischak/boot-camps",
"score": 4
}
|
#### File: 08-debugging/conways_game_of_life/2_conway_pre_formatted.py
```python
def conway(population,
generations = 100):
for i in range(generations): population = evolve(population)
return population
def evolve(population):
activeCells = population[:]
for cell in population:
for neighbor in neighbors(cell):
if neighbor not in activeCells: activeCells.append(neighbor)
newPopulation = []
for cell in activeCells:
count = sum(neighbor in population for neighbor in neighbors(cell))
if count == 3 or (count == 2 and cell in population):
if cell not in newPopulation: newPopulation.append(cell)
return newPopulation
def neighbors(cell):
x, y = cell
return [(x, y), (x+1, y), (x-1, y), (x, y+1), (x, y-1), (x+1, y+1), (x+1, y-1), (x-1, y+1), (x-1, y-1)]
glider = [(30, 30), (31, 30), (32, 30), (30, 31), (31, 32)]
print conway(glider)
```
#### File: 08-debugging/conways_game_of_life/5_conway_final.py
```python
def conway(population, generations=100):
"""Runs Conway's game of life on an initial population."""
population = set(population)
for i in range(generations):
population = evolve(population)
return list(population)
def evolve(population):
"""Evolves the population by one generation."""
# Get a unique set of discrete cells that need to be checked
active_cells = population | set([neighbor for p in population
for neighbor in neighbors(p)])
# For each cell in the set, test if it lives or dies
new_population = set()
for cell in active_cells:
count = sum([neighbor in population for neighbor in neighbors(cell)])
if count == 3 or (count == 2 and cell in population):
new_population.add(cell)
# Return the new surviving population
return new_population
def neighbors(cell):
"""Returns the neighbors of a given cell."""
x, y = cell
return [(x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1), (x + 1, y + 1),
(x + 1, y - 1), (x - 1, y + 1), (x - 1, y - 1)]
glider = [(30, 30), (31, 30), (32, 30), (30, 31), (31, 32)]
print conway(glider)
```
|
{
"source": "jdblischak/dox",
"score": 3
}
|
#### File: dox/code/check-download.py
```python
def get_fn(fn):
with open(fn) as f:
a=[ g.strip().split()[1] for g in f.readlines() ]
return a
a=get_fn("../data/fastq-md5sum.txt")
b=get_fn("../data/fastq-md5sum-dak.txt")
missing=list(set(a)-set(b))
missing.sort()
fn="../data/missing-fastq.txt"
if len(missing)==0:
print("No missing files")
else:
print("%i missing files, writing to %s" % (len(missing), fn))
with open(fn,"w") as f:
f.writelines(missing)
import os
print("Problematic md5sums:")
os.system("diff ../data/fastq-md5sum.txt ../data/fastq-md5sum-dak.txt")
```
|
{
"source": "jdblischak/singlecell-qtl",
"score": 2
}
|
#### File: browser/replication/main.py
```python
import bokeh.io
import bokeh.layouts
import bokeh.models
import bokeh.plotting
import os.path
import numpy as np
import pandas as pd
import scipy.stats as st
import scipy.special as sp
import sqlite3
db = '/project2/mstephens/aksarkar/projects/singlecell-qtl/browser/browser.db'
# This needs to be global to be visible to callbacks
gene = None
def update_gene(attr, old, new):
selected = gene_data.selected['1d']['indices']
if not selected:
return
with sqlite3.connect(db) as conn:
global gene
gene = next(conn.execute('select gene from qtls where qtls.gene == ?;', (gene_data.data['gene'][selected[0]],)))[0]
print('Selected {}'.format(gene))
ind_data.data = bokeh.models.ColumnDataSource.from_df(pd.read_sql(
sql="""select genotype.ind, genotype.value as genotype, log_mu as mean, log_phi as
disp, logodds, bulk.value as bulk from genotype, bulk, params where
genotype.gene == ? and genotype.gene == bulk.gene and
bulk.gene == params.gene and genotype.ind == bulk.ind and bulk.ind ==
params.ind;""",
params=(gene,),
con=conn))
def update_umi(attr, old, new):
selected = ind_data.selected['1d']['indices']
with sqlite3.connect(db) as conn:
if selected:
ind = ind_data.data['ind'][selected[0]]
print("Selected {}, {}".format(ind, gene))
umi = pd.read_sql(
"""select umi.value, annotation.size from annotation, umi
where umi.gene == ? and annotation.chip_id == ? and
umi.sample == annotation.sample""",
con=conn,
params=(gene, ind,))
keep = umi['value'] < 19
edges = np.arange(20)
counts, _ = np.histogram(umi['value'].values, bins=edges)
umi_data.data = bokeh.models.ColumnDataSource.from_df(pd.DataFrame({'left': edges[:-1], 'right': edges[1:], 'count': counts}))
params = pd.read_sql('select log_mean, log_disp, logodds from params where gene == ? and ind == ?', con=conn, params=(gene, ind))
n = np.exp(params['log_disp'])
p = 1 / (1 + np.outer(umi['size'], np.exp(params['log_mean'] - params['log_disp'])))
assert (n > 0).all(), 'n must be non-negative'
assert (p >= 0).all(), 'p must be non-negative'
assert (p <= 1).all(), 'p must be <= 1'
G = st.nbinom(n=n.values.ravel(), p=p.ravel()).pmf
grid = np.arange(19)
pmf = np.array([G(x).mean() for x in grid])
if params.iloc[0]['logodds'] is not None:
pmf *= sp.expit(-params['logodds']).values
pmf[0] += sp.expit(params['logodds']).values
exp_count = umi.shape[0] * pmf
dist_data.data = bokeh.models.ColumnDataSource.from_df(pd.DataFrame({'x': .5 + grid, 'y': exp_count}))
else:
umi_data.data = bokeh.models.ColumnDataSource.from_df(pd.DataFrame(columns=['left', 'right', 'count']))
dist_data.data = bokeh.models.ColumnDataSource.from_df(pd.DataFrame(columns=['x', 'y']))
def init():
with sqlite3.connect(db) as conn:
gene_data.data = bokeh.models.ColumnDataSource.from_df(pd.read_sql(
sql="""select qtls.gene, qtls.id,
qtls.p_adjust, qtls.beta_bulk, qtls.p_sc, qtls.beta_sc
from qtls order by p_bulk;""",
con=conn))
# These need to be separate because they have different dimension
ind_data = bokeh.models.ColumnDataSource(pd.DataFrame(columns=['ind', 'genotype', 'mean', 'disp', 'logodds', 'bulk']))
ind_data.on_change('selected', update_umi)
gene_data = bokeh.models.ColumnDataSource(pd.DataFrame(columns=['gene', 'name', 'id', 'p_bulk', 'beta_bulk', 'p_sc', 'beta_sc']))
gene_data.on_change('selected', update_gene)
umi_data = bokeh.models.ColumnDataSource(pd.DataFrame(columns=['left', 'right', 'count']))
dist_data = bokeh.models.ColumnDataSource(pd.DataFrame(columns=['x', 'y']))
# These need to be module scope because bokeh.server looks there
qtls = bokeh.models.widgets.DataTable(
source=gene_data,
columns=[bokeh.models.widgets.TableColumn(field=x, title=x) for x in ['gene', 'id', 'p_bulk', 'beta_bulk', 'p_sc', 'beta_sc', 'replicated']],
width=1200,
height=200)
hover = bokeh.models.HoverTool(tooltips=[('Individual', '@ind')])
bulk_mean_by_geno = bokeh.plotting.figure(width=300, height=300, tools=['tap', hover])
bulk_mean_by_geno.scatter(source=ind_data, x='genotype', y='bulk', color='black', size=8)
bulk_mean_by_geno.xaxis.axis_label = 'Centered dosage'
bulk_mean_by_geno.yaxis.axis_label = 'Bulk log TPM'
sc_mean_by_geno = bokeh.plotting.figure(width=300, height=300, tools=['tap', hover])
sc_mean_by_geno.scatter(source=ind_data, x='genotype', y='mean', color='black', size=8)
sc_mean_by_geno.xaxis.axis_label = 'Centered dosage'
sc_mean_by_geno.yaxis.axis_label = 'Single cell log μ'
umi = bokeh.plotting.figure(width=300, height=300, tools=[])
umi.quad(source=umi_data, bottom=0, top='count', left='left', right='right', color='black')
umi.line(source=dist_data, x='x', y='y', color='red', line_width=2)
umi.xaxis.axis_label = 'Observed UMI'
umi.yaxis.axis_label = 'Number of cells'
sc_disp_by_geno = bokeh.plotting.figure(width=300, height=300, tools=['tap', hover])
sc_disp_by_geno.scatter(source=ind_data, x='genotype', y='disp', color='black', size=8)
sc_disp_by_geno.xaxis.axis_label = 'Centered genotype'
sc_disp_by_geno.yaxis.axis_label = 'Single cell log φ'
sc_logodds_by_geno = bokeh.plotting.figure(width=300, height=300, tools=['tap', hover])
sc_logodds_by_geno.scatter(source=ind_data, x='genotype', y='logodds', color='black', size=8)
sc_logodds_by_geno.xaxis.axis_label = 'Centered genotype'
sc_logodds_by_geno.yaxis.axis_label = 'Single cell logit π'
layout = bokeh.layouts.layout([[qtls], [bulk_mean_by_geno, sc_mean_by_geno, umi], [sc_disp_by_geno, sc_logodds_by_geno]], sizing_mode='fixed')
doc = bokeh.io.curdoc()
doc.title = 'scQTL browser'
doc.add_root(layout)
init()
```
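The expected-count overlay computed above is a zero-inflated negative binomial: the NB pmf is averaged over cells, down-weighted by the dropout probability, and the dropout mass is added at zero. A compact, self-contained restatement of that step follows; the function and variable names here are illustrative and not part of the app itself.

```python
# Sketch of the zero-inflation applied to the expected counts above:
# with pi = expit(logodds), the expected count per bin is
#   n_cells * [ (1 - pi) * NB_pmf(x) + pi * 1{x == 0} ].
import numpy as np
import scipy.special as sp

def zinb_expected_counts(n_cells, nb_pmf, logodds, grid=np.arange(19)):
    pmf = np.array([nb_pmf(x).mean() for x in grid])  # NB pmf averaged over cells
    pi = sp.expit(logodds)                            # dropout probability
    pmf = (1 - pi) * pmf
    pmf[0] += pi                                      # point mass at zero
    return n_cells * pmf
```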
|
{
"source": "jdbocarsly/magnets",
"score": 2
}
|
#### File: jdbocarsly/magnets/dosplot.py
```python
from os.path import join as j
import pandas as pd
#from bokeh.charts import output_file, show, Line
from bokeh.plotting import figure, output_file, show, ColumnDataSource
from bokeh.models import HoverTool, CrosshairTool
from bokeh.models import DataRange1d as bmdr
from numpy import arange
#todo:
# custom HTML tooltips with title
# fix axis labels (subscripts?)
# add a vertical line through the fermi level - DONE
# remove unused data from dataframes before making the column data sources
# remove hovertool from horizontal/vertical lines - DONE
# hover highlighting vertically
TOOLS = "box_zoom, pan, crosshair,undo, redo, save, reset"
def create_dosplot(compound, atoms_per_unit_cell):
path = j("dos_data",compound)
df1 = pd.read_csv(j(path,"nonsp_dost.dat"), names=["energy", "dos","idos"], sep="\s+")
df1["type"] = "nonsp"
df2 = pd.read_csv(j(path,"sp_dost.dat"), names=["energy", "udos","ddos"], sep="\s+")
df2["type"] = "sp"
df2["ddos"] = -df2["ddos"]
# convert to DOS/atom
df1["dos"] = df1["dos"]/atoms_per_unit_cell
df2["udos"] = df2["udos"]/atoms_per_unit_cell
df2["ddos"] = df2["ddos"]/atoms_per_unit_cell
p = figure(x_axis_label=r"E - Ef (eV)", y_axis_label="density of states (states/eV atom)", responsive=True,
tools=TOOLS,active_drag="box_zoom",
x_range=bmdr(start=-5, end=5, bounds=(-10,10)),
y_range=bmdr(start=-4, end=4, bounds=(-10,10))
)
p.toolbar.logo = "grey"
line_width=2
cds1 = ColumnDataSource(df1)
cds2 = ColumnDataSource(df2)
p_nonsp = p.line("energy","dos", legend="No spin polarization", line_color="#606060", line_dash="solid", line_width=line_width, source=cds1)
p_up = p.line("energy", "udos", legend="Spin-up", line_color="#28A0BA", line_dash="solid", line_alpha=0.8, line_width=line_width, source=cds2)
p_down = p.line("energy", "ddos", legend="Spin-down", line_color="#DB7C06", line_dash="solid", line_alpha=0.8, line_width=line_width, source=cds2)
hover_nonsp = HoverTool(
tooltips=[
('Energy','@energy'),
('DOS','@dos')
],
renderers=[p_nonsp]
)
hover_up = HoverTool(
tooltips=[
# ('type','@type'),
('Energy','@energy'),
('Spin-up DOS','@udos')
],
renderers=[p_up]
)
hover_down = HoverTool(
tooltips=[
('Energy','@energy'),
('Spin-down DOS','@ddos')
],
renderers=[p_down]
)
p.add_tools(hover_nonsp, hover_up, hover_down)
c = p.select(type=CrosshairTool)
c.dimensions = "height"
x_h = arange(-15, 15, 1)
y_h = 0*x_h
p.line(x_h, y_h, line_color="black", line_dash="solid", line_width=1)
y_v = arange(-50, 50, 1)
x_v = 0*y_v
p.line(x_v, y_v, line_color="black", line_dash="solid", line_width=1)
return p
```
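For context, a hypothetical invocation of the function above; the compound name and atom count are placeholders, and it assumes `dos_data/<compound>/nonsp_dost.dat` and `sp_dost.dat` exist in the layout read by `create_dosplot`.

```python
from bokeh.plotting import output_file, show
from dosplot import create_dosplot

output_file("dos.html")
p = create_dosplot("MnB", 8)  # placeholder compound / atoms per unit cell
show(p)
```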
#### File: jdbocarsly/magnets/magnet_main.py
```python
from flask import Flask, render_template, request, url_for
import numpy as np
import pandas as pd
from bokeh.layouts import row, widgetbox
from bokeh.models import HoverTool, OpenURL, TapTool, CustomJS, LinearColorMapper,ColorBar
from bokeh.palettes import Viridis256, Magma256, Plasma256, brewer
from bokeh.plotting import curdoc, figure, ColumnDataSource
from bokeh.embed import components
from bokeh.io import curdoc
from bokeh.themes import Theme
import resources
from ashby import main_plot
from dosplot import create_dosplot
from corr import plot_corr
app = Flask(__name__)
df = pd.read_pickle("clean_pickle2.df")
df["formula_nosubs"] = df["formula"]
df["formula_html"]=[resources.make_html_subscripts(f) for f in df["formula"]]
df["formula"] = [resources.make_unicode_subscripts(name) for name in df["formula"]]
df["class"] = df["class"].fillna("classless")
#print(df["class"])
# cols = sorted(df.columns)
@app.route('/')
@app.route('/ashby')
def ashby():
xprm = request.args.get("x_axis")
if xprm == None:
xprm = "Curie temperature (K)"
yprm = request.args.get("y_axis")
if yprm == None:
yprm = "volumetric moment (emu/cm³)"
cprm = request.args.get("color_axis")
if cprm == None:
cprm = "largest local moment (µB)"
plot = main_plot(df, xprm, yprm,cprm)
script, div = components(plot)
return render_template("ashby.html", script=script, div=div,
cols=resources.axis_columns, curr_x=xprm, curr_y=yprm, curr_c=cprm, col_groups=resources.axis_columns_groups,
color_cols=resources.axis_columns_groups)
@app.route('/c/<int:cid>')
def single_compound_view(cid):
if cid >= len(df): return index()
c = df.iloc[cid]
#c = c.dropna()
# vals = ["{:.2f}".format(c[x]) if isinstance(c[x], (np.floating, float)) else c[x] for x in resources.dos_cols]
groups_with_vals = []
for category_name, parameters in resources.dos_columns_groups:
        # sorry about this line. The point is to filter out delta Sm labels showing up as NaN when the data is not available
parameters = [x for x in parameters if not (isinstance(c[x], (np.floating, float)) and np.isnan(c[x]))]
vals = ["{:.2f}".format(c[x]) if isinstance(c[x], (np.floating, float)) else c[x] for x in parameters]
print('\n',category_name,'\n')
[print(g,v) for g,v in zip(parameters,vals)]
groups_with_vals.append( (category_name, list(zip(parameters, vals))) )
# [print(x) for x in groups_with_vals]
plot = create_dosplot(df.iloc[cid]["material_name"], df.iloc[cid]["natoms"])
script, div = components(plot)
return render_template("dos.html",script=script, div=div,formula=c["formula_html"],dos_columns_groups=groups_with_vals)
@app.route('/correlations')
def corr_page():
plot = plot_corr(df[resources.corr_cols])
script, div = components(plot)
return render_template("corr.html",script=script, div=div)
@app.route('/about')
def about():
return render_template("about.html")
@app.route('/datatable')
def table_page():
df_rounded = df.round(
{"Curie temperature (K)":0,
"gravimetric moment (emu/g)":1,
"largest local moment (µB)":2,
})
df_rounded = df_rounded.fillna("")
print(df_rounded.head())
return render_template("datatable.html", df=df_rounded.iterrows())
if __name__ == '__main__':
app.run(debug=True)
```
|
{
"source": "jdbooth9516/sweepstakes",
"score": 2
}
|
#### File: jdbooth9516/sweepstakes/manager.py
```python
from user_interface import User_interface
from queue_manager import *
from stack_manager import *
class Marketing_firm:
def __init__(self,manager_type):
self.ui = User_interface()
self.manager_type = manager_type
def add_sweepstake(self):
sweepstake = self.ui.get_sweepstakes_info()
        # using dependency injection to send sweepstake info to either data manager
self.manager_type.insert_sweepstake(sweepstake)
```
#### File: jdbooth9516/sweepstakes/stack_manager.py
```python
from stack import Stack
class Stack_manager:
def __init__(self):
self.stack = Stack()
def insert_sweepstake(self, sweepstake):
self.stack.push(sweepstake)
def get_sweepstake(self):
return self.stack.pop()
#create insert sweepstakes and get sweepstakes method.
```
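A short usage sketch tying the two files above together via the dependency injection mentioned in the comment. It assumes the modules are importable as `manager` and `stack_manager`; the `User_interface` prompts are interactive, so this is illustrative rather than automated.

```python
from manager import Marketing_firm
from stack_manager import Stack_manager

firm = Marketing_firm(Stack_manager())
firm.add_sweepstake()                        # prompts for a prize via User_interface
latest = firm.manager_type.get_sweepstake()  # LIFO: most recently added prize
```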
#### File: jdbooth9516/sweepstakes/user_interface.py
```python
class User_interface:
def __init__(self):
self.first = ''
self.last = ' '
self.email = ''
self.registration = ''
def main_menu(self):
print("""Welcome to the main menu \n\t
press -1- to create a contest\n\t
press -2- to add a contestant\n\t
press -3- to pick a winner\n\t
press -4- to quit program""")
        choice = int(input("select an option: "))
return choice
def contestant_info(self, contest_name):
print(f"Enter contestand for the {contest_name} sweepstake\n")
self.first = input("Please enter contestant's first name: ")
self.last = input("Please enter contestant's last name: ")
self.email = input("Please enter contestant's email: ")
self.registration = int(input("Please enter contestant's four digit registration number: "))
def get_manager_type(self):
manager_type = input("Select a data type to order Sweepstakes (stack or queue): ")
return manager_type
def get_sweepstakes_info(self):
prize = input("Please enter a prize: ")
return prize
def finished_entry(self):
finished = input("Would anyone else like to join the sweepstake (y/n): ")
if finished == 'y':
return False
else:
return True
def finished_sweepstakes(self):
entry_finished = input("Would you like to add another sweepstake (y/n): ")
if entry_finished == "n":
return True
def finished_contest(self):
contest_finished = input('Would you like to run another sweepstake (y/n): ')
if contest_finished == "n":
return True
def close(self):
print("Registration has closed stand by for the winner.")
def recieve_message(self, first_name):
print(f"{first_name} has recieved the message")
def recieve_winner(self, first_name):
print(f"{first_name} has recieved prize information")
```
|
{
"source": "jdborowy/pluto",
"score": 3
}
|
#### File: pluto/data/fetcher.py
```python
from collections import namedtuple
from concurrent.futures import ThreadPoolExecutor
from .store import DataStore, StoreRequest
from .connector import Connector
FetcherRequest = namedtuple("DataRequest", ("source", "ticker", "freq", "start"))
class DataFetcher:
def __init__(self, store: DataStore, max_workers=16):
self.store = store
self.max_workers = max_workers
def fetch_one(self, request: FetcherRequest):
connector = Connector.from_name(request.source)
df = connector.fetch(request.ticker, request.freq, request.start)
self.store.save(request, df)
def fetch(self, requests):
def _fetch_one_noexcept(request):
try:
self.fetch_one(request)
except Exception as e:
return e
with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
results = executor.map(_fetch_one_noexcept, requests)
return {req: res for req, res in zip(requests, results)
if res is not None}
```
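A hedged usage sketch of the fetcher above. The concrete `DataStore` instance and the connector name are assumptions (only `DataFetcher` and `FetcherRequest` come from this module); `fetch()` returns a mapping of failed requests to their exceptions.

```python
from pluto.data.fetcher import DataFetcher, FetcherRequest

def fetch_daily(store):  # `store` is any DataStore implementation
    fetcher = DataFetcher(store, max_workers=8)
    requests = [
        FetcherRequest(source="yahoo", ticker="AAPL", freq="1d", start="2020-01-01"),
        FetcherRequest(source="yahoo", ticker="MSFT", freq="1d", start="2020-01-01"),
    ]
    errors = fetcher.fetch(requests)  # {request: exception} for failures only
    for req, err in errors.items():
        print(f"failed {req.ticker}: {err}")
```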
|
{
"source": "jdbower/stoker",
"score": 3
}
|
#### File: bin/pi/log_mon.py
```python
import telnetlib
import socket
import time
import os
import sys
import threading
import logging
logging.basicConfig(filename='/tmp/log_mon.log', level=logging.INFO)
# I should add a keyboard interrupt handler
# The main datapoint array
datapoints = {}
# A datapoint consists of a sensor_id (redundant), timestamp, and state
class datapoint:
def __init__(self, sensor_id, timestamp, state):
self.sensor_id = sensor_id
self.timestamp = timestamp
self.state = state
# Updates the variables in /tmp/[id].fan
def update_var():
# First, make a thread that runs once a second
threading.Timer(1.0, update_var).start()
# Grab the datapoint array
global datapoints
# Iterate over each sensor in the datapoints
for id, datapoint_list in datapoints.iteritems():
total_points = 0
total_on = 0
total_off = 0
# For each datapoint for each sensor...
for curr_datapoint in datapoint_list:
# If the timestamp is more than a minute old, remove it.
if int(time.time()) - curr_datapoint.timestamp > 60:
datapoint_list.pop(0)
else:
# Technically we don't need total_points...
total_points = total_points + 1
if curr_datapoint.state == 'on':
total_on = total_on + 1
else:
total_off = total_off + 1
# If there are datapoints calculate the duty cycle and cast as float.
if total_points > 0:
duty_cycle = total_on / float(total_points)
else:
duty_cycle = 0
# Clobber the old file and write a new one.
outfile = open("/tmp/"+id+".fan", "w")
outfile.write(str(duty_cycle));
outfile.close()
# Add a new datapoint to the series
def update_datapoint(id, state):
# Grab the global variable
global datapoints
# Grab the time, note that this is the time of processing not the time
# the message came in. This should be close enough.
timestamp = int(time.time())
new_point = datapoint(id, timestamp, state)
# Try adding the datapoint to the series
try:
datapoints[id].append(new_point)
except KeyError:
# This must be the first datapoint for the sensor, create an array first
datapoints[id] = [];
datapoints[id].append(new_point)
def open_connect():
global session
global host
logging.info("Opening connection to "+host+"...")
while (session is None) or (session.get_socket() is None):
try:
session = telnetlib.Telnet(host, 23, timeout)
except socket.timeout:
logging.error("socket timeout")
else:
logging.info("Connected...waiting for login")
time.sleep(30)
logging.info("Sending login.")
session.write("root\r")
time.sleep(1)
logging.info("Sending password.")
session.write("tini\r")
time.sleep(10)
logging.info("Sending bbq command.")
session.write("bbq\r")
time.sleep(10)
session.write("bbq -temps \r\r")
time.sleep(10)
logging.info("Reading data.")
if len(sys.argv) != 2:
logging.error("Usage: "+sys.argv[0]+" [ip_address]")
sys.exit()
host = sys.argv[1]
timeout = 10
session = None
# Start the update thread.
update_var()
logging.info("Connecting...")
open_connect()
while True:
if (session.get_socket() is None):
open_connect()
line = session.read_until("\n",timeout);
if len(line.strip()) != 0:
# Uncomment to debug
logging.debug(str(len(line))+" "+str(int(time.time()))+" "+line)
# I have no idea why these don't work when they work above...
if line.endswith("login: "):
logging.info("Sending login.")
try:
session.write("root\r")
except:
logging.error("Couldn't login")
if line.endswith("password: "):
logging.info("Sending password.")
session.write("tini\r")
if line.endswith("/> "):
logging.info("Starting BBQ.")
session.write("bbq -temps\r")
line_arr = line.rsplit(' ', 1)
last_word = line_arr[-1].strip()
if (last_word.startswith("blwr:")):
# The very first messaure starts with the prompt, so we need to grab
# the last word after the split by colon
sensorid = line_arr[0].split(':', 1)[0].rsplit(' ',1)[-1]
status = last_word.split(':', 1)[1]
if len(sensorid) == 16:
update_datapoint(sensorid,status)
```
|
{
"source": "jd-boyd/corker",
"score": 2
}
|
#### File: corker/corker/app.py
```python
from __future__ import absolute_import
import logging
from routes.util import URLGenerator
from webob import exc
from webob.dec import wsgify
from webob.response import Response
from corker.controller import BaseController
log = logging.getLogger(__name__)
class Application(object):
"""Give it a mapper, and it will return a wsgi app that gets routes via
the mapper and executes them."""
def __init__(self, mapper, **config):
self.map = mapper
self.config = config
@wsgify
def __call__(self, req):
results = self.map.routematch(environ=req.environ)
if not results:
return exc.HTTPNotFound()
match, route = results
link = URLGenerator(self.map, req.environ)
if route.redirect:
# Taken from the routes middleware module
route_name = '_redirect_%s' % id(route)
location = link(route_name, **match)
# Build the response manually so we don't have to try to map the
# route status to a specific webob exception
redirect_response = Response(status=route.redirect_status)
redirect_response.location = location
return redirect_response
match_controller = match.get('controller', None)
if not callable(match_controller):
log.error('Unsupported route match: %s', match)
return exc.HTTPNotFound()
req.urlvars = ((), match)
req.link = link
controller = match_controller(req, **self.config)
return controller()
```
#### File: corker/tests/test_controller.py
```python
from __future__ import absolute_import, print_function
from nose.tools import eq_
from corker.controller import BaseController, route
def test_route():
@route('bob')
def meth():
pass
eq_(meth._route, [(('bob',), {})])
def test_double_route():
@route('bob')
@route('fred')
def meth():
pass
eq_(meth._route, [(('fred',), {}), (('bob',), {})])
def test_config():
import webob
class Index(BaseController):
@route('')
def index(self):
return Response('Hi index!\n')
i = Index({}, bdb={'a': 1})
print(i.bdb)
eq_(i.bdb, {'a': 1})
```
|
{
"source": "jd-boyd/typednamedtuple",
"score": 3
}
|
#### File: typednamedtuple/tests/test_namedtuple.py
```python
from __future__ import absolute_import, print_function, unicode_literals
import copy
import pickle
from collections import OrderedDict
from random import choice
import string
import six
if six.PY2:
import cPickle
from nose.tools import eq_
from nose import SkipTest
from typednamedtuple import TypedNamedTuple, TProp
from tests.test_basic import assert_exception
def test_tupleness():
class Point(TypedNamedTuple):
"""Point(x, y)"""
x = TProp(int)
y = TProp(int)
p = Point(11, 22)
assert isinstance(p, tuple)
eq_(p, (11, 22)) # matches a real tuple
eq_(tuple(p), (11, 22)) # coercable to a real tuple
eq_(list(p), [11, 22]) # coercable to a list
eq_(max(p), 22) # iterable
eq_(max(*p), 22) # star-able
x, y = p
eq_(p, (x, y)) # unpacks like a tuple
eq_((p[0], p[1]), (11, 22)) # indexable like a tuple
with assert_exception(IndexError):
p.__getitem__(3)
eq_(p.x, x)
eq_(p.y, y)
with assert_exception(AttributeError):
eval('p.z', locals())
def test_factory():
class Point(TypedNamedTuple):
"""Point(x, y)"""
x = TProp(int)
y = TProp(int)
eq_(Point.__name__, 'Point')
eq_(Point.__slots__, ())
eq_(Point.__module__, __name__)
eq_(Point.__getitem__, tuple.__getitem__)
eq_(Point._fields, ('x', 'y'))
# Tests for:
# type has non-alpha char
# type has keyword
# type starts with digit
# field with non-alpha char
# field has keyword
# field starts with digit
# don't make sense with class based definition of typednamedtuple
# field with leading underscore is now allowed.
# Duplicate field now is last copy wins, not an exception anymore.
# Verify that numbers are allowed in names
class Point0(TypedNamedTuple):
x1 = TProp(int)
y2 = TProp(int)
eq_(Point0._fields, ("x1", "y2"))
# Test leading underscores in a typename
class _(TypedNamedTuple):
a = TProp(int)
b = TProp(int)
c = TProp(int)
# Previous unicode input tests don't make sense since strings are involved.
with assert_exception(TypeError):
Point._make([11]) # catch too few args
with assert_exception(TypeError):
Point._make([11, 22, 33]) # catch too many args
def test_factory_doc_attr():
class Point(TypedNamedTuple):
"""Point(x, y)"""
x = TProp(int)
y = TProp(int)
eq_(Point.__doc__, 'Point(x, y)')
def test_instance():
class Point(TypedNamedTuple):
"""Point(x, y)"""
x = TProp(int)
y = TProp(int)
p = Point(11, 22)
eq_(p, Point(x=11, y=22))
eq_(p, Point(11, y=22))
eq_(p, Point(y=22, x=11))
eq_(p, Point(*(11, 22)))
eq_(p, Point(**dict(x=11, y=22)))
with assert_exception(TypeError):
Point(1) # too few args
with assert_exception(TypeError,
args=("__new__() takes 2 positional arguments but 3 were given",)):
Point(1, 2, 3) # too many args
with assert_exception(TypeError):
Point(XXX=1, y=2) # wrong keyword argument
with assert_exception(TypeError):
Point(x=1) # missing keyword argument
eq_(repr(p), 'Point(x=11, y=22)')
assert '__weakref__' not in dir(p)
eq_(p, Point._make([11, 22])) # test _make classmethod
eq_(p._fields, ('x', 'y')) # test _fields attribute
eq_(p._replace(x=1), (1, 22)) # test _replace method
eq_(p._asdict(), dict(x=11, y=22)) # test _asdict method
eq_(vars(p), p._asdict()) # verify that vars() works
try:
p._replace(x=1, error=2)
except ValueError:
pass
else:
assert False, 'Did not detect an incorrect fieldname'
def test_odd_sizes():
class Zero(TypedNamedTuple):
pass
eq_(Zero(), ())
eq_(Zero._make([]), ())
eq_(repr(Zero()), 'Zero()')
eq_(Zero()._asdict(), {})
eq_(Zero()._fields, ())
class Dot(TypedNamedTuple):
d = TProp(int)
eq_(Dot(1), (1,))
eq_(Dot._make([1]), (1,))
eq_(Dot(1).d, 1)
eq_(repr(Dot(1)), 'Dot(d=1)')
eq_(Dot(1)._asdict(), {'d':1})
eq_(Dot(1)._replace(d=999), (999,))
eq_(Dot(1)._fields, ('d',))
n = 5000
names = list(set(''.join([choice(string.ascii_letters)
for j in range(10)]) for i in range(n)))
n = len(names)
class_start = 'class Big(TypedNamedTuple):\n'
class_def = class_start + "\n".join([" %s = TProp(int)" % name
for name in names])
test_globals = {
'TypedNamedTuple': TypedNamedTuple,
'TProp': TProp
}
test_locals = {}
six.exec_(class_def, test_globals, test_locals)
Big = test_locals['Big']
b = Big(*range(n))
eq_(b, tuple(range(n)))
eq_(Big._make(range(n)), tuple(range(n)))
for pos, name in enumerate(names):
eq_(getattr(b, name), pos)
_ = repr(b) # make sure repr() doesn't blow-up
d = b._asdict()
d_expected = dict(zip(names, range(n)))
eq_(d, d_expected)
b2 = b._replace(**dict([(names[1], 999),(names[-5], 42)]))
b2_expected = list(range(n))
b2_expected[1] = 999
b2_expected[-5] = 42
eq_(b2, tuple(b2_expected))
eq_(b._fields, tuple(names))
class TestTNT(TypedNamedTuple):
x = TProp(int)
y = TProp(int)
z = TProp(int)
def test_pickle():
p = TestTNT(x=10, y=20, z=30)
if six.PY2:
pickles = pickle, cPickle
else:
pickles = (pickle,)
for module in pickles:
loads = getattr(module, 'loads')
dumps = getattr(module, 'dumps')
for protocol in -1, 0, 1, 2:
q = loads(dumps(p, protocol))
eq_(p, q)
eq_(p._fields, q._fields)
def test_copy():
p = TestTNT(10, 20, 30)
for copier in copy.copy, copy.deepcopy:
q = copier(p)
eq_(p, q)
eq_(p._fields, q._fields)
```
|
{
"source": "jdbranham/graphite-web",
"score": 2
}
|
#### File: graphite/render/evaluator.py
```python
import re
import six
from graphite.errors import NormalizeEmptyResultError, InputParameterError
from graphite.functions import SeriesFunction
from graphite.logger import log
from graphite.render.grammar import grammar
from graphite.render.datalib import fetchData, TimeSeries, prefetchData
from graphite.functions.params import validateParams
from django.conf import settings
def evaluateTarget(requestContext, targets):
if not isinstance(targets, list):
targets = [targets]
pathExpressions = extractPathExpressions(requestContext, targets)
prefetchData(requestContext, pathExpressions)
seriesList = []
for target in targets:
if not target:
continue
if isinstance(target, six.string_types):
if not target.strip():
continue
target = grammar.parseString(target)
result = evaluateTokens(requestContext, target)
# we have to return a list of TimeSeries objects
if isinstance(result, TimeSeries):
seriesList.append(result)
elif result:
seriesList.extend(result)
return seriesList
def evaluateTokens(requestContext, tokens, replacements=None, pipedArg=None):
if tokens.template:
arglist = dict()
if tokens.template.kwargs:
arglist.update(dict([(kwarg.argname, evaluateScalarTokens(kwarg.args[0])) for kwarg in tokens.template.kwargs]))
if tokens.template.args:
arglist.update(dict([(str(i+1), evaluateScalarTokens(arg)) for i, arg in enumerate(tokens.template.args)]))
if 'template' in requestContext:
arglist.update(requestContext['template'])
return evaluateTokens(requestContext, tokens.template, arglist)
if tokens.expression:
if tokens.expression.pipedCalls:
# when the expression has piped calls, we pop the right-most call and pass the remaining
# expression into it via pipedArg, to get the same result as a nested call
rightMost = tokens.expression.pipedCalls.pop()
return evaluateTokens(requestContext, rightMost, replacements, tokens)
return evaluateTokens(requestContext, tokens.expression, replacements)
if tokens.pathExpression:
expression = tokens.pathExpression
if replacements:
for name in replacements:
if expression == '$'+name:
val = replacements[name]
if not isinstance(val, six.string_types):
return val
elif re.match(r'^-?[\d.]+$', val):
return float(val)
else:
return val
else:
expression = expression.replace('$'+name, str(replacements[name]))
return fetchData(requestContext, expression)
if tokens.call:
if tokens.call.funcname == 'template':
# if template propagates down here, it means the grammar didn't match the invocation
# as tokens.template. this generally happens if you try to pass non-numeric/string args
raise InputParameterError("invalid template() syntax, only string/numeric arguments are allowed")
if tokens.call.funcname == 'seriesByTag':
return fetchData(requestContext, tokens.call.raw)
try:
func = SeriesFunction(tokens.call.funcname)
except KeyError:
msg = 'Received request for unknown function: {func}'.format(func=tokens.call.funcname)
log.warning(msg)
# even if input validation enforcement is disabled, there's nothing else we can do here
raise InputParameterError(msg)
rawArgs = tokens.call.args or []
if pipedArg is not None:
rawArgs.insert(0, pipedArg)
args = [evaluateTokens(requestContext, arg, replacements) for arg in rawArgs]
requestContext['args'] = rawArgs
kwargs = dict([(kwarg.argname, evaluateTokens(requestContext, kwarg.args[0], replacements))
for kwarg in tokens.call.kwargs])
def handleInvalidParameters(e):
if not getattr(handleInvalidParameters, 'alreadyLogged', False):
log.warning(
'Received invalid parameters ({msg}): {func} ({args})'.format(
msg=str(e),
func=tokens.call.funcname,
args=', '.join(
argList
for argList in [
', '.join(str(arg) for arg in args),
', '.join('{k}={v}'.format(k=str(k), v=str(v)) for k, v in kwargs.items()),
] if argList
)
))
# only log invalid parameters once
setattr(handleInvalidParameters, 'alreadyLogged', True)
if settings.ENFORCE_INPUT_VALIDATION:
raise
if hasattr(func, 'params'):
try:
validateParams(tokens.call.funcname, func.params, args, kwargs)
except InputParameterError as e:
handleInvalidParameters(e)
try:
return func(requestContext, *args, **kwargs)
except NormalizeEmptyResultError:
return []
except InputParameterError as e:
handleInvalidParameters(e)
return evaluateScalarTokens(tokens)
def evaluateScalarTokens(tokens):
if tokens.number:
if tokens.number.integer:
return int(tokens.number.integer)
if tokens.number.float:
return float(tokens.number.float)
if tokens.number.scientific:
return float(tokens.number.scientific[0])
raise InputParameterError("unknown numeric type in target evaluator")
if tokens.string:
return tokens.string[1:-1]
if tokens.boolean:
return tokens.boolean[0] == 'true'
if tokens.none:
return None
raise InputParameterError("unknown token in target evaluator")
def extractPathExpressions(requestContext, targets):
# Returns a list of unique pathExpressions found in the targets list
pathExpressions = set()
def extractPathExpression(requestContext, tokens, replacements=None):
if tokens.template:
arglist = dict()
if tokens.template.kwargs:
arglist.update(dict([(kwarg.argname, evaluateScalarTokens(kwarg.args[0])) for kwarg in tokens.template.kwargs]))
if tokens.template.args:
arglist.update(dict([(str(i+1), evaluateScalarTokens(arg)) for i, arg in enumerate(tokens.template.args)]))
if 'template' in requestContext:
arglist.update(requestContext['template'])
extractPathExpression(requestContext, tokens.template, arglist)
elif tokens.expression:
extractPathExpression(requestContext, tokens.expression, replacements)
if tokens.expression.pipedCalls:
for token in tokens.expression.pipedCalls:
extractPathExpression(requestContext, token, replacements)
elif tokens.pathExpression:
expression = tokens.pathExpression
if replacements:
for name in replacements:
if expression != '$'+name:
expression = expression.replace('$'+name, str(replacements[name]))
pathExpressions.add(expression)
elif tokens.call:
# if we're prefetching seriesByTag, pass the entire call back as a path expression
if tokens.call.funcname == 'seriesByTag':
pathExpressions.add(tokens.call.raw)
else:
for a in tokens.call.args:
extractPathExpression(requestContext, a, replacements)
for target in targets:
if not target:
continue
if isinstance(target, six.string_types):
if not target.strip():
continue
target = grammar.parseString(target)
extractPathExpression(requestContext, target)
return list(pathExpressions)
```
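To illustrate the piped-call handling in evaluateTokens above: the right-most piped call is popped and receives the rest of the expression as its first argument, so a piped target evaluates the same as its nested form. The target strings below are illustrative examples, not taken from this repo's tests; the exact grammar lives in graphite.render.grammar.

```python
# Conceptually, both targets resolve to the same call tree under the
# pipedCalls handling above.
piped = "my.metric.cpu|summarize('1hour','avg')|alias('cpu hourly')"
nested = "alias(summarize(my.metric.cpu, '1hour', 'avg'), 'cpu hourly')"
```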
|
{
"source": "jdbrice/root-io",
"score": 2
}
|
#### File: root-io/rootio/ROOT.py
```python
import logging
from . import UnZip
import json
from rootio.StreamerDict import Streamers
from rootio.IOData import IOData
def BIT( n ) :
return (1 << n)
class ROOT(object):
logger = logging.getLogger( "ROOT" )
@staticmethod
def AddClassMethods( classname, streamer ) :
if None == streamer :
return None
ROOT.logger.debug( "AddClassMethods : Missing Impl" )
return streamer
@staticmethod
def GetArrayKind( type_name ) :
ROOT.logger.debug( "GetArrayKind( %s )", type_name )
# HERE
if "TString" == type_name or "string" == type_name :
return 0
if type_name in Streamers.CustomStreamers and 'TString' == Streamers.CustomStreamers[ type_name ] :
return 0
if len(type_name) < 7 or -1 == type_name.find('TArray') :
return -1
# key is string type_name
# value is the enum type id
array_types = {
"TArrayI" : IOData.kInt,
"TArrayD" : IOData.kDouble,
"TArrayF" : IOData.kFloat,
"TArrayS" : IOData.kShort,
"TArrayC" : IOData.kChar,
"TArrayL" : IOData.kLong,
"TArrayL64" : IOData.kLong64,
}
if type_name in array_types :
return array_types[ type_name ]
return -1
@staticmethod
def CreateMemberSimpleStreamer( name, code ) :
def streamer_func( buf, obj ) :
obj[name] = buf.ntox( code )
return streamer_func
@staticmethod
def CreateMember (element, file) :
# create member entry for streamer element, which is used for reading of such data
ROOT.logger.debug( "CreateMember( element=%s, file=%s )", element, file )
found = False
member = {
"name": element['fName'],
"type": element['fType'],
"fArrayLength": element['fArrayLength'],
"fArrayDim": element['fArrayDim'],
"fMaxIndex": element['fMaxIndex']
}
if "BASE" == element['fTypeName'] :
if ROOT.GetArrayKind( member['name'] ) > 0 :
# this is workaround for arrays as base class
# we create 'fArray' member, which read as any other data member
member['name'] = 'fArray'
member['type'] = IOData.kAny
else :
# create streamer for base class
member['type'] = IOData.kBase;
# this.GetStreamer(element.fName);
t = member['type']
simple = {
IOData.kShort: "h",
IOData.kInt: "i",
IOData.kCounter: "i",
IOData.kLong: "u",
IOData.kLong64: "u",
IOData.kDouble: "d",
IOData.kFloat: "f",
IOData.kLegacyChar: "B",
IOData.kUChar: "B",
IOData.kUShort: "H",
IOData.kBits: "I",
IOData.kUInt: "I",
IOData.kULong64: "U",
IOData.kULong: "U"
}
if t == IOData.kBase :
found = True
member['base'] = element['fBaseVersion'] # indicate base class
member['basename'] = element['fName']; # keep class name
def func(buf, obj) :
buf.ClassStreamer( obj, member['basename'] )
member['func'] = func
if member['type'] in simple :
found = True
member['func'] = ROOT.CreateMemberSimpleStreamer( member['name'], simple[ member['type'] ] )
return member
if t == IOData.kBool :
found = True
def func( buf, obj ) :
obj[member['name']] = True if buf.ntou1() != 0 else False
member['func'] = func
memberL = [
(IOData.kBool),
(IOData.kInt),
(IOData.kCounter),
(IOData.kDouble),
(IOData.kUChar),
(IOData.kShort),
(IOData.kUShort),
(IOData.kBits),
(IOData.kUInt),
(IOData.kULong),
(IOData.kULong64),
(IOData.kLong),
(IOData.kLong64),
(IOData.kFloat)
]
if (t - IOData.kOffsetL) in memberL :
found = True
if element['fArrayDim'] < 2 :
member['arrlength'] = element['fArrayLength']
def func( buf, obj ) :
                    logging.getLogger("memberL").info( "member %s", member )
obj[member['name']] = buf.ReadFastArray( member['arrlength'], member['type'] - IOData.kOffsetL )
member[ 'func' ] = func
else :
member['arrlength'] = element['fMaxIndex'][ element['fArrayDim'] - 1 ]
member['minus1'] = True
def rnda( buf, obj ) :
def rfa( buf1, handle ) :
                        logging.getLogger("memberL").info( "member %s", member )
return buf1.ReadFastArray( handle['arrlength'], handle['type'] - IOData.kOffsetL )
obj[member['name']] = buf.ReadNdimArray( member, rfa )
member['func'] = rnda
if t == IOData.kOffsetL+IOData.kChar :
found = True
if element['fArrayDim'] < 2 :
member['arrlength'] = element['fArrayLength'];
def func( buf, obj ) :
obj[member['name']] = buf.ReadFastString(member['arrlength']);
member['func'] = func
else :
member['minus1'] = True # one dimension is used for char*
member['arrlength'] = element['fMaxIndex'][ element['fArrayDim']-1 ]
def rnda( buf, obj ) :
def rfs( buf1, handle ) :
return buf1.ReadFastString( handle['arrlength'])
                        obj[ member['name'] ] = buf.ReadNdimArray( member, rfs )
                    member['func'] = rnda
if (t - IOData.kOffsetP) in memberL :
found = True
member['cntname'] = element['fCountName'];
def func( buf, obj ) :
v = buf.ntou1()
if 1 == v :
# ROOT.getLogger("memberL").info( "obj \n%s, member \n%s ", json.dumps( {k:v for k, v in obj.iteritems() if k is not "func"} , indent=4), json.dumps({k:v for k, v in member.iteritems() if k is not "func"}, indent=4) )
obj[ member['name'] ] = buf.ReadFastArray( obj[ member['cntname'] ], member['type'] - IOData.kOffsetP )
else :
obj[ member['name'] ] = []
member['func'] = func
if t == (IOData.kOffsetP+IOData.kChar) :
found = True
member['cntname'] = element['fCountName'];
def func( buf, obj ) :
v = buf.ntou1()
if 1 == v :
obj[member['name']] = buf.ReadFastString(obj[member['cntname']]);
else :
obj[member['name']] = None
member['func'] = func
if t == IOData.kDouble32 or t == (IOData.kOffsetL+IOData.kDouble32) or t == (IOData.kOffsetP+IOData.kDouble32):
found = True
member['double32'] = True;
# SKIP - need to fill in
if t == IOData.kAnyP or t == IOData.kObjectP :
found = True
def func( buf, obj ) :
def roa( buf1, handle ) :
return buf1.ReadObjectAny()
obj[ member['name'] ] = buf.ReadNdimArray( member, roa )
member['func'] = func
if t == IOData.kAny or t == IOData.kAnyp or t == IOData.kObjectp or t == IOData.kObject:
found = True
classname = element[ 'fName' ] if "BASE" == element['fTypeName'] else element['fTypeName']
if classname[-1] == "*" :
classname = classname[ 0 : -1 ]
arr_kind = ROOT.GetArrayKind( classname )
if arr_kind > 0 :
member['arrkind'] = arr_kind
def func( buf, obj ) :
obj[ member['name']] = buf.ReadFastArray( buf.ntou4(), member['arrkind'] )
member['func'] = func
elif arr_kind == 0 :
def func( buf, obj ) :
obj[ member['name'] ] = buf.ReadTString()
member['func'] = func
else :
member['classname'] = classname
if element['fArrayLength'] > 1 :
def func( buf, obj ) :
def rcs( buf1, handle ) :
return buf1.ClassStreamer( {}, handle['classname'] )
obj[ member['name'] ] = buf.ReadNdimArray( member, rcs )
member['func'] = func
else :
def func( buf, obj ) :
obj[ member['name'] ] = buf.ClassStreamer( {}, member['classname'] )
member['func'] = func
# Skip - need to fill in
if t == IOData.kTString:
found = True
def func( buf, obj ) :
                obj[ member['name'] ] = buf.ReadTString()
member['func'] = func
if not found :
ROOT.logger.error( "Not FOUND : %d", t )
return member
@staticmethod
def GetTypeId( typename, recurse = True ) :
# optimize by not doing this inside func
type_ids = {
"bool": IOData['kBool'],
"Bool_t": IOData['kBool'],
"char": IOData['kChar'],
"signed char": IOData['kChar'],
"Char_t": IOData['kChar'],
"Color_t": IOData['kShort'],
"Style_t": IOData['kShort'],
"Width_t": IOData['kShort'],
"short": IOData['kShort'],
"Short_t": IOData['kShort'],
"int": IOData['kInt'],
"EErrorType": IOData['kInt'],
"Int_t": IOData['kInt'],
"long": IOData['kLong'],
"Long_t": IOData['kLong'],
"float": IOData['kFloat'],
"Float_t": IOData['kFloat'],
"double": IOData['kDouble'],
"Double_t": IOData['kDouble'],
"unsigned char": IOData['kUChar'],
"UChar_t": IOData['kUChar'],
"unsigned short": IOData['kUShort'],
"UShort_t": IOData['kUShort'],
"unsigned": IOData['kUInt'],
"unsigned int": IOData['kUInt'],
"UInt_t": IOData['kUInt'],
"unsigned long": IOData['kULong'],
"ULong_t": IOData['kULong'],
"int64_t": IOData['kLong64'],
"long long": IOData['kLong64'],
"Long64_t": IOData['kLong64'],
"uint64_t": IOData['kULong64'],
"unsigned long long": IOData['kULong64'],
"ULong64_t": IOData['kULong64'],
"Double32_t": IOData['kDouble32'],
"Float16_t": IOData['kFloat16'],
"char*": IOData['kCharStar'],
"const char*": IOData['kCharStar'],
"const Char_t*": IOData['kCharStar'],
}
if typename in type_ids :
return type_ids[ typename ]
if not recurse :
return -1
if typename in Streamers.CustomStreamers :
replace = Streamers.CustomStreamers[ typename ];
if type( replace ) == str :
                return ROOT.GetTypeId(replace, True)
return -1;
```
```
#### File: root-io/rootio/TDirectory.py
```python
from rootio import TBuffer
class TDirectory (object) :
def __init__(self, file, dirname, cycle) :
self.fFile = file
self._typename = "TDirectory"
self.dir_name = dirname
self.dir_cycle = cycle
self.fKeys = []
def to_json( self ) :
obj = {
'fSeekKeys' : self['fSeekKeys'],
'dir_name' : self['dir_name'],
'dir_cycle' : self['dir_cycle'],
'fDatimeM' : self['fDatimeM'],
'fNbytesName' : self['fNbytesName'],
'fTitle' : self['fTitle'],
'fDatimeC' : self['fDatimeC'],
'fSeekParent' : self['fSeekParent'],
'fKeys' : self['fKeys'],
'fSeekDir' : self['fSeekDir'],
'fNbytesKeys' : self['fNbytesKeys'],
# "_typename": self._typename,
# "dir_name" : self.dir_name,
# "dir_cycle" : self.dir_cycle,
# "fKeys" : self.fKeys
}
return obj
def __getitem__(self, key):
return getattr(self, key)
def __setitem__(self, key, value) :
object.__setattr__( self, key, value )
def list_keys(self, prefix) :
for k in self.fKeys :
fqn=prefix + "/" + k['fName']
print( "[%s]: " %( k['fClassName'] ) + fqn )
if "TDirectory" == k['fClassName'] :
tdir = self.fFile.ReadObject( fqn )
tdir.list_keys( prefix=fqn )
def GetKey(self, keyname, cycle ) :
for i in range( 0, len(self.fKeys) ) :
if self.fKeys[i]['fName'] == keyname and self.fKeys[i]['fCycle'] == cycle :
return self.fKeys[i]
pos = keyname.rfind( '/' )
while pos > 0 :
dirname = keyname[0:pos]
subname = keyname[pos+1:]
dirkey = self.GetKey( dirname, 1 )
if None != dirkey and "fClassName" in dirkey and "TDirectory" in dirkey['fClassName'] :
tdir = self.ReadObject( dirname )
if None != tdir :
return tdir.GetKey( subname, cycle )
pos = keyname.rfind( '/', 0, pos-1 )
return None
return None
#TODO : add second part of impl
def ReadKeys(self, objbuf ) :
objbuf.ClassStreamer( self, 'TDirectory' )
if self.fSeekKeys <= 0 or self.fNbytesKeys <= 0 :
return None
file = self.fFile
blob = file.ReadBuffer([ self.fSeekKeys, self.fNbytesKeys ] )
if None == blob :
return None
buf = TBuffer( blob, 0, file, None )
buf.ReadTKey()
nkeys = buf.ntoi4()
for i in range(0, nkeys) :
self.fKeys.append( buf.ReadTKey() )
file.fDirectories.append( self )
```
|
{
"source": "jdbrice/RootMD",
"score": 2
}
|
#### File: RootMD/rootmd/__main__.py
```python
import json
import requests
import os
from xmlrpc.client import boolean
import mistletoe as md
from mistletoe.ast_renderer import ASTRenderer
from rich import inspect
from rich.console import Console
from rich.markdown import Markdown
from rich.logging import RichHandler
import logging
import argparse
from rootmd import RootHtmlRenderer
from rootmd import RootMdRenderer
from rootmd import Md2MacroRenderer
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
import time
# watchdog observer
observer = Observer()
# setup our logger
FORMAT = "%(message)s"
logging.basicConfig(
level="NOTSET", format=FORMAT, datefmt="[%X]", handlers=[RichHandler()]
)
log = logging.getLogger("rich")
"""
Entry point for RootMd
"""
parser = argparse.ArgumentParser(description='Convert Markdown with inline c++ code to ROOT output.', prog="rootmd")
parser.add_argument(
'input', nargs="?", help='input Markdown file to execute and convert', default=""
)
parser.add_argument(
'--output', help='output filename default <input>.<ext> where <ext> is determined by the chosen format, default html, output can include {input} to allow substitution of input filename, or {path} for full path, or {basepath} for path to input file, or {ext} for default output extension for format specified. TODO', default=""
)
parser.add_argument(
'--format', help='output format', default="html",
choices=["html", "md", "obsidian", "json", "terminal", "macro"]
)
parser.add_argument(
'--embed', help='embed images as base 64', default=False, action="store_true"
)
parser.add_argument(
'--asset-dir',
    help='specify asset output directory; paths are NOT re-written unless using the obsidian format', default=""
)
parser.add_argument(
'--verbosity', help='specify log verbosity', default=0, type=int
)
parser.add_argument(
'--watch', help='watch a file or directory for changes')
parser.add_argument(
'--run',
help='command to run after processing a file. The filename can be substituted into the command string with {{file}}. Example: --run="echo {{file}}"')
parser.add_argument(
'--clean',
help='clean artifacts, caution - should only use with embed or asset copy to <asset-dir>', action='store_true', default=False)
parser.add_argument(
'--no-exec',
help='Do not execute any code blocks, just process file (useful for viewing and testing conversion)', action='store_true', default=False)
parser.add_argument(
'--html-template',
help='Template HTML file should include {js}, {css}, {content} ... TODO', default="")
parser.add_argument(
'--css',
help='CSS used for the HTML output, overrides default ... TODO', default="")
parser.add_argument(
'--share',
help='Upload to sharing server', default=False, action='store_true')
args = parser.parse_args()
if "input" not in args and "watch" not in args :
log.error( "Must provide one of [\"[input]\" or \"--watch\" to specify input files ]" )
exit()
EXTENSIONS = {
"html" : ".html",
"md" : ".md",
"obsidian" : ".md",
"json" : ".json",
"terminal" : ".md",
"macro" : ".C"
}
console = Console()
log.setLevel( logging.INFO )
VERBOSITY = args.verbosity
if VERBOSITY >= 10 :
log.setLevel( logging.DEBUG )
if VERBOSITY >= 20 :
log.setLevel( logging.INFO )
if VERBOSITY >= 30 :
log.setLevel( logging.WARNING )
if VERBOSITY >= 40 :
log.setLevel( logging.ERROR )
if VERBOSITY >= 50 :
log.setLevel( logging.CRITICAL )
ASSET_PREFIX=""
EMBED = args.embed
ASSET_DIR = args.asset_dir
if args.output == "" and args.input != "":
args.output = args.input + EXTENSIONS[args.format] # ext will be added later
# if VERBOSITY <= 1:
# inspect( args )
basename = os.path.basename( args.output )
# handle obsidian's way of removing the attachment/ (or whatever dir) from paths
if args.format == "obsidian":
ASSET_PREFIX = os.path.splitext(basename)[0]
ASSET_DIR = os.path.join( ASSET_DIR, ASSET_PREFIX )
if not os.path.isdir( ASSET_DIR ):
log.info( "Making asset directory: %s" % ASSET_DIR )
os.mkdir( ASSET_DIR )
#input working dir
log.info( "I am %s" % __file__ )
inwdir = os.path.dirname(os.path.abspath(args.input))
log.info( "input working directory: %s" % inwdir )
if ASSET_DIR == "" and input != "":
ASSET_DIR = inwdir
class RootMd :
def __init__(self, *args, **kwargs) -> None:
log.debug("args:")
# inspect( args[0] )
self.args = args[0]
log.debug( self.args.input )
self.title = ""
self.last_run_input = ""
self.last_run_time = 0
def run(self, input):
delta_time = time.time() - self.last_run_time
self.args.input = input
# log.info( "delta_time = %d" % delta_time )
if self.args.input == self.last_run_input and delta_time < 1 :
return
self.last_run_input = self.args.input
artifacts = []
if not os.path.exists(self.args.input) :
log.error("File %s does not exist" % ( self.args.input ) )
return
inwdir = os.path.dirname(os.path.abspath(self.args.input))
log.info( "input working directory: %s" % inwdir )
ASSET_DIR = self.args.asset_dir
if ASSET_DIR == "":
ASSET_DIR = inwdir
log.info( "Processing %s to %s output format" % (self.args.input, self.args.format ) )
self.title = args.input
theRenderer = RootHtmlRenderer()
theRenderer.set( embed=EMBED, asset_dir=ASSET_DIR, asset_prefix=ASSET_PREFIX )
if args.format == "md" or args.format == "obsidian" or args.format == "terminal":
theRenderer = RootMdRenderer()
theRenderer.set( embed=EMBED, asset_dir=ASSET_DIR, asset_prefix=ASSET_PREFIX )
if args.format == "ast" :
theRenderer = ASTRenderer()
if args.format == "macro" :
theRenderer = Md2MacroRenderer()
with open( args.input , 'r') as fin:
html = theRenderer.render(md.Document(fin))
artifacts = theRenderer.artifacts
if args.format == "terminal" :
console.print( Markdown(html) )
return
output = args.output
# if "" == output :
# output = args.input + "." + args.format
if output == "":
output = input + EXTENSIONS[args.format] # ext will be added later
log.info( "Writing output to %s" % output )
with open( output , "w", encoding="utf-8", errors="xmlcharrefreplace") as output_file:
output_file.write(html)
token = os.environ.get( "ROOTMD_TOKEN", "" )
if "share" in args and args.share and token != "":
files = { output : html }
for fn in artifacts:
try :
with open( fn, 'rb' ) as f:
files[ fn ] = f.read()
except Exception as e:
log.error( e )
log.info( "SHARE:" )
for fi in files :
log.info( "file_name: %s" % fi )
res = requests.post(
"https://rootmd.jdbburg.com/upload",
data={ "token": token},
files = files )
log.info( res.json() )
if res.status_code == 200 and res.json()["status"] == True and len(res.json()["data"]) >= 1:
url = res.json()["data"][0]["link"]
# "https://rootmd.jdbburg.com/bfbf11835023423dd316586acb9fbbb3/confidenceintervals.md.html"
log.info( "Uploaded to:" )
print( url )
self.last_run_time = time.time()
rootmd = RootMd(args)
class Handler(FileSystemEventHandler):
@staticmethod
def on_any_event(event):
if event.is_directory:
return None
elif event.event_type == 'modified' :
# inspect( event )
# Event is modified, you can process it now
log.info("Watchdog received modified event - % s" % event.src_path)
rootmd.run(input=event.src_path)
if args.watch :
event_handler = Handler()
observer.schedule(event_handler, args.watch, recursive = True)
log.info( 'Watching "%s" for changes' % args.watch )
observer.start()
try:
while True:
time.sleep(5)
except:
observer.stop()
# print("Observer Stopped")
observer.join()
exit()
rootmd.run(args.input)
```
#### File: RootMD/rootmd/Md2MacroRenderer.py
```python
from mistletoe.base_renderer import BaseRenderer
import hashlib
import rich
import logging
from rich.logging import RichHandler
from .YamlBlock import YamlFence, CodeFence
FORMAT = "%(message)s"
logging.basicConfig(
level="NOTSET", format=FORMAT, datefmt="[%X]", handlers=[RichHandler()]
)
log = logging.getLogger("rich")
class Md2MacroRenderer(BaseRenderer):
def __init__(self, *extras):
super().__init__(YamlFence, CodeFence, *extras)
self.blockid = 0
self.embed = False
self.asset_prefix = ""
self.asset_dir = ""
self.artifacts = []
def set( self, **kwargs ) :
if "embed" in kwargs :
self.embed = kwargs.get( "embed" )
if "asset_dir" in kwargs :
self.asset_dir = kwargs.get( "asset_dir" )
if "asset_prefix" in kwargs:
self.asset_prefix = kwargs.get( "asset_prefix" )
# SHOULD CENTRALIZE WITH OTHER RENDERERS
def optionAsBool( self, options, n, default = False ):
# option is false by default
if n not in options :
log.info( "A" )
return default
if type( options[n] ) == bool:
log.info( "B" )
return options[n]
log.info( "C" )
return not (options.get( n, "" ).lower() == 'false' or options.get( n, "" ) == '0')
"""
Code Fence is a custom Code Block token that accepts optional arguments after the language id
example:
```cpp in:0
...
```
the above turns off echoing the input to rendered document
"""
def render_code_fence( self, token ):
log.info( "render_code_fence" )
# rich.inspect( token )
return self.render_block_code( token )
def render_block_code(self, token):
log.info( "options:" )
log.info( token.options )
if token.language != "cpp" and token.options.get( "exec", "" ) != "cpp":
return self.render_raw_text( token )
log.info( "exec : %s" % ( self.optionAsBool( token.options, "exec", True ) ) )
if self.optionAsBool( token.options, "exec", True ) == False:
return ""
# return self.render_raw_text( token )
# code =token.children[0].content
hash = hashlib.md5( token.children[0].content.encode() )
output = "// %%>{id} ----------ROOTMD_START_BLOCK{hash}----------\n".format( id = self.blockid, hash = hash.hexdigest() )
output += 'printf( "START_BLOCK%%>{id}<%%{hash} {{\\n" );\n'.format( id = self.blockid, hash = hash.hexdigest() )
for l in token.children[0].content.splitlines():
output += " " + l + "\n"
output += 'printf( "END_BLOCK%%<{id}>%%{hash} }}\\n" );\n'.format( id = self.blockid, hash = hash.hexdigest() )
output += "// %%<{id} ----------ROOTMD_END_BLOCK{hash}----------\n".format( id = self.blockid, hash = hash.hexdigest() )
self.blockid += 1
return output
def render_inline_code(self, token):
return "`" + self.render_raw_text(token) + "`"
def render_thematic_break(self, token):
# inspect( token )
return "---"
@staticmethod
def render_line_break(token):
log.debug( 'line break' )
return '\n' if token.soft else '\n'
def render_inner( self, token ):
return ''.join(map(self.render_raw_text, token.children))
def render_raw_text(self, token):
"""
Default render method for RawText. Simply return token.content.
"""
if hasattr(token, 'content'):
output = ""
for l in token.content.splitlines():
output += "//" + l + "\n"
return output
return self.render_inner( token )
def render_to_plain(self, token):
log.info( "render_to_plain" )
# if hasattr(token, 'children'):
# inner = [self.render_to_plain(child) for child in token.children]
# return ( '//' + ''.join(inner))
# return ("//" + token.content)
return ""
def render_heading(self, token):
inner = self.render_inner(token)
out = "#" * int(token.level) + " " + inner
return out
def render_document(self, token):
# inner = '\n'.join([self.render(child) for child in token.children])
inner = ""
parts = []
for child in token.children :
log.info( child.__class__.__name__ )
if "CodeFence" == child.__class__.__name__ or "InlineCode" == child.__class__.__name__:
parts.append( self.render( child ) )
else:
parts.append( self.render_raw_text( child ) )
# rich.inspect( child )
inner = '\n'.join( parts )
return inner
```
#### File: RootMD/rootmd/RootBaseRenderer.py
```python
from mistletoe.base_renderer import BaseRenderer
from shutil import move
import re
import logging
from .Executor import RootExecutor
from .Executor import GnuPlotExecutor
from .Executor import RnuPlotExecutor
from .YamlBlock import YamlFence, CodeFence
import yaml
from rich.logging import RichHandler
FORMAT = "%(message)s"
logging.basicConfig(
level="NOTSET", format=FORMAT, datefmt="[%X]", handlers=[RichHandler()]
)
log = logging.getLogger("rich")
"""
Base renderer for mistletoe with ROOT code execution and asset injection.
"""
class RootBaseRenderer(BaseRenderer):
def __init__(self, *extras):
super().__init__(YamlFence, CodeFence, *extras)
# externlal code executors
self.gnuplot = GnuPlotExecutor()
self.rnuplot = RnuPlotExecutor()
self.root = RootExecutor()
self.executors = {
'cpp' : self.root,
'gnuplot' : self.gnuplot,
'rnuplot' : self.rnuplot,
}
self.handlers = {
'cpp' : self.handle_root,
'gnuplot' : self.handle_gnuplot,
'rnuplot' : self.handle_rnuplot
}
def render_yaml_fence(self, token):
log.info("YAML Fence")
try:
y = yaml.safe_load( token.children[0].content )
self.process_yaml( y )
# Do something here
except yaml.YAMLError as e:
log.error( e )
token.language = "yaml"
if self.yaml.get( "hide", False ) == True or self.yaml.get( "hide", False ) == "1" or self.yaml.get( "hide", False ) == 1 :
return ""
return super().render_block_code(token)
def render_inline_code( self, token):
# rich.inspect( token )
code = token.children[0].content
# evaluate code inside BASH style blocks
m = re.search( "\${(.*?)}", code )
if m:
# log.info( "--->CODE: %s" % ( m.groups()[0] ) )
evalcode = m.groups()[0]
evalcode += ';printf("\\n");'
log.info( 'eval: "%s"' % evalcode )
output, err, imgs = self.run_cmd( evalcode )
log.info( "stdout: %s" % ( output ) )
log.info( "stderr: %s" % ( err ) )
return output
return super().render_inline_code( token )
def render_code_fence( self, token ):
log.info( "render_code_fence" )
# rich.inspect( token )
return self.render_block_code( token )
def render_block_code( self, token ) :
pass
def process_yaml( self, yaml ):
pass
def addExecutor( self, name, exec, handler ):
self.executors[ name ] = exec
        self.handlers[ name ] = handler
def handle_root( self ) :
pass
def handle_gnuplot( self ) :
pass
def handle_rnuplot( self ) :
pass
```
|
{
"source": "jdbrice/star-sw",
"score": 3
}
|
#### File: star-sw/scripts/gdrive.py
```python
import requests
def download_file_from_google_drive(id):
def get_confirm_token(response):
for key, value in response.cookies.items():
if key.startswith('download_warning'):
return value
return None
URL = "https://docs.google.com/uc?export=download"
session = requests.Session()
response = session.get(URL, params = { 'id' : id }, stream = True)
token = get_confirm_token(response)
if token:
params = { 'id' : id, 'confirm' : token }
response = session.get(URL, params = params, stream = True)
sys.stdout.buffer.write(response.content)
if __name__ == "__main__":
import sys
    if len(sys.argv) != 2:
print("Usage: python gdrive.py drive_file_id")
else:
# TAKE ID FROM SHAREABLE LINK
file_id = sys.argv[1]
download_file_from_google_drive(file_id)
```
|
{
"source": "JDBumgardner/stone_ground_hearth_battles",
"score": 3
}
|
#### File: hearthstone/battlebots/bot_types.py
```python
import random
import typing
from typing import List, Callable
from hearthstone.simulator.agent.actions import StandardAction, DiscoverChoiceAction, RearrangeCardsAction
from hearthstone.simulator.agent.agent import Agent
if typing.TYPE_CHECKING:
from hearthstone.simulator.core.cards import MonsterCard
from hearthstone.simulator.core.player import Player
class PriorityFunctionBot(Agent):
def __init__(self, authors: List[str], priority: Callable[['Player', 'MonsterCard'], float], seed: int):
if not authors:
authors = ["JB", "AS", "ES", "JS", "DVP"]
self.authors = authors
self.priority = priority
self.local_random = random.Random(seed)
async def discover_choice_action(self, player: 'Player') -> DiscoverChoiceAction:
discover_cards = player.discover_queue[0].items
discover_cards = sorted(discover_cards, key=lambda card: self.priority(player, card), reverse=True)
return DiscoverChoiceAction(player.discover_queue[0].items.index(discover_cards[0]))
async def rearrange_cards(self, player: 'Player') -> RearrangeCardsAction:
permutation = list(range(len(player.in_play)))
self.local_random.shuffle(permutation)
return RearrangeCardsAction(permutation)
async def buy_phase_action(self, player: 'Player') -> StandardAction:
pass
```
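A hedged example of building a concrete bot from the class above. Only the constructor signature comes from PriorityFunctionBot; the card attributes used inside the priority lambda (`tier`, `attack`) are assumptions about MonsterCard and are read defensively. Note that `buy_phase_action` is left unimplemented above, so subclasses are expected to supply it; this sketch only exercises the discover/rearrange logic.

```python
from hearthstone.battlebots.bot_types import PriorityFunctionBot

bot = PriorityFunctionBot(
    authors=["example"],
    # Assumed attributes; replace with a real card-valuation function.
    priority=lambda player, card: getattr(card, "tier", 0) + getattr(card, "attack", 0) / 10.0,
    seed=42,
)
```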
#### File: hearthstone/battlebots/no_action_bot.py
```python
import typing
from hearthstone.simulator.agent.actions import StandardAction, EndPhaseAction, DiscoverChoiceAction, \
RearrangeCardsAction, FreezeDecision
from hearthstone.simulator.agent.agent import Agent
if typing.TYPE_CHECKING:
from hearthstone.simulator.core.player import Player
from hearthstone.simulator.core.player import DiscoverIndex
class NoActionBot(Agent):
authors = ["<NAME>"]
async def rearrange_cards(self, player: 'Player') -> RearrangeCardsAction:
return RearrangeCardsAction([])
async def buy_phase_action(self, player: 'Player') -> StandardAction:
return EndPhaseAction(FreezeDecision.NO_FREEZE)
async def discover_choice_action(self, player: 'Player') -> DiscoverChoiceAction:
return DiscoverChoiceAction(DiscoverIndex(0))
```
#### File: hearthstone/battlebots/ordering.py
```python
from hearthstone.simulator.agent import RearrangeCardsAction
from hearthstone.simulator.core.card_pool import *
from hearthstone.simulator.core.tavern import Player
def rate_position(card: 'MonsterCard') -> float:
if type(card) is MonstrousMacaw:
return 0.0
if type(card) is UnstableGhoul or type(card) is SpawnOfNzoth:
return 1.0
if type(card) is SelflessHero or type(card) is GlyphGuardian or type(card) is DeflectOBot:
return 2.0
if type(card) is OldMurkeye:
return 3.0
if type(card) is InfestedWolf or type(card) is SavannahHighmane or type(card) is SecurityRover:
return 4.5
if type(card) is DragonspawnLieutenant or type(card) is Imprisoner or type(card) is ImpGangBoss or type(
card) is TwilightEmissary:
return 5.0
if type(card) is ScavengingHyena or type(card) is RatPack:
return 6.0
if type(card) is PackLeader or type(card) is MurlocWarleader or type(card) is Khadgar or type(
card) is SouthseaCaptain:
return 6.5
if type(card) is MamaBear or type(card) is SoulJuggler or type(card) is RipsnarlCaptain:
return 7.0
return 4.0
def naive_rearrange_cards(player: 'Player') -> RearrangeCardsAction:
in_play = list(enumerate(player.in_play))
in_play.sort(key=lambda pair: rate_position(pair[1]))
return RearrangeCardsAction([pair[0] for pair in in_play])
```
#### File: hearthstone/ladder/ladder.py
```python
import json
import random
from datetime import datetime
from typing import List, Callable
import trueskill
from hearthstone.battlebots.cheapo_bot import CheapoBot
from hearthstone.battlebots.get_bot_contestants import get_priority_bot_contestant_tuples, \
get_priority_heuristics_bot_contestant_tuples
from hearthstone.battlebots.no_action_bot import NoActionBot
from hearthstone.battlebots.random_bot import RandomBot
from hearthstone.battlebots.saurolisk_bot import SauroliskBot
from hearthstone.battlebots.supremacy_bot import SupremacyBot
from hearthstone.simulator.agent.agent import AnnotatingAgent
from hearthstone.simulator.core.monster_types import MONSTER_TYPES
from hearthstone.simulator.host.round_robin_host import RoundRobinHost
from hearthstone.training.pytorch.agents.pytorch_bot import PytorchBot
from hearthstone.training.pytorch.encoding.default_encoder import DefaultEncoder
from hearthstone.training.pytorch.networks.save_load import load_from_saved
class ContestantAgentGenerator:
def __init__(self, function, *args, **kwargs):
self.function = function
self.args = args
self.kwargs = kwargs
def __call__(self, *args, **kwargs):
return self.function(*self.args, **self.kwargs)
class Contestant:
def __init__(self, name, agent_generator: Callable[[], AnnotatingAgent], initial_trueskill=None):
self.name = name
self.agent_generator = agent_generator
self.elo = 1200
self.trueskill = initial_trueskill or trueskill.Rating()
self.games_played = 0
def __repr__(self):
return f'(Agent "{self.name}" Trueskill {self.trueskill.mu:.2f})'
def probability_of_win(elo1: int, elo2: int) -> float:
return 1.0 / (1.0 + 10 ** ((elo2 - elo1) / 400.0))
def update_ratings(outcome: List[Contestant]):
coefficient: float = 30
num_contestants = len(outcome)
elo_delta = [0 for _ in range(num_contestants)]
for i in range(num_contestants):
        for j in range(i + 1, num_contestants):  # compare each pair once; outcome[i] placed above outcome[j]
win_prob = probability_of_win(outcome[i].elo, outcome[j].elo)
elo_delta[i] += coefficient * (1 - win_prob)
elo_delta[j] += coefficient * (win_prob - 1)
for contestant, elo_diff in zip(outcome, elo_delta):
contestant.elo += elo_diff
new_trueskills = trueskill.rate([(contestant.trueskill,) for contestant in outcome], ranks=range(len(outcome)))
for new_trueskill, contestant in zip(new_trueskills, outcome):
contestant.trueskill = new_trueskill[0]
def print_standings(contestants: List[Contestant]):
contestants = sorted(contestants, key=lambda c: c.trueskill.mu, reverse=True)
print(contestants)
def run_tournament(contestants: List[Contestant], num_rounds=10, game_size=8):
agents = {contestant.name: contestant.agent_generator() for contestant in contestants}
for _ in range(num_rounds):
round_contestants = random.sample(contestants, k=game_size)
host = RoundRobinHost({c.name: agents[c.name] for c in round_contestants})
host.play_game()
winner_names = list(reversed([name for name, player in host.tavern.losers]))
print(host.tavern.losers[-1][1].in_play, "-", host.tavern.losers[-1][1].hero, host.tavern.losers[-1][1].name)
ranked_contestants = sorted(round_contestants, key=lambda c: winner_names.index(c.name))
update_ratings(ranked_contestants)
print_standings(contestants)
for contestant in round_contestants:
contestant.games_played += 1
def all_contestants():
    all_bots = [Contestant(f"RandomBot", lambda: RandomBot(1))]
    all_bots += [Contestant(f"NoActionBot ", lambda: NoActionBot())]
    all_bots += [Contestant(f"CheapoBot", lambda: CheapoBot(3))]
    # Bind the loop variables as default arguments so each lambda keeps its own monster type and
    # seed; otherwise Python's late binding would make every bot use the last values of t and i.
    all_bots += [Contestant(f"SupremacyBot {t}", lambda t=t, i=i: SupremacyBot(t, False, i)) for i, t in
                 enumerate([MONSTER_TYPES.MURLOC, MONSTER_TYPES.BEAST, MONSTER_TYPES.MECH, MONSTER_TYPES.DRAGON,
                            MONSTER_TYPES.DEMON, MONSTER_TYPES.PIRATE])]
    all_bots += [Contestant(f"SupremacyUpgradeBot {t}", lambda t=t, i=i: SupremacyBot(t, True, i)) for i, t in
                 enumerate([MONSTER_TYPES.MURLOC, MONSTER_TYPES.BEAST, MONSTER_TYPES.MECH, MONSTER_TYPES.DRAGON,
                            MONSTER_TYPES.DEMON, MONSTER_TYPES.PIRATE])]
    all_bots += [Contestant("SauroliskBot", lambda: SauroliskBot(5))]
    all_bots += [Contestant(name, lambda bot=bot: bot) for name, bot in get_priority_bot_contestant_tuples()]
    all_bots += [Contestant(name, lambda bot=bot: bot) for name, bot in get_priority_heuristics_bot_contestant_tuples()]
return all_bots
def saved_learningbot_1v1_contestants() -> List[Contestant]:
hparams = {
"resume": False,
'resume.from': '2020-10-18T02:14:22.530381',
'export.enabled': True,
'export.period_epochs': 200,
'export.path': datetime.now().isoformat(),
'opponents.initial': 'easiest',
'opponents.self_play.enabled': True,
'opponents.self_play.only_champions': True,
'opponents.max_pool_size': 7,
'adam.lr': 0.0001,
'batch_size': 1024,
'minibatch_size': 1024,
'cuda': True,
'entropy_weight': 0.001,
'gae_gamma': 0.999,
'gae_lambda': 0.9,
'game_size': 2,
'gradient_clipping': 0.5,
'approx_kl_limit': 0.015,
'nn.architecture': 'transformer',
'nn.hidden_layers': 1,
'nn.hidden_size': 32,
'nn.activation': 'gelu',
'nn.shared': False,
'nn.encoding.redundant': True,
'normalize_advantage': True,
'normalize_observations': False,
'num_workers': 1,
'optimizer': 'adam',
'policy_weight': 0.581166675499831,
'ppo_epochs': 8,
'ppo_epsilon': 0.1}
all_bots = []
# Jeremy has this bot, ask him for it!
all_bots += [Contestant("LearningBot94200", lambda: PytorchBot(
load_from_saved("../../data/learning/pytorch/ppo/2020-10-30T20:50:44.311231/saved_models/94200", hparams),
DefaultEncoder(),
annotate=False))]
return all_bots
def load_ratings(contestants: List[Contestant], path):
with open(path) as f:
standings = json.load(f)
standings_dict = dict(standings)
for contestant in contestants:
if contestant.name in standings_dict:
contestant.elo = standings_dict[contestant.name]["elo"]
contestant.trueskill = trueskill.Rating(standings_dict[contestant.name]["trueskill.mu"],
standings_dict[contestant.name]["trueskill.sigma"])
contestant.games_played = standings_dict[contestant.name]["games_played"]
def save_ratings(contestants: List[Contestant], path):
    ranked_contestants = sorted(contestants, key=lambda c: c.trueskill.mu, reverse=True)
standings = [
(c.name, {"elo": c.elo,
"trueskill.mu": c.trueskill.mu,
"trueskill.sigma": c.trueskill.sigma,
"games_played": c.games_played,
"last_time_updated": datetime.now().isoformat(),
"authors": c.agent_generator().authors}) for c
in ranked_contestants]
with open(path, "w") as f:
json.dump(standings, f, indent=4)
```
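The rating update in `ladder.py` uses the standard Elo expected-score formula with a K-factor (`coefficient`) of 30. A quick worked illustration of `probability_of_win` and the resulting point swing; the numbers are computed from the formula above, not taken from the project:
```python
# Expected score of a 1400-rated contestant against a 1200-rated one.
p = 1.0 / (1.0 + 10 ** ((1200 - 1400) / 400.0))
print(round(p, 2))  # 0.76

# With coefficient 30, beating the underdog is worth about 30 * (1 - 0.76) ~= 7 points,
# while an upset win by the underdog would be worth about 30 * 0.76 ~= 23 points.
```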
#### File: simulator/agent/agent.py
```python
from typing import Any
from hearthstone.simulator.agent.actions import HeroChoiceAction, DiscoverChoiceAction, \
RearrangeCardsAction, StandardAction
from hearthstone.simulator.core.player import HeroChoiceIndex
from hearthstone.simulator.core.tavern import Player
Annotation = Any
class AnnotatingAgent:
async def hero_choice_action(self, player: 'Player') -> HeroChoiceAction:
return HeroChoiceAction(HeroChoiceIndex(0))
async def annotated_rearrange_cards(self, player: 'Player') -> (RearrangeCardsAction, Annotation):
"""
        The player selects an arrangement of their board once per turn, directly before combat.
Args:
player: The player object controlled by this agent. This function should not modify it.
Returns: A tuple containing an arrangement of the player's board, and the Agent Annotation to attach to the replay.
"""
pass
async def annotated_buy_phase_action(self, player: 'Player') -> (StandardAction, Annotation):
"""
        The player chooses a buy phase action, one of:
        - purchasing a card from the store
        - summoning a card from hand to in_play
        - selling a card from hand or from in_play
        - ending the buy phase
Args:
player: The player object controlled by this agent. This function should not modify it.
Returns:
A tuple containing the Action, and the Agent Annotation to attach to the replay.
"""
pass
async def annotated_discover_choice_action(self, player: 'Player') -> (DiscoverChoiceAction, Annotation):
"""
Args:
player: The player object controlled by this agent. This function should not modify it.
Returns:
Tuple of MonsterCard to discover, and Annotation to attach to the action.
"""
pass
async def game_over(self, player: 'Player', ranking: int) -> Annotation:
"""
        Notifies the agent that the game is over and that it achieved the given rank.
        :param player: The player object controlled by this agent.
        :param ranking: Integer index 0 to 7 of where the agent placed.
        :return: An optional Annotation to attach to the replay.
"""
pass
class Agent(AnnotatingAgent):
async def buy_phase_action(self, player: 'Player') -> StandardAction:
pass
async def annotated_buy_phase_action(self, player: 'Player') -> (StandardAction, Annotation):
return await self.buy_phase_action(player), None
async def rearrange_cards(self, player: 'Player') -> RearrangeCardsAction:
pass
async def annotated_rearrange_cards(self, player: 'Player') -> (RearrangeCardsAction, Annotation):
return await self.rearrange_cards(player), None
async def discover_choice_action(self, player: 'Player') -> DiscoverChoiceAction:
pass
async def annotated_discover_choice_action(self, player: 'Player') -> (DiscoverChoiceAction, Annotation):
return await self.discover_choice_action(player), None
```
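To illustrate the split above: `Agent` subclasses implement only the plain hooks and inherit `None` annotations, while an `AnnotatingAgent` can attach its own data to the replay. The class below is a hypothetical example, not part of the project; `EndPhaseAction` and `FreezeDecision` are the action types imported by other bots in this repository.
```python
import time
from hearthstone.simulator.agent.actions import EndPhaseAction, FreezeDecision

class TimingAnnotatingAgent(AnnotatingAgent):
    """Hypothetical agent that records how long each buy-phase decision took."""
    async def annotated_buy_phase_action(self, player: 'Player') -> (StandardAction, Annotation):
        start = time.monotonic()
        action = EndPhaseAction(FreezeDecision.NO_FREEZE)  # always end the phase
        return action, {"decision_seconds": time.monotonic() - start}
```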
#### File: simulator/core/card_graveyard.py
```python
import sys
from inspect import getmembers, isclass
from typing import Union
from hearthstone.simulator.core.cards import MonsterCard
from hearthstone.simulator.core.events import CardEvent, EVENTS, BuyPhaseContext, CombatPhaseContext
from hearthstone.simulator.core.monster_types import MONSTER_TYPES
class FloatingWatcher(MonsterCard):
tier = 4
monster_type = MONSTER_TYPES.DEMON
pool = MONSTER_TYPES.DEMON
base_attack = 4
base_health = 4
mana_cost = 5
def handle_event_powers(self, event: 'CardEvent', context: Union['BuyPhaseContext', 'CombatPhaseContext']):
if event.event is EVENTS.PLAYER_DAMAGED:
bonus = 4 if self.golden else 2
self.attack += bonus
self.health += bonus
class ElistraTheImmortal(MonsterCard):
tier = 6
monster_type = MONSTER_TYPES.NEUTRAL
base_attack = 4
base_health = 4
base_divine_shield = True
base_reborn = True
divert_taunt_attack = True
legendary = True
class BarrensBlacksmith(MonsterCard):
tier = 3
monster_type = None
base_attack = 3
base_health = 5
def frenzy(self, context: CombatPhaseContext):
bonus = 4 if self.golden else 2
for card in context.friendly_war_party.board:
if card != self:
card.attack += bonus
card.health += bonus
class Siegebreaker(MonsterCard):
tier = 4
monster_type = MONSTER_TYPES.DEMON
pool = MONSTER_TYPES.DEMON
base_attack = 5
base_health = 8
base_taunt = True
mana_cost = 7
def handle_event_powers(self, event: 'CardEvent', context: Union['BuyPhaseContext', 'CombatPhaseContext']):
bonus = 2 if self.golden else 1
if event.event is EVENTS.COMBAT_PREPHASE or (event.event is EVENTS.SUMMON_COMBAT and event.card == self):
demons = [card for card in context.friendly_war_party.board if
card != self and card.check_type(MONSTER_TYPES.DEMON)]
for demon in demons:
demon.attack += bonus
elif event.event is EVENTS.SUMMON_COMBAT and event.card in context.friendly_war_party.board \
and event.card != self and event.card.check_type(MONSTER_TYPES.DEMON):
event.card.attack += bonus
elif event.event is EVENTS.DIES and event.card == self:
demons = [card for card in context.friendly_war_party.board if
card != self and card.check_type(MONSTER_TYPES.DEMON)]
for demon in demons:
demon.attack -= bonus
REMOVED_CARDS = [member[1] for member in getmembers(sys.modules[__name__],
lambda member: isclass(member) and issubclass(member,
MonsterCard) and member.__module__ == __name__)]
```
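The `getmembers`/`isclass` scan at the end collects every `MonsterCard` subclass defined in this module (imported classes are excluded by the `__module__` check), sorted alphabetically by `getmembers`. Given the classes above, it resolves to:
```python
print([card.__name__ for card in REMOVED_CARDS])
# ['BarrensBlacksmith', 'ElistraTheImmortal', 'FloatingWatcher', 'Siegebreaker']
```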
#### File: simulator/core/cards.py
```python
import copy
import enum
import itertools
import typing
from collections import defaultdict
from typing import List, Optional, Callable, Type, Union, Iterator
from boltons.setutils import IndexedSet
from hearthstone.simulator.core import events
from hearthstone.simulator.core.events import BuyPhaseContext, CombatPhaseContext, EVENTS, CardEvent
from hearthstone.simulator.core.monster_types import MONSTER_TYPES
from hearthstone.simulator.core.randomizer import Randomizer
if typing.TYPE_CHECKING:
from hearthstone.simulator.core.adaptations import Adaptation
from hearthstone.simulator.core.player import Player
def one_minion_per_type(cards: List['MonsterCard'], randomizer: 'Randomizer',
excluded_card: Optional['MonsterCard'] = None) -> List['MonsterCard']:
minions = []
restricted_cards = [card for card in cards]
if excluded_card in restricted_cards:
restricted_cards.remove(excluded_card)
filler_minions = [card for card in restricted_cards if card.monster_type == MONSTER_TYPES.ALL]
for minion_type in MONSTER_TYPES.single_types():
minions_by_type = [card for card in restricted_cards if card.monster_type == minion_type]
if minions_by_type:
card = randomizer.select_friendly_minion(minions_by_type)
minions.append(card)
elif filler_minions:
card = randomizer.select_friendly_minion(filler_minions)
filler_minions.remove(card)
minions.append(card)
return minions
def one_minion_per_tier(cards: List['MonsterCard'], randomizer: 'Randomizer') -> List['MonsterCard']:
minions = []
for tavern_tier in range(1, 7):
minions_by_tier = [card for card in cards if card.tier == tavern_tier]
if minions_by_tier:
card = randomizer.select_friendly_minion(minions_by_tier)
minions.append(card)
return minions
BOOL_ATTRIBUTE_LIST = ["divine_shield", "magnetic", "poisonous", "taunt", "windfury", "cleave", "reborn",
"mega_windfury", "gruul_rules"]
class MonsterCard:
coin_cost = 3
mana_cost: Optional[int] = None
base_health: int
base_attack: int
monster_type: 'MONSTER_TYPES' = MONSTER_TYPES.NEUTRAL
base_divine_shield = False
base_magnetic = False
base_poisonous = False
base_taunt = False
base_windfury = False
base_cleave = False
base_deathrattle = None
base_battlecry = None
num_battlecry_targets = [0]
base_reborn = False
redeem_rate = 1
tier: int
not_in_pool = False
cant_attack = False
give_immunity = False
legendary = False
pool: 'MONSTER_TYPES' = MONSTER_TYPES.ALL
divert_taunt_attack = False
def __init__(self):
self.health = self.base_health
self.attack = self.base_attack
self.divine_shield = self.base_divine_shield
self.magnetic = self.base_magnetic
self.poisonous = self.base_poisonous
self.taunt = self.base_taunt
self.windfury = self.base_windfury
self.mega_windfury = False
self.cleave = self.base_cleave
self.deathrattles: List[Callable[[MonsterCard, CombatPhaseContext], None]] = []
if self.base_deathrattle is not None:
self.deathrattles.append(self.base_deathrattle.__func__)
self.reborn = self.base_reborn
self.dead = False
self.golden = False
self.battlecry: Optional[Callable[[List[MonsterCard], CombatPhaseContext], None]] = self.base_battlecry
self.attached_cards = []
self.frozen = False
self.nomi_buff = 0
self.ticket = False
self.token = self.not_in_pool
self.link: Optional['MonsterCard'] = None # links a card during combat to itself in the buy phase board
self.dealt_lethal_damage_by = None
self.frenzy_triggered = False
self.gruul_rules = False
def __repr__(self):
rep = f"{type(self).__name__} {self.attack}/{self.health} (t{self.tier})" # TODO: add a proper enum to the monster typing
if self.dead:
rep += ", [dead]"
if self.battlecry:
rep += ", [battlecry]"
for attribute in BOOL_ATTRIBUTE_LIST:
if getattr(self, attribute):
rep += f", [{attribute}]"
if self.deathrattles:
rep += ", [%s]" % ",".join([f"deathrattle-{i}" for i in range(len(self.deathrattles))])
if self.golden:
rep += ", [golden]"
if self.frozen:
rep += ", [frozen]"
if self.ticket:
rep += ", [ticket]"
return "{" + rep + "}"
def take_damage(self, damage: int, combat_phase_context: CombatPhaseContext, foe: Optional['MonsterCard'] = None,
defending: Optional[bool] = True):
        if self.divine_shield and damage > 0:
self.divine_shield = False
combat_phase_context.broadcast_combat_event(events.DivineShieldLostEvent(self, foe=foe))
else:
self.health -= damage
if foe is not None and foe.poisonous and self.health > 0:
self.health = 0
if defending and foe is not None and self.health < 0:
foe.overkill(combat_phase_context.enemy_context())
combat_phase_context.damaged_minions.add(self)
combat_phase_context.broadcast_combat_event(events.CardDamagedEvent(self, foe=foe))
if self.is_dying():
self.dealt_lethal_damage_by = foe
elif self.health >= 0 and not self.dead and not self.frenzy_triggered:
self.frenzy(combat_phase_context)
self.frenzy_triggered = True
def resolve_death(self, context: CombatPhaseContext, foe: Optional['MonsterCard'] = None):
if self.is_dying():
self.dead = True
context.friendly_war_party.dead_minions.append(self)
context.event_queue.load_minion(EVENTS.DIES, context.friendly_war_party, self, foe)
def trigger_reborn(self, context: CombatPhaseContext):
index = context.friendly_war_party.get_index(self)
for i in range(context.summon_minion_multiplier()):
reborn_self = self.unbuffed_copy()
reborn_self.health = 1
reborn_self.reborn = False
context.friendly_war_party.summon_in_combat(reborn_self, context, index + i + 1)
def handle_event(self, event: 'CardEvent', context: Union['BuyPhaseContext', 'CombatPhaseContext']):
if self == event.card:
if event.event is EVENTS.DIES:
if self.deathrattles:
context.event_queue.load_minion(EVENTS.DEATHRATTLE_TRIGGERED, context.friendly_war_party, self)
if self.reborn:
self.trigger_reborn(context)
elif event.event is EVENTS.SUMMON_BUY:
if self.magnetic:
self.magnetize(event.targets, context)
if self.battlecry:
for _ in range(context.battlecry_multiplier()):
self.battlecry(event.targets, context)
if event.event is EVENTS.BUY_END:
if self.gruul_rules:
self.attack += 2
self.health += 2
if not self.dead or self == event.card: # minions will trigger their own death events
self.handle_event_powers(event, context)
def handle_event_in_hand(self, event: CardEvent, context: BuyPhaseContext):
return
def handle_event_powers(self, event: 'CardEvent', context: Union['BuyPhaseContext', 'CombatPhaseContext']):
return
def valid_battlecry_target(self, card: 'MonsterCard') -> bool:
return True
def golden_transformation(self, base_cards: List['MonsterCard']):
self.attack += self.base_attack
self.health += self.base_health
self.golden = True
for card in base_cards:
self.health += card.health - card.base_health
self.attack += card.attack - card.base_attack
self.attached_cards.extend(card.attached_cards)
if card.base_deathrattle:
self.deathrattles.extend(card.deathrattles[1:])
else:
self.deathrattles.extend(card.deathrattles)
for attr in BOOL_ATTRIBUTE_LIST:
if getattr(card, attr):
if attr == "windfury" and card.base_windfury:
setattr(self, "mega_windfury", True)
setattr(self, attr, False)
else:
setattr(self, attr, True)
def magnetize(self, targets: List['MonsterCard'], context: 'BuyPhaseContext'):
if targets:
targets[0].attack += self.attack
targets[0].health += self.health
if self.deathrattles:
targets[0].deathrattles.extend(self.deathrattles)
for attr in BOOL_ATTRIBUTE_LIST:
if getattr(self, attr) and attr != 'magnetic':
setattr(targets[0], attr, True)
targets[0].attached_cards.append(self)
context.owner.remove_board_card(self)
def overkill(self, context: CombatPhaseContext):
return
def frenzy(self, context: CombatPhaseContext):
return
def dissolve(self) -> List['MonsterCard']:
golden_modifier = 3 if self.golden else 1
attached_cards = []
for card in self.attached_cards:
attached_cards.extend(card.dissolve())
if self.token:
return attached_cards
else:
dissolving_cards = [type(self)() for _ in range(golden_modifier)]
dissolving_cards.extend(attached_cards)
return dissolving_cards
def summon_minion_multiplier(self) -> int:
return 1
def deathrattle_multiplier(self) -> int:
return 1
def battlecry_multiplier(self) -> int:
return 1
@classmethod
def check_type(cls, desired_type: 'MONSTER_TYPES') -> bool:
return cls.monster_type in (desired_type, MONSTER_TYPES.ALL)
def is_targetable(self) -> bool:
return not self.dead and self.health > 0
def is_dying(self) -> bool:
return not self.dead and self.health <= 0
def adapt(self, adaptation: 'Adaptation'):
assert adaptation.valid(self)
adaptation.apply(self)
def unbuffed_copy(self) -> 'MonsterCard':
copy = type(self)()
if self.golden:
copy.golden_transformation([])
return copy
def valid_attack_targets(self, live_enemies: List['MonsterCard']) -> List['MonsterCard']:
if self.attack <= 0 or not live_enemies:
return []
taunt_monsters = [card for card in live_enemies if card.taunt]
if taunt_monsters:
return taunt_monsters
else:
return live_enemies
def num_attacks(self):
if self.mega_windfury:
return 4
elif self.windfury:
return 2
else:
return 1
def apply_nomi_buff(self, player: 'Player'):
if self.check_type(MONSTER_TYPES.ELEMENTAL):
self.attack += (player.nomi_bonus - self.nomi_buff)
self.health += (player.nomi_bonus - self.nomi_buff)
self.nomi_buff = player.nomi_bonus
def copy(self) -> 'MonsterCard':
"""
        For when gaining a "copy" of a card.
        :return: A copy of this card with its own deathrattle and attached-card lists.
"""
clone = copy.copy(self)
clone.deathrattles = clone.deathrattles.copy()
clone.attached_cards = [card.copy() for card in clone.attached_cards]
return clone
def adjacent_minions(self, context: 'BuyPhaseContext', predicate: Callable[['MonsterCard'], bool]) -> List[
'MonsterCard']:
return [card for card in context.owner.in_play if
predicate(card) and abs(context.owner.in_play.index(card) - context.owner.in_play.index(self)) == 1]
class CardList:
def __init__(self, cards: List[MonsterCard]):
# We use an IndexedSet instead of a set here for deterministic iteration order.
self.cards_by_tier: typing.DefaultDict[int, IndexedSet] = defaultdict(lambda: IndexedSet())
for card in cards:
self.cards_by_tier[card.tier].add(card)
def draw(self, player: 'Player', num: int, predicate: Optional[Callable] = lambda card: True) -> List['MonsterCard']:
valid_cards = []
for tier in range(player.tavern_tier + 1):
valid_cards.extend(self.cards_by_tier[tier])
valid_cards = [card for card in valid_cards if predicate(card)]
selected_cards = []
for i in range(num):
assert valid_cards, "fnord"
random_card = player.tavern.randomizer.select_draw_card(valid_cards, player.name, player.tavern.turn_count)
self.cards_by_tier[random_card.tier].remove(random_card)
valid_cards.remove(random_card)
selected_cards.append(random_card)
return selected_cards
def return_cards(self, cards: Iterator[MonsterCard]):
for card in cards:
self.return_card(card)
def return_card(self, card: MonsterCard):
self.cards_by_tier[card.tier].add(card)
def remove_card(self, card: MonsterCard):
self.cards_by_tier[card.tier].remove(card)
def remove_card_of_type(self, card_type: 'Type'):
cards_of_type = [card for card in self.cards_by_tier[card_type.tier] if type(card) == card_type]
self.cards_by_tier[card_type.tier].remove(cards_of_type[0])
def all_cards(self):
return itertools.chain.from_iterable(self.cards_by_tier.values())
def __len__(self) -> int:
return sum(len(value) for value in self.cards_by_tier.values())
def unique_cards(self) -> List['MonsterCard']:
cards_by_type = {type(card): card for card in self.all_cards()}
return list(cards_by_type.values())
def cards_of_monstertype(self, monster_type: 'MONSTER_TYPES'):
return [card for card in self.all_cards() if card.check_type(monster_type)]
def cards_with_battlecry(self):
return [card for card in self.all_cards() if card.base_battlecry]
class CardLocation(enum.Enum):
STORE = 1
HAND = 2
BOARD = 3
DISCOVER = 4
SPELLS = 5
SECRETS = 6
```
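To make the `MonsterCard` conventions concrete, here is an invented card (it does not exist in the card pool) showing the class-attribute pattern and the effect of `golden_transformation`:
```python
class ExampleBoar(MonsterCard):
    """Hypothetical tier-1 beast, defined only to illustrate the subclass conventions."""
    tier = 1
    monster_type = MONSTER_TYPES.BEAST
    pool = MONSTER_TYPES.BEAST
    base_attack = 2
    base_health = 2

    def base_deathrattle(self, context: CombatPhaseContext):
        # Deathrattle: give the other surviving friendly minions +2 attack (+4 if golden).
        bonus = 4 if self.golden else 2
        for card in context.friendly_war_party.board:
            if card != self and not card.dead:
                card.attack += bonus

# Tripling into a golden copy doubles the base stats and carries over buffs and deathrattles.
golden = ExampleBoar()
golden.golden_transformation([ExampleBoar(), ExampleBoar()])
print(golden)  # {ExampleBoar 4/4 (t1), [deathrattle-0], [golden]}
```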
#### File: simulator/core/randomizer.py
```python
import os
import random
import sys
import typing
from collections import deque
from typing import List, Tuple, Type, Optional
from hearthstone.simulator.core.monster_types import MONSTER_TYPES
if typing.TYPE_CHECKING:
from hearthstone.simulator.core.cards import MonsterCard
from hearthstone.simulator.core.hero import Hero
from hearthstone.simulator.core.player import Player
from hearthstone.simulator.core.secrets import Secret
from hearthstone.simulator.core.spell import Spell
class Randomizer:
def select_draw_card(self, cards: List['MonsterCard'], player_name: str, round_number: int) -> 'MonsterCard':
raise NotImplementedError()
def select_player_pairings(self, players: List['Player']) -> List[Tuple['Player', 'Player']]:
raise NotImplementedError()
def select_attack_target(self, defenders: List['MonsterCard']) -> 'MonsterCard':
raise NotImplementedError()
def select_friendly_minion(self, friendly_minions: List['MonsterCard']) -> 'MonsterCard':
raise NotImplementedError()
def select_enemy_minion(self, enemy_minions: List['MonsterCard']) -> 'MonsterCard':
raise NotImplementedError()
def select_discover_card(self, discoverables: List['MonsterCard']) -> 'MonsterCard':
raise NotImplementedError()
def select_from_store(self, store: List['MonsterCard']) -> 'MonsterCard':
raise NotImplementedError()
def select_gain_card(self, cards: List['MonsterCard']) -> 'MonsterCard':
raise NotImplementedError()
def select_hero(self, hero_pool: List['Hero']) -> 'Hero':
raise NotImplementedError()
def select_summon_minion(self, cards: List['Type']) -> 'Type':
raise NotImplementedError()
def select_add_to_store(self, cards: List['MonsterCard']) -> 'MonsterCard':
raise NotImplementedError()
def select_monster_type(self, monster_types: List['MONSTER_TYPES'], round_number: int) -> 'MONSTER_TYPES':
raise NotImplementedError()
def select_random_minion(self, cards: List['Type'], round_number: int) -> 'Type':
raise NotImplementedError()
def select_adaptation(self, adaptations: List['Type']) -> 'Type':
raise NotImplementedError()
def select_random_number(self, lo: int, hi: int) -> int:
raise NotImplementedError()
def select_secret(self, secrets: List[Type['Secret']]) -> Type['Secret']:
raise NotImplementedError()
def select_combat_matchup(self, pairings: List[Tuple['Player', 'Player']]) -> Tuple['Player', 'Player']:
raise NotImplementedError()
def select_event_queue(self, queues: List[deque]) -> deque:
raise NotImplementedError()
def select_spell(self, spell: List[Type['Spell']]) -> Type['Spell']:
raise NotImplementedError()
class DefaultRandomizer(Randomizer):
def __init__(self, seed: Optional[int] = None):
if seed is None:
seed = random.getrandbits(32)
self.seed = seed
self.rand = random.Random(seed)
def select_draw_card(self, cards: List['MonsterCard'], player_name: str, round_number: int) -> 'MonsterCard':
return self.rand.choice(cards)
def select_player_pairings(self, players: List['Player']) -> List[Tuple['Player', 'Player']]:
self.rand.shuffle(players)
number_of_battles = len(players) // 2
return list(zip(players[:number_of_battles], players[number_of_battles:]))
def select_attack_target(self, defenders: List['MonsterCard']) -> 'MonsterCard':
return self.rand.choice(defenders)
def select_friendly_minion(self, friendly_minions: List['MonsterCard']) -> 'MonsterCard':
return self.rand.choice(friendly_minions)
def select_enemy_minion(self, enemy_minions: List['MonsterCard']) -> 'MonsterCard':
return self.rand.choice(enemy_minions)
def select_discover_card(self, discoverables: List['MonsterCard']) -> 'MonsterCard':
return self.rand.choice(discoverables)
def select_from_store(self, store: List['MonsterCard']) -> 'MonsterCard':
return self.rand.choice(store)
def select_gain_card(self, cards: List['MonsterCard']) -> 'MonsterCard':
return self.rand.choice(cards)
def select_hero(self, hero_pool: List['Hero']) -> 'Hero':
return self.rand.choice(hero_pool)
def select_summon_minion(self, cards: List['Type']) -> 'Type':
return self.rand.choice(cards)
def select_add_to_store(self, cards: List['MonsterCard']) -> 'MonsterCard':
return self.rand.choice(cards)
def select_monster_type(self, monster_types: List['MONSTER_TYPES'], round_number: int) -> 'MONSTER_TYPES':
return self.rand.choice(monster_types)
def select_random_minion(self, cards: List['Type'], round_number: int) -> 'Type':
return self.rand.choice(cards)
def select_adaptation(self, adaptations: List['Type']) -> 'Type':
return self.rand.choice(adaptations)
def select_random_number(self, lo: int, hi: int) -> int:
return self.rand.randint(lo, hi)
def select_secret(self, secrets: List[Type['Secret']]) -> Type['Secret']:
return self.rand.choice(secrets)
def select_combat_matchup(self, pairings: List[Tuple['Player', 'Player']]) -> Tuple['Player', 'Player']:
return self.rand.choice(pairings)
def select_event_queue(self, queues: List[deque]) -> deque:
return self.rand.choice(queues)
def select_spell(self, spells: List[Type['Spell']]) -> Type['Spell']:
return self.rand.choice(spells)
```
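The seeded `DefaultRandomizer` is what makes games reproducible: `Replay` stores the seed and `run_replay` rebuilds the tavern with `DefaultRandomizer(self.seed)` (see `replay.py` later in this section). A minimal demonstration of that property:
```python
a = DefaultRandomizer(seed=42)
b = DefaultRandomizer(seed=42)
assert a.select_random_number(1, 100) == b.select_random_number(1, 100)

# When no seed is given, the generated one is kept on the instance, so it can be replayed later.
c = DefaultRandomizer()
replayed = DefaultRandomizer(c.seed)
assert c.select_random_number(1, 6) == replayed.select_random_number(1, 6)
```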
#### File: simulator/host/cyborg_host.py
```python
import asyncio
import logging
from hearthstone.asyncio import asyncio_utils
from hearthstone.battlebots.early_game_bot import EarlyGameBot
from hearthstone.battlebots.priority_functions import PriorityFunctions
from hearthstone.simulator.agent import EndPhaseAction
from hearthstone.simulator.host.async_host import AsyncHost
logger = logging.getLogger(__name__)
class CyborgArena(AsyncHost):
async def async_play_round(self):
self.tavern.buying_step()
async def perform_player_actions(agent, player):
for _ in range(40):
if player.discover_queue:
try:
discovered_card = await agent.discover_choice_action(player)
except ConnectionError:
print("replace with a bot")
# replace the agent and player
agent = PriorityFunctions.battlerattler_priority_bot(3, EarlyGameBot)
self.agents[player.name] = agent
discovered_card = await agent.discover_choice_action(player)
player.select_discover(discovered_card)
else:
try:
action = await agent.buy_phase_action(player)
except ConnectionError:
print("replace with a bot")
# replace the agent and player
agent = PriorityFunctions.battlerattler_priority_bot(3, EarlyGameBot)
self.agents[player.name] = agent
action = await agent.buy_phase_action(player)
action.apply(player)
if type(action) is EndPhaseAction:
break
if len(player.in_play) > 1:
try:
arrangement = await agent.rearrange_cards(player)
except ConnectionError:
print("replace with a bot")
# replace the agent and player
agent = PriorityFunctions.battlerattler_priority_bot(3, EarlyGameBot)
self.agents[player.name] = agent
arrangement = await agent.rearrange_cards(player)
player.rearrange_cards(arrangement.permutation)
perform_player_action_tasks = []
for player_name, player in self.tavern.players.items():
if player.dead:
continue
perform_player_action_tasks.append(
asyncio_utils.create_task(perform_player_actions(self.agents[player_name], player), logger=logger))
await asyncio.gather(*perform_player_action_tasks)
self.tavern.combat_step()
if self.tavern.game_over():
game_over_tasks = []
for position, (name, player) in enumerate(reversed(self.tavern.losers)):
game_over_tasks.append(asyncio_utils.create_task(self.agents[name].game_over(player, position), logger=logger))
await asyncio.gather(*game_over_tasks)
```
#### File: simulator/host/host.py
```python
import typing
from typing import Dict, Optional, List
from frozenlist.frozen_list import FrozenList
from hearthstone.simulator import agent
from hearthstone.simulator.core.tavern import Tavern
from hearthstone.simulator.replay.replay import Replay, ReplayStep
if typing.TYPE_CHECKING:
from hearthstone.simulator.agent import agent
from hearthstone.simulator.agent.agent import AnnotatingAgent
from hearthstone.simulator.agent.actions import Action
from hearthstone.simulator.core.randomizer import Randomizer
from hearthstone.simulator.replay.observer import Observer
class Host:
tavern: Tavern
agents: Dict[str, 'AnnotatingAgent']
replay: Replay
observers: FrozenList # [Observer]
def __init__(self, agents: Dict[str, 'AnnotatingAgent'], observers: Optional[List['Observer']] = None,
randomizer: Optional['Randomizer'] = None):
self.tavern = Tavern(randomizer=randomizer)
self.agents = agents
for player_name in sorted(agents.keys()): # Sorting is important for replays to be exact with RNG.
self.tavern.add_player(player_name)
self.replay = Replay(self.tavern.randomizer.seed, list(self.tavern.players.keys()))
if not observers:
observers = []
self.observers = FrozenList(observers)
def _apply_and_record(self, player_name: str, action: 'Action', agent_annotation: 'agent.Annotation' = None):
observer_annotations = {}
for observer in self.observers:
annotation = observer.on_action(self.tavern, player_name, action)
if annotation is not None:
observer_annotations[observer.name()] = annotation
action.apply(self.tavern.players[player_name])
self.replay.append_action(ReplayStep(player_name, action, agent_annotation, observer_annotations))
def _on_game_over(self):
for observer in self.observers:
annotation = observer.on_game_over(self.tavern)
if annotation is not None:
self.replay.observer_annotate(observer.name(), annotation)
def start_game(self):
raise NotImplementedError()
def play_round(self):
raise NotImplementedError()
def game_over(self):
raise NotImplementedError()
def play_game(self):
raise NotImplementedError()
def get_replay(self) -> Replay:
raise NotImplementedError()
```
#### File: replay/annotators/final_board_annotator.py
```python
from typing import Dict, List
from hearthstone.simulator.agent.actions import Action
from hearthstone.simulator.core.tavern import Tavern
from hearthstone.simulator.replay.observer import Observer, Annotation
class FinalBoardAnnotator(Observer):
"""
This annotator records the final boards for all players.
"""
def name(self) -> str:
return "FinalBoardAnnotator"
def on_action(self, tavern: 'Tavern', player: str, action: 'Action') -> Annotation:
return None
    def on_game_over(self, tavern: 'Tavern') -> Dict[str, List[str]]:
return {name: [str(card) for card in player.in_play] for name, player in tavern.players.items()}
```
#### File: simulator/replay/observer.py
```python
from typing import Any
from hearthstone.simulator.agent.actions import Action
from hearthstone.simulator.core.tavern import Tavern
Annotation = Any
class Observer:
def name(self) -> str:
pass
def on_action(self, tavern: 'Tavern', player: str, action: 'Action') -> Annotation:
pass
def on_game_over(self, tavern: 'Tavern') -> Annotation:
pass
```
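`Observer` is the hook interface that annotators such as `FinalBoardAnnotator` (shown above) implement: `on_action` runs for every applied action and `on_game_over` once at the end. A further hypothetical example, not part of the project, that counts actions per player:
```python
from collections import Counter

class ActionCountAnnotator(Observer):
    """Hypothetical observer that counts how many actions each player took."""
    def __init__(self):
        self.counts = Counter()

    def name(self) -> str:
        return "ActionCountAnnotator"

    def on_action(self, tavern: 'Tavern', player: str, action: 'Action') -> Annotation:
        self.counts[player] += 1
        return None  # nothing to attach to this individual replay step

    def on_game_over(self, tavern: 'Tavern') -> Annotation:
        return dict(self.counts)
```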
#### File: simulator/replay/replay.py
```python
from typing import List, Any, Dict, Optional
import autoslot
from hearthstone.simulator.agent.actions import EndPhaseAction, Action, HeroChoiceAction, RearrangeCardsAction
from hearthstone.simulator.core.randomizer import DefaultRandomizer
from hearthstone.simulator.core.tavern import Tavern
class ReplayStep(autoslot.Slots):
def __init__(self, player: str, action: 'Action', agent_annotation: Any = None,
observer_annotations: Optional[Dict[str, Any]] = None):
self.player = player
self.action = action
self.agent_annotation = agent_annotation
self.observer_annotations = observer_annotations or {}
def __repr__(self):
return f"{self.player}: {self.action} ({self.agent_annotation}) ({self.observer_annotations})"
class Replay:
def __init__(self, seed: int, players: List[str]):
self.seed = seed
self.players = players
self.steps: List[ReplayStep] = []
self.agent_annotations: Dict[str, Any] = {} # mapping player name to agent annotation
self.observer_annotations: Dict[str, Any] = {} # Mapping observer to its annotation.
def append_action(self, replay_step: ReplayStep):
self.steps.append(replay_step)
def agent_annotate(self, player: str, annotation: Any):
self.agent_annotations[player] = annotation
def observer_annotate(self, observer: str, annotation: Any):
self.observer_annotations[observer] = annotation
def run_replay(self) -> 'Tavern':
tavern = Tavern(randomizer=DefaultRandomizer(self.seed))
for player in sorted(self.players): # Sorting is important for replays to be exact with RNG.
tavern.add_player(player)
hero_chosen_players = set()
for replay_step in self.steps[:len(self.players)]:
assert isinstance(replay_step.action, HeroChoiceAction)
replay_step.action.apply(tavern.players[replay_step.player])
hero_chosen_players.add(replay_step.player)
assert hero_chosen_players == set(self.players)
tavern.buying_step()
end_phase_actions = set()
i = len(self.players)
while i < len(self.steps):
replay_step = self.steps[i]
if type(replay_step.action) is EndPhaseAction:
assert replay_step.player not in end_phase_actions
end_phase_actions.add(replay_step.player)
replay_step.action.apply(tavern.players[replay_step.player])
if len(end_phase_actions) + len(tavern.losers) == len(self.players):
while i + 1 < len(self.steps) and type(self.steps[i + 1].action) is RearrangeCardsAction:
self.steps[i + 1].action.apply(tavern.players[replay_step.player])
i += 1
tavern.combat_step()
end_phase_actions = set()
if not tavern.game_over():
tavern.buying_step()
i += 1
return tavern
```
#### File: hearthstone/text_agent/stdio.py
```python
from hearthstone.text_agent.text_agent import TextAgentProtocol
class StdIOTransport(TextAgentProtocol):
"""
Note this agent is blocking, since it uses the same stdin/stdout for all agents.
"""
async def receive_line(self) -> str:
return input()
async def send(self, text: str):
print(text, end='')
```
#### File: training/pettingzoo/pettingzoo_agent.py
```python
import asyncio
import typing
from hearthstone.simulator.agent.actions import StandardAction, RearrangeCardsAction, DiscoverChoiceAction
from hearthstone.simulator.agent.agent import Agent
if typing.TYPE_CHECKING:
    from hearthstone.simulator.core.player import Player
class AgentRequestQueue:
"""
A class for passing data back and forth between the PettingZoo API and the PettingZooAgents.
The `requests` queue contains tuples of (player_name, Future)
"""
def __init__(self, maxsize: int = 8):
self.requests: asyncio.Queue = asyncio.Queue(maxsize=maxsize)
async def request_agent_action(self, player_name: str):
future = asyncio.Future()
self.requests.put_nowait((player_name, future))
return await future
class PettingZooAgent(Agent):
def __init__(self, queue: AgentRequestQueue):
self.queue = queue
    async def buy_phase_action(self, player: 'Player') -> StandardAction:
        # The original body was the unfinished fragment "queu"; a plausible completion is to
        # hand the decision to the PettingZoo side via the request queue and await the result.
        return await self.queue.request_agent_action(player.name)
async def rearrange_cards(self, player: 'Player') -> RearrangeCardsAction:
pass
async def discover_choice_action(self, player: 'Player') -> DiscoverChoiceAction:
pass
```
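The environment side that services `AgentRequestQueue` is not shown in this file; the sketch below is an assumed consumer loop illustrating the intended protocol: pop a `(player_name, future)` pair, decide an action, and resolve the future so the awaiting agent method returns. `decide_action` is a hypothetical callback.
```python
async def serve_requests(queue: AgentRequestQueue, decide_action):
    while True:
        player_name, future = await queue.requests.get()
        # Resolving the future unblocks the PettingZooAgent coroutine awaiting it.
        future.set_result(decide_action(player_name))
```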
#### File: training/pytorch/a2c.py
```python
import logging
import random
from datetime import datetime
from typing import List
import torch
from torch import optim, nn
from torch.utils.tensorboard import SummaryWriter
from hearthstone.ladder.ladder import Contestant, update_ratings, print_standings, save_ratings
from hearthstone.simulator.host import RoundRobinHost
from hearthstone.training.pytorch.encoding.default_encoder import get_indexed_action_component, \
DefaultEncoder
from hearthstone.training.common.state_encoding import Transition
from hearthstone.training.pytorch.networks.feedforward_net import HearthstoneFFNet
from hearthstone.training.pytorch.policy_gradient import easier_contestants, tensorize_batch
from hearthstone.training.pytorch.replay_buffer import ReplayBuffer
from hearthstone.training.pytorch.surveillance import SurveiledPytorchBot, ReplayBufferSaver
# TODO STOP THIS HACK
global_step = 0
def learn(tensorboard: SummaryWriter, optimizer: optim.Adam, learning_net: nn.Module, replay_buffer: ReplayBuffer,
batch_size, policy_weight):
global global_step
if len(replay_buffer) < batch_size:
return
transitions: List[Transition] = replay_buffer.sample(batch_size)
replay_buffer.clear()
transition_batch = tensorize_batch(transitions)
# TODO turn off gradient here
# Note transition_batch.valid_actions is not the set of valid actions from the next state, but we are ignoring the policy network here so it doesn't matter
next_policy_, next_value = learning_net(transition_batch.next_state, transition_batch.valid_actions)
next_value = next_value.detach()
policy, value = learning_net(transition_batch.state, transition_batch.valid_actions)
advantage = transition_batch.reward.unsqueeze(-1) + next_value.masked_fill(
transition_batch.is_terminal.unsqueeze(-1), 0.0) - value
tensorboard.add_histogram("policy/train", torch.exp(policy), global_step)
masked_reward = transition_batch.reward.masked_select(transition_batch.is_terminal)
if masked_reward.size()[0]:
tensorboard.add_histogram("reward/train", transition_batch.reward.masked_select(transition_batch.is_terminal),
global_step)
tensorboard.add_histogram("value/train", value, global_step)
tensorboard.add_histogram("next_value/train", next_value, global_step)
tensorboard.add_histogram("advantage/train", advantage, global_step)
tensorboard.add_text("action/train", str(get_indexed_action_component(int(transition_batch.action[0]))), global_step)
tensorboard.add_scalar("avg_reward/train",
transition_batch.reward.masked_select(transition_batch.is_terminal).float().mean(),
global_step)
tensorboard.add_scalar("avg_value/train", value.mean(), global_step)
tensorboard.add_scalar("avg_advantage/train", advantage.mean(), global_step)
policy_loss = -(policy.gather(1, transition_batch.action.unsqueeze(-1)) * advantage.detach()).mean()
value_loss = advantage.pow(2).mean()
tensorboard.add_scalar("policy_loss/train", policy_loss, global_step)
tensorboard.add_scalar("value_loss/train", value_loss, global_step)
entropy_loss = 0.000001 * torch.sum(policy * torch.exp(policy))
tensorboard.add_scalar("entropy_loss/train", entropy_loss, global_step)
loss = policy_loss * policy_weight + value_loss + entropy_loss
optimizer.zero_grad()
loss.backward()
optimizer.step()
global_step += 1
def main():
batch_size = 1024
tensorboard = SummaryWriter(f"../../../data/learning/pytorch/tensorboard/{datetime.now().isoformat()}")
logging.getLogger().setLevel(logging.INFO)
other_contestants = easier_contestants()
learning_net = HearthstoneFFNet(DefaultEncoder())
optimizer = optim.Adam(learning_net.parameters(), lr=0.0001)
replay_buffer = ReplayBuffer(100000)
learning_bot_contestant = Contestant("LearningBot",
lambda: SurveiledPytorchBot(learning_net, [ReplayBufferSaver(replay_buffer)]))
contestants = other_contestants + [learning_bot_contestant]
standings_path = "../../../data/learning/pytorch/a2c/standings.json"
# load_ratings(contestants, standings_path)
# add_net_to_tensorboard(tensorboard, learning_net)
for _ in range(10000):
round_contestants = [learning_bot_contestant] + random.sample(other_contestants, k=7)
host = RoundRobinHost({contestant.name: contestant.agent_generator() for contestant in round_contestants})
host.play_game()
winner_names = list(reversed([name for name, player in host.tavern.losers]))
print("---------------------------------------------------------------")
print(winner_names)
print(host.tavern.losers[-1][1].in_play)
ranked_contestants = sorted(round_contestants, key=lambda c: winner_names.index(c.name))
update_ratings(ranked_contestants)
print_standings(contestants)
for contestant in round_contestants:
contestant.games_played += 1
if learning_bot_contestant in round_contestants:
learn(tensorboard, optimizer, learning_net, replay_buffer, batch_size, 2.0)
save_ratings(contestants, standings_path)
tensorboard.close()
if __name__ == '__main__':
main()
```
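The advantage in `learn` is a one-step estimate: reward plus the (detached) value of the next state for non-terminal transitions, minus the value of the current state, with the discount factor implicitly 1. A scalar illustration of the same expression using plain tensors:
```python
import torch

reward = torch.tensor([0.0, 1.0])          # per-transition rewards
next_value = torch.tensor([0.5, 0.7])      # V(s') from the network, detached
is_terminal = torch.tensor([False, True])  # terminal transitions ignore V(s')
value = torch.tensor([0.4, 0.2])           # V(s)

advantage = reward + next_value.masked_fill(is_terminal, 0.0) - value
print(advantage)  # tensor([0.1000, 0.8000])
```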
#### File: training/pytorch/optuner.py
```python
import joblib
import optuna
from hearthstone.training.pytorch.ppo import PPOHyperparameters, PPOLearner
def objective(trial: optuna.Trial):
hparams = PPOHyperparameters({
"optimizer": trial.suggest_categorical("optimizer", ["adam", "sgd"]),
"batch_size": trial.suggest_int("batch_size", 1, 4096, log=True),
"ppo_epochs": trial.suggest_int("ppo_epochs", 1, 40),
"ppo_epsilon": trial.suggest_float("ppo_epsilon", 0.01, 0.5, log=True),
"policy_weight": trial.suggest_float("policy_weight", 0.3, 3, log=True),
"entropy_weight": trial.suggest_float("entropy_weight", 1e-7, 1e-2, log=True),
"nn.hidden_layers": trial.suggest_int("nn.hidden_layers", 0, 3),
"normalize_observations": trial.suggest_categorical("normalize_observations", [True, False]),
"gradient_clipping": trial.suggest_float("gradient_clipping", 0.5, 0.5),
"normalize_advantage": trial.suggest_categorical("normalize_advantage", [True, False]),
})
hparams["num_workers"] = trial.suggest_int("num_workers", 1, hparams["batch_size"], log=True)
if hparams["optimizer"] == "adam":
hparams["adam.lr"] = trial.suggest_float("adam.lr", 1e-6, 1e-3, log=True)
elif hparams["optimizer"] == "sgd":
hparams["sgd_lr"] = trial.suggest_float("sgd_lr", 1e-6, 1e-3, log=True)
hparams["sgd_momentum"] = trial.suggest_float("sgd_momentum", 0.0, 1.0)
if hparams["nn.hidden_layers"] > 0:
hparams["nn.hidden_size"] = trial.suggest_int("nn.hidden_size", 32, 2048)
hparams["nn.shared"] = trial.suggest_categorical("nn.shared", [True, False])
hparams["nn.activation"] = trial.suggest_categorical("nn.activation", ["relu", "gelu", "tanh"])
ppo_learner = PPOLearner(hparams, 600, trial)
return ppo_learner.run()
def main():
"""
A wise man once said, "You can optuna neural net but you can't optuna fish." - <NAME>.
Returns: No returns. No refunds. No shirt. No service.
"""
study = optuna.create_study(
storage="postgres://localhost/optuna", study_name="ppo_study",
direction="maximize",
load_if_exists=True,
pruner=optuna.pruners.NopPruner())
try:
try:
with joblib.parallel_backend("multiprocessing"):
study.optimize(objective, n_jobs=10, catch=(RuntimeError,))
except KeyboardInterrupt:
pass
except Exception as e:
print(e)
print(study.best_params)
if __name__ == '__main__':
main()
```
#### File: training/pytorch/replay_buffer.py
```python
import collections
import random
from queue import Queue
from typing import List, Generator, Union
from hearthstone.simulator.replay.replay import Replay
from hearthstone.training.pytorch.replay import ActorCriticGameStepInfo
class EpochBuffer:
"""
A replay buffer for a2c or ppo, containing an unordered list of transitions.
"""
def __init__(self, bot_name: str):
"""
        :param bot_name: The name of the agent that this buffer is collecting samples for. Only this bot's actions
            will be added to the replay buffer.
"""
self.bot_name = bot_name
self.transitions: List[ActorCriticGameStepInfo] = []
def __len__(self):
return len(self.transitions)
def clear(self):
self.transitions.clear()
def recycle(self, queue: Union[Queue, collections.deque]):
for transition in self.transitions:
if isinstance(queue, collections.deque):
queue.append((transition.state, transition.valid_actions))
else:
queue.put_nowait((transition.state, transition.valid_actions))
self.clear()
def add_replay(self, replay: Replay):
for replay_step in replay.steps:
if replay_step.player == self.bot_name and replay_step.agent_annotation:
bot_info = replay_step.agent_annotation
self.transitions.append(bot_info)
def sample_minibatches(self, batch_size: int) -> Generator[List[ActorCriticGameStepInfo], None, None]:
random.shuffle(self.transitions)
i = 0
while True:
batch = self.transitions[i:i + batch_size]
if len(batch) < batch_size:
break
yield batch
i += batch_size
```
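A small runnable sketch of how `EpochBuffer` batches its contents. Plain integers stand in for the `ActorCriticGameStepInfo` objects that `add_replay` would normally collect; note that `sample_minibatches` drops the trailing partial minibatch.
```python
buffer = EpochBuffer("LearningBot")
buffer.transitions = list(range(10))  # stand-ins for ActorCriticGameStepInfo entries

batches = list(buffer.sample_minibatches(batch_size=4))
print([len(b) for b in batches])  # [4, 4] -- the leftover batch of 2 is discarded
buffer.clear()
```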
#### File: worker/distributed/simulation_worker.py
```python
import asyncio
import random
import time
from typing import List
import logging
import torch
from torch.distributed import rpc
from hearthstone.asyncio import asyncio_utils
from hearthstone.ladder.ladder import Contestant
from hearthstone.simulator.host.async_host import AsyncHost
from hearthstone.simulator.replay.annotators.final_board_annotator import FinalBoardAnnotator
from hearthstone.simulator.replay.annotators.ranking_annotator import RankingAnnotator
from hearthstone.simulator.replay.replay import Replay
from hearthstone.training.pytorch.agents.pytorch_bot import PytorchBot
from hearthstone.training.pytorch.tensorboard_altair import TensorboardAltairAnnotator
from hearthstone.training.pytorch.worker.distributed.remote_net import RemoteNet, BatchedRemoteNet
logger = logging.getLogger(__name__)
class SimulationWorker:
def __init__(self, inference_worker):
self.id = rpc.get_worker_info().id
self.inference_worker = inference_worker
torch.set_num_threads(1)
async def play_game(self, learning_bot_contestant, other_contestants, game_size):
round_contestants = [learning_bot_contestant] + random.sample(other_contestants,
k=game_size - 1)
with torch.no_grad():
host = AsyncHost(
{contestant.name: contestant.agent_generator() for contestant in round_contestants},
[RankingAnnotator(),
FinalBoardAnnotator(),
TensorboardAltairAnnotator([learning_bot_contestant.name])]
)
await host.async_play_game()
return host.get_replay()
def play_interleaved_games(self,
num_games: int,
learning_bot_contestant: Contestant,
other_contestants: List[Contestant],
game_size: int) -> List[Replay]:
start = time.time()
async def run_games():
nets = {}
for contestant in [learning_bot_contestant] + other_contestants:
if contestant.agent_generator.function == PytorchBot:
if type(contestant.agent_generator.kwargs['net']) is RemoteNet:
if contestant.name not in nets:
nets[contestant.name] = BatchedRemoteNet(contestant.name, self.inference_worker)
contestant.agent_generator.kwargs['net'] = nets[contestant.name]
for _, net in nets.items():
await net.start_worker()
tasks = [asyncio_utils.create_task(self.play_game(learning_bot_contestant, other_contestants, game_size), logger=logger) for _ in
range(num_games)]
result = await asyncio.gather(
*tasks)
for _, net in nets.items():
await net.stop_worker()
return result
replays = asyncio_utils.get_or_create_event_loop().run_until_complete(run_games())
print(f"Worker played {num_games} game(s). Time taken: {time.time() - start} seconds.")
return replays
```
#### File: worker/distributed/tensorize_batch.py
```python
from typing import Tuple, List, Optional
import torch
from hearthstone.simulator.agent.actions import Action
from hearthstone.training.common.state_encoding import EncodedActionSet, State
from hearthstone.training.pytorch.replay import ActorCriticGameStepDebugInfo
def _tensorize_batch(batch: List[Tuple[State, EncodedActionSet, Optional[List[Action]]]],
device: torch.device) -> Tuple[
State, EncodedActionSet, Optional[List[Action]]]:
player_tensor = torch.cat([b[0].player_tensor for b in batch], dim=0).detach()
cards_tensor = torch.cat([b[0].cards_tensor for b in batch], dim=0).detach()
spells_tensor = torch.cat([b[0].spells_tensor for b in batch], dim=0).detach()
valid_player_actions_tensor = torch.cat(
[b[1].player_action_tensor for b in batch], dim=0).detach()
valid_card_actions_tensor = torch.cat(
[b[1].card_action_tensor for b in batch], dim=0).detach()
valid_no_target_battlecry_tensor = torch.cat([b[1].no_target_battlecry_tensor for b in batch], dim=0).detach()
valid_battlecry_target_tensor = torch.cat(
[b[1].battlecry_target_tensor for b in batch], dim=0).detach()
valid_spell_action_tensor = torch.cat(
[b[1].spell_action_tensor for b in batch], dim=0).detach()
valid_no_target_spell_action_tensor = torch.cat(
[b[1].no_target_spell_action_tensor for b in batch], dim=0).detach()
valid_store_target_spell_action_tensor = torch.cat(
[b[1].store_target_spell_action_tensor for b in batch], dim=0).detach()
valid_board_target_spell_action_tensor = torch.cat(
[b[1].board_target_spell_action_tensor for b in batch], dim=0).detach()
rearrange_phase = torch.cat([b[1].rearrange_phase for b in batch], dim=0).detach()
cards_to_rearrange = torch.cat(
[b[1].cards_to_rearrange for b in batch], dim=0).detach()
chosen_actions = None if batch[0][2] is None else [b[2] for b in batch]
return (State(player_tensor=player_tensor.to(device),
cards_tensor=cards_tensor.to(device),
spells_tensor=spells_tensor.to(device)),
EncodedActionSet(player_action_tensor=valid_player_actions_tensor,
card_action_tensor=valid_card_actions_tensor,
no_target_battlecry_tensor=valid_no_target_battlecry_tensor,
battlecry_target_tensor=valid_battlecry_target_tensor,
spell_action_tensor=valid_spell_action_tensor,
no_target_spell_action_tensor=valid_no_target_spell_action_tensor,
store_target_spell_action_tensor=valid_store_target_spell_action_tensor,
board_target_spell_action_tensor=valid_board_target_spell_action_tensor,
rearrange_phase=rearrange_phase,
cards_to_rearrange=cards_to_rearrange,
store_start=batch[0][1].store_start,
hand_start=batch[0][1].hand_start,
board_start=batch[0][1].board_start
).to(device),
chosen_actions,
)
def _untensorize_batch(batch_args: List[Tuple[State, EncodedActionSet, Optional[List[Action]]]],
output_actions: List[Action], action_log_probs: torch.Tensor, value: torch.Tensor,
debug_info: ActorCriticGameStepDebugInfo, device: torch.device) -> List[
Tuple[List[Action], torch.Tensor, torch.Tensor, ActorCriticGameStepDebugInfo]]:
result = []
i = 0
for (player_state_tensor, _, _), _, _ in batch_args:
batch_entry_size = player_state_tensor.shape[0]
result.append((output_actions[i:i + batch_entry_size],
action_log_probs[i:i + batch_entry_size].detach().to(device),
value[i:i + batch_entry_size].detach().to(device),
ActorCriticGameStepDebugInfo(
component_policy=debug_info.component_policy[i:i + batch_entry_size].detach().to(device),
permutation_logits=debug_info.permutation_logits[i:i + batch_entry_size].detach().to(device),
)
))
i += batch_entry_size
return result
```
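The helper pair above concatenates each game's tensors along the batch dimension for a single network call and then slices the outputs back apart by each entry's row count. The same idea in miniature, with made-up shapes:
```python
import torch

a = torch.zeros(2, 4)               # first "game" contributes 2 rows
b = torch.ones(3, 4)                # second "game" contributes 3 rows
batched = torch.cat([a, b], dim=0)  # shape (5, 4), as in _tensorize_batch
out = batched * 2                   # stand-in for the network's batched output

# _untensorize_batch walks the original entries and slices the same row counts back out.
i, per_game = 0, []
for entry in (a, b):
    n = entry.shape[0]
    per_game.append(out[i:i + n])
    i += n
print([t.shape for t in per_game])  # [torch.Size([2, 4]), torch.Size([3, 4])]
```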
#### File: pytorch/distributed/test_rpc_rref.py
```python
import os
import unittest
from torch import multiprocessing
from torch.distributed import rpc
from torch.distributed.rpc import RRef
def rpc_backed_options():
device_maps = {
'simulator': {0: 0},
'inference': {0: 0}
}
return rpc.TensorPipeRpcBackendOptions(device_maps=device_maps)
def run_worker(rank, num_workers: int):
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = '29500'
rpc.init_rpc('simulator', rank=rank + 1, world_size=num_workers + 1, rpc_backend_options=rpc_backed_options())
rpc.shutdown()
class ContainsRRef:
def __init__(self, rref):
self.rref = rref
def foo(self):
pass
class PytorchDistributedTests(unittest.TestCase):
def test_rpc_with_rref(self):
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = '29500'
self.process_context = multiprocessing.start_processes(
run_worker,
args=(1,),
nprocs=1,
start_method="forkserver",
join=False
)
local_object = {}
rpc.init_rpc('inference', rank=0, world_size=2, rpc_backend_options=rpc_backed_options())
sim_info = rpc.get_worker_info('simulator')
remote_object = rpc.remote(sim_info, ContainsRRef, args=(RRef(local_object),))
remote_object.rpc_async().foo()
if __name__ == '__main__':
unittest.main()
```
#### File: tests/pytorch/test_pytorch.py
```python
import unittest
import torch
from torch.distributions import Categorical
from torch.profiler import profile, record_function, ProfilerActivity
from hearthstone.simulator.core.tavern import Tavern
from hearthstone.training.pytorch.encoding.default_encoder import DefaultEncoder
from hearthstone.training.pytorch.networks.running_norm import WelfordAggregator
class PytorchTests(unittest.TestCase):
def test_encoding(self):
tavern = Tavern()
player_1 = tavern.add_player_with_hero("Dante_Kong")
player_2 = tavern.add_player_with_hero("brian")
tavern.buying_step()
player_1_encoding = DefaultEncoder().encode_state(player_1)
print(player_1_encoding)
def test_valid_actions(self):
tavern = Tavern()
player_1 = tavern.add_player_with_hero("Dante_Kong")
player_2 = tavern.add_player_with_hero("brian")
tavern.buying_step()
player_1_valid_actions = DefaultEncoder().encode_valid_actions(player_1, False)
print(player_1_valid_actions)
def test_get_stacked(self):
tensor1 = torch.tensor([1, 2, 5, 6])
tensor2 = torch.tensor([5, 6, 83, 7])
print(tensor1.size())
print(tensor1.size() + tensor2.size())
torch.Size()
# def test_gpu(self):
# tensor1 = torch.tensor([1,2,5,6])
# if torch.cuda.is_available():
# for i in range(1000):
# i_am_on_the_gpu = tensor1.cuda()
# print("put some stuff on the GPU")
def test_sample_distribution(self):
tensor1 = torch.tensor([[1.5, 2.3, 3.8, 4.1],
[0.1, 0.2, 0.3, 0.4]])
m = Categorical(tensor1)
samp = m.sample()
print(samp)
prob = tensor1.gather(1, torch.tensor([[1, 3], [2, 3]]))
print(prob)
other = tensor1.gather(0, torch.tensor([[0, 1, 0, 0], [1, 0, 1, 1]]))
print(other)
def test_welford_aggregator(self):
agg = WelfordAggregator(torch.Size())
data1 = torch.tensor([1., 2, 3, 4])
data2 = torch.tensor([5., 6, 7, 8])
data3 = torch.tensor([5., -1, -5, 8])
agg.update(data1)
agg.update(data2)
agg.update(data3)
combined = torch.cat([data1, data2, data3])
self.assertAlmostEqual(agg.mean().item(), combined.mean().item())
self.assertAlmostEqual(agg.stdev().item(), torch.std(combined, unbiased=False).item(), 6)
def test_cuda_memory_profiler(self):
with profile(activities=[ProfilerActivity.CUDA], profile_memory=True, record_shapes=True) as prof:
with record_function("model_inference"):
pass
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "jdburche/ecs",
"score": 3
}
|
#### File: ecs/scripts/generator.py
```python
import argparse
import glob
import os
import schema_reader
from generators import intermediate_files
from generators import csv_generator
from generators import es_template
from generators import beats
from generators import asciidoc_fields
from generators import ecs_helpers
def main():
args = argument_parser()
ecs_version = read_version()
print('Running generator. ECS version ' + ecs_version)
# Load the default schemas
print('Loading default schemas')
(nested, flat) = schema_reader.load_schemas()
# Maybe load user specified directory of schemas
if args.include:
include_glob = os.path.join(args.include, '*.yml')
print('Loading user defined schemas: {0}'.format(include_glob))
(custom_nested, custom_flat) = schema_reader.load_schemas(sorted(glob.glob(include_glob)))
# Merge without allowing user schemas to overwrite default schemas
nested = ecs_helpers.safe_merge_dicts(nested, custom_nested)
flat = ecs_helpers.safe_merge_dicts(flat, custom_flat)
intermediate_files.generate(nested, flat)
if args.intermediate_only:
exit()
csv_generator.generate(flat, ecs_version)
es_template.generate(flat, ecs_version)
beats.generate(nested, ecs_version)
asciidoc_fields.generate(nested, flat, ecs_version)
def argument_parser():
parser = argparse.ArgumentParser()
parser.add_argument('--intermediate-only', action='store_true',
help='generate intermediary files only')
parser.add_argument('--include', action='store',
help='include user specified directory of custom field definitions')
return parser.parse_args()
def read_version(file='version'):
with open(file, 'r') as infile:
return infile.read().rstrip()
if __name__ == '__main__':
main()
```
|
{
"source": "jdburnet/impref",
"score": 3
}
|
#### File: python/tests/test_slinked_list.py
```python
import pytest
from .. import slinked_list as sll
def test_node_str():
assert str(sll.Node(1, None)) == "1"
@pytest.fixture
def linkedlist():
"""returns an empty linked list"""
return sll.SLinkedList()
def test_is_empty(linkedlist):
assert linkedlist.is_empty()
def test_insert(linkedlist):
linkedlist.insert(1)
assert linkedlist.head.value == 1
def test_inserts_to_front(linkedlist):
linkedlist.insert(1)
linkedlist.insert(2)
assert linkedlist.head.value == 2
def test_remove_from_empty(linkedlist):
assert not linkedlist.remove(1)
def test_remove_head(linkedlist):
linkedlist.insert(1)
assert linkedlist.remove(1)
def test_remove_from_body(linkedlist):
linkedlist.insert(1)
linkedlist.insert(2)
linkedlist.insert(3)
assert linkedlist.remove(1)
def test_remove_removes(linkedlist):
linkedlist.insert(1)
linkedlist.insert(2)
linkedlist.insert(3)
linkedlist.remove(2)
assert linkedlist.head.next.value == 1
def test_remove_key_does_not_exist(linkedlist):
linkedlist.insert(1)
linkedlist.insert(2)
assert not linkedlist.remove(4)
def test_linked_list_str(linkedlist):
linkedlist.insert(1)
linkedlist.insert(2)
linkedlist.insert(3)
assert str(linkedlist) == "[3, 2, 1]"
def test_empty_linked_list_str(linkedlist):
assert str(linkedlist) == "[]"
```
|
{
"source": "jdc91/competitive-programming",
"score": 4
}
|
#### File: competitive-programming/grab/hours.py
```python
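# Parking cost model: a fixed entrance fee of 2, plus 3 for the first started
# hour and 4 for each additional started hour.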
def getMinuteEffect(leaveMM,entranceMM):
minuteDiff = leaveMM - entranceMM
if(minuteDiff > 0):
return 1
else :
return -1
def getCost(entranceHH, leaveHH,minuteEffect):
cost = 2
hourDiff = leaveHH - entranceHH
if(minuteEffect >0):
hourDiff += 1
if (hourDiff == 0):
return cost
elif (hourDiff == 1):
return cost + 3
else:
return cost + 3 + 4 * (hourDiff - 1)
def solution(E, L):
entranceHH = int(E.split(":")[0])
entranceMM = int(E.split(":")[1])
leaveHH = int(L.split(":")[0])
leaveMM = int(L.split(":")[1])
minuteEffect = getMinuteEffect(leaveMM,entranceMM)
return getCost(entranceHH, leaveHH, minuteEffect)
E="09:45"
L="11:00"
print(solution(E,L))
```
|
{
"source": "jdcanas/minesweeper",
"score": 3
}
|
#### File: jdcanas/minesweeper/main.py
```python
from game.Board import Board
from game.Coordinate import Coordinate
def main():
    # Board dimensions and mine count are not defined in this snippet; the values below are assumed placeholders.
    width, height, mines = 10, 10, 10
    sizeCoord = Coordinate(width, height)
board = Board(sizeCoord, mines)
board.initBoard()
board.printBoard()
if __name__ == '__main__':
main()
```
#### File: minesweeper/ui/colors.py
```python
from game.Cell import Cell
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
GREEN = (0, 255, 0)
RED = (255, 0, 0)
YELLOW = (255, 255, 0)
GRAY = (211, 211, 211)
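# Maps a cell's state to its display color: revealed safe cells are gray,
# revealed mines red, flagged cells yellow, and hidden cells white.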
def getCellColor(cell: Cell):
if cell.isFlipped and not cell.isMine:
color = GRAY
elif cell.isFlipped and cell.isMine:
color = RED
elif cell.isFlagged:
color = YELLOW
else:
color = WHITE
return color
```
|
{
"source": "jdcaperon/AuthOut",
"score": 3
}
|
#### File: server/tests/test_child.py
```python
import requests
def test_get_children():
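    # Hits the live AuthOut API and checks that every child record exposes the expected fields.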
r = requests.get('https://deco3801.wisebaldone.com/api/child')
assert r.status_code == 200
data = r.json()
assert len(data) != 0
for child in data:
assert 'id' in child
assert 'first_name' in child
assert 'last_name' in child
assert 'date_of_birth' in child
assert 'status' in child
# get this individual child through the specified endpoint
w = requests.get('https://deco3801.wisebaldone.com/api/child/{}'.format(child['id']))
assert w.status_code == 200
assert w.json()['id'] == child['id']
def test_post_invalid_child():
    # The data field does not contain all the necessary information.
data = {'first_name': 'hello'}
r = requests.post('https://deco3801.wisebaldone.com/api/child/', json=data)
assert r.status_code != 200
```
#### File: server/tests/test_entry.py
```python
import requests
def test_get_entries():
r = requests.get('https://deco3801.wisebaldone.com/api/entry')
assert r.status_code == 200
data = r.json()
assert 'signed_in' in data
assert 'signed_out' in data
assert 'entries' in data
def test_get_entry_query():
send = {'id': 0, 'lower': '1/1/2018', 'upper': '2/1/2018'}
r = requests.post('https://deco3801.wisebaldone.com/api/entry/query', json=send)
assert r.status_code == 200
data = r.json()
assert 'entries' in data
def test_get_entry_stats():
send = {'lower': '1/1/2018', 'upper': '2/1/2018'}
r = requests.post('https://deco3801.wisebaldone.com/api/entry/stats', json=send)
assert r.status_code == 200
data = r.json()
assert 'days' in data
assert len(data['days']) == 3
```
#### File: server/tests/test_parent.py
```python
import requests
def test_get_parents():
r = requests.get('https://deco3801.wisebaldone.com/api/parent')
assert r.status_code == 200
data = r.json()
assert len(data) != 0
for parent in data:
assert 'id' in parent
assert 'email' in parent
assert 'first_name' in parent
assert 'last_name' in parent
assert 'date_of_birth' in parent
assert 'mobile_number' in parent
assert 'children' in parent
assert 'trusted_children' in parent
# get this individual parent through the specified endpoint
w = requests.get('https://deco3801.wisebaldone.com/api/parent/{}'.format(parent['id']))
assert w.status_code == 200
assert w.json()['id'] == parent['id']
x = requests.get('https://deco3801.wisebaldone.com/api/parent/{}/children'.format(parent['id']))
assert x.status_code == 200
print(x.json()['children'])
assert len(x.json()['children']) == len(parent['children'])
z = requests.get('https://deco3801.wisebaldone.com/api/parent/{}/children/trusted'.format(parent['id']))
assert z.status_code == 200
print(z.json()['children'])
assert len(z.json()['children']) == len(parent['trusted_children'])
def test_post_invalid_parent():
    # The data field does not contain all the necessary information.
data = {'email': '<EMAIL>'}
r = requests.post('https://deco3801.wisebaldone.com/api/parent/', json=data)
assert r.status_code == 400
```
|
{
"source": "jdcarpinelli/dungeons",
"score": 3
}
|
#### File: dungeons/assistant/bot.py
```python
import sys, os, time, random, datetime
import discord
from discord.ext import commands
from dotenv import load_dotenv
import dice
# Imports from shadowedlucario/oghma:46128dc:bot.py
from query import *
import requests
import json
# Load token, server name from local file
load_dotenv()
TOKEN = os.getenv('DISCORD_TOKEN')
GUILD = os.getenv('DISCORD_GUILD')
TOP_LEVEL_PATH = os.getenv('TOP_LEVEL_PATH')
AUTHOR = os.getenv('AUTHOR')
# Bot invalid command messages
INVALID_ROLL_CMD = \
'Whoops! The roll command wasn\'t used correctly.\n' \
'Try using the same format as the examples in "!help roll".'
INVALID_TELL_CMD = \
'Whoops! The tell command wasn\'t used correctly.\n' \
'Try using the same format as the examples in "!help tell".'
INVALID_TELL_MSG = \
'This command requires a non-blank message.'
INVALID_TELL_RECIPIENT = \
'The user you requested was not found in the server.'
INTERNAL_BUG = \
f'Congrats! That command you just sent resulted in an internal bug! ' \
f'Sorry about that, this was {AUTHOR}\'s first attempt at a Bot. ' \
f'Sending {AUTHOR} a DM with the command you sent would be really helpful!'
## Helper functions
# Returns timestampt string for log messages
def get_timestamp():
return str(int(time.time()*10e3))
# Create bot
bot = commands.Bot(command_prefix='!', disable_everyone=False)
# On startup
@bot.event
async def on_ready():
guild = discord.utils.get(bot.guilds, name=GUILD)
if guild is not None:
print('Connection with guild established!')
print(f'Bot username: {bot.user}')
print(f'Guild name: {guild.name}')
# On event error
@bot.event
async def on_error(event, *args, **kwargs):
with open(
TOP_LEVEL_PATH + '/assistant/logs/errors/err' + get_timestamp() + '.log',
'a'
) as f:
if event == 'on_message':
f.write(f'Unhandled message: {args[0]}\n')
else:
raise
# On command error
@bot.event
async def on_command_error(ctx, error):
# Print to stderr
print('\n\n' + INTERNAL_BUG + '\n\n')
# Log real error
with open(
TOP_LEVEL_PATH + '/assistant/logs/command_errors/err' + \
get_timestamp() + '.log',
'a'
) as err_file:
err_file.write(
f'Author: {ctx.author}\n\n'
f'Message Metadata: {ctx.message}\n\n'
f'Error: {str(error)}'
)
print('Error logged to ', err_file.name)
await ctx.send(INTERNAL_BUG)
# Print intro message
@bot.command(
name='intro',
help='Responds with Dnd-Assistant Introduction.'
)
async def intro(ctx, *args):
# Ignore any arguments
embed = discord.Embed(
title='Hello, meet DnD-Assistant!',
description= \
f'The primary feature is rolling dice, '
f'but more features will be added soon. '
f'Let {AUTHOR} know if you have any '
f'features you want added!\n\n'
f'You can run DnD-Assistant\'s commands '
f'by typing "!" immediately followed by '
f'the command. For example, to list all '
f'possible commands, enter "!help". To '
f'get help with a particular command, like '
f'the "roll" command, enter "!help roll". '
f'Finally, to roll three 6-sided die, enter '
f'"!roll 3d6".\n\n'
f'If you\'re interested, you can check out '
f'the source code at https://github.com/cadojo/dungeons.',
color=0x000000)
# Roll command
embed.add_field(
name='Command: roll',
value= \
'Rolls 4, 6, 8, 10, 12, or 20 sided die.\n'
'Usage: !roll 20, !roll 3d6, !r 2d20, etc.',
inline=False
)
# Help command
embed.add_field(
name='Command: help',
value= \
'List all possible DnD-Assistant commands, or '
'get help with one specific command.\n'
'Usage: !help, or !help roll, !help r, !help intro, etc.',
inline=False
)
# Intro command
embed.add_field(
name='Command: intro',
value= \
'Print out this introduction!\n'
'Usage: !intro',
inline=False
)
await ctx.send(embed=embed)
# Roll dice
@bot.command(
name='roll',
aliases=['r'],
help='Rolls 4, 6, 8, 10, 12, or 20 sided die.\n\n'
'Examples:\n'
'Roll a single 20-sided die:\t\t!roll 20\n'
'Roll three 6-sided die:\t\t\t!roll 3d6\n'
'"!r" serves as a shortcut for "!roll:\t!r 20\n')
async def roll(ctx, *args):
success, msg = dice.roll_request(args)
if success:
await ctx.send('Roll returned: ' + str(msg))
else:
await ctx.send(INVALID_ROLL_CMD + '\n' + str(msg))
# Relay a message
@bot.command(
name = 'tell',
help = \
f'Relay a message to someone else on this server.\n\n'
f'Examples:\n'
f'Tell {AUTHOR} have a great day: !tell @jodoca have a great day!'
)
async def tell(ctx, recipient: str, *message):
## Argument checking
# Usage:
# !tell @user message without any quotes
guild = discord.utils.get(bot.guilds, name=GUILD)
if guild is None:
await ctx.send(INTERNAL_BUG)
return
## Argument checking
# Re-construct message
msg = ''
for m in message:
msg += m + ' '
# Recipient and message should not be empty
if '@' not in recipient \
or recipient == '' \
or msg == '':
        await ctx.send(INVALID_TELL_CMD + '\n' + INVALID_TELL_MSG)
        return
# Check if recipient is @everyone or a user
all_recipients = []
if recipient == '@everyone':
all_recipients = [user for user in guild.members if user != bot.user]
else:
# Remove special characters, left with id or name
recipient_parsed = recipient\
.replace('@','')\
.replace('<','')\
.replace('>','')\
.replace('!','')
for user in [user for user in guild.members if user != bot.user]:
if (recipient_parsed == user.name) \
or (recipient_parsed == str(user.id)):
all_recipients.append(user)
if len(all_recipients) == 0:
await ctx.send(INVALID_TELL_RECIPIENT)
return
## Context checking
# If command in DM, DM recipient
if ctx.message.channel.type == discord.ChannelType.private:
for user in all_recipients:
await user.send('<@!' + str(ctx.author.id) + '> says: ' + msg)
await ctx.send('Sent!')
return
# Otherwise, just post wherever this was posted
else:
recipient_str = ''
for user in all_recipients:
recipient_str += ('<@!' + str(user.id) + '> ')
await ctx.send(
f'Hey {recipient_str}, {ctx.author.name} says: {msg}'
)
return
### Bot commands from shadowedlucario/oghma
###
# FUNC NAME: ?search [ENTITY]
# FUNC DESC: Queries the Open5e search API, basically searches the whole thing for the ENTITY.
# ENTITY: The DND entity you wish to get information on.
# FUNC TYPE: Command
###
@bot.command(
name='search',
    help='Queries the Open5e API to get information on the entity.',
usage='?search [ENTITY]',
aliases=["sea", "s", "S"]
)
async def search(ctx, *args):
print(f"Executing: ?search {args}")
# Import & reset globals
global partialMatch
partialMatch = False
# Verify arg length isn't over limits
if len(args) >= 201:
argumentsEmbed = discord.Embed(
color=discord.Colour.red(),
title="Invalid argument length",
description="This command does not support more than 200 words in a single message. Try splitting up your query."
)
argumentsEmbed.set_thumbnail(url="https://i.imgur.com/j3OoT8F.png")
return await ctx.send(embed=argumentsEmbed)
# Send directory contents if no search term given
if len(args) <= 0:
await ctx.send(embed=discord.Embed(
color=discord.Colour.blue(),
title="Searching...",
description="This might take a few seconds!"
))
# Get objects from directory, store in txt file
directoryRequest = requests.get("https://api.open5e.com/search/?format=json&limit=10000")
if directoryRequest.status_code != 200:
return await ctx.send(embed=codeError(
directoryRequest.status_code,
"https://api.open5e.com/search/?format=json&limit=10000"
)
)
# Generate a unique filename and write to it
entityFileName = generateFileName("entsearch")
entityFile = open(entityFileName, "a+")
for entity in directoryRequest.json()["results"]:
if "title" in entity.keys():
entityFile.write(f"{ entity['title'] }\n")
else:
entityFile.write(f"{ entity['name'] }\n")
entityFile.close()
# Send embed notifying start of the spam stream
detailsEmbed = discord.Embed(
colour=discord.Colour.orange(),
title=f"See `{ entityFileName }` for all searchable entities in this endpoint",
description="Due to discord charecter limits regarding embeds, the results have to be sent in a file. Yes I know this is far from ideal but it's the best I can do!"
)
detailsEmbed.set_thumbnail(url="https://i.imgur.com/obEXyeX.png")
await ctx.send(embed=detailsEmbed)
# Send entites file
return await ctx.send(file=discord.File(entityFileName))
# Filter input to remove whitespaces and set lowercase
filteredInput = "".join(args).lower()
# Search API
await ctx.send(embed=discord.Embed(
color=discord.Colour.blue(),
title=f"Searching for { filteredInput }...",
description="This might take a few seconds!"
))
# Use first word to narrow search results down for quicker response on some directories
match = requestOpen5e(f"https://api.open5e.com/search/?format=json&limit=10000&text={ str(args[0]) }", filteredInput, True)
# An API Request failed
if isinstance(match, dict) and "code" in match.keys():
return await ctx.send(embed=codeError(match["code"], match["query"]))
# Searching algorithm hit an invalid object
elif match == "UNKNOWN":
unknownMatchEmbed = discord.Embed(
colour=discord.Colour.red(),
title="ERROR",
description="I found an entity in the API database that doesn't contain a `name` or `docuement` attribute. Please report this to https://github.com/shadowedlucario/oghma/issues"
)
unknownMatchEmbed.set_thumbnail(url="https://i.imgur.com/j3OoT8F.png")
return await ctx.send(embed=unknownMatchEmbed)
# No entity was found
elif match == None:
noMatchEmbed = discord.Embed(
colour=discord.Colour.orange(),
title="ERROR",
description=f"No matches found for **{ filteredInput }** in the search endpoint"
)
noMatchEmbed.set_thumbnail(url="https://i.imgur.com/obEXyeX.png")
return await ctx.send(embed=noMatchEmbed)
# Otherwise, construct & send responses
else:
responses = constructResponse(args, match["route"], match["matchedObj"])
for response in responses:
if isinstance(response, discord.Embed):
                # Set a thumbnail for relevant embeds and on successful Scryfall request, overwriting all other thumbnail setup
image = requestScryfall(args, False)
if (not isinstance(image, int)): response.set_thumbnail(url=image)
# Note partial match in footer of embed
if partialMatch:
response.set_footer(text=f"NOTE: Your search term ({ filteredInput }) was a PARTIAL match to this entity.\nIf this isn't the entity you were expecting, try refining your search term or use ?searchdir instead")
else:
response.set_footer(text="NOTE: If this isn't the entity you were expecting, try refining your search term or use `?searchdir` instead")
print(f"SENDING EMBED: { response.title }...")
await ctx.send(embed=response)
elif ".txt" in response:
print(f"SENDING FILE: { response }...")
await ctx.send(file=discord.File(response))
###
# FUNC NAME: ?searchdir [RESOURCE] [ENTITY]
# FUNC DESC: Queries the Open5e RESOURCE API.
# RESOURCE: Resource name (i.e. spells, monsters, etc.).
# ENTITY: The DND entity you wish to get information on.
# FUNC TYPE: Command
###
@bot.command(
name='searchdir',
    help='Queries the Open5e API to get information on the entity from the specified resource.',
    usage='!searchdir [RESOURCE] [ENTITY]',
aliases=["dir", "d", "D"]
)
async def searchdir(ctx, *args):
print(f"EXECUTING: ?searchdir {args}")
# Import & reset globals
global partialMatch
partialMatch = False
# Get API Root
rootRequest = requests.get("https://api.open5e.com?format=json")
# Throw if Root request wasn't successfull
if rootRequest.status_code != 200:
return await ctx.send(embed=codeError(rootRequest.status_code, "https://api.open5e.com?format=json"))
# Remove search endpoint from list (not used in this command)
directories = list(rootRequest.json().keys())
directories.remove("search")
# Verify we have arguments
if len(args) <= 0:
usageEmbed = discord.Embed(
colour=discord.Colour.red(),
title="No directory was requested.\nUSAGE: `?searchdir [DIRECTORY] [D&D OBJECT]`",
description=f"**Available Directories**\n{ ', '.join(directories) }"
)
usageEmbed.set_thumbnail(url="https://i.imgur.com/obEXyeX.png")
return await ctx.send(embed=usageEmbed)
# Filter the dictionary input
filteredDictionary = f"{ args[0].lower() }/"
# Filter input to remove whitespaces and set lowercase
filteredInput = "".join(args[1:]).lower()
# Verify arg length isn't over limits
if len(args) >= 201:
argumentsEmbed = discord.Embed(
color=discord.Colour.red(),
title="Invalid argument length",
description="This command does not support more than 200 words in a single message. Try splitting up your query."
)
argumentsEmbed.set_thumbnail(url="https://i.imgur.com/j3OoT8F.png")
return await ctx.send(embed=argumentsEmbed)
# Verify resource exists
if directories.count(args[0]) <= 0:
noResourceEmbed = discord.Embed(
colour=discord.Colour.orange(),
title=f"Requested Directory (`{ str(args[0]) }`) is not a valid directory name",
description=f"**Available Directories**\n{ ', '.join(directories) }"
)
noResourceEmbed.set_thumbnail(url="https://i.imgur.com/obEXyeX.png")
return await ctx.send(embed=noResourceEmbed)
# Send directory contents if no search term given
if len(args) == 1:
await ctx.send(embed=discord.Embed(
color=discord.Colour.blue(),
title=f"Searching for everything having to do this { filteredDictionary.upper() }!!",
description="Sit back, this might take a minute."
))
# Get objects from directory, store in txt file
directoryRequest = requests.get(f"https://api.open5e.com/{ filteredDictionary }?format=json&limit=10000")
if directoryRequest.status_code != 200:
return await ctx.send(embed=codeError(
directoryRequest.status_code,
f"https://api.open5e.com/{ filteredDictionary }?format=json&limit=10000"
)
)
entityNames = []
for entity in directoryRequest.json()["results"]:
if "title" in entity.keys(): entityNames.append(entity['title'])
else: entityNames.append(entity['name'])
        # Keep description word count low to account for names with lots of characters
if len(entityNames) <= 200:
detailsEmbed = discord.Embed(
colour=discord.Colour.orange(),
title="All searchable entities in this endpoint",
description="\n".join(entityNames)
)
detailsEmbed.set_thumbnail(url="https://i.imgur.com/obEXyeX.png")
if "search" in filteredDictionary:
detailsEmbed.set_footer(text="NOTE: The `search` endpoint is not searchable with `?searchdir`. Use `?search` instead for this.")
return await ctx.send(embed=detailsEmbed)
# Generate a unique filename and write to it
entityDirFileName = generateFileName("entsearchdir")
entityFile = open(entityDirFileName, "a+")
entityFile.write("\n".join(entityNames))
entityFile.close()
# Send embed notifying start of the spam stream
detailsEmbed = discord.Embed(
colour=discord.Colour.orange(),
title=f"See `{ entityDirFileName }` for all searchable entities in this endpoint",
description="Due to discord charecter limits regarding embeds, the results have to be sent in a file. Yes I know this is far from ideal but it's the best I can do!"
)
detailsEmbed.set_thumbnail(url="https://i.imgur.com/obEXyeX.png")
if "search" in filteredDictionary:
detailsEmbed.set_footer(text="NOTE: The `search` endpoint is not searchable with `?searchdir`. Use `?search` instead for this.")
await ctx.send(embed=detailsEmbed)
# Send entites file
return await ctx.send(file=discord.File(entityDirFileName))
# search/ endpoint is best used with the dedicated ?search command
if "search" in filteredDictionary:
# Remove search endpoint from list
directories = list(rootRequest.json().keys())
directories.remove("search")
searchEmbed = discord.Embed(
colour=discord.Colour.orange(),
title=f"Requested Directory (`{ str(args[0]) }`) is not a valid directory name",
description=f"**Available Directories**\n{ ', '.join(directories) }"
)
searchEmbed.add_field(name="NOTE", value="Use `?search` for searching the `search/` directory. This has been done to cut down on parsing errors.")
searchEmbed.set_thumbnail(url="https://i.imgur.com/obEXyeX.png")
return await ctx.send(embed=searchEmbed)
# Search API
await ctx.send(embed=discord.Embed(
color=discord.Colour.blue(),
title=f"Searching all { filteredDictionary.upper() } for { filteredInput }...",
description="This might take a few seconds!"
))
# Determine filter type (search can only be used for some endpoints)
filterType = "text"
if args[0] in searchParamEndpoints: filterType = "search"
# Use first word to narrow search results down for quicker response on some directories
match = requestOpen5e(
f"https://api.open5e.com/{ filteredDictionary }?format=json&limit=10000&{ filterType }={ str(args[1]) }",
filteredInput,
False
)
# An API Request failed
if isinstance(match, dict) and "code" in match.keys():
        return await ctx.send(embed=codeError(match["code"], match["query"]))
# Searching algorithm hit an invalid object
elif match == "UNKNOWN":
unknownMatchEmbed = discord.Embed(
colour=discord.Colour.red(),
title="ERROR",
description="I found an entity in the API database that doesn't contain a `name` or `docuement` attribute. Please report this to https://github.com/shadowedlucario/oghma/issues"
)
unknownMatchEmbed.set_thumbnail(url="https://i.imgur.com/j3OoT8F.png")
return await ctx.send(embed=unknownMatchEmbed)
# No entity was found
elif match == None:
noMatchEmbed = discord.Embed(
colour=discord.Colour.orange(),
title="ERROR",
description=f"No matches found for **{ filteredInput.upper() }** in the { filteredDictionary } endpoint"
)
noMatchEmbed.set_thumbnail(url="https://i.imgur.com/obEXyeX.png")
return await ctx.send(embed=noMatchEmbed)
# Otherwise, construct & send responses
else:
responses = constructResponse(args, filteredDictionary, match)
for response in responses:
if isinstance(response, discord.Embed):
                # Set a thumbnail for relevant embeds and on successful Scryfall request, overwrites other thumbnail setup
image = requestScryfall(args, True)
if (not isinstance(image, int)): response.set_thumbnail(url=image)
# Note partial match in footer of embed
if partialMatch:
response.set_footer(text=f"NOTE: Your search term ({ filteredInput }) was a PARTIAL match to this entity.\nIf this isn't the entity you were expecting, try refining your search term")
print(f"SENDING EMBED: { response.title }...")
await ctx.send(embed=response)
elif ".txt" in response:
print(f"SENDING FILE: { response }...")
await ctx.send(file=discord.File(response))
if __name__ == '__main__':
bot.run(TOKEN)
```
#### File: dungeons/assistant/query.py
```python
import requests
import json
import discord
import random
partialMatch = False
searchParamEndpoints = ["spells", "monsters", "magicitems", "weapons"]
###
# FUNC NAME: generateFileName
# FUNC DESC: Generates a filename using type of file and random number
# FUNC TYPE: Function
###
def generateFileName(fileType): return f"{ fileType }-{ str(random.randrange(1,1000000)) }.txt"
###
# FUNC NAME: codeError
# FUNC DESC: Sends an embed informing the user that there has been an API request failure
# FUNC TYPE: Error
###
def codeError(statusCode, query):
codeEmbed = discord.Embed(
colour=discord.Colour.red(),
title=f"ERROR - API Request FAILED. Status Code: **{ str(statusCode) }**",
description=f"Query: { query }"
)
codeEmbed.add_field(
name="For more idea on what went wrong:",
value="See status codes at https://www.django-rest-framework.org/api-guide/status-codes/",
inline=False
)
codeEmbed.set_thumbnail(url="https://i.imgur.com/j3OoT8F.png")
return codeEmbed
###
# FUNC NAME: searchResponse
# FUNC DESC: Searches the API response for the user input. Returns None if nothing was found
# FUNC TYPE: Function
###
def searchResponse(responseResults, filteredInput):
# Sets entity name/title to lowercase and removes spaces
def parse(entityHeader): return entityHeader.replace(" ", "").lower()
global partialMatch
match = None
# First, look for an exact match after parsing
for entity in responseResults:
# Documents don't have a name attribute
if "title" in entity:
            # Has to be in its own "if" to avoid KeyErrors
if parse(entity["title"]) == filteredInput:
match = entity
break
elif "name" in entity:
if parse(entity["name"]) == filteredInput:
match = entity
break
else: match = "UNKNOWN"
# Now try partially matching the entity (i.e. bluedragon will match adultbluedragon here)
if match == None or match == "UNKNOWN":
for entity in responseResults:
if "title" in entity:
if filteredInput in parse(entity["title"]):
partialMatch = True
match = entity
break
elif "name" in entity:
if filteredInput in parse(entity["name"]):
partialMatch = True
match = entity
break
else: match = "UNKNOWN"
return match
###
# FUNC NAME: requestScryfall
# FUNC DESC: Queries the Scryfall API to obtain a thumbnail image.
# FUNC TYPE: Function
###
def requestScryfall(searchTerm, searchdir):
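    # Queries Scryfall with the full search term first and falls back to a
    # single-word query on a 404; returns the art_crop image URL on success,
    # otherwise the failing status code.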
scryfallRequest = requests.get(f"https://api.scryfall.com/cards/search?q={ ' '.join(searchTerm) }&include_extras=true&include_multilingual=true&include_variations=true")
# Try again with the first arg if nothing was found
if scryfallRequest.status_code == 404:
searchWord = searchTerm[0]
if searchdir: searchWord = searchTerm[1]
scryfallWordRequest = requests.get(f"https://api.scryfall.com/cards/search?q={ searchWord }&include_extras=true&include_multilingual=true&include_variations=true")
if scryfallWordRequest.status_code != 200: return scryfallWordRequest.status_code
else: return scryfallWordRequest.json()["data"][0]["image_uris"]["art_crop"]
# Return code if API request failed
elif scryfallRequest.status_code != 200: return scryfallRequest.status_code
# Otherwise, return the cropped image url
else: return scryfallRequest.json()["data"][0]["image_uris"]["art_crop"]
###
# FUNC NAME: requestOpen5e
# FUNC DESC: Queries the Open5e API.
# FUNC TYPE: Function
###
def requestOpen5e(query, filteredInput, wideSearch):
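    # wideSearch=True means the query came from the generic /search endpoint, so
    # the match only identifies a route; a second request fetches the full object.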
# API Request
request = requests.get(query)
    # Return code if not successful
if request.status_code != 200: return {"code": request.status_code, "query": query}
# Iterate through the results
output = searchResponse(request.json()["results"], filteredInput)
if output == None: return output
elif output == "UNKNOWN": return "UNKNOWN"
# Find resource object if coming from search endpoint
elif wideSearch:
# Request resource using the first word of the name to filter results
route = output["route"]
# Determine filter type (search can only be used for some endpoints)
filterType = "text"
if route in searchParamEndpoints: filterType = "search"
if "title" in output:
resourceRequest = requests.get(
f"https://api.open5e.com/{ route }?format=json&limit=10000&{ filterType }={ output['title'].split()[0] }"
)
else:
resourceRequest = requests.get(
f"https://api.open5e.com/{ route }?format=json&limit=10000&{ filterType }={ output['name'].split()[0] }"
)
        # Return code if not successful
if resourceRequest.status_code != 200:
return {
"code": resourceRequest.status_code,
"query": f"https://api.open5e.com/{ route }?format=json&limit=10000&search={ output['name'].split()[0] }"
}
# Search response again for the actual object
resourceOutput = searchResponse(resourceRequest.json()["results"], filteredInput)
if resourceOutput == "UNKNOWN": return "UNKNOWN"
return {"route": route, "matchedObj": resourceOutput}
# If already got the resource object, just return it
else: return output
###
# FUNC NAME: constructResponse
# FUNC DESC: Constructs embed responses from the API object.
# FUNC TYPE: Function
###
def constructResponse(args, route, matchedObj):
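    # Builds one or more Discord embeds depending on the route; descriptions that
    # exceed Discord's limits are split into extra fields or written to a .txt
    # file that is appended to the responses list.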
responses = []
# Document
if "document" in route:
# Get document link
docLink = matchedObj['url']
if "http" not in docLink: docLink = f"http://{ matchedObj['url'] }"
if len(matchedObj["desc"]) >= 2048:
documentEmbed = discord.Embed(
colour=discord.Colour.green(),
title=f"{ matchedObj['title'] } (DOCUMENT)",
description=matchedObj["desc"][:2047],
url=docLink
)
documentEmbed.add_field(name="Description Continued...", value=matchedObj["desc"][2048:])
else:
documentEmbed = discord.Embed(
colour=discord.Colour.green(),
title=f"{ matchedObj['title'] } (DOCUMENT)",
description=matchedObj["desc"],
url=docLink
)
documentEmbed.add_field(name="Authors", value=matchedObj["author"], inline=False)
documentEmbed.add_field(name="Link", value=matchedObj["url"], inline=True)
documentEmbed.add_field(name="Version Number", value=matchedObj["version"], inline=True)
documentEmbed.add_field(name="Copyright", value=matchedObj["copyright"], inline=False)
documentEmbed.set_thumbnail(url="https://i.imgur.com/lnkhxCe.jpg")
responses.append(documentEmbed)
# Spell
elif "spell" in route:
spellLink = f"https://open5e.com/spells/{matchedObj['slug']}/"
if len(matchedObj["desc"]) >= 2048:
spellEmbed = discord.Embed(
colour=discord.Colour.green(),
title=f"{ matchedObj['name'] } (SPELL)",
description=matchedObj["desc"][:2047],
url=spellLink
)
spellEmbed.add_field(name="Description Continued...", value=matchedObj["desc"][2048:], inline=False)
else:
spellEmbed = discord.Embed(
colour=discord.Colour.green(),
title=matchedObj["name"],
description=f"{ matchedObj['desc'] } (SPELL)",
url=spellLink
)
if matchedObj["higher_level"] != "":
spellEmbed.add_field(name="Higher Level", value=matchedObj["higher_level"], inline=False)
spellEmbed.add_field(name="School", value=matchedObj["school"], inline=False)
spellEmbed.add_field(name="Level", value=matchedObj["level"], inline=True)
spellEmbed.add_field(name="Duration", value=matchedObj["duration"], inline=True)
spellEmbed.add_field(name="Casting Time", value=matchedObj["casting_time"], inline=True)
spellEmbed.add_field(name="Range", value=matchedObj["range"], inline=True)
spellEmbed.add_field(name="Concentration?", value=matchedObj["concentration"], inline=True)
spellEmbed.add_field(name="Ritual?", value=matchedObj["ritual"], inline=True)
spellEmbed.add_field(name="Spell Components", value=matchedObj["components"], inline=True)
if "M" in matchedObj["components"]: spellEmbed.add_field(name="Material", value=matchedObj["material"], inline=True)
spellEmbed.add_field(name="Page Number", value=matchedObj["page"], inline=True)
spellEmbed.set_thumbnail(url="https://i.imgur.com/W15EmNT.jpg")
responses.append(spellEmbed)
# Monster
elif "monster" in route:
## 1ST EMBED ##
monsterLink = f"https://open5e.com/monsters/{ matchedObj['slug'] }/"
monsterEmbedBasics = discord.Embed(
colour=discord.Colour.green(),
title=f"{ matchedObj['name'] } (MONSTER) - STATS",
description="**TYPE**: {}\n**SUBTYPE**: {}\n**ALIGNMENT**: {}\n**SIZE**: {}\n**CHALLENGE RATING**: {}".format(
matchedObj["type"] if matchedObj["type"] != "" else "None",
matchedObj["subtype"] if matchedObj["subtype"] != "" else "None",
matchedObj["alignment"] if matchedObj["alignment"] != "" else "None",
matchedObj["size"],
matchedObj["challenge_rating"]
),
url=monsterLink
)
# Str
if matchedObj["strength_save"] != None:
monsterEmbedBasics.add_field(
name="STRENGTH",
value=f"{ matchedObj['strength'] } (SAVE: **{ matchedObj['strength_save'] }**)",
inline=True
)
else:
monsterEmbedBasics.add_field(
name="STRENGTH",
value=f"{ matchedObj['strength'] }",
inline=True
)
# Dex
if matchedObj["dexterity_save"] != None:
monsterEmbedBasics.add_field(
name="DEXTERITY",
value=f"{matchedObj['dexterity']} (SAVE: **{ matchedObj['dexterity_save'] }**)",
inline=True
)
else:
monsterEmbedBasics.add_field(
name="DEXTERITY",
value=f"{ matchedObj['dexterity'] }",
inline=True
)
# Con
if matchedObj["constitution_save"] != None:
monsterEmbedBasics.add_field(
name="CONSTITUTION",
value=f"{ matchedObj['constitution'] } (SAVE: **{ matchedObj['constitution_save'] }**)",
inline=True
)
else:
monsterEmbedBasics.add_field(
name="CONSTITUTION",
value=f"{ matchedObj['constitution'] }",
inline=True
)
# Int
if matchedObj["intelligence_save"] != None:
monsterEmbedBasics.add_field(
name="INTELLIGENCE",
value=f"{ matchedObj['intelligence'] } (SAVE: **{ matchedObj['intelligence_save'] }**)",
inline=True
)
else:
monsterEmbedBasics.add_field(
name="INTELLIGENCE",
value=f"{ matchedObj['intelligence'] }",
inline=True
)
# Wis
if matchedObj["wisdom_save"] != None:
monsterEmbedBasics.add_field(
name="WISDOM",
value=f"{ matchedObj['wisdom'] } (SAVE: **{ matchedObj['wisdom_save'] }**)",
inline=True
)
else:
monsterEmbedBasics.add_field(
name="WISDOM",
value=f"{ matchedObj['wisdom'] }",
inline=True
)
# Cha
if matchedObj["charisma_save"] != None:
monsterEmbedBasics.add_field(
name="CHARISMA",
value=f"{ matchedObj['charisma'] } (SAVE: **{ matchedObj['charisma_save'] }**)",
inline=True
)
else:
monsterEmbedBasics.add_field(
name="CHARISMA",
value=f"{ matchedObj['charisma'] }",
inline=True
)
# Hit points/dice
monsterEmbedBasics.add_field(
name=f"HIT POINTS (**{ str(matchedObj['hit_points']) }**)",
value=matchedObj["hit_dice"],
inline=True
)
# Speeds
monsterSpeeds = ""
for speedType, speed in matchedObj["speed"].items():
monsterSpeeds += f"**{ speedType }**: { speed }\n"
monsterEmbedBasics.add_field(name="SPEED", value=monsterSpeeds, inline=True)
# Armour
monsterEmbedBasics.add_field(
name="ARMOUR CLASS",
value=f"{ str(matchedObj['armor_class']) } ({ matchedObj['armor_desc'] })",
inline=True
)
responses.append(monsterEmbedBasics)
## 2ND EMBED ##
monsterEmbedSkills = discord.Embed(
colour=discord.Colour.green(),
title=f"{ matchedObj['name'] } (MONSTER) - SKILLS & PROFICIENCIES",
url=monsterLink
)
# Skills & Perception
if matchedObj["skills"] != {}:
monsterSkills = ""
for skillName, skillValue in matchedObj["skills"].items():
monsterSkills += f"**{ skillName }**: { skillValue }\n"
monsterEmbedSkills.add_field(name="SKILLS", value=monsterSkills, inline=True)
# Senses
monsterEmbedSkills.add_field(name="SENSES", value=matchedObj["senses"], inline=True)
# Languages
if matchedObj["languages"] != "": monsterEmbedSkills.add_field(name="LANGUAGES", value=matchedObj["languages"], inline=True)
# Damage conditionals
monsterEmbedSkills.add_field(
name="STRENGTHS & WEAKNESSES",
value="**VULNERABLE TO:** {}\n**RESISTANT TO:** {}\n**IMMUNE TO:** {}".format(
matchedObj["damage_vulnerabilities"] if matchedObj["damage_vulnerabilities"] != "" else "Nothing",
matchedObj["damage_resistances"] if matchedObj["damage_resistances"] != "" else "Nothing",
matchedObj["damage_immunities"] if matchedObj["damage_immunities"] != "" else "Nothing"
+ ", "
+ matchedObj["condition_immunities"] if matchedObj["condition_immunities"] != None else "Nothing",
),
inline=False
)
responses.append(monsterEmbedSkills)
## 3RD EMBED ##
monsterEmbedActions = discord.Embed(
colour=discord.Colour.green(),
title=f"{ matchedObj['name'] } (MONSTER) - ACTIONS & ABILITIES",
url=monsterLink
)
# Actions
for action in matchedObj["actions"]:
monsterEmbedActions.add_field(
name=f"{ action['name'] } (ACTION)",
value=action["desc"],
inline=False
)
# Reactions
if matchedObj["reactions"] != "":
for reaction in matchedObj["reactions"]:
monsterEmbedActions.add_field(
name=f"{ reaction['name'] } (REACTION)",
value=reaction["desc"],
inline=False
)
# Specials
for special in matchedObj["special_abilities"]:
if len(special["desc"]) >= 1024:
monsterEmbedActions.add_field(
name=f"{ special['name'] } (SPECIAL)",
value=special["desc"][:1023],
inline=False
)
monsterEmbedActions.add_field(
name=f"{ special['name'] } (SPECIAL) Continued...",
value=special["desc"][1024:],
inline=False
)
else:
monsterEmbedActions.add_field(
name=f"{ special['name'] } (SPECIAL)",
value=special["desc"],
inline=False
)
# Spells
if matchedObj["spell_list"] != []:
# Function to split the spell link down (e.g. https://api.open5e.com/spells/light/), [:-1] removes trailing whitespace
def splitSpell(spellName): return spellName.replace("-", " ").split("/")[:-1]
for spell in matchedObj["spell_list"]:
spellSplit = splitSpell(spell)
monsterEmbedActions.add_field(
name=spellSplit[-1],
value=f"To see spell info, `?searchdir spells { spellSplit[-1] }`",
inline=False
)
responses.append(monsterEmbedActions)
## 4TH EMBED (only used if it has legendary actions) ##
if matchedObj["legendary_desc"] != "":
monsterEmbedLegend = discord.Embed(
colour=discord.Colour.green(),
title=f"{ matchedObj['name'] } (MONSTER): LEGENDARY ACTIONS & ABILITIES",
description=matchedObj["legendary_desc"],
url=monsterLink
)
for action in matchedObj["legendary_actions"]:
monsterEmbedLegend.add_field(
name=action["name"],
value=action["desc"],
inline=False
)
responses.append(monsterEmbedLegend)
# Author & Image for all embeds
for embed in responses:
if matchedObj["img_main"] != None: embed.set_thumbnail(url=matchedObj["img_main"])
else: embed.set_thumbnail(url="https://i.imgur.com/6HsoQ7H.jpg")
# Background
elif "background" in route:
# 1st Embed (Basics)
bckLink = "https://open5e.com/sections/backgrounds"
backgroundEmbed = discord.Embed(
colour=discord.Colour.green(),
title=f"{ matchedObj['name'] } (BACKGROUND) - BASICS",
description=matchedObj["desc"],
url=bckLink
)
# Profs
if matchedObj["tool_proficiencies"] != None:
backgroundEmbed.add_field(
name="PROFICIENCIES",
value=f"**SKILLS**: { matchedObj['skill_proficiencies'] }\n**TOOLS**: { matchedObj['tool_proficiencies'] }",
inline=True
)
else:
backgroundEmbed.add_field(
name="PROFICIENCIES",
value=f"**SKILL**: { matchedObj['skill_proficiencies'] }",
inline=True
)
# Languages
if matchedObj["languages"] != None:
backgroundEmbed.add_field(name="LANGUAGES", value=matchedObj["languages"], inline=True)
# Equipment
backgroundEmbed.add_field(name="EQUIPMENT", value=matchedObj["equipment"], inline=False)
# Feature
backgroundEmbed.add_field(name=matchedObj["feature"], value=matchedObj["feature_desc"], inline=False)
responses.append(backgroundEmbed)
# 2nd Embed (feature)
backgroundFeatureEmbed = discord.Embed(
colour=discord.Colour.green(),
title=f"{ matchedObj['name'] } (BACKGROUND)\nFEATURE ({ matchedObj['feature'] })",
description=matchedObj["feature_desc"],
url=bckLink
)
responses.append(backgroundFeatureEmbed)
# 3rd Embed & File (suggested characteristics)
if matchedObj["suggested_characteristics"] != None:
if len(matchedObj["suggested_characteristics"]) <= 2047:
backgroundChars = discord.Embed(
colour=discord.Colour.green(),
title=f"{ matchedObj['name'] } (BACKGROUND): CHARECTERISTICS",
description=matchedObj["suggested_characteristics"],
url=bckLink
)
responses.append(backgroundChars)
else:
backgroundChars = discord.Embed(
colour=discord.Colour.green(),
title=f"{ matchedObj['name'] } (BACKGROUND): CHARECTERISTICS",
description=matchedObj["suggested_characteristics"][:2047],
url=bckLink
)
bckFileName = generateFileName("background")
backgroundChars.add_field(
name="LENGTH OF CHARECTERISTICS TOO LONG FOR DISCORD",
value=f"See `{ bckFileName }` for full description",
inline=False
)
responses.append(backgroundChars)
# Create characteristics.txt
characteristicsFile = open(bckFileName, "a+")
characteristicsFile.write(matchedObj["suggested_characteristics"])
characteristicsFile.close()
responses.append(bckFileName)
for response in responses:
if isinstance(response, discord.Embed):
response.set_thumbnail(url="https://i.imgur.com/GhGODan.jpg")
# Plane
elif "plane" in route:
planeEmbed = discord.Embed(
colour=discord.Colour.green(),
title=f"{ matchedObj['name'] } (PLANE)",
description=matchedObj["desc"],
url="https://open5e.com/sections/planes"
)
planeEmbed.set_thumbnail(url="https://i.imgur.com/GJk1HFh.jpg")
responses.append(planeEmbed)
# Section
elif "section" in route:
secLink = f"https://open5e.com/sections/{ matchedObj['slug'] }/"
if len(matchedObj["desc"]) >= 2048:
sectionEmbedDesc = discord.Embed(
colour=discord.Colour.green(),
title=f"{ matchedObj['name'] } (SECTION) - { matchedObj['parent'] }",
description=matchedObj["desc"][:2047],
url=secLink
)
sectionFilename = generateFileName("section")
sectionEmbedDesc.add_field(
name="LENGTH OF DESCRIPTION TOO LONG FOR DISCORD",
value=f"See `{ sectionFilename }` for full description",
inline=False
)
sectionEmbedDesc.set_thumbnail(url="https://i.imgur.com/J75S6bF.jpg")
responses.append(sectionEmbedDesc)
# Full description as a file
secDescFile = open(sectionFilename, "a+")
secDescFile.write(matchedObj["desc"])
secDescFile.close()
responses.append(sectionFilename)
else:
sectionEmbedDesc = discord.Embed(
colour=discord.Colour.green(),
title=f"{ matchedObj['name'] } (SECTION) - { matchedObj['parent'] }",
description=matchedObj["desc"],
url=secLink
)
sectionEmbedDesc.set_thumbnail(url="https://i.imgur.com/J75S6bF.jpg")
responses.append(sectionEmbedDesc)
# Feat
elif "feat" in route:
        # The Open5e website doesn't have a page for feats yet, so no URL is set
featEmbed = discord.Embed(
colour=discord.Colour.green(),
title=f"{ matchedObj['name'] } (FEAT)",
description=f"PREREQUISITES: **{ matchedObj['prerequisite'] }**"
)
featEmbed.add_field(name="DESCRIPTION", value=matchedObj["desc"], inline=False)
featEmbed.set_thumbnail(url="https://i.imgur.com/X1l7Aif.jpg")
responses.append(featEmbed)
# Condition
elif "condition" in route:
conLink = "https://open5e.com/gameplay-mechanics/conditions"
if len(matchedObj["desc"]) >= 2048:
conditionEmbed = discord.Embed(
colour=discord.Colour.green(),
title=f"{ matchedObj['name'] } (CONDITION)",
description=matchedObj["desc"][:2047],
url=conLink
)
conditionEmbed.add_field(name="DESCRIPTION continued...", value=matchedObj["desc"][2048:], inline=False)
else:
conditionEmbed = discord.Embed(
colour=discord.Colour.green(),
title=f"{ matchedObj['name'] } (CONDITION)",
description=matchedObj["desc"],
url=conLink
)
conditionEmbed.set_thumbnail(url="https://i.imgur.com/tOdL5n3.jpg")
responses.append(conditionEmbed)
# Race
elif "race" in route:
raceLink = f"https://open5e.com/races/{ matchedObj['slug'] }"
raceEmbed = discord.Embed(
colour=discord.Colour.green(),
title=f"{ matchedObj['name'] } (RACE)",
description=matchedObj["desc"],
url=raceLink
)
# Asi Description
raceEmbed.add_field(name="BENEFITS", value=matchedObj["asi_desc"], inline=False)
# Age, Alignment, Size
raceEmbed.add_field(name="AGE", value=matchedObj["age"], inline=True)
raceEmbed.add_field(name="ALIGNMENT", value=matchedObj["alignment"], inline=True)
raceEmbed.add_field(name="SIZE", value=matchedObj["size"], inline=True)
# Speeds
raceEmbed.add_field(name="SPEEDS", value=matchedObj["speed_desc"], inline=False)
# Languages
raceEmbed.add_field(name="LANGUAGES", value=matchedObj["languages"], inline=True)
# Vision buffs
if matchedObj["vision"] != "":
raceEmbed.add_field(name="VISION", value=matchedObj["vision"], inline=True)
# Traits
if matchedObj["traits"] != "":
if len(matchedObj["traits"]) >= 1024:
raceEmbed.add_field(name="TRAITS", value=matchedObj["traits"][:1023], inline=False)
raceEmbed.add_field(name="TRAITS continued...", value=matchedObj["traits"][1024:], inline=False)
else:
raceEmbed.add_field(name="TRAITS", value=matchedObj["traits"], inline=False)
raceEmbed.set_thumbnail(url="https://i.imgur.com/OUSzh8W.jpg")
responses.append(raceEmbed)
# Start new embed for any subraces
if matchedObj["subraces"] != []:
for subrace in matchedObj["subraces"]:
subraceEmbed = discord.Embed(
colour=discord.Colour.green(),
title=f"{ subrace['name'] } (Subrace of **{ matchedObj['name'] })",
description=subrace["desc"],
url=raceLink
)
# Subrace asi's
subraceEmbed.add_field(name="SUBRACE BENEFITS", value=subrace["asi_desc"], inline=False)
# Subrace traits
if subrace["traits"] != "":
if len(subrace["traits"]) >= 1024:
subraceEmbed.add_field(name="TRAITS", value=subrace["traits"][:1023], inline=False)
subraceEmbed.add_field(name="TRAITS continued...", value=subrace["traits"][1024:], inline=False)
else:
subraceEmbed.add_field(name="TRAITS", value=subrace["traits"], inline=False)
subraceEmbed.set_thumbnail(url="https://i.imgur.com/OUSzh8W.jpg")
responses.append(subraceEmbed)
# Class
elif "class" in route:
# 1st Embed & File (BASIC)
classLink = f"https://open5e.com/classes/{ matchedObj['slug'] }"
classDescEmbed = discord.Embed(
colour=discord.Colour.green(),
title=f"{ matchedObj['name'] } (CLASS): Basics",
description=matchedObj["desc"][:2047],
url=classLink
)
# Spell casting
if matchedObj["spellcasting_ability"] != "":
classDescEmbed.add_field(name="CASTING ABILITY", value=matchedObj["spellcasting_ability"], inline=False)
clsDesFileName = generateFileName("clsdescription")
clsTblFileName = generateFileName("clstable")
classDescEmbed.add_field(
name="LENGTH OF DESCRIPTION & TABLE TOO LONG FOR DISCORD",
value=f"See `{ clsDesFileName }` for full description\nSee `{ clsTblFileName }` for class table",
inline=False
)
responses.append(classDescEmbed)
# Full description as a file
descFile = open(clsDesFileName, "a+")
descFile.write(matchedObj["desc"])
descFile.close()
responses.append(clsDesFileName)
# Class table as a file
tableFile = open(clsTblFileName, "a+")
tableFile.write(matchedObj["table"])
tableFile.close()
responses.append(clsTblFileName)
# 2nd Embed (DETAILS)
classDetailsEmbed = discord.Embed(
colour=discord.Colour.green(),
title=f"{ matchedObj['name'] } (CLASS): Profs & Details",
description=f"**ARMOUR**: { matchedObj['prof_armor'] }\n**WEAPONS**: { matchedObj['prof_weapons'] }\n**TOOLS**: { matchedObj['prof_tools'] }\n**SAVE THROWS**: { matchedObj['prof_saving_throws'] }\n**SKILLS**: { matchedObj['prof_skills'] }",
url=classLink
)
classDetailsEmbed.add_field(
name="Hit points",
value=f"**Hit Dice**: { matchedObj['hit_dice'] }\n**HP at first level**: { matchedObj['hp_at_1st_level'] }\n**HP at other levels**: { matchedObj['hp_at_higher_levels'] }",
inline=False
)
# Equipment
if len(matchedObj["equipment"]) >= 1024:
classDetailsEmbed.add_field(name="EQUIPMENT", value=matchedObj["equipment"][:1023], inline=False)
classDetailsEmbed.add_field(name="EQUIPMENT continued", value=matchedObj["equipment"][1024:], inline=False)
else:
classDetailsEmbed.add_field(name="EQUIPMENT", value=matchedObj["equipment"], inline=False)
responses.append(classDetailsEmbed)
# 3rd Embed (ARCHETYPES)
if matchedObj["archetypes"] != []:
for archtype in matchedObj["archetypes"]:
archTypeEmbed = None
if len(archtype["desc"]) <= 2047:
archTypeEmbed = discord.Embed(
colour=discord.Colour.green(),
title=f"{ archtype['name'] } (ARCHETYPES)",
description=archtype["desc"],
url=classLink
)
responses.append(archTypeEmbed)
else:
archTypeEmbed = discord.Embed(
colour=discord.Colour.green(),
title=f"{ archtype['name'] } (ARCHETYPES)\n{ matchedObj['subtypes_name'] if matchedObj['subtypes_name'] != '' else 'None' } (SUBTYPE)",
description=archtype["desc"][:2047],
url=classLink
)
clsArchFileName = generateFileName("clsarchetype")
archTypeEmbed.add_field(
name="LENGTH OF DESCRIPTION TOO LONG FOR DISCORD",
value=f"See `{ clsArchFileName }` for full description",
inline=False
)
responses.append(archTypeEmbed)
archDesFile = open(clsArchFileName, "a+")
archDesFile.write(archtype["desc"])
archDesFile.close()
responses.append(clsArchFileName)
# Finish up
for response in responses:
if isinstance(response, discord.Embed):
response.set_thumbnail(url="https://i.imgur.com/Mjh6AAi.jpg")
# Magic Item
elif "magicitem" in route:
itemLink = f"https://open5e.com/magicitems/{ matchedObj['slug'] }"
if len(matchedObj["desc"]) >= 2048:
magicItemEmbed = discord.Embed(
colour=discord.Colour.green(),
title=f"{ matchedObj['name'] } (MAGIC ITEM)",
description=matchedObj["desc"][:2047],
url=itemLink
)
mIfileName = generateFileName("magicitem")
magicItemEmbed.add_field(
name="LENGTH OF DESCRIPTION TOO LONG FOR DISCORD",
value=f"See `{ mIfileName }` for full description",
inline=False
)
responses.append(magicItemEmbed)
itemFile = open(mIfileName, "a+")
itemFile.write(matchedObj["desc"])
itemFile.close()
responses.append(mIfileName)
else:
magicItemEmbed = discord.Embed(
colour=discord.Colour.green(),
title=f"{ matchedObj['name'] } (MAGIC ITEM)",
description=matchedObj["desc"],
url=itemLink
)
responses.append(magicItemEmbed)
for response in responses:
if isinstance(response, discord.Embed):
response.add_field(name="TYPE", value=matchedObj["type"], inline=True)
response.add_field(name="RARITY", value=matchedObj["rarity"], inline=True)
if matchedObj["requires_attunement"] == "requires_attunement":
response.add_field(name="ATTUNEMENT REQUIRED?", value="YES", inline=True)
else:
response.add_field(name="ATTUNEMENT REQUIRED?", value="NO", inline=True)
response.set_thumbnail(url="https://i.imgur.com/2wzBEjB.png")
# Remove this break if magicitems produces more than 1 embed in the future
break
# Weapon
elif "weapon" in route:
weaponEmbed = discord.Embed(
colour=discord.Colour.green(),
title=f"{ matchedObj['name'] } (WEAPON)",
description=f"**PROPERTIES**: { ' | '.join(matchedObj['properties']) if matchedObj['properties'] != [] else 'None' }",
url="https://open5e.com/sections/weapons"
)
weaponEmbed.add_field(
name="DAMAGE",
value=f"{ matchedObj['damage_dice'] } ({ matchedObj['damage_type'] })",
inline=True
)
weaponEmbed.add_field(name="WEIGHT", value=matchedObj["weight"], inline=True)
weaponEmbed.add_field(name="COST", value=matchedObj["cost"], inline=True)
weaponEmbed.add_field(name="CATEGORY", value=matchedObj["category"], inline=False)
weaponEmbed.set_thumbnail(url="https://i.imgur.com/pXEe4L9.png")
responses.append(weaponEmbed)
else:
global partialMatch
partialMatch = False
badObjectFilename = generateFileName("badobject")
itemFile = open(badObjectFilename, "a+")
        itemFile.write(str(matchedObj))
itemFile.close()
noRouteEmbed = discord.Embed(
colour=discord.Colour.red(),
title="The matched item's type (i.e. spell, monster, etc) was not recognised",
description=f"Please create an issue describing this failure and with the following values at https://github.com/shadowedlucario/oghma/issues\n**Input**: { args }\n**Route**: { route }\n**Troublesome Object**: SEE `{ badObjectFilename }`"
)
noRouteEmbed.set_thumbnail(url="https://i.imgur.com/j3OoT8F.png")
responses.append(noRouteEmbed)
responses.append(badObjectFilename)
return responses
```
|
{
"source": "jdcarvalho/python-focusnfe",
"score": 2
}
|
#### File: focusnfe/core/exception.py
```python
class FocusNFECoreException(Exception):
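    # Error-category constants; the relevant one is attached to the exception
    # via the 'code' keyword argument.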
EC_PROGRAMMING = 'programming_error'
EC_BAD_REQUEST = 'bad_request'
EC_FORBIDDEN = 'forbidden'
EC_NOT_FOUND = 'not_found'
EC_SERVER_ERROR = 'server_error'
code = None
def __init__(self, *args, **kwargs):
self.code = kwargs.pop('code', '')
super(FocusNFECoreException, self).__init__(*args)
```
|
{
"source": "jdcastanier/Chessboard-game",
"score": 4
}
|
#### File: jdcastanier/Chessboard-game/main.py
```python
import random
from graphics import *
# MAIN PROGRAM STARTS HERE
def main():
    # CREATES AN INITIAL MENU FOR CHOOSING THE GAME (EVEN THOUGH THERE IS ONLY ONE)
def menu():
print(f'''
BIENVENIDO AL PROGRAMA DE JUEGOS PARA DESARROLLAR
NUESTRAS HABILIDADES COGNITIVAS. POR FAVOR ESCOJA UNA
DE LAS SIGUIENTES OPCIONES:
1. IMPOSSIBLE ESCAPE: TECLEE "1".
EN CUALQUIER MOMENTO DURANTE EL JUEGO TECLEE "SALIR"
PARA TERMINAR EL JUEGO Y REGRESAR AL MENÚ PRINCIPAL.
''')
opcion = input()
if opcion == "1":
impossible_escape()
else:
opcion_1 = opcion.upper()
if opcion_1 != "SALIR":
print ('INPUT INCORRECTO')
menu()
    # STARTS THE GAME
def impossible_escape():
        # DRAWS THE COINS IN THE WINDOW DURING PLAY
def monedas_real(ventana_tablero_expo, letras, numeros):
matriz_circulos = []
for col in range(65, 416, 50):
for ren in range(65, 416, 50):
lista = []
circulo = Circle(Point(ren, col), 23)
color_num = random.randint(0, 1)
if color_num == 0:
circulo.setFill(color_rgb(247, 198, 86))
color = "amarillo"
lista.append(ren)
lista.append(col)
lista.append(color)
else:
circulo.setFill(color_rgb(129, 95, 18))
color = "cafe"
lista.append(ren)
lista.append(col)
lista.append(color)
circulo.draw(ventana_tablero_expo)
matriz_circulos.append(lista)
print(f'''
- CARCELERO: Tienes derecho a cambiar la configuración de una sola moneda.
INGRESA LA UBICACIÓN DE LA MONEDA QUE QUIERES CAMBIAR. HAZLO DE LA FORMA: A, 1
NOTARÁS QUE LA MONEDA EN LA POSICIÓN QUE ELEGISTE CAMBIA SU CONFIGURACIÓN.
''')
# matriz = [[ren, col, color]]
moneda = input()
moneda_1 = moneda.upper()
if moneda_1 == "SALIR":
menu()
lista_coord = moneda.split(", ")
num_trad = [0,1,2,3,4,5,6,7]
if (lista_coord[0] not in "ABCDEFGH") \
or (lista_coord[1] not in "12345678") :
print ('INPUT INCORRECTO')
menu()
for ren in range(len(letras)):
if lista_coord [0] == letras [ren]:
for col in range(len(numeros)):
if lista_coord [1] == numeros [col]:
coord_trad_1 = [num_trad[ren], num_trad[col]]
coord_trad_2 =(num_trad[ren]) +(num_trad[col]*8)
break
circulo_cambio = Circle(Point(matriz_circulos[coord_trad_2][0],\
matriz_circulos[coord_trad_2][1]), 23)
if matriz_circulos [coord_trad_2][2] == "cafe":
circulo_cambio.setFill(color_rgb(247, 198, 86))
matriz_circulos [coord_trad_2][2] = "amarillo"
elif matriz_circulos [coord_trad_2][2] == "amarillo":
circulo_cambio.setFill(color_rgb(129, 95, 18))
matriz_circulos [coord_trad_2][2] = "cafe"
circulo_cambio.draw(ventana_tablero_expo)
return(None)
        # TRANSLATES A POSITION GIVEN AS "A, 1" INTO PIXELS AND CALLS THE FUNCTION THAT DRAWS THE RED SQUARE
def buscar_rojo_real(ventana_tablero_expo, posicion_rojo, letras, numeros, coordenadas_llaves):
buscador_a = input()
buscador_1 = buscador_a.upper()
if buscador_1 == "SALIR":
menu()
buscador_list = buscador_a.split(", ")
num_trad = [0,1,2,3,4,5,6,7]
if (buscador_list[0] not in "ABCDEFGH") \
or (buscador_list[1] not in "12345678") :
print ('INPUT INCORRECTO')
menu()
for ren in range(len(letras)):
if buscador_list [0] == letras [ren]:
for col in range(len(numeros)):
if buscador_list [1] == numeros [col]:
coord_trad_3 = [(num_trad[ren]*50 + 40),(num_trad[col]*50 + 40)]
break
cosa = rectangulo_rojo(ventana_tablero_expo, coordenadas_llaves, letras, numeros)
if posicion_rojo[0] == coord_trad_3[0] \
and posicion_rojo[1] == coord_trad_3[1]:
print('''
- CARCELERO: ¡Felicitaciones! Han logrado descifrar la posición de las llaves. Quedan libres. ''')
else:
print('''
- CARCELERO: JAJAJAJAJA no pudieron descifrar la ubicación de las llaves, serán ejecutados.''')
        # CREATES THE GAME WINDOW DURING PLAY
def crear_ventana_real():
ventana_tablero_real = GraphWin('Tablero', 450, 450)
negro = Rectangle(Point(40, 40), Point(440, 440))
negro.setOutline(color_rgb(0, 50, 255))
negro.setWidth(3)
negro.setFill(color_rgb(0, 0, 0))
negro.draw(ventana_tablero_real)
for ren in range(40, 441, 50):
for col in range(40, 441, 50):
if ren == 40 or ren == 140 or ren == 240 or ren == 340:
if col == 90 or col == 190 or col == 290 or col == 390:
blancos(ren, col, ventana_tablero_real)
elif ren == 90 or ren == 190 or ren == 290 or ren == 390:
if col == 40 or col == 140 or col == 240 or col == 340:
blancos(ren, col, ventana_tablero_real)
letras = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']
numeros = ['1', '2', '3', '4', '5', '6', '7', '8']
indices(letras, numeros, ventana_tablero_real)
posicion_llaves_ren = random.randint(0, 7)
posicion_llaves_col = random.randint(0, 7)
coordenadas_llaves = [posicion_llaves_ren, posicion_llaves_col]
posicion_rojo = rectangulo_rojo(ventana_tablero_real, coordenadas_llaves, letras, numeros)
ventana_tablero_real.getMouse()
regresa_rectangulo(ventana_tablero_real, posicion_rojo)
monedas_real(ventana_tablero_real, letras, numeros)
print('''
- CARCELERO: Ahora puede venir el segundo prisionero para intentar adivinar en dónde se encuentran las llaves. Los prisioneros deben recordar que no pueden comunicarse.
EL SEGUNDO JUGADOR DEBE INGRESAR LA POSICIÓN EN LA QUE CREE QUE ESTÁN LAS LLAVES. LO DEBE HACER EN EL FORMATO A, 1''')
buscar_rojo_real(ventana_tablero_real, posicion_rojo, letras, numeros, coordenadas_llaves)
print (f'''
GRACIAS POR JUGAR IMPOSSIBLE ESCAPE, SI QUIERES INTENTARLO DE NUEVO, VUELVE A CORRER EL PROGRAMA.
LA SOLUCIÓN SE ENCUENTRA DISPONIBLE EN: http://datagenetics.com/blog/december12014/index.html
PROGRAMA REALIZADO POR JUAN <NAME>.
TEC DE MONTERREY.''')
ventana_tablero_real.getMouse()
ventana_tablero_real.close()
return(None)
#FUNCTION THAT DRAWS THE RANDOMLY PLACED RED SQUARE
def rectangulo_rojo(ventana_tablero_expo, coordenadas_llaves, letras, numeros):
rectangulo_rojo = Rectangle( Point(40 +(50 * coordenadas_llaves[0]),\
40 +(50 * coordenadas_llaves[1])), \
Point(90 +(50 * coordenadas_llaves[0]), \
90 +(50 * coordenadas_llaves[1])))
rectangulo_rojo.setFill(color_rgb(255, 0, 0))
rectangulo_rojo.draw(ventana_tablero_expo)
posicion_rojo = [40 +(50 * coordenadas_llaves[0]), \
40 +(50 * coordenadas_llaves[1]), \
90 +(50 * coordenadas_llaves[0]), \
90 +(50 * coordenadas_llaves[1])]
letra = letras [coordenadas_llaves[0]]
numero = numeros [coordenadas_llaves[1]]
print(f'''
Las llaves son asignadas a la posición {letra}, {numero}.
DALE CLICK AL TABLERO PARA CONTINUAR''')
return(posicion_rojo)
#TRANSLATES THE POSITION FROM THE "A, 1" FORMAT INTO PIXELS AND CALLS THE FUNCTION THAT DRAWS THE RED SQUARE; UNLIKE BUSCAR_ROJO_REAL, IT INCLUDES THE TUTORIAL PRINT.
def buscar_rojo(ventana_tablero_expo, posicion_rojo, letras, numeros, coordenadas_llaves):
buscador = input()
buscador_2 = buscador.upper()
if buscador_2 == "SALIR":
menu()
buscador_list = buscador.split(", ")
num_trad = [0,1,2,3,4,5,6,7]
if (buscador_list[0] not in "ABCDEFGH") \
or (buscador_list[1] not in "12345678") :
print ('INPUT INCORRECTO')
menu()
for ren in range(len(letras)):
if buscador_list [0] == letras [ren]:
for col in range(len(numeros)):
if buscador_list [1] == numeros [col]:
coord_trad_3 = [(num_trad[ren]*50 + 40),(num_trad[col]*50 + 40)]
break
cosa = rectangulo_rojo(ventana_tablero_expo, \
coordenadas_llaves, letras, numeros)
if posicion_rojo[0] == coord_trad_3[0] \
and posicion_rojo[1] == coord_trad_3[1]:
print('''
- CARCELERO: Una vez el que el segundo prisionero haya escogido el casillero en donde cree que se asignaron las llaves, se le revelará la posición real de las llaves, y en caso de haberlo logrado los dos quedarán salvados, caso contrario, ambos serán ejecutados.''')
else:
print('''
- CARCELERO: Una vez el que el segundo prisionero haya escogido el casillero en donde cree que se asignaron las llaves, se le revelará la posición real y en caso de haberlo logrado los dos quedarán salvados, caso contrario, ambos serán ejecutados.''')
#RESTORES THE RED SQUARE TO ITS ORIGINAL COLOR
def regresa_rectangulo(ventana_tablero_expo, posicion_rojo):
rentangulo_cambio = Rectangle(Point(posicion_rojo [0], posicion_rojo[1]), \
Point(posicion_rojo[2], posicion_rojo [3]))
if posicion_rojo [0] == 40 \
or posicion_rojo [0] == 140 \
or posicion_rojo [0] == 240 \
or posicion_rojo [0] == 340:
if posicion_rojo [1] == 90 \
or posicion_rojo [1] == 190 \
or posicion_rojo [1] == 290 \
or posicion_rojo [1] == 390:
rentangulo_cambio.setFill(color_rgb(255,255,255))
else:
rentangulo_cambio.setFill(color_rgb(0,0,0))
else:
if posicion_rojo [1] == 40 \
or posicion_rojo [1] == 140 \
or posicion_rojo [1] == 240 \
or posicion_rojo [1] == 340:
rentangulo_cambio.setFill(color_rgb(255,255,255))
else:
rentangulo_cambio.setFill(color_rgb(0,0,0))
rentangulo_cambio.draw(ventana_tablero_expo)
#PLACES THE COINS IN THE WINDOW; UNLIKE MONEDAS_REAL, IT INCLUDES THE TUTORIAL PRINT
def monedas_expo(ventana_tablero_expo, letras, numeros):
matriz_circulos = []
for col in range(65, 416, 50):
for ren in range(65, 416, 50):
lista = []
circulo = Circle(Point(ren, col), 23)
color_num = random.randint(0, 1)
if color_num == 0:
circulo.setFill(color_rgb(247, 198, 86))
color = "amarillo"
lista.append(ren)
lista.append(col)
lista.append(color)
else:
circulo.setFill(color_rgb(129, 95, 18))
color = "cafe"
lista.append(ren)
lista.append(col)
lista.append(color)
circulo.draw(ventana_tablero_expo)
matriz_circulos.append(lista)
print(f'''
- CARCELERO: Luego de asignar la llave a un casillero, colocaré una moneda en cada casillero.
La configuración de cada moneda(cara o cruz) será totalmente mi decisión. Puede ser al azar o puede ser un orden malvado apropósito con el fin de hacerlo más difícil, es mi problema.
Entonces le dejaré a quien se quedó conmigo cambiar la configuración de una sola moneda.
INGRESA LA UBICACIÓN DE LA MONEDA QUE QUIERES CAMBIAR. HAZLO DE LA FORMA: A, 1
NOTARÁS QUE LA MONEDA EN LA POSICIÓN QUE ELEGISTE CAMBIA SU CONFIGURACIÓN.
''')
# matriz = [[ren, col, color]]
moneda = input()
moneda_2 = moneda.upper()
if moneda_2 == "SALIR":
menu()
lista_coord = moneda.split(", ")
num_trad = [0,1,2,3,4,5,6,7]
if (lista_coord[0] not in "ABCDEFGH") \
or (lista_coord[1] not in "12345678") :
print ('INPUT INCORRECTO')
menu()
for ren in range(len(letras)):
if lista_coord [0] == letras [ren]:
for col in range(len(numeros)):
if lista_coord [1] == numeros [col]:
coord_trad_1 = [num_trad[ren], num_trad[col]]
coord_trad_2 =(num_trad[ren]) +(num_trad[col]*8)
break
circulo_cambio = Circle(Point(matriz_circulos[coord_trad_2][0],\
matriz_circulos[coord_trad_2][1]), 23)
if matriz_circulos [coord_trad_2][2] == "cafe":
circulo_cambio.setFill(color_rgb(247, 198, 86))
matriz_circulos [coord_trad_2][2] = "amarillo"
elif matriz_circulos [coord_trad_2][2] == "amarillo":
circulo_cambio.setFill(color_rgb(129, 95, 18))
matriz_circulos [coord_trad_2][2] = "cafe"
circulo_cambio.draw(ventana_tablero_expo)
return(None)
#CREATES THE TUTORIAL WINDOW; UNLIKE CREAR_VENTANA_REAL, IT INCLUDES THE TUTORIAL PRINTS
def crear_ventana_expo():
ventana_tablero_expo = GraphWin('Tablero', 450, 450)
negro = Rectangle(Point(40, 40), Point(440, 440))
negro.setOutline(color_rgb(0, 50, 255))
negro.setWidth(3)
negro.setFill(color_rgb(0, 0, 0))
negro.draw(ventana_tablero_expo)
for ren in range(40, 441, 50):
for col in range(40, 441, 50):
if ren == 40 or ren == 140 or ren == 240 or ren == 340:
if col == 90 or col == 190 or col == 290 or col == 390:
blancos(ren, col, ventana_tablero_expo)
elif ren == 90 or ren == 190 or ren == 290 or ren == 390:
if col == 40 or col == 140 or col == 240 or col == 340:
blancos(ren, col, ventana_tablero_expo)
letras = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']
numeros = ['1', '2', '3', '4', '5', '6', '7', '8']
indices(letras, numeros, ventana_tablero_expo)
posicion_llaves_ren = random.randint(0, 7)
posicion_llaves_col = random.randint(0, 7)
coordenadas_llaves = [posicion_llaves_ren, posicion_llaves_col]
posicion_rojo = rectangulo_rojo(ventana_tablero_expo, coordenadas_llaves, letras, numeros)
ventana_tablero_expo.getMouse()
regresa_rectangulo(ventana_tablero_expo, posicion_rojo)
monedas_expo(ventana_tablero_expo, letras, numeros)
print('''
- CARCELERO: Cuando intenten el reto de verdad, será en este momento en el que regresará el segundo prisionero; quien, sin comunicarse con el primer prisionero, sin saber cuál fue la configuración inicial del tablero, y sin saber cuál fue la moneda que modificó su compañero, deberá determinar a qué casillero se asignaron las llaves(cuadro rojo del comienzo).
EL SEGUNDO JUGADOR DEBE INGRESAR LA POSICIÓN EN LA QUE CREE QUE ESTÁN LAS LLAVES. LO DEBE HACER EN EL FORMATO A, 1''')
buscar_rojo(ventana_tablero_expo, posicion_rojo, letras, numeros, coordenadas_llaves)
return(ventana_tablero_expo)
#DRAWS THE WHITE SQUARES ON THE BOARD
def blancos(ren, col, ventana_tablero):
blanco = Rectangle(Point(col, ren), Point(col + 50, ren + 50))
blanco.setFill(color_rgb(255, 255, 255))
blanco.draw(ventana_tablero)
return(None)
#CREATES THE BOARD'S ALPHANUMERIC INDICES
def indices(lista_letras, lista_num, ventana_tablero):
i = 0
for lugar in range(65, 465, 50):
text = Text(Point(lugar, 25), lista_letras[i])
text.setTextColor(color_rgb(0, 0, 0))
text.setSize(25)
text.draw(ventana_tablero)
i += 1
i = 0
for lugar_b in range(65, 465, 50):
text_b = Text(Point(25, lugar_b), lista_num[i])
text_b.setTextColor(color_rgb(0, 0, 0))
text_b.setSize(25)
text_b.draw(ventana_tablero)
i += 1
return(None)
#CREATES THE FIRST WINDOW OF THE GAME AND THE TUTORIAL, WITHOUT CALLING THE RED SQUARE FUNCTION
def crear_ventana():
ventana_tablero = GraphWin('Tablero', 450, 450)
negro = Rectangle(Point(40, 40), Point(440, 440))
negro.setOutline(color_rgb(0, 50, 255))
negro.setWidth(3)
negro.setFill(color_rgb(0, 0, 0))
negro.draw(ventana_tablero)
for ren in range(40, 441, 50):
for col in range(40, 441, 50):
if ren == 40 or ren == 140 or ren == 240 or ren == 340:
if col == 90 or col == 190 or col == 290 or col == 390:
blancos(ren, col, ventana_tablero)
elif ren == 90 or ren == 190 or ren == 290 or ren == 390:
if col == 40 or col == 140 or col == 240 or col == 340:
blancos(ren, col, ventana_tablero)
letras = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']
numeros = ['1', '2', '3', '4', '5', '6', '7', '8']
indices(letras, numeros, ventana_tablero)
ventana_tablero.getMouse()
ventana_tablero.close()
return (None)
##
print(f'''
BIENVENIDOS A IMPOSSIBLE ESCAPE, EL RETO MATEMÁTICO. ESTE JUEGO ESTÁ DISEÑADO
PARA DOS PERSONAS. ¡ASÍ QUE CONSIGUE UN COMPAÑERO! UNA VEZ LISTOS COMENCEMOS EL TUTORIAL...(ESCRIBE "LISTOS" PARA CONTINUAR)
''')
salir = True
while salir == True:
respuesta = input()
res_may = respuesta.upper()
if res_may == 'LISTOS':
print()
print('''
-CARCELERO: Jejeje. Hola par de prisioneros, yo seré su carcelero. Ustedes dos han sido condenados a muerte; sin embargo, estoy dispuesto a darles una oportunidad de salir en libertad. La explicaré antes de ponerlos a prueba...
DALE CLICK AL TABLERO PARA CONTINUAR''')
salir = not(salir)
crear_ventana()
elif res_may == "SALIR":
menu()
else:
print(
'''PALABRA INCORRECTA. ESCRIBE "LISTOS" PARA CONTINUAR''')
print('''
-CARCELERO: ¿Ven este tablero de ajedrez? Los separaré de modo que SÓLO UNO DE USTEDES PUEDA VER LA PANTALLA y asignaré la llave de las celdas a uno de los casilleros del tablero, mostrándole a quien se quedó conmigo la posición.
''')
ventana_tablero_expo = crear_ventana_expo()
print('''
¿Están listos para intentarlo? A partir de este momento tienen todo el tiempo que deseen para desarrollar su estrategia. Una vez listos DENLE CLICK AL TABLERO PARA EMPEZAR.''')
ventana_tablero_expo.getMouse()
ventana_tablero_expo.close()
print ('''
-CARCELERO: Hola de nuevo prisioneros, en este momento uno de ustedes debe dejar la sala, de modo que no pueda ver la pantalla.
CUANDO ESTÉS LISTO DALE CLICK AL TABLERO PARA CONTINUAR.''')
crear_ventana()
crear_ventana_real()
menu()
main()
```
|
{
"source": "jdclarke5/sylver",
"score": 3
}
|
#### File: sylver/backend/postgres.py
```python
from .backend import BaseBackend
import psycopg2
from psycopg2 import sql
class PostgresBackend(BaseBackend):
def __init__(self, connection_string):
"""Initialise PostgreSQL connection with a valid libpq connection
string. Create the `position`, `status`, and `reply` tables if they
do not yet exist.
"""
self.conn = psycopg2.connect(connection_string)
with self.conn:
with self.conn.cursor() as c:
c.execute("""
CREATE TABLE IF NOT EXISTS position (
name text PRIMARY KEY,
generators integer[] NOT NULL,
gcd integer NOT NULL,
multiplicity integer NOT NULL,
genus integer NOT NULL,
frobenius integer NOT NULL,
irreducible char (1) NULL
);""")
c.execute("""
CREATE TABLE IF NOT EXISTS status (
position text PRIMARY KEY,
status varchar (2) NOT NULL
);""")
c.execute("""
CREATE TABLE IF NOT EXISTS reply (
position text NOT NULL,
reply integer NOT NULL,
CONSTRAINT uniquetuple UNIQUE (position, reply)
);""")
self.position_cols = ("name", "generators", "gcd", "multiplicity",
"genus", "frobenius", "irreducible")
def save(self, position, status, replies):
"""PostgreSQL implementation of BaseBackend method.
"""
position_dict = {"name": position.name, **position.to_dict()}
# Position
columns = sql.SQL(",").join(map(sql.Identifier, self.position_cols))
values = sql.SQL(",").join(map(sql.Placeholder, self.position_cols))
position_query = sql.SQL("""
INSERT INTO position ({columns}) VALUES ({values})
ON CONFLICT (name) DO NOTHING;""").format(
columns=columns, values=values)
# Status
status_query = sql.SQL("""
INSERT INTO status (position, status) VALUES (%(name)s, {status})
ON CONFLICT (position) DO UPDATE SET status = EXCLUDED.status
WHERE status.status != 'P' AND status.status != 'N';
""").format(status=sql.Literal(status))
# Reply
reply_values = [sql.SQL("(%(name)s, {})").format(sql.Literal(r))
for r in replies]
reply_query = sql.SQL("""
INSERT INTO reply (position, reply) VALUES {}
ON CONFLICT ON CONSTRAINT uniquetuple DO NOTHING;
""").format(sql.SQL(",").join(reply_values))
with self.conn:
with self.conn.cursor() as c:
c.execute(position_query, position_dict)
c.execute(status_query, position_dict)
if replies:
c.execute(reply_query, position_dict)
def get_status(self, position):
"""PostgreSQL implementation of BaseBackend method.
"""
query = "SELECT status FROM status WHERE position = %(name)s;"
with self.conn:
with self.conn.cursor() as c:
c.execute(query, {"name": position.name})
result = c.fetchone()
return result[0] if result else None
```
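A minimal usage sketch for the `PostgresBackend` above, not taken from the repo: it assumes a reachable PostgreSQL server, and the connection string plus the `DummyPosition` stand-in (the backend only needs a `name` attribute and a `to_dict()` method) are illustrative placeholders for the project's real position class.
```python
# Hypothetical stand-in for the project's position object; only the pieces
# PostgresBackend.save()/get_status() actually touch are provided.
class DummyPosition:
    def __init__(self, name, generators):
        self.name = name
        self.generators = generators

    def to_dict(self):
        # Placeholder values matching the columns of the `position` table.
        return {
            "generators": self.generators,
            "gcd": 1,
            "multiplicity": min(self.generators),
            "genus": 1,
            "frobenius": 1,
            "irreducible": None,
        }

# The connection string is an assumption; adjust it to your environment.
backend = PostgresBackend("dbname=sylver user=postgres host=localhost")
position = DummyPosition("2-3", [2, 3])
backend.save(position, status="P", replies=[5, 7])
print(backend.get_status(position))  # -> 'P'
```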
#### File: sylver/backend/redis.py
```python
from .backend import BaseBackend
import yaml
import redis
class RedisBackend(BaseBackend):
def __init__(self, **kwargs):
"""Initialise Redis client by passing **kwargs to `redis.Redis`.
"""
self.redis = redis.Redis(**kwargs)
def set(self, key, dictionary):
"""Set a python dictionary as a YAML string in Redis. Note that YAML
is used because it supports sets.
"""
yaml_dictionary = yaml.dump(dictionary)
self.redis.set(key, yaml_dictionary)
def get(self, key):
"""Retrieve a python dictionary from a YAML string in Redis. Note that
YAML is used because it supports sets.
"""
yaml_dictionary = self.redis.get(key)
return yaml.safe_load(yaml_dictionary) if yaml_dictionary else None
def save(self, position, status, replies):
"""Redis implementation of BaseBackend method.
"""
key = position.name
existing = self.get(key) or {}
entry = {
**position.to_dict(),
"status": status,
"replies": existing.get("replies", set()).union(replies),
}
self.set(key, entry)
def get_status(self, position):
"""Redis implementation of BaseBackend method.
"""
key = position.name
existing = self.get(key) or {}
return existing.get("status", None)
```
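A short sketch (not from the repo) showing why the YAML round-trip matters for `RedisBackend`: a Python `set` survives `set()`/`get()` intact, which a plain JSON encoding would not allow. Host, port, and db are assumptions, and a running Redis server is required.
```python
# Assumes a local Redis server; host/port/db are illustrative.
backend = RedisBackend(host="localhost", port=6379, db=0)

# `replies` is a Python set; yaml.dump/yaml.safe_load preserve it as a set.
backend.set("2-3", {"status": "P", "replies": {5, 7}})
print(backend.get("2-3"))  # -> {'status': 'P', 'replies': {5, 7}}
```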
|
{
"source": "jdcla/transformer-xl",
"score": 2
}
|
#### File: jdcla/transformer-xl/mem_transformer.py
```python
import sys
import math
import functools
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from utils.proj_adaptive_softmax import ProjectedAdaptiveLogSoftmax
from utils.log_uniform_sampler import LogUniformSampler, sample_logits
class PositionalEmbedding(nn.Module):
def __init__(self, demb):
super(PositionalEmbedding, self).__init__()
self.demb = demb
inv_freq = 1 / (10000 ** (torch.arange(0.0, demb, 2.0) / demb))
self.register_buffer('inv_freq', inv_freq)
def forward(self, pos_seq, bsz=None):
sinusoid_inp = torch.ger(pos_seq, self.inv_freq)
pos_emb = torch.cat([sinusoid_inp.sin(), sinusoid_inp.cos()], dim=-1)
if bsz is not None:
return pos_emb[:, None, :].expand(-1, bsz, -1)
else:
return pos_emb[:, None, :]
class RelMultiHeadAttn(nn.Module):
def __init__(self, n_head, d_model, d_head, dropout, dropatt=0, conv_size=7, pre_conv=False,
tgt_len=None, mem_len=None):
super(RelMultiHeadAttn, self).__init__()
self.n_head = n_head
self.d_model = d_model
self.d_head = d_head
self.dropout = dropout
self.pre_conv = pre_conv
self.qkv_net = nn.Linear(d_model, 3 * n_head * d_head, bias=False)
self.conv_size = conv_size
if conv_size != 0:
if self.pre_conv:
self.pre_motif_net = nn.Conv1d(d_model, d_model, conv_size, padding=conv_size//2)
else:
self.motif_net_q = nn.Conv1d(d_head, d_head, conv_size, padding=conv_size//2)
self.motif_net_k = nn.Conv1d(d_head, d_head, conv_size, padding=conv_size//2)
self.motif_net_v = nn.Conv1d(d_head, d_head, conv_size, padding=conv_size//2)
self.drop = nn.Dropout(dropout)
self.dropatt = nn.Dropout(dropatt)
self.o_net = nn.Linear(n_head * d_head, d_model, bias=False)
self.layer_norm = nn.LayerNorm(d_model)
self.scale = 1 / (d_head ** 0.5)
def _parallelogram_mask(self, h, w, left=False):
mask = torch.ones((h, w)).byte()
m = min(h, w)
mask[:m, :m] = torch.triu(mask[:m, :m])
mask[-m:, -m:] = torch.tril(mask[-m:, -m:])
if left:
return mask
else:
return mask.flip(0)
def _shift(self, x, qlen, klen, mask, left=False):
if qlen > 1:
zero_pad = torch.zeros((x.size(0), qlen - 1, x.size(2), x.size(3)),
device=x.device, dtype=x.dtype)
else:
zero_pad = torch.zeros(0, device=x.device, dtype=x.dtype)
if left:
mask = mask.flip(1)
x_padded = torch.cat([zero_pad, x], dim=1).expand(qlen, -1, -1, -1)
else:
x_padded = torch.cat([x, zero_pad], dim=1).expand(qlen, -1, -1, -1)
x = x_padded.masked_select(mask[:, :, None, None]) \
.view(qlen, klen, x.size(2), x.size(3))
return x
def _rel_shift(self, x, zero_triu=False):
zero_pad = torch.zeros((x.size(0), 1, *x.size()[2:]),
device=x.device, dtype=x.dtype)
x_padded = torch.cat([zero_pad, x], dim=1)
x_padded = x_padded.view(x.size(1) + 1, x.size(0), *x.size()[2:])
x = x_padded[1:].view_as(x)
if zero_triu:
ones = torch.ones((x.size(0), x.size(1)))
x = x * torch.tril(ones, x.size(1) - x.size(0))[:, :, None, None]
return x
def forward(self, w, r, attn_mask=None, mems=None):
raise NotImplementedError
class RelPartialLearnableMultiHeadAttn(RelMultiHeadAttn):
def __init__(self, *args, **kwargs):
super(RelPartialLearnableMultiHeadAttn, self).__init__(*args, **kwargs)
self.r_net = nn.Linear(self.d_model, self.n_head * self.d_head, bias=False)
def forward(self, w, r, r_w_bias, r_r_bias, attn_mask=None, mems=None):
qlen, rlen, bsz = w.size(0), r.size(0), w.size(1)
if mems is not None:
cat = torch.cat([mems, w], 0)
if self.pre_conv and (self.conv_size > 0):
cat = cat.permute(1,2,0).contiguous()
cat = self.pre_motif_net(cat).permute(2,0,1)
w_heads = self.qkv_net(cat)
r_head_k = self.r_net(r)
w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
w_head_q = w_head_q[-qlen:]
else:
w_new = w
if self.pre_conv and (self.conv_size > 0):
w_new = w.permute(1,2,0).contiguous()
w_new = self.pre_motif_net(w_new).permute(2,0,1)
w_heads = self.qkv_net(w_new)
r_head_k = self.r_net(r)
w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
klen = w_head_k.size(0)
if (self.conv_size == 0) or self.pre_conv:
w_head_q = w_head_q.view(qlen, bsz, self.n_head, self.d_head) # qlen x bsz x n_head x d_head
w_head_k = w_head_k.view(klen, bsz, self.n_head, self.d_head) # klen x bsz x n_head x d_head
w_head_v = w_head_v.view(klen, bsz, self.n_head, self.d_head) # klen x bsz x n_head x d_head
else:
w_head_q = w_head_q.view(qlen, bsz, self.n_head, self.d_head).permute(2,1,3,0) # n_head x bsz x d_head x qlen
w_head_k = w_head_k.view(klen, bsz, self.n_head, self.d_head).permute(2,1,3,0) # n_head x bsz x d_head x klen
w_head_v = w_head_v.view(klen, bsz, self.n_head, self.d_head).permute(2,1,3,0) # n_head x bsz x d_head x klen
w_head_q = w_head_q.reshape(self.n_head * bsz, self.d_head, qlen) # (n_head*bsz) x d_head x qlen
w_head_k = w_head_k.reshape(self.n_head * bsz, self.d_head, klen) # (n_head*bsz) x d_head x klen
w_head_v = w_head_v.reshape(self.n_head * bsz, self.d_head, klen) # (n_head*bsz) x d_head x klen
w_head_q = self.motif_net_q(w_head_q).view(self.n_head, bsz, self.d_head, qlen).permute(3,1,0,2)
w_head_k = self.motif_net_k(w_head_k).view(self.n_head, bsz, self.d_head, klen).permute(3,1,0,2)
w_head_v = self.motif_net_v(w_head_v).view(self.n_head, bsz, self.d_head, klen).permute(3,1,0,2)
r_head_k = r_head_k.view(rlen, self.n_head, self.d_head) # qlen x n_head x d_head
#### compute attention score
# r_w_bias: n_head x d_head
rw_head_q = w_head_q + r_w_bias # qlen x bsz x n_head x d_head
AC = torch.einsum('ibnd,jbnd->ijbn', (rw_head_q, w_head_k)) # qlen x klen x bsz x n_head
# r_r_bias: n_head x d_head
rr_head_q = w_head_q + r_r_bias
BD = torch.einsum('ibnd,jnd->ijbn', (rr_head_q, r_head_k)) # qlen x klen x bsz x n_head
BD = self._rel_shift(BD)
# [qlen x klen x bsz x n_head]
attn_score = AC + BD
attn_score.mul_(self.scale)
#### compute attention probability
if attn_mask is not None and attn_mask.any().item():
if attn_mask.dim() == 2:
attn_score = attn_score.float().masked_fill(
attn_mask[None, :, :, None], -float('inf')).type_as(attn_score)
elif attn_mask.dim() == 3:
attn_score = attn_score.float().masked_fill(
attn_mask[:, :, :, None], -float('inf')).type_as(attn_score)
# [qlen x klen x bsz x n_head]
self.attn_prob = F.softmax(attn_score, dim=1)
attn_prob = self.dropatt(self.attn_prob)
#### compute attention vector
attn_vec = torch.einsum('ijbn,jbnd->ibnd', (attn_prob, w_head_v))
# [qlen x bsz x n_head x d_head]
attn_vec = attn_vec.contiguous().view(
attn_vec.size(0), attn_vec.size(1), self.n_head * self.d_head)
##### linear projection
attn_out = self.o_net(attn_vec)
attn_out = self.drop(attn_out)
##### residual connection + layer normalization
output = self.layer_norm(w + attn_out)
return output
class RelPartialLearnableDecoderLayer(nn.Module):
def __init__(self, n_head, d_model, d_head, d_inner, dropout,
**kwargs):
super(RelPartialLearnableDecoderLayer, self).__init__()
self.dec_attn = RelPartialLearnableMultiHeadAttn(n_head, d_model,
d_head, dropout, **kwargs)
def forward(self, dec_inp, r, r_w_bias, r_r_bias, dec_attn_mask=None, mems=None):
output = self.dec_attn(dec_inp, r, r_w_bias, r_r_bias, attn_mask=dec_attn_mask,
mems=mems)
return output
class AdaptiveEmbedding(nn.Module):
def __init__(self, n_token, d_embed, d_proj, cutoffs):
super().__init__()
self.n_token = n_token
self.d_embed = d_embed
self.cutoffs = cutoffs + [n_token]
self.d_proj = d_proj
self.emb_scale = d_proj ** 0.5
self.cutoff_ends = [0] + self.cutoffs
self.emb_layers = nn.ModuleList()
self.emb_projs = nn.ParameterList()
self.emb_layers.append(
nn.Embedding(n_token, d_embed, sparse=False)
)
if d_proj != d_embed:
self.emb_projs.append(nn.Parameter(torch.Tensor(d_proj, d_embed)))
def forward(self, inp):
embed = self.emb_layers[0](inp) # inp: qlen x bs -> embed: qlen x bs x d_embed
if self.d_proj != self.d_embed:
embed = F.linear(embed, self.emb_projs[0])
embed.mul_(self.emb_scale)
return embed
class ConvEmbeddings(nn.Module):
def __init__(self, n_token, d_embed, d_proj, conv_size, cutoffs):
super().__init__()
self.n_token = n_token
self.d_embed = d_embed
self.cutoffs = cutoffs + [n_token]
self.d_proj = d_proj
self.emb_scale = d_proj ** 0.5
self.cutoff_ends = [0] + self.cutoffs
self.emb_layers = nn.ModuleList()
self.emb_projs = nn.ParameterList()
self.emb_layers.append(
nn.Conv1d(4, d_embed, conv_size, padding=conv_size//2)
)
if d_proj != d_embed:
self.emb_projs.append(nn.Parameter(torch.Tensor(d_proj, d_embed)))
def forward(self, inp):
embed = self.emb_layers[0](inp.permute(1,2,0).contiguous()).permute(2,0,1).contiguous()
if self.d_proj != self.d_embed:
embed = F.linear(embed, self.emb_projs[0])
embed.mul_(self.emb_scale)
return embed
class MemTransformerLM(nn.Module):
def __init__(self, n_token_in, n_token_out, n_layer, n_head, d_model, d_head, d_inner,
dropout, dropatt, conv_size=7, conv_emb=False, pre_conv=False, tie_weight=True, d_embed=None,
tie_projs=[False], tgt_len=None, mem_len=None, ext_ds=None,
cutoffs=[], same_length=False, clamp_len=-1):
super().__init__()
self.n_token_in = n_token_in
self.n_token_out = n_token_out
d_embed = d_model if d_embed is None else d_embed
self.d_embed = d_embed
self.d_model = d_model
self.n_head = n_head
self.d_head = d_head
self.conv_size = conv_size
self.pre_conv = pre_conv
self.conv_emb = conv_emb
if conv_emb:
self.word_emb = ConvEmbeddings(n_token_in, d_embed, d_model, conv_size, cutoffs,)
else:
self.word_emb = AdaptiveEmbedding(n_token_in, d_embed, d_model, cutoffs,)
self.drop = nn.Dropout(dropout)
self.n_layer = n_layer
self.tgt_len = tgt_len
self.mem_len = mem_len
self.ext_ds = ext_ds
self.max_klen = tgt_len + mem_len
self.layers = nn.ModuleList()
for i in range(n_layer):
self.layers.append(
RelPartialLearnableDecoderLayer(
n_head, d_model, d_head, d_inner, dropout, conv_size=conv_size,
pre_conv=pre_conv, tgt_len=tgt_len, mem_len=mem_len,
dropatt=dropatt)
)
self.out_layer = nn.Linear(d_model, n_token_out)
# use adaptive softmax (including standard softmax)
self.crit = ProjectedAdaptiveLogSoftmax(n_token_out, d_embed, d_model,
cutoffs)
if tie_weight:
for i in range(len(self.crit.out_layers)):
self.crit.out_layers[i].weight = self.word_emb.emb_layers[i].weight
if tie_projs:
for i, tie_proj in enumerate(tie_projs):
if tie_proj and d_model != d_embed:
self.crit.out_projs[i] = self.word_emb.emb_projs[0]
elif tie_proj:
self.crit.out_projs[i] = self.word_emb.emb_projs[i]
self.same_length = same_length
self.clamp_len = clamp_len
self._create_params()
def _create_params(self):
self.pos_emb = PositionalEmbedding(self.d_model)
self.r_w_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head))
self.r_r_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head))
def reset_length(self, tgt_len, mem_len, ext_ds):
self.tgt_len = tgt_len
self.mem_len = mem_len
self.ext_ds = ext_ds
def init_mems(self):
if self.mem_len > 0:
mems = []
param = next(self.parameters())
for i in range(self.n_layer + 1):
empty = torch.empty(0, dtype=param.dtype, device=param.device)
mems.append(empty)
return mems
else:
return None
def _update_mems(self, hids, mems, qlen, mlen):
# does not deal with None
if mems is None: return None
# mems is not None
assert len(hids) == len(mems), 'len(hids) != len(mems)'
# There are `mlen + qlen` steps that can be cached into mems
# For the next step, the last `ext_len` of the `qlen` tokens
# will be used as the extended context. Hence, we only cache
# the tokens from `mlen + qlen - self.ext_len - self.mem_len`
# to `mlen + qlen - self.ext_len`.
with torch.no_grad():
new_mems = []
end_idx = mlen + qlen
beg_idx = max(0, end_idx - self.mem_len)
for i in range(len(hids)):
cat = torch.cat([mems[i], hids[i]], dim=0)
new_mems.append(cat[beg_idx:end_idx].detach())
return new_mems
def _forward(self, dec_inp, mems=None):
if self.conv_emb:
qlen, bsz, _ = dec_inp.size()
else:
qlen, bsz = dec_inp.size()
word_emb = self.word_emb(dec_inp)
mlen = mems[0].size(0) if mems is not None else 0
klen = mlen + qlen
if self.same_length:
all_ones = word_emb.new_ones(qlen, klen)
mask_len = klen - self.mem_len
if mask_len > 0:
mask_shift_len = qlen - mask_len
else:
mask_shift_len = qlen
dec_attn_mask = (torch.triu(all_ones, 1 + mlen)
+ torch.tril(all_ones, -mask_shift_len)).byte()[:, :, None].type(torch.bool) # -1
else:
dec_attn_mask = torch.triu(word_emb.new_ones(qlen, klen),
diagonal=1 + mlen + self.ext_ds).byte()[:, :, None].type(torch.bool)
hids = []
## attn
pos_seq = torch.arange(klen - 1, -1, -1.0, device=word_emb.device,
dtype=word_emb.dtype)
if self.clamp_len > 0:
pos_seq.clamp_(max=self.clamp_len)
pos_emb = self.pos_emb(pos_seq)
core_out = self.drop(word_emb)
pos_emb = self.drop(pos_emb)
hids.append(core_out)
for i, layer in enumerate(self.layers):
mems_i = None if mems is None else mems[i]
core_out = layer(core_out, pos_emb, self.r_w_bias,
self.r_r_bias, dec_attn_mask=dec_attn_mask, mems=mems_i)
hids.append(core_out)
##
core_out = self.drop(core_out)
new_mems = self._update_mems(hids, mems, mlen, qlen)
return core_out, new_mems
def forward(self, data, target, *mems, criterion=None, last=False):
# nn.DataParallel does not allow size(0) tensors to be broadcasted.
# So, have to initialize size(0) mems inside the model forward.
# Moreover, have to return new_mems to allow nn.DataParallel to piece
# them together.
tgt_len = target.size(0)
if not mems:
mems = self.init_mems()
if self.same_length and not last:
tgt_len_adj = tgt_len - (self.ext_ds)
else:
tgt_len_adj = tgt_len
target = target[:tgt_len_adj]
hidden, new_mems = self._forward(data, mems=mems)
if new_mems is not None:
new_mems = [mem[:tgt_len_adj] for mem in new_mems]
pred_hid = hidden[:tgt_len_adj] # do not evaluate scores downstream
"""loss, logit = self.crit(pred_hid.view(-1, pred_hid.size(-1)), target.view(-1),
criterion)
loss = loss.view(tgt_len, -1)"""
logit = self.out_layer(pred_hid.view(-1, pred_hid.size(-1)))
if criterion is None:
loss = -F.log_softmax(logit, dim=-1) \
.gather(1, target.view(-1).unsqueeze(1)).squeeze(1) # Problem with gather for merge
else:
loss = criterion(logit, target.view(-1))
logits = [logit]
targets = [target]
targets = [t.view(-1).cpu().data.numpy() for t in targets]
preds = [F.softmax(lgt, dim=1).cpu().data.numpy() for lgt in logits]
if new_mems is None:
return [loss] + [preds] + [targets]
else:
return [loss] + [preds] + [targets] + new_mems
```
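A small sanity-check sketch, not part of the repo: it builds the same descending position sequence that `MemTransformerLM._forward` feeds to `PositionalEmbedding` and confirms the `klen x 1 x demb` shape consumed by the attention layers.
```python
import torch

demb, klen = 16, 10
pos_emb_layer = PositionalEmbedding(demb)
# Positions run from klen-1 down to 0, mirroring pos_seq in _forward.
pos_seq = torch.arange(klen - 1, -1, -1.0)
pos_emb = pos_emb_layer(pos_seq)
print(pos_emb.shape)  # torch.Size([10, 1, 16]) -> klen x 1 x demb
```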
|
{
"source": "jdcloud-apigateway/jdcloud-sdk-python",
"score": 2
}
|
#### File: baseanti/models/InternalAttackLog.py
```python
class InternalAttackLog(object):
def __init__(self, ip=None, attackLogId=None, startTime=None, endTime=None, attackStatus=None):
"""
:param ip: (Optional) 公网 IP 地址
:param attackLogId: (Optional) 攻击记录 ID
:param startTime: (Optional) 攻击开始时间, UTC 时间, 格式: yyyy-MM-dd'T'HH:mm:ssZ
:param endTime: (Optional) 攻击结束时间, UTC 时间, 格式: yyyy-MM-dd'T'HH:mm:ssZ
:param attackStatus: (Optional) normal: 正常, unregister: 未备案, illegalmail: 非法邮件, clean: 超阈值, blackhole: 黑洞
"""
self.ip = ip
self.attackLogId = attackLogId
self.startTime = startTime
self.endTime = endTime
self.attackStatus = attackStatus
```
#### File: billing/apis/CalculateTotalPriceRequest.py
```python
from jdcloud_sdk.core.jdcloudrequest import JDCloudRequest
class CalculateTotalPriceRequest(JDCloudRequest):
"""
查询计费价格信息
"""
def __init__(self, parameters, header=None, version="v1"):
super(CalculateTotalPriceRequest, self).__init__(
'/regions/{regionId}/calculateTotalPrice', 'POST', header, version)
self.parameters = parameters
class CalculateTotalPriceParameters(object):
def __init__(self, regionId, cmd, packageCount, ):
"""
:param regionId:
:param cmd: 操作类型 1:创建 2:续费 3:升配 4:删除
:param packageCount: 批量购买时数量
"""
self.regionId = regionId
self.cmd = cmd
self.orderList = None
self.operateTime = None
self.promotionInfo = None
self.clientType = None
self.packageCount = packageCount
self.processType = None
self.renewMode = None
self.unifyExpireDay = None
self.totalPriceRule = None
def setOrderList(self, orderList):
"""
:param orderList: (Optional) 计算价格的订单
"""
self.orderList = orderList
def setOperateTime(self, operateTime):
"""
:param operateTime: (Optional) 操作时间(格式为:yyyy-MM-dd HH:mm:ss)
"""
self.operateTime = operateTime
def setPromotionInfo(self, promotionInfo):
"""
:param promotionInfo: (Optional) 1:折扣(不需要传) 2:免费活动3:付费活动 4:推荐码 5:会员价 [{"promotionType":1,"activityCode":123},{"promotionType":2,"activityCode":}]
"""
self.promotionInfo = promotionInfo
def setClientType(self, clientType):
"""
:param clientType: (Optional) 客户端:1.PC端;2.移动端;
"""
self.clientType = clientType
def setProcessType(self, processType):
"""
:param processType: (Optional) 临时升配时必传,3-临时升配
"""
self.processType = processType
def setRenewMode(self, renewMode):
"""
:param renewMode: (Optional) 续费方式 0:正常续费 1:续费至统一到期日,续费时必传
"""
self.renewMode = renewMode
def setUnifyExpireDay(self, unifyExpireDay):
"""
:param unifyExpireDay: (Optional) 续费统一到期日(1-28),续费时必传
"""
self.unifyExpireDay = unifyExpireDay
def setTotalPriceRule(self, totalPriceRule):
"""
:param totalPriceRule: (Optional) 计算总价规则 1:计算预付费资源总价(计费类型为包年包月、按次) ;不传计算所有资源总价
"""
self.totalPriceRule = totalPriceRule
```
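A hedged sketch of how the request above is assembled; the parameter values (region, cmd, timestamp) are made up for illustration, and actually sending the request still requires the SDK's client object, which is not shown in this file.
```python
# Illustrative values only: cmd=1 means "create", packageCount is the quantity.
parameters = CalculateTotalPriceParameters(regionId="cn-north-1", cmd=1, packageCount=1)
parameters.setClientType(1)                       # 1: PC client, per the docstring
parameters.setOperateTime("2021-07-26 08:08:08")  # yyyy-MM-dd HH:mm:ss format
request = CalculateTotalPriceRequest(parameters)
print(request.parameters.cmd)  # -> 1
```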
#### File: billing/models/AccountingRuleVo.py
```python
class AccountingRuleVo(object):
def __init__(self, id=None, site=None, appCode=None, targetType=None, serviceCode=None, pin=None, outAccountType=None, outAccountDay=None, timeCron=None, targetTypeName=None, appCodeName=None, serviceCodeName=None, outAccountTypeName=None):
"""
:param id: (Optional) id
:param site: (Optional) 站点
:param appCode: (Optional) 产品线
:param targetType: (Optional) 出账对象类型 1:通用 2:用户
:param serviceCode: (Optional) 产品
:param pin: (Optional) pin
:param outAccountType: (Optional) 出账类型 1:实时出账 2:定期出账
:param outAccountDay: (Optional) 出账周期:限制范围 1-28
:param timeCron: (Optional) 定期出账 时间表达式
:param targetTypeName: (Optional) 出账对象类型名称
:param appCodeName: (Optional) 产品线名称
:param serviceCodeName: (Optional) 产品名称
:param outAccountTypeName: (Optional) 出账类型名称
"""
self.id = id
self.site = site
self.appCode = appCode
self.targetType = targetType
self.serviceCode = serviceCode
self.pin = pin
self.outAccountType = outAccountType
self.outAccountDay = outAccountDay
self.timeCron = timeCron
self.targetTypeName = targetTypeName
self.appCodeName = appCodeName
self.serviceCodeName = serviceCodeName
self.outAccountTypeName = outAccountTypeName
```
#### File: billing/models/OrderPriceProtocol.py
```python
class OrderPriceProtocol(object):
def __init__(self, resourceId=None, appCode=None, serviceCode=None, site=None, region=None, billingType=None, timeSpan=None, timeUnit=None, networkOperator=None, formula=None, formulaStr=None, pin=None, count=None, startTime=None, endTime=None, taskId=None, sourceId=None):
"""
:param resourceId: (Optional) 资源id(新购时不传,升降配、续费必须传)
:param appCode: (Optional) 业务线
:param serviceCode: (Optional) 产品线(必传)
:param site: (Optional) 站点信息 0:主站 其他:专有云
:param region: (Optional) 地域(新购、升降配必传)
:param billingType: (Optional) 计费类型 1:按配置 2:按用量 3:包年包月(必传) 4:按次计费
:param timeSpan: (Optional) 时长(包年包月新购、续费必传)
:param timeUnit: (Optional) 时长类型 0:无(非包年包月) 1:小时 2:天 3:月 4:年(包年包月新购、续费必传)
:param networkOperator: (Optional) 网络类型 0:non 1:非BGP 2:BGP
:param formula: (Optional) 计算公式(配置细项)(新购、升降配必传)
:param formulaStr: (Optional) 配置细项
:param pin: (Optional) 用户pin
:param count: (Optional) 具体商品数量,默认为1
:param startTime: (Optional) 订单开始时间
:param endTime: (Optional) 临时升配结束时间,临时升配时必传
:param taskId: (Optional) 自然单列表
:param sourceId: (Optional) 交易单模块sourceId
"""
self.resourceId = resourceId
self.appCode = appCode
self.serviceCode = serviceCode
self.site = site
self.region = region
self.billingType = billingType
self.timeSpan = timeSpan
self.timeUnit = timeUnit
self.networkOperator = networkOperator
self.formula = formula
self.formulaStr = formulaStr
self.pin = pin
self.count = count
self.startTime = startTime
self.endTime = endTime
self.taskId = taskId
self.sourceId = sourceId
```
#### File: billing/models/ResourceStopDeleteRuleVo.py
```python
class ResourceStopDeleteRuleVo(object):
def __init__(self, id=None, site=None, appCode=None, appCodeName=None, serviceCode=None, serviceCodeName=None, ruleType=None, pin=None, arrearStop=None, arrearStopDelayHours=None, arrearDelete=None, arrearDeleteDelayHours=None, expireStop=None, expireStopDelayHours=None, expireDelete=None, expireDeleteDelayHours=None, createTime=None, arrearDeleteType=None, expireDeleteType=None, flowArrearStop=None, flowArrearStopDelayHours=None, flowArrearDelete=None, flowArrearDeleteDelayHours=None, flowArrearDeleteType=None, clientType=None):
"""
:param id: (Optional) 主键
:param site: (Optional) 站点
:param appCode: (Optional) 产品线编码
:param appCodeName: (Optional) 产品线名称
:param serviceCode: (Optional) 产品编码
:param serviceCodeName: (Optional) 产品名称
:param ruleType: (Optional) 规则类型 1:试用规则 2、用户产品规则 3:用户规则 4:产品规则 5:通用规则 6:用户等级产品规则
:param pin: (Optional) pin
:param arrearStop: (Optional) 按配置欠费是否停服 1:欠费需要停服 0:欠费不需要停服
:param arrearStopDelayHours: (Optional) 按配置欠费停服延后时长
:param arrearDelete: (Optional) 按配置欠费停服是否释放资源 1:需要释放资源 0:不需要释放资源
:param arrearDeleteDelayHours: (Optional) 按配置欠费停服释放资源延后时长
:param expireStop: (Optional) 到期是否停服 1:到期需要停服 0:到期不需要停服
:param expireStopDelayHours: (Optional) 到期停服延后时长
:param expireDelete: (Optional) 到期停服是否释放资源 1:需要释放资源 0:不需要释放资源
:param expireDeleteDelayHours: (Optional) 到期停服释放资源延后时长
:param createTime: (Optional) 创建时间
:param arrearDeleteType: (Optional) 按配置欠费释放类型 1:释放资源 2:释放数据
:param expireDeleteType: (Optional) 到期释放类型 1:释放资源 2:释放数据
:param flowArrearStop: (Optional) 按用量欠费是否停服 1:欠费需要停服 0:欠费不需要停服
:param flowArrearStopDelayHours: (Optional) 按用量欠费停服延后时长
:param flowArrearDelete: (Optional) 按用量欠费停服是否释放资源 1:需要释放资源 0:不需要释放资源
:param flowArrearDeleteDelayHours: (Optional) 按用量欠费停服释放资源延后时长
:param flowArrearDeleteType: (Optional) 按用量欠费释放类型 1:释放资源 2:释放数据
:param clientType: (Optional) 客户级别 1-普通客户 2-VIP客户
"""
self.id = id
self.site = site
self.appCode = appCode
self.appCodeName = appCodeName
self.serviceCode = serviceCode
self.serviceCodeName = serviceCodeName
self.ruleType = ruleType
self.pin = pin
self.arrearStop = arrearStop
self.arrearStopDelayHours = arrearStopDelayHours
self.arrearDelete = arrearDelete
self.arrearDeleteDelayHours = arrearDeleteDelayHours
self.expireStop = expireStop
self.expireStopDelayHours = expireStopDelayHours
self.expireDelete = expireDelete
self.expireDeleteDelayHours = expireDeleteDelayHours
self.createTime = createTime
self.arrearDeleteType = arrearDeleteType
self.expireDeleteType = expireDeleteType
self.flowArrearStop = flowArrearStop
self.flowArrearStopDelayHours = flowArrearStopDelayHours
self.flowArrearDelete = flowArrearDelete
self.flowArrearDeleteDelayHours = flowArrearDeleteDelayHours
self.flowArrearDeleteType = flowArrearDeleteType
self.clientType = clientType
```
#### File: captcha/models/Trial.py
```python
class Trial(object):
def __init__(self, appId=None, sceneId=None, secret=None, supports=None, captchaType=None, captchaName=None):
"""
:param appId: (Optional) 应用id
:param sceneId: (Optional) 场景id
:param secret: (Optional) 场景secret,前端调用时需要先解密再调用,解密请联系相关研发
:param supports: (Optional) 支持的APP类型,有的验证码类型不支持pc端体验,需要切换到移动端体验, eg["pc","m"]
:param captchaType: (Optional) 验证码类型
:param captchaName: (Optional) 验证码名称
"""
self.appId = appId
self.sceneId = sceneId
self.secret = secret
self.supports = supports
self.captchaType = captchaType
self.captchaName = captchaName
```
#### File: censor/apis/AsyncVideoScanV2Request.py
```python
from jdcloud_sdk.core.jdcloudrequest import JDCloudRequest
class AsyncVideoScanV2Request(JDCloudRequest):
"""
提交视频异步检测任务V2
"""
def __init__(self, parameters, header=None, version="v1"):
super(AsyncVideoScanV2Request, self).__init__(
'/video:asyncscanv2', 'POST', header, version)
self.parameters = parameters
class AsyncVideoScanV2Parameters(object):
def __init__(self, ):
"""
"""
self.bizType = None
self.url = None
self.dataId = None
self.version = None
self.title = None
self.callback = None
self.callbackUrl = None
self.uniqueKey = None
self.scFrequency = None
self.advancedFrequency = None
def setBizType(self, bizType):
"""
:param bizType: (Optional) 业务bizType,请联系客户经理获取
"""
self.bizType = bizType
def setUrl(self, url):
"""
:param url: (Optional) 最大长度512, 点播视频地址
"""
self.url = url
def setDataId(self, dataId):
"""
:param dataId: (Optional) 最大长度128,点播视频唯一标识
"""
self.dataId = dataId
def setVersion(self, version):
"""
:param version: (Optional) 接口版本号,可选值 v3.2
"""
self.version = version
def setTitle(self, title):
"""
:param title: (Optional) 最大长度512,视频名称
"""
self.title = title
def setCallback(self, callback):
"""
:param callback: (Optional) 最大长度512,数据回调参数,产品根据业务情况自行设计,当获取离线检测结果时,内容安全服务会返回该字段
"""
self.callback = callback
def setCallbackUrl(self, callbackUrl):
"""
:param callbackUrl: (Optional) 最大长度256,离线结果回调通知到客户的URL。主动回调数据接口超时时间设置为2s,为了保证顺利接收数据,需保证接收接口性能稳定并且保证幂等性。
"""
self.callbackUrl = callbackUrl
def setUniqueKey(self, uniqueKey):
"""
:param uniqueKey: (Optional) 最大长度64,客户个性化视频唯一性标识,传入后,将以此值作为重复检测依据,若不传,默认以URL作为查重依据,如果重复提交会被拒绝,返回报错信息请求重复,以及原提交taskID值,具体返回请查看响应示例
"""
self.uniqueKey = uniqueKey
def setScFrequency(self, scFrequency):
"""
:param scFrequency: (Optional) 最大长度64,客户个性化视频唯一性标识,传入后,将以此值作为重复检测依据,若不传,默认以URL作为查重依据,如果重复提交会被拒绝,返回报错信息请求重复,以及原提交taskID值,具体返回请查看响应示例
"""
self.scFrequency = scFrequency
def setAdvancedFrequency(self, advancedFrequency):
"""
:param advancedFrequency: (Optional) 高级截帧设置,此项填写,默认截帧策略失效
"""
self.advancedFrequency = advancedFrequency
```
#### File: censor/models/EvidenceItem.py
```python
class EvidenceItem(object):
def __init__(self, beginTime=None, endTime=None, type=None, url=None, censorSource=None, labels=None, frontPics=None, backPics=None):
"""
:param beginTime: (Optional) 证据开始相对时间,单位为毫秒,调用方获取后可自行格式化为可视化时间,如:149000 转换为"00:02:29"
:param endTime: (Optional) 证据结束相对时间,单位为毫秒,调用方获取后可自行格式化为可视化时间,如:149000 转换为"00:02:29"
:param type: (Optional) 1:图片,2:视频
:param url: (Optional) 证据信息
:param censorSource: (Optional) 审核来源,0:京东人审,1:客户人审,2:京东机审
:param labels: (Optional) 证据结果数组
:param frontPics: (Optional) 关联信息-命中前截图信息
:param backPics: (Optional) 关联信息-命中后截图信息
"""
self.beginTime = beginTime
self.endTime = endTime
self.type = type
self.url = url
self.censorSource = censorSource
self.labels = labels
self.frontPics = frontPics
self.backPics = backPics
```
#### File: censor/models/FaceContentItem.py
```python
class FaceContentItem(object):
def __init__(self, name=None, gender=None, age=None, type=None, category=None, x1=None, y1=None, x2=None, y2=None):
"""
:param name: (Optional) 人脸名字,不可识别则为空
:param gender: (Optional) 人脸性别,值为男(male)、女(female);不可识别则为空
:param age: (Optional) 人脸年龄,值为具体年龄(age);不可识别则为空
:param type: (Optional) 人脸类型,包含卡通脸(cartoon)、普通(normal)
:param category: (Optional) 人物分类,包含名人(star)、普通(normal)
:param x1: (Optional) 人脸位置信息,对应人脸矩形左上角横坐标相对坐标
:param y1: (Optional) 人脸位置信息,对应人脸矩形左上角纵坐标相对坐标
:param x2: (Optional) 人脸位置信息,对应人脸矩形右下角横坐标相对坐标
:param y2: (Optional) 人脸位置信息,对应人脸矩形右下角纵坐标相对坐标
"""
self.name = name
self.gender = gender
self.age = age
self.type = type
self.category = category
self.x1 = x1
self.y1 = y1
self.x2 = x2
self.y2 = y2
```
#### File: live/apis/UpdateLiveForwardTaskRequest.py
```python
from jdcloud_sdk.core.jdcloudrequest import JDCloudRequest
class UpdateLiveForwardTaskRequest(JDCloudRequest):
"""
更新直播拉流转推任务
"""
def __init__(self, parameters, header=None, version="v1"):
super(UpdateLiveForwardTaskRequest, self).__init__(
'/LiveForwardTask:update', 'POST', header, version)
self.parameters = parameters
class UpdateLiveForwardTaskParameters(object):
def __init__(self, taskId, ):
"""
:param taskId: 任务ID
"""
self.taskId = taskId
self.sourceUrl = None
self.pushUrl = None
self.startTime = None
self.endTime = None
self.callbackEvents = None
self.callbackUrl = None
self.name = None
def setSourceUrl(self, sourceUrl):
"""
:param sourceUrl: (Optional) 拉流地址
- 支持rtmp
"""
self.sourceUrl = sourceUrl
def setPushUrl(self, pushUrl):
"""
:param pushUrl: (Optional) 转推地址
- 支持rtmp
"""
self.pushUrl = pushUrl
def setStartTime(self, startTime):
"""
:param startTime: (Optional) 开始时间
- UTC时间, ISO8601示例:2021-07-26T08:08:08Z
- 不填表示立即开始
"""
self.startTime = startTime
def setEndTime(self, endTime):
"""
:param endTime: (Optional) 结束时间
- UTC时间, ISO8601示例:2021-07-26T08:08:08Z
- 最大支持365天,与开始时间间隔不超过7天。
- 不填拉不到流10分钟自动结束
"""
self.endTime = endTime
def setCallbackEvents(self, callbackEvents):
"""
:param callbackEvents: (Optional) 回调类型
- 不填发送全部回调
- TaskStart 任务开始
- TaskExit 任务结束
- callbackUrl非空的情况下,callbackEvents有效
"""
self.callbackEvents = callbackEvents
def setCallbackUrl(self, callbackUrl):
"""
:param callbackUrl: (Optional) 事件回调地址
"""
self.callbackUrl = callbackUrl
def setName(self, name):
"""
:param name: (Optional) 任务名称
- 最大255字符
"""
self.name = name
```
#### File: privatezone/apis/CreateZoneRequest.py
```python
from jdcloud_sdk.core.jdcloudrequest import JDCloudRequest
class CreateZoneRequest(JDCloudRequest):
"""
- 添加一个私有解析的zone,可添加以下三种类型的zone
- 云内全局zone:zone的后缀是指定的后缀,如:local。该域名在云内自动全局生效,不用关联vpc即可在vpc内解析,该类型全局唯一,不能重复添加
- 反向解析zone:zone的后缀是in-addr.arpa时,我们认为他是一个反向解析的zone,反向解析域名前缀目前支持10/172.16-31/192.168网段,如:10.in-addr.arpa、16.172.in-addr.arpa。反向解析的zone只能添加反向解析的记录
- 私有解析zone:该类型的zone可以时任意符合格式的域名,私有解析zone需要关联vpc后,在vpc内生效解析
"""
def __init__(self, parameters, header=None, version="v1"):
super(CreateZoneRequest, self).__init__(
'/regions/{regionId}/zones', 'POST', header, version)
self.parameters = parameters
class CreateZoneParameters(object):
def __init__(self, regionId, zone, instanceId, zoneType, ):
"""
:param regionId: 地域ID
:param zone: zone
:param instanceId: 购买的套餐实例ID
:param zoneType: 域名类型 LOCAL->云内全局 PTR->反向解析zone PV->私有zone
"""
self.regionId = regionId
self.zone = zone
self.instanceId = instanceId
self.zoneType = zoneType
self.retryRecurse = None
self.bindVpc = None
def setRetryRecurse(self, retryRecurse):
"""
:param retryRecurse: (Optional) 解析失败后是否进行递归解析
"""
self.retryRecurse = retryRecurse
def setBindVpc(self, bindVpc):
"""
:param bindVpc: (Optional) 绑定的vpc信息
"""
self.bindVpc = bindVpc
```
#### File: privatezone/models/DescribePacksRes.py
```python
class DescribePacksRes(object):
def __init__(self, packType=None, packName=None, price=None, sla=None, bindVpcNum=None, domainLevel=None, rrAuthorExport=None, rrNum=None, zoneLevel=None, zoneNumMin=None, zoneNumMax=None, durationMin=None, durationMax=None, durationUnit=None):
"""
:param packType: (Optional) 套餐类型
:param packName: (Optional) 套餐名称
:param price: (Optional) 价格
:param sla: (Optional) 服务级别协议
:param bindVpcNum: (Optional) 绑定vpc数量
:param domainLevel: (Optional) 域名等级
:param rrAuthorExport: (Optional) 导出解析记录权限
:param rrNum: (Optional) 解析记录数量
:param zoneLevel: (Optional) zone级别
:param zoneNumMin: (Optional) zone最小数量
:param zoneNumMax: (Optional) zone最大数量
:param durationMin: (Optional) 最小时长
:param durationMax: (Optional) 最大时长
:param durationUnit: (Optional) 时长单位
"""
self.packType = packType
self.packName = packName
self.price = price
self.sla = sla
self.bindVpcNum = bindVpcNum
self.domainLevel = domainLevel
self.rrAuthorExport = rrAuthorExport
self.rrNum = rrNum
self.zoneLevel = zoneLevel
self.zoneNumMin = zoneNumMin
self.zoneNumMax = zoneNumMax
self.durationMin = durationMin
self.durationMax = durationMax
self.durationUnit = durationUnit
```
#### File: privatezone/models/DescribeResourceRecordsRes.py
```python
class DescribeResourceRecordsRes(object):
def __init__(self, id=None, hostRecord=None, hostValue=None, recordType=None, ttl=None, priority=None, port=None, weight=None, createTime=None, updateTime=None, status=None):
"""
:param id: (Optional) 解析记录id
:param hostRecord: (Optional) 主机记录
:param hostValue: (Optional) 主机记录值
:param recordType: (Optional) 解析类型,目前支持类型 A AAAA CNAME TXT CAA SRV MX PTR
:param ttl: (Optional) TTL值
:param priority: (Optional) 优先级,只存在于MX, SRV解析记录类型
:param port: (Optional) 端口,只存在于SRV解析记录类型
:param weight: (Optional) 解析记录的权重
:param createTime: (Optional) 创建时间, UTC时间格式,例如2017-11-10T23:00:00Z
:param updateTime: (Optional)
:param status: (Optional) 解析状态 START->正在解析 STOP->停止解析
"""
self.id = id
self.hostRecord = hostRecord
self.hostValue = hostValue
self.recordType = recordType
self.ttl = ttl
self.priority = priority
self.port = port
self.weight = weight
self.createTime = createTime
self.updateTime = updateTime
self.status = status
```
#### File: redis/models/SpecInfo.py
```python
class SpecInfo(object):
def __init__(self, memoryGB=None, instanceClass=None, cpu=None, diskGB=None, maxConnection=None, bandwidthMbps=None, ipNumber=None, shard=None, azs=None):
"""
:param memoryGB: (Optional) 内存大小(GB)
:param instanceClass: (Optional) 实例规格,标准版不为空,4.0 自定义分片集群版规格为空,具体规格参考单分片规格
:param cpu: (Optional) 实例CPU核数,0表示自定义分片集群版规格,CPU核数由分片数变化
:param diskGB: (Optional) 实例磁盘大小(GB),0表示自定义分片集群版规格,磁盘大小由分片数变化
:param maxConnection: (Optional) 最大连接数,0表示自定义分片集群版规格,最大连接数由分片数变化
:param bandwidthMbps: (Optional) 带宽(Mbps),0表示自定义分片集群版规格,带宽由分片数变化
:param ipNumber: (Optional) 需要的IP数,0表示自定义分片集群版规格,IP数由分片数变化
:param shard: (Optional) 实例的分片列表信息,redis 2.8标准版、集群版以及redis 4.0标准版没有分片列表信息
:param azs: (Optional) az列表
"""
self.memoryGB = memoryGB
self.instanceClass = instanceClass
self.cpu = cpu
self.diskGB = diskGB
self.maxConnection = maxConnection
self.bandwidthMbps = bandwidthMbps
self.ipNumber = ipNumber
self.shard = shard
self.azs = azs
```
#### File: starshield/apis/CreatePageRuleRequest.py
```python
from jdcloud_sdk.core.jdcloudrequest import JDCloudRequest
class CreatePageRuleRequest(JDCloudRequest):
"""
创建页面规则
"""
def __init__(self, parameters, header=None, version="v1"):
super(CreatePageRuleRequest, self).__init__(
'/zones/{zone_identifier}/pagerules', 'POST', header, version)
self.parameters = parameters
class CreatePageRuleParameters(object):
def __init__(self, zone_identifier, ):
"""
:param zone_identifier:
"""
self.zone_identifier = zone_identifier
self.targets = None
self.actions = None
self.priority = None
self.status = None
def setTargets(self, targets):
"""
:param targets: (Optional) 根据请求评估的目标
"""
self.targets = targets
def setActions(self, actions):
"""
:param actions: (Optional) 如果此规则的目标与请求匹配,则要执行的操作集。操作可以将url重定向到另一个url或覆盖设置(但不能同时覆盖两者)
"""
self.actions = actions
def setPriority(self, priority):
"""
:param priority: (Optional) 一个数字,表示一个页面规则优先于另一个页面规则。
如果您可能有一个全面的页面规则(例如#1 “/images/”)
但是想要更具体的规则优先(例如#2 '/images/special/'),
您需要在后者(#2)上指定更高的优先级,以便它将覆盖第一个优先级。
"""
self.priority = priority
def setStatus(self, status):
"""
:param status: (Optional) 页面规则的状态
"""
self.status = status
```
#### File: starshield/apis/ListZonesRequest.py
```python
from jdcloud_sdk.core.jdcloudrequest import JDCloudRequest
class ListZonesRequest(JDCloudRequest):
"""
列出、搜索、排序和筛选您的域
"""
def __init__(self, parameters, header=None, version="v1"):
super(ListZonesRequest, self).__init__(
'/zones', 'GET', header, version)
self.parameters = parameters
class ListZonesParameters(object):
def __init__(self, ):
"""
"""
self.match = None
self.name = None
self.__account__name = None
self.order = None
self.page = None
self.per_page = None
self.status = None
self.__account__id = None
self.direction = None
def setMatch(self, match):
"""
:param match: (Optional) 是否匹配所有搜索要求或至少一个(任何)
"""
self.match = match
def setName(self, name):
"""
:param name: (Optional) 域名
"""
self.name = name
def set__account__name(self, __account__name):
"""
:param __account__name: (Optional) 帐户名
"""
self.__account__name = __account__name
def setOrder(self, order):
"""
:param order: (Optional) 按字段对域进行排序
"""
self.order = order
def setPage(self, page):
"""
:param page: (Optional) 分页结果的页码
"""
self.page = page
def setPer_page(self, per_page):
"""
:param per_page: (Optional) 每页的域数
"""
self.per_page = per_page
def setStatus(self, status):
"""
:param status: (Optional) 域的状态
"""
self.status = status
def set__account__id(self, __account__id):
"""
:param __account__id: (Optional) 帐户标识符标签
"""
self.__account__id = __account__id
def setDirection(self, direction):
"""
:param direction: (Optional) asc - 升序;desc - 降序
"""
self.direction = direction
```
#### File: starshield/apis/UpdateDNSRecordRequest.py
```python
from jdcloud_sdk.core.jdcloudrequest import JDCloudRequest
class UpdateDNSRecordRequest(JDCloudRequest):
"""
"""
def __init__(self, parameters, header=None, version="v1"):
super(UpdateDNSRecordRequest, self).__init__(
'/zones/{zone_identifier}/dns_records/{identifier}', 'PUT', header, version)
self.parameters = parameters
class UpdateDNSRecordParameters(object):
def __init__(self, zone_identifier, identifier, name, content, ttl, ):
"""
:param zone_identifier:
:param identifier:
:param name: DNS记录名称
:param content: DNS记录内容
:param ttl: DNS记录的生存时间。值为1是 "自动"。
"""
self.zone_identifier = zone_identifier
self.identifier = identifier
self.ty_pe = None
self.name = name
self.content = content
self.ttl = ttl
self.proxied = None
self.priority = None
self.srvData = None
self.caaData = None
def setTy_pe(self, ty_pe):
"""
:param ty_pe: (Optional) DNS记录类型
"""
self.ty_pe = ty_pe
def setProxied(self, proxied):
"""
:param proxied: (Optional) 是否利用星盾的性能和安全优势
"""
self.proxied = proxied
def setPriority(self, priority):
"""
:param priority: (Optional) 如果是MX记录,该属性是必需的
"""
self.priority = priority
def setSrvData(self, srvData):
"""
:param srvData: (Optional)
"""
self.srvData = srvData
def setCaaData(self, caaData):
"""
:param caaData: (Optional)
"""
self.caaData = caaData
```
#### File: starshield/models/Certificate.py
```python
class Certificate(object):
def __init__(self, priority=None, expires_on=None, hosts=None, zone_id=None, status=None, geo_restrictions=None, modified_on=None, signature=None, issuer=None, id=None, uploaded_on=None, bundle_method=None):
"""
:param priority: (Optional) 在请求中使用证书的顺序/优先级。
:param expires_on: (Optional) 来自授权机构的证书过期时间
:param hosts: (Optional)
:param zone_id: (Optional) 域标识符标签
:param status: (Optional) 域的自定义SSL的状态
:param geo_restrictions: (Optional)
:param modified_on: (Optional) 上次修改证书的时间
:param signature: (Optional) 用于证书的哈希类型
:param issuer: (Optional) 颁发证书的证书颁发机构
:param id: (Optional) 自定义证书标识符标签
:param uploaded_on: (Optional) 证书上载到星盾的时间
:param bundle_method: (Optional) SSL泛捆绑在各处有着最高的概率被验证,甚至能被使用过时的或不寻常的信任存储的客户端验证。
最佳捆绑使用最短的认证链和最新的中间证书。
而强制捆绑会验证证书链,但不以其他方式修改证书链。
"""
self.priority = priority
self.expires_on = expires_on
self.hosts = hosts
self.zone_id = zone_id
self.status = status
self.geo_restrictions = geo_restrictions
self.modified_on = modified_on
self.signature = signature
self.issuer = issuer
self.id = id
self.uploaded_on = uploaded_on
self.bundle_method = bundle_method
```
#### File: starshield/models/DescribePopIpRes.py
```python
class DescribePopIpRes(object):
def __init__(self, ip=None, coloId=None, domains=None, ipIsptype=None):
"""
:param ip: (Optional) ip
:param coloId: (Optional) pop 节点id
:param domains: (Optional) ip对应域名信息
:param ipIsptype: (Optional) ip对应运营商信息
"""
self.ip = ip
self.coloId = coloId
self.domains = domains
self.ipIsptype = ipIsptype
```
#### File: starshield/models/Plan.py
```python
class Plan(object):
def __init__(self, id=None, name=None, price=None, currency=None, frequency=None, legacy_id=None, is_subscribed=None, can_subscribe=None):
"""
:param id: (Optional)
:param name: (Optional)
:param price: (Optional)
:param currency: (Optional)
:param frequency: (Optional)
:param legacy_id: (Optional)
:param is_subscribed: (Optional)
:param can_subscribe: (Optional)
"""
self.id = id
self.name = name
self.price = price
self.currency = currency
self.frequency = frequency
self.legacy_id = legacy_id
self.is_subscribed = is_subscribed
self.can_subscribe = can_subscribe
```
#### File: starshield/models/Totals.py
```python
class Totals(object):
def __init__(self, since=None, until=None, requests=None, bandwidth=None):
"""
:param since: (Optional)
:param until: (Optional)
:param requests: (Optional)
:param bandwidth: (Optional)
"""
self.since = since
self.until = until
self.requests = requests
self.bandwidth = bandwidth
```
#### File: vm/apis/ModifyInstanceAttributeRequest.py
```python
from jdcloud_sdk.core.jdcloudrequest import JDCloudRequest
class ModifyInstanceAttributeRequest(JDCloudRequest):
"""
修改一台云主机的属性。
详细操作说明请参考帮助文档:
[修改实例名称](https://docs.jdcloud.com/cn/virtual-machines/modify-instance-name)
[自定义数据](https://docs.jdcloud.com/cn/virtual-machines/userdata)
[实例元数据](https://docs.jdcloud.com/cn/virtual-machines/instance-metadata)
## 接口说明
- 支持修改实例的名称、描述、hostname、自定义数据、实例元数据。
"""
def __init__(self, parameters, header=None, version="v1"):
super(ModifyInstanceAttributeRequest, self).__init__(
'/regions/{regionId}/instances/{instanceId}:modifyInstanceAttribute', 'POST', header, version)
self.parameters = parameters
class ModifyInstanceAttributeParameters(object):
def __init__(self, regionId, instanceId, ):
"""
:param regionId: 地域ID。
:param instanceId: 云主机ID。
"""
self.regionId = regionId
self.instanceId = instanceId
self.name = None
self.description = None
self.hostname = None
self.metadata = None
self.userdata = None
def setName(self, name):
"""
:param name: (Optional) 实例名称。长度为2\~128个字符,只允许中文、数字、大小写字母、英文下划线(\_)、连字符(-)及点(.),不能以(.)作为首尾。
"""
self.name = name
def setDescription(self, description):
"""
:param description: (Optional) 实例描述。256字符以内。
"""
self.description = description
def setHostname(self, hostname):
"""
:param hostname: (Optional) 实例hostname。
**Windows系统**:长度为2\~15个字符,允许大小写字母、数字或连字符(-),不能以连字符(-)开头或结尾,不能连续使用连字符(-),也不能全部使用数字。不支持点号(.)。
**Linux系统**:长度为2-64个字符,允许支持多个点号,点之间为一段,每段允许使用大小写字母、数字或连字符(-),但不能连续使用点号(.)或连字符(-),不能以点号(.)或连字符(-)开头或结尾。
"""
self.hostname = hostname
def setMetadata(self, metadata):
"""
:param metadata: (Optional) 用户自定义元数据。
以 `key-value` 键值对形式指定,可在实例系统内通过元数据服务查询获取。最多支持40对键值对,且 `key` 不超过256字符,`value` 不超过16KB,不区分大小写。
注意:`key` 不要以连字符(-)结尾,否则此 `key` 不生效。
"""
self.metadata = metadata
def setUserdata(self, userdata):
"""
:param userdata: (Optional) 自定义脚本。
目前仅支持启动脚本,即 `launch-script`,须Base64编码且编码前数据长度不能超过16KB。
**linux系统**:支持bash和python,编码前须分别以 `#!/bin/bash` 和 `#!/usr/bin/env python` 作为内容首行。
**Windows系统**:支持 `bat` 和 `powershell` ,编码前须分别以 `<cmd></cmd>和<powershell></powershell>` 作为内容首、尾行。
"""
self.userdata = userdata
```
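A hedged sketch (not from the SDK) of preparing the `userdata` field described above: the launch script must be Base64-encoded and, on Linux, start with a shebang line. Region, instance ID, and name are placeholders, and executing the request still goes through the SDK client, which this file does not show.
```python
import base64

# Illustrative Linux launch script; the docstring requires Base64 encoding
# and a `#!/bin/bash` (or python shebang) first line.
launch_script = "#!/bin/bash\necho hello > /tmp/hello.txt\n"
encoded = base64.b64encode(launch_script.encode("utf-8")).decode("ascii")

# regionId/instanceId are placeholders, not real resources.
parameters = ModifyInstanceAttributeParameters(regionId="cn-north-1", instanceId="i-xxxxxxxx")
parameters.setName("web-server-01")
parameters.setUserdata(encoded)
request = ModifyInstanceAttributeRequest(parameters)
```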
#### File: vm/models/UpdateInstanceTemplateSpec.py
```python
class UpdateInstanceTemplateSpec(object):
def __init__(self, instanceType=None, imageId=None, password=None, keyNames=None, metadata=None, userdata=None, elasticIp=None, primaryNetworkInterface=None, systemDisk=None, dataDisks=None, chargeOnStopped=None, autoImagePolicyId=None, passWordAuth=None, imageInherit=None, noPassword=None, noElasticIp=None):
"""
:param instanceType: (Optional) Instance type. Call [DescribeInstanceTypes](https://docs.jdcloud.com/virtual-machines/api/describeinstancetypes) to get the instance types available in a given region or availability zone.
:param imageId: (Optional) Image ID. Call [DescribeImages](https://docs.jdcloud.com/virtual-machines/api/describeimages) to get the image information of a given region.
:param password: (Optional) Instance password, usable for SSH and VNC login. 8\~30 characters, containing at least three of the following: upper-case letters, lower-case letters, digits and special characters. Special characters include: \(\)\`~!@#$%^&\*\_-+=\|{}\[ ]:";'<>,.?/. For further password requirements see the [general parameter specifications](https://docs.jdcloud.com/virtual-machines/api/general_parameters).
If a key pair is specified and `passwordAuth` is set to `true`, no password is generated or injected; otherwise, even if no password is specified, the system generates a random password by default and notifies you by SMS and email.
:param keyNames: (Optional) Key pair name. Only effective on Linux systems; currently only a single key may be supplied. If specified, it overrides the existing value.
:param metadata: (Optional) User-defined metadata, specified as key-value pairs that can be queried from inside the instance via the metadata service. Up to 20 pairs are supported; key must not exceed 256 characters and value must not exceed 16KB, case-insensitive.
Note: a key ending with a hyphen (-) indicates that the key should be deleted.
:param userdata: (Optional) Custom script. Currently only the launch script (`launch-script`) is supported; it must be `base64`-encoded and the data must not exceed 16KB before encoding.
**Linux**: `bash` and `python` are supported; before encoding, the content must start with `#!/bin/bash` or `#!/usr/bin/env python` respectively.
**Windows**: `bat` and `powershell` are supported; before encoding, the content must use `<cmd></cmd>` or `<powershell></powershell>` as its first and last lines respectively.
:param elasticIp: (Optional) Elastic public IP configuration associated with the primary IP of the primary network interface. If specified, it overrides the existing value.
:param primaryNetworkInterface: (Optional) Primary network interface configuration. If specified, it overrides the existing value.
:param systemDisk: (Optional) System disk configuration. If specified, it overrides the existing value.
:param dataDisks: (Optional) Data disk configuration. The number of cloud disks (system disk plus data disks) attachable to a single instance is limited by the instance type. If specified, it overrides the existing value.
:param chargeOnStopped: (Optional) Stop-without-billing mode. Only effective for pay-by-configuration instances whose system disk is a cloud disk and which are not on a dedicated host. When enabled and the instance is stopped, billing for the instance itself stops and the resources it holds (CPU/memory/GPU/local data disks) are released.
Allowed values:
`keepCharging` (default): keep billing after the instance is stopped; resources are not released.
`stopCharging`: stop billing after the instance is stopped; instance resources are released.
:param autoImagePolicyId: (Optional) Automatic image policy ID.
:param passWordAuth: (Optional) Whether to allow SSH password login.
`yes`: allow SSH password login.
`no`: disallow SSH password login.
Only effective when a key pair is specified; once this parameter is set, any supplied password is ignored and SSH password login is disabled inside the system.
:param imageInherit: (Optional) Whether to use the login credentials from the image instead of specifying a password or key pair.
`yes`: use the image login credentials.
`no` (default): do not use the image login credentials.
Only effective when a private or shared image is used. If `imageInherit=yes` is specified, any supplied password or key pair is ignored.
:param noPassword: (Optional) Pass `true` to clear the password configured in the instance template.
:param noElasticIp: (Optional) Pass `true` to clear the public IP configured in the instance template.
"""
self.instanceType = instanceType
self.imageId = imageId
self.password = password
self.keyNames = keyNames
self.metadata = metadata
self.userdata = userdata
self.elasticIp = elasticIp
self.primaryNetworkInterface = primaryNetworkInterface
self.systemDisk = systemDisk
self.dataDisks = dataDisks
self.chargeOnStopped = chargeOnStopped
self.autoImagePolicyId = autoImagePolicyId
self.passWordAuth = passWordAuth
self.imageInherit = imageInherit
self.noPassword = noPassword
self.noElasticIp = noElasticIp
```
#### File: vod/models/MediaClip.py
```python
class MediaClip(object):
def __init__(self, mediaId=None, mediaIn=None, mediaOut=None, timelineIn=None, timelineOut=None, operations=None):
"""
:param mediaId: (Optional) Material ID; must be the video ID of a VOD media asset
:param mediaIn: (Optional) In-point of the clip within the media asset
:param mediaOut: (Optional) Out-point of the clip within the media asset
:param timelineIn: (Optional) In-point of the clip on the composition timeline
:param timelineOut: (Optional) Out-point of the clip on the composition timeline
:param operations: (Optional)
"""
self.mediaId = mediaId
self.mediaIn = mediaIn
self.mediaOut = mediaOut
self.timelineIn = timelineIn
self.timelineOut = timelineOut
self.operations = operations
```
#### File: waf/models/AntiEvent.py
```python
class AntiEvent(object):
def __init__(self, remoteAddr=None, csaInfo=None, riskLevel=None, area=None, accessTime=None, method=None, attackType=None, url=None, payLoad=None, action=None, ruleName=None, logId=None, isReported=None, wafInstanceId=None, antiStatus=None, upstreamErr=None, skipExist=None, denyExist=None):
"""
:param remoteAddr: (Optional) Source IP
:param csaInfo: (Optional) Threat-intelligence tag
:param riskLevel: (Optional) Risk level
:param area: (Optional) Source region
:param accessTime: (Optional) Time the event occurred
:param method: (Optional) HTTP method
:param attackType: (Optional) Attack type
:param url: (Optional) URL
:param payLoad: (Optional) Malicious payload
:param action: (Optional) Action taken
:param ruleName: (Optional) Rule name
:param logId: (Optional) Log ID
:param isReported: (Optional) Whether the event has been reported to the AI platform; 0 means no
:param wafInstanceId: (Optional) Instance ID
:param antiStatus: (Optional) Status code
:param upstreamErr: (Optional) Status flag
:param skipExist: (Optional) Whether the source has been added to the whitelist; 0 means no
:param denyExist: (Optional) Whether the source has been added to the blacklist; 0 means no
"""
self.remoteAddr = remoteAddr
self.csaInfo = csaInfo
self.riskLevel = riskLevel
self.area = area
self.accessTime = accessTime
self.method = method
self.attackType = attackType
self.url = url
self.payLoad = payLoad
self.action = action
self.ruleName = ruleName
self.logId = logId
self.isReported = isReported
self.wafInstanceId = wafInstanceId
self.antiStatus = antiStatus
self.upstreamErr = upstreamErr
self.skipExist = skipExist
self.denyExist = denyExist
```
#### File: waf/models/DomainMainConfig.py
```python
class DomainMainConfig(object):
def __init__(self, domain=None, cname=None, certName=None, protocols=None, sslProtocols=None, pureClient=None, httpStatus=None, httpsCertUpdateStatus=None, gmHttpsCertUpdateStatus=None, gmCertSupport=None, antiStatus=None, disableWaf=None, attackInfo=None, dnsStatus=None, enableCname2Rs=None, enableIpv6=None, region=None):
"""
:param domain: (Optional) Domain name
:param cname: (Optional) CNAME domain
:param certName: (Optional) Name of the bound certificate
:param protocols: (Optional) Protocols in use, ["http","https"]
:param sslProtocols: (Optional) SSL protocols, ["TLSv1","TLSv1.1","TLSv1.2","SSLv2","SSLv3"]
:param pureClient: (Optional) Fronting proxy, 1: in use, 0: not in use
:param httpStatus: (Optional) Protocol status, 0: normal
:param httpsCertUpdateStatus: (Optional) HTTPS certificate binding status
:param gmHttpsCertUpdateStatus: (Optional) SM (Chinese national cryptography) HTTPS certificate binding status
:param gmCertSupport: (Optional) Whether SM certificates are supported
:param antiStatus: (Optional) Protection status, 0: off, 1: on
:param disableWaf: (Optional) 1: bypass, 0: protection mode
:param attackInfo: (Optional) Attack details for the last seven days
:param dnsStatus: (Optional) Website DNS configuration
:param enableCname2Rs: (Optional) CNAME resolution status. 0: resolve to the VIP, 1: resolve to the origin address
:param enableIpv6: (Optional) CNAME resolution status. 0: resolve to the VIP, 1: resolve to the origin address
:param region: (Optional) Region information of the domain; the type is map[string]regionVipInfo
"""
self.domain = domain
self.cname = cname
self.certName = certName
self.protocols = protocols
self.sslProtocols = sslProtocols
self.pureClient = pureClient
self.httpStatus = httpStatus
self.httpsCertUpdateStatus = httpsCertUpdateStatus
self.gmHttpsCertUpdateStatus = gmHttpsCertUpdateStatus
self.gmCertSupport = gmCertSupport
self.antiStatus = antiStatus
self.disableWaf = disableWaf
self.attackInfo = attackInfo
self.dnsStatus = dnsStatus
self.enableCname2Rs = enableCname2Rs
self.enableIpv6 = enableIpv6
self.region = region
```
#### File: waf/models/UserPolicyInfo.py
```python
class UserPolicyInfo(object):
def __init__(self, id=None, name=None, count=None, domainCnt=None, autoAdd=None, importLevel=None, ruleIds=None, updateTime=None, wafDomains=None):
"""
:param id: (Optional) Rule group ID
:param name: (Optional) Rule group name
:param count: (Optional) Number of rules in the rule group
:param domainCnt: (Optional) Number of domains the rule group is applied to
:param autoAdd: (Optional) Whether the rules update automatically, 0/1
:param importLevel: (Optional) Level of the imported rule set, 0/1/2/-1
:param ruleIds: (Optional) Rule IDs
:param updateTime: (Optional) Update time
:param wafDomains: (Optional) Domain information the custom rule set is applied to
"""
self.id = id
self.name = name
self.count = count
self.domainCnt = domainCnt
self.autoAdd = autoAdd
self.importLevel = importLevel
self.ruleIds = ruleIds
self.updateTime = updateTime
self.wafDomains = wafDomains
```
#### File: waf/models/UsrBotRules.py
```python
class UsrBotRules(object):
def __init__(self, id=None, ruleName=None, detectThrsd=None, detectPeriod=None, matchItems=None, action=None, disable=None, updateTime=None, status=None, ststhrst=None, ststhrstRatio=None, statusDisable=None, dateDisable=None, unit=None, blockTime=None):
"""
:param id: (Optional) Rule ID
:param ruleName: (Optional) Rule name
:param detectThrsd: (Optional) Count threshold
:param detectPeriod: (Optional) Detection window, in seconds
:param matchItems: (Optional) Match condition set; total length must not exceed 4096
:param action: (Optional) Action configuration; defaults to alert
:param disable: (Optional) 0 - in use, 1 - disabled
:param updateTime: (Optional) Update time
:param status: (Optional) Response status code
:param ststhrst: (Optional) Status-code count threshold
:param ststhrstRatio: (Optional) Status-code ratio threshold
:param statusDisable: (Optional) Whether the response-code feature is enabled
:param dateDisable: (Optional) Whether the rule effective-time window is enabled
:param unit: (Optional) Statistics dimension
:param blockTime: (Optional) Duration in minutes, range [1-24*60]
"""
self.id = id
self.ruleName = ruleName
self.detectThrsd = detectThrsd
self.detectPeriod = detectPeriod
self.matchItems = matchItems
self.action = action
self.disable = disable
self.updateTime = updateTime
self.status = status
self.ststhrst = ststhrst
self.ststhrstRatio = ststhrstRatio
self.statusDisable = statusDisable
self.dateDisable = dateDisable
self.unit = unit
self.blockTime = blockTime
```
|
{
"source": "jdcloud-cmw/jdsf-demo-python",
"score": 2
}
|
#### File: demo_server/loadbalance/registryservice.py
```python
import datetime
import random
import threading
import consul
from loadbalance.consulconfig import ConsulConfig, ConsulDiscoverConfig, AppConfig
from tool.networktool import *
service_cache = {}
def reload_service_cache():
if service_cache is not None and len(service_cache)>0:
for key in service_cache.keys():
RegistryService.load_remote_service_cache(key)
class RegistryService(object):
def __init__(self):
consul_discover_config = ConsulDiscoverConfig.load_config()
app_config = AppConfig.load_config()
self.consul_discover_config = consul_discover_config
self.app_config = app_config
def registry_service(self, consul_schema=None, consul_address=None, consul_port=None):
consul_client = RegistryService.get_consul_client(consul_schema, consul_address, consul_port)
if self.consul_discover_config.prefer_ip_address:
if self.app_config.service_ip_address is not None:
service_ip_address = self.app_config.service_ip_address
else:
service_ip_address = get_host_ip()
else:
service_ip_address = get_host_name()
service_port = self.app_config.service_port
health_check_url = self.consul_discover_config.health_check_url
service_name = self.consul_discover_config.service_name
if service_name is None:
service_name = self.app_config.service_name
instance_id = self.consul_discover_config.instance_id
if instance_id is None:
instance_id = service_name + str(service_port) + str(random.uniform(1, 20))
check_url = "http://" + service_ip_address + ":" + str(service_port) + health_check_url
check = consul.Check.http(check_url, "10s")
consul_client.agent.service.register(service_name, instance_id,
address=service_ip_address,
port=service_port,
check=check)
timer = threading.Timer(5, reload_service_cache)
timer.start()
@classmethod
def get_consul_client(cls, consul_schema=None, consul_address=None, consul_port=None):
consul_config = ConsulConfig.load_config()
if consul_schema is not None:
consul_config.schema = consul_schema
if consul_address is not None:
consul_config.address = consul_address
if consul_port is not None:
consul_config.port = consul_port
host = consul_config.address
scheme = consul_config.schema
port = consul_config.port
consul_client = consul.Consul(host, port, None, scheme)
return consul_client
@classmethod
def load_remote_service_cache(cls, service_name):
health_service = RegistryService.get_health_service(service_name)
if health_service is not None and len(health_service[1])>0:
cache_service_info = CacheServiceInfo(service_name=service_name)
for service in health_service[1]:
health_service_info = HealthServiceInfo(service_name=service['Service']['Service'],
instance_id=service['Service']['ID'],
service_host=service['Service']['Address'],
service_port=service['Service']['Port'])
cache_service_info.add_service(health_service_info)
service_cache[service_name] = cache_service_info
@classmethod
def get_health_service(cls,service_name):
consul_client = RegistryService.get_consul_client()
health_service = consul_client.health.service(service_name, passing=True)
if health_service is not None:
return health_service
class HealthServiceInfo(object):
def __init__(self, service_name, instance_id, service_host, service_port):
self.service_name = service_name
self.instance_id = instance_id
self.service_host = service_host
self.service_port = service_port
def __str__(self):
return 'HealthServiceInfo({service_name},{instance_id},{service_host},{service_port})'.format(
service_name=self.service_name,
service_host=self.service_host,
service_port=self.service_port,
instance_id=self.instance_id
)
class CacheServiceInfo(object):
def __init__(self, service_name):
self.update_time = datetime.datetime.now()
self.service_name = service_name
self.services = []
def add_service(self, health_service_info):
self.services.append(health_service_info)
def get_service(self):
service_instance_count = len(self.services)
if service_instance_count > 0:
index = random.randrange(service_instance_count)
service = self.services[index]
return service
else:
health_service = RegistryService.get_health_service(self.service_name)
if health_service is not None:
self.services = []
for service in health_service[1]:
health_service_info = HealthServiceInfo(service_name=service['Service']['Service'],
instance_id=service['Service']['ID'],
service_host=service['Service']['Address'],
service_port=service['Service']['Port'])
self.update_time = datetime.datetime.now()
self.services.append(health_service_info)
index = random.randrange(len(self.services))
return self.services[index]
else:
return None
def __str__(self):
return 'CacheServiceInfo({service_name},{update_time},{services})'.format(
service_name=self.service_name,
update_time=self.update_time,
services=self.services
)
```
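A hypothetical end-to-end sketch of the registry above; it assumes the YAML config files read by ConsulConfig/AppConfig/ConsulDiscoverConfig are in place, and uses 'demo-provider' purely as a placeholder service name.
```python
# Hypothetical usage; the config files read by ConsulConfig/AppConfig are assumed
# to exist, and 'demo-provider' is a placeholder service name.
registry = RegistryService()
registry.registry_service()                    # register this app and schedule the cache refresh

RegistryService.load_remote_service_cache('demo-provider')
cached = service_cache.get('demo-provider')
if cached is not None:
    instance = cached.get_service()            # picks a random healthy instance
    if instance is not None:
        base_url = 'http://{}:{}'.format(instance.service_host, instance.service_port)
```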
#### File: demo_server/opentracer/config.py
```python
import logging
import yaml
from jaeger_client import Config
from loadbalance.consulconfig import AppConfig
class OpenTraceConfig(object):
def __init__(self, simple_type, simple_rate, trace_udp_address,
trace_udp_port, trace_http_address, trace_http_port):
self.simple_type = simple_type
self.simple_rate = simple_rate
self.trace_udp_address = trace_udp_address
self.trace_udp_port = trace_udp_port
self.trace_http_address = trace_http_address
self.trace_http_port = trace_http_port
def __str__(self):
return 'OpenTraceConfig({simple_type},{simple_rate},{trace_udp_address},{trace_udp_port},' \
'{trace_http_address},{trace_http_port})'.format(simple_type=self.simple_type,
simple_rate=self.simple_rate,
trace_udp_address=self.trace_udp_address,
trace_udp_port=self.trace_udp_port,
trace_http_address=self.trace_http_address,
trace_http_port=self.trace_http_port)
@classmethod
def from_yaml_dic(cls, yaml_load_dic):
trace_udp_address = ""
trace_udp_port = 0
trace_http_address = ""
trace_http_port = 0
simple_type = yaml_load_dic['trace']['config']['simpleType']
simple_rate = yaml_load_dic['trace']['config']['simpleRate']
if 'traceUDPAddress' in yaml_load_dic['trace']['config'].keys():
trace_udp_address = yaml_load_dic['trace']['config']['traceUDPAddress']
if 'traceUDPPort' in yaml_load_dic['trace']['config'].keys():
trace_udp_port = yaml_load_dic['trace']['config']['traceUDPPort']
if 'traceHttpAddress' in yaml_load_dic['trace']['config'].keys():
trace_http_address = yaml_load_dic['trace']['config']['traceHttpAddress']
if 'traceHttpPort' in yaml_load_dic['trace']['config'].keys():
trace_http_port = yaml_load_dic['trace']['config']['traceHttpPort']
return OpenTraceConfig(simple_type, simple_rate, trace_udp_address, trace_udp_port, trace_http_address,
trace_http_port)
@classmethod
def load_config(cls, config_yaml_path='./config/appConfig.yaml'):
f = open(config_yaml_path)
y = yaml.load(f, Loader=yaml.SafeLoader)
return OpenTraceConfig.from_yaml_dic(y)
def initialize_tracer(config_file_path):
app_config = AppConfig.load_config(config_file_path)
if config_file_path is None:
trace_config = OpenTraceConfig.load_config()
else:
trace_config = OpenTraceConfig.load_config(config_file_path)
log_level = logging.DEBUG
logging.getLogger('').handlers = []
logging.basicConfig(format='%(asctime)s %(message)s', level=log_level)
# Create configuration object with enabled logging and sampling of all requests.
config = Config(config={'sampler': {'type': trace_config.simple_type, 'param': trace_config.simple_rate},
'logging': True,
'local_agent':
# Also, provide a hostname of Jaeger instance to send traces to.
{'reporting_host': trace_config.trace_udp_address}},
# Service name can be arbitrary string describing this particular web service.
service_name=app_config.service_name)
return config.initialize_tracer()
```
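A short sketch of how initialize_tracer() above might be used; it assumes ./config/appConfig.yaml contains the trace.config section parsed by OpenTraceConfig, and relies on the standard opentracing span API exposed by the jaeger_client tracer.
```python
# Hypothetical usage; the config path matches load_config()'s default and is an assumption.
tracer = initialize_tracer('./config/appConfig.yaml')
with tracer.start_span('demo-operation') as span:   # opentracing-style span
    span.set_tag('component', 'demo_server')
    # ... handle the request here ...
tracer.close()   # flush buffered spans before shutdown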
|
{
"source": "jdcloud-demo/jdcloud-sdk-python",
"score": 2
}
|
#### File: cdn/models/SslCertModel.py
```python
class SslCertModel(object):
def __init__(self, certId=None, certName=None, commonName=None, certType=None, startTime=None, endTime=None, deletable=None, digest=None, aliasName=None, dnsNames=None, downloadable=None):
"""
:param certId: (Optional) Certificate ID
:param certName: (Optional) Certificate name
:param commonName: (Optional) Bound domain name
:param certType: (Optional) Certificate type
:param startTime: (Optional) Start time
:param endTime: (Optional) End time
:param deletable: (Optional) Whether the certificate may be deleted, 1: allowed, 0: not allowed
:param digest: (Optional) SHA-256 digest computed over the private key file
:param aliasName: (Optional) Certificate alias
:param dnsNames: (Optional) Domain names
:param downloadable: (Optional) Whether the certificate may be downloaded, 0 -> not allowed, 1 -> allowed
"""
self.certId = certId
self.certName = certName
self.commonName = commonName
self.certType = certType
self.startTime = startTime
self.endTime = endTime
self.deletable = deletable
self.digest = digest
self.aliasName = aliasName
self.dnsNames = dnsNames
self.downloadable = downloadable
```
#### File: iothub/models/InstanceDetailVO.py
```python
class InstanceDetailVO(object):
def __init__(self, id=None, name=None, subNetId=None, subNetName=None, vpcId=None, vpcName=None, azName=None, pubDomain=None, priDomain=None, status=None, createTime=None, description=None, region=None, rdsInstanceId=None, rdsDatabase=None, rdsHostName=None, rdsUserName=None, jcqId=None, jcqAccessPoint=None, jcqTopicName=None, jcqTopicType=None, jcqRegion=None, ak=None, sk=None):
"""
:param id: (Optional)
:param name: (Optional)
:param subNetId: (Optional)
:param subNetName: (Optional)
:param vpcId: (Optional)
:param vpcName: (Optional)
:param azName: (Optional)
:param pubDomain: (Optional)
:param priDomain: (Optional)
:param status: (Optional)
:param createTime: (Optional)
:param description: (Optional)
:param region: (Optional)
:param rdsInstanceId: (Optional)
:param rdsDatabase: (Optional)
:param rdsHostName: (Optional)
:param rdsUserName: (Optional)
:param jcqId: (Optional)
:param jcqAccessPoint: (Optional)
:param jcqTopicName: (Optional)
:param jcqTopicType: (Optional)
:param jcqRegion: (Optional)
:param ak: (Optional) ak
:param sk: (Optional) sk
"""
self.id = id
self.name = name
self.subNetId = subNetId
self.subNetName = subNetName
self.vpcId = vpcId
self.vpcName = vpcName
self.azName = azName
self.pubDomain = pubDomain
self.priDomain = priDomain
self.status = status
self.createTime = createTime
self.description = description
self.region = region
self.rdsInstanceId = rdsInstanceId
self.rdsDatabase = rdsDatabase
self.rdsHostName = rdsHostName
self.rdsUserName = rdsUserName
self.jcqId = jcqId
self.jcqAccessPoint = jcqAccessPoint
self.jcqTopicName = jcqTopicName
self.jcqTopicType = jcqTopicType
self.jcqRegion = jcqRegion
self.ak = ak
self.sk = sk
```
#### File: ipanti/models/CertInfoModifySpec.py
```python
class CertInfoModifySpec(object):
def __init__(self, certId=None, httpsCertContent=None, httpsRsaKey=None):
"""
:param certId: (Optional) Certificate ID
- If certId is passed, make sure the corresponding certificate has already been uploaded
- If certId is omitted, the site rule uses the certificate given by httpsCertContent and httpsRsaKey
:param httpsCertContent: (Optional) Certificate content
:param httpsRsaKey: (Optional) Private key
"""
self.certId = certId
self.httpsCertContent = httpsCertContent
self.httpsRsaKey = httpsRsaKey
```
#### File: kubernetes/models/MasterVersion.py
```python
class MasterVersion(object):
def __init__(self, version=None, isDefault=None, defaultNodeVersion=None, versionStatus=None, nodeVersions=None):
"""
:param version: (Optional) Cluster version
:param isDefault: (Optional) Whether this is the default version
:param defaultNodeVersion: (Optional) Default node version
:param versionStatus: (Optional) Version status
:param nodeVersions: (Optional) Node configuration
"""
self.version = version
self.isDefault = isDefault
self.defaultNodeVersion = defaultNodeVersion
self.versionStatus = versionStatus
self.nodeVersions = nodeVersions
```
#### File: monitor/apis/DescribeMetricDataRequest.py
```python
from jdcloud_sdk.core.jdcloudrequest import JDCloudRequest
class DescribeMetricDataRequest(JDCloudRequest):
"""
Query the data of multiple metrics for a resource. Metric reference: <a href="https://docs.jdcloud.com/cn/monitoring/metrics">Metrics</a>
"""
def __init__(self, parameters, header=None, version="v1"):
super(DescribeMetricDataRequest, self).__init__(
'/regions/{regionId}/metrics/{metric}/metricData', 'GET', header, version)
self.parameters = parameters
class DescribeMetricDataParameters(object):
def __init__(self, regionId, metric, serviceCode, resourceId):
"""
:param regionId: Region ID
:param metric: English identifier (id) of the metric
:param serviceCode: Resource type, e.g. vm, lb, ip, database
:param resourceId: UUID of the resource
"""
self.regionId = regionId
self.metric = metric
self.aggrType = None
self.downSampleType = None
self.startTime = None
self.endTime = None
self.timeInterval = None
self.tags = None
self.groupBy = None
self.rate = None
self.serviceCode = serviceCode
self.resourceId = resourceId
def setAggrType(self, aggrType):
"""
:param aggrType: (Optional) Aggregation method; defaults to downSampleType or avg. Allowed values: sum, avg, last, min, max
"""
self.aggrType = aggrType
def setDownSampleType(self, downSampleType):
"""
:param downSampleType: (Optional) Downsampling method; defaults to aggrType or avg. Allowed values: sum, avg, last, min, max
"""
self.downSampleType = downSampleType
def setStartTime(self, startTime):
"""
:param startTime: (Optional) Start of the query time range, UTC time, format: 2016-12-11T00:00:00+0800 (note that + must be escaped as %2B in the URL, i.e. 2016-12-11T00:00:00%2B0800)
"""
self.startTime = startTime
def setEndTime(self, endTime):
"""
:param endTime: (Optional) End of the query time range, UTC time, format: 2016-12-11T00:00:00+0800 (if empty, it is derived from startTime and timeInterval) (note that + must be escaped as %2B in the URL, i.e. 2016-12-11T00:00:00%2B0800)
"""
self.endTime = endTime
def setTimeInterval(self, timeInterval):
"""
:param timeInterval: (Optional) Time interval: 1h, 6h, 12h, 1d, 3d, 7d, 14d (fixed intervals). Defaults to 1h, i.e. the hour before the current time
"""
self.timeInterval = timeInterval
def setTags(self, tags):
"""
:param tags: (Optional) Dimension information of the metric data, used to filter the metric data by dimension
"""
self.tags = tags
def setGroupBy(self, groupBy):
"""
:param groupBy: (Optional) Whether to group the queried tags
"""
self.groupBy = groupBy
def setRate(self, rate):
"""
:param rate: (Optional) Whether to compute the rate of change
"""
self.rate = rate
```
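A usage sketch of the request above; the metric name, region and resource ID are placeholders, and the JD Cloud client that would execute the GET is outside this excerpt.
```python
# Sketch only: 'cpu_util', the region and the resource id are placeholder values.
params = DescribeMetricDataParameters(regionId='cn-north-1',
                                      metric='cpu_util',      # placeholder metric id
                                      serviceCode='vm',
                                      resourceId='i-xxxxxxxx')
params.setTimeInterval('1h')
params.setAggrType('avg')
request = DescribeMetricDataRequest(params)
```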
#### File: monitor/models/SiteMonitorHttpOption.py
```python
class SiteMonitorHttpOption(object):
def __init__(self, cookie=None, header=None, method=None, reqContent=None, resCheck=None):
"""
:param cookie: (Optional)
:param header: (Optional)
:param method: (Optional)
:param reqContent: (Optional)
:param resCheck: (Optional)
"""
self.cookie = cookie
self.header = header
self.method = method
self.reqContent = reqContent
self.resCheck = resCheck
```
#### File: rds/apis/CreateROInstanceRequest.py
```python
from jdcloud_sdk.core.jdcloudrequest import JDCloudRequest
class CreateROInstanceRequest(JDCloudRequest):
"""
Create a MySQL read-only instance<br>- MySQL only
"""
def __init__(self, parameters, header=None, version="v1"):
super(CreateROInstanceRequest, self).__init__(
'/regions/{regionId}/instances/{instanceId}:createROInstance', 'POST', header, version)
self.parameters = parameters
class CreateROInstanceParameters(object):
def __init__(self, regionId, instanceId, instanceName, instanceClass, instanceStorageGB, azId, ):
"""
:param regionId: Region code; see the [region and availability-zone table](../Enum-Definitions/Regions-AZ.md) for allowed values
:param instanceId: RDS instance ID, uniquely identifying an RDS instance
:param instanceName: Instance name; for the rules see the help-center document [Name and password restrictions](../../../documentation/Database-and-Cache-Service/RDS/Introduction/Restrictions/SQLServer-Restrictions.md)
:param instanceClass: FlavorId (instance type) of the read-only instance
:param instanceStorageGB: Disk space
:param azId: Availability zone ID
"""
self.regionId = regionId
self.instanceId = instanceId
self.instanceName = instanceName
self.instanceClass = instanceClass
self.instanceStorageType = None
self.instanceStorageGB = instanceStorageGB
self.azId = azId
self.vpcId = None
self.subnetId = None
self.parameterGroup = None
self.storageEncrypted = None
self.count = None
def setInstanceStorageType(self, instanceStorageType):
"""
:param instanceStorageType: (Optional) Storage type; see the [enum definitions](../Enum-Definitions/Enum-Definitions.md). Defaults to LOCAL_SSD
"""
self.instanceStorageType = instanceStorageType
def setVpcId(self, vpcId):
"""
:param vpcId: (Optional) VPC ID; if omitted, the same VPC as the primary instance is used
"""
self.vpcId = vpcId
def setSubnetId(self, subnetId):
"""
:param subnetId: (Optional) Subnet ID; if omitted, the same subnet as the primary instance is used
"""
self.subnetId = subnetId
def setParameterGroup(self, parameterGroup):
"""
:param parameterGroup: (Optional) Parameter group ID; defaults to the same parameter group as the primary instance
"""
self.parameterGroup = parameterGroup
def setStorageEncrypted(self, storageEncrypted):
"""
:param storageEncrypted: (Optional) Instance data encryption (only supported when the storage type is cloud disk). false: not encrypted; true: encrypted. Defaults to false.
"""
self.storageEncrypted = storageEncrypted
def setCount(self, count):
"""
:param count: (Optional) Number of read-only instances to create; defaults to 1
"""
self.count = count
```
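A usage sketch of the request above; the instance IDs, instance class and availability zone are placeholder values, and the client that sends the request is not shown in this excerpt.
```python
# Sketch only: instanceClass, azId and the instance ids are placeholder values.
params = CreateROInstanceParameters(regionId='cn-north-1',
                                    instanceId='mysql-xxxxxxxx',
                                    instanceName='ro-replica-1',
                                    instanceClass='rds.mysql.s1.micro',
                                    instanceStorageGB=100,
                                    azId='cn-north-1a')
params.setCount(2)          # create two read-only instances instead of the default 1
request = CreateROInstanceRequest(params)
```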
#### File: renewal/models/QueryInstanceParam.py
```python
class QueryInstanceParam(object):
def __init__(self, appCode, serviceCode, pageNumber=None, pageSize=None, renewStatus=None, expireTime=None, instanceName=None, instanceId=None, ipAddress=None, billingType=None):
"""
:param appCode: Business line
:param serviceCode: Product line
:param pageNumber: (Optional) Current page number
:param pageSize: (Optional) Number of items per page
:param renewStatus: (Optional) Resource renewal status. 0: manually renewed resources, 1: all resources, 2: auto-renewed resources; defaults to all
:param expireTime: (Optional) Expiration time. 0: expired, n: expiring within n days, -1: all, -2: not yet expired; defaults to all
:param instanceName: (Optional) Resource name
:param instanceId: (Optional) Resource ID
:param ipAddress: (Optional) Private IP address bound to the host
:param billingType: (Optional) Resource billing type. 1: pay by configuration, 3: yearly/monthly subscription; no filtering by default
"""
self.appCode = appCode
self.serviceCode = serviceCode
self.pageNumber = pageNumber
self.pageSize = pageSize
self.renewStatus = renewStatus
self.expireTime = expireTime
self.instanceName = instanceName
self.instanceId = instanceId
self.ipAddress = ipAddress
self.billingType = billingType
```
#### File: vm/models/InstanceSpec.py
```python
class InstanceSpec(object):
def __init__(self, name, agId=None, instanceTemplateId=None, az=None, instanceType=None, imageId=None, password=None, keyNames=None, elasticIp=None, primaryNetworkInterface=None, systemDisk=None, dataDisks=None, charge=None, userdata=None, description=None, noPassword=None, noKeyNames=None, noElasticIp=None):
"""
:param agId: (Optional) Availability group ID. When specified, instances can only be created from the instance template associated with the availability group, and the template parameters cannot be overridden; parameters outside the template may still be supplied.
:param instanceTemplateId: (Optional) Instance template ID. If no availability group is used, any information missing from the template must be supplied via the creation parameters, or you may choose to override the parameters in the launch template.
:param az: (Optional) Availability zone the instance belongs to.
:param instanceType: (Optional) Instance type. Call <a href="http://docs.jdcloud.com/virtual-machines/api/describeinstancetypes">DescribeInstanceTypes</a> to get the instance types of a given region or availability zone.
:param imageId: (Optional) Image ID. Call <a href="http://docs.jdcloud.com/virtual-machines/api/describeimages">DescribeImages</a> to get the image information of a given region.
:param name: Instance name; see the <a href="http://docs.jdcloud.com/virtual-machines/api/general_parameters">general parameter specifications</a>.
:param password: (Optional) Password; see the <a href="http://docs.jdcloud.com/virtual-machines/api/general_parameters">general parameter specifications</a>.
:param keyNames: (Optional) Key pair name; currently only one may be supplied.
:param elasticIp: (Optional) Elastic IP specification associated with the primary IP of the primary network interface
:param primaryNetworkInterface: (Optional) Primary network interface configuration
:param systemDisk: (Optional) System disk configuration
:param dataDisks: (Optional) Data disk configuration. An instance whose system disk is a local disk (local type) may attach 8 data disks; an instance whose system disk is a cloud disk (cloud type) may attach 7 data disks.
:param charge: (Optional) Billing configuration
Instances do not support pay-by-usage billing; pay-by-configuration is the default.
When data disks are created together with the instance, their billing mode must match the instance's.
When an elastic public IP is created together with the instance, its billing mode must match the instance's unless pay-by-usage is explicitly specified for the IP.
:param userdata: (Optional) Metadata. Currently only a single key, "launch-script", is supported, representing the first-boot script. The value must be base64-encoded.
launch-script: on Linux, bash and python are supported; before encoding, the content must start with #!/bin/bash or #!/usr/bin/env python respectively;
launch-script: on Windows, bat and powershell are supported; before encoding, the content must use <cmd></cmd> or <powershell></powershell> as its first and last lines respectively.
:param description: (Optional) Instance description; see the <a href="http://docs.jdcloud.com/virtual-machines/api/general_parameters">general parameter specifications</a>.
:param noPassword: (Optional) Do not use the password from the template.
Only effective (when set to true) if no availability group is used, a template is used and the password parameter is empty.
If a password is also specified when creating from a template, this parameter is ignored and the newly specified password takes precedence.
:param noKeyNames: (Optional) Do not use the key pair from the template.
Only effective (when set to true) if no availability group is used, a template is used and the keynames parameter is empty.
If keynames is also specified when creating from a template, this parameter is ignored and the newly specified value takes precedence.
:param noElasticIp: (Optional) Do not use the elastic public IP from the template.
Only effective (when set to true) if no availability group is used, a template is used and the elasticIp parameter is empty.
If elasticIp is also specified when creating from a template, this parameter is ignored and the newly specified value takes precedence.
"""
self.agId = agId
self.instanceTemplateId = instanceTemplateId
self.az = az
self.instanceType = instanceType
self.imageId = imageId
self.name = name
self.password = password
self.keyNames = keyNames
self.elasticIp = elasticIp
self.primaryNetworkInterface = primaryNetworkInterface
self.systemDisk = systemDisk
self.dataDisks = dataDisks
self.charge = charge
self.userdata = userdata
self.description = description
self.noPassword = noPassword
self.noKeyNames = noKeyNames
self.noElasticIp = noElasticIp
```
#### File: vpc/models/NetworkAclRule.py
```python
class NetworkAclRule(object):
def __init__(self, ruleId=None, protocol=None, fromPort=None, toPort=None, direction=None, addressPrefix=None, ruleAction=None, priority=None, description=None, createdTime=None):
"""
:param ruleId: (Optional) networkAcl rule ID
:param protocol: (Optional) Protocol the rule applies to. Allowed values: All, TCP, UDP, ICMP
:param fromPort: (Optional) Starting transport-layer port of the rule, range 1-65535. If protocol is a transport-layer protocol the default is 1; otherwise the setting has no effect and is fixed at 0. If the rule targets a single port, set fromPort and toPort to the same value
:param toPort: (Optional) Ending transport-layer port of the rule, range 1-65535. If protocol is a transport-layer protocol the default is 65535; otherwise the setting has no effect and is fixed at 0. If the rule targets a single port, set fromPort and toPort to the same value
:param direction: (Optional) Direction of the networkAcl rule. ingress: inbound rule; egress: outbound rule
:param addressPrefix: (Optional) Matched address prefix
:param ruleAction: (Optional) Access control action. allow: permit, deny: reject
:param priority: (Optional) Rule matching priority, range [1,32768]; a smaller number means a higher priority
:param description: (Optional) Description; any UTF-8 characters are allowed, up to 256 characters
:param createdTime: (Optional) Creation time of the networkAclRule
"""
self.ruleId = ruleId
self.protocol = protocol
self.fromPort = fromPort
self.toPort = toPort
self.direction = direction
self.addressPrefix = addressPrefix
self.ruleAction = ruleAction
self.priority = priority
self.description = description
self.createdTime = createdTime
```
#### File: vpc/models/RouteTable.py
```python
class RouteTable(object):
def __init__(self, routeTableId=None, routeTableName=None, routeTableType=None, description=None, vpcId=None, routeTableRules=None, subnetIds=None, createdTime=None):
"""
:param routeTableId: (Optional) Route table ID
:param routeTableName: (Optional) Route table name; only Chinese characters, digits, upper- and lower-case letters, underscores "_" and hyphens "-" are allowed; it must not be empty and must not exceed 32 characters.
:param routeTableType: (Optional) Route table type. default: default route table, custom: custom route table
:param description: (Optional) Route table description; any UTF-8 characters are allowed, up to 256 characters.
:param vpcId: (Optional) VPC ID
:param routeTableRules: (Optional) Route table rule information
:param subnetIds: (Optional) List of subnets bound to the route table
:param createdTime: (Optional) Route table creation time
"""
self.routeTableId = routeTableId
self.routeTableName = routeTableName
self.routeTableType = routeTableType
self.description = description
self.vpcId = vpcId
self.routeTableRules = routeTableRules
self.subnetIds = subnetIds
self.createdTime = createdTime
```
|
{
"source": "jdcphysics/validation",
"score": 3
}
|
#### File: codes/Bband/BOOTES_lf.py
```python
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__version__= "1.0"
def overview():
"""
plots the B-band luminosity function of a simulated data set (histogram) along with that of
Beare et al. (2015) (points)
FILES NEEDED (3 including this one):
import this file to run commands
have file "Beare_15.txt" (in same directory)
have your simulation data file fname (explained below), e.g. 'galaxies.dat'
USAGE:
plot3sep(zcen,fname,hval,boxside,ramin,ramax,decmin,decmax,delz)
OUTPUT:
B-band Luminosity function plots for red, blue, all at redshift zcen
colors separated by
M_U - M_B > 1.074 - 0.18z -0.03 (M_B + 19.4) [red]
ARGUMENTS of plot3sep():
zcen = central mass of simulation sample
fname = filename of data, in ascii (e.g. 'galaxies.dat'), more below
hval =hubble constant (e.g. 0.67)
boxside: two choices depending upon data in fname
periodic box, boxside = box side length in Mpc/h
light cone, any number < 0, to flag that will use ra and dec and delta z
other entries to plot3sep
for periodic box:
every entry after boxside (which is positive) is ignored
can be left out
for light cone:
ramin, ramax, decmin, decmax : minimum and maximum ra and dec
delz : galaxies are taken in a region zcen +/- delz for light cone sample
omega_m is assumed to be 0.31; if you use a very different value
you may need to change the light-cone volume.
INPUT SIMULATION DATA FILE:
(e.g. 'galaxies.dat')
if have B band:
for periodic box, this file is list of galaxies in ascii, one line per galaxy
M_B M_U
units: [AB], absolute magnitudes
for light cone this file is list of galaxies in ascii, one line per galaxy
M_B M_U ra dec redshift
units: [AB], absolute magnitudes for M_B,M_U
if don't have B band:
just set M_U to something random and throw out color plots
INPUT OBSERVATIONAL DATA FILE:
source for data in file Beare_tab8910.txt
http://arxiv.org/abs/1511.01580
R.A.Beare, M.J.I.Brown, K.A.Pimbblet, F.Bian, <NAME>
The z<1.2 optical luminosity function from a sample of ~410000 galaxies in Bootes
"""
import numpy as N
import os
import time
import sys
import matplotlib
import matplotlib.pyplot as plt
def chiofz(zval=0.45,omm=0.31):
"""
comoving distance to redshift zval
omega matter = omm
use for volume if specify region with ra/dec
"""
Nint = 300000
zp1int = N.linspace(1,zval+1,Nint)
ez = N.sqrt(omm*zp1int*zp1int*zp1int + (1-omm))
tmp = 2997.925* N.trapz(1/ez, dx = zp1int[1]-zp1int[0])
return(tmp)
def phi_beare(zcen=0.45,addcolor=0):
"""
read the data of Beare et al. (2015) and take the redshift range
surrounding the chosen zcen
tables 8,9,10 for all, red, blue
addcolor=0 all
addcolor = 1 red
addcolor = 2 blue
units:
Magnitudes: MB - 5 log h70
Phi: 1.e-3 (h70/Mpc)^3 /dex
"""
# now need to find right redshift and type
# first redshift
zrange = N.array([0.2,0.4,0.6,0.8,1.0,1.2])
jjz = N.nonzero(zcen>=zrange)[0]
if ((jjz.size ==0)|(zcen>=1.2)):
print "z = %3.2f not in range"%(zcen)
if (jjz.size > 1):
jjz = jjz.max()
print "using BOOTES range %3.2f < z < %3.2f "%(zrange[jjz],zrange[jjz+1])
ff = open("Beare_tab8910.txt")
phi = N.loadtxt(ff, usecols=(0,2*(jjz+1),2*(jjz+1)+1))
ff.close()
# Now color, just need to start at right place
if (addcolor==0):
jjkeep = N.arange(22) #all
if (addcolor==1):
jjkeep = N.arange(22,46) #red
if (addcolor==2):
jjkeep = N.arange(46,66) #blue
Bmid = phi[jjkeep,0]+0.25/2. #Bmax = Bmin+0.25
phival = phi[jjkeep,1] *1.e-3
phierr = phi[jjkeep,2] *1.e-3
jj = N.nonzero(phival>0)[0]
Bmid = Bmid[jj]
phival = phival[jj]
phierr = phierr[jj]
return(Bmid,phival,phierr,jjz)
def testlum(zcen=0.45,addcolor=0,fname="galshort.dat",hval=0.67,boxside=100,ramin=-2,ramax=-2,decmin=2,decmax=-2,delz=0.02):
"""
usage:
testlum(zval,addcolor,inputfile,hval,boxside,ra_min,ra_max,dec_min,dec_max,delz)
zcen: central redshift for simulation
fname: galaxy data file, more below
addcolor=0 all galaxies
addcolor=1 red
addcolor=2 blue
boxside: positive for periodic box, negative for light cone
boxside = side of box when periodic box
**or**
boxside<0 (e.g. -1) if light cone
ra_min,ra_max,dec_min,dec_max :min/max ra and dec for light cone [ignored for per]
delz = use z range zcen+delz > z < zcen-delz for light cone [ignored for per]
BOOTES tables 8,9,10
colors separated by
M_U - M_B > 1.074 - 0.18z -0.03 (M_B + 19.4) [red]
input file either has entries
galaxy data file fname entries on each line, one per galaxy
[boxside > 0, i.e. fixed time, give boxside in units of Mpc/h]
M_B M_U [ABS]
[boxside < 0,use any boxside value < 0, lightcone]
M_B M_U [ABS], ra, dec, redshift
"""
ff = open(fname)
gals = N.loadtxt(ff)
ff.close()
magB = gals[:,0]
magU = gals[:,1]
if (boxside < 0):
print "using light cone"
ra = gals[:,2]
dec = gals[:,3]
redz = gals[:,4]
#need ra, dec, redshift,delz
chimax = chiofz(zcen+delz) #[Mpc/h]
chimin = chiofz(zcen-delz) #[Mpc/h]
print "ramin,ramax, decmin,decmax %5.4f %5.4f %5.4f %5.4f \n"%(ramin,ramax,decmin,decmax)
angvol = -(N.cos((90-decmin)*N.pi/180) - N.cos((90-decmax)*N.pi/180))*(N.pi*(ramax-ramin)/180.)
chivol =(chimax*chimax*chimax - chimin*chimin*chimin)/3.
vol = chivol*angvol # in [Mpc/h]^3
# truncate galaxy sample to light cone
jj = N.nonzero((ra>ramin)&(ra<ramax)&(dec>decmin)&(dec<decmax)&
(redz<zcen+delz)&(redz>zcen-delz))[0]
magU = magU[jj]
magB = magB[jj]
if (boxside>0):
print "using periodic box, side %8.2f Mpc/h"%(boxside)
vol = boxside*boxside*boxside
#units:
#volume for Phi is in units of [Mpc/h70]^3, we have [Mpc/h]^3,
# so divide volume by (h/h70)^3 = (h/(h/0.7))^3 = 0.7^3
vol = vol/(0.7*0.7*0.7)
# Magnitudes are in units of MB- 5 log h70
magB = magB - 5*N.log10(hval/0.70)
#note color cut is assuming h70's in units, but for h = 0.65 change is -0.005
jj = N.arange(magB.size)
if (addcolor==1):
jj = N.nonzero(magU - magB > 1.074 - 0.18*zcen -0.03* (magB + 19.4) )[0]
if (addcolor==2):
jj = N.nonzero(magU - magB <= 1.074 - 0.18*zcen -0.03* (magB + 19.4) )[0]
magB = magB[jj]
nbin = 50
nhist,bins = N.histogram(magB,nbin,range=(-24.00,-17.75))
bins += (bins[1]-bins[0])/2.
bins = N.delete(bins,nbin)
ngalact = nhist*1./(vol*(bins[1]-bins[0]))
galkind =("all","red","blue")
Bmid,phi,phierr,jjz = phi_beare(zcen,addcolor)
phi += 1.e-10
return(bins,ngalact,Bmid,phi,phierr,jjz)
def plot3sep(zcen=0.45,fname="galshort.dat",hval=0.67,boxside=100,ramin=-2,ramax=-2,decmin=2,decmax=-2,delz=0.02):
"""
plots all/red/blue separately; see the testlum comments for the inputs.
For a periodic box you only need zcen (central simulation redshift), fname (galaxy file), hval (Hubble constant, e.g. 0.67 for a Planck cosmology)
and boxside (e.g. 100 if the box is 100 Mpc/h on a side).
The galshort file format is described above, in the testlum comments.
"""
coltype=("all","red","blue")
collfull=("all galaxies","red galaxies","blue galaxies")
collist=('k','r','b')
cshapelist=('ks','rs','bs')
for i in range(3):
f,ax = plt.subplots(1,1)
ax.set_xlim(-17.75,-24.00)
ax.set_ylim(1.e-6,0.01)
ax.set_yscale("log")
bin_centers,ngalact,Bmid,phi,phierr,jjz=testlum(zcen,i,fname,hval,boxside,ramin,ramax,decmin,decmax,delz)
ax.step(bin_centers, ngalact,collist[i],label=r'simulation $z_{sim}$= %3.2f'%(zcen))
zrange = N.array([0.2,0.3,0.4,0.5,0.65,0.8,1.0])
ax.plot(Bmid,phi,cshapelist[i],label=r'BOOTES %3.2f<z<%3.2f'%(zrange[jjz],zrange[jjz+1]))
ax.text(9.2,2.e-4,collfull[i],color=collist[i])
ax.legend(loc=3)
ax.set_yscale("log")
ax.set_xlabel("$M_B-5 log $h_{70}$")
ax.set_ylabel("$\Phi$[$h_{70}^3$/Mpc${}^3$]")
plt.tight_layout()
plt.savefig("Bband_z%d_%s.pdf"%(zcen*101,coltype[i]))
plt.close("all")
```
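To make the entry points concrete, a short driver following the overview() docstring; 'galaxies.dat' and the ra/dec/redshift window are user-supplied placeholders, not files or values shipped with the script.
```python
# Example driver based on the overview() docstring; 'galaxies.dat' and the
# ra/dec/redshift window below are placeholders for the user's own catalogue.

# Periodic box, 100 Mpc/h on a side, at z = 0.45 with h = 0.67:
plot3sep(zcen=0.45, fname='galaxies.dat', hval=0.67, boxside=100)

# Light cone: a negative boxside switches to the ra/dec/redshift selection.
plot3sep(zcen=0.45, fname='galaxies.dat', hval=0.67, boxside=-1,
         ramin=150.0, ramax=152.0, decmin=30.0, decmax=32.0, delz=0.02)
```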
|
{
"source": "jdcplus/python-gui-openpyxl-tutorial",
"score": 4
}
|
#### File: jdcplus/python-gui-openpyxl-tutorial/test-gui.py
```python
from tkinter import *
class GUI(Frame):
def __init__(self,master=None):
Frame.__init__(self, master)
self.grid()
self.fnameLabel = Label(master, text="First Name")
self.fnameLabel.grid()
self.fnameEntry = Entry(master)
self.fnameEntry.grid()
self.lnameLabel = Label(master, text="Last Name")
self.lnameLabel.grid()
self.lnameEntry = Entry(master)
self.lnameEntry.grid()
self.submitButton = Button(master, command=self.buttonClick, text="Submit")
self.submitButton.grid()
def buttonClick(self):
""" handle button click event and output text from entry area"""
print('hello')
if __name__ == "__main__":
guiFrame = GUI()
guiFrame.mainloop()
```
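As a variation on the tutorial code above (not part of the repository itself), buttonClick could read back the two Entry widgets instead of printing a fixed string:
```python
# Variation sketch: echo the Entry contents on submit, matching the docstring.
def buttonClick(self):
    """Handle the button click and print the entered names."""
    first = self.fnameEntry.get()
    last = self.lnameEntry.get()
    print('Submitted: {} {}'.format(first, last))
```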
|
{
"source": "JDCTeam/android_device_samsung_jf-common",
"score": 2
}
|
#### File: android_device_samsung_jf-common/releasetools/releasetools.py
```python
def FullOTA_CustomAsserts(info):
info.script.AppendExtra('ifelse(is_substring("I337", getprop("ro.bootloader")), run_program("/sbin/sh", "-c", "busybox cp -R /system/rild/gsm/* /system/ && busybox rm /system/lib/libcnefeatureconfig.so"));')
info.script.AppendExtra('ifelse(is_substring("I545", getprop("ro.bootloader")), run_program("/sbin/sh", "-c", "busybox cp -R /system/rild/vzw/* /system/"));')
info.script.AppendExtra('ifelse(is_substring("I545", getprop("ro.bootloader")), run_program("/sbin/sh", "-c", "busybox sed -i \'s/ro.com.google.clientidbase=android-google/ro.com.google.clientidbase=android-verizon/g\' /system/build.prop"));')
info.script.AppendExtra('ifelse(is_substring("L720", getprop("ro.bootloader")), run_program("/sbin/sh", "-c", "busybox cp -R /system/rild/cdma/* /system/ && busybox rm -rf /system/rild/cdma/lib/libril.so && busybox cp -R /system/rild/gsm/lib/libril.so /system/rild/cdma/lib/"));')
info.script.AppendExtra('ifelse(is_substring("M919", getprop("ro.bootloader")), run_program("/sbin/sh", "-c", "busybox cp -R /system/rild/gsm/* /system/ && busybox rm /system/lib/libcnefeatureconfig.so"));')
info.script.AppendExtra('ifelse(is_substring("R970", getprop("ro.bootloader")), run_program("/sbin/sh", "-c", "busybox cp -R /system/rild/usc/* /system/"));')
info.script.AppendExtra('ifelse(is_substring("S970", getprop("ro.bootloader")), run_program("/sbin/sh", "-c", "busybox cp -R /system/rild/gsm/* /system/ && busybox rm /system/lib/libcnefeatureconfig.so"));')
info.script.AppendExtra('ifelse(is_substring("S975", getprop("ro.bootloader")), run_program("/sbin/sh", "-c", "busybox cp -R /system/rild/gsm/* /system/ && busybox rm /system/lib/libcnefeatureconfig.so"));')
info.script.AppendExtra('ifelse(is_substring("I9505", getprop("ro.bootloader")), run_program("/sbin/sh", "-c", "busybox cp -R /system/rild/gsm/* /system/ && busybox rm /system/lib/libcnefeatureconfig.so"));')
info.script.AppendExtra('ifelse(is_substring("I9507", getprop("ro.bootloader")), run_program("/sbin/sh", "-c", "busybox cp -R /system/rild/gsm/* /system/ && busybox rm /system/lib/libcnefeatureconfig.so"));')
info.script.AppendExtra('ifelse(is_substring("I9508", getprop("ro.bootloader")), run_program("/sbin/sh", "-c", "busybox cp -R /system/rild/gsm/* /system/ && busybox rm /system/lib/libcnefeatureconfig.so"));')
info.script.AppendExtra('delete_recursive("/system/rild");')
def FullOTA_InstallEnd(info):
info.script.Mount("/system")
info.script.AppendExtra('set_metadata("/system/bin/qcks", "uid", 0, "gid", 2000, "mode", 0755, "capabilities", 0x0, "selabel", "u:object_r:mdm_helper_exec:s0");')
info.script.AppendExtra('set_metadata("/system/bin/ks", "uid", 0, "gid", 2000, "mode", 0755, "capabilities", 0x0, "selabel", "u:object_r:mdm_helper_exec:s0");')
info.script.AppendExtra('set_metadata("/system/bin/netmgrd", "uid", 0, "gid", 2000, "mode", 0755, "capabilities", 0x0, "selabel", "u:object_r:netmgrd_exec:s0");')
info.script.AppendExtra('set_metadata("/system/bin/qmuxd", "uid", 0, "gid", 2000, "mode", 0755, "capabilities", 0x0, "selabel", "u:object_r:qmuxd_exec:s0");')
info.script.script = [cmd for cmd in info.script.script if not "boot.img" in cmd]
info.script.script = [cmd for cmd in info.script.script if not "show_progress(0.100000, 0);" in cmd]
info.script.AppendExtra('package_extract_file("boot.img", "/tmp/boot.img");')
info.script.AppendExtra('assert(run_program("/sbin/sh", "/system/etc/loki.sh") == 0);')
info.script.Unmount("/system")
```
|
{
"source": "jd/daiquiri",
"score": 2
}
|
#### File: daiquiri/daiquiri/output.py
```python
import datetime
import inspect
import logging
import logging.handlers
import numbers
import os
import sys
try:
import syslog
except ImportError:
syslog = None
from daiquiri import formatter
from daiquiri import handlers
def get_program_name():
"""Return the name of the running program."""
return os.path.basename(inspect.stack()[-1][1])
class Output(object):
"""Generic log output."""
def __init__(self, handler, formatter=formatter.TEXT_FORMATTER, level=None):
self.handler = handler
self.handler.setFormatter(formatter)
if level is not None:
self.handler.setLevel(level)
def add_to_logger(self, logger):
"""Add this output to a logger."""
logger.addHandler(self.handler)
def _get_log_file_path(
logfile=None, logdir=None, program_name=None, logfile_suffix=".log"
):
ret_path = None
if not logdir:
ret_path = logfile
if not ret_path and logfile and logdir:
ret_path = os.path.join(logdir, logfile)
if not ret_path and logdir:
program_name = program_name or get_program_name()
ret_path = os.path.join(logdir, program_name) + logfile_suffix
if not ret_path:
raise ValueError("Unable to determine log file destination")
return ret_path
class File(Output):
"""Ouput to a file."""
def __init__(
self,
filename=None,
directory=None,
suffix=".log",
program_name=None,
formatter=formatter.TEXT_FORMATTER,
level=None,
):
"""Log file output.
:param filename: The log file path to write to. If directory is also
specified, both will be combined.
:param directory: The log directory to write to. If no filename is
specified, the program name and suffix will be used
to construct the full path relative to the directory.
:param suffix: The log file name suffix. This will be only used if no
filename has been provided.
:param program_name: Program name. Autodetected by default.
"""
logpath = _get_log_file_path(filename, directory, program_name, suffix)
handler = logging.handlers.WatchedFileHandler(logpath)
super(File, self).__init__(handler, formatter, level)
class RotatingFile(Output):
"""Output to a file, rotating after a certain size."""
def __init__(
self,
filename=None,
directory=None,
suffix=".log",
program_name=None,
formatter=formatter.TEXT_FORMATTER,
level=None,
max_size_bytes=0,
backup_count=0,
):
"""Rotating log file output.
:param filename: The log file path to write to. If directory is also
specified, both will be combined.
:param directory: The log directory to write to. If no filename is
specified, the program name and suffix will be used
to construct the full path relative to the directory.
:param suffix: The log file name suffix. This will be only used if no
filename has been provided.
:param program_name: Program name. Autodetected by default.
:param max_size_bytes: Allow the file to rollover at a predetermined
size.
:param backup_count: The maximum number of files to rotate logging
output between.
"""
logpath = _get_log_file_path(filename, directory, program_name, suffix)
handler = logging.handlers.RotatingFileHandler(
logpath, maxBytes=max_size_bytes, backupCount=backup_count
)
super(RotatingFile, self).__init__(handler, formatter, level)
def do_rollover(self):
"""Manually forces a log file rotation."""
return self.handler.doRollover()
class TimedRotatingFile(Output):
"""Rotating log file output, triggered by a fixed interval."""
def __init__(
self,
filename=None,
directory=None,
suffix=".log",
program_name=None,
formatter=formatter.TEXT_FORMATTER,
level=None,
interval=datetime.timedelta(hours=24),
backup_count=0,
):
"""Rotating log file output, triggered by a fixed interval.
:param filename: The log file path to write to. If directory is also
specified, both will be combined.
:param directory: The log directory to write to. If no filename is
specified, the program name and suffix will be used
to construct the full path relative to the directory.
:param suffix: The log file name suffix. This will be only used if no
filename has been provided.
:param program_name: Program name. Autodetected by default.
:param interval: datetime.timedelta instance representing how often a
new log file should be created.
:param backup_count: The maximum number of files to rotate logging
output between.
"""
logpath = _get_log_file_path(filename, directory, program_name, suffix)
handler = logging.handlers.TimedRotatingFileHandler(
logpath,
when="S",
interval=self._timedelta_to_seconds(interval),
backupCount=backup_count,
)
super(TimedRotatingFile, self).__init__(handler, formatter, level)
def do_rollover(self):
"""Manually forces a log file rotation."""
return self.handler.doRollover()
@staticmethod
def _timedelta_to_seconds(td):
"""Convert a datetime.timedelta object into a seconds interval.
:param td: datetime.timedelta
:return: time in seconds
:rtype: int
"""
if isinstance(td, numbers.Real):
td = datetime.timedelta(seconds=td)
return td.total_seconds()
class Stream(Output):
"""Generic stream output."""
def __init__(
self, stream=sys.stderr, formatter=formatter.TEXT_FORMATTER, level=None
):
super(Stream, self).__init__(
handlers.TTYDetectorStreamHandler(stream), formatter, level
)
STDERR = Stream()
STDOUT = Stream(sys.stdout)
class Journal(Output):
def __init__(
self, program_name=None, formatter=formatter.TEXT_FORMATTER, level=None
):
program_name = program_name or get_program_name()
super(Journal, self).__init__(
handlers.JournalHandler(program_name), formatter, level
)
class Syslog(Output):
def __init__(
self,
program_name=None,
facility="user",
formatter=formatter.TEXT_FORMATTER,
level=None,
):
if syslog is None:
# FIXME(jd) raise something more specific
raise RuntimeError("syslog is not available on this platform")
super(Syslog, self).__init__(
handlers.SyslogHandler(
program_name=program_name or get_program_name(),
facility=self._find_facility(facility),
),
formatter,
level,
)
@staticmethod
def _find_facility(facility):
# NOTE(jd): Check the validity of facilities at run time as they differ
# depending on the OS and Python version being used.
valid_facilities = [
f
for f in [
"LOG_KERN",
"LOG_USER",
"LOG_MAIL",
"LOG_DAEMON",
"LOG_AUTH",
"LOG_SYSLOG",
"LOG_LPR",
"LOG_NEWS",
"LOG_UUCP",
"LOG_CRON",
"LOG_AUTHPRIV",
"LOG_FTP",
"LOG_LOCAL0",
"LOG_LOCAL1",
"LOG_LOCAL2",
"LOG_LOCAL3",
"LOG_LOCAL4",
"LOG_LOCAL5",
"LOG_LOCAL6",
"LOG_LOCAL7",
]
if getattr(syslog, f, None)
]
facility = facility.upper()
if not facility.startswith("LOG_"):
facility = "LOG_" + facility
if facility not in valid_facilities:
raise TypeError(
"syslog facility must be one of: %s"
% ", ".join("'%s'" % fac for fac in valid_facilities)
)
return getattr(syslog, facility)
class Datadog(Output):
def __init__(
self,
hostname="127.0.0.1",
port=10518,
formatter=formatter.DATADOG_FORMATTER,
level=None,
handler_class=handlers.PlainTextSocketHandler,
):
super(Datadog, self).__init__(
handler_class(hostname, port),
formatter=formatter,
level=level,
)
preconfigured = {
"stderr": STDERR,
"stdout": STDOUT,
}
if syslog is not None:
preconfigured["syslog"] = Syslog()
if handlers.journal is not None:
preconfigured["journal"] = Journal()
```
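For context, a minimal sketch of wiring these Output classes into a logger via daiquiri's top-level setup()/getLogger() helpers, which live outside this module; the log file name and rotation numbers are arbitrary examples.
```python
# Minimal sketch, assuming daiquiri's top-level setup()/getLogger() API;
# the log file name and rotation settings are arbitrary examples.
import logging
import daiquiri

daiquiri.setup(level=logging.INFO, outputs=(
    daiquiri.output.STDERR,
    daiquiri.output.RotatingFile('app.log', max_size_bytes=1048576, backup_count=3),
))
logger = daiquiri.getLogger(__name__)
logger.info("stderr plus rotating-file outputs configured")
```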
|
{
"source": "jd-daniels/homeassistant-subaru",
"score": 2
}
|
#### File: custom_components/subarujd/const.py
```python
from enum import Enum
import subarulink.const as sc
from homeassistant.const import Platform
DOMAIN = "subarujd"
FETCH_INTERVAL = 300
UPDATE_INTERVAL = 7200
CONF_UPDATE_ENABLED = "update_enabled"
CONF_NOTIFICATION_OPTION = "notification_option"
CONF_COUNTRY = "country"
class NotificationOptions(Enum):
"""Lovelace levels of notification."""
FAILURE = "Failure — Only notify on failure"
PENDING = "Pending — Temporary notification of remote command in progress"
SUCCESS = "Success — Persistent notification of completed remote command"
@classmethod
def list(cls):
"""List values of NotificationOptions."""
return [item.value for item in NotificationOptions]
@classmethod
def get_by_value(cls, value):
"""Get enum instance by value."""
for item in cls:
if item.value == value:
return item
# entry fields
ENTRY_CONTROLLER = "controller"
ENTRY_COORDINATOR = "coordinator"
ENTRY_VEHICLES = "vehicles"
ENTRY_LISTENER = "listener"
# update coordinator name
COORDINATOR_NAME = "subaru_data"
# info fields
VEHICLE_VIN = "vin"
VEHICLE_NAME = "display_name"
VEHICLE_HAS_EV = "is_ev"
VEHICLE_API_GEN = "api_gen"
VEHICLE_HAS_REMOTE_START = "has_res"
VEHICLE_HAS_REMOTE_SERVICE = "has_remote"
VEHICLE_HAS_SAFETY_SERVICE = "has_safety"
VEHICLE_LAST_UPDATE = "last_update"
VEHICLE_LAST_FETCH = "last_fetch"
VEHICLE_STATUS = "status"
VEHICLE_CLIMATE = "climate"
VEHICLE_CLIMATE_SELECTED_PRESET = "preset_name"
API_GEN_1 = "g1"
API_GEN_2 = "g2"
MANUFACTURER = "Subaru Corp."
ATTR_DOOR = "door"
REMOTE_SERVICE_FETCH = "fetch"
REMOTE_SERVICE_UPDATE = "update"
REMOTE_SERVICE_LOCK = "lock"
REMOTE_SERVICE_UNLOCK = "unlock"
REMOTE_SERVICE_LIGHTS = "lights"
REMOTE_SERVICE_LIGHTS_STOP = "lights_stop"
REMOTE_SERVICE_HORN = "horn"
REMOTE_SERVICE_HORN_STOP = "horn_stop"
REMOTE_SERVICE_REMOTE_START = "remote_start"
REMOTE_SERVICE_REMOTE_STOP = "remote_stop"
REMOTE_SERVICE_CHARGE_START = "charge_start"
REMOTE_CLIMATE_PRESET_NAME = "preset_name"
SERVICE_UNLOCK_SPECIFIC_DOOR = "unlock_specific_door"
UNLOCK_DOOR_ALL = "all"
UNLOCK_DOOR_DRIVERS = "driver"
UNLOCK_DOOR_TAILGATE = "tailgate"
UNLOCK_VALID_DOORS = {
UNLOCK_DOOR_ALL: sc.ALL_DOORS,
UNLOCK_DOOR_DRIVERS: sc.DRIVERS_DOOR,
UNLOCK_DOOR_TAILGATE: sc.TAILGATE_DOOR,
}
SUPPORTED_PLATFORMS = [
Platform.BINARY_SENSOR,
Platform.DEVICE_TRACKER,
Platform.LOCK,
Platform.SENSOR,
Platform.BUTTON,
Platform.SELECT,
]
ICONS = {
"Avg Fuel Consumption": "mdi:leaf",
"EV Range": "mdi:ev-station",
"Odometer": "mdi:road-variant",
"Range": "mdi:gas-station",
"Horn Start": "mdi:volume-high",
"Horn Stop": "mdi:volume-off",
"Lights Start": "mdi:lightbulb-on",
"Lights Stop": "mdi:lightbulb-off",
"Locate": "mdi:car-connected",
"Refresh": "mdi:refresh",
"Remote Start": "mdi:power",
"Remote Stop": "mdi:stop-circle-outline",
"Charge EV": "mdi:ev-station",
"Climate Preset": "mdi:thermometer-lines",
}
```
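A small illustration of the helpers defined above, using only names from this module and subarulink's door constants.
```python
# Illustration using only names defined above (plus subarulink's door constants).
choices = NotificationOptions.list()                      # values for a config selector
selected = NotificationOptions.get_by_value(choices[0])   # -> NotificationOptions.FAILURE

# Translate the user-facing door option into the subarulink constant:
door_arg = UNLOCK_VALID_DOORS[UNLOCK_DOOR_DRIVERS]        # sc.DRIVERS_DOOR
```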
|
{
"source": "jdddog/dragnet",
"score": 2
}
|
#### File: dragnet/dragnet/data_processing.py
```python
from __future__ import division, print_function
import io
import itertools
import multiprocessing
import os
import re
from dataclasses import dataclass
import ftfy
import numpy as np
from dragnet.blocks import Blockifier, simple_tokenizer, text_from_subtree
from dragnet.lcs import check_inclusion
from lxml import etree
@dataclass
class DatasetFormat:
""" Class for storing the settings used by each file format """
raw_html_dirname: str = 'HTML'
gold_standard_dirname: str = 'Corrected'
gold_standard_blocks_dirname: str = 'block_corrected'
raw_html_ext: str = '.html'
gold_standard_ext: str = '.html.corrected.txt'
gold_standard_blocks_ext: str = '.block_corrected.txt'
FORMATS = {
'1.0': DatasetFormat(),
'2.0': DatasetFormat(raw_html_dirname='html',
gold_standard_dirname='meta',
gold_standard_blocks_dirname='block_corrected',
raw_html_ext='.html',
gold_standard_ext='.toml',
gold_standard_blocks_ext='.block_corrected.txt')
}
RE_COMMENTS_DELIM = re.compile(r'\n*!@#\$%\^&\*\(\)\s+COMMENTS\n*')
def extract_all_gold_standard_data(data_dir, nprocesses=1,
overwrite=False, format='1.0', **kwargs):
"""
Extract the gold standard block-level content and comment percentages from a
directory of labeled data (only those for which the gold standard blocks are
not found), and save results to corresponding files in a block-level
gold standard directory under ``data_dir``.
Args:
data_dir (str): Directory on disk containing subdirectories for all
training data, including raw html files and gold standard content +
comments text files
nprocesses (int): If > 1, use a :class:`multiprocessing.Pool` to
parallelize the extractions
overwrite (bool): If True, overwrite existing gold-standard blocks files.
format (str): The format of the dataset to load. Set to '1.0' by default.
Set to '2.0' to load .toml based files.
**kwargs: passed into :func:`extract_gold_standard_blocks`
See Also:
:func:`extract_gold_standard_blocks`
"""
ds_format: DatasetFormat = FORMATS[format]
use_pool = nprocesses > 1
if use_pool:
pool = multiprocessing.Pool(processes=nprocesses)
# get the set of files that have already been block corrected
# so that we don't block correct them again
if overwrite is False:
gs_blocks_dir = os.path.join(data_dir, ds_format.gold_standard_blocks_dirname)
if not os.path.isdir(gs_blocks_dir):
os.mkdir(gs_blocks_dir)
gs_blocks_filenames = get_filenames(
gs_blocks_dir, full_path=False, match_regex=re.escape(ds_format.gold_standard_blocks_ext))
gs_blocks_fileroots = {
re.search(r'(.+)' + re.escape(ds_format.gold_standard_blocks_ext), gs_blocks_filename).group(1)
for gs_blocks_filename in gs_blocks_filenames}
else:
gs_blocks_fileroots = set()
# extract the block-level gold parse from
# the set of files to be block corrected
gs_dir = os.path.join(data_dir, ds_format.gold_standard_dirname)
gs_filenames = get_filenames(
gs_dir, full_path=False, match_regex=re.escape(ds_format.gold_standard_ext))
for i, gs_filename in enumerate(gs_filenames):
gs_fileroot = re.search(r'(.+)' + re.escape(ds_format.gold_standard_ext), gs_filename).group(1)
if gs_fileroot in gs_blocks_fileroots:
continue
if i % 100 == 0:
print('Extracting gold standard blocks for file "{}"'.format(gs_filename))
if use_pool:
kwargs['format'] = format
pool.apply_async(extract_gold_standard_blocks, (data_dir, gs_fileroot), kwargs)
else:
kwargs['format'] = format
extract_gold_standard_blocks(data_dir, gs_fileroot, **kwargs)
# close out our pool
if use_pool:
pool.close()
pool.join()
def extract_gold_standard_blocks(data_dir, fileroot, encoding=None,
tokenizer=simple_tokenizer, cetr=False, format='1.0'):
"""
Extract the gold standard block-level content and comments for a single
observation identified by ``fileroot``, and write the results to file.
Args:
data_dir (str): The root directory containing sub-directories for
raw HTML, gold standard extracted content, and gold standard blocks.
fileroot (str): Unique identifier for a single observation of training
data, corresponding to the start of its raw html and gold standard
filenames under ``data_dir``.
encoding (str)
tokenizer (Callable): Object that takes a string and returns the tokens
as a list of strings.
cetr (bool): If True, parse the gold standard in clean eval format.
format (str): The format of the dataset to load. Set to '1.0' by default.
Set to '2.0' to load .toml based files.
Notes:
Results are written to a text file in the block-level gold standard dir
:obj:`GOLD_STANDARD_BLOCKS_DIRNAME` below ``data_dir``. Each line
corresponds to a single block in its order of appearance, and has the
following format::
content_frac comments_frac all_tokens content_tokens comments_tokens
where each item is separated by a tab. ``content_frac`` is equal to the
fraction of ``all_tokens`` found in the corresponding gold parse content
text; ``comments_frac`` is the same but for comments text.
"""
# read the raw html, split it into blocks, and tokenize each block
raw_html = read_html_file(data_dir, fileroot, encoding=encoding, format=format) # text is unicode
from dragnet.blocks import BlockifyError
try:
blocks = [b.text for b in Blockifier.blockify(raw_html)] # text is bytes
except BlockifyError as e:
print('BlockifyError for file "{}"'.format(fileroot))
return
blocks_tokens = [tokenizer(block) for block in blocks]
num_blocks_tokens = [len(block_tokens) for block_tokens in blocks_tokens]
# solve the longest common subsequence problem to determine which blocks were kept
# need a list of all the tokens in the blocks, plus a correspondence of which
# block they belong to.
# we will determine which of the tokens is in the extracted content,
# then use the correspondence to block id to determine which blocks were kept
# get a flattened sequence of all tokens in all blocks
# and their corresponding block ids
all_blocks_tokens = []
all_blocks_tokens_block_id = []
for i, block_tokens in enumerate(blocks_tokens):
all_blocks_tokens.extend(block_tokens)
all_blocks_tokens_block_id.extend([i] * len(block_tokens))
# TODO: do we really need `num_all_blocks_tokens`?
# it was used to determine if there were more gold standard tokens than *all*
# tokens, and if so, some info was written to disk
# but it seems like an odd check, and it's probably better to take the
# gold standard data at face value -- presumably, somebody checked it!
# num_all_blocks_tokens = len(all_blocks_tokens)
def get_frac_and_str_tokens_in_gs(gs_txt):
"""
For each block, determine which and what fraction of tokens are
also in the gold standard text ``gs_txt`` for either content
or comments.
Returns:
List[float]
List[str]
"""
gs_tokens = tokenizer(gs_txt)
tokens_in_gs = check_inclusion(all_blocks_tokens, gs_tokens)
num_blocks_tokens_in_gs = [0 for _ in range(len(blocks))]
blocks_tokens_in_gs_tokens = [[] for _ in range(len(blocks))]
for token, token_in_gs, block_id in zip(all_blocks_tokens, tokens_in_gs, all_blocks_tokens_block_id):
if token_in_gs is True:
num_blocks_tokens_in_gs[block_id] += 1
blocks_tokens_in_gs_tokens[block_id].append(token)
blocks_tokens_strs_in_gs = [
' '.join(block_tokens_in_gs_tokens)
for block_tokens_in_gs_tokens in blocks_tokens_in_gs_tokens]
frac_blocks_tokens_in_gs = [
num_block_tokens_in_gs / num_block_tokens
for num_block_tokens_in_gs, num_block_tokens
in zip(num_blocks_tokens_in_gs, num_blocks_tokens)]
return (frac_blocks_tokens_in_gs, blocks_tokens_strs_in_gs)
gs_content, gs_comments = read_gold_standard_file(data_dir, fileroot, cetr=cetr, format=format)
frac_blocks_tokens_in_gs_content, blocks_tokens_strs_in_gs_content = \
get_frac_and_str_tokens_in_gs(gs_content)
frac_blocks_tokens_in_gs_comments, blocks_tokens_strs_in_gs_comments = \
get_frac_and_str_tokens_in_gs(gs_comments)
ds_format = FORMATS[format]
output_fname = os.path.join(
data_dir, ds_format.gold_standard_blocks_dirname, fileroot + ds_format.gold_standard_blocks_ext)
line_fmt = u'{frac_content}\t{frac_comments}\t{block_tokens}\t{content_tokens}\t{comment_tokens}\n'
with io.open(output_fname, mode='w') as f:
for block_id, block_tokens in enumerate(blocks_tokens):
line = line_fmt.format(
frac_content=frac_blocks_tokens_in_gs_content[block_id],
frac_comments=frac_blocks_tokens_in_gs_comments[block_id],
block_tokens=' '.join(block_tokens),
content_tokens=blocks_tokens_strs_in_gs_content[block_id],
comment_tokens=blocks_tokens_strs_in_gs_comments[block_id])
f.write(line)
def get_filenames(dirname, full_path=False, match_regex=None, extension=None):
"""
Get all filenames under ``dirname`` that match ``match_regex`` or have file
extension equal to ``extension``, optionally prepending the full path.
Args:
dirname (str): /path/to/dir on disk where files to read are saved
full_path (bool): if False, return filenames without path; if True,
return filenames with path, as ``os.path.join(dirname, fname)``
match_regex (str): include files whose names match this regex pattern
extension (str): if files only of a certain type are wanted,
specify the file extension (e.g. ".txt")
Yields:
str: next matching filename
"""
if not os.path.exists(dirname):
raise OSError('directory "{}" does not exist'.format(dirname))
match_regex = re.compile(match_regex) if match_regex else None
for filename in sorted(os.listdir(dirname)):
if extension and not os.path.splitext(filename)[-1] == extension:
continue
if match_regex and not match_regex.search(filename):
continue
if full_path is True:
yield os.path.join(dirname, filename)
else:
yield filename
def read_html_file(data_dir, fileroot, encoding=None, format='1.0'):
"""
Read the HTML file corresponding to identifier ``fileroot``
in the raw HTML directory below the root ``data_dir``.
Args:
data_dir (str)
fileroot (str)
encoding (str)
Returns:
str
"""
ds_format: DatasetFormat = FORMATS[format]
fname = os.path.join(
data_dir, ds_format.raw_html_dirname, fileroot + ds_format.raw_html_ext)
encodings = (encoding,) if encoding else ('utf-8', 'iso-8859-1') # 'utf-16'
for encoding in encodings:
try:
with io.open(fname, mode='rt', encoding=encoding) as f:
raw_html = f.read()
break
except (UnicodeDecodeError, UnicodeError):
raw_html = None
return ftfy.fix_encoding(raw_html).strip()
def read_gold_standard_file(data_dir, fileroot, encoding=None, cetr=False, format='1.0'):
"""
Read the gold standard content file corresponding to identifier ``fileroot``
in the gold standard directory below the root ``data_dir``.
Args:
data_dir (str)
fileroot (str)
encoding (str)
cetr (bool): if True, assume no comments and parse the gold standard
to remove tags
        format (str): The format of the dataset to load. Set to '1.0' by default.
            Set to '2.0' to load .toml based files.
Returns:
List[str, str]: contents string and comments string, respectively
"""
ds_format: DatasetFormat = FORMATS[format]
fname = os.path.join(
data_dir, ds_format.gold_standard_dirname, fileroot + ds_format.gold_standard_ext)
encodings = (encoding,) if encoding else ('utf-8', 'utf-16', 'iso-8859-1')
for encoding in encodings:
try:
with io.open(fname, mode='rt', encoding=encoding) as f:
gold_standard = f.read()
break
except (UnicodeDecodeError, UnicodeError):
gold_standard = None
if not gold_standard:
return [u'', u'']
if format == '1.0':
if not cetr:
content_comments = RE_COMMENTS_DELIM.split(gold_standard, maxsplit=1)
# if no comments delimiter found, append empty comments string
if len(content_comments) == 1:
content_comments = [content_comments[0], u'']
else:
tree = etree.fromstring(gold_standard, parser=etree.HTMLParser())
content_comments = [u' '.join(text_from_subtree(tree)), u'']
elif format == '2.0':
# Load toml data
# toml parser has an issue with multiline text strings
text = gold_standard.split("'''")[1]
# data = toml.loads(gold_standard)
# text = data['text']
tree = etree.fromstring(text, parser=etree.HTMLParser())
content_comments = [u' '.join(text_from_subtree(tree)), u'']
else:
raise NotImplementedError(f'Format version {format} is not implemented')
# fix text in case of mangled encodings
content_comments = [ftfy.fix_encoding(content_comments[0]).strip(),
ftfy.fix_encoding(content_comments[1]).strip()]
return content_comments
def read_gold_standard_blocks_file(data_dir, fileroot, split_blocks=True, format='1.0'):
"""
Read the gold standard blocks file corresponding to identifier ``fileroot``
in the gold standard blocks directory below the root ``data_dir``.
Args:
data_dir (str)
fileroot (str)
        split_blocks (bool): If True, split the file's content into blocks.
        format (str): The format of the dataset to load. Set to '1.0' by default.
            Set to '2.0' to load .toml based files.
Returns:
str or List[str]
"""
ds_format: DatasetFormat = FORMATS[format]
fname = os.path.join(
data_dir, ds_format.gold_standard_blocks_dirname, fileroot + ds_format.gold_standard_blocks_ext)
with io.open(fname, mode='r') as f:
data = f.read()
if split_blocks:
return filter(None, data[:-1].split('\n'))
return filter(None, data)
def _parse_content_or_comments_blocks(blocks, block_pct_tokens_thresh):
    is_above_thresh = (np.array([ele[0] for ele in blocks]) > block_pct_tokens_thresh).astype(int)
token_counts = np.array([ele[1] for ele in blocks])
all_tokens = list(itertools.chain.from_iterable(
ele[2] for ele in blocks if ele[1] > 0))
return (is_above_thresh, token_counts, all_tokens)
def prepare_data(data_dir, fileroot, block_pct_tokens_thresh=0.1, format='1.0'):
"""
Prepare data for a single HTML + gold standard blocks example, uniquely
identified by ``fileroot``.
Args:
data_dir (str)
fileroot (str)
block_pct_tokens_thresh (float): must be in [0.0, 1.0]
Returns:
Tuple[str, Tuple[np.array[int], np.array[int], List[str]], Tuple[np.array[int], np.array[int], List[str]]]:
The first element is simply the raw html as a string. The second and
third elements are 3-tuples for content and comments, respectively,
            where the first element is a numpy array of 1s and 0s indicating whether
            each block's fraction of gold-standard tokens exceeds ``block_pct_tokens_thresh``
            (i.e. whether the block is treated as content or comments, respectively);
            the second element is a numpy integer array whose values are
the total number of tokens in each block; and the third element is
a flat list of content or comment tokens as strings, concatenated
from all blocks.
See Also:
:func:`prepare_all_data`
"""
if not 0.0 <= block_pct_tokens_thresh <= 1.0:
raise ValueError('block_pct_tokens_thresh must be in the range [0.0, 1.0]')
html = read_html_file(data_dir, fileroot, format=format)
blocks = read_gold_standard_blocks_file(data_dir, fileroot, split_blocks=True, format=format)
content_blocks = []
comments_blocks = []
for block in blocks:
block_split = block.split('\t')
num_block_tokens = len(block_split[2].split())
# total number of tokens in block is used as weights
content_blocks.append(
(float(block_split[0]), num_block_tokens, block_split[3].split()))
comments_blocks.append(
(float(block_split[1]), num_block_tokens, block_split[4].split()))
parsed_content_blocks = _parse_content_or_comments_blocks(
content_blocks, block_pct_tokens_thresh)
parsed_comments_blocks = _parse_content_or_comments_blocks(
comments_blocks, block_pct_tokens_thresh)
return (html, parsed_content_blocks, parsed_comments_blocks)
def prepare_all_data(data_dir, block_pct_tokens_thresh=0.1, format='1.0'):
"""
Prepare data for all HTML + gold standard blocks examples in ``data_dir``.
Args:
data_dir (str)
block_pct_tokens_thresh (float): must be in [0.0, 1.0]
Returns:
List[Tuple[str, List[float, int, List[str]], List[float, int, List[str]]]]
See Also:
:func:`prepare_data`
"""
ds_format: DatasetFormat = FORMATS[format]
gs_blocks_dir = os.path.join(data_dir, ds_format.gold_standard_blocks_dirname)
gs_blocks_filenames = get_filenames(
gs_blocks_dir, full_path=False, match_regex=re.escape(ds_format.gold_standard_blocks_ext))
gs_blocks_fileroots = (
re.search(r'(.+)' + re.escape(ds_format.gold_standard_blocks_ext), gs_blocks_filename).group(1)
for gs_blocks_filename in gs_blocks_filenames)
return [prepare_data(data_dir, fileroot, block_pct_tokens_thresh, format=format)
for fileroot in gs_blocks_fileroots]
```
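A minimal usage sketch of the data-processing helpers above; ``data_dir`` is a placeholder and must already contain the raw-HTML, gold-standard, and gold-standard-blocks sub-directories described in the docstrings (the fileroot value is borrowed from the test fixtures below):
```python
# Hypothetical sketch: block-correct one labeled observation, then load the whole
# block-corrected dataset for training. `data_dir` and the fileroot are placeholders.
from dragnet import data_processing

data_dir = "/path/to/dragnet_data"

# writes <fileroot>.block_corrected.txt for a single observation
data_processing.extract_gold_standard_blocks(data_dir, "bbc.co.story")

# load every block-corrected example: (html, content 3-tuple, comments 3-tuple)
training_data = data_processing.prepare_all_data(data_dir, block_pct_tokens_thresh=0.1)
for html, content, comments in training_data:
    labels, token_counts, tokens = content
    print("content blocks:", int(labels.sum()), "of", len(labels))
```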
#### File: dragnet/test/test_data_processing.py
```python
import io
import os
from shutil import rmtree
import tempfile
import pytest
from dragnet import data_processing
FIXTURES = os.path.join('test', 'datafiles')
@pytest.fixture(scope="module")
def fileroots():
return ["bbc.co.story", "f1", "sad8-2sdkfj"]
@pytest.fixture(scope="class")
def datadir(fileroots):
datadir = tempfile.mkdtemp()
for froot in fileroots:
fname = os.path.join(datadir, "{}.html.corrected.txt".format(froot))
with io.open(fname, mode="wt") as f:
f.write(u".")
yield datadir
rmtree(datadir)
@pytest.mark.usefixtures("datadir")
class TestGetFilenames(object):
def test_get_filenames(self, fileroots, datadir):
filenames = list(data_processing.get_filenames(datadir))
assert (
filenames ==
["{}.html.corrected.txt".format(froot) for froot in fileroots]
)
def test_get_filenames_full_path(self, fileroots, datadir):
filenames = list(data_processing.get_filenames(datadir, full_path=True))
assert (
filenames ==
[os.path.join(datadir, "{}.html.corrected.txt".format(froot))
for froot in fileroots]
)
def test_get_filenames_match_regex(self, datadir):
filenames = list(data_processing.get_filenames(datadir, match_regex='f1'))
assert filenames == ['f1.html.corrected.txt']
filenames = list(data_processing.get_filenames(datadir, match_regex='foo'))
assert filenames == []
def test_get_filenames_extension(self, fileroots, datadir):
filenames = list(data_processing.get_filenames(datadir, extension='.txt'))
assert (
filenames ==
['{}.html.corrected.txt'.format(froot) for froot in fileroots]
)
filenames = list(data_processing.get_filenames(datadir, extension='.foo'))
assert filenames == []
class TestReadGoldStandard(object):
def test_read_gold_standard(self):
tests = {
'ascii': u'ascii yo!',
'iso-8859-1': u'\xd3',
'utf-8': u'\xae',
'utf-16': u'\xae',
}
for encoding, expected in tests.items():
content_comments = data_processing.read_gold_standard_file(
FIXTURES, encoding)
assert content_comments[0] == u"Content here\nmore content\n" + expected
assert content_comments[1] == "some comments"
def test_utf8_chinese(self):
actual_chinese_content = u'<h>\u9ad8\u8003\u8bed\u6587\u5168\u7a0b\u68c0\u6d4b\u4e09\uff1a\u6b63\u786e\u4f7f\u7528\u8bcd\u8bed\uff08\u719f\u8bed\u4e00\uff09\n\n\n <h>LEARNING.SOHU.COM 2004\u5e745\u670822\u65e515:36 '
gs = " ".join(data_processing.read_gold_standard_file(FIXTURES, "utf-8_chinese"))
assert gs == actual_chinese_content
def make_filepath(s):
return os.path.join(FIXTURES, "block_corrected", "{}.block_corrected.txt".format(s))
class TestExtractGoldStandard(object):
def test_extract_gold_standard(self):
fileroots = ["page_comments", "page_no_comments"]
for fileroot in fileroots:
actual_filepath = make_filepath(fileroot)
expected_filepath = make_filepath(fileroot + "_expected")
data_processing.extract_gold_standard_blocks(FIXTURES, fileroot)
with io.open(actual_filepath, mode="rt") as f:
actual_blocks = f.read()
with io.open(expected_filepath, mode="rt") as f:
expected_blocks = f.read()
os.remove(actual_filepath)
assert expected_blocks == actual_blocks
def test_extract_blank_label(self):
blank_label = data_processing.read_gold_standard_blocks_file(FIXTURES, "blank_label")
assert len(list(blank_label)) == 0
blank_data = data_processing.prepare_data(FIXTURES, "blank_label")
assert len(blank_data[0]) > 0
```
#### File: dragnet/test/test_models.py
```python
import io
import json
import os
import pytest
from dragnet import extract_content, extract_comments, extract_content_and_comments
from dragnet.blocks import simple_tokenizer
from dragnet.util import evaluation_metrics
FIXTURES = os.path.join('test', 'datafiles')
@pytest.fixture(scope="module")
def html():
with io.open(os.path.join(FIXTURES, "models_testing.html"), mode="rt") as f:
html_ = f.read()
return html_
def test_models(html):
models = [extract_content, extract_comments] # extract_content_and_comments]
with io.open(os.path.join(FIXTURES, 'models_content_mod.json'), 'r') as f:
actual_content = json.load(f)
for i, model in enumerate(models):
gold_standard = actual_content[i]
passed = False
for i in range(10):
content = model(html)
_, _, f1 = evaluation_metrics(
simple_tokenizer(gold_standard), simple_tokenizer(content))
if f1 >= 0.8:
passed = True
break
assert passed
def test_content_and_content_comments_extractor(html):
content = extract_content(html)
content_comments = extract_comments(html)
passed_content = False
passed_content_comments = False
for i in range(10):
# actual_content, actual_content_comments = \
# extract_content_and_comments(html)
actual_content = extract_content(html)
actual_content_comments = extract_comments(html)
passed_content = actual_content == content
passed_content_comments = (
actual_content_comments == content_comments)
if passed_content and passed_content_comments:
break
assert passed_content
assert passed_content_comments
def test_content_and_content_comments_extractor_blocks(html):
"""
The content and content/comments extractor should return proper blocks
"""
content = extract_content(html, as_blocks=True)
content_comments = extract_comments(html, as_blocks=True)
passed_content = False
passed_content_comments = False
for i in range(5):
# actual_content, actual_content_comments = \
# content_and_content_comments_extractor.analyze(
# html, blocks=True)
actual_content = extract_content(html, as_blocks=True)
actual_content_comments = extract_comments(html, as_blocks=True)
passed_content = (
[blk.text for blk in actual_content] ==
[blk.text for blk in content]
)
passed_content_comments = (
[blk.text for blk in actual_content_comments] ==
[blk.text for blk in content_comments]
)
if passed_content and passed_content_comments:
break
assert passed_content
assert passed_content_comments
```
|
{
"source": "jdddog/graynet_keras",
"score": 3
}
|
#### File: jdddog/graynet_keras/__init__.py
```python
from .models import *
from .model_config import *
name = "GrayNetKeras"
def init_input_tensor(input_shape, input_tensor, weights='graynet', **kwargs):
if input_tensor is None:
input_tensor = Input(shape=input_shape)
if input_tensor.shape[-1] == 1 and weights=='graynet':
x = input_tensor
elif input_tensor.shape[-1] == 1:
x = Concatenate(axis=-1)([input_tensor, input_tensor, input_tensor])
elif input_tensor.shape[-1] == 3:
x = input_tensor
else:
print("[ERROR] Wrong input tensor or shape")
raise NotImplementedError
return x
def Densenet121_GrayNet(input_tensor=None, input_shape=None, weights='graynet', **kwargs):
# Make input tensor
input_tensor = init_input_tensor(input_shape, input_tensor, weights=weights)
model_name = 'densenet121'
# Build body
if weights == 'graynet':
print("Input tensor shape:", input_tensor.shape)
weights = get_weight_path('densnet121', 'graynet', input_tensor.shape[-1])
print("weight:", weights)
densnet_model = DenseNet121(include_top=False, weights=None, input_tensor=input_tensor, pooling='avg')
gap = densnet_model(input_tensor)
model = Model(inputs=input_tensor, outputs=gap, name=model_name)
model.load_weights(weights, by_name=True, skip_mismatch=True)
return model
def UnetEncoder_GrayNet(input_tensor=None, input_shape=None, weights='graynet', **kwargs):
# input_tensor = init_input_tensor(input_shape, input_tensor)
model_name = 'unetenc'
last_conv, bypass1, bypass2, bypass3, bypass4 = UNET.encoder(input_tensor, num_chs=64, **kwargs)
gap = GlobalAveragePooling2D()(last_conv)
model = Model(inputs=input_tensor, outputs=gap, name=model_name)
# Load GrayNet weight
if weights is not None:
weights_path = get_weight_path(model_name, weights, input_tensor.shape[-1])
model.load_weights(weights_path, by_name=True, skip_mismatch=True)
return model
def ResNet50_GrayNet(input_tensor=None, input_shape=None, weights='graynet', **kwargs):
# input_tensor = init_input_tensor(input_shape, input_tensor)
model_name = 'resnet50'
gap = RESNET50_LMIC(input_tensor, return_tensor=True, **kwargs)
model = Model(inputs=input_tensor, outputs=gap, name=model_name)
# Load GrayNet weight
if weights is not None:
weights_path = get_weight_path(model_name, weights, input_tensor.shape[-1])
model.load_weights(weights_path, by_name=True, skip_mismatch=True)
return model
def InceptionV3_GrayNet(input_tensor=None, input_shape=None, weights='graynet', **kwargs):
# input_tensor = init_input_tensor(input_shape, input_tensor)
model_name = 'inceptionv3'
gap = INCEPTIONV3_LMIC(input_tensor, return_tensor=True, **kwargs)
model = Model(inputs=input_tensor, outputs=gap, name=model_name)
# Load GrayNet weight
if weights is not None:
weights_path = get_weight_path(model_name, weights, input_tensor.shape[-1])
model.load_weights(weights_path, by_name=True, skip_mismatch=True)
return model
```
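A hypothetical usage sketch of the builders above: attach a small classification head to the GrayNet-initialised DenseNet-121 backbone. The input size, the two-class head, and the standalone-Keras import paths are assumptions, and the pretrained weight files resolved by ``get_weight_path`` must be present on disk for the weight loading to succeed:
```python
# Sketch only: the Keras import flavour (keras vs. tensorflow.keras) and the
# weight-file location are assumptions; the builder names mirror those above.
from keras.layers import Dense
from keras.models import Model
from graynet_keras import Densenet121_GrayNet

backbone = Densenet121_GrayNet(input_shape=(256, 256, 1), weights='graynet')
features = backbone.output                        # global-average-pooled features
head = Dense(2, activation='softmax')(features)   # hypothetical 2-class head
classifier = Model(inputs=backbone.input, outputs=head)
classifier.summary()
```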
|
{
"source": "jdddog/mag-archiver",
"score": 2
}
|
#### File: mag-archiver/tests/test_mag.py
```python
import os
import unittest
from unittest.mock import patch
import pendulum
from azure.common import AzureMissingResourceHttpError
from azure.cosmosdb.table.tableservice import TableService
from azure.storage.blob import ContainerProperties
from mag_archiver.azure import create_table
from mag_archiver.mag import make_mag_query, MagState, MagDateType, MagRelease, MagTask, MagArchiverClient, \
hide_if_not_none
class TestMag(unittest.TestCase):
def test_hide_if_not_none(self):
# Test that None is returned for None
value = hide_if_not_none(None)
self.assertEqual(value, None)
# Test that 'hidden' is returned: string
value = hide_if_not_none('hello world')
self.assertEqual(value, 'hidden')
# Test that 'hidden' is returned: integer
value = hide_if_not_none(123)
self.assertEqual(value, 'hidden')
def test_make_mag_query(self):
start_date = pendulum.datetime(year=2020, month=4, day=1)
end_date = pendulum.datetime(year=2020, month=5, day=1)
# No parameters
query = make_mag_query()
self.assertEqual(query, '')
# State parameter
query = make_mag_query(state=MagState.discovered)
self.assertEqual(query, "State eq 'discovered'")
query = make_mag_query(state=MagState.archived)
self.assertEqual(query, "State eq 'archived'")
query = make_mag_query(state=MagState.done)
self.assertEqual(query, "State eq 'done'")
# Start date parameter
query = make_mag_query(start_date=start_date, date_type=MagDateType.release)
self.assertEqual(query, "ReleaseDate ge datetime'2020-04-01T00:00Z'")
query = make_mag_query(start_date=start_date, date_type=MagDateType.discovered)
self.assertEqual(query, "DiscoveredDate ge datetime'2020-04-01T00:00Z'")
query = make_mag_query(start_date=start_date, date_type=MagDateType.archived)
self.assertEqual(query, "ArchivedDate ge datetime'2020-04-01T00:00Z'")
query = make_mag_query(start_date=start_date, date_type=MagDateType.done)
self.assertEqual(query, "DoneDate ge datetime'2020-04-01T00:00Z'")
# End date parameter
query = make_mag_query(end_date=end_date, date_type=MagDateType.release)
self.assertEqual(query, "ReleaseDate lt datetime'2020-05-01T00:00Z'")
query = make_mag_query(end_date=end_date, date_type=MagDateType.discovered)
self.assertEqual(query, "DiscoveredDate lt datetime'2020-05-01T00:00Z'")
query = make_mag_query(end_date=end_date, date_type=MagDateType.archived)
self.assertEqual(query, "ArchivedDate lt datetime'2020-05-01T00:00Z'")
query = make_mag_query(end_date=end_date, date_type=MagDateType.done)
self.assertEqual(query, "DoneDate lt datetime'2020-05-01T00:00Z'")
# Start date, end date and date type
query = make_mag_query(start_date=start_date, end_date=end_date, date_type=MagDateType.release)
self.assertEqual(query, "ReleaseDate ge datetime'2020-04-01T00:00Z' and ReleaseDate lt "
"datetime'2020-05-01T00:00Z'")
query = make_mag_query(start_date=start_date, end_date=end_date, date_type=MagDateType.discovered)
self.assertEqual(query, "DiscoveredDate ge datetime'2020-04-01T00:00Z' and DiscoveredDate lt "
"datetime'2020-05-01T00:00Z'")
query = make_mag_query(start_date=start_date, end_date=end_date, date_type=MagDateType.archived)
self.assertEqual(query, "ArchivedDate ge datetime'2020-04-01T00:00Z' and ArchivedDate lt "
"datetime'2020-05-01T00:00Z'")
query = make_mag_query(start_date=start_date, end_date=end_date, date_type=MagDateType.done)
self.assertEqual(query, "DoneDate ge datetime'2020-04-01T00:00Z' and DoneDate lt "
"datetime'2020-05-01T00:00Z'")
# State, start date, end date and date type
query = make_mag_query(state=MagState.discovered, start_date=start_date, end_date=end_date,
date_type=MagDateType.discovered)
self.assertEqual(query, "State eq 'discovered' and DiscoveredDate ge datetime'2020-04-01T00:00Z' "
"and DiscoveredDate lt datetime'2020-05-01T00:00Z'")
query = make_mag_query(state=MagState.archived, start_date=start_date, end_date=end_date,
date_type=MagDateType.archived)
self.assertEqual(query, "State eq 'archived' and ArchivedDate ge datetime'2020-04-01T00:00Z' "
"and ArchivedDate lt datetime'2020-05-01T00:00Z'")
query = make_mag_query(state=MagState.done, start_date=start_date, end_date=end_date,
date_type=MagDateType.done)
self.assertEqual(query, "State eq 'done' and DoneDate ge datetime'2020-04-01T00:00Z' "
"and DoneDate lt datetime'2020-05-01T00:00Z'")
def make_mag_release(account_name: str, account_key: str, year: int, month: int, day: int):
min_date = pendulum.datetime(1601, 1, 1)
partition_key_ = 'mag'
row_key_ = f'mag-{year:0>4d}-{month:0>2d}-{day:0>2d}'
state_ = MagState.discovered
task_ = MagTask.not_started
release_date_ = pendulum.datetime(year=year, month=month, day=day)
source_container_ = row_key_
source_container_last_modified_ = pendulum.datetime(year=year, month=month, day=day, hour=1)
release_container_ = ''
release_path_ = ''
discovered_date_ = pendulum.datetime(year=year, month=month, day=day, hour=2)
archived_date_ = min_date
done_date_ = min_date
return MagRelease(partition_key_, row_key_, state_, task_, release_date_, source_container_,
source_container_last_modified_, release_container_, release_path_, discovered_date_,
archived_date_, done_date_, account_name=account_name, account_key=account_key)
class TestMagRelease(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestMagRelease, self).__init__(*args, **kwargs)
self.account_name = os.getenv('STORAGE_ACCOUNT_NAME')
self.account_key = os.getenv('STORAGE_ACCOUNT_KEY')
create_table(self.account_name, self.account_key, MagRelease.TABLE_NAME)
def test_secrets_hidden(self):
# Check that account key is hidden
account_name = 'myaccountname'
secret = 'secret'
# Check that account_key and sas_token are hidden
release = make_mag_release(account_name, secret, 2020, 1, 1)
self.assertIn('account_key=hidden', release.__repr__())
self.assertNotIn(secret, release.__str__())
self.assertNotIn(secret, release.__repr__())
# Check that account_key is None
release = make_mag_release(account_name, None, 2020, 1, 1)
self.assertIn('account_key=None', release.__repr__())
def test_create(self):
release = make_mag_release(self.account_name, self.account_key, 2019, 6, 1)
try:
success = release.create()
self.assertTrue(success)
finally:
release.delete()
def test_delete(self):
release = make_mag_release(self.account_name, self.account_key, 2019, 6, 1)
# Check that we can create and then delete
release.create()
release.delete()
# Check that second delete fails
with self.assertRaises(AzureMissingResourceHttpError):
release.delete()
def test_update(self):
release = make_mag_release(self.account_name, self.account_key, 2019, 6, 1)
try:
release.create()
# Update release
release.state = MagState.archived
release.archived_date = pendulum.utcnow().microsecond_(0)
release.update()
# Verify that release is updated
service = TableService(account_name=self.account_name, account_key=self.account_key)
entity = service.get_entity(MagRelease.TABLE_NAME, release.partition_key, release.row_key)
updated_release = MagRelease.from_entity(entity)
self.assertEqual(release.state, updated_release.state)
self.assertEqual(release.archived_date, updated_release.archived_date)
finally:
release.delete()
def make_containers():
containers = []
cp1 = ContainerProperties()
cp1.name = 'mag-2020-04-17'
cp1.last_modified = pendulum.datetime(year=2020, month=4, day=18)
containers.append(cp1)
cp3 = ContainerProperties()
cp3.name = 'mag-2020-05-01'
cp3.last_modified = pendulum.datetime(year=2020, month=5, day=1)
containers.append(cp3)
cp2 = ContainerProperties()
cp2.name = 'mag-2020-04-24'
cp2.last_modified = pendulum.datetime(year=2020, month=4, day=25)
containers.append(cp2)
return containers
class TestMagArchiverClient(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestMagArchiverClient, self).__init__(*args, **kwargs)
self.account_name = os.getenv('STORAGE_ACCOUNT_NAME')
self.account_key = os.getenv('STORAGE_ACCOUNT_KEY')
create_table(self.account_name, self.account_key, MagRelease.TABLE_NAME)
def test_secrets_hidden(self):
# Check that account key is hidden
account_name = 'myaccountname'
secret = 'secret'
# Check that account_key and sas_token are hidden
client = MagArchiverClient(account_name=account_name, account_key=secret, sas_token=secret)
expected = f'MagArchiverClient(account_name={account_name}, account_key=hidden, sas_token=hidden)'
self.assertEqual(client.__str__(), expected)
self.assertEqual(client.__repr__(), expected)
self.assertNotIn(secret, client.__str__())
self.assertNotIn(secret, client.__repr__())
# Check that account_key and sas_token are None
client = MagArchiverClient(account_name=account_name)
expected = f'MagArchiverClient(account_name={account_name}, account_key=None, sas_token=None)'
self.assertEqual(client.__str__(), expected)
self.assertEqual(client.__repr__(), expected)
@patch('mag_archiver.mag.list_containers')
@patch('pendulum.datetime.now')
def test_list_containers(self, mock_now, mock_list_containers):
# Mock time
mock_now.return_value = pendulum.datetime(year=2020, month=5, day=1, minute=10)
# Mock containers
containers_in = make_containers()
mock_list_containers.return_value = containers_in
# Test that 2 containers are returned when last_modified_thresh=1
client = MagArchiverClient(account_name=self.account_name, account_key=self.account_key)
containers_out = client.list_containers(last_modified_thresh=1)
self.assertEqual(len(containers_out), 2)
# Test that 3 containers are returned when last_modified_thresh=0
containers_out = client.list_containers(last_modified_thresh=0)
self.assertEqual(len(containers_out), 3)
# Test sort order reverse=False
self.assertEqual(containers_in[0].name, containers_out[0].name)
self.assertEqual(containers_in[2].name, containers_out[1].name)
self.assertEqual(containers_in[1].name, containers_out[2].name)
# Test sort order reverse=True
containers_out = client.list_containers(last_modified_thresh=0, reverse=True)
self.assertEqual(len(containers_out), 3)
self.assertEqual(containers_in[1].name, containers_out[0].name)
self.assertEqual(containers_in[2].name, containers_out[1].name)
self.assertEqual(containers_in[0].name, containers_out[2].name)
@patch('mag_archiver.mag.list_containers')
@patch('pendulum.datetime.now')
def test_update_releases(self, mock_now, mock_list_containers):
# Mock time
mock_now.return_value = pendulum.datetime(year=2020, month=5, day=1, minute=10)
# Mock containers
containers_in = make_containers()
mock_list_containers.return_value = containers_in
# Mock fetching of containers
client = MagArchiverClient(account_name=self.account_name, account_key=self.account_key)
containers = client.list_containers(last_modified_thresh=1)
try:
# Update releases based on containers
num_updated, num_errors = client.update_releases(containers)
self.assertEqual(num_updated, 2)
self.assertEqual(num_errors, 0)
finally:
# Clean up
service = TableService(account_name=self.account_name, account_key=self.account_key)
for container in containers:
service.delete_entity(MagRelease.TABLE_NAME, 'mag', container.name.replace("mag-", ""))
@patch('mag_archiver.mag.list_containers')
@patch('pendulum.datetime.now')
def test_list_releases(self, mock_now, mock_list_containers):
# Mock time
mock_now.return_value = pendulum.datetime(year=2020, month=5, day=1, hour=1)
# Mock containers
containers_in = make_containers()
mock_list_containers.return_value = containers_in
# Mock fetching of containers
client = MagArchiverClient(account_name=self.account_name, account_key=self.account_key)
containers = client.list_containers(last_modified_thresh=1)
try:
# Update releases based on containers
num_updated, num_errors = client.update_releases(containers)
self.assertEqual(num_updated, 3)
self.assertEqual(num_errors, 0)
# Two releases
start_date = pendulum.datetime(year=2020, month=4, day=17)
end_date = pendulum.datetime(year=2020, month=5, day=1)
releases = client.list_releases(start_date=start_date, end_date=end_date, state=MagState.discovered,
date_type=MagDateType.release)
self.assertEqual(len(releases), 2)
# 1 release
start_date = pendulum.datetime(year=2020, month=4, day=17, minute=1)
end_date = pendulum.datetime(year=2020, month=5, day=1)
releases = client.list_releases(start_date=start_date, end_date=end_date, state=MagState.discovered,
date_type=MagDateType.release)
self.assertEqual(len(releases), 1)
# Three releases
start_date = pendulum.datetime(year=2020, month=4, day=17)
end_date = pendulum.datetime(year=2020, month=5, day=1, minute=1)
releases = client.list_releases(start_date=start_date, end_date=end_date, state=MagState.discovered,
date_type=MagDateType.release, reverse=False)
self.assertEqual(len(releases), 3)
# Sorting reverse=False
self.assertEqual(releases[0].row_key, '2020-04-17')
self.assertEqual(releases[1].row_key, '2020-04-24')
self.assertEqual(releases[2].row_key, '2020-05-01')
# Sorting reverse=True
releases = client.list_releases(start_date=start_date, end_date=end_date,
state=MagState.discovered, date_type=MagDateType.release,
reverse=True)
self.assertEqual(releases[0].row_key, '2020-05-01')
self.assertEqual(releases[1].row_key, '2020-04-24')
self.assertEqual(releases[2].row_key, '2020-04-17')
finally:
# Clean up
service = TableService(account_name=self.account_name, account_key=self.account_key)
for container in containers:
service.delete_entity(MagRelease.TABLE_NAME, 'mag', container.name.replace("mag-", ""))
```
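For reference, a sketch of how the client exercised by these tests might be used outside a test run; the storage credentials are assumed to come from the same environment variables used above:
```python
# Hypothetical sketch, mirroring the calls made in TestMagArchiverClient above.
import os
import pendulum
from mag_archiver.mag import MagArchiverClient, MagState, MagDateType

client = MagArchiverClient(account_name=os.getenv('STORAGE_ACCOUNT_NAME'),
                           account_key=os.getenv('STORAGE_ACCOUNT_KEY'))

# Discover recently modified MAG containers and register them as releases
containers = client.list_containers(last_modified_thresh=1)
num_updated, num_errors = client.update_releases(containers)

# Query releases discovered for April 2020, newest first
releases = client.list_releases(start_date=pendulum.datetime(2020, 4, 1),
                                end_date=pendulum.datetime(2020, 5, 1),
                                state=MagState.discovered,
                                date_type=MagDateType.release,
                                reverse=True)
for release in releases:
    print(release.row_key)
```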
|
{
"source": "jdddog/nao_hri",
"score": 2
}
|
#### File: nao_hri/scripts/nao_gesture_action_server_node.py
```python
import rospy
import actionlib
from hri_framework import IGestureActionServer, GestureHandle
from nao_hri import NaoNode, Gesture
from hri_msgs.msg import TargetAction, TargetGoal
from threading import Timer, RLock
from nao_hri import AnimationType
from threading import Thread
class NaoGestureHandle(GestureHandle):
def __init__(self, goal_handle, gesture, motion_id=None, client=None):
GestureHandle.__init__(self, goal_handle, gesture)
self.motion_id = motion_id
self.client = client
class NaoGestureActionServer(IGestureActionServer, NaoNode):
def __init__(self):
IGestureActionServer.__init__(self, Gesture)
self.motion_proxy = None
self.lock = RLock()
self.larm_client = actionlib.SimpleActionClient('nao_point_left', TargetAction)
self.larm_gh = None
self.rarm_client = actionlib.SimpleActionClient('nao_point_right', TargetAction)
self.rarm_gh = None
def start(self):
module_name = self.get_instance_name(globals())
NaoNode.__init__(self, module_name)
self.motion_proxy = self.get_proxy('ALMotion')
super(NaoGestureActionServer, self).start()
@staticmethod
def get_actual_duration(times):
maxTime = 0.0
for time in times:
tempMax = max(time)
if tempMax > maxTime:
maxTime = tempMax
return maxTime
def start_gesture(self, goal_handle):
with self.lock:
goal = goal_handle.get_goal()
if self.is_valid_gesture(goal.gesture):
gesture = Gesture[goal.gesture]
if goal.duration == -1:
duration = gesture.default_duration
else:
duration = goal.duration
if gesture.animation_type is AnimationType.Keyframe:
animations = gesture.keyframe_animations()
names = []
times = []
keys = []
durations = []
for a in animations:
durations.append(a.get_end_time())
(n_temp, t_temp, k_temp) = a.get_ntk(duration)
names += n_temp
times += t_temp
keys += k_temp
actual_duration = NaoGestureActionServer.get_actual_duration(times)
motion_id = self.motion_proxy.post.angleInterpolationBezier(names, times, keys)
gesture_handle = NaoGestureHandle(goal_handle, gesture, motion_id=motion_id)
self.add_gesture_handle(gesture_handle)
gesture_handle.start_timer(actual_duration, self.set_succeeded, [goal_handle])
else:
target_goal = TargetGoal()
target_goal.target = goal.target
target_goal.speed = 0.5
target_goal.acceleration = 0.3
if gesture is Gesture.PointLarm:
if self.larm_gh is None:
self.larm_gh = goal_handle
client = self.larm_client
done_cb = self.larm_succeeded
else:
self.set_aborted(goal_handle)
rospy.logwarn('Left arm is already busy performing a gesture, please cancel it first')
return
elif gesture is Gesture.PointRarm:
if self.rarm_gh is None:
self.rarm_gh = goal_handle
client = self.rarm_client
done_cb = self.rarm_succeeded
else:
self.set_aborted(goal_handle)
rospy.logwarn('Right arm is already busy performing a gesture, please cancel it first')
return
gesture_handle = NaoGestureHandle(goal_handle, gesture, client=client)
self.add_gesture_handle(gesture_handle)
if goal.duration == -1:
client.send_goal(target_goal, done_cb=done_cb)
else:
client.send_goal(target_goal)
gesture_handle.start_timer(duration, self.set_succeeded, [goal_handle])
else:
self.set_aborted(goal_handle)
def larm_succeeded(self):
with self.lock:
self.set_succeeded(self.larm_gh)
self.larm_gh = None
def rarm_succeeded(self):
with self.lock:
self.set_succeeded(self.rarm_gh)
self.rarm_gh = None
def larm_cancelled(self):
with self.lock:
self.cancel_gesture(self.larm_gh)
self.larm_gh = None
def rarm_cancelled(self):
with self.lock:
self.cancel_gesture(self.rarm_gh)
self.rarm_gh = None
def cancel_gesture(self, goal_handle):
with self.lock:
gesture_handle = self.get_gesture_handle(goal_handle)
gesture_handle.stop_timer()
if gesture_handle.gesture.animation_type is AnimationType.Keyframe:
self.motion_proxy.stop(gesture_handle.motion_id)
else:
gesture_handle.client.cancel_goal()
if __name__ == "__main__":
rospy.init_node('gesture_action_server')
gesture_server = NaoGestureActionServer()
gesture_server.start()
rospy.spin()
```
#### File: src/nao_hri/joint.py
```python
import rospy
from ros_blender_bridge import Joint
from naoqi_bridge_msgs.msg import JointAnglesWithSpeed
class NaoJoint(Joint):
def __init__(self, joint_name):
Joint.__init__(self, joint_name)
self.msg = JointAnglesWithSpeed()
self.msg.joint_names.append(joint_name)
self.joint_pub = rospy.Publisher('joint_angles', JointAnglesWithSpeed, queue_size=10)
def reset_msg(self):
self.msg.joint_angles = []
self.msg.header.stamp = rospy.Time().now()
def set_position(self, position):
if position not in self.msg.joint_angles:
self.reset_msg()
self.msg.joint_angles.append(position)
self.msg.speed = self.speed
self.joint_pub.publish(self.msg)
def set_speed(self, speed):
self.speed = speed
def set_acceleration(self, acceleration):
pass
```
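A small, hypothetical sketch of driving a single joint through the publisher above; it assumes a running ROS master, the naoqi bridge subscribed to ``joint_angles``, and that ``NaoJoint`` is importable from the ``nao_hri`` package as shown (the import path and joint name are assumptions):
```python
# Sketch only: import path, joint name, and timing are assumptions.
import rospy
from nao_hri import NaoJoint

rospy.init_node('nao_joint_demo')
head_yaw = NaoJoint('HeadYaw')   # 'HeadYaw' is a standard NAO joint name
head_yaw.set_speed(0.2)          # fraction of maximum speed used by the bridge
head_yaw.set_position(0.5)       # target angle in radians
rospy.sleep(1.0)
```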
|
{
"source": "jddeal/python-cmr",
"score": 3
}
|
#### File: python-cmr/cmr/queries.py
```python
try:
from urllib.parse import quote
except ImportError:
from urllib import pathname2url as quote
from datetime import datetime
from inspect import getmembers, ismethod
from re import search
from requests import get, exceptions
CMR_OPS = "https://cmr.earthdata.nasa.gov/search/"
CMR_UAT = "https://cmr.uat.earthdata.nasa.gov/search/"
CMR_SIT = "https://cmr.sit.earthdata.nasa.gov/search/"
class Query(object):
"""
Base class for all CMR queries.
"""
_base_url = ""
_route = ""
_format = "json"
_valid_formats_regex = [
"json", "xml", "echo10", "iso", "iso19115",
"csv", "atom", "kml", "native"
]
def __init__(self, route, mode=CMR_OPS):
self.params = {}
self.options = {}
self._route = route
self.mode(mode)
def get(self, limit=2000):
"""
Get all results up to some limit, even if spanning multiple pages.
        :param limit: The number of results to return
:returns: query results as a list
"""
page_size = min(limit, 2000)
url = self._build_url()
results = []
page = 1
while len(results) < limit:
response = get(url, params={'page_size': page_size, 'page_num': page})
try:
response.raise_for_status()
except exceptions.HTTPError as ex:
raise RuntimeError(ex.response.text)
if self._format == "json":
latest = response.json()['feed']['entry']
else:
latest = [response.text]
if len(latest) == 0:
break
results.extend(latest)
page += 1
return results
def hits(self):
"""
Returns the number of hits the current query will return. This is done by
making a lightweight query to CMR and inspecting the returned headers.
        :returns: number of results reported by CMR
"""
url = self._build_url()
response = get(url, params={'page_size': 0})
try:
response.raise_for_status()
except exceptions.HTTPError as ex:
raise RuntimeError(ex.response.text)
return int(response.headers["CMR-Hits"])
def get_all(self):
"""
        Returns all of the results for the query. This will call hits() first to determine how many
        results there are, and then calls get() with that number. This method could take quite
        a while if many requests have to be made.
:returns: query results as a list
"""
return self.get(self.hits())
def parameters(self, **kwargs):
"""
Provide query parameters as keyword arguments. The keyword needs to match the name
of the method, and the value should either be the value or a tuple of values.
Example: parameters(short_name="AST_L1T", point=(42.5, -101.25))
:returns: Query instance
"""
# build a dictionary of method names and their reference
methods = {}
for name, func in getmembers(self, predicate=ismethod):
methods[name] = func
for key, val in kwargs.items():
# verify the key matches one of our methods
if key not in methods:
raise ValueError("Unknown key {}".format(key))
# call the method
if isinstance(val, tuple):
methods[key](*val)
else:
methods[key](val)
return self
def format(self, output_format="json"):
"""
Sets the format for the returned results.
:param output_format: Preferred output format
:returns: Query instance
"""
if not output_format:
output_format = "json"
# check requested format against the valid format regex's
for _format in self._valid_formats_regex:
if search(_format, output_format):
self._format = output_format
return self
# if we got here, we didn't find a matching format
raise ValueError("Unsupported format '{}'".format(output_format))
def online_only(self, online_only=True):
"""
Only match granules that are listed online and not available for download.
The opposite of this method is downloadable().
:param online_only: True to require granules only be online
:returns: Query instance
"""
if not isinstance(online_only, bool):
raise TypeError("Online_only must be of type bool")
# remove the inverse flag so CMR doesn't crash
if "downloadable" in self.params:
del self.params["downloadable"]
self.params['online_only'] = online_only
return self
def temporal(self, date_from, date_to, exclude_boundary=False):
"""
Filter by an open or closed date range.
        Dates can be provided as datetime objects or ISO 8601 formatted strings. Multiple
        ranges can be provided by successive calls to this method before the query is executed.
:param date_from: earliest date of temporal range
:param date_to: latest date of temporal range
:param exclude_boundary: whether or not to exclude the date_from/to in the matched range
        :returns: Query instance
"""
iso_8601 = "%Y-%m-%dT%H:%M:%SZ"
# process each date into a datetime object
def convert_to_string(date):
"""
Returns the argument as an ISO 8601 or empty string.
"""
if not date:
return ""
try:
# see if it's datetime-like
return date.strftime(iso_8601)
except AttributeError:
try:
# maybe it already is an ISO 8601 string
datetime.strptime(date, iso_8601)
return date
except TypeError:
raise ValueError(
"Please provide None, datetime objects, or ISO 8601 formatted strings."
)
date_from = convert_to_string(date_from)
date_to = convert_to_string(date_to)
# if we have both dates, make sure from isn't later than to
if date_from and date_to:
if date_from > date_to:
raise ValueError("date_from must be earlier than date_to.")
# good to go, make sure we have a param list
if "temporal" not in self.params:
self.params["temporal"] = []
self.params["temporal"].append("{},{}".format(date_from, date_to))
if exclude_boundary:
self.options["temporal"] = {
"exclude_boundary": True
}
return self
def short_name(self, short_name):
"""
Filter by short name (aka product or collection name).
:param short_name: name of collection
:returns: Query instance
"""
if not short_name:
return self
self.params['short_name'] = short_name
return self
def version(self, version):
"""
Filter by version. Note that CMR defines this as a string. For example,
MODIS version 6 products must be searched for with "006".
:param version: version string
:returns: Query instance
"""
if not version:
return self
self.params['version'] = version
return self
def point(self, lon, lat):
"""
Filter by granules that include a geographic point.
:param lon: longitude of geographic point
:param lat: latitude of geographic point
:returns: Query instance
"""
if not lat or not lon:
return self
# coordinates must be a float
lon = float(lon)
lat = float(lat)
self.params['point'] = "{},{}".format(lon, lat)
return self
def polygon(self, coordinates):
"""
Filter by granules that overlap a polygonal area. Must be used in combination with a
collection filtering parameter such as short_name or entry_title.
:param coordinates: list of (lon, lat) tuples
:returns: Query instance
"""
if not coordinates:
return self
# make sure we were passed something iterable
try:
iter(coordinates)
except TypeError:
raise ValueError("A line must be an iterable of coordinate tuples. Ex: [(90,90), (91, 90), ...]")
# polygon requires at least 4 pairs of coordinates
if len(coordinates) < 4:
raise ValueError("A polygon requires at least 4 pairs of coordinates.")
# convert to floats
as_floats = []
for lon, lat in coordinates:
as_floats.extend([float(lon), float(lat)])
# last point must match first point to complete polygon
if as_floats[0] != as_floats[-2] or as_floats[1] != as_floats[-1]:
raise ValueError("Coordinates of the last pair must match the first pair.")
# convert to strings
as_strs = [str(val) for val in as_floats]
self.params["polygon"] = ",".join(as_strs)
return self
def bounding_box(self, lower_left_lon, lower_left_lat, upper_right_lon, upper_right_lat):
"""
Filter by granules that overlap a bounding box. Must be used in combination with
a collection filtering parameter such as short_name or entry_title.
:param lower_left_lon: lower left longitude of the box
:param lower_left_lat: lower left latitude of the box
:param upper_right_lon: upper right longitude of the box
:param upper_right_lat: upper right latitude of the box
:returns: Query instance
"""
self.params["bounding_box"] = "{},{},{},{}".format(
float(lower_left_lon),
float(lower_left_lat),
float(upper_right_lon),
float(upper_right_lat)
)
return self
def line(self, coordinates):
"""
Filter by granules that overlap a series of connected points. Must be used in combination
with a collection filtering parameter such as short_name or entry_title.
:param coordinates: a list of (lon, lat) tuples
:returns: Query instance
"""
if not coordinates:
return self
# make sure we were passed something iterable
try:
iter(coordinates)
except TypeError:
raise ValueError("A line must be an iterable of coordinate tuples. Ex: [(90,90), (91, 90), ...]")
# need at least 2 pairs of coordinates
if len(coordinates) < 2:
raise ValueError("A line requires at least 2 pairs of coordinates.")
# make sure they're all floats
as_floats = []
for lon, lat in coordinates:
as_floats.extend([float(lon), float(lat)])
# cast back to string for join
as_strs = [str(val) for val in as_floats]
self.params["line"] = ",".join(as_strs)
return self
def downloadable(self, downloadable=True):
"""
Only match granules that are available for download. The opposite of this
method is online_only().
:param downloadable: True to require granules be downloadable
:returns: Query instance
"""
if not isinstance(downloadable, bool):
raise TypeError("Downloadable must be of type bool")
# remove the inverse flag so CMR doesn't crash
if "online_only" in self.params:
del self.params["online_only"]
self.params['downloadable'] = downloadable
return self
def entry_title(self, entry_title):
"""
Filter by the collection entry title.
:param entry_title: Entry title of the collection
:returns: Query instance
"""
entry_title = quote(entry_title)
self.params['entry_title'] = entry_title
return self
def _build_url(self):
"""
Builds the URL that will be used to query CMR.
:returns: the url as a string
"""
# last chance validation for parameters
if not self._valid_state():
raise RuntimeError(("Spatial parameters must be accompanied by a collection "
"filter (ex: short_name or entry_title)."))
# encode params
formatted_params = []
for key, val in self.params.items():
# list params require slightly different formatting
if isinstance(val, list):
for list_val in val:
formatted_params.append("{}[]={}".format(key, list_val))
elif isinstance(val, bool):
formatted_params.append("{}={}".format(key, str(val).lower()))
else:
formatted_params.append("{}={}".format(key, val))
params_as_string = "&".join(formatted_params)
# encode options
formatted_options = []
for param_key in self.options:
for option_key, val in self.options[param_key].items():
# all CMR options must be booleans
if not isinstance(val, bool):
raise ValueError("parameter '{}' with option '{}' must be a boolean".format(
param_key,
option_key
))
formatted_options.append("options[{}][{}]={}".format(
param_key,
option_key,
val
))
options_as_string = "&".join(formatted_options)
res = "{}.{}?{}&{}".format(
self._base_url,
self._format,
params_as_string,
options_as_string
)
res = res.rstrip('&')
return res
def _valid_state(self):
"""
Determines if the Query is in a valid state based on the parameters and options
that have been set. This should be implemented by the subclasses.
:returns: True if the state is valid, otherwise False
"""
raise NotImplementedError()
def mode(self, mode=CMR_OPS):
"""
Sets the mode of the api target to the given URL
CMR_OPS, CMR_UAT, CMR_SIT are provided as class variables
:param mode: Mode to set the query to target
:throws: Will throw if provided None
"""
if mode is None:
raise ValueError("Please provide a valid mode (CMR_OPS, CMR_UAT, CMR_SIT)")
self._base_url = str(mode) + self._route
class GranuleQuery(Query):
"""
Class for querying granules from the CMR.
"""
def __init__(self, mode=CMR_OPS):
Query.__init__(self, "granules", mode)
def orbit_number(self, orbit1, orbit2=None):
""""
Filter by the orbit number the granule was acquired during. Either a single
orbit can be targeted or a range of orbits.
:param orbit1: orbit to target (lower limit of range when orbit2 is provided)
:param orbit2: upper limit of range
:returns: Query instance
"""
if orbit2:
self.params['orbit_number'] = quote('{},{}'.format(str(orbit1), str(orbit2)))
else:
self.params['orbit_number'] = orbit1
return self
def day_night_flag(self, day_night_flag):
"""
Filter by period of the day the granule was collected during.
:param day_night_flag: "day", "night", or "unspecified"
:returns: Query instance
"""
if not isinstance(day_night_flag, str):
raise TypeError("day_night_flag must be of type str.")
day_night_flag = day_night_flag.lower()
if day_night_flag not in ['day', 'night', 'unspecified']:
raise ValueError("day_night_flag must be day, night or unspecified.")
self.params['day_night_flag'] = day_night_flag
return self
def cloud_cover(self, min_cover=0, max_cover=100):
"""
Filter by the percentage of cloud cover present in the granule.
:param min_cover: minimum percentage of cloud cover
:param max_cover: maximum percentage of cloud cover
:returns: Query instance
"""
if not min_cover and not max_cover:
raise ValueError("Please provide at least min_cover, max_cover or both")
        if min_cover and max_cover:
            try:
                minimum = float(min_cover)
                maximum = float(max_cover)
            except ValueError:
                raise ValueError("Please ensure min_cover and max_cover are both floats")
            if minimum > maximum:
                raise ValueError("Please ensure min_cover is lower than max_cover")
self.params['cloud_cover'] = "{},{}".format(min_cover, max_cover)
return self
def instrument(self, instrument=""):
"""
Filter by the instrument associated with the granule.
:param instrument: name of the instrument
:returns: Query instance
"""
if not instrument:
raise ValueError("Please provide a value for instrument")
self.params['instrument'] = instrument
return self
def platform(self, platform=""):
"""
Filter by the satellite platform the granule came from.
:param platform: name of the satellite
:returns: Query instance
"""
if not platform:
raise ValueError("Please provide a value for platform")
self.params['platform'] = platform
return self
def granule_ur(self, granule_ur=""):
"""
Filter by the granules unique ID. Note this will result in at most one granule
being returned.
:param granule_ur: UUID of a granule
:returns: Query instance
"""
if not granule_ur:
raise ValueError("Please provide a value for platform")
self.params['granule_ur'] = granule_ur
return self
def concept_id(self, IDs):
"""
Filter by concept ID (ex: C1299783579-LPDAAC_ECS or G1327299284-LPDAAC_ECS)
Collections and granules are uniquely identified with this ID. If providing a collection's concept ID
here, it will filter by granules associated with that collection. If providing a granule's concept ID
here, it will uniquely identify those granules.
:param IDs: concept ID(s) to search by. Can be provided as a string or list of strings.
:returns: Query instance
"""
if isinstance(IDs, str):
IDs = [IDs]
self.params["concept_id"] = IDs
return self
def _valid_state(self):
# spatial params must be paired with a collection limiting parameter
spatial_keys = ["point", "polygon", "bounding_box", "line"]
collection_keys = ["short_name", "entry_title"]
if any(key in self.params for key in spatial_keys):
if not any(key in self.params for key in collection_keys):
return False
# all good then
return True
class CollectionQuery(Query):
"""
Class for querying collections from the CMR.
"""
def __init__(self, mode=CMR_OPS):
Query.__init__(self, "collections", mode)
self._valid_formats_regex.extend([
"dif", "dif10", "opendata", "umm_json", "umm_json_v[0-9]_[0-9]"
])
def archive_center(self, center):
"""
Filter by the archive center that maintains the collection.
:param archive_center: name of center as a string
:returns: Query instance
"""
if center:
self.params['archive_center'] = center
return self
def keyword(self, text):
"""
        Case insensitive and wildcard (*) search through over two dozen fields in
a CMR collection record. This allows for searching against fields like
summary and science keywords.
:param text: text to search for
:returns: Query instance
"""
if text:
self.params['keyword'] = text
return self
def concept_id(self, IDs):
"""
Filter by concept ID (ex: C1299783579-LPDAAC_ECS)
Collections are uniquely identified with this ID.
:param IDs: concept ID(s) to search by. Can be provided as a string or list of strings.
:returns: Query instance
"""
if isinstance(IDs, str):
IDs = [IDs]
# verify we weren't provided any granule concept IDs
for ID in IDs:
if ID.strip()[0] != "C":
raise ValueError("Only collection concept ID's can be provided (begin with 'C'): {}".format(ID))
self.params["concept_id"] = IDs
return self
def _valid_state(self):
return True
```
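An illustrative sketch of the query-builder API defined above; the collection short name, coordinates, and dates are example values only:
```python
# Example values only; the chained methods are defined in the classes above.
from cmr.queries import GranuleQuery, CollectionQuery

api = GranuleQuery()
api.short_name("AST_L1T").point(-112.73, 42.5)
api.temporal("2016-10-10T01:02:00Z", "2016-10-12T00:00:30Z")

print(api.hits())        # lightweight count of matching granules
granules = api.get(10)   # up to 10 results, json format by default

collections = CollectionQuery().keyword("AST_L1T*").get(5)
```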
|
{
"source": "jddelalamo/Google-Drive-Sync",
"score": 3
}
|
#### File: Google-Drive-Sync/testing/file_object.py
```python
class File:
def __init__(self, file_id, fileName, parentFolder, dateModified, mimeType, filePath):
self.__file_id = file_id
self.__fileName = fileName
self.__parentFolder = parentFolder
self.__dateModified = dateModified
self.__mimeType = mimeType
self.__filePath = filePath
def get_file_id(self):
return self.__file_id
def get_fileName(self):
return self.__fileName
def get_parentFolder(self):
return self.__parentFolder
def get_dateModified(self):
return self.__dateModified
def get_mimeType(self):
return self.__mimeType
def get_filePath(self):
return self.__filePath
```
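A minimal illustration of constructing and reading the container above (all values are made up, and the import assumes the ``testing`` directory is on ``sys.path``):
```python
# Illustrative values only.
from file_object import File

f = File(file_id="1AbCdEf",
         fileName="report.gdoc",
         parentFolder="My Drive",
         dateModified="2021-05-01T12:00:00Z",
         mimeType="application/vnd.google-apps.document",
         filePath="/My Drive/report.gdoc")
print(f.get_fileName(), f.get_dateModified(), f.get_mimeType())
```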
|
{
"source": "jddes/Frequency-comb-DPLL",
"score": 2
}
|
#### File: Frequency-comb-DPLL/digital_servo_python_gui/XEM_GUI_MainWindow.py
```python
from __future__ import print_function
import time
from PyQt5 import QtGui, Qt
#import PyQt5.Qwt5 as Qwt
import numpy as np
import math
from scipy.signal import lfilter
from scipy.signal import decimate
from scipy.signal import detrend
# For make_sure_path_exists() and os.rename()
import os
import errno
#from SuperLaserLand_JD2 import SuperLaserLand_JD2
from LoopFiltersUI import LoopFiltersUI
from DisplayVNAWindow import DisplayVNAWindow
from LoopFiltersUI_DAC1_and_DAC2 import LoopFiltersUI_DAC1_and_DAC2
from DisplayDitherSettingsWindow import DisplayDitherSettingsWindow
#from DisplayCrashMonitorWindow import DisplayCrashMonitorWindow
#from ILX_laser_control import ILX_laser_control
#from PyDAQmx_single_1 import NIDAQ_USB
#from NIUSB_DAQ import Instrument
from user_friendly_QLineEdit import user_friendly_QLineEdit
import SpectrumWidget
#import matplotlib.pyplot as plt
import traceback
# stuff for Python 3 port
import pyqtgraph as pg
import RP_PLL # for CommsError
from SocketErrorLogger import logCommsErrorsAndBreakoutOfFunction
import logging
def round_to_N_sig_figs(x, Nsigfigs):
leading_pos = np.floor(np.log10(np.abs(x)))
factor = 10**((Nsigfigs-1)-leading_pos)
return np.round(x * factor)/factor
def smooth(x,window_len=11,window='hanning'):
"""smooth the data using a window with requested size.
This method is based on the convolution of a scaled window with the signal.
The signal is prepared by introducing reflected copies of the signal
(with the window size) in both ends so that transient parts are minimized
in the begining and end part of the output signal.
input:
x: the input signal
window_len: the dimension of the smoothing window; should be an odd integer
window: the type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'
flat window will produce a moving average smoothing.
output:
the smoothed signal
example:
t = np.arange(-2, 2, 0.1)
x = np.sin(t) + np.random.randn(len(t))*0.1
y=smooth(x)
see also:
numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman, numpy.convolve
scipy.signal.lfilter
TODO: the window parameter could be the window itself if an array instead of a string
NOTE: length(output) != length(input); to correct this, return y[(window_len//2-1):-(window_len//2)] instead of just y.
"""
if x.ndim != 1:
raise ValueError("smooth only accepts 1 dimension arrays.")
if x.size < window_len:
raise ValueError("Input vector needs to be bigger than window size.")
if window_len<3:
return x
if window not in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
raise ValueError("Window must be one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")
s=np.r_[x[window_len-1:0:-1],x,x[-1:-window_len:-1]]
#print(len(s))
if window == 'flat': #moving average
w=np.ones(window_len,'d')
else:
w = getattr(np, window)(window_len)  # look up the window function by name (avoids eval)
y=np.convolve(w/w.sum(),s,mode='valid')
return y
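# --- Editor's sketch (not part of the original file): minimal demonstration of smooth() ---
def _smooth_demo(window_len=11):
    # Noisy sine wave smoothed with a Hanning window (np is imported at the top of the file).
    t = np.arange(-2, 2, 0.1)
    x = np.sin(t) + 0.1 * np.random.randn(len(t))
    y = smooth(x, window_len=window_len, window='hanning')
    # The convolution output is longer than the input; trim as suggested in the NOTE above.
    return y[(window_len//2 - 1):-(window_len//2)]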
class XEM_GUI_MainWindow(QtGui.QWidget):
display_phase = 0 # used to refresh the phase noise plot only once every N refresh cycles
VCO_detected_gain_in_Hz_per_Volts = [1, 1, 1]
bFirstTimeLockCheckBoxClicked = True
# def __init__(self):
# super(XEM_GUI_MainWindow, self).__init__()
#
# self.bDisplayTiming = False # Activate to turn a lot of timing print()s
# self.output_controls = (True, True, True)
# self.initUI()
def __init__(self, sl, strTitle, selected_ADC, output_controls, sp, custom_style_sheet, strFGPASerialNumber):
super(XEM_GUI_MainWindow, self).__init__()
self.strTitle = strTitle
self.sl = sl
self.sp = sp # Holds the system parameters (configuration values)
self.bDisplayTiming = False # Activate to turn on a lot of timing print()s
self.selected_ADC = selected_ADC
self.output_controls = output_controls
self.setObjectName('MainWindow')
self.setStyleSheet(custom_style_sheet)
self.strFGPASerialNumber = strFGPASerialNumber
self.logger = logging.getLogger(__name__)
self.logger_name = ':XEM_GUI_MainWindow'
self.timerIDDither = None
self.timerID = 0
# For the crash monitor
self.crash_number = 0
self.crash_windows = []
self.crash_windows_opening_times = []
self.bAveragePhaseNoise = True
self.bAveragePhaseNoiseLast = False
self.N_spc_average = 10.
# For the residuals streaming:
# Only one window takes care of reading both the CEO and optical residuals
if self.selected_ADC == 0:
strFolder = 'c:\\SuperLaserLandLogs\\ResidualsStreaming'
self.make_sure_path_exists(strFolder)
self.word_counter = 0
self.foutput_residuals = open('%s\\residuals_ceo_%s.bin' % (strFolder, self.strFGPASerialNumber), 'wb')
self.foutput_residuals2 = open('%s\\residuals_optical_%s.bin' % (strFolder, self.strFGPASerialNumber), 'wb')
self.foutput_residuals_time = open('%s\\residuals_time_%s.bin' % (strFolder, self.strFGPASerialNumber), 'wb', 0) # the 0 means un-buffered writes
self.initUI()
def getValues(self):
self.bFirstTimeLockCheckBoxClicked = False
self.getVCOGain()
self.spectrum.getDACoffset()
self.getVCOFreq()
self.qloop_filters[self.selected_ADC].getValues() # We should get qloop_filters.kc before (done in getVCOGain)
self.setLock()
self.timerIDDither = Qt.QTimer(self)
self.timerIDDither.timeout.connect(self.timerDitherEvent)
self.startTimers()
self.displayDAC() # This populates the current DAC values with the actual value
self.qchk_refresh.setChecked(False)
self.refreshChk_event()
def pushActualValues(self):
print("Push actual values of MainWindow, TODO")
def pushDefaultValues(self):
# print("XEM_GUI_MainWindow::pushDefaultValues()")
#For now, equivalent to calling initSL()
self.loadParameters()
# Send values to FPGA
self.setVCOFreq_event()
self.setVCOGain_event()
self.chkLockClickedEvent()
self.timerIDDither = Qt.QTimer(self)
self.timerIDDither.timeout.connect(self.timerDitherEvent)
self.startTimers()
self.displayDAC() # This populates the current DAC values with the actual value
# print("XEM_GUI_MainWindow::pushDefaultValues(): after displayDAC")
if self.output_controls[0] == True:
self.slowStart100VSwitchingSupply()
@logCommsErrorsAndBreakoutOfFunction()
def slowStart100VSwitchingSupply(self):
# need to set the switching supply to its default values:
# do a slow start over ~ 100 ms.
f_switching = 200e3
Vtarget = 100.
Vsupply = 30.
T_slow_start = 100e-3
target_duty_cycle = (Vtarget-Vsupply)/Vtarget
oscillator_modulus = int(round( self.sl.fs/f_switching ))
print("slowStart100VSwitchingSupply(): starting")
N_steps = 10
for k in range(int(N_steps)+1):
# print("slowStart100VSwitchingSupply(): here")
current_duty_cycle = float(k)/N_steps * target_duty_cycle
# print("slowStart100VSwitchingSupply(): here2")
oscillator_modulus_active = int(round( oscillator_modulus * current_duty_cycle ))
# print("slowStart100VSwitchingSupply(): here3")
self.sl.setTestOscillator(bEnable=1, bPolarity=1, oscillator_modulus=oscillator_modulus, oscillator_modulus_active=oscillator_modulus_active)
# try:
# self.sl.setTestOscillator(bEnable=1, bPolarity=1, oscillator_modulus=oscillator_modulus, oscillator_modulus_active=oscillator_modulus_active)
# except RP_PLL.CommsError:
# break
time.sleep(T_slow_start/N_steps)
print("slowStart100VSwitchingSupply(): finished")
def killTimers(self):
# print("XEM_GUI_MainWindow::killTimers(): %s" % self.strTitle)
#traceback.print_stack()
if self.timerIDDither is not None:
self.timerIDDither.stop()
if self.qchk_refresh.isChecked():
self.qchk_refresh.setChecked(False)
self.refreshChk_event()
def startTimers(self):
# print("XEM_GUI_MainWindow::startTimers(): %s" % self.strTitle)
# Need to init timerID
self.timerID = 0
# Start the timer which reads the dither:
if self.timerIDDither is not None:
self.timerIDDither.start(100) # 100 ms readout delay, increased to 1000 ms for debugging
def getVCOGainFromUI(self, output_number):
try:
VCO_gain_in_Hz_per_Volts = float(self.qedit_vco_gain[output_number].text())
except:
VCO_gain_in_Hz_per_Volts = 1e9
return VCO_gain_in_Hz_per_Volts
@logCommsErrorsAndBreakoutOfFunction()
def setVCOGain_event(self):
# Update the loop filters gain settings based on the new VCO gains:
# Also set the scale on the manual output sliders (and the steps)
# We want the user to be able to easily control the beat frequency with the mousewheel.
# (mousewheel scroll: 3 small steps or arrow keys: 1 small step)
# We want each mousewheel step to be about 0.5 MHz,
# large steps (clicking in the open area of the scrollbar) to be about 5 MHz
for k in range(3):
if self.output_controls[k]:
VCO_gain_in_Hz_per_Volts = self.getVCOGainFromUI(k)
# getFreqDiscriminatorGain is in DDC Counts/Hz
# getDACGainInVoltsPerCounts is in V/(DAC Counts)
VCO_gain_in_counts_per_counts = VCO_gain_in_Hz_per_Volts * self.sl.getFreqDiscriminatorGain() * self.sl.getDACGainInVoltsPerCounts(k) # self.sl.getFreqDiscriminatorGain() and self.sl.getDACGainInVoltsPerCounts(k) are constants (different for each k)
if k == 0 or k == 1:
self.qloop_filters[k].kc = VCO_gain_in_counts_per_counts
self.qloop_filters[k].checkFirmwareLimits()
self.qloop_filters[k].updateFilterSettings()
self.qloop_filters[k].updateGraph()
elif k == 2:
# DAC 2 loop settings are controlled by the same widget as DAC1
self.qloop_filters[1].kc_dac2 = VCO_gain_in_counts_per_counts
self.qloop_filters[1].checkFirmwareLimits()
self.qloop_filters[1].updateFilterSettings()
self.qloop_filters[1].updateGraph()
self.sl.save_openLoop_gain(k, VCO_gain_in_counts_per_counts) # Save the value of the open-loop gain in the FPGA to allow reconnection (useful to read back the Loop-Filter gain value)
self.spectrum.setSliderStepSize(k, VCO_gain_in_Hz_per_Volts)
# This function needs the VCO gain to compute the control effort, so we have to update it if the gain has changed.
self.spectrum.setDACOffset_event()
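# Editor's note on units: [Hz/V] * [DDC counts/Hz] * [V/DAC count] = [DDC counts/DAC count],
# i.e. kc is the dimensionless open-loop gain handed to the loop-filter widgets above.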
@logCommsErrorsAndBreakoutOfFunction()
def getVCOGain(self):
if self.selected_ADC == 0:
dac_list = [0]
elif self.selected_ADC == 1:
dac_list = [1, 2]
for k in dac_list:
# if self.output_controls[k]:
VCO_gain_in_counts_per_counts = self.sl.get_openLoop_gain(k)
# print("k = %d, VCO_gain_in_counts_per_counts=%f" % (k, VCO_gain_in_counts_per_counts))
VCO_gain_in_Hz_per_Volts = VCO_gain_in_counts_per_counts / (self.sl.getFreqDiscriminatorGain() * self.sl.getDACGainInVoltsPerCounts(k))
# print("k = %d, VCO_gain_in_Hz_per_Volts=%f" % (k, VCO_gain_in_Hz_per_Volts))
# prevent divide-by-0 bug:
if VCO_gain_in_Hz_per_Volts == 0:
VCO_gain_in_Hz_per_Volts = 1.
self.qedit_vco_gain[k].blockSignals(True)
self.qedit_vco_gain[k].setText('{:.1e}'.format(VCO_gain_in_Hz_per_Volts))
self.qedit_vco_gain[k].blockSignals(False)
if k == 0 or k == 1:
self.qloop_filters[k].kc = VCO_gain_in_counts_per_counts
self.qloop_filters[k].checkFirmwareLimits()
self.qloop_filters[k].updateGraph()
elif k == 2:
# DAC 2 loop settings are controlled by the same widget as DAC1
self.qloop_filters[1].kc_dac2 = VCO_gain_in_counts_per_counts
self.qloop_filters[1].checkFirmwareLimits()
self.qloop_filters[1].updateGraph()
self.spectrum.setSliderStepSize(k, VCO_gain_in_Hz_per_Volts)
@logCommsErrorsAndBreakoutOfFunction()
def setVCOFreq_event(self, checked=False):
# print("setVCOFreq_event: self.selected_ADC = %d" % self.selected_ADC)
try:
frequency_in_hz = float(self.qedit_ref_freq.text())
except:
frequency_in_hz = 5e6
# If the VCO has positive sign, we need to use a negative reference frequency to make the
# total loop sign negative, so that the loop is stable when we close it
if self.qsign_positive.isChecked():
frequency_in_hz =-frequency_in_hz
#print('frequency_in_hz = %e' % frequency_in_hz)
if self.selected_ADC == 0:
self.sl.set_ddc0_ref_freq(frequency_in_hz)
elif self.selected_ADC == 1:
self.sl.set_ddc1_ref_freq(frequency_in_hz)
#print('frequency_in_hz = %e (after)' % frequency_in_hz)
@logCommsErrorsAndBreakoutOfFunction()
def getVCOFreq(self):
if self.selected_ADC == 0:
frequency_in_hz = self.sl.get_ddc0_ref_freq_from_RAM()
elif self.selected_ADC == 1:
frequency_in_hz = self.sl.get_ddc1_ref_freq_from_RAM()
# If the VCO has positive sign, we need to use a negative reference frequency to make the
# total loop sign negative, so that the loop is stable when we close it
if frequency_in_hz < 0:
self.qsign_positive.setChecked(True)
else:
self.qsign_negative.setChecked(True)
self.qedit_ref_freq.blockSignals(True)
self.qedit_ref_freq.setText('%.2e' % abs(frequency_in_hz))
self.qedit_ref_freq.blockSignals(False)
def refreshChk_event(self):
if self.qchk_refresh.isChecked():
# We are doing a not running->running transition
try:
# if True:
# print('self.qedit_timerdelay.text() = %s' % self.qedit_timerdelay.text())
timer_delay = float(self.qedit_timerdelay.text())
except:
# else:
timer_delay = 1000
# print('Timer delay = %d ms' % timer_delay)
if self.timerID != 0:
self.killTimer(self.timerID)
self.timerID = self.startTimer(int(round(timer_delay)))
self.timerEvent(0) # run the event handler once right away, makes the checkbox feel more responsive
# print('Starting timer')
else:
# We are doing a running->not running transition
if self.timerID != 0:
self.killTimer(self.timerID)
self.timerID = 0
# print('Stopping timer')
def exportData(self):
# First need to create a unique file name template (with good probability)
# We simply use the system date and time, and hope that this function doesn't get called twice in a second
strNameTemplate = time.strftime("data_export\\%m_%d_%Y_%H_%M_%S_")
# Data to write:
# self.inst_freq
# self.freq_noise_psd
# self.freq_noise_axis
# self.raw_adc_samples
# Create the subdirectory if it doesn't exist:
self.make_sure_path_exists('data_export')
# Open files for output, write raw data
# if True:
try:
strCurrentName = strNameTemplate + 'raw_adc_samples.bin'
f = open(strCurrentName, 'wb')
f.write(self.raw_adc_samples)
f.close()
except:
pass
try:
strCurrentName = strNameTemplate + 'inst_freq.bin'
f = open(strCurrentName, 'wb')
f.write(self.inst_freq)
f.close()
except:
pass
try:
strCurrentName = strNameTemplate + 'freq_noise_psd.bin'
f = open(strCurrentName, 'wb')
f.write(self.freq_noise_psd)
f.close()
except:
pass
try:
strCurrentName = strNameTemplate + 'freq_noise_axis.bin'
f = open(strCurrentName, 'wb')
f.write(self.freq_noise_axis)
f.close()
except:
pass
def showVNA(self):
self.vna = DisplayVNAWindow(self.sl)
def grabAndExportData(self, bSyncReadOnNextTimeQuantization=True):
start_time = time.perf_counter()
print('Grabbing and exporting data')
# Check if another function is currently using the DDR2 logger:
if self.sl.bDDR2InUse:
print('grabAndExportData(): DDR2 logger in use, cannot get data from adc')
return
# Ask which input to use:
currentSelector, ok = QtGui.QInputDialog.getItem(self, 'Raw data export',
'Select the data source:', ('ADC0', 'ADC1', 'DAC0', 'DAC1', 'DAC2'))
if not ok:
return
currentSelector = str(currentSelector)
# Ask how many points:
N_points_str, ok = QtGui.QInputDialog.getText(self, 'Raw data export',
'Enter the number of points desired [1, 32768]:', Qt.QLineEdit.Normal, '32768')
if not ok:
return
# Block access to the DDR2 Logger to any other function until we are done:
self.sl.bDDR2InUse = True
try:
N_points = int(float(N_points_str))
except:
N_points = 4e3
if N_points < 64:
N_points = 64
try:
# Read from selected source
print("currentSelector = %s" % currentSelector)
self.sl.setup_write(self.sl.LOGGER_MUX[currentSelector], N_points)
##################################################
# Synchronize trigger as best as possible to the next multiple of time_quantum seconds:
if bSyncReadOnNextTimeQuantization:
time_quantum = 0.01
time_now = time.time()
time_target = np.ceil(time_now/time_quantum) * time_quantum
print('time_now = %f, time_target = %f' % (time_now, time_target))
while time_target > time_now:
time.sleep(1e-3)
time_now = time.time()
self.sl.trigger_write()
if bSyncReadOnNextTimeQuantization:
print('time_now = %f, time_target = %f' % (time_now, time_target))
self.sl.wait_for_write()
(samples_out, ref_exp0) = self.sl.read_adc_samples_from_DDR2()
samples_out = samples_out.astype(dtype=np.float)/2**15
except:
# ADC read failed.
print('Unhandled exception in ADC read')
# del self.sl
# raise
# Signal to other functions that they can use the DDR2 logger
self.sl.bDDR2InUse = False
print('Elapsed time (Comm) = %f' % (time.perf_counter()-start_time))
start_time = time.perf_counter()
# Write the data to disk:
strNameTemplate = time.strftime("data_export\\%m_%d_%Y_%H_%M_%S_")
self.make_sure_path_exists('data_export')
# Open files for output, write raw data
try:
strCurrentName = strNameTemplate + self.strFGPASerialNumber + '_raw_adc_samples.bin'
f = open(strCurrentName, 'wb')
f.write(samples_out)
f.close()
except:
pass
print('Elapsed time (write to disk) = %f' % (time.perf_counter()-start_time))
start_time = time.perf_counter()
def setLock(self):
bLock = self.qloop_filters[self.selected_ADC].qchk_lock.isChecked()
self.qchk_lock.setChecked(bLock)
if bLock:
#We are reconnecting to a RP which has a locked loop filter
self.qchk_lock.setStyleSheet('font-size: 18pt; color: white; background-color: green')
else:
self.qchk_lock.setStyleSheet('font-size: 18pt; color: white; background-color: red')
@logCommsErrorsAndBreakoutOfFunction()
def chkLockClickedEvent(self, checked=False):
bLock = self.qchk_lock.isChecked()
if bLock:
# we are doing an unlocked->locked transition.
# We first check if the detected VCO gain seems right:
if self.sl.dither_enable[self.selected_ADC]:
# check if gain is OK
try:
VCO_gain_in_Hz_per_Volts = float(self.qedit_vco_gain[self.selected_ADC].text())
except:
VCO_gain_in_Hz_per_Volts = 1e9
# First check if sign is right:
if np.sign(self.VCO_detected_gain_in_Hz_per_Volts[self.selected_ADC]) != np.sign(VCO_gain_in_Hz_per_Volts):
# Display warning message.
reply = QtGui.QMessageBox.question(self, 'Warning',
"The detected VCO gain is negative. This will most likely make the loop unstable. This is either caused by trying to lock to an incorrect sideband, or an incorrect setting of the VCO sign in the UI. Do you want to turn on the lock anyway?",
QtGui.QMessageBox.Yes | QtGui.QMessageBox.No, QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.No:
# Exit early
self.qchk_lock.setChecked(False)
return
print('Warning about the loop sign ignored.')
else:
print('Gain sign OK')
# Now we check if the magnitude of the entered VCO gain and the detected gain agree within some tolerance:
if self.VCO_detected_gain_in_Hz_per_Volts[self.selected_ADC]/VCO_gain_in_Hz_per_Volts > 1.5 or self.VCO_detected_gain_in_Hz_per_Volts[self.selected_ADC]/VCO_gain_in_Hz_per_Volts < 1/1.5:
# Display warning message.
reply = QtGui.QMessageBox.question(self, 'Warning',
"The detected VCO gain (%.2e Hz/V) has a significantly different magnitude than the entered value used for designing the controller (%.2e Hz/V). This may make the loop unstable. Do you want to turn on the lock anyway?" % (self.VCO_detected_gain_in_Hz_per_Volts[self.selected_ADC], VCO_gain_in_Hz_per_Volts),
QtGui.QMessageBox.Yes | QtGui.QMessageBox.No, QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.No:
# Exit early
self.qchk_lock.setChecked(False)
return
print('Warning about the loop gain ignored.')
else:
print('Gain magnitude OK')
# If we get here that means either that all the parameters have passed the checks, or the dither was off.
# Turn the dither off if the dither mode is automatic:
if self.selected_ADC == 0:
if self.sl.dither_mode_auto[0] == 1:
# automatic mode
self.sl.setDitherLockInState(0, False)
else:
# Optical lock: we have two dithers to take care of:
if self.sl.dither_mode_auto[1] == 1:
# automatic mode
self.sl.setDitherLockInState(1, False)
# if self.sl.dither_mode_auto[2] == 1:
# # automatic mode
# self.sl.setDitherLockInState(2, False)
self.logger.info('Red_Pitaya_GUI{}: Lock'.format(self.logger_name))
self.qchk_lock.setStyleSheet('font-size: 18pt; color: white; background-color: green')
# Turn the lock on
if self.selected_ADC == 0:
self.qloop_filters[0].qchk_lock.setChecked(True)
self.qloop_filters[0].updateFilterSettings()
elif self.selected_ADC == 1:
# Lock procedure if there is no 3rd DAC on the Red Pitaya:
# self.qloop_filters[1].qchk_lock.setChecked(True)
# self.qloop_filters[1].updateFilterSettings()
# There is a different procedure for turning on the lock on the optical loop:
# first we grab the beat using the DAC2 frequency-locked loop, then we set this integrator to hold
# and switch to the DAC1 PLL + DAC2 second integrator.
self.qloop_filters[1].qradio_mode_off.setChecked(False)
self.qloop_filters[1].qradio_mode_slow.setChecked(True)
self.qloop_filters[1].qradio_mode_fast.setChecked(False)
self.qloop_filters[1].qradio_mode_both.setChecked(False)
self.qloop_filters[1].updateSettings()
# Wait for the integrator to grab on to the beat
time.sleep(0.2)
# Turn on the full-blown PLL
self.qloop_filters[1].qradio_mode_off.setChecked(False)
self.qloop_filters[1].qradio_mode_slow.setChecked(False)
self.qloop_filters[1].qradio_mode_fast.setChecked(False)
self.qloop_filters[1].qradio_mode_both.setChecked(True)
self.qloop_filters[1].updateSettings()
else: # bLock = False
if not self.sl.output_vco[self.selected_ADC]:
if not self.bFirstTimeLockCheckBoxClicked:
# We are doing a locked->unlocked transition
# 1. Smoothly ramp the manual dac offsets to where the lock has decided to sit:
# This is to prevent any violent step on the actuator when we turn off the lock:
# It also prevents mode changes (the laser should stay fairly close to where it was when locked).
if self.selected_ADC == 0:
# Go and measure the current DAC DC value:
N_points = 10e3
self.sl.setup_DAC0_write(N_points)
self.sl.trigger_write()
self.sl.wait_for_write()
(samples_out, ref_exp0) = self.sl.read_adc_samples_from_DDR2()
# print(np.mean(samples_out))
current_dac_offset_in_counts = np.mean(samples_out)
kDAC = 0
elif self.selected_ADC == 1:
N_points = 10e3
self.sl.setup_DAC1_write(N_points)
self.sl.trigger_write()
self.sl.wait_for_write()
(samples_out, ref_exp0) = self.sl.read_adc_samples_from_DDR2()
# print(np.mean(samples_out))
current_dac_offset_in_counts = np.mean(samples_out)
kDAC = 1
# Read the current manual offset value:
current_manual_offset_in_slider_units = float(self.spectrum.q_dac_offset[kDAC].value())
# Convert the DAC DC offset to the slider units:
current_dac_offset_in_slider_units = float(current_dac_offset_in_counts - self.sl.DACs_limit_low[kDAC])/float(self.sl.DACs_limit_high[kDAC] - self.sl.DACs_limit_low[kDAC])*1e6
# Set up a ramp with 20 steps:
desired_ramp = np.linspace(current_manual_offset_in_slider_units, current_dac_offset_in_slider_units, 20)
# print('ramping from %d to %d in slider units' % (current_manual_offset_in_slider_units, current_dac_offset_in_slider_units))
Total_ramp_time = 0.1
for k2 in range(len(desired_ramp)):
# print('set slider to %d' % desired_ramp[k2])
self.spectrum.q_dac_offset[kDAC].setValue(desired_ramp[k2])
self.spectrum.setDACOffset_event()
time.sleep(float(Total_ramp_time)/len(desired_ramp))
# 2. turn the lock off
if self.selected_ADC == 0:
self.qloop_filters[0].qchk_lock.setChecked(False)
self.qloop_filters[0].updateFilterSettings()
elif self.selected_ADC == 1:
# Unlock procedure for when there is no 3rd DAC on the Red Pitaya
# self.qloop_filters[1].qchk_lock.setChecked(False)
# self.qloop_filters[1].updateFilterSettings()
# There is a different procedure for turning the lock off on the optical loop:
# instead of unchecking a lock checkbox, we simply set the loop-filter mode back to 'off' below.
self.qloop_filters[1].qradio_mode_off.setChecked(True)
self.qloop_filters[1].qradio_mode_slow.setChecked(False)
self.qloop_filters[1].qradio_mode_fast.setChecked(False)
self.qloop_filters[1].qradio_mode_both.setChecked(False)
self.qloop_filters[1].updateSettings()
else:
# if the VCO is activated, we don't want to try to estimate the output offset, we just turn off the lock directly
# 2. turn the lock off
if self.selected_ADC == 0:
self.qloop_filters[0].qchk_lock.setChecked(False)
self.qloop_filters[0].updateFilterSettings()
elif self.selected_ADC == 1:
self.qloop_filters[1].qchk_lock.setChecked(False)
self.qloop_filters[1].updateFilterSettings()
# 3. Turn the dither on if the dither mode is automatic:
if self.selected_ADC == 0:
if self.sl.dither_mode_auto[0] == 1:
# automatic mode
self.sl.setDitherLockInState(0, True)
else:
# Optical lock: we have two dithers to take care of:
if self.sl.dither_mode_auto[1] == 1:
# automatic mode
self.sl.setDitherLockInState(1, True)
# if self.sl.dither_mode_auto[2] == 1:
# # automatic mode
# self.sl.setDitherLockInState(2, True)
self.logger.info('Red_Pitaya_GUI{}: Unlock'.format(self.logger_name))
self.qchk_lock.setStyleSheet('font-size: 18pt; color: white; background-color: red')
self.bFirstTimeLockCheckBoxClicked = False
def initUI(self):
# second_half_offset = 50
# Change the background color of the main form so that each controls group stand out better
PalNormal = Qt.QPalette()
# Assign the palette to the main form to read off the 'normal' background color:
self.setPalette(PalNormal)
normalBackgroundRGB = PalNormal.color(Qt.QPalette.Background).getRgb()
# print(normalBackground.getRgb())
# Darken the background of the dialog slightly
darker_factor = 0.5
PalDarkerBackground = Qt.QPalette()
PalDarkerBackground.setColor(Qt.QPalette.Background, Qt.QColor(normalBackgroundRGB[0]*darker_factor, normalBackgroundRGB[1]*darker_factor, normalBackgroundRGB[2]*darker_factor))
# PalDarkerBackground.setColor(Qt.QPalette.Background, Qt.QColor(255, 255, 255))
self.setPalette(PalDarkerBackground)
self.setAutoFillBackground(True)
# PalNormal's color has been changed when we assigned PalDarkerBackground to self - this statement seems very circular but somehow it works
PalNormal.setColor(Qt.QPalette.Background, PalNormal.color(Qt.QPalette.Background))
######################################################################
# Settings
######################################################################
self.qgroupbox_settings = Qt.QGroupBox('Settings', self)
# Button which exports the data to the disk
self.qbtn = QtGui.QPushButton('Export PSD data')
self.qbtn.clicked.connect(self.exportData)
# Button which grabs a single acquisition from the DDR memory and exports the data to the disk
self.qbtn_grab = QtGui.QPushButton('Export ADC data')
self.qbtn_grab.clicked.connect(self.grabAndExportData)
# Button which opens the VNA window:
self.qbtn_VNA = QtGui.QPushButton('Transfer function')
self.qbtn_VNA.clicked.connect(self.showVNA)
# VCO modulation gain:
self.qedit_vco_gain = {}
self.qlabel_detected_vco_gain = {}
if self.selected_ADC == 0:
# CEO Lock: only one output (DAC0)
self.qlabel_vco_gain = Qt.QLabel('VCO Gain (DAC0) [Hz/V]:')
self.qlabel_detected_vco_gain_label = Qt.QLabel('Detected VCO Gain [Hz/V]:')
self.qedit_vco_gain[0] = user_friendly_QLineEdit('1e6')
self.qedit_vco_gain[0].returnPressed.connect(self.setVCOGain_event)
self.qedit_vco_gain[0].setMaximumWidth(60)
self.qlabel_detected_vco_gain[0] = Qt.QLabel('0 Hz/V')
self.qlabel_detected_vco_gain[0].setAlignment(Qt.Qt.AlignHCenter)
else:
# Optical lock
self.qlabel_vco_gain = Qt.QLabel('VCO Gains (DAC1, DAC2HV) [Hz/V]:')
# self.qlabel_vco_gain = Qt.QLabel('VCO Gain (DAC1) [Hz/V]:')
self.qlabel_detected_vco_gain_label = Qt.QLabel('Detected VCO Gain [Hz/V]:')
self.qedit_vco_gain[1] = user_friendly_QLineEdit('1e6')
self.qedit_vco_gain[1].returnPressed.connect(self.setVCOGain_event)
self.qedit_vco_gain[1].setMaximumWidth(60)
self.qedit_vco_gain[2] = user_friendly_QLineEdit('1e6')
self.qedit_vco_gain[2].returnPressed.connect(self.setVCOGain_event)
self.qedit_vco_gain[2].setMaximumWidth(60)
self.qlabel_detected_vco_gain[1] = Qt.QLabel('0 Hz/V')
self.qlabel_detected_vco_gain[1].setAlignment(Qt.Qt.AlignHCenter)
self.qlabel_detected_vco_gain[2] = Qt.QLabel('0 Hz/V')
self.qlabel_detected_vco_gain[2].setAlignment(Qt.Qt.AlignHCenter)
# DDC reference frequency:
self.qlabel_ref_freq = Qt.QLabel('Reference freq [Hz]:')
self.qedit_ref_freq = user_friendly_QLineEdit('5e6')
self.qedit_ref_freq.returnPressed.connect(self.setVCOFreq_event)
self.qedit_ref_freq.setMaximumWidth(60)
# Main button for turning the locks on/off:
self.qchk_lock = Qt.QCheckBox('Lock')
self.qchk_lock.setStyleSheet('')
self.qchk_lock.setStyleSheet('font-size: 18pt; color: white; background-color: red')
# self.qchk_lock.setStyleSheet('font-size: 18pt; color: white; background-color: green')
self.qchk_lock.clicked.connect(self.chkLockClickedEvent)
self.qchk_lock.setChecked(False)
# VCO sign:
self.qsign_positive = Qt.QRadioButton('VCO sign +')
self.qsign_negative = Qt.QRadioButton('VCO sign -')
self.qsign_group = Qt.QButtonGroup(self)
self.qsign_group.addButton(self.qsign_positive)
self.qsign_group.addButton(self.qsign_negative)
self.qsign_positive.setChecked(True)
self.qsign_negative.setChecked(False)
self.qsign_positive.clicked.connect(self.setVCOFreq_event)
self.qsign_negative.clicked.connect(self.setVCOFreq_event)
# Create widgets to indicate performance
self.last_refresh = time.perf_counter()
self.qlabel_refreshrate_display = Qt.QLabel('Actual delay:')
self.qlabel_refreshrate = Qt.QLabel('1000 ms')
# self.qlabel_refreshrate.resize(self.qlabel_refreshrate.sizeHint())
self.qlabel_timerdelay = Qt.QLabel('Refresh delay [ms]:')
self.qedit_timerdelay = user_friendly_QLineEdit('33')
self.qedit_timerdelay.returnPressed.connect(self.refreshChk_event)
self.qedit_timerdelay.setMaximumWidth(60)
self.qchk_refresh = Qt.QCheckBox('Auto-refresh')
self.qchk_refresh.clicked.connect(self.refreshChk_event)
# Status reporting:
if self.selected_ADC == 0:
self.qlbl_status1 = Qt.QLabel('Status: Idle')
elif self.selected_ADC == 1:
self.qlbl_status1 = Qt.QLabel('Status: Idle')
self.qlbl_status2 = Qt.QLabel('Status: Idle')
# Put all the widgets into a grid layout
grid = QtGui.QGridLayout()
grid.setHorizontalSpacing(10)
grid.setVerticalSpacing(1)
# 3 rows, XX columns
grid.addWidget(self.qbtn, 0, 0)
grid.addWidget(self.qbtn_VNA, 1, 0)
grid.addWidget(self.qbtn_grab, 2, 0)
grid.addWidget(self.qchk_refresh, 0, 1)
grid.addWidget(self.qlabel_timerdelay, 1, 1)
grid.addWidget(self.qedit_timerdelay, 1, 2)
grid.addWidget(self.qlabel_refreshrate_display, 2, 1)
grid.addWidget(self.qlabel_refreshrate, 2, 2)
# grid.addWidget(self.qlabel_bytes_skip, 0, 3)
# grid.addWidget(self.qedit_bytes_skip, 0, 4)
grid.addWidget(self.qchk_lock, 0, 3, 1, 2)
grid.addWidget(self.qlabel_ref_freq, 1, 3)
grid.addWidget(self.qedit_ref_freq, 1, 4)
# # both PLLs need to receive a threshold for the residuals.
# # See tooltip for info
# grid.addWidget(self.qlabel_crash_threshold, 2, 3)
# grid.addWidget(self.qedit_crash_threshold, 2, 4)
# only the first PLL has a crash monitor module in the current firmware:
if self.selected_ADC == 0:
pass
#FEATURE
#grid.addWidget(self.qlabel_crash_threshold_freq, 2, 5)
#grid.addWidget(self.qedit_crash_threshold_freq, 2, 6)
#grid.addWidget(self.qchk_crash_monitor, 2, 7)
# We put a sub-grid in the grid
# we put the VCO controls in the sub-grid, this way the outer grid stays the same size regardless of the number of elements
grid2 = Qt.QGridLayout()
grid2.setHorizontalSpacing(10)
grid2.setVerticalSpacing(10)
if self.selected_ADC == 0:
# CEO Lock: only one output (DAC0)
grid2.addWidget(self.qlabel_vco_gain, 0, 0)
grid2.addWidget(self.qlabel_detected_vco_gain_label, 1, 0)
grid2.addWidget(self.qedit_vco_gain[0], 0, 1)
grid2.addWidget(self.qlabel_detected_vco_gain[0], 1, 1)
else:
# Optical lock: two outputs (DAC1 and DAC2)
grid2.addWidget(self.qlabel_vco_gain, 0, 0)
grid2.addWidget(self.qlabel_detected_vco_gain_label, 1, 0)
grid2.addWidget(self.qedit_vco_gain[1], 0, 1)
grid2.addWidget(self.qlabel_detected_vco_gain[1], 1, 1)
grid2.addWidget(self.qedit_vco_gain[2], 0, 2)
# grid2.addWidget(self.qlabel_detected_vco_gain[2], 1, 2)
grid.addLayout(grid2, 0, 5, 2, 2)
grid.addWidget(self.qsign_positive, 0, 7)
grid.addWidget(self.qsign_negative, 1, 7)
grid.addWidget(Qt.QLabel(), 0, 9, 1, 1)
grid.setColumnStretch(9, 1)
self.qgroupbox_settings.setLayout(grid)
self.qgroupbox_settings.setPalette(PalNormal)
self.qgroupbox_settings.setAutoFillBackground(True)
######################################################################
# Spectrum analyzer/Diagnostics
######################################################################
self.spectrum = SpectrumWidget.SpectrumWidget(self, self.selected_ADC, self.output_controls, self.sl, PalNormal)
######################################################################
# Create the controls for the loop filters
######################################################################
self.qgroupbox_loop_filters = Qt.QGroupBox('Loop filters', self)
hbox = Qt.QHBoxLayout()
self.qloop_filters = {}
for k in range(3):
if self.output_controls[k] == True:
if k == 0:
# print('XEM_GUI_MainWindow(): About to call LoopFiltersUI()')
self.qloop_filters[k] = LoopFiltersUI(self.sl, k, bDisplayLockChkBox=False)
hbox.addWidget(self.qloop_filters[k])
#self.qloop_filters[k].show()
elif k == 1:
self.qloop_filters[k] = LoopFiltersUI_DAC1_and_DAC2(self.sl, k, self.sl.pll[k])
hbox.addWidget(self.qloop_filters[k])
self.qloop_filters[k].show()
self.qgroupbox_loop_filters.setLayout(hbox)
# self.qgroupbox_loop_filters.setLayout(grid)
self.qgroupbox_loop_filters.setPalette(PalNormal)
self.qgroupbox_loop_filters.setAutoFillBackground(True)
######################################################################
# Phase noise analysis
######################################################################
self.qgroupbox_phasenoise = Qt.QGroupBox('Phase noise (all computed from DDC output)', self)
# Selector for the plot type (phase or freq noise)
# self.qlabel_ddc_plot_select = Qt.QLabel('Plot type:')
self.qcombo_ddc_plot = Qt.QComboBox()
self.qcombo_ddc_plot.addItem('Freq')
self.qcombo_ddc_plot.addItem('Phase')
self.qcombo_ddc_plot.addItem('Freq: time domain')
self.qcombo_ddc_plot.addItem('Phase: time domain')
self.qcombo_ddc_plot.setCurrentIndex(1)
# Create widgets to set the number of points for the DDC graphs:
self.qlabel_ddc_rbw = Qt.QLabel('RBW: 100 kHz; Points:')
self.qedit_ddc_length = Qt.QLineEdit('32.768e3') # this used to be 3e5 in the Dave Leibrant box version, but was changed to 16e3 due to RedPitaya memory limitations
self.qedit_ddc_length.setMaximumWidth(60)
# Create widgets to set the higher frequency of the integration:
self.qlabel_cumul_integral = Qt.QLabel('Integration\nlimit [Hz]:')
self.qedit_cumul_integral = Qt.QLineEdit('5e6')
self.qedit_cumul_integral.setMaximumWidth(60)
# Display mean frequency error:
self.qlbl_mean_freq_error = Qt.QLabel('Mean freq error = 0 MHz')
# Checkbox to enable faster updates of the phase noise plot:
self.qchk_phase_noise_fast_updates = Qt.QCheckBox('Faster updates')
self.qchk_phase_noise_fast_updates.setChecked(False)
# X and Y limits for the plot:
self.qlbl_xlims = Qt.QLabel('Xmin, Xmax')
self.qedit_xlims = Qt.QLineEdit('3e3, 5e6')
self.qedit_xlims.setMaximumWidth(60)
self.qlbl_ylims = Qt.QLabel('Ymin, Ymax')
self.qedit_ylims = Qt.QLineEdit('-100, -30')
self.qedit_ylims.setMaximumWidth(60)
# Averaging controls: # Averages (1=off)
self.qlbl_spc_averaging = Qt.QLabel('# Averages\n(1=off)')
self.qedit_spc_averaging = Qt.QLineEdit('1')
self.qedit_spc_averaging.setMaximumWidth(60)
# Create the frequency domain plot for the DDC0
self.qplt_DDC0_spc = pg.PlotWidget()
self.qplt_DDC0_spc.setTitle('Freq noise PSD')
#self.qplt_DDC0_spc.setCanvasBackground(Qt.Qt.white)
#self.qplt_DDC0_spc.setAxisScaleEngine(Qwt.QwtPlot.xBottom, Qwt.QwtLog10ScaleEngine())
self.qplt_DDC0_spc.getPlotItem().setLogMode(x=True)
self.qplt_DDC0_spc.setYRange(-60, 60)
#self.qplt_DDC0_spc.enableAxis(Qwt.QwtPlot.yRight)
self.qplt_DDC0_spc.setLabel('bottom', 'Frequency [Hz]')
self.qplt_DDC0_spc.setLabel('left', 'PSD [dB Hz^2/Hz]')
#self.qplt_DDC0_spc.setLabel('right', 'Phase [rad]')
# create the right-side axis:
p1 = self.qplt_DDC0_spc.getPlotItem()
self.qplt_DDC0_spc_right_viewbox = pg.ViewBox()
#self.qplt_DDC0_spc_right_viewbox.setLogMode(x=True)
p1.showAxis('right')
p1.scene().addItem(self.qplt_DDC0_spc_right_viewbox)
p1.getAxis('right').linkToView(self.qplt_DDC0_spc_right_viewbox)
self.qplt_DDC0_spc_right_viewbox.setXLink(p1)
p1.getAxis('right').setLabel('Phase [rad]')
self.updatePhaseNoiseViews()
p1.vb.sigResized.connect(self.updatePhaseNoiseViews)
self.qplt_DDC0_spc.showGrid(x=True, y=True)
#plot_grid = Qwt.QwtPlotGrid()
#plot_grid.setMajPen(Qt.QPen(Qt.Qt.black, 0, Qt.Qt.DotLine))
#plot_grid.attach(self.qplt_DDC0_spc)
# Create the curve in the plot
self.curve_DDC0_spc = self.qplt_DDC0_spc.getPlotItem().plot(title='Phase noise PSD', pen='b')
#self.curve_DDC0_spc.attach(self.qplt_DDC0_spc)
#self.curve_DDC0_spc.setPen(Qt.QPen(Qt.Qt.blue))
#self.curve_DDC0_cumul_phase = pg.PlotCurveItem(pen='g')
self.curve_DDC0_cumul_phase = pg.PlotDataItem(pen='k')
self.curve_DDC0_cumul_phase.setLogMode(True, False)
self.qplt_DDC0_spc_right_viewbox.addItem(self.curve_DDC0_cumul_phase)
#self.curve_DDC0_cumul_phase = self.qplt_DDC0_spc_right_viewbox.getPlotItem().plot(pen='k')
#self.curve_DDC0_cumul_phase.attach(self.qplt_DDC0_spc)
#self.curve_DDC0_cumul_phase.setPen(Qt.QPen(Qt.Qt.black))
# self.curve_DDC0_cumul_phase.setYAxis(Qwt.QwtPlot.yRight)
self.curve_DDC0_spc_avg = self.qplt_DDC0_spc.getPlotItem().plot(pen='g')
#self.curve_DDC0_spc_avg.attach(self.qplt_DDC0_spc)
#self.curve_DDC0_spc_avg.setPen(Qt.QPen(Qt.Qt.darkGreen))
# Put all the widgets into a grid layout
grid = Qt.QGridLayout()
# grid.addWidget(self.qlabel_ddc_plot_select, 0, 0)
grid.addWidget(self.qcombo_ddc_plot, 0, 0, 1, 2)
grid.addWidget(self.qlabel_ddc_rbw, 1, 0)
grid.addWidget(self.qedit_ddc_length, 1, 1)
grid.addWidget(self.qlabel_cumul_integral, 2, 0)
grid.addWidget(self.qedit_cumul_integral, 2, 1)
grid.addWidget(self.qlbl_xlims, 3, 0, 1, 1)
grid.addWidget(self.qedit_xlims, 3, 1, 1, 1)
grid.addWidget(self.qlbl_ylims, 4, 0, 1, 1)
grid.addWidget(self.qedit_ylims, 4, 1, 1, 1)
grid.addWidget(self.qlbl_spc_averaging, 5, 0, 1, 1)
grid.addWidget(self.qedit_spc_averaging, 5, 1, 1, 1)
grid.addWidget(self.qchk_phase_noise_fast_updates, 6, 0, 1, 2)
grid.addWidget(self.qlbl_mean_freq_error, 7, 0, 1, 2)
grid.addWidget(Qt.QLabel(''), 8, 0)
grid.addWidget(self.qplt_DDC0_spc, 0, 2, 9, 1)
grid.setRowStretch(7, 1)
grid.setColumnStretch(2, 1)
self.qgroupbox_phasenoise.setLayout(grid)
self.qgroupbox_phasenoise.setPalette(PalNormal)
self.qgroupbox_phasenoise.setAutoFillBackground(True)
######################################################################
# Layout for the whole form:
######################################################################
grid = Qt.QGridLayout()
grid.setSpacing(10)
grid.addWidget(self.qgroupbox_settings, 0, 0, 1, 0)
grid.addWidget(self.spectrum, 1, 0, 1, 0)
grid.addWidget(self.qgroupbox_phasenoise, 2, 0, 1, 1)
grid.addWidget(self.qgroupbox_loop_filters, 3, 0, 1, 1)
# grid.setRowStretch(2, 1)
self.setLayout(grid)
# Adjust the size and position of the window
# self.resize(940, 1080-100+30)
# self.center()
#self.setGeometry(18, 40, 950, 1010)
#self.setGeometry(0, 0, 750, 1000)
self.setWindowTitle(self.strTitle)
#self.show()
# def resizeEvent(self, event):
# print('resizeEvent')
# print(self.geometry())
## Handle view resizing for the phase noise plot (since we need to manually link the left and right side axes)
def updatePhaseNoiseViews(self):
## view has resized; update auxiliary views to match
p1 = self.qplt_DDC0_spc.getPlotItem()
self.qplt_DDC0_spc_right_viewbox.setGeometry(p1.vb.sceneBoundingRect())
## need to re-update linked axes since this was called
## incorrectly while views had different shapes.
## (probably this should be handled in ViewBox.resizeEvent)
self.qplt_DDC0_spc_right_viewbox.linkedViewChanged(p1.vb, self.qplt_DDC0_spc_right_viewbox.XAxis)
def loadParameters(self):
# Update the values in the UI to reflect the internal values:
# Get values from xml file
for k in range(3):
if self.output_controls[k] == True:
# print('XEM_GUI_MainWindow(): About to call loadParameters()')
if k < 2: # For qloop_filter 0 and 1
# print('before calling self.qloop_filters[k].loadParameters(self.sp)')
self.qloop_filters[k].loadParameters(self.sp) # Get values from xml file for loop_filters
# print('after calling self.qloop_filters[k].loadParameters(self.sp)')
# self.qchk_lock.setChecked(self.qloop_filters[k].qchk_lock.isChecked()) # update the qchk_lock in this widget with the value loaded from sp
# print('after calling setChecked')
# Get dac gain from the system parameters object and set it in the UI:
# print('before calling self.sp.getValue(''VCO_gain'', strDAC)')
strDAC = 'DAC{:01d}'.format(k)
str_VCO_gain = (self.sp.getValue('VCO_gain', strDAC))
# print('before calling self.qedit_vco_gain[k].setText(str_VCO_gain)')
self.qedit_vco_gain[k].blockSignals(True)
self.qedit_vco_gain[k].setText(str_VCO_gain)
self.qedit_vco_gain[k].blockSignals(False)
# print('after calling self.qedit_vco_gain[k].setText(str_VCO_gain)')
# Output offsets values:
output_offset_in_volts = float(self.sp.getValue('Output_offset_in_volts', strDAC))
# Scale this to the correct units for the output offset slider:
min_output_in_volts = float(self.sp.getValue('Output_limits_low', strDAC))
max_output_in_volts = float(self.sp.getValue('Output_limits_high', strDAC))
slider_units = (output_offset_in_volts - min_output_in_volts)/(max_output_in_volts-min_output_in_volts) * 1e6
# print('calling dac offset slider setValue()')
# self.q_dac_offset[k].blockSignals(True)
# self.q_dac_offset[k].setValue(slider_units)
# self.q_dac_offset[k].blockSignals(False)
self.spectrum.setDacOffset(k, slider_units)
# print('done calling dac offset slider setValue()')
# Get ddc reference frequency from the system parameters object and set it in the UI:
strDDC = 'DDC{:01d}'.format(self.selected_ADC)
str_ref_freq = (self.sp.getValue('Reference_frequency', strDDC))
self.qedit_ref_freq.setText(str_ref_freq)
self.qedit_ref_freq.reset_my_color()
# print('done loadParameters()')
return
def center(self):
qr = self.frameGeometry()
cp = QtGui.QDesktopWidget().availableGeometry().center()
qr.moveCenter(cp)
# self.move(qr.topLeft())
self.move(QtGui.QDesktopWidget().availableGeometry().topLeft() + Qt.QPoint(10, 10))
def initSL(self):
# Old function to start the GUI communication
# self.sl = SuperLaserLand_JD2()
# self.sl.open()
print("initSL()")
self.loadParameters()
bUpdateFPGA = True
# Send values to FPGA
if bUpdateFPGA == True:
self.setVCOFreq_event()
self.setVCOGain_event()
self.chkLockClickedEvent()
if self.output_controls[0] == True:
self.setPWM0_event()
# self.setFLL0_event()
# self.setPLL0_event()
# self.timerID = self.startTimer(500 * 1/1)
self.timerID = 0
self.qchk_refresh.setChecked(False) # Set this to True to have the auto-refresh active when running the GUI, or to False to wait for the user to check the box
self.refreshChk_event()
# Start the timer which reads the dither:
# self.timerIDDither = Qt
self.timerIDDither = Qt.QTimer(self)
self.timerIDDither.timeout.connect(self.timerDitherEvent)
self.timerIDDither.start(100) # 100 ms readout delay, increased to 1000 ms for debugging
# print "Warning! Increased self.timerIDDither.start(100) to 3000 for debugging."
# self.timerDitherEvent()
# self.grabAndDisplayADC()
# self.displayDDC()
self.displayDAC() # This populates the current DAC values with the actual value
# self.timerEvent(0) # run the event handler once right away, to populate the rest of the window
print("initSL() done")
def closeEvent(self, event):
# from http://stackoverflow.com/questions/1414781/prompt-on-exit-in-pyqt-application
# quit_msg = "Are you sure you want to exit the program?"
# reply = QtGui.QMessageBox.question(self, 'Message',
# quit_msg, QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
#
# print(event)
#
# if reply == QtGui.QMessageBox.Yes:
# event.accept()
# app = Qt.QApplication.instance()
# app.closeAllWindows()
## app.
# else:
# event.ignore()
return
@logCommsErrorsAndBreakoutOfFunction()
def timerDitherEvent(self):
# print('timerDitherEvent')
# Check if the sl object exists: otherwise this timer will keep throwing exceptions, filling up the console messages
# and preventing us from seeing the real cause
if not hasattr(self, 'sl'):
print('SL object does not exist anymore. disabling timer in timerDitherEvent')
self.timerIDDither.stop()
return
start_time = time.perf_counter()
for k in range(2): #There is no dither for the 2nd DAC
if self.output_controls[k]:
if self.sl.dither_enable[k] == False:
if k != 2: # DAC2 currently does not have dither
self.qlabel_detected_vco_gain[k].setText('off')
self.qlabel_detected_vco_gain[k].setStyleSheet("color: white; background-color: black")
else:
samples = self.sl.ditherRead(2, k)
# There is an implicit (-) sign because the DDC has to shift the frequency to 0.
# This means that the detected gain will be positive when the VCO sign checkbox is correctly tuned
samples = -samples
samples = np.mean(samples)
# np.mean() returns a numpy float, but the conversion functions expect an ndarray
samples = np.ndarray((1,), dtype=np.float, buffer=samples)
# TODO: fancier things with the real and imaginary part, to try to detect invalid readings? Is this necessary?
# TODO: Compare many different readings to try to sort out any incorrect ones?
VCO_detected_gain_in_Hz_per_Volts = self.sl.scaleDitherResultsToHzPerVolts(samples, k)
self.VCO_detected_gain_in_Hz_per_Volts[k] = VCO_detected_gain_in_Hz_per_Volts
self.qlabel_detected_vco_gain[k].setText('%.1e' % VCO_detected_gain_in_Hz_per_Volts)
elapsed_time = time.perf_counter() - start_time
# print('Elapsed time (timerDitherEvent) = %f ms' % (1000*elapsed_time))
# If the detected gain is negative, the loop will be unstable when closed, so we switch to a red background so that the user can flip the sign
if VCO_detected_gain_in_Hz_per_Volts > 0:
# self.qedit_fi.setStyleSheet("background-color: %s" % Qt.QColor(Qt.Qt.white).name())
self.qlabel_detected_vco_gain[k].setStyleSheet("color: white; background-color: green")
else:
# red background
# self.qlabel_detected_vco_gain[k].setStyleSheet("color: white; background-color: %s" % Qt.QColor(Qt.Qt.red).name())
self.qlabel_detected_vco_gain[k].setStyleSheet("color: white; background-color: red")
@logCommsErrorsAndBreakoutOfFunction()
def timerEvent(self, e):
# print 'timerEvent : %.3f sec' % (time.perf_counter())
# Check if the sl object exists: otherwise this timer will keep throwing exceptions, filling up the console messages
# and preventing us from seeing the real cause. We let only one exception go through and then disable
try:
# Read out residuals and dump to disk:
if self.selected_ADC == 0:
pass
# Handle the LEDs display
ret = self.sl.readLEDs()
if ret is not None:
(LED_G0, LED_R0, LED_G1, LED_R1, LED_G2, LED_R2) = ret
# print ('%d, %d, %d, %d, %d, %d' % (LED_G0, LED_R0, LED_G1, LED_R1, LED_G2, LED_R2))
# if self.selected_ADC == 0:
# # if self.qchk_crash_monitor.isChecked():
# # self.checkCrash()
# # LEDs for CEO lock:
# if LED_G0 and (not LED_R0):
# # status is locked
# self.qlbl_status1.setText('Status: Locked')
# self.qlbl_status1.setStyleSheet('color: white; background-color: green')
# elif LED_G0 and LED_R0:
# # Status is railed
# self.qlbl_status1.setText('Status: Railed')
# self.qlbl_status1.setStyleSheet('color: white; background-color: orange')
# elif (not LED_G0) and LED_R0:
# # Residuals above threshold
# self.qlbl_status1.setText('Status: Residuals > threshold')
# self.qlbl_status1.setStyleSheet('color: white; background-color: red')
# elif (not LED_G0) and (not LED_R0):
# # Idle
# self.qlbl_status1.setText('Status: Idle')
# self.qlbl_status1.setStyleSheet('')
# elif self.selected_ADC == 1:
# # LEDs for Optical lock
# if LED_G1 and (not LED_R1):
# # status is locked
# self.qlbl_status1.setText('Status: Locked')
# self.qlbl_status1.setStyleSheet('color: white; background-color: green')
# elif LED_G1 and LED_R1:
# # Status is railed
# self.qlbl_status1.setText('Status: Railed')
# self.qlbl_status1.setStyleSheet('color: white; background-color: orange')
# elif (not LED_G1) and LED_R1:
# # Residuals above threshold
# self.qlbl_status1.setText('Status: Residuals above threshold')
# self.qlbl_status1.setStyleSheet('color: white; background-color: red')
# elif (not LED_G1) and (not LED_R1):
# # Idle
# self.qlbl_status1.setText('Status: Idle')
# self.qlbl_status1.setStyleSheet('')
# # LEDs for slow PZT output:
# if LED_G2 and (not LED_R2):
# # status is locked
# self.qlbl_status2.setText('Status: Locked')
# self.qlbl_status2.setStyleSheet('color: white; background-color: green')
# elif LED_G2 and LED_R2:
# # Status is railed
# self.qlbl_status2.setText('Status: Railed')
# self.qlbl_status2.setStyleSheet('color: white; background-color: orange')
# elif (not LED_G2) and LED_R2:
# # Residuals above threshold
# self.qlbl_status2.setText('Status: Residuals above threshold')
# self.qlbl_status2.setStyleSheet('color: white; background-color: red')
# elif (not LED_G2) and (not LED_R2):
# # Idle
# self.qlbl_status2.setText('Status: Idle')
# self.qlbl_status2.setStyleSheet('')
if self.qchk_refresh.isChecked():
self.grabAndDisplayADC()
self.displayDAC()
if self.display_phase == 0 or self.qchk_phase_noise_fast_updates.isChecked():
self.displayDDC()
self.display_phase = self.display_phase + 1
if self.display_phase > 5:
self.display_phase = 0
except:
print('SL object does not exist anymore. disabling timer in timerEvent')
self.killTimer(self.timerID)
self.timerID = 0
self.qchk_refresh.setChecked(False)
raise
self.qlabel_refreshrate.setText('%.0f ms' % (1000*(time.perf_counter() - self.last_refresh)))
self.last_refresh = time.perf_counter()
# timerEvent()
def displayDAC(self):
# Check if another function is currently using the DDR2 logger:
if self.sl.bDDR2InUse:
print('displayDAC(): DDR2 logger in use, cannot get data from dac')
return
# For now: we grab the smallest chunk of points from the output (so as to not use too much time to refresh)
# and display the current average:
for k in range(3):
if self.output_controls[k]:
# Read from DAC #k
start_time = time.perf_counter()
(samples_out, ref_exp0) = self.getADCdata(input_select="DAC%d" % k, N_samples=256)
if samples_out is None:
return
elapsed_time = time.perf_counter() - start_time
if self.bDisplayTiming == True:
print('Elapsed time (read dac values) = %f ms' % (1000*elapsed_time))
samples_out = samples_out.astype(dtype=np.float)
VCO_gain_in_Hz_per_Volts = self.getVCOGainFromUI(k)
# Update the display:
# For the USB bug, compute the mean from the last points
current_output_in_volts = self.sl.convertDACCountsToVolts(k, np.mean(samples_out[128:256]))
current_output_in_hz = current_output_in_volts * VCO_gain_in_Hz_per_Volts
self.spectrum.qthermo_dac_current[k].setValue(current_output_in_volts)
self.spectrum.qlabel_dac_current_value[k].setText('{:.4f} V\n{:.0f} MHz'.format(current_output_in_volts, current_output_in_hz/1e6))
elapsed_time = time.perf_counter() - start_time
if self.bDisplayTiming == True:
print('Elapsed time (displayDAC total) = %f ms' % (1000*elapsed_time))
def displayDDC(self):
# self.bDisplayTiming = True
# Read from DDC0
try:
try:
N_points = int(float(self.qedit_ddc_length.text()))
except:
N_points = 100e3
if N_points < 64:
N_points = 64
start_time = time.perf_counter()
# if self.selected_ADC == 0:
# self.sl.setup_DDC0_write(N_points)
# elif self.selected_ADC == 1:
# self.sl.setup_DDC1_write(N_points)
# self.sl.trigger_write()
# self.sl.wait_for_write()
# if self.bDisplayTiming == True:
# print('Elapsed time (setup write) = %f' % (time.perf_counter()-start_time))
# start_time = time.perf_counter()
# inst_freq = self.sl.read_ddc_samples_from_DDR2()
inst_freq = self.getADCdata(input_select='DDC%d' % self.selected_ADC, N_samples=N_points, bReadAsDDC=True)
if inst_freq is None:
return
self.inst_freq = inst_freq
if self.bDisplayTiming == True:
print('Elapsed time (communication) = %f' % (time.perf_counter()-start_time))
# print('mean freq error = %f MHz, raw code = %f' % (np.mean(inst_freq)/1e6, np.mean(inst_freq)*2**10 / self.sl.fs*4))
self.qlbl_mean_freq_error.setText('Freq error: %.2f MHz' % (np.mean(inst_freq)/1e6))
# Compute the spectrum:
# We first perform decimation on the data since we don't have useful information above the cut-off frequency anyway:
start_time = time.perf_counter()
N_decimation = 10
fs_new = self.sl.fs/N_decimation
#inst_freq_decimated = decimate(inst_freq, N_decimation, zero_phase=False)
inst_freq_decimated = decimate(detrend(inst_freq), N_decimation, zero_phase=False)
# inst_freq_decimated = inst_freq
# fs_new = self.sl.fs
# For debugging: we want to check
# inst_freq_decimated = np.random.randn(100e3)
# print('Data std dev = %f Hz' % np.std(inst_freq_decimated))
# print('Data variance = %f Hz^2' % np.var(inst_freq_decimated))
if self.bDisplayTiming == True:
print('Elapsed time (decimation) = %f' % (time.perf_counter()-start_time))
start_time = time.perf_counter()
# Compute the spectrum of the decimated signal:
start_time = time.perf_counter()
N_fft = 2**(int(np.ceil(np.log2(len(inst_freq_decimated)))))
frequency_axis = np.linspace(0, (N_fft-1)/float(N_fft)*fs_new, N_fft)
last_index_shown = int(np.round(len(frequency_axis)/2))
window_function = np.blackman(len(inst_freq_decimated))
window_NEB = np.sum((window_function/np.sum(window_function))**2) * fs_new
# print('window_NEB = %f Hz' % window_NEB)
spc = np.fft.fft(inst_freq_decimated * window_function, N_fft)
spc = np.real(spc*np.conj(spc))/(sum(window_function)**2) # Spectrum is now scaled in power (Hz^2 per bin)
# Scale the spectrum to be a single-sided power spectral density in Hz^2/Hz:
spc[1:last_index_shown] = 2*spc[1:last_index_shown] / window_NEB
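# Editor's note: window_NEB computed above is the window's noise-equivalent bandwidth in Hz
# (fs_new * sum((w/sum(w))**2)); doubling the per-bin power (single-sided) and dividing by it
# turns the periodogram into a PSD in Hz^2/Hz, consistent with the comment above.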
# # Compute the running average:
# Compute spectrum averaging with exponential smoothing (simple first-order IIR filter)
try:
n_spc_avg = int(round(float(self.qedit_spc_averaging.text())))
if n_spc_avg > 1.:
self.bAveragePhaseNoise = True
self.N_spc_average = n_spc_avg
else:
self.bAveragePhaseNoise = False
self.N_spc_average = 1.
except:
n_spc_avg = 1.
self.bAveragePhaseNoise = False
if self.bAveragePhaseNoise:
# print('self.N_spc_average = %d' % self.N_spc_average)
filter_alpha = np.exp(-1./self.N_spc_average)
try:
# if this is not the first time we are called with averaging enabled, we run the filter:
if self.bAveragePhaseNoiseLast:
self.spc_running_sum = filter_alpha * self.spc_running_sum + (1-filter_alpha)*spc
else:
# this is the first time that we are called with averaging enabled, so we reset the current state
self.spc_running_sum = spc
except:
# This is the first time that we are called:
self.spc_running_sum = spc
self.bAveragePhaseNoiseLast = self.bAveragePhaseNoise
# print('Freq noise PSD: %e Hz^2/Hz' % (np.mean(spc[1:last_index_shown])))
self.freq_noise_psd = spc[1:last_index_shown]
self.freq_noise_axis = frequency_axis[1:last_index_shown]
# spc = np.abs(spc)
if self.bDisplayTiming == True:
print('Elapsed time (FFT) = %f' % (time.perf_counter()-start_time))
try:
f_limits = self.qedit_xlims.text()
f_limits = f_limits.split(',')
f_limits = (float(f_limits[0]), float(f_limits[1]))
except:
f_limits = (frequency_axis[1], frequency_axis[last_index_shown])
try:
y_limits = self.qedit_ylims.text()
y_limits = y_limits.split(',')
y_limits = (float(y_limits[0]), float(y_limits[1]))
except:
y_limits = (-140, 60)
# Update the graph
if self.qcombo_ddc_plot.currentIndex() == 0:
# Display the frequency noise
spc = 10*np.log10(spc + 1e-20)
self.curve_DDC0_spc.setData(frequency_axis[1:last_index_shown], spc[1:last_index_shown])
if self.bAveragePhaseNoise:
self.curve_DDC0_spc_avg.setData(frequency_axis[1:last_index_shown], 10*np.log10(self.spc_running_sum[1:last_index_shown] + 1e-20))
self.curve_DDC0_spc_avg.setVisible(True)
else:
self.curve_DDC0_spc_avg.setVisible(False)
self.qplt_DDC0_spc.setTitle('Freq noise PSD')
self.qplt_DDC0_spc.setLabel('left', 'PSD [dB Hz^2/Hz]')
self.qplt_DDC0_spc.setYRange(y_limits[0], y_limits[1])
# self.qplt_DDC0_spc.setAxisScale(Qwt.QwtPlot.xBottom, frequency_axis[1], frequency_axis[last_index_shown])
self.qplt_DDC0_spc.getPlotItem().setLogMode(x=True)
self.qplt_DDC0_spc.setXRange(np.log10(f_limits[0]), np.log10(f_limits[1]))
# self.qplt_DDC0_spc.setAxisScaleEngine(Qwt.QwtPlot.xBottom, Qwt.QwtLog10ScaleEngine())
self.qplt_DDC0_spc.setLabel('bottom', 'Frequency [Hz]')
self.curve_DDC0_cumul_phase.setVisible(False)
elif self.qcombo_ddc_plot.currentIndex() == 1:
# Compute the phase noise time-domain standard deviation:
phasenoise_stddev = np.std(np.cumsum(inst_freq*2*np.pi/self.sl.fs))
# Display the phase noise (equal to 1/f^2 times the frequency noise PSD)
self.curve_DDC0_spc.setData(frequency_axis[1:last_index_shown], 10*np.log10(spc[1:last_index_shown] + 1e-20) - 20*np.log10(frequency_axis[1:last_index_shown]))
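# Editor's note: S_phi(f) = S_freq(f)/f^2 (frequency noise in Hz^2/Hz, phase noise in rad^2/Hz),
# which in dB is the -20*log10(f) term applied above.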
if self.bAveragePhaseNoise:
self.curve_DDC0_spc_avg.setData(frequency_axis[1:last_index_shown], 10*np.log10(self.spc_running_sum[1:last_index_shown] + 1e-20) - 20*np.log10(frequency_axis[1:last_index_shown]))
self.curve_DDC0_spc_avg.setVisible(True)
else:
self.curve_DDC0_spc_avg.setVisible(False)
self.qplt_DDC0_spc.setXRange(f_limits[0], f_limits[1])
self.qplt_DDC0_spc.setTitle('Phase noise PSD, std dev = %.2f radrms' % phasenoise_stddev)
self.qplt_DDC0_spc.setLabel('left', 'PSD [dBc/Hz]')
self.qplt_DDC0_spc.setYRange(y_limits[0], y_limits[1])
#self.qplt_DDC0_spc.setAxisScaleEngine(Qwt.QwtPlot.xBottom, Qwt.QwtLog10ScaleEngine())
self.qplt_DDC0_spc.getPlotItem().setLogMode(x=True)
self.qplt_DDC0_spc.setXRange(np.log10(f_limits[0]), np.log10(f_limits[1]*5./6.0)) # the scaling is because the widget doesn't seem to use the exact values that we pass...
self.qplt_DDC0_spc.setLabel('bottom', 'Frequency [Hz]')
# Display the cumulative integral of the phase noise:
# Select desired frequency range:
try:
integration_higher_bound = float(self.qedit_cumul_integral.text())
except:
integration_higher_bound = 1e6
if integration_higher_bound > fs_new/2:
integration_higher_bound = fs_new/2
if integration_higher_bound <= 2/len(spc)*fs_new:
integration_higher_bound = 2/len(spc)*fs_new
integration_higher_index = int(round(integration_higher_bound/fs_new*len(spc)))
# print('integration up to %d out of %d' % (integration_higher_index, len(spc)))
frequency_axis_integral = frequency_axis[1:integration_higher_index]
# Integrate the phase noise PSD, from the highest frequency to the lowest
phase_psd = spc[1:integration_higher_index] / frequency_axis_integral**2
cumul_int = np.flipud(np.cumsum(np.flipud(phase_psd))) * np.mean(np.diff(frequency_axis_integral))
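# Editor's note: the reversed cumulative sum integrates the phase PSD from each frequency up to
# the integration limit, so sqrt(cumul_int) plotted below is the integrated rms phase in rad.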
# print((cumul_int).shape)
# cumul_int = 0*cumul_int + 10
# print((cumul_int).shape)
# Show results
self.curve_DDC0_cumul_phase.setData(frequency_axis_integral, np.sqrt(cumul_int))
self.curve_DDC0_cumul_phase.setVisible(True)
#self.qplt_DDC0_spc_right_viewbox.setYRange(0, 2*2*np.pi)
#self.qplt_DDC0_spc_right_viewbox.setXRange(0, 2*2*np.pi)
# self.qplt_DDC0_spc.setAxisScale(Qwt.QwtPlot.xBottom, frequency_axis[1], frequency_axis[last_index_shown])
# self.qplt_DDC0_spc.setAxisScale(Qwt.QwtPlot.yRight, 0, 2*2*np.pi)
elif self.qcombo_ddc_plot.currentIndex() == 2:
# Display the raw, time-domain instantaneous frequency output by the DDC block, mostly for debugging:
time_axis = np.arange(0, len(inst_freq))/self.sl.fs
self.curve_DDC0_spc.setData(time_axis, inst_freq)
self.curve_DDC0_spc_avg.setVisible(False)
self.curve_DDC0_cumul_phase.setVisible(False)
self.qplt_DDC0_spc.setTitle('Instantaneous frequency error, std dev = %.1f kHz' % (np.std(inst_freq_decimated)/1e3))
self.qplt_DDC0_spc.setLabel('left', 'Freq [Hz]')
self.qplt_DDC0_spc.setLabel('bottom', 'Time [s]')
# self.qplt_DDC0_spc.setAxisScale(Qwt.QwtPlot.yLeft, -self.sl.fs/2, self.sl.fs/2)
self.qplt_DDC0_spc.setYRange(np.min(inst_freq), np.max(inst_freq))
#self.qplt_DDC0_spc.setAxisScaleEngine(Qwt.QwtPlot.xBottom, Qwt.QwtLinearScaleEngine())
self.qplt_DDC0_spc.getPlotItem().setLogMode(x=False)
self.qplt_DDC0_spc.setXRange(time_axis[0], time_axis[-1])
elif self.qcombo_ddc_plot.currentIndex() == 3:
# Display the time-domain instantaneous phase output by the DDC block (computed by integrating the frequency), mostly for debugging:
time_axis = np.arange(0, len(inst_freq))/self.sl.fs
inst_phase = np.cumsum(inst_freq*2*np.pi/self.sl.fs)
# Compute the phase noise time-domain standard deviation:
phasenoise_stddev = np.std(inst_phase)
self.curve_DDC0_spc.setData(time_axis, inst_phase)
self.curve_DDC0_spc_avg.setVisible(False)
self.curve_DDC0_cumul_phase.setVisible(False)
self.qplt_DDC0_spc.setTitle('Instantaneous phase error, std dev = %.2f radrms' % phasenoise_stddev)
self.qplt_DDC0_spc.setLabel('left', 'Phase [rad]')
self.qplt_DDC0_spc.setLabel('bottom', 'Time [s]')
# self.qplt_DDC0_spc.setAxisScale(Qwt.QwtPlot.yLeft, -self.sl.fs/2, self.sl.fs/2)
self.qplt_DDC0_spc.setYRange(np.min(inst_phase), np.max(inst_phase))
#self.qplt_DDC0_spc.setAxisScaleEngine(Qwt.QwtPlot.xBottom, Qwt.QwtLinearScaleEngine())
self.qplt_DDC0_spc.getPlotItem().setLogMode(x=False)
self.qplt_DDC0_spc.setXRange(time_axis[0], time_axis[-1])
# self.qplt_DDC0_spc.setAxisScale(Qwt.QwtPlot.yLeft, -3.14, 3.14)
# print "debug warning: phase noise plot scaled to +/- pi"
# # Display the un-decimated spectrum:
# frequency_axis = np.linspace(0, (len(inst_freq)-1)/float(len(inst_freq))*(self.sl.fs), len(inst_freq))
# last_index_shown = np.round(len(frequency_axis)/2)
# window_function = np.blackman(len(inst_freq))
# spc = np.abs(np.fft.fft((inst_freq-np.mean(inst_freq)) * window_function))/(sum(window_function)/2)
# spc = 20*np.log10(np.abs(spc) + 1e-7)
# self.curve_DDC0_spc.setData(frequency_axis[1:last_index_shown], spc[1:last_index_shown])
# Refresh the display:
self.qplt_DDC0_spc.replot()
if window_NEB > 1e6:
self.qlabel_ddc_rbw.setText('RBW: %.1f MHz; Points:' % (round(window_NEB*1e5)/1e5/1e6))
elif window_NEB > 1e3:
self.qlabel_ddc_rbw.setText('RBW: %.1f kHz; Points:' % (round(window_NEB*1e2)/1e2/1e3))
else:
self.qlabel_ddc_rbw.setText('RBW: %.0f Hz; Points:' % (round(window_NEB)))
except:
del self.sl
print('Unhandled exception')
raise
# pause(1/10.)
self.bDisplayTiming = False
def grabAndDisplayADC(self):
(input_select, plot_type, N_samples) = self.spectrum.getGUIsettingsForADCdata()
# print("input_select = %s" % input_select)
# Grab data from the FPGA:
start_time = time.perf_counter()
(samples_out, ref_exp0) = self.getADCdata(input_select, N_samples)
if (samples_out is None) or (ref_exp0 is None):
return
        self.raw_adc_samples = samples_out.astype(dtype=np.float64)
self.spectrum.plotADCdata(input_select, plot_type, samples_out, ref_exp0)
# Update the scale which indicates the ADC fill ratio in numbers of bits:
self.spectrum.updateScaleDisplays(samples_out)
def getADCdata(self, input_select, N_samples, bReadAsDDC=False):
if bReadAsDDC:
empty_return_value = None
else:
empty_return_value = (None, None)
start_time = time.perf_counter()
# Check if another function is currently using the DDR2 logger:
if self.sl.bDDR2InUse:
print('grabAndDisplayADC(): DDR2 logger in use, cannot get data from adc')
return empty_return_value
# Block access to the DDR2 Logger to any other function until we are done:
self.sl.bDDR2InUse = True
time_start = time.perf_counter()
try:
# Read from selected source
self.sl.setup_write(self.sl.LOGGER_MUX[input_select], N_samples)
self.sl.trigger_write()
self.sl.wait_for_write()
if bReadAsDDC == False:
# read from ADC:
(samples_out, ref_exp0) = self.sl.read_adc_samples_from_DDR2()
else:
# read from DDC:
samples_out = self.sl.read_ddc_samples_from_DDR2()
return samples_out
max_abs = np.max(np.abs(samples_out))
            samples_out = samples_out.astype(dtype=np.float64)
self.raw_adc_samples = samples_out
except RP_PLL.CommsLoggeableError as e:
# log exception
logging.error("Exception occurred", exc_info=True)
return empty_return_value
except RP_PLL.CommsError as e:
# do not log exception (because it's simply an obvious follow-up to a previous one, and we don't want to fill up the log with repeated information)
return empty_return_value
finally:
# Tear-down, whether or not an exception occured: Signal to other functions that they can use the DDR2 logger
self.sl.bDDR2InUse = False
if self.bDisplayTiming == True:
print('Elapsed time (Comm) = %f' % (time.perf_counter()-start_time))
# A little bit of data validation:
if input_select in ['ADC0', 'ADC1']:
if np.real(ref_exp0) == 0 and np.imag(ref_exp0) == 0:
print('getADCdata(): Invalid complex exponential. Probably because of a version mismatch between the RP firmware and Python GUI.')
return empty_return_value
else:
ref_exp0 = 1.0
return (samples_out, ref_exp0)
# From: http://stackoverflow.com/questions/273192/create-directory-if-it-doesnt-exist-for-file-write
def make_sure_path_exists(self, path):
try:
os.makedirs(path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
```
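The cumulative phase-noise integral in the second plot mode above (phase PSD = frequency-noise PSD / f^2, integrated from the highest frequency down) can be illustrated in isolation. A minimal sketch with hypothetical input arrays, mirroring the flipud/cumsum pattern used in the code:

```python
# Standalone sketch of the cumulative phase-noise integral; `frequency_axis`
# and `spc` are hypothetical stand-ins for the arrays computed in the GUI code.
import numpy as np

frequency_axis = np.linspace(0.0, 5e5, 2**14)   # [Hz], uniform spacing
spc = np.full_like(frequency_axis, 1e-2)        # flat frequency-noise PSD [Hz^2/Hz]

f = frequency_axis[1:]                          # skip DC to avoid dividing by zero
phase_psd = spc[1:] / f**2                      # phase-noise PSD [rad^2/Hz]
df = np.mean(np.diff(f))
# running integral from the highest frequency down to each frequency:
cumul_int = np.flipud(np.cumsum(np.flipud(phase_psd))) * df
integrated_phase_rms = np.sqrt(cumul_int)       # [rad rms] vs frequency
```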
|
{
"source": "jddixon/alertz",
"score": 2
}
|
#### File: src/alertz/daemon.py
```python
__all__ = ['clear_logs', 'run_the_daemon', ]
import os
import socket
import sys
import traceback
# from io import StringIO # NOT YET USED
from optionz import dump_options
from xlattice.ftlog import LogMgr
from xlattice.proc_lock import ProcLock
from wireops.chan import Channel
#import fieldz.msg_spec as M
#import fieldz.typed as T
# from fieldz.parser import StringProtoSpecParser # AS YET UNUSED
from fieldz.msg_impl import MsgImpl # , make_msg_class, make_field_class
from alertz import STR_OBJ_MODEL, BUFSIZE
from alertz.chan_io import recv_from_cnx
# from alertz_proto_spec import ALERTZ_PROTO_SPEC # NOT YET USED
# DAEMON ------------------------------------------------------------
def clear_logs(options):
log_dir = options.log_dir
print("DEBUG: clearLogs, logDir = '%s'" % log_dir)
if os.path.exists(log_dir):
if log_dir.startswith('/') or log_dir.startswith('..'):
raise RuntimeError("cannot delete %s/*" % log_dir)
files = os.listdir(log_dir)
if files:
if options.verbose:
print("found %u files" % len(files))
for file in files:
os.unlink(os.path.join(log_dir, file))
def _actually_run_the_daemon(options):
verbose = options.verbose
chan = Channel(BUFSIZE)
    skt = None
(cnx, addr) = (None, None)
    skt = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    skt.bind(('', options.port))
    skt.listen(1)
try:
running = True
while running:
print("\nWAITING FOR CONNECTION") # DEBUG
            cnx, addr = skt.accept()
try:
accept_msg = "CONNECTION FROM %s" % str(addr)
if verbose:
print(accept_msg)
print("BRANCH TO options.accessLog.log()")
sys.stdout.flush()
options.access_log.log(accept_msg)
print("BACK FROM options.access.log()")
sys.stdout.flush()
while 1:
chan.clear()
# print "BRANCH TO recvFromCnx" ; sys.stdout.flush()
msg_ndx = recv_from_cnx(cnx, chan) # may raise exception
(msg, _) = MsgImpl.read(chan, STR_OBJ_MODEL)
# print " MSG_NDX: CALCULATED %s, REAL %s" % (
# msgNdx, realNdx)
# switch on message type
if msg_ndx == 0:
print("GOT ZONE MISMATCH MSG")
# pylint: disable=no-member
print(" timestamp %s" % msg.timestamp)
print(" seqNbr %s" % msg.seq_nbr)
print(" zoneName %s" % msg.zone_name)
print(" expectedSerial %s" % msg.expected_serial)
print(" actualSerial %s" % msg.actual_serial)
text =\
"mismatch, domain %s: expected serial %s, got %s" % (
msg.zone_name, msg.expected_serial, msg.actual_serial)
options.alertz_log.log(text)
elif msg_ndx == 1:
# timestamp, seqNb
print("GOT CORRUPT LIST MSG")
# pylint: disable=no-member
print(" timestamp %s" % msg.timestamp)
print(" seqNbr %s" % msg.seq_nbr)
text = "corrupt list: %s" % (msg.seq_nbr)
options.alertz_log.log(text)
elif msg_ndx == 2:
# has one field, remarks
print("GOT SHUTDOWN MSG")
# pylint: disable=no-member
print(" remarks %s" % msg.remarks)
running = False
                        skt.close()
# XXX STUB: log the message
text = "shutdown: %s" % (msg.remarks)
options.alertz_log.log(text)
cnx.close()
break # permit only one message/cnx
except KeyboardInterrupt:
print("<keyboard interrupt received while connection open>")
if cnx:
cnx.close()
running = False
except KeyboardInterrupt:
print("<keyboard interrupt received while listening>")
# listening socket will be closed
finally:
if cnx:
cnx.close()
        if skt:
            skt.close()
# COMMENTING THIS OUT PREVENTS SEGFAULT ON STOCKTON ---------
# if options.logMgr is not None:
# options.logMgr.close()
# options.logMgr = None
# END COMMENTING OUT ----------------------------------------
if options.lock_mgr is not None:
options.lock_mgr.unlock()
options.lock_mgr = None
def run_the_daemon(options):
"""
Completes setting up the namespace; if this isn't a "just-show" run,
creates lock and log managers, creates the logs, and actually runs
the daemon.
"""
if options.verbose or options.showVersion or options.justShow:
print(options.pgm_name_and_version, end=' ')
if options.showTimestamp:
print('run at %s GMT' % options.timestamp) # could be prettier
else:
print() # there's a comma up there
if options.justShow or options.verbose:
print(dump_options(options))
if not options.justShow:
lock_mgr = None
access_log = None
error_log = None
try:
lock_mgr = ProcLock('alertzd')
options.lock_mgr = lock_mgr
log_mgr = LogMgr(options.log_dir)
options.log_mgr = log_mgr
access_log = log_mgr.open('access')
options.access_log = access_log
alertz_log = log_mgr.open('alertz')
options.alertz_log = alertz_log
error_log = log_mgr.open('error')
options.error_log = error_log
_actually_run_the_daemon(options)
except BaseException:
traceback.print_exc()
sys.exit(1)
finally:
            if lock_mgr is not None:
                lock_mgr.unlock()
```
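The daemon accepts one serialized message per connection. A hedged client sketch, assuming the alertz package exports ZONE_MISMATCH_MSG and BUFSIZE and that chan_io provides send_to_end_point, as the dummy-client test further below imports and uses them:

```python
# Minimal client sketch, mirroring the pattern in test_with_dummy_client.py.
import time
from wireops.chan import Channel
from alertz import ZONE_MISMATCH_MSG, BUFSIZE
from alertz.chan_io import send_to_end_point

chan = Channel(BUFSIZE)
chan.clear()
# field order: timestamp, seq_nbr, zone_name, expected_serial, actual_serial
msg = ZONE_MISMATCH_MSG([int(time.time()), 0, 'example', 1234, 1235])
msg.write_stand_alone(chan)                        # serialize into the channel buffer
chan.flip()
skt = send_to_end_point(chan, '127.0.0.1', 55555)  # port must match the daemon's
skt.close()
```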
#### File: alertz/tests/test_alertz_proto.py
```python
import unittest
from io import StringIO
# import fieldz
# from fieldz import parser # UNUSED
from wireops.enum import FieldTypes
from fieldz import reg
from fieldz.enum import Quants
from fieldz.parser import StringProtoSpecParser
import fieldz.msg_spec as M
# PROTOCOLS ---------------------------------------------------------
from alertz_proto_spec import ALERTZ_PROTO_SPEC
# TESTS -------------------------------------------------------------
class TestAlertzProto(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
# actual unit tests #############################################
def test_alertz_proto(self):
""" this is in fact the current spec for a log entry """
proto_name = 'org.xlattice.alertz'
node_reg = reg.NodeReg()
proto_reg = reg.ProtoReg(proto_name, node_reg)
msg_reg = reg.MsgReg(proto_reg)
proto_spec = M.ProtoSpec(proto_name, proto_reg)
self.assertEqual(proto_name, proto_spec.name)
msg_name = 'zoneMismatch'
fields = [
# pylint: disable=no-member
M.FieldSpec(
msg_reg,
'timestamp',
FieldTypes.F_UINT32,
Quants.REQUIRED,
0),
M.FieldSpec(
msg_reg,
'seq_nbr',
FieldTypes.V_UINT32,
Quants.REQUIRED,
1),
M.FieldSpec(
msg_reg,
'zone_name',
FieldTypes.L_STRING,
Quants.REQUIRED,
2),
M.FieldSpec(
msg_reg,
'expected_serial',
FieldTypes.V_UINT32,
Quants.REQUIRED,
3),
M.FieldSpec(
msg_reg,
'actual_serial',
FieldTypes.V_UINT32,
Quants.REQUIRED,
4),
]
msg_spec = M.MsgSpec(msg_name, proto_spec, msg_reg)
self.assertEqual(msg_name, msg_spec.name)
        for field in fields:
            msg_spec.add_field(field)
# This is supposedly already done in __init__()
proto_spec.add_msg(msg_spec)
self.round_trip_proto_spec_via_string(proto_spec) # GEEP
def round_trip_proto_spec_via_string(self, match):
"""
        Convert a ProtoSpec object model to canonical string form,
        parse that to make a clone, and verify that the two are equivalent.
"""
canonical_spec = str(match.__repr__())
# DEBUG
print("### roundTrip: SPEC IN CANONICAL FORM:\n" + canonical_spec)
print("### END SPEC IN CANONICAL FORM #######")
# END
str_ps_parser = StringProtoSpecParser(StringIO(canonical_spec))
cloned_spec = str_ps_parser.parse()
self.assertIsNone(cloned_spec.parent)
self.assertIsNotNone(cloned_spec.reg)
# DEBUG
clone_repr = cloned_spec.__repr__()
print("### CLONED SPEC IN CANONICAL FORM:\n" + clone_repr)
print("### END CLONED SPEC ##############")
# END
# crude tests of __eq__ AKA ==
self.assertFalse(match is None)
self.assertTrue(match == match)
# one way of saying it ------------------
# XXX NEXT LINE FAILS
self.assertTrue(match.__eq__(cloned_spec))
self.assertTrue(cloned_spec.__eq__(match))
# this is the same test -----------------
self.assertTrue(match == cloned_spec)
self.assertTrue(cloned_spec == match)
def test_parse_and_write_proto_spec(self):
data = StringIO(ALERTZ_PROTO_SPEC)
str_ps_parser = StringProtoSpecParser(
data) # data should be file-like
# object model from string serialization
str_obj_model = str_ps_parser.parse()
self.assertIsNotNone(str_obj_model)
self.assertTrue(isinstance(str_obj_model, M.ProtoSpec))
self.assertEqual('org.xlattice.alertz', str_obj_model.name)
self.assertEqual(0, len(str_obj_model.enums))
self.assertEqual(16, len(str_obj_model.msgs))
self.assertEqual(0, len(str_obj_model.seqs))
msg_spec = str_obj_model.msgs[0]
self.assertEqual(msg_spec.field_name(0), 'timestamp')
self.assertEqual(msg_spec.field_type_name(0), 'fuint32')
self.assertEqual(msg_spec.field_name(1), 'seq_nbr')
self.assertEqual(msg_spec.field_type_name(1), 'vuint32')
self.assertEqual(msg_spec.field_name(2), 'zone_name')
self.assertEqual(msg_spec.field_type_name(2), 'lstring')
self.assertEqual(msg_spec.field_name(3), 'expected_serial')
self.assertEqual(msg_spec.field_type_name(3), 'vuint32')
self.assertEqual(msg_spec.field_name(4), 'actual_serial')
self.assertEqual(msg_spec.field_type_name(4), 'vuint32')
if __name__ == '__main__':
unittest.main()
```
#### File: alertz/tests/test_alertz_serialization.py
```python
import time
import unittest
from io import StringIO
# from importlib import reload # <---------------------
from rnglib import SimpleRNG
from wireops.chan import Channel
from fieldz.parser import StringProtoSpecParser
import fieldz.msg_spec as M
from fieldz.msg_impl import make_msg_class, MsgImpl
#import alertz_proto_spec
# reload(alertz_proto_spec) # <----------------------
from alertz_proto_spec import ALERTZ_PROTO_SPEC
BUFSIZE = 16 * 1024
RNG = SimpleRNG(time.time())
class TestAlertzSerialization(unittest.TestCase):
def setUp(self):
# data = StringIO(ALERTZ_PROTO_SPEC)
# str_ps_parser = StringProtoSpecParser(data) # data must be file-like
# # object model from string serialization
# self.str_obj_model = str_ps_parser.parse()
# self.proto_name = self.str_obj_model.name # dotted name of protocol
data = StringIO(ALERTZ_PROTO_SPEC)
print("AAA")
str_ps_parser = StringProtoSpecParser(data) # data must be file-like
print("BBB")
# object model from string serialization
self.str_obj_model = str_ps_parser.parse()
print("CCC")
self.proto_name = self.str_obj_model.name # dotted name of protocol
print("NNN")
def tearDown(self):
pass
# utility functions #############################################
def zone_mismatch_fields(self):
""" returns a list """
timestamp = RNG.next_int32()
seq_nbr = RNG.next_int32()
zone_name = RNG.next_file_name(8)
expected_serial = RNG.next_int32()
actual_serial = RNG.next_int32()
while actual_serial == expected_serial:
actual_serial = RNG.next_int32()
# NOTE that this is a list
return [timestamp, seq_nbr, zone_name, expected_serial, actual_serial]
def corrupt_list_fields(self):
timestamp = RNG.next_int32()
seq_nbr = RNG.next_int32()
remarks = RNG.next_file_name(16) # so 1 to 16 characters
return [timestamp, seq_nbr, remarks]
# actual unit tests #############################################
def test_zone_mismatch_msg(self):
# DEBUG
print("\nTEST_ZONE_MISMATCH_MSG")
# END
# from setUp(): =============================================
# end stuff from setup ======================================
# -----------------------------------------------------------
# XXX This code has been crudely hacked from another test
# module, and so needs careful review
# -----------------------------------------------------------
# verify that this adds 1 (msg) + 3 (field count) to the number
# of entries in getters, putters, etc
self.assertIsNotNone(self.str_obj_model)
self.assertTrue(isinstance(self.str_obj_model, M.ProtoSpec))
self.assertEqual('org.xlattice.alertz', self.str_obj_model.name)
self.assertEqual(0, len(self.str_obj_model.enums))
self.assertEqual(16, len(self.str_obj_model.msgs))
self.assertEqual(0, len(self.str_obj_model.seqs))
msg_spec = self.str_obj_model.msgs[0]
msg_name = msg_spec.name
self.assertEqual('zoneMismatch', msg_name)
# Create a channel ------------------------------------------
# its buffer will be used for both serializing # the instance
# data and, by deserializing it, for creating a second instance.
chan = Channel(BUFSIZE)
buf = chan.buffer
self.assertEqual(BUFSIZE, len(buf))
# create the ZoneMismatchMsg class ------------------------------
zone_mismatch_msg_cls = make_msg_class(self.str_obj_model, msg_name)
# create a message instance ---------------------------------
values = self.zone_mismatch_fields() # list of quasi-random values
zmm_msg = zone_mismatch_msg_cls(values)
self.assertEqual(msg_spec.name, zmm_msg.name)
# we don't have any nested enums or messages
# pylint: disable=no-member
self.assertEqual(0, len(zmm_msg.enums))
self.assertEqual(0, len(zmm_msg.msgs))
self.assertEqual(5, len(zmm_msg.field_classes))
self.assertEqual(5, len(zmm_msg)) # number of fields in instance
self.assertEqual(values[0], zmm_msg.timestamp)
self.assertEqual(values[1], zmm_msg.seq_nbr)
self.assertEqual(values[2], zmm_msg.zone_name)
self.assertEqual(values[3], zmm_msg.expected_serial)
self.assertEqual(values[4], zmm_msg.actual_serial)
# serialize the object to the channel -----------------------
# XXX WRITE HEADER FIRST!
# DEBUG
print("DIR (ZMM_MSG): ", end=' ')
print(dir(zmm_msg))
# END
zmm_msg.write_stand_alone(chan)
chan.flip()
# deserialize the channel, making a clone of the message ----
(read_back, _) = MsgImpl.read(chan, self.str_obj_model)
self.assertIsNotNone(read_back)
# DEBUG
print("position after mis-match read back is %d" % chan.position)
# END
# verify that the messages are identical --------------------
self.assertTrue(zmm_msg.__eq__(read_back))
# produce another message from the same values --------------
zmm_msg2 = zone_mismatch_msg_cls(values)
chan2 = Channel(BUFSIZE)
zmm_msg2.write_stand_alone(chan2)
chan2.flip()
(copy2, _) = MsgImpl.read(chan2, self.str_obj_model)
self.assertTrue(zmm_msg.__eq__(copy2))
self.assertTrue(zmm_msg2.__eq__(copy2)) # GEEP
def test_corrupt_list_msg(self):
# DEBUG
print("\nTEST_CORRUPT_LIST_MSG")
# END
# -----------------------------------------------------------
# XXX This code has been crudely hacked from another test
# module, and so needs careful review
# -----------------------------------------------------------
# verify that this adds 1 (msg) + 3 (field count) to the number
# of entries in getters, writeStandAlones, etc
msg_spec = self.str_obj_model.msgs[1] # <------
msg_name = msg_spec.name
self.assertEqual('corruptZoneList', msg_name)
# Create a channel ------------------------------------------
# its buffer will be used for both serializing # the instance
# data and, by deserializing it, for creating a second instance.
chan = Channel(BUFSIZE)
buf = chan.buffer
self.assertEqual(BUFSIZE, len(buf))
# create the CorruptListMsg class ------------------------------
corrupt_list_msg_cls = make_msg_class(self.str_obj_model, msg_name)
# create a message instance ---------------------------------
values = self.corrupt_list_fields() # list of quasi-random values
cl_msg = corrupt_list_msg_cls(values)
self.assertEqual(msg_spec.name, cl_msg.name)
# we don't have any nested enums or messages
# pylint: disable=no-member
self.assertEqual(0, len(cl_msg.enums))
self.assertEqual(0, len(cl_msg.msgs))
self.assertEqual(3, len(cl_msg.field_classes)) # <---
self.assertEqual(3, len(cl_msg)) # number of fields in instance
self.assertEqual(values[0], cl_msg.timestamp)
self.assertEqual(values[1], cl_msg.seq_nbr)
self.assertEqual(values[2], cl_msg.remarks)
# serialize the object to the channel -----------------------
cl_msg.write_stand_alone(chan)
chan.flip()
# deserialize the channel, making a clone of the message ----
(read_back, _) = MsgImpl.read(chan, self.str_obj_model)
self.assertIsNotNone(read_back)
# DEBUG
print("position after corrupt list read back is %d" % chan.position)
# END
# verify that the messages are identical --------------------
self.assertTrue(cl_msg.__eq__(read_back))
# produce another message from the same values --------------
cl_msg2 = corrupt_list_msg_cls(values)
chan2 = Channel(BUFSIZE)
cl_msg2.write_stand_alone(chan2)
chan2.flip()
(copy2, _) = MsgImpl.read(chan2, self.str_obj_model)
self.assertTrue(cl_msg.__eq__(copy2))
self.assertTrue(cl_msg2.__eq__(copy2)) # GEEP GEEP
def test_shutdown_msg(self):
# DEBUG
print("\nTEST_SHUTDOWN_MSG")
# END
# -----------------------------------------------------------
# XXX This code has been crudely hacked from another test
# module, and so needs careful review
# -----------------------------------------------------------
# verify that this adds 1 (msg) + 3 (field count) to the number
# of entries in getters, writeStandAlones, etc
msg_spec = self.str_obj_model.msgs[2] # <------
msg_name = msg_spec.name
self.assertEqual('shutdown', msg_name)
# Create a channel ------------------------------------------
# its buffer will be used for both serializing # the instance
# data and, by deserializing it, for creating a second instance.
chan = Channel(BUFSIZE)
buf = chan.buffer
self.assertEqual(BUFSIZE, len(buf))
# create the CorruptListMsg class ------------------------------
shutdown_msg_cls = make_msg_class(self.str_obj_model, msg_name)
# create a message instance ---------------------------------
values = [RNG.next_file_name(8), ] # list of quasi-random values
sd_msg = shutdown_msg_cls(values)
self.assertEqual(msg_name, sd_msg.name)
# we don't have any nested enums or messages
# pylint: disable=no-member
self.assertEqual(0, len(sd_msg.enums))
self.assertEqual(0, len(sd_msg.msgs))
self.assertEqual(1, len(sd_msg.field_classes)) # <---
self.assertEqual(1, len(sd_msg)) # number of fields in instance
self.assertEqual(values[0], sd_msg.remarks)
# serialize the object to the channel -----------------------
sd_msg.write_stand_alone(chan)
chan.flip()
# deserialize the channel, making a clone of the message ----
(read_back, _) = MsgImpl.read(chan, self.str_obj_model)
self.assertIsNotNone(read_back)
# DEBUG
print("position after shutdown read back is %d" % chan.position)
# END
# verify that the messages are identical --------------------
self.assertTrue(sd_msg.__eq__(read_back))
# produce another message from the same values --------------
sd_msg2 = shutdown_msg_cls(values)
chan2 = Channel(BUFSIZE)
sd_msg2.write_stand_alone(chan2)
chan2.flip()
(copy2, _) = MsgImpl.read(chan2, self.str_obj_model)
self.assertTrue(sd_msg.__eq__(copy2))
self.assertTrue(sd_msg2.__eq__(copy2)) # GEEP GEEP GEEP
if __name__ == '__main__':
unittest.main()
```
#### File: alertz/tests/test_with_dummy_client.py
```python
import os
import threading
import time
import unittest
# from io import StringIO # UNUSED
from rnglib import SimpleRNG
from wireops.chan import Channel
# import wireops.typed as T # AS YET UNUSED
# import fieldz.msg_spec as M # AS YET UNUSED
# from fieldz.parser import StringProtoSpecParser # AS YET UNUSED
# from fieldz.msg_impl import make_msg_class, make_field_class # UNUSED
from alertz import(CORRUPT_LIST_MSG, ZONE_MISMATCH_MSG,
__version__, __version_date__, Namespace, BUFSIZE)
from alertz.chan_io import send_to_end_point
from alertz.daemon import run_the_daemon, clear_logs
# from alertz_proto_spec import ALERTZ_PROTO_SPEC # AS YET UNUSED
RNG = SimpleRNG(time.time())
class TestWithDummyClient(unittest.TestCase):
next_seq_nbr = 0 # increment after each use
def setUp(self):
pass
def tearDown(self):
pass
# utility functions ---------------------------------------------
def do_clear_logs(self, options):
self.assertIsNotNone(options)
log_dir = options.log_dir
self.assertIsNotNone(log_dir)
if not os.path.exists(log_dir):
os.mkdir(log_dir)
self.assertTrue(os.path.exists(log_dir))
clear_logs(options)
# excessive paranoia
files = os.listdir(log_dir)
if files:
self.fail('logs/ has not been cleared')
# -----------------------------------------------------
def zone_mismatch_fields(self):
""" returns a list """
timestamp = int(time.time())
seq_nbr = TestWithDummyClient.next_seq_nbr
TestWithDummyClient.next_seq_nbr += 1 # used, so increment it
zone_name = RNG.next_file_name(8)
expected_serial = RNG.next_int32()
actual_serial = RNG.next_int32()
while actual_serial == expected_serial:
actual_serial = RNG.next_int32()
# NOTE that this is a list
return [timestamp, seq_nbr, zone_name, expected_serial, actual_serial]
def next_zone_mismatch_msg(self):
values = self.zone_mismatch_fields()
return ZONE_MISMATCH_MSG(values)
# -----------------------------------------------------
def corrupt_list_fields(self):
timestamp = int(time.time())
seq_nbr = TestWithDummyClient.next_seq_nbr
TestWithDummyClient.next_seq_nbr += 1 # used, so increment it
remarks = RNG.next_file_name(16)
return [timestamp, seq_nbr, remarks]
def next_corrupt_list_msg(self):
values = self.corrupt_list_fields()
return CORRUPT_LIST_MSG(values) # GEEP
# -----------------------------------------------------
def shutdown_fields(self):
# global next_seq_nbr
# timestamp = int(time.time())
# seqNbr = next_seq_nbr
# next_seq_nbr += 1 # used, so increment it
remarks = RNG.next_file_name(16)
return [remarks, ]
def next_shutdown_msg(self):
values = self.shutdown_fields()
        # NOTE: shutdown_msg_cls is neither defined nor imported in this
        # module; presumably a shutdown message class analogous to
        # ZONE_MISMATCH_MSG above is expected here.
        return shutdown_msg_cls(values)  # GEEP
# actual unit test(s) -------------------------------------------
def test_the_daemon(self):
chan = Channel(BUFSIZE)
chan.clear() # XXX should be guaranteed on new channel
msg_count = 8 + RNG.next_int16(25) # so 8..32
# DEBUG
print("MSG_COUNT = %u" % msg_count)
# END
# set up options ----------------------------------
now = int(time.time())
pgm_name_and_version = "testWithDummyClient v%s %s" % (
__version__, __version_date__)
with open('/etc/hostname', 'r') as file:
this_host = file.read().strip()
options = {} # a namespace, so to speak
options['ec2Host'] = False
options['justShow'] = False
options['log_dir'] = 'logs'
options['pgm_name_and_version'] = pgm_name_and_version
options['port'] = 55555
options['showTimestamp'] = False
options['showVersion'] = False
options['testing'] = True
options['this_host'] = this_host
options['timestamp'] = now
options['verbose'] = False
ns_ = Namespace(options)
# clear the log files (so delete any files under logs/) -----
self.do_clear_logs(ns_)
# start the daemon --------------------------------
daemon_t = threading.Thread(target=run_the_daemon, args=(ns_,))
daemon_t.start()
# give the daemon time to wake up --------------------------
time.sleep(0.15) # XXX without this we get an abort saying
# that libev cannot allocate (2G - 16)B
# start sending (some fixed number of ) messages ------------
msgs_sent = []
for nnn in range(msg_count):
msg = self.next_zone_mismatch_msg()
seq_nbr_field = msg[1]
# XXX by name would be better!
self.assertEqual(nnn, seq_nbr_field.value)
# serialize msg into the channel
chan.clear()
msg.write_stand_alone(chan)
chan.flip()
# send the msg to the daemon ------------------
skt = send_to_end_point(chan, '127.0.0.1', 55555)
time.sleep(0.05)
skt.close()
msgs_sent.append(msg)
# DEBUG
print("MSG %d HAS BEEN SENT" % nnn)
# END
self.assertEqual(msg_count, len(msgs_sent))
# delay a few ms --------------------------------------------
time.sleep(0.05)
# build and send shutdown msg -------------------------------
msg = self.next_shutdown_msg()
chan.clear()
msg.write_stand_alone(chan)
chan.flip()
skt = send_to_end_point(chan, '127.0.0.1', 55555)
# DEBUG
print("SHUTDOWN MSG HAS BEEN SENT")
# END
# delay a few ms --------------------------------------------
time.sleep(0.05)
skt.close()
# join the daemon thread ------------------------------------
time.sleep(0.05)
daemon_t.join()
# verify that the daemon's logs have the expected contents --
# XXX STUB XXX
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "jddixon/buildList",
"score": 2
}
|
#### File: buildList/tests/test_listgen.py
```python
import os
import time
import unittest
# from argparse import ArgumentParser
# from Crypto.PublicKey import RSA
from rnglib import SimpleRNG
from xlattice import HashTypes
from xlu import DirStruc # , UDirf # NOT YET USED
from buildlist import BuildList
# We expect this script to be run in $DEV_BASE/py/buildlist
PATH_TO_DATA = os.path.join(
'..', '..', 'dat', 'xl_testData', 'treeData', 'binExample_1')
DATA_DIR = os.path.join(PATH_TO_DATA, 'dataDir')
RSA_FILE = os.path.join(PATH_TO_DATA, 'node', 'skPriv.pem')
class TestBuildList(unittest.TestCase):
""" Test BuildList.listgen functionality. """
def setUp(self):
self.rng = SimpleRNG(time.time())
def tearDown(self):
pass
def do_listgen_test(self, title, hashtype, dirstruc):
"""
Test buildlist functionality for specific hash type and DirStruc.
"""
# MAJOR ERROR: This code logs to .dvcz/buildlist, the actual
# project log! Fix is:
dvcz_dir = os.path.join('tmp', self.rng.next_file_name(8))
while os.path.exists(dvcz_dir):
dvcz_dir = os.path.join('tmp', self.rng.next_file_name(8))
os.mkdir(dvcz_dir, 0o744)
# create the BuildList from what's in DATA_DIR
# -- RESTRUCTURE and just do this once for each hashtype -- in
# other words, this should be in a higher level function, one
# which runs a test for each dirstruc
BuildList.list_gen(
title=title,
data_dir=DATA_DIR,
dvcz_dir=dvcz_dir, # THE FIX
# list_file= # lastBuildList
logging=True,
u_path=os.path.join('tmp', str(hashtype.value), dirstruc.name),
hashtype=hashtype,
using_indir=True
)
# THE SAME BUILDLIST IS USED FOR EACH OF THE THREE DIRSTRUCS
# UNFINISHED
# Compare the BuildList with
# UNFINISHED
def test_build_list(self):
""" Test listgen functionality for suppored hash types. """
# DEBUG
# print("DATA_DIR is '%s'" % DATA_DIR)
# END
self.assertTrue(os.path.exists(DATA_DIR))
self.assertTrue(os.path.exists(RSA_FILE))
for hashtype in HashTypes:
for dirstruc in DirStruc:
self.do_listgen_test('SHA test', hashtype, dirstruc)
if __name__ == '__main__':
unittest.main()
```
#### File: buildList/tests/test_random_dir.py
```python
import os
import sys
import time
import unittest
import hashlib
from buildlist import BuildList
from rnglib import SimpleRNG
from xlattice import HashTypes, check_hashtype
from xlu import file_sha1hex, file_sha2hex, file_sha3hex, file_blake2b_hex
if sys.version_info < (3, 6):
# pylint:disable=unused-import
import sha3 # monkey-patches hashlib
assert sha3 # suppress warning
class TestRandomDir(unittest.TestCase):
""" Test building quasi-random data files and directory structures. """
def setUp(self):
self.rng = SimpleRNG(time.time())
def tearDown(self):
pass
# utility functions #############################################
# actual unit tests #############################################
def do_test_random_dir(self, hashtype):
""" Test building random directories with specific SHA hash type. """
check_hashtype(hashtype)
depth = 1 + self.rng.next_int16(3) # so 1 to 3
width = 1 + self.rng.next_int16(16) # so 1 to 16
blk_count = 1 + self.rng.next_int16(3) # so 1 to 3
        # last block will usually be only partially populated
max_len = BuildList.BLOCK_SIZE * (blk_count - 1) +\
self.rng.next_int16(BuildList.BLOCK_SIZE)
min_len = 1
# we want the directory name to be unique
path_to_dir = os.path.join('tmp', self.rng.next_file_name(8))
while os.path.exists(path_to_dir):
path_to_dir = os.path.join('tmp', self.rng.next_file_name(8))
self.rng.next_data_dir(path_to_dir, depth, width, max_len, min_len)
data = bytearray(max_len) # that many null bytes
self.rng.next_bytes(data) # fill with random data
if hashtype == HashTypes.SHA1:
sha = hashlib.sha1()
elif hashtype == HashTypes.SHA2:
sha = hashlib.sha256()
elif hashtype == HashTypes.SHA3:
# pylint:disable=no-member
sha = hashlib.sha3_256()
elif hashtype == HashTypes.BLAKE2B:
sha = hashlib.blake2b(digest_size=32)
else:
raise NotImplementedError
sha.update(data)
hash_ = sha.hexdigest()
file_name = self.rng.next_file_name(8)
path_to_file = os.path.join('tmp', file_name)
while os.path.exists(path_to_file):
file_name = self.rng.next_file_name(8)
path_to_file = os.path.join('tmp', file_name)
with open(path_to_file, 'wb') as file:
file.write(data)
if hashtype == HashTypes.SHA1:
file_hash = file_sha1hex(path_to_file)
elif hashtype == HashTypes.SHA2:
file_hash = file_sha2hex(path_to_file)
elif hashtype == HashTypes.SHA3:
file_hash = file_sha3hex(path_to_file)
elif hashtype == HashTypes.BLAKE2B:
file_hash = file_blake2b_hex(path_to_file)
else:
raise NotImplementedError
self.assertEqual(hash_, file_hash)
def test_random_dir(self):
""" Test building random directories with supported SHA hash types. """
for hashtype in HashTypes:
self.do_test_random_dir(hashtype)
if __name__ == '__main__':
unittest.main()
```
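The per-hashtype if/elif chains in do_test_random_dir could also be written table-driven. A sketch of that alternative (a hypothetical helper, not part of the test module), using only the hashlib constructors already exercised above; like the test, it assumes Python 3.6+ or the sha3 backport for sha3_256:

```python
import hashlib
from xlattice import HashTypes

# hypothetical lookup table mapping HashTypes members to hash constructors
_HASHERS = {
    HashTypes.SHA1: hashlib.sha1,
    HashTypes.SHA2: hashlib.sha256,
    HashTypes.SHA3: hashlib.sha3_256,
    HashTypes.BLAKE2B: lambda: hashlib.blake2b(digest_size=32),
}

def new_hasher(hashtype):
    """ Return a fresh hash object for the given HashTypes member. """
    try:
        return _HASHERS[hashtype]()
    except KeyError:
        raise NotImplementedError(hashtype)
```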
#### File: buildList/tests/test_timestamp.py
```python
import hashlib
import os
import time
import unittest
from rnglib import SimpleRNG
from xlu import file_sha1hex
from buildlist import BuildList
class TestTimestamp(unittest.TestCase):
""" Ostensibly tests BuildList timestamp. (Why?) """
def setUp(self):
self.rng = SimpleRNG(time.time())
def tearDown(self):
pass
def test_sha1_file(self):
"""
Verify functioning of xlu.file_sha1hex().
"""
blk_count = 1 + self.rng.next_int16(3) # so 1 to 3
        # last block will usually be only partially populated
byte_count = BuildList.BLOCK_SIZE * (blk_count - 1) +\
self.rng.next_int16(BuildList.BLOCK_SIZE)
data = bytearray(byte_count) # that many null bytes
self.rng.next_bytes(data) # fill with random data
d_val = hashlib.new('sha1')
d_val.update(data)
hash_ = d_val.hexdigest()
# make a unique test file name
file_name = self.rng.next_file_name(8)
path_to_file = os.path.join('tmp', file_name)
while os.path.exists(path_to_file):
file_name = self.rng.next_file_name(8)
path_to_file = os.path.join('tmp', file_name)
with open(path_to_file, 'wb') as file:
file.write(data)
file_hash = file_sha1hex(path_to_file)
self.assertEqual(hash_, file_hash)
if __name__ == '__main__':
unittest.main()
```
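The "pick a fresh random name under tmp/" idiom recurs in several of these tests (test_listgen, test_random_dir, test_timestamp). Factored out, it would look roughly like the hypothetical helper below, which relies only on os.path and SimpleRNG.next_file_name as used above:

```python
import os

def unique_path(rng, base='tmp', name_len=8):
    """ Return a path under `base` whose random name does not exist yet. """
    path = os.path.join(base, rng.next_file_name(name_len))
    while os.path.exists(path):
        path = os.path.join(base, rng.next_file_name(name_len))
    return path
```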
|
{
"source": "jddixon/clp",
"score": 3
}
|
#### File: src/clp/__init__.py
```python
__version__ = '0.1.14'
__version_date__ = '2018-03-21'
__all__ = ['CLPError',
'serialize_str_list']
class CLPError(RuntimeError):
""" Errors encountered in processing computer language. """
pass
def serialize_str_list(name, indent, elements, line_len=78):
"""
Given a list of strings, serialize it with the specified
initial indent, with list members SQUOTEd and separated by
COMMA SPACE. Return the serialized list as a list of one or more lines,
each of length not exceeding line_len. Newlines are not appended to
the output.
"""
output = []
name_len = len(name)
out = ' ' * indent + name + '=['
out_len = len(out)
if elements:
for element in elements:
            elm_len = len(element) + 4  # two SQUOTEs, comma, space
if out_len + elm_len > line_len:
output.append(out[0:-1])
out = (' ' * (indent + name_len + 2)) + "'" + element + "', "
out_len = len(out)
else:
out += "'" + element + "', "
out_len += elm_len
if out.endswith(', '):
out = out[0:-2]
out += ']'
output.append(out)
return output
```
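A short usage sketch for serialize_str_list; the attribute name and values are arbitrary, and the wrapped output follows directly from the function above:

```python
from clp import serialize_str_list

for line in serialize_str_list('crops', 4, ['wheat', 'barley', 'oats'], line_len=24):
    print(line)
# With a 24-column limit the list wraps; continuation lines are aligned
# with the first element and each wrapped line ends in a comma:
#     crops=['wheat',
#            'barley',
#            'oats']
```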
|
{
"source": "jddixon/dvcz",
"score": 2
}
|
#### File: dvcz/tests/test_dvc_setup.py
```python
import os
import unittest
# from dvcz import DvczError
from rnglib import SimpleRNG, valid_file_name
class DvcTestSetup(object):
"""
Create a directory structure under ./tmp/
See https://jddixon.github.io/dvcz for an extended description
of the subdirectory.
"""
def __init__(self):
self._rng = SimpleRNG()
self._run_id = self.rng.next_file_name(8)
self._run_dir = os.path.join('tmp', self._run_id)
while os.path.exists(self._run_dir):
self._run_id = self.rng.next_file_name(8)
self._run_dir = os.path.join('tmp', self._run_id)
os.makedirs(self._run_dir, mode=0o755)
# under this add home/LOGIN/dvcz/committers/ and stores/
self._login = os.environ['LOGNAME']
dvc_dir = os.path.join(
self._run_dir, os.path.join(
'home', os.path.join(self._login, 'dvcz')))
self._committers_dir = os.path.join(dvc_dir, 'committers')
self._home_stores_dir = os.path.join(dvc_dir, 'stores')
os.makedirs(self._committers_dir, mode=0o755)
os.makedirs(self._home_stores_dir, mode=0o755)
# on the same level as home
self._projects_dir = os.path.join(self._run_dir, 'projects')
os.makedirs(self._projects_dir, mode=0o755)
# on the same level as home
self._stores_dir = os.path.join(self._run_dir, 'stores')
os.makedirs(self._stores_dir, mode=0o755)
# maps
self._name2project = None
self._name2store = None
# lists
self._projects = [] # a list of Project objects
self._stores = [] # a list of UStore objects
@property
def rng(self):
""" Return a simple random number generator. """
return self._rng
@property
def run_id(self):
"""
        Return the quasi-random string uniquely identifying this
run directory.
"""
return self._run_id
@property
def run_dir(self):
"""
Return a path to this run directory, a path incorporating run_id."""
return self._run_dir
@property
def login(self):
"""
Return the host operating system login associated with this test run.
"""
return self._login
@property
def committers_dir(self):
"""
Return a path to the committers/ subdirectory containing
serialized Committer objects. In general one login may have many
such Committer objects.
"""
return self._committers_dir
@property
def home_stores_dir(self):
"""
Return a path to the stores/ subdirectory containing serialized
Store objects, one for each content-keyed store that a Committer
might have access to.
"""
return self._home_stores_dir
@property
def projects_dir(self):
"""
Return a path to the projects/ subdirectory. This will contain
subdirectories by Committer name, and below each serialized
Project objects .
"""
return self._projects_dir
@property
def stores_dir(self):
"""
Return a path to the stores/ subdirectory.
"""
return self._stores_dir
class TestDvcSetup(unittest.TestCase):
""" Test the setUp function for dvcz testing. """
def tearDown(self):
pass
def test_setup(self):
"""
We verify that setUp() has created the expected directory
structure.
"""
cfg = DvcTestSetup()
# DEBUG
print('cfg.run_id = %s' % cfg.run_id)
# END
self.assertTrue(valid_file_name(cfg.run_id))
self.assertTrue(os.path.exists(cfg.run_dir))
self.assertEqual(cfg.run_dir, os.path.join('tmp', cfg.run_id))
# weird problem with python: os.getlogin() usually returns '' but
# sometimes returns eg 'jdd'
try:
ret = os.getlogin()
print("os.getlogin() unexpectedly worked, returning %s" % ret)
except FileNotFoundError:
pass
self.assertEqual(cfg.login, os.environ['LOGNAME'])
        # pure laziness
self.assertEqual(cfg.committers_dir, cfg.run_dir + '/home/' +
cfg.login + '/dvcz/committers')
self.assertTrue(os.path.exists(cfg.committers_dir))
self.assertTrue(os.path.exists(cfg.home_stores_dir))
self.assertTrue(os.path.exists(cfg.projects_dir))
self.assertTrue(os.path.exists(cfg.stores_dir))
if __name__ == '__main__':
unittest.main()
```
#### File: dvcz/tests/test_store.py
```python
import unittest
from dvcz import DvczError
from dvcz.store import Store
from xlattice import HashTypes
from xlu import DirStruc
class TestStore(unittest.TestCase):
"""
Test the Store object and related functions.
In testing we create under tmp/ a unique runID for each test run.
Below this we create a dummy home directory:
tmp/RUN_ID/home/LOGIN/
and below this an equivalent to what in production would be .dvcz:
tmp/RUN_ID/home/LOGIN/dvcz/
USER_ID
projects/[PROJECT_NAME]*
stores/[STORE_NAME]*
For each store we create a UDir:
tmp/RUN_ID/STORE_NAME/
in/
tmp/
--hash--
--hash--
...
And for each project we create a dummy project:
tmp/RUN_ID/PROJECT_NAME/
.dvczignore
.dvcz/
builds
lastBuildList
...
version
-- arbitrary directory structure --
The simplest way to create such a dummy project is to copy a real
project directory there.
"""
def setUp(self):
pass
def tearDown(self):
pass
def do_test_good(self, name, u_path, dir_struc, hashtype):
""" Verify that parameters that should succeed do so. """
store = Store(name, u_path, dir_struc, hashtype)
self.assertEqual(store.name, name)
self.assertEqual(store.u_path, u_path)
self.assertEqual(store.dir_struc, dir_struc)
self.assertEqual(store.hashtype, hashtype)
# round-trip it
ser = store.__str__()
store_b = Store.create_from_string(ser)
self.assertEqual(store_b, store)
def test_good_stores(self):
""" Test various combinations of parameters that should succeed. """
for dir_struc in DirStruc:
for hashtype in HashTypes:
self.do_test_good('grinch', 'tmp/pqr', dir_struc, hashtype)
def do_test_bad_name(self, bad_name, u_path,
dir_struc=DirStruc.DIR_FLAT, hashtype=HashTypes.SHA2):
""" Verify that names that should be rejected are. """
try:
Store(bad_name, u_path, dir_struc, hashtype)
self.fail("Store didn't detect bad name '%s'" % bad_name)
except DvczError:
pass
def test_bad_names(self):
""" Test some instances of invalid names (ie, bad runIDs). """
self.do_test_bad_name('', 'tmp/frog')
self.do_test_bad_name(' ', 'tmp/frog') # space
self.do_test_bad_name('.', 'tmp/frog') # dot
self.do_test_bad_name('$', 'tmp/frog') # dollar
self.do_test_bad_name('a$b', 'tmp/frog')
self.do_test_bad_name('.b', 'tmp/frog')
self.do_test_bad_name('a b', 'tmp/frog') # space
self.do_test_bad_name('a\tb', 'tmp/frog') # tab
# ---------------------------------------------------------------
def do_test_bad_path(self, name, bad_path,
dir_struc=DirStruc.DIR16x16, hashtype=HashTypes.SHA2):
""" Verify that a bad path to a store is rejected. """
try:
Store(name, bad_path, dir_struc, hashtype)
self.fail("Store didn't detect bad path '%s'" % name)
except PermissionError:
pass
def test_bad_paths(self):
""" Verify that various inacceptable store paths are rejected. """
self.do_test_bad_path('frog', '/frog') # no permission to write
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "jddixon/fieldz",
"score": 3
}
|
#### File: src/fieldz/enum_spec.py
```python
import sys
from fieldz.enum import SimpleEnum
# pylint: disable=too-few-public-methods
class QEnum(SimpleEnum):
"""
This is actually another intertwined pair of enums: we want
these values to map to ['', '?', '*', '+'].
"""
def __init__(self):
super(QEnum, self).__init__(['REQUIRED', 'OPTIONAL', 'STAR', 'PLUS'])
# This allows us to import a reference to the class instance.
sys.modules[__name__] = QEnum()
```
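Because enum_spec replaces its own module object with a QEnum instance, importing the module yields that instance directly. A hedged usage sketch; it assumes SimpleEnum exposes each listed name as an attribute, which the member list suggests but which is not shown in this file:

```python
# hedged sketch -- the attribute access is an assumption, not verified here
import fieldz.enum_spec as q_spec   # q_spec is the QEnum instance, not a module

print(q_spec.REQUIRED, q_spec.OPTIONAL, q_spec.STAR, q_spec.PLUS)
```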
#### File: src/fieldz/msg_impl.py
```python
from wireops.enum import FieldTypes, PrimTypes
from wireops.raw import(length_as_varint, field_hdr_len, read_field_hdr,
write_raw_varint, read_raw_varint,
write_field_hdr)
from wireops.typed import T_GET_FUNCS, T_LEN_FUNCS, T_PUT_FUNCS
from fieldz import FieldzError
from fieldz.enum import Quants
from fieldz.field_impl import make_field_class
__all__ = ['make_msg_class', 'make_field_class', 'write', 'impl_len', ]
# SERIALIZATION METHODS ---------------------------------------------
# This interface should be compatible with registry {put,get,len}Func but
# is NOT. SHOULD REPLACE buf, pos WITH chan IN ALL PARAMETER LISTS
def impl_len(msg, nnn):
"""
msg is a reference to an instance of the MsgImpl class, n is its
field number. Returns the int length of the serialized object,
including the lenght of the field header.
"""
return msg.wire_len(nnn)
def _check_position(chan, end):
if chan.position > end:
err_msg = "read beyond end of buffer: position := %d, end is %d" % (
chan.position, end)
raise RuntimeError(err_msg)
# -------------------------------------------------------------------
# CODE FRAGMENTS: METHODS USED AS COMPONENTS IN BUILDING CLASSES
# -------------------------------------------------------------------
# pylint: disable=unused-argument
def write(self):
raise NotImplementedError
def my_getter(self):
raise NotImplementedError
def my_wire_len(self):
print("DEBUG: myWireLen invoked")
raise NotImplementedError
def my_p_wire_len(self, nnn): # field number for nested msg, regID otherwise
raise NotImplementedError
# specific to messages ----------------------------------------------
def my_enums(self):
#pylint: disable=protected-access
return self._enums
def my_msgs(self):
#pylint: disable=protected-access
return self._msgs
def my_field_classes(self):
#pylint: disable=protected-access
return self._field_classes
# specific to fields ------------------------------------------------
# FOR A GIVEN FIELD, THESE ARE CONSTANTS ASSIGNED BY make_field_class
#
#
# def myFType(cls): return cls._fType
#
#
# def myQuantifier(cls): return cls._quantifier
#
#
# def myFieldNbr(cls): return cls._fieldNbr
#
#
# def myDefault(cls): return cls._default
#
# these get and set the value attribute of the field instance; they
# have nothing to do with de/serialization to and from the channel
#
#
# def myValueGetter(self): return self._value
# XXX TYPE-SPECIFIC VALIDATION, COERCION:
#
#
# def myValueSetter(self, value): self._value = value # GEEP
# -------------------------------------------------------------------
# MESSAGE CLASS
# -------------------------------------------------------------------
# WAS OF TYPE 'type' 2016-08-02
class MsgImpl(object):
"""
An abstract class intended to serve as parent to automatically
generated classes whose purpose will be to ease user handling
of data being sent or received across the wire.
"""
# DISABLE __slots__ until better understood
# __slots__ = ['_field_classes', # list of field instances
# # '_fields_by_name',
# '_enums', # nested enums
# '_msgs', # nested messages
# ]
def __init__(self, mname, field_classes=None, enums=None, msgs=None):
self._mname = mname
self._field_classes = field_classes
self._enums = enums
self._msgs = msgs
self._parent_spec = None
# EXPERIMENT 2018-01-05
@property
def mname(self):
return self._mname
# END EXPERIMENT
def __eq__(self, other):
if other is None:
return False
if self is other:
return True
if self._mname != other.mname:
return False
# print "MESSAGE NAMES THE SAME" # DEBUG
# -- compare fields -------------------------------
if self._field_classes is None or other.field_classes is None:
return False
# print "SAME NUMBER OF FIELDS" # DEBUG
if len(self._field_classes) != len(other.field_classes):
return False
for i in range(len(self._field_classes)):
if self._field_classes[i] != other.field_classes[i]:
# DEBUG
print("MESSAGE FIELDS %d DIFFER" % i)
# END
return False
# print "FIELDS ARE THE SAME" # DEBUG
# -- compare nested enums -------------------------
if self._enums is None or other.enums is None:
return False
if len(self._enums) != len(other.enums):
return False
for i in range(len(self._enums)):
if self._enums[i] != other.enums[i]:
return False
# -- compare nested msgs --------------------------
if self._msgs is None or other.msgs is None:
return False
if len(self._msgs) != len(other.msgs):
return False
for i in range(len(self._msgs)):
if self._msgs[i] != other.msgs[i]:
return False
return True
def __len__(self):
return len(self._field_classes)
def __getitem__(self, nnn):
# 2016-08-02, same fix
# return self._fields[n]
return self._field_classes[nnn]
# -- INSTANCE SERIALIZATION -------------------------------------
# INSTANCE PUT ----------------------------------------
def write_stand_alone(self, chan):
"""
Write the message stand-alone, as the topmost message on the
channel. Returns the message index as a convenience in testing.
"""
mname = self._mname
ndx = self._parent_spec.msg_name_index(mname)
# DEBUG
print("WRITE_STAND_ALONE: MSG %s INDEX IS %d" % (mname, ndx))
# END
self.write(chan, ndx)
return ndx
def write(self, chan, nnn):
"""
n is the msg's field number OR regID
"""
write_field_hdr(
chan,
nnn,
PrimTypes.LEN_PLUS) # write the field header
msg_len = self._wire_len() # then the unprefixed length
write_raw_varint(chan, msg_len)
# XXX DEBUG
print("ENTERING MsgImpl.write FIELD NBR " +
"%u, MSG LEN IS %u; AFTER WRITING HDR OFFSET %u" % (
nnn, msg_len, chan.position))
# XXX This only makes sense for simple messages all of whose
# fields are required and so have only a single instance
for field in self._field_classes: # instances with a value attr
# CLASS-LEVEL SLOTS are '_name', '_fType', '_quantifier',
# '_fieldNbr', '_default',]
# INSTANCE-LEVEL SLOT is '_value'
#pylint: disable=protected-access
f_name = field._name
f_nbr = field.field_nbr
f_quant = field.quantifier # NEXT HURDLE
field_type = field.field_type
value = field.value
# default = field.default
# pylint: disable=no-member
if f_quant == Quants.REQUIRED or f_quant == Quants.OPTIONAL:
if field_type > 23:
# DEBUG
reg = self.msg_spec.reg
print("RECURSING TO WRITE FIELD %u TYPE %s" % (
f_nbr, reg.reg_id2name(field_type)))
# END
value.write(chan, f_nbr)
else:
# DEBUG
display_val = value
if field_type == FieldTypes.L_STRING and len(
display_val) > 16:
display_val = display_val[:16] + '...'
print("WRITING FIELD %u TYPE %u VALUE %s" % (
f_nbr, field_type, display_val))
# END
T_PUT_FUNCS[field_type](chan, value, f_nbr)
elif f_quant == Quants.PLUS or f_quant == Quants.STAR:
v_list = value
for varint_ in v_list:
# WORKING HERE
if field_type > 23:
# DEBUG
reg = self.msg_spec.reg
print("RECURSING TO WRITE FIELD %u TYPE %s" % (
f_nbr, reg.reg_id2name(field_type)))
# END
# this function recursing
varint_.write(chan, f_nbr)
else:
T_PUT_FUNCS[field_type](chan, varint_, f_nbr)
else:
raise RuntimeError(
"field '%s' has unknown quantifier '%s'" % (
f_name, f_quant)) # GEEP
# # DEBUG
# print "AFTER WRITING ENTIRE MESSAGE OFFSET IS %d" % chan.position
# # END
# -- INSTANCE GET -------------------------------------
@classmethod
def read(cls, chan, parent_spec):
"""msg refers to the msg, n is field number; returns msg, n"""
(p_type, nnn) = read_field_hdr(chan)
if nnn < 0 or nnn >= len(parent_spec.msgs):
raise RuntimeError("msg ID '%s' out of range" % nnn)
msg_spec = parent_spec.msgs[nnn]
msg_len = read_raw_varint(chan)
# DEBUG
print("IMPL_GETTER, P_TYPE %d, MSG/FIELD NBR %d, MSG_LEN %d" % (
p_type, nnn, msg_len))
# END
end = chan.position + msg_len
cls = _make_msg_class(parent_spec, msg_spec) # generated class
field_classes = [] # ???
values = [] # ???
# XXX THIS IS NOT GOING TO WORK, BECAUSE WE NEED TO PEEK XXX
# pylint: disable=no-member
for f_class in cls._field_classes:
#pylint: disable=protected-access
f_quant = f_class._quantifier
field_type = f_class._field_type # a number
field_nbr = f_class._field_nbr
# read the field header
(p_type, nbr) = read_field_hdr(chan)
# DEBUG
print(
" GET_FROM_CHAN, FIELD %u, TYPE %u" %
(field_nbr, field_type))
# END
if field_nbr != nbr:
raise RuntimeError(" EXPECTED FIELD_NBR %d, GOT %d" % (
field_nbr, nbr))
# pylint: disable=no-member
if f_quant == Quants.REQUIRED or f_quant == Quants.OPTIONAL:
if field_type > 23:
reg = cls.msg_spec.reg
# BEGIN JUNK ------------------------------------
# DEBUG
print(
"READING: FIELD TYPE IS %s" %
reg.reg_id2name(field_type))
# END
entry = reg.reg_id2entry(field_type)
print("READING: FIELD TYPE bis IS %s" % entry.name)
# END JUNK --------------------------------------
# child_spec = entry.msg_spec
# child_class = _make_msg_class(msg_spec, child_spec)
# RECURSE: read(childCls, chan, msgSpec)
# msgSpec is parentSpec here
value = T_GET_FUNCS[field_type](chan) # XXX WRONG
else:
value = T_GET_FUNCS[field_type](chan)
_check_position(chan, end)
values.append(value)
elif f_quant == Quants.PLUS or f_quant == Quants.STAR:
# v_list = [] # we are reading a list of values
# WORKING HERE
pass
else:
raise RuntimeError("unknown quantifier, index '%u'" % f_quant)
# DEBUG
print("AFTER COLLECTING %u FIELDS, OFFSET IS %u" % (
len(field_classes), chan.position))
# END
# XXX BLOWS UP: can't handle Quants.PLUS or Quants.STAR (about line
# 407)
return (cls(values), nnn) # GEEP
# -- INSTANCE SERIALIZED LENGTH -----------------------
def _wire_len(self):
"""
Returns the length of the body of a serialized message, excluding
the header.
"""
msg_len = 0
nnn = 0 # DEBUG
for field in self._field_classes:
f_name = field.fname
f_nbr = field.field_nbr
f_quant = field.quantifier # NEXT HURDLE
field_type = field.field_type
value = field.value
# XXX What follows doesn't quite make sense. If a REQUIRED
# message is missing, we simply won't find it. Likewise
# for Quants.STAR
# pylint: disable=no-member
if f_quant == Quants.REQUIRED or f_quant == Quants.OPTIONAL:
contrib = T_LEN_FUNCS[field_type](value, f_nbr)
# DEBUG
if field_type > 23:
reg = self.msg_spec.reg # or protocol reg?
# XXX is the registry for the protocol? msgSpec?
print(" F_TYPE %u IS MSG %s" %
(field_type, reg.reg_id2name(field_type)))
print(" LEN: FIELD %u (%s), TYPE %u, CONTRIBUTION %d" %
(nnn, f_name, field_type, contrib))
nnn += 1
# END
msg_len += contrib
elif f_quant == Quants.PLUS or f_quant == Quants.STAR:
# value will be a non-empty list; handle each individual
# member like Quants.REQUIRED
v_list = value
for varint_ in v_list:
# HACKING ABOUT
if field_type > 23:
# pylint: disable=no-member
reg = self.msg_spec.reg
# DEBUG
print(" LEN: FIELD TYPE IS %s" %
reg.reg_id2name(field_type))
# entry = reg.regID2Entry(fType)
# print " LEN: FIELD TYPE bis IS %s" % entry.name
# END
contrib = varint_.wire_len(f_nbr)
else:
# END HACKING
# -----------------------------------------------
# XXX FAILS with list index error, fType == 24 XXX
# -----------------------------------------------
# DEBUG
print("FIELD '%s' Quants.PLUS MEMBER TYPE IS %s" % (
f_name, field_type))
# END
contrib = T_LEN_FUNCS[field_type](varint_, f_nbr)
# DEBUG
print(" LEN: FIELD %u (%s), TYPE %u, CONTRIB %d" % (
nnn, f_name, field_type, contrib))
# END
nnn += 1
msg_len += contrib
else:
raise RuntimeError(
"field '%s' has unknown quantifier '%s'" % (
f_name, f_quant))
return msg_len
def wire_len(self, nnn):
"""
Return the length of a serialized message including the field
header, where n is the field number of a nested message or the
regID if the message is not nested.
"""
len_ = length_as_varint(field_hdr_len(nnn, PrimTypes.LEN_PLUS))
count = self._wire_len()
return len_ + length_as_varint(count) + count
# META_MSG ======================================================
class MetaMsg(type):
def __new__(mcs, name, bases, namespace, **kwargs):
# DEBUG
print("MetaMsgNEW gets called once")
# END
return super().__new__(mcs, name, bases, namespace)
def __init__(cls, name, bases, namespace, **kwargs):
# definitely works:
# setattr(cls, 'baz', '__init__ added to dictionary before super call')
super().__init__(name, bases, namespace)
print("MetaMsgINIT gets called once")
return
#############################################################
# BEING IGNORED - belongs in a maker class
#############################################################
# cls._field_classes = []
# cls._field_classes_by_name = {}
# values = args[0]
# for idx, val in enumerate(values):
# this_field = cls._field_classes[idx](val)
# cls._field_classes.append(this_field)
# cls._field_classes_by_name[thisField.fname] = thisField
# setattr(cls, this_field.fname, val)
# # DEBUG
# print("META_MSG.__call__:")
# print(" idx = %u" % idx)
# print(" name = %s" % cls._field_classes[idx].fname)
# print(" value = %s" % cls._field_classes[idx].value)
# # END
# print " THERE ARE %u FIELDS SET" % len(cls._field_classes) # DEBUG
# return super().__init__(name, bases, namespace)
#########################################################################
# DISABLE FOR NOW; XXX SHOULD BE ADDED WHEN THE INSTANCE HAS BEEN CREATED
#########################################################################
# don't permit any new attributes to be added
# XXX why do we need to do this given __slots__ list?
def __setattr__(cls, attr, value):
""" make the class more or less immutable """
#############################
# WHERE THE ERROR COMES FROM:
#############################
if attr not in dir(cls):
raise AttributeError('cannot create attribute by assignment!')
return type.__setattr__(cls, attr, value)
# ===================================================================
# MAKERS
# ===================================================================
# EXPERIMENT: IMPLEMENTATION OF MESSAGE CLASS __init__
# DROPPING THIS FOR NOW
# UN-DROPPED 2017-03-03
def msg_initter(cls, *args, **attrs):
# We want to create instances of the respective fields and
# assign 'arg' to field 'idx'. This means that field instances
# need to have been created before we get here
# DEBUG
print('INITTER:')
if args:
for idx, arg in enumerate(args):
print(" arg %u is '%s'" % (idx, str(arg)))
if attrs:
# XXX REVIEW ME:
        for key, val in attrs.items():
print(" kwarg attr is '%s', value is '%s'" % (key, str(val)))
# END
# XXX if msg_initter is dropped from the dictionary, I get an error at
# line 249 in __call__,
# return type.__call__(cls, *args, **kwargs)
# TypeError: object.__new__() takes no parameters
#
# XXX A Strange Little Device:
MSG_CLS_BY_Q_NAME = {} # PROTO_NAME . MSG_NAME => class
def make_msg_class(parent, name):
""" construct a MsgClass given a msg name known to the parent """
# DEBUG
print("\nMAKE_MSG_CLASS: parent '%s', name '%s'" % (parent, name))
# END
msg_spec = parent.get_msg_spec(name)
return _make_msg_class(parent, msg_spec)
def _make_msg_class(parent, msg_spec):
""" construct a MsgClass given a MsgSpec """
if parent is None:
raise FieldzError('parent must be specified')
proto_name = parent.name
if msg_spec is None:
raise FieldzError('msgSpec must be specified')
# XXX single-dot name and so NO NESTED MSG_CLASSes
qual_name = '%s.%s' % (proto_name, msg_spec.mname)
# DEBUG
print('_MAKE_MSG_CLASS for %s' % qual_name)
# END
if qual_name in MSG_CLS_BY_Q_NAME:
# XXX BUT CACHING HAS BEEN DISABLED
# DEBUG
print(" USING CACHED CLASS for %s\n" % qual_name)
# END
return MSG_CLS_BY_Q_NAME[qual_name]
# build list of field classes -----------------------------------
field_classes = []
field_classes_by_name = {}
field_classes_by_nbr = {} # by field nbr, not index
# XXX implicit assumption is that fields are ordered by ascending
# XXX field number
for field_spec in msg_spec:
# XXX NO ALLOWANCE FOR NESTED MSG_SPEC
cls = make_field_class(qual_name, field_spec)
field_classes.append(cls)
field_classes_by_name['%s.%s' % (qual_name, field_spec.fname)] = cls
field_classes_by_nbr[field_spec.field_nbr] = cls
# class is not in cache, so construct ---------------------------
# _enums = []
# _msgs = []
# DEBUG
print(" _MAKE_MSG_CLASS: _name is %s" % msg_spec.mname)
# END DEBUG
class Msg(MsgImpl, metaclass=MetaMsg,
# uncommented the next line 2017-02-03
__init__=msg_initter,
# 'name' already in use?
mname=msg_spec.mname,
enums=property(my_enums),
msgs=property(my_msgs),
field_classes=property(my_field_classes),
# EXPERIMENT 2012-12-15
parent_spec=parent,
msg_spec=msg_spec
# END EXPERIMENT
):
pass
# DEBUG =====================================
print("MSG_IMPL DICTIONARY:")
for key in Msg.__dict__:
print(" %-16s => %s" % (key, Msg.__dict__[key]))
# END =======================================
# DEBUG
print("\n_make_msg_class returning something of type %s\n" % type(Msg))
# END
# ----------------------------
# possibly some more fiddling ...
# ---------------------------
    # XXX DISABLING CACHING
# MSG_CLS_BY_Q_NAME[qual_name] = Msg
return Msg
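# Hedged usage sketch, mirroring the tests elsewhere in this repo: given a
# parsed ProtoSpec 'proto' containing a message named 'logEntry',
#   cls = make_msg_class(proto, 'logEntry')
#   msg = cls(values)   # 'values' ordered to match the message's field specs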
```
#### File: fieldz/tests/test_little_big.py
```python
import time
import unittest
from io import StringIO
from rnglib import SimpleRNG
# XXX DOES NOT import write if named 'putter':
from fieldz.msg_impl import make_msg_class,\
make_field_class
from fieldz.parser import StringProtoSpecParser
import fieldz.msg_spec as M
# import wireops.field_types as F
# import wireops.typed as T
from wireops.chan import Channel
#################################################################
# THIS WAS HACKED FROM testProtoSpec.py; CAN HACK MORE FROM THERE
#################################################################
# PROTOCOLS ---------------------------------------------------------
from little_big_test import LITTLE_BIG_PROTO_SPEC
BUFSIZE = 16 * 1024
# TESTS -------------------------------------------------------------
class TestLittleBig(unittest.TestCase):
def setUp(self):
self.rng = SimpleRNG(time.time())
data = StringIO(LITTLE_BIG_PROTO_SPEC)
ppp = StringProtoSpecParser(data) # data should be file-like
self.str_obj_model = ppp.parse() # object model from string serialization
self.proto_name = self.str_obj_model.name # the dotted name of the protocol
def tearDown(self):
pass
# utility functions #############################################
def lil_big_msg_values(self):
values = []
# XXX these MUST be kept in sync with littleBigTest.py
values.append(self.rng.next_boolean()) # vBoolReqField
values.append(self.rng.next_int16()) # vEnumReqField
values.append(self.rng.next_int32()) # vuInt32ReqField
values.append(self.rng.next_int32()) # vuInt64ReqField
values.append(self.rng.next_int64()) # vsInt32ReqField
values.append(self.rng.next_int64()) # vsInt64ReqField
# #vuInt32ReqField
# #vuInt64ReqField
values.append(self.rng.next_int32()) # fsInt32ReqField
values.append(self.rng.next_int32()) # fuInt32ReqField
values.append(self.rng.next_real()) # fFloatReqField
values.append(self.rng.next_int64()) # fsInt64ReqField
values.append(self.rng.next_int64()) # fuInt64ReqField
values.append(self.rng.next_real()) # fDoubleReqField
values.append(self.rng.next_file_name(16)) # lStringReqField
rnd_len = 16 + self.rng.next_int16(49)
byte_buf = bytearray(rnd_len)
self.rng.next_bytes(byte_buf)
values.append(bytes(byte_buf)) # lBytesReqField
b128_buf = bytearray(16)
self.rng.next_bytes(b128_buf)
values.append(bytes(b128_buf)) # fBytes16ReqField
b160_buf = bytearray(20)
self.rng.next_bytes(b160_buf)
values.append(bytes(b160_buf)) # fBytes20ReqField
b256_buf = bytearray(32)
self.rng.next_bytes(b256_buf)
values.append(bytes(b256_buf)) # fBytes32ReqField
return values
# actual unit tests #############################################
def check_field_impl_against_spec(
self, proto_name, msg_name, field_spec, value):
self.assertIsNotNone(field_spec)
dotted_name = "%s.%s" % (proto_name, msg_name)
cls = make_field_class(dotted_name, field_spec)
if '__dict__' in dir(cls):
print('\nGENERATED FieldImpl CLASS DICTIONARY')
for exc in list(cls.__dict__.keys()):
print("%-20s %s" % (exc, cls.__dict__[exc]))
self.assertIsNotNone(cls)
datum = cls(value)
self.assertIsNotNone(datum)
# class attributes --------------------------------
# pylint:disable=no-member
self.assertEqual(field_spec.fname, datum.fname) # L 106
# pylint:disable=no-member
self.assertEqual(field_spec.field_type, datum.field_type)
# pylint:disable=no-member
self.assertEqual(field_spec.quantifier, datum.quantifier)
# pylint:disable=no-member
self.assertEqual(field_spec.field_nbr, datum.field_nbr)
# pylint:disable=no-member
self.assertIsNone(datum.default) # not an elegant test
# instance attribute ------------------------------
# pylint:disable=no-member
self.assertEqual(value, datum.value)
# with slots enabled, this is never seen ----------
# because __dict__ is not in the list of valid
# attributes for f
if '__dict__' in dir(datum):
print('\nGENERATED FieldImpl INSTANCE DICTIONARY')
for item in list(datum.__dict__.keys()):
print("%-20s %s" % (item, datum.__dict__[item])) # GEEP
def test_field_impl(self):
# DEBUG
print("TEST_FIELD_IMPL")
# END
msg_spec = self.str_obj_model.msgs[0]
# the fields in this imaginary logEntry
values = self.lil_big_msg_values()
for i in range(len(msg_spec)):
print(
"\nDEBUG: field %u ------------------------------------------------------" %
i)
field_spec = msg_spec[i]
self.check_field_impl_against_spec(
self.proto_name, msg_spec.name, field_spec, values[i])
def test_caching(self):
# DEBUG
print("TEST_CACHING")
# END
self.assertTrue(isinstance(self.str_obj_model, M.ProtoSpec))
# XXX A HACK WHILE WE CHANGE INTERFACE ------------
msg_spec = self.str_obj_model.msgs[0]
mname = msg_spec.mname
cls0 = make_msg_class(self.str_obj_model, mname)
# DEBUG
print("EXPECTING CLASS, FOUND: %s" % type(cls0)) # <<<< KUKEMAL !
# END
inst0 = cls0(mname)
# DEBUG
# pylint:disable=no-member
print("Constructed inst0 mname is '%s'" % inst0.mname)
# END
# pylint:disable=no-member
self.assertEqual(mname, inst0.mname)
# THIS IS A CLASS, NOT AN INSTANCE
cls1 = make_msg_class(self.str_obj_model, mname)
inst1 = cls1(mname)
# pylint:disable=no-member
self.assertEqual(mname, inst1.mname)
# END HACK ----------------------------------------
        # we cache classes, so the two should be the same
#############################################################
# self.assertEqual(id(cls0), id(cls1)) # FAILS FAILS FAILS
#############################################################
# chan = Channel(BUFSIZE)
values = self.lil_big_msg_values()
lil_big_msg0 = cls0(values)
lil_big_msg1 = cls0(values)
# we don't cache instances, so these will differ
self.assertNotEqual(id(lil_big_msg0), id(lil_big_msg1))
field_spec = msg_spec[0]
dotted_name = "%s.%s" % (self.proto_name, msg_spec.mname)
f0cls = make_field_class(dotted_name, field_spec)
f1cls = make_field_class(dotted_name, field_spec)
self.assertEqual(id(f0cls), id(f1cls))
def test_little_big(self):
self.assertIsNotNone(self.str_obj_model)
self.assertTrue(isinstance(self.str_obj_model, M.ProtoSpec))
self.assertEqual('org.xlattice.fieldz.test.littleBigProto',
self.str_obj_model.name)
self.assertEqual(0, len(self.str_obj_model.enums))
self.assertEqual(1, len(self.str_obj_model.msgs))
self.assertEqual(0, len(self.str_obj_model.seqs))
msg_spec = self.str_obj_model.msgs[0]
# Create a channel ------------------------------------------
# its buffer will be used for both serializing the instance
# data and, by deserializing it, for creating a second instance.
chan = Channel(BUFSIZE)
buf = chan.buffer
self.assertEqual(BUFSIZE, len(buf))
# create the LittleBigMsg class ------------------------------
little_big_msg_cls = make_msg_class(self.str_obj_model, msg_spec.mname)
# -------------------------------------------------------------
# XXX the following fails because field 2 is seen as a property
# instead of a list
if False: # DEBUGGING
print('\nLittleBigMsg CLASS DICTIONARY')
for (ndx, key) in enumerate(little_big_msg_cls.__dict__.keys()):
print(
"%3u: %-20s %s" %
(ndx, key, little_big_msg_cls.__dict__[key]))
# -------------------------------------------------------------
# create a message instance ---------------------------------
values = self.lil_big_msg_values() # quasi-random values
lil_big_msg = little_big_msg_cls(values[0]) # [0] IS EXPERIMENT
# __setattr__ in MetaMsg raises exception on any attempt
# to add new attributes. This works at the class level but
# NOT at the instance level
#
# XXX HACK
print("*** SKIPPING ASSIGNENT-TO-CONSTANT TEST ***")
# END
if False:
try:
lil_big_msg.foo = 42
self.fail(
"ERROR: attempt to assign new instance attribute succeeded")
except AttributeError as a_exc:
# DEBUG
print(
"ATTR ERROR ATTEMPTING TO SET lilBigMsg.foo: " +
str(a_exc))
# END
pass
if '__dict__' in dir(lil_big_msg):
print('\nlilBigMsg INSTANCE DICTIONARY')
for exc in list(lil_big_msg.__dict__.keys()):
print("%-20s %s" % (exc, lil_big_msg.__dict__[exc]))
# lilBigMsg.name is a property
# XXX HACK
print("*** SKIPPING ASSIGNENT-TO-PROPERTY TEST ***")
# END
if False:
try:
lil_big_msg.mname = 'boo'
self.fail("ERROR: attempt to change message name succeeded")
except AttributeError:
pass
# DEBUG
print("TYPE msg_spec.mname: %s" % type(msg_spec.mname))
print("TYPE lil_big_msg.mname: %s" % type(lil_big_msg.mname))
# END
self.assertEqual(msg_spec.mname, lil_big_msg.mname)
# we don't have any nested enums or messages
# pylint:disable=no-member
self.assertEqual(0, len(lil_big_msg.enums))
# pylint:disable=no-member
self.assertEqual(0, len(lil_big_msg.msgs))
# pylint:disable=no-member
self.assertEqual(17, len(lil_big_msg.field_classes))
# number of fields in instance
self.assertEqual(17, len(lil_big_msg))
for i in range(len(lil_big_msg)):
self.assertEqual(values[i], lil_big_msg[i].value)
# serialize the object to the channel -----------------------
print("\nDEBUG: PHASE A ######################################")
nnn = lil_big_msg.write_stand_alone(chan)
old_position = chan.position
chan.flip()
self.assertEqual(old_position, chan.limit)
self.assertEqual(0, chan.position)
# deserialize the channel, making a clone of the message ----
(read_back, nn2) = little_big_msg_cls.read(
chan, self.str_obj_model) # sOM is protoSpec
self.assertIsNotNone(read_back)
self.assertEqual(nnn, nn2)
# verify that the messages are identical --------------------
self.assertTrue(lil_big_msg.__eq__(read_back))
print("\nDEBUG: PHASE B ######################################")
# produce another message from the same values --------------
lil_big_msg2 = little_big_msg_cls(values)
chan2 = Channel(BUFSIZE)
nnn = lil_big_msg2.write_stand_alone(chan2)
chan2.flip()
(copy2, nn3) = little_big_msg_cls.read(chan2, self.str_obj_model)
self.assertIsNotNone(copy2)
self.assertEqual(nnn, nn3)
self.assertTrue(lil_big_msg.__eq__(copy2))
self.assertTrue(lil_big_msg2.__eq__(copy2))
# test clear()
chan2.position = 97
chan2.limit = 107
chan2.clear()
self.assertEqual(0, chan2.limit)
self.assertEqual(0, chan2.position)
if __name__ == '__main__':
unittest.main()
```
#### File: fieldz/tests/test_log_entry.py
```python
import time
import unittest
from rnglib import SimpleRNG
from wireops.enum import FieldTypes
# import wireops.typed as T
from fieldz.enum import Quants
import fieldz.msg_spec as M
import fieldz.reg as R
from fieldz.tfbuffer import TFReader, TFWriter
BUFSIZE = 16 * 1024
RNG = SimpleRNG(time.time())
# -- logEntry msgSpec ---------------------------
PROTOCOL = 'org.xlattice.upax'
NODE_REG = R.NodeReg()
PROTO_REG = R.ProtoReg(PROTOCOL, NODE_REG)
PARENT = M.ProtoSpec(PROTOCOL, PROTO_REG)
MSG_REG = R.MsgReg(PROTO_REG)
NAME = 'logEntry'
ENUM = M.EnumSpec.create('foo', [('not', 0), ('being', 1), ('used', 2), ])
FIELDS = [
# pylint: disable=no-member
M.FieldSpec(MSG_REG, 'timestamp', FieldTypes.F_UINT32, Quants.REQUIRED, 0),
M.FieldSpec(MSG_REG, 'node_id', FieldTypes.F_BYTES20, Quants.REQUIRED, 1),
M.FieldSpec(MSG_REG, 'key', FieldTypes.F_BYTES20, Quants.REQUIRED, 2),
M.FieldSpec(MSG_REG, 'length', FieldTypes.V_UINT32, Quants.REQUIRED, 3),
M.FieldSpec(MSG_REG, 'by_', FieldTypes.L_STRING, Quants.REQUIRED, 4),
M.FieldSpec(MSG_REG, 'path', FieldTypes.L_STRING, Quants.REQUIRED, 5),
]
LE_MSG_SPEC = M.MsgSpec(NAME, PROTO_REG, PARENT)
for field in FIELDS:
    LE_MSG_SPEC.add_field(field)
UPAX_PROTO_SPEC = M.ProtoSpec(PROTOCOL, PROTO_REG)
UPAX_PROTO_SPEC.add_enum(ENUM)
UPAX_PROTO_SPEC.add_msg(LE_MSG_SPEC)
# -- end logEntry msgSpec -----------------------
class TestLogEntry(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
# utility functions #############################################
def dump_wire_buffer(self, wb_):
for i in range(16):
print("0x%02x " % wb_.buf[i], end=' ')
print()
# actual unit tests #############################################
def test_proto_spec(self):
self.assertIsNotNone(NODE_REG)
self.assertIsNotNone(PROTO_REG)
def test_constructors(self):
p_spec = UPAX_PROTO_SPEC
self.assertEqual(PROTOCOL, p_spec.name)
self.assertEqual(ENUM, p_spec.enums[0])
self.assertEqual(LE_MSG_SPEC, p_spec.msgs[0])
self.assertEqual(0, len(p_spec.seqs))
def test_writing_and_reading(self):
writer = TFWriter.create(LE_MSG_SPEC, BUFSIZE)
writer.clear() # should not be necessary
buf = writer.buffer
# reader and writer share same buffer
reader = TFReader(LE_MSG_SPEC, BUFSIZE, buf)
tstamp = int(time.time())
node_id = bytearray(20) # 160 bit
RNG.next_bytes(node_id) # .... random value
key = bytearray(20) # 160 bit
RNG.next_bytes(key) # .... random value
length = RNG.next_int32()
by_ = RNG.next_file_name(16)
path = 'path/to/' + RNG.next_file_name(16)
nnn = 0 # 0-based field number
# write a log entry into the buffer
writer.put_next(nnn, tstamp)
nnn = nnn + 1
writer.put_next(nnn, node_id)
nnn = nnn + 1
writer.put_next(nnn, key)
nnn = nnn + 1
writer.put_next(nnn, length)
nnn = nnn + 1
writer.put_next(nnn, by_)
nnn = nnn + 1
writer.put_next(nnn, path)
# now read the buffer to see what actually was written
self.assertEqual(0, reader.position)
reader.get_next()
self.assertEqual(0, reader.field_nbr)
self.assertEqual('fuint32', reader.field_type.sym)
self.assertEqual(tstamp, reader.value)
self.assertEqual(5, reader.position)
reader.get_next()
self.assertEqual(1, reader.field_nbr)
self.assertEqual('fbytes20', reader.field_type.sym)
self.assertEqual(node_id, reader.value)
self.assertEqual(26, reader.position)
reader.get_next()
self.assertEqual(2, reader.field_nbr)
self.assertEqual('fbytes20', reader.field_type.sym)
self.assertEqual(key, reader.value)
self.assertEqual(47, reader.position)
reader.get_next()
self.assertEqual(3, reader.field_nbr)
self.assertEqual('vuint32', reader.field_type.sym)
self.assertEqual(length, reader.value)
reader.get_next()
self.assertEqual(4, reader.field_nbr)
self.assertEqual('lstring', reader.field_type.sym)
self.assertEqual(by_, reader.value)
reader.get_next()
self.assertEqual(5, reader.field_nbr)
self.assertEqual('lstring', reader.field_type.sym)
self.assertEqual(path, reader.value)
if __name__ == '__main__':
unittest.main()
```
#### File: fieldz/tests/test_roundtrip_big_test.py
```python
import time
import unittest
from io import StringIO
from rnglib import SimpleRNG
# from fieldz import reg
from fieldz.parser import StringProtoSpecParser
from big_test import BIG_TEST
class TestBigTest(unittest.TestCase):
def setUp(self):
self.rng = SimpleRNG(time.time())
def tearDown(self):
pass
# utility functions #############################################
# actual unit tests #############################################
    def round_trip_proto_spec_via_string(self, match):
"""
Convert a ProtoSpec object model to canonical string form,
parse that to make a clone, and verify that the two are
equal.
"""
canonical_spec = match.__repr__()
ppp = StringProtoSpecParser(StringIO(canonical_spec))
cloned_spec = ppp.parse()
# crude tests of __eq__ AKA ==
self.assertFalse(match is None)
self.assertTrue(match == match)
# one way of saying it ------------------
self.assertTrue(match.__eq__(cloned_spec))
self.assertTrue(cloned_spec.__eq__(match))
# this is the same test -----------------
self.assertTrue(match == cloned_spec)
self.assertTrue(cloned_spec == match)
def test_compiler(self):
# node_reg = reg.NodeReg() # NEVER USED
# protocol = 'org.xlattice.fieldz.test.bigProto' # NEVER USED
# proto_reg = reg.ProtoReg(protocol, node_reg) # never used
data = StringIO(BIG_TEST)
self.assertIsNotNone(data)
ppp = StringProtoSpecParser(data)
# DEBUG
print("INVOKING PARSER")
# END
big_proto_spec = ppp.parse()
# DEBUG
print("BACK FROM PARSER")
# END
# confirm that field numbers are unique and increasing
match = big_proto_spec.msgs[0]
# DEBUG
print("COUNT OF MATCHING FIELDS: %d" % len(match))
# END
last_field_nbr = -1
for field in match:
self.assertTrue(field.field_nbr > last_field_nbr)
last_field_nbr = field.field_nbr
# DEBUG
print("ROUND-TRIPPING BIG PROTO SPEC")
# END
        self.round_trip_proto_spec_via_string(big_proto_spec)
# ---------------------------------------------------------------
def round_trip_proto_instance_to_wire_format(self, match):
# invoke WireMsgSpecWriter
# XXX STUB
# invoke WireMsgSpecParser
# XXX STUB
pass
# XXX EFFECTIVELY COMMENTED OUT XXX
def x_test_round_trip_big_test_instances_to_wire_format(self):
# str_spec = StringIO(BIG_TEST)
str_spec = StringIO(BIG_TEST)
ppp = StringProtoSpecParser(str_spec)
big_msg_spec = ppp.parse()
self.round_trip_proto_instance_to_wire_format(big_msg_spec)
if __name__ == '__main__':
unittest.main()
```
#### File: fieldz/tests/test_roundtrip_core_types.py
```python
import unittest
from io import StringIO
from wireops.chan import Channel
from wireops.enum import FieldTypes, PrimTypes
from wireops.raw import(
field_hdr_len,
read_field_hdr,
)
import fieldz.msg_spec as M
from fieldz.enum import CoreTypes, Quants
from fieldz.reg import NodeReg, ProtoReg, MsgReg
from fieldz.parser import StringMsgSpecParser
LOG_ENTRY_MSG_SPEC = """
# protocol org.xlattice.zoggery
message logEntry:
timestamp fuint32
nodeID fbytes20
key fbytes20
length vuint32
by lstring
path lstring
"""
class TestCoreTypes(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_new_coretypes_enum(self):
"""
Test CoreTypes as redefined 2017-01-30.
CoreTypes is now an IntEnum with sym() and from_sym() methods.
"""
for ndx, _ in enumerate(CoreTypes):
self.assertEqual(_.value, ndx)
self.assertEqual(CoreTypes.from_sym(_.sym), _)
# pylint: disable=no-member
self.assertEqual(len(CoreTypes), CoreTypes.PROTO_SPEC + 1)
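        # Hedged example of the sym()/from_sym() pairing exercised above:
        # CoreTypes.ENUM_SPEC.sym == 'EnumSpec' and
        # CoreTypes.from_sym('EnumSpec') == CoreTypes.ENUM_SPEC.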
# utility functions #############################################
def make_registries(self, protocol):
node_reg = NodeReg()
proto_reg = ProtoReg(protocol, node_reg)
msg_reg = MsgReg(proto_reg)
return (node_reg, proto_reg, msg_reg)
# actual unit tests #############################################
def test_the_enum(self):
# pylint: disable=no-member
self.assertEqual(CoreTypes.ENUM_PAIR_SPEC.sym, 'EnumPairSpec')
self.assertEqual(CoreTypes.ENUM_SPEC.sym, 'EnumSpec')
self.assertEqual(CoreTypes.FIELD_SPEC.sym, 'FieldSpec')
self.assertEqual(CoreTypes.MSG_SPEC.sym, 'MsgSpec')
self.assertEqual(CoreTypes.SEQ_SPEC.sym, 'SeqSpec')
self.assertEqual(CoreTypes.PROTO_SPEC.sym, 'ProtoSpec')
self.assertEqual(len(CoreTypes), 6)
def round_trip_to_wire_format(self, chan, field_nbr, c_type, val):
node_reg, proto_reg, msg_reg = self.make_registries(
'org.xlattice.fieldz.test.roundTrip')
# DEBUG
print("roundTripWireFormat: field_nbr = %d, c_type = %d (%s)" % (
field_nbr, c_type.value, c_type.sym))
print(" val is a ", type(val))
print(" symbol '%s' value '%s'" % (val.symbol, val.value))
# END ********************** <--- !!!
chan.clear() # I guess :-)
# buf = chan.buffer # NEVER USED
putter = M.C_PUT_FUNCS[c_type.value]
getter = M.C_GET_FUNCS[c_type.value]
# len_func = M.C_LEN_FUNCS[c_type.value] # NEVER USED
# p_len_func = M.C_P_LEN_FUNCS[c_type.value] # NEVER USED
# comment of unknown value/validity: # BUT c_type.value must be >18!
# XXX WRONG: need the msg spec instance, not the class
# BLOWS UP XXX ##############################################
len_ = field_hdr_len(
field_nbr, M.MsgSpec.field_type_from_nbr(field_nbr)) # FAILS HERE
# END BLOWS UP XXX ##########################################
# r_pos = 0 # read # NEVER USED
# expected_pos = p_len_func(val, field_nbr) # NEVER USED
putter(chan, val, 0) # writing field 0
chan.flip()
# w_pos = chan.limit # NEVER USED
(p_type, field_nbr) = read_field_hdr(chan)
actual_hdr_len = chan.position
self.assertEqual(PrimTypes.LEN_PLUS, p_type)
self.assertEqual(0, field_nbr) # field number
self.assertEqual(len_, actual_hdr_len)
# FAILS:
# if chan is present
# enumPairSpecGetter() takes 1 positional argument but 2 were given
# ret_val = getter(msgReg, chan)
# else # chan is absent
# field_spec_getter() missing 1 req positional argument: 'chan'
# 2016-10-30 GOT FIRST FAILURE MODE
ret_val = getter(msg_reg, chan)
# gets the same error:ret_val = M.cGetFuncs[c_type.value](chan)
# r_pos = chan.position # NEVER USED
# DEBUG
print(" ROUND TRIP: val = %s" % val)
print(" ret_val = %s" % ret_val)
# END
self.assertEqual(val, ret_val)
def test_round_tripping_core_types(self):
buf_size = 16 * 1024
chan = Channel(buf_size)
# -----------------------------------------------------------
field_nbr = 0 # 0-based field number
ser = M.EnumPairSpec('funnyFarm', 497)
self.round_trip_to_wire_format(
# pylint: disable=no-member
chan, field_nbr, CoreTypes.ENUM_PAIR_SPEC, ser) # FAILS ???
# -----------------------------------------------------------
protocol = 'org.xlattice.upax'
node_reg, proto_reg, msg_reg = self.make_registries(protocol)
field_nbr = 0 # 0-based field number
pairs = [('funnyFarm', 497),
('myOpia', 53),
('frogHeaven', 919), ]
ser = M.EnumSpec.create('thisEnum', pairs)
self.assertEqual(3, len(ser))
# self.round_trip_to_wire_format(
# pylint: disable=no-member
# chan, field_nbr, CoreTypes.ENUM_SPEC, ser)
# -----------------------------------------------------------
protocol = 'org.xlattice.upax'
node_reg, proto_reg, msg_reg = self.make_registries(protocol)
field_nbr = 0 # 0-based field number
ser = M.FieldSpec(
msg_reg,
'jollyGood',
# pylint: disable=no-member
FieldTypes.V_SINT32,
Quants.OPTIONAL,
37)
self.round_trip_to_wire_format(
# pylint: disable=no-member
chan, field_nbr, CoreTypes.FIELD_SPEC, ser)
# -----------------------------------------------------------
# MsgSpec without enum
protocol = 'org.xlattice.upax'
node_reg, proto_reg, msg_reg = self.make_registries(protocol)
data = StringIO(LOG_ENTRY_MSG_SPEC)
ppp = StringMsgSpecParser(data)
str_obj_model = ppp.parse() # object model from string serialization
self.assertIsNotNone(str_obj_model)
self.assertTrue(isinstance(str_obj_model, M.MsgSpec))
field_nbr = 0
# XXX FAILS:
self.round_trip_to_wire_format(
# pylint: disable=no-member
chan, field_nbr, CoreTypes.MSG_SPEC, str_obj_model)
if __name__ == '__main__':
unittest.main()
```
#### File: fieldz/tests/test_solo.py
```python
import time
import unittest
from rnglib import SimpleRNG
from fieldz.reg import NodeReg
SOLO = """
protocol org.xlattice.fieldz.test
message SoloMsg
val vuint32 # a single unsigned value, a required field
"""
class TestSolo(unittest.TestCase):
"""
Send the definition of the msg_spec Solo down a channel followed
by a single instance of the msg_spec.
"""
def setUp(self):
self.rng = SimpleRNG(time.time())
def tearDown(self):
pass
# utility functions #############################################
def dump_buffer(self, buf):
for i in range(16):
print("0x%02x " % buf[i], end=' ')
print()
# actual unit tests #############################################
def test_solo(self):
# Create a registry, write_reg. (Don't need persistence at this
# stage.)
write_reg = NodeReg() # 2012-11-11 was Registry()
# Create writer, a TFWriter, and so wb, the buffer we are going to
# write to.
# Deserialize SOLO as soloSpec, confirming that it is an instance
# of MsgSpec.
# Register the soloSpec object with write_reg.
# Verify that this has automatically created a ProtoSpec instance
# for org.xlattice.fieldz.test
# Write soloSpec, the SoloMsg msg_spec, to the buffer (this is a
# class definition).
# Create an instance of SoloMsg, set its val field to 97,
# and write that instance to the data buffer
# Create a TFReader using wb, the same buffer as writer. The buffer's
# limit will be used to see how much we read from the buffer.
# wb.position is reset to zero.
# Create a separate read registry, read_reg.
read_reg = NodeReg()
# Read the first message from the TFReader, deserializing it
# as a ProtoSpec (questionable)
# Prove that this protoSpec is identical to the writer's protoSpec
# Read the next message from the TFReader and deserialize it.
# Verify that this was a msg_spec for SoloMsg
# Add the msg_spec to the reader's registry.
# Read the next message from the TFReader and deserialize it.
# Verify that this is an instance of SoloMsg and that its val
# field evaluates to 97.
pass
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "jddixon/magicsack",
"score": 3
}
|
#### File: src/magicsack/__init__.py
```python
import binascii
import os
# NEEDED NEXT:
from Crypto.Cipher import AES
from xlattice import HashTypes
from nlhtree import NLHLeaf
from buildlist import BuildList
from xlcrypto import AES_BLOCK_BYTES
from xlcrypto.padding import add_pkcs7_padding, strip_pkcs7_padding
from xlcrypto.hash import XLSHA2
from xlcrypto.keyderiv import pbkdf2
__all__ = ['__version__', '__version_date__',
'check_puzzle',
'devise_puzzle',
'generate_key',
'make_named_value_leaf', 'name_from_title',
'write_build_list', ]
__version__ = '0.4.13'
__version_date__ = '2018-03-07'
class Config(object):
""" Configuration information. """
def __init__(self, salt, u_dir):
self._salt = salt
self._u_dir = u_dir
@property
def salt(self):
""" Return the salt (some random bytes to confuse hackers). """
return self._salt
@property
def u_dir(self):
""" Path to the store. """
return self._u_dir
class MagicSackError(RuntimeError):
""" Wrapper for errors associated with this package. """
pass
def name_from_title(title):
""" convert a title into an acceptable directory name """
    txt = title.strip()  # strip off leading & trailing blanks
chars = list(txt) # atomize the title
for ndx, char in enumerate(chars):
if char == ' ':
chars[ndx] = '_'
elif char == '(':
chars[ndx] = '%28'
elif char == ')':
chars[ndx] = '%29'
elif char == '/':
chars[ndx] = '%2F'
elif char == '\\':
chars[ndx] = '%5C'
return ''.join(chars)
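    # Hedged example: name_from_title("  My (draft) notes  ") would return
    # "My_%28draft%29_notes" -- blanks become underscores and the mapped
    # punctuation characters are percent-encoded as above.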
def generate_key(pass_phrase, salt, iterations=10000):
"""
pass_phrase is a string which may not be empty. salt is a
byte array, conventionally either 8 or 16 bytes. The
key returned is a 256-bit value.
"""
# THESE CHECKS SHOULD BE IN THE LIBRARY #####
if not pass_phrase or pass_phrase == '':
raise RuntimeError("empty pass_phrase")
if not salt:
raise RuntimeError("you must supply a salt")
# it is also possible to set the hash function used; it defaults
# to HMAC-SHA1
# END LIBRARY CHECKS ########################
# return PBKDF2(pass_phrase, salt, iterations=iterations).read(32)
return pbkdf2(pass_phrase, salt, hashtype=HashTypes.SHA2,
iterations=iterations)
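    # Hedged usage sketch for generate_key() (salt value is illustrative):
    #   salt = os.urandom(16)
    #   key = generate_key('my pass phrase', salt)  # 256-bit key per docstring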
def devise_puzzle(pass_phrase, salt, rng, iterations=1000):
"""
Create the puzzle that the user has to solve (provide a key for)
in order to access the Magic Sack.
"""
key = generate_key(pass_phrase, salt, iterations)
junk = rng.some_bytes(2 * AES_BLOCK_BYTES)
iv_ = bytes(junk[:AES_BLOCK_BYTES])
junk0 = junk[AES_BLOCK_BYTES: AES_BLOCK_BYTES + 8]
junk2 = junk[AES_BLOCK_BYTES + 8:]
data = junk0 + salt + junk2
padded = bytes(add_pkcs7_padding(data, AES_BLOCK_BYTES))
# DEBUG
# print("devise_puzzle:")
# print(" key %s" % binascii.b2a_hex(key))
# print(" iv_ %s" % binascii.b2a_hex(iv_))
# print(" salt %s" % binascii.b2a_hex(salt))
# print(" padded %s" % binascii.b2a_hex(padded))
# END
cipher = AES.new(key, AES.MODE_CBC, iv_)
puzzle = bytes(iv_ + cipher.encrypt(padded))
return puzzle
def check_puzzle(puzzle, pass_phrase, salt, iterations=10000):
"""
Determine the key then decipher the puzzle, verifying that
the copy of the salt embedded in the puzzle is the same as
the salt from the config file. Return whether verification
succeeded.
"""
key = generate_key(pass_phrase, salt, iterations)
iv_ = puzzle[:AES_BLOCK_BYTES]
cipher = AES.new(key, AES.MODE_CBC, iv_)
decrypted = cipher.decrypt(puzzle[AES_BLOCK_BYTES:])
# DEBUG
# print("check_puzzle:")
# print(" key %s" % binascii.b2a_hex(key))
# print(" iv_ %s" % binascii.b2a_hex(iv_))
# print(" salt %s" % binascii.b2a_hex(salt))
# print(" decrypted %s" % binascii.b2a_hex(decrypted))
# END
data = strip_pkcs7_padding(decrypted, AES_BLOCK_BYTES)
soln = bytes(data[8:8 + AES_BLOCK_BYTES])
return soln == salt, key
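# Hedged roundtrip sketch for devise_puzzle()/check_puzzle(): 'rng' is assumed
# to expose some_bytes() as used above. Note the differing default iteration
# counts (1000 vs 10000), so pass the same value to both calls:
#   puzzle = devise_puzzle(pass_phrase, salt, rng, iterations=10000)
#   ok, key = check_puzzle(puzzle, pass_phrase, salt, iterations=10000)
#   # ok is True when the salt recovered from the puzzle matches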
# ACTIONS -----------------------------------------------------------
def insert_named_value(global_ns, name, data):
"""
Pad and encrypt the data, writing the encrypted value into u_dir.
If successful, return an NLHLeaf.
"""
u_dir = global_ns.u_dir
u_path = global_ns.u_path
hashtype = global_ns.hashtype
padded = add_pkcs7_padding(data, AES_BLOCK_BYTES)
iv_ = bytes(global_ns.rng.some_bytes(AES_BLOCK_BYTES))
cipher = AES.new(global_ns.key, AES.MODE_CBC, iv_)
encrypted = cipher.encrypt(padded)
# hash and encrypt the data ---------------------------
sha = XLSHA2()
sha.update(encrypted)
bin_hash = sha.digest()
hex_hash = binascii.b2a_hex(bin_hash).decode('utf-8')
# DEBUG
print("len(encrypted) = %d" % len(encrypted))
print("len(hex_hash) = %d" % len(hex_hash))
print("u_path = %s" % u_path)
# END
# add the encrypted data to u_dir -----------------------
length, hash2 = u_dir.put_data(encrypted, hex_hash)
if hex_hash != hash2:
raise MagicSackError(
"INTERNAL ERROR: content key was '%s' but u returned '%s'" % (
hex_hash, hash2))
if len(encrypted) != length:
raise MagicSackError("length encrypted %d but %d bytes written" % (
len(encrypted), length))
return NLHLeaf(name, bin_hash, hashtype)
def make_named_value_leaf(global_ns, name, data):
""" Given its name and data, insert (name, hash) into the NLHTree. """
return insert_named_value(global_ns, name, data)
def add_a_file(global_ns, path_to_file, list_path=None):
"""
Add the contents of a single file to the nlhTree and the content-keyed
store. The file is located at 'path_to_file'. Its name in the NLHTree
will be 'list_path'. If list_path is not set, it defaults to path_to_file.
Return a possibly empty status string.
"""
key = global_ns.key
rng = global_ns.rng
tree = global_ns.tree
u_dir = global_ns.u_dir
hashtype = global_ns.hashtype
status = ''
# XXX AES KEY IS NOT KNOWN XXX
if not os.path.exists(path_to_file):
status = 'file not found: %s' % path_to_file
if not status:
# -----------------------------------------------------------
# NOTE CRITICALLY THIS ASSUMES that the file can be read into memory
# as a single operation; chunking is not required.
# -----------------------------------------------------------
# read, pad, and encrypt the file -----------------
with open(path_to_file, 'rb') as file:
data = file.read()
padded = add_pkcs7_padding(data, AES_BLOCK_BYTES)
iv_ = rng.some_bytes(AES_BLOCK_BYTES)
cipher = AES.new(key, AES.MODE_CBC, iv_)
encrypted = cipher.encrypt(padded)
# hash the file and add it to u_dir ----------------
sha = XLSHA2()
sha.update(encrypted)
hex_hash = sha.hexdigest()
length, hash_back = u_dir.put_data(encrypted, hex_hash)
        if hash_back != hex_hash:
status =\
"INTERNAL ERROR: content key was '%s' but u returned '%s'" % (
hex_hash, hash_back)
if not status and len(encrypted) != length:
status = "length encrypted %d but %d bytes written" % (
len(encrypted), length)
if not status:
# add the file to the NLHTree ---------------------
if not list_path:
list_path = path_to_file
leaf = NLHLeaf(list_path, hex_hash, hashtype)
tree.insert(leaf, hashtype)
return status
# BUILD LIST --------------------------------------------------------
def write_build_list(global_ns):
""" Serialize the BuildList and write it to disk. """
key = global_ns.key
magic_path = global_ns.magic_path
rng = global_ns.rng
title = global_ns.title
tree = global_ns.tree
build_list = BuildList(title, global_ns.sk_, tree)
# sign build list, encrypt, write to disk -------------
build_list.sign(global_ns.sk_priv_)
text = build_list.__str__()
# DEBUG
print("BUILD LIST:\n%s" % text)
# END
padded = add_pkcs7_padding(text.encode('utf-8'), AES_BLOCK_BYTES)
iv_ = bytes(rng.some_bytes(AES_BLOCK_BYTES))
cipher = AES.new(key, AES.MODE_CBC, iv_)
encrypted = cipher.encrypt(padded)
path_to_build_list = os.path.join(magic_path, 'bVal')
with open(path_to_build_list, 'wb') as file:
file.write(encrypted)
def read_build_list(global_ns):
""" Read a serialized BuildList from the disk. """
key = global_ns.key
magic_path = global_ns.magic_path
# rng = global_ns.rng
# u_path = global_ns.u_path
hashtype = global_ns.hashtype
path_to_build_list = os.path.join(magic_path, 'bVal')
with open(path_to_build_list, 'rb') as file:
data = file.read()
iv_ = data[:AES_BLOCK_BYTES]
    ciphertext = data[AES_BLOCK_BYTES:]
cipher = AES.new(key, AES.MODE_CBC, iv_)
plaintext = cipher.decrypt(ciphertext)
text = strip_pkcs7_padding(plaintext, AES_BLOCK_BYTES).decode('utf-8')
build_list = BuildList.parse(text, hashtype)
if not build_list.verify():
raise MagicSackError("could not verify digital signature on BuildList")
global_ns.timestamp = build_list.timestamp
global_ns.title = build_list.title
global_ns.tree = build_list.tree
# retrieve __ckPriv__ and __sk_priv__ hashes from the BuildList, and
# use these to extract their binary values from u_dir
# Retrieve any top-level leaf nodes whose names begin with double
# underscores ('__'). Regard these as reserved names. For any
# such keys, add the key/value combination to global_ns, where the
# value is a hex_hash.
# NOTE STUB NOTE
```
|
{
"source": "jddixon/merkletree",
"score": 3
}
|
#### File: src/merkletree/__init__.py
```python
import binascii
import os
import re
import sys
from stat import S_ISDIR
from xlattice import(SHA1_BIN_LEN, SHA1_BIN_NONE, SHA1_HEX_NONE,
SHA2_BIN_LEN, SHA2_BIN_NONE, SHA2_HEX_NONE,
SHA3_BIN_LEN, SHA3_BIN_NONE, SHA3_HEX_NONE,
BLAKE2B_256_BIN_LEN, BLAKE2B_256_BIN_NONE,
BLAKE2B_256_HEX_NONE,
HashTypes, check_hashtype)
from xlutil import make_ex_re, make_match_re
from xlcrypto import SP # for getSpaces()
from xlcrypto.hash import XLSHA1, XLSHA2, XLSHA3, XLBLAKE2B_256
from xlu import(file_sha1bin, file_sha2bin, file_sha3bin, file_blake2b_256_bin)
__all__ = ['__version__', '__version_date__',
# BELONGS IN xlattice_py:
'get_hash_func',
# classes
'MerkleDoc', 'MerkleLeaf', 'MerkleTree', 'MerkleParseError', ]
__version__ = '5.4.0'
__version_date__ = '2018-07-26'
# -------------------------------------------------------------------
def get_hash_func(hashtype):
"""
Given a HashType, return the appropriate library SHA hash function or
None if there is no matching hash func.
XXX THIS METHOD BELONGS IN xlcrypto_py
"""
sha = None
if hashtype == HashTypes.SHA1:
sha = XLSHA1()
elif hashtype == HashTypes.SHA2:
sha = XLSHA2()
elif hashtype == HashTypes.SHA3:
sha = XLSHA3()
elif hashtype == HashTypes.BLAKE2B_256:
sha = XLBLAKE2B_256()
else:
raise NotImplementedError
return sha
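# Hedged example: get_hash_func(HashTypes.SHA2) returns a fresh XLSHA2
# instance exposing the update()/digest()/hexdigest() calls used throughout
# this module; an unknown hashtype raises NotImplementedError.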
class MerkleParseError(RuntimeError):
""" Class for MerkleTree/Doc parse errors. """
pass
class MerkleNode(object):
"""
Abstract class to which all Nodes in a MerkleDoc or MerkleTree
belong.
"""
# __slots__ = [ A PERFORMANCE ENHANCER ]
def __init__(self, name, is_leaf=False, hashtype=HashTypes.SHA2):
check_hashtype(hashtype)
self._bin_hash = None
if name is None:
raise RuntimeError("MerkleNode: null MerkleNode name")
self._name = name.strip()
if not self._name:
raise RuntimeError("MerkleNode: null or empty name")
self._is_leaf = is_leaf
self._hashtype = hashtype
@property
def hex_hash(self):
"""
Return the hash associated with the MerkleNode as a hex value.
"""
if self._bin_hash is None:
if self._hashtype == HashTypes.SHA1:
return SHA1_HEX_NONE
elif self._hashtype == HashTypes.SHA2:
return SHA2_HEX_NONE
elif self._hashtype == HashTypes.SHA3:
return SHA3_HEX_NONE
elif self._hashtype == HashTypes.BLAKE2B_256:
return BLAKE2B_256_HEX_NONE
else:
raise NotImplementedError
else:
return str(binascii.b2a_hex(self._bin_hash), 'ascii')
@hex_hash.setter
def hex_hash(self, value):
"""
Set the hash associated with the MerkleNode as a hex value.
"""
if self._bin_hash:
raise RuntimeError('attempt to set non-null hash')
self._bin_hash = bytes(binascii.a2b_hex(value))
# def bind(self): pass
@property
def bin_hash(self):
"""
Return the hash associated with the MerkleNode as a binary value.
"""
return self._bin_hash
@bin_hash.setter
def bin_hash(self, value):
if self._bin_hash:
raise RuntimeError('attempt to set non-null hash')
self._bin_hash = value
# def bound(self):
# raise RuntimeError('not implemented')
def __eq__(self, other):
raise RuntimeError('subclass must implement')
@property
def is_leaf(self):
""" Return whether this MerkleNode is a MerkleLeaf. """
return self._is_leaf
@property
def name(self):
""" Return the name associated with the MerkleNode. """
return self._name
# def path(self):
# raise RuntimeError('not implemented')
def __str__(self):
raise RuntimeError('subclass must implement')
def hashtype(self):
""" Return the SHA hash type associated with the Node. """
return self._hashtype
# -------------------------------------------------------------------
class MerkleDoc(MerkleNode):
"""
The path to a tree, and the SHA hash of the path and the treehash.
"""
__slots__ = ['_bound', '_ex_re', '_match_re', '_path',
'_tree', '_hashtype', ]
# notice the terminating forward slash and lack of newlines or CR-LF
    # THIS PATTERN WON'T CATCH SOME ERRORS; e.g., it permits '///' in paths
FIRST_LINE_RE_1 = re.compile(r'^([0-9a-f]{40}) ([a-z0-9_\-\./!:]+/)$',
re.IGNORECASE)
FIRST_LINE_RE_2 = re.compile(r'^([0-9a-f]{64}) ([a-z0-9_\-\./!:]+/)$',
re.IGNORECASE)
# XXX MUST ADD matchRE and exRE and test on their values at this level
def __init__(self, path, hashtype=HashTypes.SHA2, binding=False,
tree=None,
ex_re=None, # exclusions, which are Regular Expressions
match_re=None): # matches, also Regular Expressions
check_hashtype(hashtype)
if path is None:
raise RuntimeError("null MerkleDoc path")
if tree:
if not isinstance(tree, MerkleTree):
raise RuntimeError('tree is not a MerkleTree')
self._name = name = tree.name
elif not binding:
raise RuntimeError('null MerkleTree and not binding')
else:
raise RuntimeError("MerkleDoc binding not yet implemented")
super().__init__(name, is_leaf=False, hashtype=hashtype)
path = path.strip()
if not path:
raise RuntimeError("empty path")
if not path.endswith('/'):
path += '/'
self._path = path
self._tree = tree
if tree:
# DEBUG
# print("MerkleDoc.__init__: usingSHA = %s" % str(usingSHA))
# END
sha = get_hash_func(hashtype)
sha.update(bytes(tree.bin_hash))
sha.update(path.encode('utf-8'))
self._bin_hash = bytes(sha.digest()) # a binary value
self._ex_re = ex_re
self._match_re = match_re
if binding:
path_to_dir = os.path.join(path, tree.name)
if not os.path.exists(path_to_dir):
raise RuntimeError('no directory found at ' + path_to_dir)
else:
# XXX STUB: BIND THE TREE
self._bound = True
def __eq__(self, other):
"""ignore boundedness"""
return isinstance(other, MerkleDoc) and \
self._path == other.path and \
self._bin_hash == other.bin_hash and \
self._tree == other.tree
@property
def path(self):
"""
Return the path (in the file system) associated with a MerkleDoc.
"""
return self._path
@path.setter
def path(self, value):
# XXX CHECK value
"""
Set the path (in the file system) associated with a MerkleDoc.
"""
self._path = value
@property
def tree(self):
""" Return the MerkleTree associated with a MerkleDoc. """
return self._tree
@tree.setter
def tree(self, value):
# XXX CHECKS
self._tree = value
@property
def bound(self):
""" Whether a MerkleDoc is bound to a file. """
return self._bound
@bound.setter
def bound(self, value):
""" Set whether a MerkleDoc is bound to a file. """
# XXX validate
self._bound = value
@property
def hashtype(self):
return self._hashtype
# QUASI-CONSTRUCTORS ############################################
@staticmethod
def create_from_file_system(path_to_dir, hashtype=HashTypes.SHA2,
exclusions=None, matches=None):
"""
Create a MerkleDoc based on the information in the directory
at pathToDir. The name of the directory will be the last component
of pathToDir. Return the MerkleTree.
"""
check_hashtype(hashtype)
if not path_to_dir:
raise RuntimeError("cannot create a MerkleTree, no path set")
if not os.path.exists(path_to_dir):
raise RuntimeError(
"MerkleTree: directory '%s' does not exist" % path_to_dir)
path, _, _ = path_to_dir.rpartition('/')
if path == '':
raise RuntimeError("cannot parse inclusive path " + path_to_dir)
path += '/'
ex_re = None
if exclusions:
ex_re = make_ex_re(exclusions)
match_re = None
if matches:
match_re = make_match_re(matches)
tree = MerkleTree.create_from_file_system(path_to_dir, hashtype,
ex_re, match_re)
# creates the hash
doc = MerkleDoc(path, hashtype, False, tree, ex_re, match_re)
doc.bound = True
return doc
@staticmethod
def create_from_serialization(string, hashtype=HashTypes.SHA2):
"""
Create a MerkleDoc from string serialization (such as a file).
"""
check_hashtype(hashtype)
if string is None:
raise RuntimeError("MerkleDoc.createFromSerialization: no input")
s_array = string.split('\n') # note CR-LF
return MerkleDoc.create_from_string_array(s_array, hashtype)
@staticmethod
def create_from_string_array(string, hashtype=HashTypes.SHA2):
"""
The string array is expected to follow conventional indentation
rules, with zero indentation on the first line and some number
of leading spaces on all successive lines.
"""
check_hashtype(hashtype)
if string is None:
raise RuntimeError('null argument')
# XXX check TYPE - must be array of strings
if not string:
raise RuntimeError("empty string array")
(doc_hash, doc_path) =\
MerkleDoc.parse_first_line(string[0].rstrip())
len_hash = len(doc_hash)
if len_hash == SHA1_BIN_LEN:
if hashtype != HashTypes.SHA1:
raise RuntimeError("hash length %d inconsistent with %s" % (
len_hash, hashtype))
elif len_hash != SHA2_BIN_LEN:
raise RuntimeError("hash length %d inconsistent with %s" % (
len_hash, hashtype))
# DEBUG
# print("MerkleDoc.createFromStringArray:")
# print(" docHash = %s" % str(binascii.b2a_hex(docHash),'ascii'))
# print(" docPath = %s" % docPath)
# print(" usingSHA=%s" % str(usingSHA))
# END
tree = MerkleTree.create_from_string_array(string[1:], hashtype)
# def __init__ (self, path, binding = False, tree = None,
# exRE = None, # exclusions, which are Regular Expressions
# matchRE = None): # matches, also Regular Expressions
doc = MerkleDoc(doc_path, hashtype=hashtype, tree=tree)
return doc
# CLASS METHODS #################################################
@classmethod
def first_line_re_1(cls):
"""
Returns a reference to the regexp for SHA1 first lines. A
match finds (indent, treeHash, dirName), where indent is an
integer, the treeHash is a hex string, and dirName may have a
terminating slash.
"""
return MerkleDoc.FIRST_LINE_RE_1
@classmethod
def first_line_re_2(cls):
"""
Returns a reference to the regexp for SHA256 first lines. A
match finds (indent, treeHash, dirName), where indent is an
integer, the treeHash is a hex string, and dirName may have a
terminating slash.
"""
return MerkleDoc.FIRST_LINE_RE_2
@staticmethod
def parse_first_line(line):
""" returns binary docHash and string docPath"""
line = line.rstrip()
match_ = MerkleDoc.FIRST_LINE_RE_1.match(line)
if match_ is None:
match_ = MerkleDoc.FIRST_LINE_RE_2.match(line)
if match_ is None:
raise RuntimeError(
"MerkleDoc first line <%s> does not match expected pattern" %
line)
doc_hash = bytes(binascii.a2b_hex(match_.group(1)))
doc_path = match_.group(2) # includes terminating slash
return (doc_hash, doc_path)
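    # Hedged format note: a first line this parses looks like
    #   "<40- or 64-char hex hash> <path ending in '/'>"
    # and the returned doc_hash is the binary form of that hex string, while
    # doc_path keeps its terminating slash.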
@staticmethod
def make_ex_re(exclusions):
"""
#############################################################
THIS FUNCTION IS OBSOLETE AND SHOULD BE REPLACED WHEREVER USED
WITH xlutil::makeExRE(), WHICH USES GLOBS. This
function uses regular expressions instead of globs.
#############################################################
Compile a regular expression which ORs exclusion patterns.
"""
if exclusions is None:
exclusions = []
exclusions.append(r'^\.$')
exclusions.append(r'^\.\.$')
exclusions.append(r'^\.merkle$')
exclusions.append(r'^\.svn$') # subversion control data
# some might disagree with these:
exclusions.append(r'^junk')
exclusions.append(r'^\..*\.swp$') # vi editor files
ex_pat = '|'.join(exclusions)
return re.compile(ex_pat)
@staticmethod
def make_match_re(match_list):
"""
#############################################################
THIS FUNCTION IS OBSOLETE AND SHOULD BE REPLACED WHEREVER USED
WITH xlutil::makeMatchRE(), WHICH USES GLOBS. This
function uses regular expressions instead of globs.
#############################################################
Compile a regular expression which ORs match patterns.
"""
if match_list:
match_pat = '|'.join(match_list)
return re.compile(match_pat)
return None
# SERIALIZATION #################################################
def __str__(self):
return self.to_string()
# XXX indent is not used
def to_string(self, indent=0):
""" Convert MerkleDoc to string form. """
return ''.join([
"%s %s\n" % (self.hex_hash, self.path),
self._tree.to_string(indent)
])
# -------------------------------------------------------------------
class MerkleLeaf(MerkleNode):
""" Leaf form of MerkleNode. """
__slots__ = ['_name', '_hashtype', ]
def __init__(self, name, hashtype=HashTypes.SHA1, hash_=None):
super().__init__(name, is_leaf=True, hashtype=hashtype)
# JUNK
if name is None:
raise RuntimeError("MerkleLeaf: null MerkleLeaf name")
self._name = name.strip()
if not self._name:
raise RuntimeError("MerkleLeaf: null or empty name")
# END JUNK
# XXX VERIFY HASH IS WELL-FORMED
if hash_:
self._bin_hash = hash_
else:
self._bin_hash = None
# IMPLEMENTATIONS OF ABSTRACT METHODS ###########################
def __eq__(self, other):
return isinstance(other, MerkleLeaf) and \
self._name == other.name and \
self._bin_hash == other.bin_hash
def __str__(self):
return self.to_string(0) # that is, no indent
# OTHER METHODS AND PROPERTIES ##################################
@staticmethod
def create_from_file_system(path_to_file, name, hashtype=HashTypes.SHA2):
"""
Returns a MerkleLeaf. The name is part of pathToFile, but is
passed to simplify the code.
"""
def report_io_error(exc):
""" Report an I/O error to stdout. """
print("error reading file %s: %s" % (
path_to_file, exc), file=sys.stderr)
if not os.path.exists(path_to_file):
print(("INTERNAL ERROR: file does not exist: " + path_to_file))
# XXX we convert from binary to hex and then right back to binary !!
if hashtype == HashTypes.SHA1:
try:
hash_ = file_sha1bin(path_to_file)
except OSError as exc:
report_io_error(exc)
hash_ = SHA1_BIN_NONE
elif hashtype == HashTypes.SHA2:
try:
hash_ = file_sha2bin(path_to_file)
except OSError as exc:
report_io_error(exc)
hash_ = SHA2_BIN_NONE
elif hashtype == HashTypes.SHA3:
try:
hash_ = file_sha3bin(path_to_file)
except OSError as exc:
report_io_error(exc)
hash_ = SHA3_BIN_NONE
elif hashtype == HashTypes.BLAKE2B_256:
try:
hash_ = file_blake2b_256_bin(path_to_file)
except OSError as exc:
report_io_error(exc)
hash_ = BLAKE2B_256_BIN_NONE
else:
raise NotImplementedError
return MerkleLeaf(name, hashtype, hash_)
def to_string(self, indent=0):
""" Serialize MerkleLeaf as string . """
if self._bin_hash is None:
if self._hashtype == HashTypes.SHA1:
hash_ = SHA1_HEX_NONE
elif self._hashtype == HashTypes.SHA2:
hash_ = SHA2_HEX_NONE
elif self._hashtype == HashTypes.SHA3:
hash_ = SHA3_HEX_NONE
elif self._hashtype == HashTypes.BLAKE2B_256:
hash_ = BLAKE2B_256_HEX_NONE
else:
raise NotImplementedError
else:
hash_ = self.hex_hash
string = "%s%s %s\n" % (SP.get_spaces(indent), hash_, self.name)
return string
# THIS GETS REPLACED BY NLHTree XXX
# PAIRLIST FUNCTIONS ############################################
# def toPair(leaf):
# """
# Given a MerkleLeaf, return its name and binary hash as a pair
# """
# # DEBUG
# print("MerkleLeaf.toPair: %s %s" % (leaf.name, leaf.binHash))
# # END
# return (leaf.name, leaf.binHash)
# @staticmethod
# def createFromPair(p):
# """
# Given p, a name/hash pair, return a MerkleLeaf.
# """
# name = p[0]; hash = p[1]
# if len(hash) == SHA1_BIN_LEN:
# usingSHA = True
# elif len(hash) == SHA2_BIN_LEN:
# usingSHA = False
# else:
# raise RuntimeError('invalid SHA hash len')
# return MerkleLeaf(name, hash, usingSHA)
# -------------------------------------------------------------------
class MerkleTree(MerkleNode):
""" Tree subclass of MerkleNode. """
__slots__ = ['_bound', '_name', '_ex_re', '_bin_hash', '_match_re',
'_nodes', '_hashtype', ]
# notice the terminating forward slash and lack of newlines or CR-LF
FIRST_LINE_RE_1 = re.compile(
r'^( *)([0-9a-f]{40}) ([a-z0-9_\-\.:]+/)$', re.IGNORECASE)
OTHER_LINE_RE_1 = re.compile(
r'^([ XYZ]*)([0-9a-f]{40}) ([a-z0-9_\$\+\-\.:~]+/?)$', re.IGNORECASE)
FIRST_LINE_RE_2 = re.compile(
r'^( *)([0-9a-f]{64}) ([a-z0-9_\-\.:]+/)$', re.IGNORECASE)
OTHER_LINE_RE_2 = re.compile(
r'^([ XYZ]*)([0-9a-f]{64}) ([a-z0-9_\$\+\-\.:_]+/?)$',
re.IGNORECASE)
#################################################################
# exRE and matchRE must have been validated by the calling code
#################################################################
    def __init__(self, name, hashtype=HashTypes.SHA2,
ex_re=None, # exclusions Regular Expression
match_re=None): # matches Regular Expression
super().__init__(name, is_leaf=False, hashtype=hashtype)
self._ex_re = ex_re
self._match_re = match_re
self._nodes = []
# IMPLEMENTATIONS OF ABSTRACT METHODS ###########################
def __eq__(self, other):
"""
This is quite wasteful. Given the nature of the merkletree,
it should only be necessary to compare top-level hashes.
"""
if other is None:
return False
if (not isinstance(other, MerkleTree)) or \
(self._name != other.name) or \
self.hex_hash != other.hex_hash or \
self.hashtype != other.hashtype:
return False
my_nodes = self.nodes
other_nodes = other.nodes
if len(my_nodes) != len(other_nodes):
return False
for ndx, my_node in enumerate(my_nodes):
other_node = other_nodes[ndx]
if not my_node.__eq__(other_node): # RECURSES
return False
return True
def __str__(self):
return self.to_string(0)
@property
def hashtype(self):
return self._hashtype
#################################################################
# METHODS LIFTED FROM bindmgr/bindlib/MerkleTree.py
#################################################################
@staticmethod
def parse_first_line(line):
""" returns indent, binary treeHash, and str dirName """
line = line.rstrip()
match_ = MerkleTree.FIRST_LINE_RE_1.match(line)
if match_ is None:
match_ = MerkleTree.FIRST_LINE_RE_2.match(line)
if match_ is None:
raise RuntimeError(
"MerkleTree first line \"%s\" doesn't match expected pattern" %
line)
indent = len(match_.group(1)) # count of leading spaces
tree_hash = bytes(binascii.a2b_hex(match_.group(2)))
dir_name = match_.group(3) # includes terminating slash
dir_name = dir_name[0:len(dir_name) - 1]
return (indent, tree_hash, dir_name)
@staticmethod
def parse_other_line(line):
""" Parse a non-first line. """
match_ = re.match(MerkleTree.OTHER_LINE_RE_1, line)
if match_ is None:
match_ = re.match(MerkleTree.OTHER_LINE_RE_2, line)
if match_ is None:
raise RuntimeError(
"MerkleTree other line <%s> does not match expected pattern" %
line)
node_depth = len(match_.group(1))
node_hash = bytes(binascii.a2b_hex(match_.group(2)))
node_name = match_.group(3)
if node_name.endswith('/'):
node_name = node_name[0:len(node_name) - 1]
is_dir = True
else:
is_dir = False
return (node_depth, node_hash, node_name, is_dir)
@staticmethod
def create_from_string_array(strings, hashtype=HashTypes.SHA2):
"""
The strings array is expected to follow conventional indentation
rules, with zero indentation on the first line and some number
of leading spaces on all successive lines.
"""
# XXX should check TYPE - must be array of strings
if not strings:
raise RuntimeError("empty strings array")
(indent, tree_hash, dir_name) =\
MerkleTree.parse_first_line(strings[0].rstrip())
len_hash = len(tree_hash)
if len_hash == SHA1_BIN_LEN:
if hashtype != HashTypes.SHA1:
raise RuntimeError("hash length %d inconsistent with %s" % (
len_hash, hashtype))
elif len_hash != SHA2_BIN_LEN:
raise RuntimeError("hash length %d inconsistent with %s" % (
len_hash, hashtype))
root_tree = MerkleTree(dir_name, hashtype) # an empty tree
root_tree.bin_hash = tree_hash
if indent != 0:
print(("INTERNAL ERROR: initial line indent %d" % indent))
stack = []
stk_depth = 0
cur_tree = root_tree
stack.append(cur_tree) # rootTree
stk_depth += 1 # always step after pushing tree
for ndx in range(1, len(strings)):
line = strings[ndx].rstrip()
if not line:
continue
# XXX SHOULD/COULD CHECK THAT HASHES ARE OF THE RIGHT TYPE
line_indent, hash_, name, is_dir = MerkleTree.parse_other_line(
line)
if line_indent < stk_depth:
while line_indent < stk_depth:
stk_depth -= 1
stack.pop()
cur_tree = stack[-1]
if not stk_depth == line_indent:
print("ERROR: stkDepth != lineIndent")
if is_dir:
# create and set attributes of new node
new_tree = MerkleTree(name, hashtype) # , curTree)
new_tree.bin_hash = hash_
# add the new node into the existing tree
cur_tree.add_node(new_tree)
stack.append(new_tree)
stk_depth += 1
cur_tree = new_tree
else:
# create and set attributes of new node
new_node = MerkleLeaf(name, hashtype, hash_)
# add the new node into the existing tree
cur_tree.add_node(new_node)
return root_tree
@staticmethod
def create_from_serialization(string, hashtype=HashTypes.SHA2):
"""
Create a MerkleTree by parsing its serialization (a single string),
given the SHA hash type used to create the MerkleTree.
"""
if string is None:
raise RuntimeError("MerkleTree.createFromSerialization: no input")
if not isinstance(string, str):
string = str(string, 'utf-8')
s_array = string.split('\n') # note CR-LF
return MerkleTree.create_from_string_array(s_array, hashtype)
@staticmethod
def create_from_file(path_to_file, hashtype=HashTypes.SHA2):
"""
Create a MerkleTree by parsing its on-disk serialization,
given the SHA hash type used to create the MerkleTree.
"""
if not os.path.exists(path_to_file):
raise RuntimeError(
"MerkleTree.createFromFile: file '%s' does not exist" %
path_to_file)
with open(path_to_file, 'r') as file:
text = file.read()
return MerkleTree.create_from_serialization(text, hashtype)
@staticmethod
def create_from_file_system(path_to_dir, hashtype=HashTypes.SHA2,
ex_re=None, match_re=None):
"""
Create a MerkleTree based on the information in the directory
at pathToDir. The name of the directory will be the last component
of pathToDir. Return the MerkleTree.
"""
check_hashtype(hashtype)
if not path_to_dir:
raise RuntimeError("cannot create a MerkleTree, no path set")
if not os.path.exists(path_to_dir):
raise RuntimeError(
"MerkleTree: directory '%s' does not exist" % path_to_dir)
(path, _, name) = path_to_dir.rpartition('/')
if not path:
raise RuntimeError("can't parse inclusive path '%s'" % path_to_dir)
tree = MerkleTree(name, hashtype, ex_re, match_re)
tree.bin_hash = None
sha = get_hash_func(hashtype)
# Create data structures for constituent files and subdirectories
# These MUST BE SORTED by the bare name to meet specs.
files = sorted(os.listdir(path_to_dir))
if files:
sha_count = 0
for file in files:
# exclusions take priority over matches
if ex_re and ex_re.search(file):
continue
if match_re and not match_re.search(file):
continue
node = None
path_to_file = os.path.join(path_to_dir, file)
                stat_info = os.lstat(path_to_file)  # does not follow symlinks
                # os.path.isdir(path) follows symbolic links
                if S_ISDIR(stat_info.st_mode):
node = MerkleTree.create_from_file_system(
path_to_file, hashtype, ex_re, match_re)
# S_ISLNK(mode) is true if symbolic link
# isfile(path) follows symbolic links
elif os.path.isfile(path_to_file): # S_ISREG(mode):
node = MerkleLeaf.create_from_file_system(
path_to_file, file, hashtype)
# otherwise, just ignore it ;-)
if node:
# update tree-level hash
if node.bin_hash:
# note empty file has null hash XXX NOT TRUE
sha_count += 1
sha.update(node.bin_hash)
# SKIP NEXT TO EASE GARBAGE COLLECTION ??? XXX
# but that won't be a good idea if we are
# invoking toString()
tree.nodes.append(node)
if sha_count:
tree.bin_hash = bytes(sha.digest())
return tree
# OTHER METHODS AND PROPERTIES ##################################
@classmethod
def first_line_re_1(cls):
"""
Returns a reference to the regexp for SHA1 first lines. A
match finds (indent, treeHash, dirName), where indent is an
integer, the treeHash is a hex string, and dirName may have a
terminating slash.
"""
return MerkleTree.FIRST_LINE_RE_1
@classmethod
def first_line_re_2(cls):
"""
        Returns a reference to the regexp for SHA2 (and other 256-bit) first lines. A
match finds (indent, treeHash, dirName), where indent is an
integer, the treeHash is a hex string, and dirName may have a
terminating slash.
"""
return MerkleTree.FIRST_LINE_RE_2
@property
def nodes(self):
"""
DANGEROUS: returns a reference to the MerkleTree's node list.
"""
return self._nodes
def add_node(self, node):
""" Add a MerkleNode to a MerkleTree. """
if node is None:
raise RuntimeError("attempt to add null node")
if not isinstance(node, MerkleTree)\
and not isinstance(node, MerkleLeaf):
raise RuntimeError("node being added not MerkleTree or MerkleLeaf")
self._nodes.append(node)
# SERIALIZATION #################################################
def to_string_not_top(self, indent=0):
""" indent is the indentation to be used for the top node"""
string = [] # a list of strings
spaces = SP.get_spaces(indent)
if self._bin_hash is None:
if self._hashtype == HashTypes.SHA1:
top = "%s%s %s/\n" % (spaces, SHA1_HEX_NONE, self.name)
elif self._hashtype == HashTypes.SHA2:
top = "%s%s %s/\n" % (spaces, SHA2_HEX_NONE, self.name)
elif self._hashtype == HashTypes.SHA3:
top = "%s%s %s/\n" % (spaces, SHA3_HEX_NONE, self.name)
elif self._hashtype == HashTypes.BLAKE2B_256:
top = "%s%s %s/\n" % (spaces, BLAKE2B_256_HEX_NONE, self.name)
else:
raise NotImplementedError
else:
top = "%s%s %s/\n" % (spaces, self.hex_hash, self.name)
string.append(top)
indent += 1 # <--- LEVEL 2+ NODE
for node in self.nodes:
if isinstance(node, MerkleLeaf):
string.append(node.to_string(indent))
else:
# recurse
string.append(node.to_string_not_top(indent))
return ''.join(string)
def to_string(self, indent=0):
"""
indent is the initial indentation of the serialized list, NOT the
extra indentation added at each recursion.
        Using code should take into account that the last line is
        newline-terminated, so a split on '\n' will generate an extra
        blank line.
"""
string = [] # a list of strings
spaces = SP.get_spaces(indent)
if self._bin_hash is None:
if self._hashtype == HashTypes.SHA1:
top = "%s%s %s/\n" % (spaces, SHA1_HEX_NONE, self.name)
elif self._hashtype == HashTypes.SHA2:
top = "%s%s %s/\n" % (spaces, SHA2_HEX_NONE, self.name)
elif self._hashtype == HashTypes.SHA3:
top = "%s%s %s/\n" % (spaces, SHA3_HEX_NONE, self.name)
elif self._hashtype == HashTypes.BLAKE2B_256:
top = "%s%s %s/\n" % (spaces, BLAKE2B_256_HEX_NONE, self.name)
else:
raise NotImplementedError
else:
top = "%s%s %s/\n" % (spaces, self.hex_hash, self.name)
string.append(top) # <--- LEVEL 0 NODE
my_indent = indent + 1 # <--- LEVEL 1 NODE
for node in self.nodes:
if isinstance(node, MerkleLeaf):
string.append(node.to_string(my_indent))
else:
# recurse
string.append(node.to_string_not_top(my_indent))
return ''.join(string)
```
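A minimal round-trip sketch for the MerkleTree API implemented above, assuming the merkletree package and its xlattice dependencies are installed; the directory path is hypothetical. It builds a tree from the file system, serializes it with to_string, and rebuilds it with create_from_serialization.
```python
# Round-trip sketch (hypothetical path; assumes merkletree and
# xlattice are installed).
from xlattice import HashTypes
from merkletree import MerkleTree

PATH_TO_DIR = 'tmp/sample_dir'   # hypothetical data directory

# hash every file under the directory and roll the hashes up the tree
tree = MerkleTree.create_from_file_system(PATH_TO_DIR, HashTypes.SHA2)

# serialize: one newline-terminated, indented line per node
text = tree.to_string(0)

# rebuild from the serialization; the two trees should compare equal
tree2 = MerkleTree.create_from_serialization(text, HashTypes.SHA2)
assert tree == tree2
print(tree.hex_hash)             # top-level hash as a hex string
```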
#### File: merkletree/tests/test_merkle_tree.py
```python
import os
import shutil
import sys
import time
import unittest
from rnglib import SimpleRNG
from xlattice import (HashTypes, check_hashtype,
SHA1_HEX_NONE, SHA2_HEX_NONE, SHA3_HEX_NONE,
BLAKE2B_256_HEX_NONE)
from xlcrypto.hash import XLSHA1, XLSHA2, XLSHA3, XLBLAKE2B_256
from merkletree import MerkleTree, MerkleLeaf
ONE = 1
FOUR = 4
MAX_NAME_LEN = 8
class TestMerkleTree(unittest.TestCase):
""" Test package functionality at the Tree level. """
def setUp(self):
self.rng = SimpleRNG(time.time())
def tearDown(self):
pass
# utility functions ---------------------------------------------
def get_two_unique_directory_names(self):
""" Make two different quasi-random directory names."""
dir_name1 = self.rng.next_file_name(MAX_NAME_LEN)
dir_name2 = dir_name1
while dir_name2 == dir_name1:
dir_name2 = self.rng.next_file_name(MAX_NAME_LEN)
self.assertTrue(len(dir_name1) > 0)
self.assertTrue(len(dir_name2) > 0)
self.assertTrue(dir_name1 != dir_name2)
return (dir_name1, dir_name2)
def make_one_named_test_directory(self, name, depth, width):
""" Make a directory tree with a specific name, depth and width."""
dir_path = "tmp/%s" % name
if os.path.exists(dir_path):
if os.path.isfile(dir_path):
os.unlink(dir_path)
elif os.path.isdir(dir_path):
shutil.rmtree(dir_path)
self.rng.next_data_dir(dir_path, depth, width, 32)
return dir_path
def make_two_test_directories(self, depth, width):
""" Create two test directories with different names. """
dir_name1 = self.rng.next_file_name(MAX_NAME_LEN)
dir_path1 = self.make_one_named_test_directory(dir_name1, depth, width)
dir_name2 = dir_name1
while dir_name2 == dir_name1:
dir_name2 = self.rng.next_file_name(MAX_NAME_LEN)
dir_path2 = self.make_one_named_test_directory(dir_name2, depth, width)
return (dir_name1, dir_path1, dir_name2, dir_path2)
def verify_leaf_sha(self, node, path_to_file, hashtype):
"""
Verify a leaf node is hashed correctly, using a specific SHA hash type.
"""
self.assertTrue(os.path.exists(path_to_file))
with open(path_to_file, "rb") as file:
data = file.read()
self.assertFalse(data is None)
if hashtype == HashTypes.SHA1:
sha = XLSHA1()
elif hashtype == HashTypes.SHA2:
sha = XLSHA2()
elif hashtype == HashTypes.SHA3:
sha = XLSHA3()
elif hashtype == HashTypes.BLAKE2B:
sha = XLBLAKE2B_256()
else:
raise NotImplementedError
sha.update(data)
hash_ = sha.digest()
self.assertEqual(hash_, node.bin_hash)
def verify_tree_sha(self, node, path_to_node, hashtype):
"""
Verify tree elements are hashed correctly, assuming that the node
is a MerkleTree, using a specific SHA hash type.
"""
if node.nodes is None:
self.assertEqual(None, node.bin_hash)
else:
hash_count = 0
if hashtype == HashTypes.SHA1:
sha = XLSHA1()
elif hashtype == HashTypes.SHA2:
sha = XLSHA2()
elif hashtype == HashTypes.SHA3:
sha = XLSHA3()
elif hashtype == HashTypes.BLAKE2B:
sha = XLBLAKE2B_256()
else:
raise NotImplementedError
for node_ in node.nodes:
path_to_file = os.path.join(path_to_node, node_.name)
if isinstance(node_, MerkleLeaf):
self.verify_leaf_sha(node_, path_to_file, hashtype)
elif isinstance(node_, MerkleTree):
self.verify_tree_sha(node_, path_to_file, hashtype)
else:
self.fail("unknown node type!")
if node_.bin_hash is not None:
hash_count += 1
sha.update(node_.bin_hash)
# take care to compare values of the same type;
# node.binHash is binary, node.hexHash is hex
if hash_count == 0:
self.assertEqual(None, node.bin_hash)
else:
self.assertEqual(sha.digest(), node.bin_hash)
# unit tests ----------------------------------------------------
def test_pathless_unbound(self):
"""
Test basic characteristics of very simple MerkleTrees created
using our standard SHA hash types.
"""
for using in [HashTypes.SHA1, HashTypes.SHA2,
HashTypes.SHA3, HashTypes.BLAKE2B]:
self.do_test_pathless_unbound(using)
def do_test_pathless_unbound(self, hashtype):
"""
Test basic characteristics of very simple MerkleTrees created
using a specific SHA hash type.
"""
(dir_name1, dir_name2) = self.get_two_unique_directory_names()
check_hashtype(hashtype)
tree1 = MerkleTree(dir_name1, hashtype)
self.assertEqual(dir_name1, tree1.name)
if hashtype == HashTypes.SHA1:
self.assertEqual(SHA1_HEX_NONE, tree1.hex_hash)
elif hashtype == HashTypes.SHA2:
self.assertEqual(SHA2_HEX_NONE, tree1.hex_hash)
elif hashtype == HashTypes.SHA3:
self.assertEqual(SHA3_HEX_NONE, tree1.hex_hash)
elif hashtype == HashTypes.BLAKE2B_256:
self.assertEqual(BLAKE2B_256_HEX_NONE, tree1.hex_hash)
else:
raise NotImplementedError
tree2 = MerkleTree(dir_name2, hashtype)
self.assertEqual(dir_name2, tree2.name)
# these tests remain skimpy
self.assertFalse(tree1 is None)
self.assertTrue(tree1 == tree1)
self.assertFalse(tree1 == tree2)
tree1_str = tree1.to_string(0)
# there should be no indent on the first line
self.assertFalse(tree1_str[0] == ' ')
# no extra lines should be added
lines = tree1_str.split('\n')
        # this split generates an extra blank line, because the serialization
        # ends with a newline
if lines[-1] == '':
lines = lines[:-1]
self.assertEqual(1, len(lines))
tree1_rebuilt = MerkleTree.create_from_serialization(
tree1_str, hashtype)
self.assertTrue(tree1 == tree1_rebuilt)
def test_bound_flat_dirs(self):
"""
Test handling of flat directories with a few data files
        using various SHA hash types.
"""
for using in [HashTypes.SHA1, HashTypes.SHA2, HashTypes.SHA3, ]:
self.do_test_bound_flat_dirs(using)
def do_test_bound_flat_dirs(self, hashtype):
"""test directory is single level, with four data files"""
check_hashtype(hashtype)
(dir_name1, dir_path1, dir_name2, dir_path2) =\
self.make_two_test_directories(ONE, FOUR)
tree1 = MerkleTree.create_from_file_system(dir_path1, hashtype)
self.assertEqual(dir_name1, tree1.name)
nodes1 = tree1.nodes
self.assertTrue(nodes1 is not None)
self.assertEqual(FOUR, len(nodes1))
self.verify_tree_sha(tree1, dir_path1, hashtype)
tree2 = MerkleTree.create_from_file_system(dir_path2, hashtype)
self.assertEqual(dir_name2, tree2.name)
nodes2 = tree2.nodes
self.assertTrue(nodes2 is not None)
self.assertEqual(FOUR, len(nodes2))
self.verify_tree_sha(tree2, dir_path2, hashtype)
self.assertFalse(tree1 is None)
self.assertTrue(tree1 == tree1)
self.assertFalse(tree1 == tree2)
tree1_str = tree1.to_string(0)
tree1_rebuilt = MerkleTree.create_from_serialization(
tree1_str, hashtype)
self.assertTrue(tree1 == tree1_rebuilt)
def test_bound_needle_dirs(self):
"""
Test directories four deep with various SHA hash types.
"""
for using in [HashTypes.SHA1, HashTypes.SHA2, HashTypes.SHA3, ]:
self.do_test_bound_needle_dirs(using)
def do_test_bound_needle_dirs(self, hashtype):
"""test directories four deep with one data file at the lowest level"""
(dir_name1, dir_path1, dir_name2, dir_path2) =\
self.make_two_test_directories(FOUR, ONE)
tree1 = MerkleTree.create_from_file_system(dir_path1, hashtype)
self.assertEqual(dir_name1, tree1.name)
nodes1 = tree1.nodes
self.assertTrue(nodes1 is not None)
self.assertEqual(ONE, len(nodes1))
self.verify_tree_sha(tree1, dir_path1, hashtype)
tree2 = MerkleTree.create_from_file_system(dir_path2, hashtype)
self.assertEqual(dir_name2, tree2.name)
nodes2 = tree2.nodes
self.assertTrue(nodes2 is not None)
self.assertEqual(ONE, len(nodes2))
self.verify_tree_sha(tree2, dir_path2, hashtype)
self.assertTrue(tree1 == tree1)
self.assertFalse(tree1 == tree2)
tree1_str = tree1.to_string(0)
tree1_rebuilt = MerkleTree.create_from_serialization(
tree1_str, hashtype)
# # DEBUG
# print "NEEDLEDIR TREE1:\n" + tree1Str
# print "REBUILT TREE1:\n" + tree1Rebuilt.toString("")
# # END
self.assertTrue(tree1 == tree1_rebuilt)
# tests of bugs previously found --------------------------------
def test_gray_boxes_bug1(self):
"""
Verify that bug #1 in handling serialization of grayboxes website
has been corrected.
"""
serialization =\
'721a08022dd26e7be98b723f26131786fd2c0dc3 grayboxes.com/\n' +\
' fcd3973c66230b9078a86a5642b4c359fe72d7da images/\n' +\
' 15e47f4eb55197e1bfffae897e9d5ce4cba49623 grayboxes.gif\n' +\
' 2477b9ea649f3f30c6ed0aebacfa32cb8250f3df index.html\n'
# create from string array ----------------------------------
string = serialization.split('\n')
string = string[:-1]
self.assertEqual(4, len(string))
tree2 = MerkleTree.create_from_string_array(string, HashTypes.SHA1)
ser2 = tree2.to_string(0)
self.assertEqual(serialization, ser2)
# create from serialization ---------------------------------
tree1 = MerkleTree.create_from_serialization(
serialization, HashTypes.SHA1)
ser1 = tree1.to_string(0)
self.assertEqual(serialization, ser1)
self.assertTrue(tree1 == tree2)
# 2014-06-26 tagged this on here to test firstLineRE_1()
first_line = string[0]
match_ = MerkleTree.first_line_re_1().match(first_line)
self.assertTrue(match_ is not None)
self.assertEqual(match_.group(1), '') # indent
tree_hash = match_.group(2)
dir_name = match_.group(3)
self.assertEqual(tree_hash + ' ' + dir_name, first_line)
def test_xlattice_bug1(self):
"""
        This test relies on dat.xlattice.org being locally present
        and on an internally consistent merkleization.
"""
with open('tests/test_data/dat.xlattice.org', 'rb') as file:
serialization = str(file.read(), 'utf-8')
# create from serialization ---------------------------------
tree1 = MerkleTree.create_from_serialization(
serialization, HashTypes.SHA1)
# # DEBUG
# print "tree1 has %d nodes" % len(tree1.nodes)
# with open('junk.tree1', 'w') as t:
# t.write( tree1.toString(0) )
# # END
ser1 = tree1.to_string(0)
self.assertEqual(serialization, ser1)
# create from string array ----------------------------------
string = serialization.split('\n')
string = string[:-1]
self.assertEqual(2511, len(string))
tree2 = MerkleTree.create_from_string_array(string, HashTypes.SHA1)
ser2 = tree2.to_string(0)
self.assertEqual(serialization, ser2)
self.assertTrue(tree1 == tree2)
def test_gray_boxes_bug3(self):
""" Test solution to bug in handling grayboxes website. """
serialization =\
'088d0e391e1a4872329e0f7ac5d45b2025363e26c199a7' + \
'4ea39901d109afd6ba grayboxes.com/\n' +\
' 24652ddc14687866e6b1251589aee7e1e3079a87f80cd' + \
'7775214f6d837612a90 images/\n' +\
' 1eb774eef9be1e696f69a2f95711be37915aac283bb4' + \
'b34dcbaf7d032233e090 grayboxes.gif\n' +\
' 6eacebda9fd55b59c0d2e48e2ed59ce9fd683379592f8' + \
'e662b1de88e041f53c9 index.html\n'
# create from string array ----------------------------------
string = serialization.split('\n')
string = string[:-1]
self.assertEqual(4, len(string))
tree2 = MerkleTree.create_from_string_array(string, HashTypes.SHA2)
ser2 = tree2.to_string(0)
self.assertEqual(serialization, ser2)
# create from serialization ---------------------------------
tree1 = MerkleTree.create_from_serialization(
serialization, HashTypes.SHA2)
ser1 = tree1.to_string(0)
self.assertEqual(serialization, ser1)
self.assertTrue(tree1 == tree2)
        # 2014-06-26 tagged this on here to test firstLineRE_2()
first_line = string[0]
match_ = MerkleTree.first_line_re_2().match(first_line)
self.assertTrue(match_ is not None)
self.assertEqual(match_.group(1), '') # indent
tree_hash = match_.group(2)
dir_name = match_.group(3)
self.assertEqual(tree_hash + ' ' + dir_name, first_line)
def test_xlattice_bug3(self):
"""
        This test relies on dat2.xlattice.org being locally present
        and on an internally consistent merkleization.
"""
with open('tests/test_data/dat2.xlattice.org', 'rb') as file:
serialization = str(file.read(), 'utf-8')
# create from serialization ---------------------------------
tree1 = MerkleTree.create_from_serialization(
serialization, HashTypes.SHA2)
ser1 = tree1.to_string(0)
self.assertEqual(serialization, ser1)
# create from string array ----------------------------------
string = serialization.split('\n')
string = string[:-1]
self.assertEqual(2511, len(string))
tree2 = MerkleTree.create_from_string_array(string, HashTypes.SHA2)
ser2 = tree2.to_string(0)
self.assertEqual(serialization, ser2)
self.assertTrue(tree1 == tree2)
if __name__ == '__main__':
unittest.main()
```
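The first-line regexps exercised in test_gray_boxes_bug1 and test_gray_boxes_bug3 can also be applied directly to the top line of a serialization; a small sketch, reusing the SHA1 sample line from the test above and assuming merkletree is installed.
```python
# Parse the top line of a SHA1 serialization (sample line taken from
# test_gray_boxes_bug1 above).
from merkletree import MerkleTree

first_line = '721a08022dd26e7be98b723f26131786fd2c0dc3 grayboxes.com/'
match = MerkleTree.first_line_re_1().match(first_line)
if match:
    indent = match.group(1)      # '' for a top-level (unindented) line
    tree_hash = match.group(2)   # 40 hex digits for SHA1
    dir_name = match.group(3)    # directory name, with trailing slash
    print(indent, tree_hash, dir_name)
```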
|
{
"source": "jddixon/nlhtree_py",
"score": 3
}
|
#### File: nlhtree_py/tests/test_drop_from_u.py
```python
import os
import sys
import time
import unittest
from binascii import hexlify
import hashlib
from rnglib import SimpleRNG
from nlhtree import NLHTree, NLHLeaf
from xlattice import HashTypes
from xlu import UDir, DirStruc
if sys.version_info < (3, 6):
# pylint: disable=unused-import
import sha3 # monkey-patches hashlib
assert sha3 # prevent flakes warning
class TestDropFromU(unittest.TestCase):
""" Test the drop_from_u_dir functionality. """
def setUp(self):
self.rng = SimpleRNG(time.time())
def tearDown(self):
pass
def populate_tree(self, tree, data_path, u_dir, hashtype):
"""
        Generate nnn unique random values and write each one to the data
        directory, the uDir, and the tree, where nnn is at least 16.
"""
nnn = 16 + self.rng.next_int16(16)
# DEBUG
# print("nnn = %d" % nnn)
        # END
values = []
hashes = []
for count in range(nnn):
# generate datum ------------------------------
datum = self.rng.some_bytes(32 + self.rng.next_int16(32))
values.append(datum)
# generate hash = bin_key ----------------------
if hashtype == HashTypes.SHA1:
sha = hashlib.sha1()
elif hashtype == HashTypes.SHA2:
sha = hashlib.sha256()
elif hashtype == HashTypes.SHA3:
sha = hashlib.sha3_256()
elif hashtype == HashTypes.BLAKE2B:
sha = hashlib.blake2b(digest_size=32)
else:
raise NotImplementedError
sha.update(datum)
bin_key = sha.digest()
hex_key = sha.hexdigest()
hashes.append(bin_key)
# write data file -----------------------------
file_name = 'value%04d' % count
path_to_file = os.path.join(data_path, file_name)
with open(path_to_file, 'wb') as file:
# DEBUG
# print("writing %s to %s" % (hex_key, path_to_file))
# END
file.write(datum)
# insert leaf into tree -----------------------
# path_from_top = os.path.join(top_name, file_name)
leaf = NLHLeaf(file_name, bin_key, hashtype)
tree.insert(leaf)
# DEBUG
# print(" inserting <%s %s>" % (leaf.name, leaf.hex_hash))
# END
# write data into uDir ------------------------
u_dir.put_data(datum, hex_key)
return values, hashes
def generate_udt(self, struc, hashtype):
"""
Generate under ./tmp a data directory with random content,
a uDir containing the same data, and an NLHTree that matches.
        uDir has the directory structure requested (DIR_FLAT, DIR16x16,
        DIR256x256, etc). Hashes are computed with the hash type requested
        (hashtype).
        values is a list of binary values, each the content of a file
        under data_path. Each value contains a non-zero number of bytes.
        hashes is a list of the SHA hashes of the values. Each hash
        is a binary value; if using SHA1 it consists of 20 bytes.
        Returns (u_path, data_path, tree, hashes, values).
"""
# make a unique U directory under ./tmp/
os.makedirs('tmp', mode=0o755, exist_ok=True)
u_root_name = self.rng.next_file_name(8)
u_path = os.path.join('tmp', u_root_name)
while os.path.exists(u_path):
u_root_name = self.rng.next_file_name(8)
u_path = os.path.join('tmp', u_root_name)
# DEBUG
# print("u_root_name = %s" % u_root_name)
# END
# create uDir and the NLHTree
u_dir = UDir(u_path, struc, hashtype)
self.assertTrue(os.path.exists(u_path))
# make a unique data directory under tmp/
data_tmp = self.rng.next_file_name(8)
tmp_path = os.path.join('tmp', data_tmp)
while os.path.exists(tmp_path):
data_tmp = self.rng.next_file_name(8)
tmp_path = os.path.join('tmp', data_tmp)
# dataDir must have same base name as NLHTree
top_name = self.rng.next_file_name(8)
data_path = os.path.join(tmp_path, top_name)
os.makedirs(data_path, mode=0o755)
# DEBUG
# print("data_tmp = %s" % data_tmp)
# print("top_name = %s" % top_name)
# print('data_path = %s' % data_path)
# END
tree = NLHTree(top_name, hashtype)
values, hashes = self.populate_tree(tree, data_path, u_dir, hashtype)
return u_path, data_path, tree, hashes, values
# ---------------------------------------------------------------
def do_test_with_ephemeral_tree(self, struc, hashtype):
"""
Generate a tmp/ subdirectory containing a quasi-random data
directory and corresponding uDir and NLHTree serialization.
        We use the directory structure (struc) and hash type (hashtype)
indicated, running various consistency tests on the three.
"""
u_path, data_path, tree, hashes, values = self.generate_udt(
struc, hashtype)
# DEBUG
# print("TREE:\n%s" % tree)
# END
# verify that the dataDir matches the nlhTree
tree2 = NLHTree.create_from_file_system(data_path, hashtype)
# DEBUG
# print("TREE2:\n%s" % tree2)
# END
self.assertEqual(tree2, tree)
nnn = len(values) # number of values present
hex_hashes = []
for count in range(nnn):
hex_hashes.append(hexlify(hashes[count]).decode('ascii'))
ndxes = [ndx for ndx in range(nnn)] # indexes into lists
self.rng.shuffle(ndxes) # shuffled
kkk = self.rng.next_int16(nnn) # we will drop this many indexes
# DEBUG
# print("dropping %d from %d elements" % (kkk, nnn))
# END
drop_me = ndxes[0:kkk] # indexes of values to drop
keep_me = ndxes[kkk:] # of those which should still be present
# construct an NLHTree containing values to be dropped from uDir
clone = tree.clone()
for count in keep_me:
name = 'value%04d' % count
clone.delete(name) # the parameter is a glob !
        # these values should be absent from clone: they won't be dropped
        # from uDir
for count in keep_me:
name = 'value%04d' % count
xxx = clone.find(name)
self.assertEqual(len(xxx), 0)
        # these values should still be present in clone: they'll be dropped
        # from uDir
for count in drop_me:
name = 'value%04d' % count
xxx = clone.find(name)
self.assertEqual(len(xxx), 1)
# the clone subtree contains those elements which will be dropped
# from uDir
unmatched = clone.drop_from_u_dir(u_path) # was unmatched
# DEBUG
# for x in unmatched: # (relPath, hash)
# print("unmatched: %s %s" % (x[0], x[1]))
# END
self.assertEqual(len(unmatched), 0)
u_dir = UDir(u_path, struc, hashtype)
self.assertTrue(os.path.exists(u_path))
# these values should still be present in uDir
for count in keep_me:
hex_hash = hex_hashes[count]
self.assertTrue(u_dir.exists(hex_hash))
# these values should NOT be present in UDir
for count in drop_me:
hex_hash = hex_hashes[count]
self.assertFalse(u_dir.exists(hex_hash))
def test_with_ephemeral_tree(self):
"""
Generate tmp/ subdirectories containing a quasi-random data
directory and corresponding uDir and NLHTree serialization,
using various directory structures and hash types.
"""
for struc in DirStruc:
for hashtype in HashTypes:
self.do_test_with_ephemeral_tree(struc, hashtype)
if __name__ == '__main__':
unittest.main()
```
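A condensed sketch of the drop_from_u_dir flow exercised above, assuming the nlhtree, xlu, and xlattice packages are installed; the store path, file name, and file content are hypothetical.
```python
# Sketch of the drop_from_u_dir flow (hypothetical names and paths).
import hashlib
import os
from nlhtree import NLHTree, NLHLeaf
from xlattice import HashTypes
from xlu import UDir, DirStruc

hashtype = HashTypes.SHA2
struc = list(DirStruc)[0]            # any directory structure will do
u_path = 'tmp/u_example'             # hypothetical content-keyed store
os.makedirs('tmp', mode=0o755, exist_ok=True)
u_dir = UDir(u_path, struc, hashtype)

# store one datum under its SHA-256 hash and mirror it in an NLHTree
datum = b'some file content'
bin_key = hashlib.sha256(datum).digest()
hex_key = hashlib.sha256(datum).hexdigest()
u_dir.put_data(datum, hex_key)
tree = NLHTree('dataDir', hashtype)
tree.insert(NLHLeaf('value0000', bin_key, hashtype))

# the clone lists exactly the entries to be removed from the store
clone = tree.clone()
unmatched = clone.drop_from_u_dir(u_path)
assert not unmatched                 # every listed hash was found
assert not u_dir.exists(hex_key)     # the datum is gone from the store
```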
#### File: nlhtree_py/tests/test_nlh_base.py
```python
import hashlib
import sys
import time
import unittest
from rnglib import SimpleRNG
from nlhtree.base import NLHBase
from xlattice import HashTypes # , check_hashtype
if sys.version_info < (3, 6):
# pylint: disable=unused-import
import sha3 # monkey-patches hashlib
assert sha3 # suppress warning
class TestNLHBase(unittest.TestCase):
""" Test basic NLHTree functions. """
def setUp(self):
self.rng = SimpleRNG(time.time())
def tearDown(self):
pass
def do_test_constructor(self, hashtype):
""" Check functionality of NLHBase constructor for specifc hash. """
name = self.rng.next_file_name(8)
base = NLHBase(name, hashtype)
self.assertEqual(base.name, name)
self.assertEqual(base.hashtype, hashtype)
root = base.root
curt = base.cur_tree
self.assertEqual(root.name, curt.name)
def test_constructor(self):
""" Check functionality of NLHBase constructor. """
for hashtype in HashTypes:
self.do_test_constructor(hashtype)
def do_test_with_simple_tree(self, hashtype):
""" XXX STUB: test simple tree with specific hash. """
if hashtype == HashTypes.SHA1:
sha = hashlib.sha1()
elif hashtype == HashTypes.SHA2:
sha = hashlib.sha256()
elif hashtype == HashTypes.SHA3:
# pylint:disable=no-member
sha = hashlib.sha3_256()
elif hashtype == HashTypes.BLAKE2B:
sha = hashlib.blake2b(digest_size=32)
else:
raise NotImplementedError
assert sha # suppress warning
def test_simple_tree(self):
""" XXX STUB: test building simple tree. """
for hashtype in HashTypes:
self.do_test_with_simple_tree(hashtype)
if __name__ == '__main__':
unittest.main()
```
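A brief sketch of the NLHBase surface checked above, assuming the nlhtree and xlattice packages are installed; the base name is hypothetical.
```python
# NLHBase constructor sketch (hypothetical name).
from nlhtree.base import NLHBase
from xlattice import HashTypes

base = NLHBase('sample', HashTypes.SHA2)
print(base.name)                              # 'sample'
print(base.hashtype)                          # HashTypes.SHA2
# the current tree starts out at the root; the two share a name
print(base.root.name == base.cur_tree.name)   # True
```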
#### File: nlhtree_py/tests/test_nlh_tree2.py
```python
import os
import shutil
# import sys
import time
import unittest
# import hashlib # unused
from rnglib import SimpleRNG
from xlattice import (HashTypes, check_hashtype)
from nlhtree import NLHTree
# if sys.version_info < (3, 6):
# # pylint: disable=unused-import
# import sha3 # monkey-patches hashlib
ONE = 1
FOUR = 4
MAX_NAME_LEN = 8
class TestNLHTree2(unittest.TestCase):
""" Test trees derived from various quasi-random directory structures. """
def setUp(self):
self.rng = SimpleRNG(time.time())
def tearDown(self):
pass
# utility functions ---------------------------------------------
def get_two_unique_directory_names(self):
""" Make two unique directory names. """
dir_name1 = self.rng.next_file_name(MAX_NAME_LEN)
dir_name2 = dir_name1
while dir_name2 == dir_name1:
dir_name2 = self.rng.next_file_name(MAX_NAME_LEN)
self.assertTrue(len(dir_name1) > 0)
self.assertTrue(len(dir_name2) > 0)
self.assertTrue(dir_name1 != dir_name2)
return (dir_name1, dir_name2)
def make_one_named_test_directory(self, name, depth, width):
"""
Create a test directory below tmp/ with specified characteristics.
"""
dir_path = "tmp/%s" % name
if os.path.exists(dir_path):
shutil.rmtree(dir_path)
self.rng.next_data_dir(dir_path, depth, width, 32)
return dir_path
def make_two_test_directories(self, depth, width):
""" Make two distinct quasi-random test directories below tmp/. """
dir_name1 = self.rng.next_file_name(MAX_NAME_LEN)
dir_path1 = self.make_one_named_test_directory(dir_name1, depth, width)
dir_name2 = dir_name1
while dir_name2 == dir_name1:
dir_name2 = self.rng.next_file_name(MAX_NAME_LEN)
dir_path2 = self.make_one_named_test_directory(dir_name2, depth, width)
return (dir_name1, dir_path1, dir_name2, dir_path2)
# unit tests ----------------------------------------------------
def test_pathless_unbound(self):
""" Test the constructor using various hash types. """
for hashtype in [HashTypes.SHA1, HashTypes.SHA2, HashTypes.SHA3, ]:
self.do_test_pathless_unbound(hashtype)
def do_test_pathless_unbound(self, hashtype):
"""
Test constructor using two directories and a specific hash type.
"""
(dir_name1, dir_name2) = self.get_two_unique_directory_names()
check_hashtype(hashtype)
tree1 = NLHTree(dir_name1, hashtype)
self.assertEqual(dir_name1, tree1.name)
self.assertEqual(tree1.hashtype, hashtype)
tree2 = NLHTree(dir_name2, hashtype)
self.assertEqual(dir_name2, tree2.name)
self.assertEqual(tree2.hashtype, hashtype)
self.assertTrue(tree1 == tree1)
self.assertFalse(tree1 == tree2)
self.assertFalse(tree1 is None)
tree1c = tree1.clone()
self.assertEqual(tree1c, tree1)
def test_bound_flat_dirs(self):
"""
Test directory is single level, with four data files, using
various hash types.
"""
for hashtype in HashTypes:
self.do_test_bound_flat_dirs(hashtype)
def do_test_bound_flat_dirs(self, hashtype):
"""
Test directory is single level, with four data files, using
specific hash type.
"""
(dir_name1, dir_path1, dir_name2, dir_path2) =\
self.make_two_test_directories(ONE, FOUR)
tree1 = NLHTree.create_from_file_system(dir_path1, hashtype)
        self.assertEqual(dir_name1, tree1.name)
nodes1 = tree1.nodes
self.assertTrue(nodes1 is not None)
self.assertEqual(FOUR, len(nodes1))
tree2 = NLHTree.create_from_file_system(dir_path2, hashtype)
self.assertEqual(dir_name2, tree2.name)
nodes2 = tree2.nodes
self.assertTrue(nodes2 is not None)
self.assertEqual(FOUR, len(nodes2))
self.assertEqual(tree1, tree1)
self.assertFalse(tree1 == tree2)
self.assertFalse(tree1 is None)
tree1c = tree1.clone()
self.assertEqual(tree1c, tree1)
def test_bound_needle_dirs1(self):
"""
Test directories four deep with one data file at the lowest level
using various hash types.
"""
for hashtype in HashTypes:
self.do_test_bound_needle_dirs(hashtype)
def do_test_bound_needle_dirs(self, hashtype):
"""
Test directories four deep with one data file at the lowest level
using specific hash type.
"""
(dir_name1, dir_path1, dir_name2, dir_path2) =\
self.make_two_test_directories(FOUR, ONE)
tree1 = NLHTree.create_from_file_system(dir_path1, hashtype)
self.assertEqual(dir_name1, tree1.name)
nodes1 = tree1.nodes
self.assertTrue(nodes1 is not None)
self.assertEqual(ONE, len(nodes1))
tree2 = NLHTree.create_from_file_system(dir_path2, hashtype)
self.assertEqual(dir_name2, tree2.name)
nodes2 = tree2.nodes
self.assertTrue(nodes2 is not None)
self.assertEqual(ONE, len(nodes2))
self.assertTrue(tree1 == tree1)
self.assertFalse(tree1 == tree2)
tree1c = tree1.clone()
self.assertEqual(tree1c, tree1)
if __name__ == '__main__':
unittest.main()
```
#### File: nlhtree_py/tests/test_walker.py
```python
import unittest
# import hashlib
from xlattice import HashTypes, check_hashtype
from nlhtree import NLHTree, NLHLeaf
# if sys.version_info < (3, 6):
# # pylint: disable=unused-import
# import sha3 # monkey-patches hashlib
EXAMPLE1 = """dataDir
data1 34463aa26c4d7214a96e6e42c3a9e8f55727c695
data2 14193743b265973e5824ca5257eef488094e19e9
subDir1
data11 58089ce970b65940dd5bf07703cd81b4306cb8f0
data12 da39a3ee5e6b4b0d3255bfef95601890afd80709
subDir2
subDir3
data31 487607ec22ee1255cc31c35506c64b1819a48090
subDir4
subDir41
subDir411
data31 0b57d3ab229a69ce5f7fad62f9fe654fe96c51bb
"""
EXAMPLE2 = """dataDir
data1 023d6598659f6a6b044ee909f3f3e6c4343850a1c5c71ef3f873c8e46b68e898
data2 29223e6e7c63529feaa441773097b68951fe8652830098b3c5c2df72fd5b7821
subDir1
data11 9394e20adb8adf9727ee6d12377aa57230eb548eb2c718d117c2e9c3aecf0e33
data12 e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
subDir2
subDir3
data31 9adc17b1d861fae64ddbc792fafb097c55d316a585359b6356af8fa8992aefac
subDir4
subDir41
subDir411
data31 4308da851a73798454e22ee6d71a4d0732b9fd1ab10e607da53bf8c88ad7d44b
"""
EXAMPLE3 = """dataDir
data1 adf6c7f792e8198631aacbbc8cee51181176f4c157d578ee226040d70f552db1
data2 c6e5bfc9f7189ef6276d0bf25f05c12c0e1dcdf10e1ac69f62a0642e9d7dfcc5
subDir1
data11 03ef2f36e12e9afaaabb71fe84c6db3a225714bfa0bd58440727932e23174886
data12 a7ffc6f8bf1ed76651c14756a061d662f580ff4de43b49fa82d80a4b80f8434a
subDir2
subDir3
data31 9400dfa37b52665f2056c93071a851a5e4c3c2c9245d39c640d9de796fa3d530
subDir4
subDir41
subDir411
data31 360ba73957c140fc28b8d6a8b7033cd2f896158fc8988fc68bb4877e4e13a048
"""
EXAMPLE4 = """dataDir
data1 ebd4b136a9332be9f67ac4e92e1f6113b97407ed585c39257fccb26789bf0274
data2 039e6f4892eed350302fea6a38ed549ca30978a99d2e13060a07a489c4e20173
subDir1
data11 1cf4912d2eb4b7dc34e1d61293c00bf33ad42c4cd6742249432ea042cf0a08d5
data12 0e5751c026e543b2e8ab2eb06099daa1d1e5df47778f7787faab45cdf12fe3a8
subDir2
subDir3
data31 00df8c11fde7c8e90b09bff1b1ae288dd83da2b272bf211b8d40a0bf7986e109
subDir4
subDir41
subDir411
data31 887aec1d8aebf76f56a3595cb33f25171e470584dbd7911d782da8744645889d
"""
class TestWalker(unittest.TestCase):
"""
Walk the example data structures (directory tree, content-keyed store,
and serialized NLHTree) verifying consistency.
"""
def setUp(self):
pass
def tearDown(self):
pass
def test_spot_check_tree(self):
"""
Run spot checks on the example files for the supported range
of hash types.
"""
for hashtype in HashTypes:
self.do_test_spot_check_tree(hashtype)
def do_test_spot_check_tree(self, hashtype):
"""
Run spot checks on the example files for the specified hash type.
"""
check_hashtype(hashtype)
# DEBUG
# print("\nSPOT CHECKS")
# END
if hashtype == HashTypes.SHA1:
rel_path_to_data = 'example1/dataDir'
else:
rel_path_to_data = 'example2/dataDir'
tree = NLHTree.create_from_file_system(rel_path_to_data, hashtype)
self.assertIsNotNone(tree)
self.assertEqual(len(tree.nodes), 6)
self.assertEqual(tree.name, 'dataDir')
node0 = tree.nodes[0]
self.assertTrue(isinstance(node0, NLHLeaf))
self.assertEqual(node0.name, 'data1')
node1 = tree.nodes[1]
self.assertTrue(isinstance(node1, NLHLeaf))
self.assertEqual(node1.name, 'data2')
node2 = tree.nodes[2]
self.assertFalse(isinstance(node2, NLHLeaf))
self.assertEqual(node2.name, 'subDir1')
self.assertEqual(len(node2.nodes), 2)
node5 = tree.nodes[5]
self.assertFalse(isinstance(node5, NLHLeaf))
self.assertEqual(node5.name, 'subDir4')
self.assertEqual(len(node5.nodes), 1)
node50 = node5.nodes[0]
self.assertFalse(isinstance(node50, NLHLeaf))
self.assertEqual(node50.name, 'subDir41')
self.assertEqual(len(node50.nodes), 1)
node500 = node50.nodes[0]
self.assertFalse(isinstance(node500, NLHLeaf))
self.assertEqual(node500.name, 'subDir411')
self.assertEqual(len(node500.nodes), 1)
node5000 = node500.nodes[0]
self.assertTrue(isinstance(node5000, NLHLeaf))
self.assertEqual(node5000.name, 'data31')
def test_walkers(self):
""" Run the walker for a number of hash types. """
for hashtype in HashTypes:
self.do_test_walkers(hashtype)
def do_test_walkers(self, hashtype):
"""
Run the walker for a specific hash type.
"""
# DEBUG
# print("\ndo_test_walkers, %s" % hashtype)
# END
check_hashtype(hashtype)
if hashtype == HashTypes.SHA1:
rel_path_to_data = 'example1/dataDir'
rel_path_to_nlh = 'example1/example.nlh'
example = EXAMPLE1
elif hashtype == HashTypes.SHA2:
rel_path_to_data = 'example2/dataDir'
rel_path_to_nlh = 'example2/example.nlh'
example = EXAMPLE2
elif hashtype == HashTypes.SHA3:
rel_path_to_data = 'example3/dataDir'
rel_path_to_nlh = 'example3/example.nlh'
example = EXAMPLE3
elif hashtype == HashTypes.BLAKE2B:
rel_path_to_data = 'example4/dataDir'
rel_path_to_nlh = 'example4/example.nlh'
example = EXAMPLE4
else:
raise NotImplementedError
tree = NLHTree.create_from_file_system(rel_path_to_data, hashtype)
self.assertIsNotNone(tree)
string = tree.__str__()
self.assertEqual(example, string) # the serialized NLHTree
# The serialized NLHTree, the string s, is identical to the example1/2
# serialization above. So we should be able to walk example1/2,
# walk the disk file, and walk the in-memory object tree and get
# the same result.
from_disk = []
from_strings = []
from_str = []
from_obj = []
# -- walk on-disk representation ----------------------------
# DEBUG
# print("\nWALK FILE ON DISK")
# sys.stdout.flush()
# END
        # a couple is a 1-tuple (directory) or a 2-tuple (file name, hash)
for couple in NLHTree.walk_file(rel_path_to_nlh, hashtype):
if len(couple) == 1:
# print(" DIR: %s" % couple[0]) # DEBUG
from_disk.append(couple)
elif len(couple) == 2:
# print(' FILE: %s %s' % (couple[0], couple[1])) # DEBUG
from_disk.append(couple)
else:
print(' unexpected couple of length %d' % len(couple))
# -- walk list-of-strings representation -------------------
lines = example.split('\n')
if lines[-1] == '':
lines = lines[:-1] # drop last line if blank
# DEBUG
# print("\nWALK LIST OF STRINGS; %s; there are %d lines" % (
# hashtype, len(lines)))
# sys.stdout.flush()
# END
for couple in NLHTree.walk_strings(lines, hashtype):
if len(couple) == 1:
# print(" DIR: %s" % couple[0]) # DEBUG
from_strings.append(couple)
elif len(couple) == 2:
# print(' FILE: %s %s' % (couple[0], couple[1])) # DEBUG
from_strings.append(couple)
else:
print(' unexpected couple of length %d' % len(couple))
# -- walk string representation -----------------------------
# DEBUG
# print("\nWALK STRING")
# sys.stdout.flush()
# END
for couple in NLHTree.walk_string(example, hashtype):
if len(couple) == 1:
# print(" DIR: %s" % couple[0]) # DEBUG
from_str.append(couple)
elif len(couple) == 2:
# print(' FILE: %s %s' % (couple[0], couple[1])) # DEBUG
from_str.append(couple)
else:
print(' unexpected couple of length %d' % len(couple))
# -- walk NLHTree object ------------------------------------
# DEBUG
# print("\nWALK OBJECT")
# sys.stdout.flush()
# hasattr(tree, '__iter__')
# hasattr(tree, '__next__')
# END
for couple in tree:
if len(couple) == 1:
# print(" DIR: %s" % couple[0]) # DEBUG
from_obj.append(couple)
elif len(couple) == 2:
# print(' FILE: %s %s' % (couple[0], couple[1])) # DEBUG
from_obj.append(couple)
else:
print(' unexpected couple of length %d' % len(couple))
# -- verify the lists are identical -------------------------
# DEBUG
# print("\nIDENTITY CHECKS %s" % hashtype)
# sys.stdout.flush()
# END
def compare_lists(a_list, b_list):
""" Verify that two lists of tuples are the same. """
self.assertEqual(len(a_list), len(b_list))
for ndx, a_val in enumerate(a_list):
self.assertEqual(a_val, b_list[ndx])
# DEBUG
# # print("FROM_DISK:")
# for i in from_disk:
# if len(i) == 1:
# print(" %s" % (i[0]))
# else:
# print(" %s %s" % (i[0], i[1]))
# print("FROM_SS:")
# for i in from_strings:
# if len(i) == 1:
# print(" %s" % (i[0]))
# else:
# print(" %s %s" % (i[0], i[1]))
# END
compare_lists(from_disk, from_strings)
# DEBUG
# print("\ncomparing from_disk, from_str")
# END
compare_lists(from_disk, from_str)
# DEBUG
# print("\ncomparing from_disk, from_obj")
# END
compare_lists(from_disk, from_obj)
# -- verify that the operations are reversible, that you can
# recover the dataDir from the listings ---------------------
# XXX NOT YET IMPLEMENTED XXX
if __name__ == '__main__':
unittest.main()
```
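The four walkers compared above (disk file, list of strings, single string, in-memory tree) all yield the same sequence of 1-tuples (directory names) and 2-tuples (file name, hash). A minimal sketch of one of them, assuming the nlhtree and xlattice packages are installed and the repository's example1/ test data is present.
```python
# Walk a serialized NLHTree held in a single string.
from nlhtree import NLHTree
from xlattice import HashTypes

tree = NLHTree.create_from_file_system('example1/dataDir', HashTypes.SHA1)
listing = str(tree)              # the newline-terminated serialization

# each yielded couple is a 1-tuple (directory) or a 2-tuple (file, hash)
for couple in NLHTree.walk_string(listing, HashTypes.SHA1):
    if len(couple) == 1:
        print("DIR:  %s" % couple[0])
    else:
        print("FILE: %s %s" % (couple[0], couple[1]))
```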
|