##################################################################
# Radio Map Construction with Regression Kriging
# Written by <NAME>, Ph.D.
# Requirements:
# - Python 3.x
# - numpy
# - scipy
# - matplotlib
##################################################################
# The MIT License (MIT)
#
# Copyright (c) 2021 <NAME>.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy.optimize import minimize
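# The helpers below call a distance() function that is not defined in this
# excerpt; a minimal Euclidean implementation consistent with its call sites
# (it must broadcast so that distance matrices can be built) would be:
def distance(x1, y1, x2, y2):
    return np.sqrt((x1 - x2)**2 + (y1 - y2)**2)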
''' sampling via multivariate normal distribution with no trend '''
"""
cov: variance-covariance matrix
"""
def gen_multivariate_normal(cov):
# print("--gen_multivariate_normal()--")
L = np.linalg.cholesky(cov)
z = np.random.standard_normal(len(cov))
return np.dot(L, z)
'''variance-covariance matrix for log-normal shadowing'''
"""
x, y: vector for measurement location
dcor: correlation distance of shadowing [m]
sigma: standard deviation [dB]
"""
def gen_varcov_matrix(x, y, dcor, sigma):
# print("--gen_varcov_matrix()--")
dmat = distance(x, y, x[:, np.newaxis], y[:, np.newaxis]) # distance matrix
tmp = 0.6931471805599453 / dcor # np.log(2.0)/dcor
return sigma * sigma * np.exp(-dmat * tmp)
'''for measurement location'''
"""
n_node: number of nodes
len_area: area length [m]
"""
def gen_location_vector(n_node, len_area):
# print("--gen_location_vector()--")
x = np.random.uniform(0.0, len_area, n_node)
y = np.random.uniform(0.0, len_area, n_node)
return x, y
def gen_location_vector_on_circle(n_node, r):
x = np.zeros(n_node)
y = np.zeros(n_node)
for i in range(n_node):
while True:
x_tmp = np.random.uniform(0.0, 2.0 * r)
y_tmp = np.random.uniform(0.0, 2.0 * r)
d = np.sqrt((x_tmp - r)**2 + (y_tmp - r)**2)
if d <= r:
break
x[i] = x_tmp
y[i] = y_tmp
return x, y
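# Illustrative usage (not from the original file): draw measurement locations
# and spatially correlated log-normal shadowing over a 200 m x 200 m area.
#   x, y = gen_location_vector(n_node=100, len_area=200.0)
#   cov = gen_varcov_matrix(x, y, dcor=20.0, sigma=8.0)
#   shadowing = gen_multivariate_normal(cov)   # one correlated sample [dB]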
''' gen empirical semivariogram via binning '''
def gen_emprical_semivar(data, d_max, num):
def gen_combinations(arr):
r, c = np.triu_indices(len(arr), 1)
return np.stack((arr[r], arr[c]), 1)
# print("--gen_emprical_semivar()--")
d_semivar = np.linspace(0.0, d_max, num)
SPAN = d_semivar[1] - d_semivar[0]
indx = gen_combinations(np.arange(len(data)))
'''gen semivariogram clouds'''
d = distance(data[indx[:, 0], 0], data[indx[:, 0], 1], data[indx[:, 1], 0], data[indx[:, 1], 1])
indx = indx[d<=d_max]
d = d[d <= d_max]
semivar = (data[indx[:, 0], 2] - data[indx[:, 1], 2])**2
'''average semivariogram clouds via binning'''
semivar_avg = np.empty(num)
for i in range(num):
d1 = d_semivar[i] - 0.5*SPAN
d2 = d_semivar[i] + 0.5*SPAN
indx_tmp = (d1 < d) * (d <= d2) #index within calculation span
semivar_tr = semivar[indx_tmp]
# semivar_avg[i] = semivar_tr.mean()
if len(semivar_tr)>0:
semivar_avg[i] = semivar_tr.mean()
else:
semivar_avg[i] = np.nan
return d_semivar[np.isnan(semivar_avg) == False], 0.5 * semivar_avg[np.isnan(semivar_avg) == False]
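# Note: judging from the indexing above, gen_emprical_semivar() expects `data`
# as an N x 3 array whose columns are [x, y, measured value]; it returns the
# bin centers and the binned empirical semivariance, e.g.
#   data = np.stack([x, y, shadowing], axis=1)
#   d_sv, sv_emp = gen_emprical_semivar(data, d_max=100.0, num=20)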
'''theoretical semivariogram (exponential)'''
def semivar_exp(d, nug, sill, ran):
    # exponential model (one common parameterization):
    # gamma(d) = nugget + sill * (1 - exp(-d / range))
    return np.abs(nug) + np.abs(sill) * (1.0 - np.exp(-d / np.abs(ran)))
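# Illustrative sketch (not part of the original module): the exponential model
# above can be fitted to the empirical semivariogram with scipy.optimize.minimize
# (imported at the top); the function name and initial guess below are examples.
def fit_semivar_exp(d_emp, semivar_emp, theta0=(0.0, 10.0, 20.0)):
    '''Least-squares fit of (nugget, sill, range) to a binned semivariogram.'''
    def sse(theta):
        return np.sum((semivar_exp(d_emp, theta[0], theta[1], theta[2]) - semivar_emp)**2)
    res = minimize(sse, np.asarray(theta0, dtype=float), method="Nelder-Mead")
    return res.x  # fitted (nugget, sill, range)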
import collections
import numpy as np
import time
import datetime
import os
import networkx as nx
import pytz
import cloudvolume
import pandas as pd
from multiwrapper import multiprocessing_utils as mu
from . import mincut
from google.api_core.retry import Retry, if_exception_type
from google.api_core.exceptions import Aborted, DeadlineExceeded, \
ServiceUnavailable
from google.auth import credentials
from google.cloud import bigtable
from google.cloud.bigtable.row_filters import TimestampRange, \
TimestampRangeFilter, ColumnRangeFilter, ValueRangeFilter, RowFilterChain, \
ColumnQualifierRegexFilter, RowFilterUnion, ConditionalRowFilter, \
PassAllFilter, BlockAllFilter, RowFilter
from google.cloud.bigtable.column_family import MaxVersionsGCRule
from typing import Any, Dict, Iterable, List, Optional, Sequence, Tuple, Union
# global variables
HOME = os.path.expanduser("~")
N_DIGITS_UINT64 = len(str(np.iinfo(np.uint64).max))
LOCK_EXPIRED_TIME_DELTA = datetime.timedelta(minutes=2, seconds=00)
UTC = pytz.UTC
# Setting environment wide credential path
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = \
HOME + "/.cloudvolume/secrets/google-secret.json"
def compute_indices_pandas(data) -> pd.Series:
""" Computes indices of all unique entries
Make sure to remap your array to a dense range starting at zero
https://stackoverflow.com/questions/33281957/faster-alternative-to-numpy-where
:param data: np.ndarray
:return: pandas Series mapping each unique value to its indices
"""
d = data.ravel()
f = lambda x: np.unravel_index(x.index, data.shape)
return pd.Series(d).groupby(d).apply(f)
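# Worked example: for data = np.array([[0, 1], [1, 0]]), the returned Series
# maps 0 -> (array([0, 1]), array([0, 1])) and 1 -> (array([0, 1]), array([1, 0])),
# i.e. the (row, col) positions of every occurrence of each unique value.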
def log_n(arr, n):
""" Computes log to base n
:param arr: array or float
:param n: int
base
:return: log of arr to base n
"""
if n == 2:
return np.log2(arr)
elif n == 10:
return np.log10(arr)
else:
return np.log(arr) / np.log(n)
def pad_node_id(node_id: np.uint64) -> str:
""" Pad node id to 20 digits
:param node_id: int
:return: str
"""
return "%.20d" % node_id
def serialize_uint64(node_id: np.uint64) -> bytes:
""" Serializes an id to be ingested by a bigtable table row
:param node_id: int
:return: bytes
"""
return serialize_key(pad_node_id(node_id)) # type: ignore
def deserialize_uint64(node_id: bytes) -> np.uint64:
""" De-serializes a node id from a BigTable row
:param node_id: bytes
:return: np.uint64
"""
return np.uint64(node_id.decode()) # type: ignore
def serialize_key(key: str) -> bytes:
""" Serializes a key to be ingested by a bigtable table row
:param key: str
:return: bytes
"""
return key.encode("utf-8")
def deserialize_key(key: bytes) -> str:
""" Deserializes a row key
:param key: bytes
:return: str
"""
return key.decode()
def row_to_byte_dict(row: bigtable.row.Row, f_id: str = None, idx: int = None
) -> Dict[int, Dict]:
""" Reads row entries to a dictionary
:param row: row
:param f_id: str
:param idx: int
:return: dict
"""
row_dict = {}
for fam_id in row.cells.keys():
row_dict[fam_id] = {}
for row_k in row.cells[fam_id].keys():
if idx is None:
row_dict[fam_id][deserialize_key(row_k)] = \
[c.value for c in row.cells[fam_id][row_k]]
else:
row_dict[fam_id][deserialize_key(row_k)] = \
row.cells[fam_id][row_k][idx].value
if f_id is not None and f_id in row_dict:
return row_dict[f_id]
elif f_id is None:
return row_dict
else:
raise Exception("Family id not found")
def compute_bitmasks(n_layers: int, fan_out: int) -> Dict[int, int]:
"""
:param n_layers: int
:param fan_out: int
:return: dict
layer -> bits for layer id
"""
bitmask_dict = {}
for i_layer in range(n_layers, 0, -1):
if i_layer == 1:
# Lock this layer to an 8 bit layout to maintain compatibility with
# the exported segmentation
# n_bits_for_layers = np.ceil(log_n(fan_out**(n_layers - 2), fan_out))
n_bits_for_layers = 8
else:
n_bits_for_layers = max(1,
np.ceil(log_n(fan_out**(n_layers - i_layer),
fan_out)))
# n_bits_for_layers = fan_out ** int(np.ceil(log_n(n_bits_for_layers, fan_out)))
n_bits_for_layers = int(n_bits_for_layers)
assert n_bits_for_layers <= 8
bitmask_dict[i_layer] = n_bits_for_layers
return bitmask_dict
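# Worked example: compute_bitmasks(n_layers=5, fan_out=2) yields
# {5: 1, 4: 1, 3: 2, 2: 3, 1: 8} -- layer 1 is pinned to 8 bits per dimension,
# while higher layers need fewer bits because each step spans more chunks.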
class ChunkedGraph(object):
def __init__(self,
table_id: str,
instance_id: str = "pychunkedgraph",
project_id: str = "neuromancer-seung-import",
chunk_size: Tuple[int, int, int] = None,
fan_out: Optional[int] = None,
n_layers: Optional[int] = None,
credentials: Optional[credentials.Credentials] = None,
client: bigtable.Client = None,
cv_path: str = None,
is_new: bool = False) -> None:
if client is not None:
self._client = client
else:
self._client = bigtable.Client(project=project_id, admin=True,
credentials=credentials)
self._instance = self.client.instance(instance_id)
self._table_id = table_id
self._table = self.instance.table(self.table_id)
if is_new:
self.check_and_create_table()
self._n_layers = self.check_and_write_table_parameters("n_layers",
n_layers)
self._fan_out = self.check_and_write_table_parameters("fan_out",
fan_out)
self._cv_path = self.check_and_write_table_parameters("cv_path",
cv_path)
self._chunk_size = self.check_and_write_table_parameters("chunk_size",
chunk_size)
self._bitmasks = compute_bitmasks(self.n_layers, self.fan_out)
self._cv = None
# Hardcoded parameters
self._n_bits_for_layer_id = 8
self._cv_mip = 3
@property
def client(self) -> bigtable.Client:
return self._client
@property
def instance(self) -> bigtable.instance.Instance:
return self._instance
@property
def table(self) -> bigtable.table.Table:
return self._table
@property
def table_id(self) -> str:
return self._table_id
@property
def instance_id(self):
return self.instance.instance_id
@property
def project_id(self):
return self.client.project
@property
def family_id(self) -> str:
return "0"
@property
def incrementer_family_id(self) -> str:
return "1"
@property
def log_family_id(self) -> str:
return "2"
@property
def cross_edge_family_id(self) -> str:
return "3"
@property
def fan_out(self) -> int:
return self._fan_out
@property
def chunk_size(self) -> np.ndarray:
return self._chunk_size
@property
def n_layers(self) -> int:
return self._n_layers
@property
def bitmasks(self) -> Dict[int, int]:
return self._bitmasks
@property
def cv_path(self) -> str:
return self._cv_path
@property
def cv_mip(self) -> int:
return self._cv_mip
@property
def cv(self) -> cloudvolume.CloudVolume:
if self._cv is None:
self._cv = cloudvolume.CloudVolume(self.cv_path, mip=self._cv_mip)
return self._cv
@property
def root_chunk_id(self):
return self.get_chunk_id(layer=int(self.n_layers), x=0, y=0, z=0)
def check_and_create_table(self) -> None:
""" Checks if table exists and creates new one if necessary """
table_ids = [t.table_id for t in self.instance.list_tables()]
if not self.table_id in table_ids:
self.table.create()
f = self.table.column_family(self.family_id)
f.create()
f_inc = self.table.column_family(self.incrementer_family_id,
gc_rule=MaxVersionsGCRule(1))
f_inc.create()
f_log = self.table.column_family(self.log_family_id)
f_log.create()
f_ce = self.table.column_family(self.cross_edge_family_id,
gc_rule=MaxVersionsGCRule(1))
f_ce.create()
print("Table created")
def check_and_write_table_parameters(self, param_key: str,
value: Optional[np.uint64] = None
) -> np.uint64:
""" Checks if a parameter already exists in the table. If it already
exists it returns the stored value, else it stores the given value. It
raises an exception if no value is passed and the parameter does not
exist, yet.
:param param_key: str
:param value: np.uint64
:return: np.uint64
value
"""
ser_param_key = serialize_key(param_key)
row = self.table.read_row(serialize_key("params"))
if row is None or ser_param_key not in row.cells[self.family_id]:
assert value is not None
if param_key in ["fan_out", "n_layers"]:
val_dict = {param_key: np.array(value,
dtype=np.uint64).tobytes()}
elif param_key in ["cv_path"]:
val_dict = {param_key: serialize_key(value)}
elif param_key in ["chunk_size"]:
val_dict = {param_key: np.array(value,
dtype=np.uint64).tobytes()}
else:
raise Exception("Unknown type for parameter")
row = self.mutate_row(serialize_key("params"), self.family_id,
val_dict)
self.bulk_write([row])
else:
value = row.cells[self.family_id][ser_param_key][0].value
if param_key in ["fan_out", "n_layers"]:
value = np.frombuffer(value, dtype=np.uint64)[0]
elif param_key in ["cv_path"]:
value = deserialize_key(value)
elif param_key in ["chunk_size"]:
value = np.frombuffer(value, dtype=np.uint64)
else:
raise Exception("Unknown key")
return value
def get_serialized_info(self):
""" Rerturns dictionary that can be used to load this ChunkedGraph
:return: dict
"""
info = {"table_id": self.table_id,
"instance_id": self.instance_id,
"project_id": self.project_id}
try:
info["credentials"] = self.client.credentials
except:
info["credentials"] = self.client._credentials
return info
def get_chunk_layer(self, node_or_chunk_id: np.uint64) -> int:
""" Extract Layer from Node ID or Chunk ID
:param node_or_chunk_id: np.uint64
:return: int
"""
return int(node_or_chunk_id) >> 64 - self._n_bits_for_layer_id
def get_chunk_coordinates(self, node_or_chunk_id: np.uint64
) -> np.ndarray:
""" Extract X, Y and Z coordinate from Node ID or Chunk ID
:param node_or_chunk_id: np.uint64
:return: Tuple(int, int, int)
"""
layer = self.get_chunk_layer(node_or_chunk_id)
bits_per_dim = self.bitmasks[layer]
x_offset = 64 - self._n_bits_for_layer_id - bits_per_dim
y_offset = x_offset - bits_per_dim
z_offset = y_offset - bits_per_dim
x = int(node_or_chunk_id) >> x_offset & 2 ** bits_per_dim - 1
y = int(node_or_chunk_id) >> y_offset & 2 ** bits_per_dim - 1
z = int(node_or_chunk_id) >> z_offset & 2 ** bits_per_dim - 1
return np.array([x, y, z])
def get_chunk_id(self, node_id: Optional[np.uint64] = None,
layer: Optional[int] = None,
x: Optional[int] = None,
y: Optional[int] = None,
z: Optional[int] = None) -> np.uint64:
""" (1) Extract Chunk ID from Node ID
(2) Build Chunk ID from Layer, X, Y and Z components
:param node_id: np.uint64
:param layer: int
:param x: int
:param y: int
:param z: int
:return: np.uint64
"""
assert node_id is not None or \
all(v is not None for v in [layer, x, y, z])
if node_id is not None:
layer = self.get_chunk_layer(node_id)
bits_per_dim = self.bitmasks[layer]
if node_id is not None:
chunk_offset = 64 - self._n_bits_for_layer_id - 3 * bits_per_dim
return np.uint64((int(node_id) >> chunk_offset) << chunk_offset)
else:
if not(x < 2 ** bits_per_dim and
y < 2 ** bits_per_dim and
z < 2 ** bits_per_dim):
raise Exception("Chunk coordinate is out of range for"
"this graph on layer %d with %d bits/dim."
"[%d, %d, %d]; max = %d."
% (layer, bits_per_dim, x, y, z,
2 ** bits_per_dim))
layer_offset = 64 - self._n_bits_for_layer_id
x_offset = layer_offset - bits_per_dim
y_offset = x_offset - bits_per_dim
z_offset = y_offset - bits_per_dim
return np.uint64(layer << layer_offset | x << x_offset |
y << y_offset | z << z_offset)
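# Worked example: with 8 bits for the layer id and 8 bits per dimension on
# layer 1, the 64-bit id layout is
#   [layer: 8][x: 8][y: 8][z: 8][segment id: remaining 32 bits],
# so get_chunk_id(layer=1, x=1, y=2, z=3) == 1 << 56 | 1 << 48 | 2 << 40 | 3 << 32,
# and get_chunk_coordinates() recovers [1, 2, 3] by shifting and masking.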
def get_chunk_ids_from_node_ids(self, node_ids: Iterable[np.uint64]
) -> np.ndarray:
""" Extract a list of Chunk IDs from a list of Node IDs
:param node_ids: np.ndarray(dtype=np.uint64)
:return: np.ndarray(dtype=np.uint64)
"""
# TODO: measure and improve performance(?)
return np.array(list(map(lambda x: self.get_chunk_id(node_id=x),
node_ids)), dtype=np.uint64)
def get_segment_id_limit(self, node_or_chunk_id: np.uint64) -> np.uint64:
""" Get maximum possible Segment ID for given Node ID or Chunk ID
:param node_or_chunk_id: np.uint64
:return: np.uint64
"""
layer = self.get_chunk_layer(node_or_chunk_id)
bits_per_dim = self.bitmasks[layer]
chunk_offset = 64 - self._n_bits_for_layer_id - 3 * bits_per_dim
return np.uint64(2 ** chunk_offset - 1)
def get_segment_id(self, node_id: np.uint64) -> np.uint64:
""" Extract Segment ID from Node ID
:param node_id: np.uint64
:return: np.uint64
"""
return node_id & self.get_segment_id_limit(node_id)
def get_node_id(self, segment_id: np.uint64,
chunk_id: Optional[np.uint64] = None,
layer: Optional[int] = None,
x: Optional[int] = None,
y: Optional[int] = None,
z: Optional[int] = None) -> np.uint64:
""" (1) Build Node ID from Segment ID and Chunk ID
(2) Build Node ID from Segment ID, Layer, X, Y and Z components
:param segment_id: np.uint64
:param chunk_id: np.uint64
:param layer: int
:param x: int
:param y: int
:param z: int
:return: np.uint64
"""
if chunk_id is not None:
return chunk_id | segment_id
else:
return self.get_chunk_id(layer=layer, x=x, y=y, z=z) | segment_id
def get_unique_segment_id_range(self, chunk_id: np.uint64, step: int = 1
) -> np.ndarray:
""" Return unique Segment ID for given Chunk ID
atomic counter
:param chunk_id: np.uint64
:param step: int
:return: np.uint64
"""
counter_key = serialize_key('counter')
# Incrementer row keys start with an "i" followed by the chunk id
row_key = serialize_key("i%s" % pad_node_id(chunk_id))
append_row = self.table.row(row_key, append=True)
append_row.increment_cell_value(self.incrementer_family_id,
counter_key, step)
# This increments the row entry and returns the value AFTER incrementing
latest_row = append_row.commit()
max_segment_id_b = latest_row[self.incrementer_family_id][counter_key][0][0]
max_segment_id = int.from_bytes(max_segment_id_b, byteorder="big")
min_segment_id = max_segment_id + 1 - step
segment_id_range = np.array(range(min_segment_id, max_segment_id + 1),
dtype=np.uint64)
return segment_id_range
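# Usage sketch (instance name and values hypothetical): for a ChunkedGraph cg,
#   seg_ids = cg.get_unique_segment_id_range(chunk_id, step=3)
# returns three consecutive, never-reused segment ids for that chunk, e.g.
# np.array([11, 12, 13], dtype=np.uint64) if the counter previously stood at 10.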
def get_unique_segment_id(self, chunk_id: np.uint64) -> np.uint64:
""" Return unique Segment ID for given Chunk ID
atomic counter
:param chunk_id: np.uint64
:return: np.uint64
"""
return self.get_unique_segment_id_range(chunk_id=chunk_id, step=1)[0]
def get_unique_node_id_range(self, chunk_id: np.uint64, step: int = 1
) -> np.ndarray:
""" Return unique Node ID range for given Chunk ID
atomic counter
:param chunk_id: np.uint64
:param step: int
:return: np.ndarray
"""
segment_ids = self.get_unique_segment_id_range(chunk_id=chunk_id,
step=step)
node_ids = np.array([self.get_node_id(segment_id, chunk_id)
for segment_id in segment_ids], dtype=np.uint64)
return node_ids
def get_unique_node_id(self, chunk_id: np.uint64) -> np.uint64:
""" Return unique Node ID for given Chunk ID
atomic counter
:param chunk_id: np.uint64
:return: np.uint64
"""
return self.get_unique_node_id_range(chunk_id=chunk_id, step=1)[0]
def get_max_node_id(self, chunk_id: np.uint64) -> np.uint64:
""" Gets maximal node id in a chunk based on the atomic counter
This is an approximation. It is not guaranteed that all ids smaller than or
equal to this id exist. However, it is guaranteed that no larger id
exists at the time this function is executed.
:return: uint64
"""
counter_key = serialize_key('counter')
# Incrementer row keys start with an "i"
row_key = serialize_key("i%s" % pad_node_id(chunk_id))
row = self.table.read_row(row_key)
# Read incrementer value
if row is not None:
max_node_id_b = row.cells[self.incrementer_family_id][counter_key][0].value
max_node_id = int.from_bytes(max_node_id_b, byteorder="big")
else:
max_node_id = 0
return np.uint64(max_node_id)
def get_unique_operation_id(self) -> np.uint64:
""" Finds a unique operation id
atomic counter
Operations essentially live in layer 0. Even if segmentation ids might
live in layer 0 one day, they would not collide with the operation ids
because we write information belonging to operations in a separate
family id.
:return: np.uint64
"""
counter_key = serialize_key('counter')
# Incrementer row keys start with an "i"
row_key = serialize_key("ioperations")
append_row = self.table.row(row_key, append=True)
append_row.increment_cell_value(self.incrementer_family_id,
counter_key, 1)
# This increments the row entry and returns the value AFTER incrementing
latest_row = append_row.commit()
operation_id_b = latest_row[self.incrementer_family_id][counter_key][0][0]
operation_id = int.from_bytes(operation_id_b, byteorder="big")
return np.uint64(operation_id)
def get_max_operation_id(self) -> np.uint64:
""" Gets maximal operation id based on the atomic counter
This is an approximation. It is not guaranteed that all ids smaller than or
equal to this id exist. However, it is guaranteed that no larger id
exists at the time this function is executed.
:return: uint64
"""
counter_key = serialize_key('counter')
# Incrementer row keys start with an "i"
row_key = serialize_key("ioperations")
row = self.table.read_row(row_key)
# Read incrementer value
if row is not None:
max_operation_id_b = row.cells[self.incrementer_family_id][counter_key][0].value
max_operation_id = int.from_bytes(max_operation_id_b,
byteorder="big")
else:
max_operation_id = 0
return np.uint64(max_operation_id)
def get_cross_chunk_edges_layer(self, cross_edges):
if len(cross_edges) == 0:
return np.array([], dtype=np.int)
cross_chunk_edge_layers = np.ones(len(cross_edges), dtype=np.int) * 2
cross_edge_coordinates = []
for cross_edge in cross_edges:
cross_edge_coordinates.append(
[self.get_chunk_coordinates(cross_edge[0]),
self.get_chunk_coordinates(cross_edge[1])])
cross_edge_coordinates = np.array(cross_edge_coordinates, dtype=np.int)
for layer in range(3, self.n_layers):
cross_edge_coordinates = cross_edge_coordinates // self.fan_out
edge_diff = np.sum(np.abs(cross_edge_coordinates[:, 0] -
cross_edge_coordinates[:, 1]), axis=1)
cross_chunk_edge_layers[edge_diff > 0] += 1
return cross_chunk_edge_layers
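# Worked example: with fan_out = 2, an edge between chunks [1, 0, 0] and
# [2, 0, 0] starts at layer 2; after one division by fan_out the coordinates
# ([0, 0, 0] vs [1, 0, 0]) still differ, so the layer becomes 3; after the next
# division they agree, so the edge is resolved at layer 3.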
def get_cross_chunk_edge_dict(self, cross_edges):
cce_layers = self.get_cross_chunk_edges_layer(cross_edges)
u_cce_layers = np.unique(cce_layers)
cross_edge_dict = {}
for l in range(2, self.n_layers):
cross_edge_dict[l] = \
np.array([], dtype=np.uint64).reshape(-1, 2)
val_dict = {}
for cc_layer in u_cce_layers:
layer_cross_edges = cross_edges[cce_layers == cc_layer]
if len(layer_cross_edges) > 0:
val_dict["atomic_cross_edges_%d" % cc_layer] = \
layer_cross_edges.tobytes()
cross_edge_dict[cc_layer] = layer_cross_edges
return cross_edge_dict
def read_row(self, node_id: np.uint64, key: str, idx: int = 0,
dtype: type = np.uint64, get_time_stamp: bool = False,
fam_id: str = None) -> Any:
""" Reads row from BigTable and takes care of serializations
:param node_id: uint64
:param key: table column
:param idx: column list index
:param dtype: np.dtype
:param get_time_stamp: bool
:param fam_id: str
:return: row entry
"""
key = serialize_key(key)
if fam_id is None:
fam_id = self.family_id
row = self.table.read_row(serialize_uint64(node_id),
filter_=ColumnQualifierRegexFilter(key))
if row is None or key not in row.cells[fam_id]:
if get_time_stamp:
return None, None
else:
return None
cell_entries = row.cells[fam_id][key]
if dtype is None:
cell_value = cell_entries[idx].value
else:
cell_value = np.frombuffer(cell_entries[idx].value, dtype=dtype)
if get_time_stamp:
return cell_value, cell_entries[idx].timestamp
else:
return cell_value
def mutate_row(self, row_key: bytes, column_family_id: str, val_dict: dict,
time_stamp: Optional[datetime.datetime] = None
) -> bigtable.row.Row:
""" Mutates a single row
:param row_key: serialized bigtable row key
:param column_family_id: str
serialized column family id
:param val_dict: dict
:param time_stamp: None or datetime
:return: list
"""
row = self.table.row(row_key)
for column, value in val_dict.items():
row.set_cell(column_family_id=column_family_id, column=column,
value=value, timestamp=time_stamp)
return row
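# Usage sketch (values hypothetical): mutate_row() only builds the row object;
# nothing is persisted until the rows are passed to bulk_write(), e.g.
#   row = cg.mutate_row(serialize_uint64(node_id), cg.family_id,
#                       {"children": children.tobytes()})
#   cg.bulk_write([row])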
def bulk_write(self, rows: Iterable[bigtable.row.DirectRow],
root_ids: Optional[Union[np.uint64,
Iterable[np.uint64]]] = None,
operation_id: Optional[np.uint64] = None,
slow_retry: bool = True,
block_size: int = 2000) -> bool:
""" Writes a list of mutated rows in bulk
WARNING: If <rows> contains the same row (same row_key) and column
key two times only the last one is effectively written to the BigTable
(even when the mutations were applied to different columns)
--> no versioning!
:param rows: list
list of mutated rows
:param root_ids: list of uint64
:param operation_id: uint64 or None
operation_id (or other unique id) that *was* used to lock the root
the bulk write is only executed if the root is still locked with
the same id.
:param slow_retry: bool
:param block_size: int
"""
if slow_retry:
initial = 5
else:
initial = 1
retry_policy = Retry(
predicate=if_exception_type((Aborted,
DeadlineExceeded,
ServiceUnavailable)),
initial=initial,
maximum=15.0,
multiplier=2.0,
deadline=LOCK_EXPIRED_TIME_DELTA.seconds)
if root_ids is not None and operation_id is not None:
if isinstance(root_ids, int):
root_ids = [root_ids]
if not self.check_and_renew_root_locks(root_ids, operation_id):
return False
for i_row in range(0, len(rows), block_size):
status = self.table.mutate_rows(rows[i_row: i_row + block_size],
retry=retry_policy)
if not all(status):
raise Exception(status)
return True
def _range_read_execution(self, start_id, end_id,
row_filter: RowFilter = None,
n_retries: int = 100):
""" Executes predefined range read (read_rows)
:param start_id: np.uint64
:param end_id: np.uint64
:param row_filter: BigTable RowFilter
:param n_retries: int
:return: dict
"""
# Set up read
range_read = self.table.read_rows(
start_key=serialize_uint64(start_id),
end_key=serialize_uint64(end_id),
# allow_row_interleaving=True,
end_inclusive=True,
filter_=row_filter)
range_read.consume_all()
# Execute read
consume_success = False
# Retry reading if any of the writes failed
i_tries = 0
while not consume_success and i_tries < n_retries:
try:
range_read.consume_all()
consume_success = True
except:
time.sleep(i_tries)
i_tries += 1
if not consume_success:
raise Exception("Unable to consume range read: "
"%d - %d -- n_retries = %d" %
(start_id, end_id, n_retries))
return range_read.rows
def range_read(self, start_id: np.uint64, end_id: np.uint64,
n_retries: int = 100, max_block_size: int = 50000,
row_keys: Optional[Iterable[str]] = None,
row_key_filters: Optional[Iterable[str]] = None,
time_stamp: datetime.datetime = datetime.datetime.max
) -> Union[
bigtable.row_data.PartialRowData,
Dict[bytes, bigtable.row_data.PartialRowData]]:
""" Reads all ids within a given range
:param start_id: np.uint64
:param end_id: np.uint64
:param n_retries: int
:param max_block_size: int
:param row_keys: list of str
more efficient read through row filters
:param row_key_filters: list of str
rows *with* this column will be ignored
:param time_stamp: datetime.datetime
:return: dict
"""
# Comply to resolution of BigTables TimeRange
time_stamp -= datetime.timedelta(
microseconds=time_stamp.microsecond % 1000)
# Create filters: time and id range
time_filter = TimestampRangeFilter(TimestampRange(end=time_stamp))
if row_keys is not None:
filters = []
for k in row_keys:
filters.append(ColumnQualifierRegexFilter(serialize_key(k)))
if len(filters) > 1:
row_filter = RowFilterUnion(filters)
else:
row_filter = filters[0]
else:
row_filter = None
if row_filter is None:
row_filter = time_filter
else:
row_filter = RowFilterChain([time_filter, row_filter])
if row_key_filters is not None:
for row_key in row_key_filters:
key_filter = ColumnRangeFilter(
column_family_id=self.family_id,
start_column=row_key,
end_column=row_key,
inclusive_start=True,
inclusive_end=True)
row_filter = ConditionalRowFilter(base_filter=key_filter,
false_filter=row_filter,
true_filter=BlockAllFilter(True))
max_block_size = np.uint64(max_block_size)
block_start_ids = range(start_id, end_id, max_block_size)
row_dict = {}
for block_start_id in block_start_ids:
block_end_id = np.uint64(block_start_id + max_block_size)
if block_end_id > end_id:
block_end_id = end_id
block_row_dict = self._range_read_execution(start_id=block_start_id,
end_id=block_end_id,
row_filter=row_filter,
n_retries=n_retries)
row_dict.update(block_row_dict)
return row_dict
def range_read_chunk(self, layer: int, x: int, y: int, z: int,
n_retries: int = 100, max_block_size: int = 1000000,
row_keys: Optional[Iterable[str]] = None,
row_key_filters: Optional[Iterable[str]] = None,
time_stamp: datetime.datetime = datetime.datetime.max,
) -> Union[
bigtable.row_data.PartialRowData,
Dict[bytes, bigtable.row_data.PartialRowData]]:
""" Reads all ids within a chunk
:param layer: int
:param x: int
:param y: int
:param z: int
:param n_retries: int
:param max_block_size: int
:param row_keys: list of str
more efficient read through row filters
:param row_key_filters: list of str
rows *with* this column will be ignored
:param time_stamp: datetime.datetime
:return: dict
"""
chunk_id = self.get_chunk_id(layer=layer, x=x, y=y, z=z)
if layer == 1:
max_segment_id = self.get_segment_id_limit(chunk_id)
max_block_size = max_segment_id + 1
else:
max_segment_id = self.get_max_node_id(chunk_id=chunk_id)
# Define BigTable keys
start_id = self.get_node_id(np.uint64(0), chunk_id=chunk_id)
end_id = self.get_node_id(max_segment_id, chunk_id=chunk_id)
try:
rr = self.range_read(start_id, end_id, n_retries=n_retries,
max_block_size=max_block_size,
row_keys=row_keys,
row_key_filters=row_key_filters,
time_stamp=time_stamp)
except:
raise Exception("Unable to consume range read: "
"[%d, %d, %d], l = %d, n_retries = %d" %
(x, y, z, layer, n_retries))
return rr
def range_read_operations(self,
time_start: datetime.datetime = datetime.datetime.min,
time_end: datetime.datetime = None,
start_id: np.uint64 = 0,
end_id: np.uint64 = None,
n_retries: int = 100,
row_keys: Optional[Iterable[str]] = None
) -> Dict[bytes, bigtable.row_data.PartialRowData]:
""" Reads all ids within a chunk
:param time_start: datetime
:param time_end: datetime
:param start_id: uint64
:param end_id: uint64
:param n_retries: int
:param row_keys: list of str
more efficient read through row filters
:return: list or yield of rows
"""
# Set defaults
if end_id is None:
end_id = self.get_max_operation_id()
if time_end is None:
time_end = datetime.datetime.utcnow()
if end_id < start_id:
return {}
# Comply to resolution of BigTables TimeRange
time_start -= datetime.timedelta(
microseconds=time_start.microsecond % 1000)
time_end -= datetime.timedelta(
microseconds=time_end.microsecond % 1000)
# Create filters: time and id range
time_filter = TimestampRangeFilter(TimestampRange(start=time_start,
end=time_end))
if row_keys is not None:
filters = []
for k in row_keys:
filters.append(ColumnQualifierRegexFilter(serialize_key(k)))
if len(filters) > 1:
row_filter = RowFilterUnion(filters)
else:
row_filter = filters[0]
else:
row_filter = None
if row_filter is None:
row_filter = time_filter
else:
row_filter = RowFilterChain([time_filter, row_filter])
# Set up read
range_read = self.table.read_rows(
start_key=serialize_uint64(start_id),
end_key=serialize_uint64(end_id),
end_inclusive=False,
filter_=row_filter)
range_read.consume_all()
# Execute read
consume_success = False
# Retry reading if any of the writes failed
i_tries = 0
while not consume_success and i_tries < n_retries:
try:
range_read.consume_all()
consume_success = True
except:
time.sleep(i_tries)
i_tries += 1
if not consume_success:
raise Exception("Unable to consume chunk range read: "
"n_retries = %d" % (n_retries))
return range_read.rows
def range_read_layer(self, layer_id: int):
""" Reads all ids within a layer
This can take a while depending on the size of the graph
:param layer_id: int
:return: list of rows
"""
raise NotImplementedError()
def test_if_nodes_are_in_same_chunk(self, node_ids: Sequence[np.uint64]
) -> bool:
""" Test whether two nodes are in the same chunk
:param node_ids: list of two ints
:return: bool
"""
assert len(node_ids) == 2
return self.get_chunk_id(node_id=node_ids[0]) == \
self.get_chunk_id(node_id=node_ids[1])
def get_chunk_id_from_coord(self, layer: int,
x: int, y: int, z: int) -> np.uint64:
""" Return ChunkID for given chunked graph layer and voxel coordinates.
:param layer: int -- ChunkedGraph layer
:param x: int -- X coordinate in voxel
:param y: int -- Y coordinate in voxel
:param z: int -- Z coordinate in voxel
:return: np.uint64 -- ChunkID
"""
base_chunk_span = int(self.fan_out) ** max(0, layer - 2)
return self.get_chunk_id(
layer=layer,
x=x // (int(self.chunk_size[0]) * base_chunk_span),
y=y // (int(self.chunk_size[1]) * base_chunk_span),
z=z // (int(self.chunk_size[2]) * base_chunk_span))
def get_atomic_id_from_coord(self, x: int, y: int, z: int,
parent_id: np.uint64, n_tries: int=5
) -> np.uint64:
""" Determines atomic id given a coordinate
:param x: int
:param y: int
:param z: int
:param parent_id: np.uint64
:param n_tries: int
:return: np.uint64 or None
"""
if self.get_chunk_layer(parent_id) == 1:
return parent_id
x /= 2**self.cv_mip
y /= 2**self.cv_mip
x = int(x)
y = int(y)
checked = []
atomic_id = None
root_id = self.get_root(parent_id)
for i_try in range(n_tries):
# Define block size -- increase by one each try
x_l = x - (i_try - 1)**2
y_l = y - (i_try - 1)**2
z_l = z - (i_try - 1)**2
x_h = x + 1 + (i_try - 1)**2
y_h = y + 1 + (i_try - 1)**2
z_h = z + 1 + (i_try - 1)**2
if x_l < 0:
x_l = 0
if y_l < 0:
y_l = 0
if z_l < 0:
z_l = 0
# Get atomic ids from cloudvolume
atomic_id_block = self.cv[x_l: x_h, y_l: y_h, z_l: z_h]
atomic_ids, atomic_id_count = np.unique(atomic_id_block,
return_counts=True)
# sort by frequency and discard those ids that have been checked
# previously
sorted_atomic_ids = atomic_ids[np.argsort(atomic_id_count)]
sorted_atomic_ids = sorted_atomic_ids[~np.in1d(sorted_atomic_ids,
checked)]
# For each candidate id check whether its root id corresponds to the
# given root id
for candidate_atomic_id in sorted_atomic_ids:
ass_root_id = self.get_root(candidate_atomic_id)
if ass_root_id == root_id:
# atomic_id is not None will be our indicator that the
# search was successful
atomic_id = candidate_atomic_id
break
else:
checked.append(candidate_atomic_id)
if atomic_id is not None:
break
# Returns None if unsuccessful
return atomic_id
def _create_split_log_row(self, operation_id: np.uint64, user_id: str,
root_ids: Sequence[np.uint64],
selected_atomic_ids: Sequence[np.uint64],
removed_edges: Sequence[np.uint64],
time_stamp: datetime.datetime
) -> bigtable.row.Row:
val_dict = {serialize_key("user"): serialize_key(user_id),
serialize_key("roots"):
np.array(root_ids, dtype=np.uint64).tobytes(),
serialize_key("atomic_ids"):
np.array(selected_atomic_ids).tobytes(),
serialize_key("removed_edges"):
np.array(removed_edges, dtype=np.uint64).tobytes()}
row = self.mutate_row(serialize_uint64(operation_id),
self.log_family_id, val_dict, time_stamp)
return row
def _create_merge_log_row(self, operation_id: np.uint64, user_id: str,
root_ids: Sequence[np.uint64],
selected_atomic_ids: Sequence[np.uint64],
time_stamp: datetime.datetime
) -> bigtable.row.Row:
val_dict = {serialize_key("user"):
serialize_key(user_id),
serialize_key("roots"):
np.array(root_ids, dtype=np.uint64).tobytes(),
serialize_key("atomic_ids"):
np.array(selected_atomic_ids).tobytes()}
row = self.mutate_row(serialize_uint64(operation_id),
self.log_family_id, val_dict, time_stamp)
return row
def add_atomic_edges_in_chunks(self, edge_id_dict: dict,
edge_aff_dict: dict, edge_area_dict: dict,
isolated_node_ids: Sequence[np.uint64],
verbose: bool = True,
time_stamp: Optional[datetime.datetime] = None):
""" Creates atomic nodes in first abstraction layer for a SINGLE chunk
and all abstract nodes in the second for the same chunk
All edges need to be from one chunk and no nodes should exist for this
chunk prior to calling this function. All cross edges have to point out
of the chunk (the first entry is the id within the chunk).
:param edge_id_dict: dict of n x 2 uint64 arrays, keyed by edge type
(in_connected, in_disconnected, cross, between_connected, between_disconnected)
:param edge_aff_dict: dict of float32 arrays (edge affinities)
:param edge_area_dict: dict of arrays (edge areas)
:param isolated_node_ids: list of uint64s
ids of nodes that have no edge in the chunked graph
:param verbose: bool
:param time_stamp: datetime
"""
if time_stamp is None:
time_stamp = datetime.datetime.utcnow()
if time_stamp.tzinfo is None:
time_stamp = UTC.localize(time_stamp)
edge_id_keys = ["in_connected", "in_disconnected", "cross",
"between_connected", "between_disconnected"]
edge_aff_keys = ["in_connected", "in_disconnected", "between_connected",
"between_disconnected"]
# Check if keys exist and include an empty array if not
n_edge_ids = 0
chunk_id = None
for edge_id_key in edge_id_keys:
if not edge_id_key in edge_id_dict:
empty_edges = np.array([], dtype=np.uint64).reshape(0, 2)
edge_id_dict[edge_id_key] = empty_edges
else:
n_edge_ids += len(edge_id_dict[edge_id_key])
if len(edge_id_dict[edge_id_key]) > 0:
node_id = edge_id_dict[edge_id_key][0, 0]
chunk_id = self.get_chunk_id(node_id)
for edge_aff_key in edge_aff_keys:
if not edge_aff_key in edge_aff_dict:
edge_aff_dict[edge_aff_key] = np.array([], dtype=np.float32)
time_start = time.time()
# Catch trivial case
if n_edge_ids == 0 and len(isolated_node_ids) == 0:
return 0
# Make parent id creation easier
if chunk_id is None:
chunk_id = self.get_chunk_id(isolated_node_ids[0])
chunk_id_c = self.get_chunk_coordinates(chunk_id)
parent_chunk_id = self.get_chunk_id(layer=2, x=chunk_id_c[0],
y=chunk_id_c[1], z=chunk_id_c[2])
# Get connected component within the chunk
chunk_node_ids = np.concatenate([
isolated_node_ids,
np.unique(edge_id_dict["in_connected"]),
np.unique(edge_id_dict["in_disconnected"]),
np.unique(edge_id_dict["cross"][:, 0]),
np.unique(edge_id_dict["between_connected"][:, 0]),
np.unique(edge_id_dict["between_disconnected"][:, 0])])
chunk_node_ids = np.unique(chunk_node_ids)
node_chunk_ids = np.array([self.get_chunk_id(c)
for c in chunk_node_ids],
dtype=np.uint64)
u_node_chunk_ids, c_node_chunk_ids = np.unique(node_chunk_ids,
return_counts=True)
if len(u_node_chunk_ids) > 1:
raise Exception("%d: %d chunk ids found in node id list. "
"Some edges might be in the wrong order. "
"Number of occurences:" %
(chunk_id, len(u_node_chunk_ids)), c_node_chunk_ids)
chunk_g = nx.Graph()
chunk_g.add_nodes_from(chunk_node_ids)
chunk_g.add_edges_from(edge_id_dict["in_connected"])
ccs = list(nx.connected_components(chunk_g))
# if verbose:
# print("CC in chunk: %.3fs" % (time.time() - time_start))
# Add rows for nodes that are in this chunk
# a connected component at a time
node_c = 0 # Just a counter for the print / speed measurement
n_ccs = len(ccs)
parent_ids = self.get_unique_node_id_range(parent_chunk_id, step=n_ccs)
time_start = time.time()
time_dict = collections.defaultdict(list)
time_start_1 = time.time()
sparse_indices = {}
remapping = {}
for k in edge_id_dict.keys():
# Circumvent datatype issues
u_ids, inv_ids = np.unique(edge_id_dict[k], return_inverse=True)
mapped_ids = np.arange(len(u_ids), dtype=np.int32)
remapped_arr = mapped_ids[inv_ids].reshape(edge_id_dict[k].shape)
sparse_indices[k] = compute_indices_pandas(remapped_arr)
remapping[k] = dict(zip(u_ids, mapped_ids))
time_dict["sparse_indices"].append(time.time() - time_start_1)
rows = []
for i_cc, cc in enumerate(ccs):
# if node_c % 1000 == 1 and verbose:
# dt = time.time() - time_start
# print("%5d at %5d - %.5fs " %
# (i_cc, node_c, dt / node_c), end="\r")
node_ids = np.array(list(cc))
u_chunk_ids = np.unique([self.get_chunk_id(n) for n in node_ids])
if len(u_chunk_ids) > 1:
print("Found multiple chunk ids:", u_chunk_ids)
raise Exception()
# Create parent id
parent_id = parent_ids[i_cc]
parent_id_b = np.array(parent_id, dtype=np.uint64).tobytes()
parent_cross_edges = np.array([], dtype=np.uint64).reshape(0, 2)
# Add rows for nodes that are in this chunk
for i_node_id, node_id in enumerate(node_ids):
# Extract edges relevant to this node
# in chunk + connected
time_start_2 = time.time()
if node_id in remapping["in_connected"]:
row_ids, column_ids = sparse_indices["in_connected"][remapping["in_connected"][node_id]]
inv_column_ids = (column_ids + 1) % 2
connected_ids = edge_id_dict["in_connected"][row_ids, inv_column_ids]
connected_affs = edge_aff_dict["in_connected"][row_ids]
connected_areas = edge_area_dict["in_connected"][row_ids]
time_dict["in_connected"].append(time.time() - time_start_2)
time_start_2 = time.time()
else:
connected_ids = np.array([], dtype=np.uint64)
connected_affs = np.array([], dtype=np.float32)
connected_areas = np.array([], dtype=np.uint64)
# in chunk + disconnected
if node_id in remapping["in_disconnected"]:
row_ids, column_ids = sparse_indices["in_disconnected"][remapping["in_disconnected"][node_id]]
inv_column_ids = (column_ids + 1) % 2
disconnected_ids = edge_id_dict["in_disconnected"][row_ids, inv_column_ids]
disconnected_affs = edge_aff_dict["in_disconnected"][row_ids]
disconnected_areas = edge_area_dict["in_disconnected"][row_ids]
time_dict["in_disconnected"].append(time.time() - time_start_2)
time_start_2 = time.time()
else:
disconnected_ids = np.array([], dtype=np.uint64)
disconnected_affs = np.array([], dtype=np.float32)
disconnected_areas = np.array([], dtype=np.uint64)
# out chunk + connected
if node_id in remapping["between_connected"]:
row_ids, column_ids = sparse_indices["between_connected"][remapping["between_connected"][node_id]]
row_ids = row_ids[column_ids == 0]
column_ids = column_ids[column_ids == 0]
inv_column_ids = (column_ids + 1) % 2
time_dict["out_connected_mask"].append(time.time() - time_start_2)
time_start_2 = time.time()
connected_ids = np.concatenate([connected_ids, edge_id_dict["between_connected"][row_ids, inv_column_ids]])
connected_affs = np.concatenate([connected_affs, edge_aff_dict["between_connected"][row_ids]])
connected_areas = np.concatenate([connected_areas, edge_area_dict["between_connected"][row_ids]])
parent_cross_edges = np.concatenate([parent_cross_edges, edge_id_dict["between_connected"][row_ids]])
time_dict["out_connected"].append(time.time() - time_start_2)
time_start_2 = time.time()
# out chunk + disconnected
if node_id in remapping["between_disconnected"]:
row_ids, column_ids = sparse_indices["between_disconnected"][remapping["between_disconnected"][node_id]]
row_ids = row_ids[column_ids == 0]
column_ids = column_ids[column_ids == 0]
inv_column_ids = (column_ids + 1) % 2
time_dict["out_disconnected_mask"].append(time.time() - time_start_2)
time_start_2 = time.time()
connected_ids = np.concatenate([connected_ids, edge_id_dict["between_disconnected"][row_ids, inv_column_ids]])
connected_affs = np.concatenate([connected_affs, edge_aff_dict["between_disconnected"][row_ids]])
connected_areas = np.concatenate([connected_areas, edge_area_dict["between_disconnected"][row_ids]])
time_dict["out_disconnected"].append(time.time() - time_start_2)
time_start_2 = time.time()
# cross
if node_id in remapping["cross"]:
row_ids, column_ids = sparse_indices["cross"][remapping["cross"][node_id]]
row_ids = row_ids[column_ids == 0]
column_ids = column_ids[column_ids == 0]
inv_column_ids = (column_ids + 1) % 2
time_dict["cross_mask"].append(time.time() - time_start_2)
time_start_2 = time.time()
connected_ids = np.concatenate([connected_ids, edge_id_dict["cross"][row_ids, inv_column_ids]])
connected_affs = np.concatenate([connected_affs, np.full((len(row_ids)), np.inf, dtype=np.float32)])
connected_areas = np.concatenate([connected_areas, np.zeros((len(row_ids)), dtype=np.uint64)])
parent_cross_edges = np.concatenate([parent_cross_edges, edge_id_dict["cross"][row_ids]])
time_dict["cross"].append(time.time() - time_start_2)
time_start_2 = time.time()
# Create node
val_dict = \
{"atomic_connected_partners": connected_ids.tobytes(),
"atomic_connected_affinities": connected_affs.tobytes(),
"atomic_connected_areas": connected_areas.tobytes(),
"atomic_disconnected_partners": disconnected_ids.tobytes(),
"atomic_disconnected_affinities": disconnected_affs.tobytes(),
"atomic_disconnected_areas": disconnected_areas.tobytes(),
"parents": parent_id_b}
rows.append(self.mutate_row(serialize_uint64(node_id),
self.family_id, val_dict,
time_stamp=time_stamp))
node_c += 1
time_dict["creating_lv1_row"].append(time.time() - time_start_2)
time_start_1 = time.time()
# Create parent node
rows.append(self.mutate_row(serialize_uint64(parent_id),
self.family_id,
{"children": node_ids.tobytes()},
time_stamp=time_stamp))
time_dict["creating_lv2_row"].append(time.time() - time_start_1)
time_start_1 = time.time()
cce_layers = self.get_cross_chunk_edges_layer(parent_cross_edges)
u_cce_layers = np.unique(cce_layers)
val_dict = {}
for cc_layer in u_cce_layers:
layer_cross_edges = parent_cross_edges[cce_layers == cc_layer]
if len(layer_cross_edges) > 0:
val_dict["atomic_cross_edges_%d" % cc_layer] = \
layer_cross_edges.tobytes()
if len(val_dict) > 0:
rows.append(self.mutate_row(serialize_uint64(parent_id),
self.cross_edge_family_id, val_dict,
time_stamp=time_stamp))
node_c += 1
time_dict["adding_cross_edges"].append(time.time() - time_start_1)
if len(rows) > 100000:
time_start_1 = time.time()
self.bulk_write(rows)
time_dict["writing"].append(time.time() - time_start_1)
if len(rows) > 0:
time_start_1 = time.time()
self.bulk_write(rows)
time_dict["writing"].append(time.time() - time_start_1)
if verbose:
print("Time creating rows: %.3fs for %d ccs with %d nodes" %
(time.time() - time_start, len(ccs), node_c))
for k in time_dict.keys():
print("%s -- %.3fms for %d instances -- avg = %.3fms" %
(k, np.sum(time_dict[k])*1000, len(time_dict[k]),
np.mean(time_dict[k])*1000))
def add_layer(self, layer_id: int,
child_chunk_coords: Sequence[Sequence[int]],
time_stamp: Optional[datetime.datetime] = None,
verbose: bool = True, n_threads: int = 20) -> None:
""" Creates the abstract nodes for a given chunk in a given layer
:param layer_id: int
:param child_chunk_coords: int array of length 3
coords in chunk space
:param time_stamp: datetime
:param verbose: bool
:param n_threads: int
"""
def _read_subchunks_thread(chunk_coord):
# Get start and end key
x, y, z = chunk_coord
row_keys = ["children"] + \
["atomic_cross_edges_%d" % l
for l in range(layer_id - 1, self.n_layers)]
range_read = self.range_read_chunk(layer_id - 1, x, y, z,
row_keys=row_keys)
# Due to restarted jobs some parents might be duplicated. We can
# find these duplicates only by comparing their children because
# each node has a unique id. However, we can use the fact that more recently
# created nodes have higher segment ids. We are only interested in
# the latest version of any duplicated parents.
# Deserialize row keys and store child with highest id for
# comparison
row_cell_dict = {}
segment_ids = []
row_ids = []
max_child_ids = []
for row_id_b, row_data in range_read.items():
row_id = deserialize_uint64(row_id_b)
segment_id = self.get_segment_id(row_id)
cells = row_data.cells
cell_family = cells[self.family_id]
if self.cross_edge_family_id in cells:
row_cell_dict[row_id] = cells[self.cross_edge_family_id]
node_child_ids_b = cell_family[children_key][0].value
node_child_ids = np.frombuffer(node_child_ids_b,
dtype=np.uint64)
max_child_ids.append(np.max(node_child_ids))
segment_ids.append(segment_id)
row_ids.append(row_id)
segment_ids = np.array(segment_ids, dtype=np.uint64)
row_ids = np.array(row_ids)
max_child_ids = np.array(max_child_ids, dtype=np.uint64)
sorting = np.argsort(segment_ids)[::-1]
row_ids = row_ids[sorting]
max_child_ids = max_child_ids[sorting]
counter = collections.defaultdict(int)
max_child_ids_occ_so_far = np.zeros(len(max_child_ids),
dtype=np.int)
for i_row in range(len(max_child_ids)):
max_child_ids_occ_so_far[i_row] = counter[max_child_ids[i_row]]
counter[max_child_ids[i_row]] += 1
# Filter last occurrences (we inverted the list) of each node
m = max_child_ids_occ_so_far == 0
row_ids = row_ids[m]
ll_node_ids.extend(row_ids)
# Loop through nodes from this chunk
for row_id in row_ids:
if row_id in row_cell_dict:
cross_edge_dict[row_id] = {}
cell_family = row_cell_dict[row_id]
for l in range(layer_id - 1, self.n_layers):
row_key = serialize_key("atomic_cross_edges_%d" % l)
if row_key in cell_family:
cross_edge_dict[row_id][l] = cell_family[row_key][0].value
if int(layer_id - 1) in cross_edge_dict[row_id]:
atomic_cross_edges_b = cross_edge_dict[row_id][layer_id - 1]
atomic_cross_edges = \
np.frombuffer(atomic_cross_edges_b,
dtype=np.uint64).reshape(-1, 2)
if len(atomic_cross_edges) > 0:
atomic_partner_id_dict[row_id] = \
atomic_cross_edges[:, 1]
new_pairs = zip(atomic_cross_edges[:, 0],
[row_id] * len(atomic_cross_edges))
atomic_child_id_dict_pairs.extend(new_pairs)
def _resolve_cross_chunk_edges_thread(args) -> None:
start, end = args
for i_child_key, child_key in\
enumerate(atomic_partner_id_dict_keys[start: end]):
this_atomic_partner_ids = atomic_partner_id_dict[child_key]
partners = {atomic_child_id_dict[atomic_cross_id]
for atomic_cross_id in this_atomic_partner_ids
if atomic_child_id_dict[atomic_cross_id] != 0}
if len(partners) > 0:
partners = np.array(list(partners), dtype=np.uint64)[:, None]
this_ids = np.array([child_key] * len(partners),
dtype=np.uint64)[:, None]
these_edges = np.concatenate([this_ids, partners], axis=1)
edge_ids.extend(these_edges)
def _write_out_connected_components(args) -> None:
start, end = args
n_ccs = int(end - start)
parent_ids = self.get_unique_node_id_range(chunk_id, step=n_ccs)
rows = []
for i_cc, cc in enumerate(ccs[start: end]):
node_ids = np.array(list(cc))
parent_id = parent_ids[i_cc]
parent_id_b = np.array(parent_id, dtype=np.uint64).tobytes()
parent_cross_edges_b = {}
for l in range(layer_id, self.n_layers):
parent_cross_edges_b[l] = b""
# Add rows for nodes that are in this chunk
for i_node_id, node_id in enumerate(node_ids):
if node_id in cross_edge_dict:
# Extract edges relevant to this node
for l in range(layer_id, self.n_layers):
if l in cross_edge_dict[node_id]:
parent_cross_edges_b[l] += \
cross_edge_dict[node_id][l]
# Create node
val_dict = {"parents": parent_id_b}
rows.append(self.mutate_row(serialize_uint64(node_id),
self.family_id, val_dict,
time_stamp=time_stamp))
# Create parent node
val_dict = {"children": node_ids.tobytes()}
rows.append(self.mutate_row(serialize_uint64(parent_id),
self.family_id, val_dict,
time_stamp=time_stamp))
val_dict = {}
for l in range(layer_id, self.n_layers):
if l in parent_cross_edges_b:
val_dict["atomic_cross_edges_%d" % l] = \
parent_cross_edges_b[l]
if len(val_dict) > 0:
rows.append(self.mutate_row(serialize_uint64(parent_id),
self.cross_edge_family_id,
val_dict,
time_stamp=time_stamp))
if len(rows) > 100000:
self.bulk_write(rows)
rows = []
if len(rows) > 0:
self.bulk_write(rows)
if time_stamp is None:
time_stamp = datetime.datetime.utcnow()
if time_stamp.tzinfo is None:
time_stamp = UTC.localize(time_stamp)
# 1 --------------------------------------------------------------------
# The first part is concerned with reading data from the child nodes
# of this layer and pre-processing it for the second part
time_start = time.time()
children_key = serialize_key("children")
atomic_partner_id_dict = {}
cross_edge_dict = {}
atomic_child_id_dict_pairs = []
ll_node_ids = []
multi_args = child_chunk_coords
n_jobs = np.min([n_threads, len(multi_args)])
if n_jobs > 0:
mu.multithread_func(_read_subchunks_thread, multi_args,
n_threads=n_jobs)
d = dict(atomic_child_id_dict_pairs)
atomic_child_id_dict = collections.defaultdict(np.uint64, d)
ll_node_ids = np.array(ll_node_ids, dtype=np.uint64)
if verbose:
print("Time iterating through subchunks: %.3fs" %
(time.time() - time_start))
time_start = time.time()
# Extract edges from remaining cross chunk edges
# and maintain unused cross chunk edges
edge_ids = []
# u_atomic_child_ids = np.unique(atomic_child_ids)
atomic_partner_id_dict_keys = \
np.array(list(atomic_partner_id_dict.keys()), dtype=np.uint64)
if n_threads > 1:
n_jobs = n_threads * 3 # Heuristic
else:
n_jobs = 1
n_jobs = np.min([n_jobs, len(atomic_partner_id_dict_keys)])
if n_jobs > 0:
spacing = np.linspace(0, len(atomic_partner_id_dict_keys),
n_jobs+1).astype(np.int)
starts = spacing[:-1]
ends = spacing[1:]
multi_args = list(zip(starts, ends))
mu.multithread_func(_resolve_cross_chunk_edges_thread, multi_args,
n_threads=n_threads)
if verbose:
print("Time resolving cross chunk edges: %.3fs" %
(time.time() - time_start))
time_start = time.time()
# 2 --------------------------------------------------------------------
# The second part finds connected components, writes the parents to
# BigTable and updates the children
# Make parent id creation easier
x, y, z = np.min(child_chunk_coords, axis=0) // self.fan_out
chunk_id = self.get_chunk_id(layer=layer_id, x=x, y=y, z=z)
# Extract connected components
chunk_g = nx.from_edgelist(edge_ids)
# Add single node objects that have no edges
isolated_node_mask = ~np.in1d(ll_node_ids, np.unique(edge_ids))
add_ccs = list(ll_node_ids[isolated_node_mask][:, None])
ccs = list(nx.connected_components(chunk_g)) + add_ccs
if verbose:
print("Time connected components: %.3fs" %
(time.time() - time_start))
time_start = time.time()
# Add rows for nodes that are in this chunk
# a connected component at a time
if n_threads > 1:
n_jobs = n_threads * 3 # Heuristic
else:
n_jobs = 1
n_jobs = np.min([n_jobs, len(ccs)])
spacing = np.linspace(0, len(ccs), n_jobs+1).astype(np.int)
starts = spacing[:-1]
ends = spacing[1:]
multi_args = list(zip(starts, ends))
mu.multithread_func(_write_out_connected_components, multi_args,
n_threads=n_threads)
if verbose:
print("Time writing %d connected components in layer %d: %.3fs" %
(len(ccs), layer_id, time.time() - time_start))
def get_atomic_cross_edge_dict(self, node_id: np.uint64,
layer_ids: Sequence[int] = None,
deserialize_node_ids: bool = False,
reshape: bool = False):
""" Extracts all atomic cross edges and serves them as a dictionary
:param node_id: np.uint64
:param layer_ids: list of ints
:param deserialize_node_ids: bool
:param reshape: bool
reshapes the list of node ids to an edge list (n x 2)
Only available when deserializing
:return: dict
"""
row = self.table.read_row(serialize_uint64(node_id))
if row is None:
return {}
atomic_cross_edges = {}
if isinstance(layer_ids, int):
layer_ids = [layer_ids]
if layer_ids is None:
layer_ids = range(2, self.n_layers)
if self.cross_edge_family_id in row.cells:
for l in layer_ids:
key = serialize_key("atomic_cross_edges_%d" % l)
row_cell = row.cells[self.cross_edge_family_id]
atomic_cross_edges[l] = []
if key in row_cell:
row_val = row_cell[key][0].value
if deserialize_node_ids:
atomic_cross_edges[l] = np.frombuffer(row_val,
dtype=np.uint64)
if reshape:
atomic_cross_edges[l] = \
atomic_cross_edges[l].reshape(-1, 2)
else:
atomic_cross_edges[l] = row_val
return atomic_cross_edges
def get_parent(self, node_id: np.uint64,
get_only_relevant_parent: bool = True,
time_stamp: Optional[datetime.datetime] = None) -> Union[
List[Tuple[np.uint64, datetime.datetime]],
np.uint64, None]:
""" Acquires parent of a node at a specific time stamp
:param node_id: uint64
:param get_only_relevant_parent: bool
True: return single parent according to time_stamp
False: return n x 2 list of all parents
((parent_id, time_stamp), ...)
:param time_stamp: datetime or None
:return: uint64 or None
"""
if time_stamp is None:
time_stamp = datetime.datetime.utcnow()
if time_stamp.tzinfo is None:
time_stamp = UTC.localize(time_stamp)
parent_key = serialize_key("parents")
all_parents = []
p_filter_ = ColumnQualifierRegexFilter(parent_key)
row = self.table.read_row(serialize_uint64(node_id), filter_=p_filter_)
if row and parent_key in row.cells[self.family_id]:
for parent_entry in row.cells[self.family_id][parent_key]:
if get_only_relevant_parent:
if parent_entry.timestamp > time_stamp:
continue
else:
return np.frombuffer(parent_entry.value,
dtype=np.uint64)[0]
else:
all_parents.append((np.frombuffer(parent_entry.value,
dtype=np.uint64)[0],
parent_entry.timestamp))
else:
return None
if len(all_parents) == 0:
raise Exception("Did not find a valid parent for %d with"
" the given time stamp" % node_id)
else:
return all_parents
def get_children(self, node_id: np.uint64) -> np.ndarray:
""" Returns all children of a node
:param node_id: np.uint64
:return: np.ndarray[np.uint64]
"""
children = self.read_row(node_id, "children", dtype=np.uint64)
if children is None:
return np.empty(0, dtype=np.uint64)
else:
return children
def get_latest_edge_affinity(self, atomic_edge: [np.uint64, np.uint64],
check: bool = False) -> np.float32:
""" Looks up the LATEST affinity of an edge
Future work should add a timestamp option
:param atomic_edge: [uint64, uint64]
:param check: bool
whether to look up affinity from both sides and compare
:return: float32
"""
edge_affinities = []
if check:
iter_max = 2
else:
iter_max = 1
for i in range(iter_max):
atomic_partners, atomic_affinities = \
self.get_atomic_partners(atomic_edge[i % 2],
include_connected_partners=True,
include_disconnected_partners=True)
edge_mask = atomic_partners == atomic_edge[(i + 1) % 2]
if len(edge_mask) == 0:
raise Exception("Edge does not exist")
edge_affinities.append(atomic_affinities[edge_mask][0])
if len(np.unique(edge_affinities)) == 1:
return edge_affinities[0]
else:
raise Exception("Different edge affinities found... Something went "
"horribly wrong.")
def get_latest_roots(self, time_stamp: Optional[datetime.datetime] = datetime.datetime.max,
n_threads: int = 1):
"""
:param time_stamp: datetime
:param n_threads: int
:return: list of np.uint64 (root ids without a newer version)
"""
def _read_root_rows(args) -> None:
start_seg_id, end_seg_id = args
start_id = self.get_node_id(segment_id=start_seg_id,
chunk_id=self.root_chunk_id)
end_id = self.get_node_id(segment_id=end_seg_id,
chunk_id=self.root_chunk_id)
range_read = self.table.read_rows(
start_key=serialize_uint64(start_id),
end_key=serialize_uint64(end_id),
# allow_row_interleaving=True,
end_inclusive=False,
filter_=time_filter)
range_read.consume_all()
rows = range_read.rows
for row_id, row_data in rows.items():
row_keys = row_data.cells[self.family_id]
if not serialize_key("new_parents") in row_keys:
root_ids.append(deserialize_uint64(row_id))
time_stamp -= datetime.timedelta(microseconds=time_stamp.microsecond % 1000)
time_filter = TimestampRangeFilter(TimestampRange(end=time_stamp))
max_seg_id = self.get_max_node_id(self.root_chunk_id) + 1
root_ids = []
n_blocks = np.min([n_threads*3+1, max_seg_id])
seg_id_blocks = np.linspace(1, max_seg_id, n_blocks, dtype=np.uint64)
multi_args = []
for i_id_block in range(0, len(seg_id_blocks) - 1):
multi_args.append([seg_id_blocks[i_id_block],
seg_id_blocks[i_id_block + 1]])
mu.multithread_func(
_read_root_rows, multi_args, n_threads=n_threads,
debug=False, verbose=True)
return root_ids
def get_root(self, node_id: np.uint64,
time_stamp: Optional[datetime.datetime] = None
) -> Union[List[np.uint64], np.uint64]:
""" Takes a node id and returns the associated agglomeration ids
:param atomic_id: uint64
:param time_stamp: None or datetime
:return: np.uint64
"""
if time_stamp is None:
time_stamp = datetime.datetime.utcnow()
if time_stamp.tzinfo is None:
time_stamp = UTC.localize(time_stamp)
early_finish = True
if self.get_chunk_layer(node_id) == self.n_layers:
raise Exception("node is already root")
parent_id = node_id
while early_finish:
parent_id = node_id
early_finish = False
for i_layer in range(self.get_chunk_layer(node_id)+1,
int(self.n_layers + 1)):
temp_parent_id = self.get_parent(parent_id, time_stamp=time_stamp)
if temp_parent_id is None:
early_finish = True
break
else:
parent_id = temp_parent_id
return parent_id
def get_all_parents(self, node_id: np.uint64,
time_stamp: Optional[datetime.datetime] = None
) -> Union[List[np.uint64], np.uint64]:
""" Takes a node id and returns all parents and parents' parents up to
the top
:param atomic_id: uint64
:param time_stamp: None or datetime
:return: np.uint64
"""
if time_stamp is None:
time_stamp = datetime.datetime.utcnow()
if time_stamp.tzinfo is None:
time_stamp = UTC.localize(time_stamp)
early_finish = True
parent_ids: List[np.uint64] = []
while early_finish:
parent_id = node_id
parent_ids = []
early_finish = False
for i_layer in range(self.get_chunk_layer(node_id)+1,
int(self.n_layers + 1)):
temp_parent_id = self.get_parent(parent_id,
time_stamp=time_stamp)
if temp_parent_id is None:
early_finish = True
break
else:
parent_id = temp_parent_id
parent_ids.append(parent_id)
return parent_ids
def lock_root_loop(self, root_ids: Sequence[np.uint64],
operation_id: np.uint64, max_tries: int = 1,
waittime_s: float = 0.5) -> Tuple[bool, np.ndarray]:
""" Attempts to lock multiple roots at the same time
:param root_ids: list of uint64
:param operation_id: uint64
:param max_tries: int
:param waittime_s: float
:return: bool, list of uint64s
success, latest root ids
"""
i_try = 0
while i_try < max_tries:
lock_acquired = False
# Collect latest root ids
new_root_ids: List[np.uint64] = []
for i_root_id in range(len(root_ids)):
future_root_ids = self.get_future_root_ids(root_ids[i_root_id])
if len(future_root_ids) == 0:
new_root_ids.append(root_ids[i_root_id])
else:
new_root_ids.extend(future_root_ids)
# Attempt to lock all latest root ids
root_ids = np.unique(new_root_ids)
for i_root_id in range(len(root_ids)):
print("operation id: %d - root id: %d" %
(operation_id, root_ids[i_root_id]))
lock_acquired = self.lock_single_root(root_ids[i_root_id],
operation_id)
# Roll back locks if one root cannot be locked
if not lock_acquired:
for j_root_id in range(len(root_ids)):
self.unlock_root(root_ids[j_root_id], operation_id)
break
if lock_acquired:
return True, root_ids
time.sleep(waittime_s)
i_try += 1
print(i_try)
return False, root_ids
def lock_single_root(self, root_id: np.uint64, operation_id: np.uint64
) -> bool:
""" Attempts to lock the latest version of a root node
:param root_id: uint64
:param operation_id: uint64
an id that is unique to the process asking to lock the root node
:return: bool
success
"""
operation_id_b = serialize_uint64(operation_id)
lock_key = serialize_key("lock")
new_parents_key = serialize_key("new_parents")
# Build a column filter which tests if a lock was set (== lock column
# exists) and if it is still valid (timestamp younger than
# LOCK_EXPIRED_TIME_DELTA), or if a new parent exists (== new_parents
# column exists). The lock is only written if neither condition holds.
time_cutoff = datetime.datetime.utcnow() - LOCK_EXPIRED_TIME_DELTA
# Comply to resolution of BigTables TimeRange
time_cutoff -= datetime.timedelta(
microseconds=time_cutoff.microsecond % 1000)
time_filter = TimestampRangeFilter(TimestampRange(start=time_cutoff))
# lock_key_filter = ColumnQualifierRegexFilter(lock_key)
# new_parents_key_filter = ColumnQualifierRegexFilter(new_parents_key)
lock_key_filter = ColumnRangeFilter(
column_family_id=self.family_id,
start_column=lock_key,
end_column=lock_key,
inclusive_start=True,
inclusive_end=True)
new_parents_key_filter = ColumnRangeFilter(
column_family_id=self.family_id,
start_column=new_parents_key,
end_column=new_parents_key,
inclusive_start=True,
inclusive_end=True)
# Combine filters together
chained_filter = RowFilterChain([time_filter, lock_key_filter])
combined_filter = ConditionalRowFilter(
base_filter=chained_filter,
true_filter=PassAllFilter(True),
false_filter=new_parents_key_filter)
# Get conditional row using the chained filter
root_row = self.table.row(serialize_uint64(root_id),
filter_=combined_filter)
# Set row lock if condition returns no results (state == False)
time_stamp = datetime.datetime.utcnow()
root_row.set_cell(self.family_id, lock_key, operation_id_b, state=False,
timestamp=time_stamp)
# The lock was acquired when set_cell returns False (state)
lock_acquired = not root_row.commit()
if not lock_acquired:
r = self.table.read_row(serialize_uint64(root_id))
l_operation_ids = []
for cell in r.cells[self.family_id][lock_key]:
l_operation_id = deserialize_uint64(cell.value)
l_operation_ids.append(l_operation_id)
print("Locked operation ids:", l_operation_ids)
return lock_acquired
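# Hypothetical usage sketch (made-up ids; assumes a configured instance `cg`
# of this class): acquire the lock for an edit and always release it again.
#
#   operation_id = cg.get_unique_operation_id()
#   if cg.lock_single_root(root_id, operation_id):
#       try:
#           pass  # build rows, then cg.bulk_write(rows, [root_id], operation_id=operation_id)
#       finally:
#           cg.unlock_root(root_id, operation_id)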
def unlock_root(self, root_id: np.uint64, operation_id: np.uint64) -> bool:
""" Unlocks a root
This is mainly used for cases where multiple roots need to be locked and
locking was not successful for all of them
:param root_id: np.uint64
:param operation_id: uint64
an id that is unique to the process asking to lock the root node
:return: bool
success
"""
operation_id_b = serialize_uint64(operation_id)
lock_key = serialize_key("lock")
# Build a column filter which tests if a lock was set (== lock column
# exists) and if it is still valid (timestamp younger than
# LOCK_EXPIRED_TIME_DELTA) and if the given operation_id is still
# the active lock holder
time_cutoff = datetime.datetime.utcnow() - LOCK_EXPIRED_TIME_DELTA
# Comply to resolution of BigTables TimeRange
time_cutoff -= datetime.timedelta(
microseconds=time_cutoff.microsecond % 1000)
time_filter = TimestampRangeFilter(TimestampRange(start=time_cutoff))
# column_key_filter = ColumnQualifierRegexFilter(lock_key)
# value_filter = ColumnQualifierRegexFilter(operation_id_b)
column_key_filter = ColumnRangeFilter(
column_family_id=self.family_id,
start_column=lock_key,
end_column=lock_key,
inclusive_start=True,
inclusive_end=True)
value_filter = ValueRangeFilter(
start_value=operation_id_b,
end_value=operation_id_b,
inclusive_start=True,
inclusive_end=True)
# Chain these filters together
chained_filter = RowFilterChain([time_filter, column_key_filter,
value_filter])
# Get conditional row using the chained filter
root_row = self.table.row(serialize_uint64(root_id),
filter_=chained_filter)
# Delete row if conditions are met (state == True)
root_row.delete_cell(self.family_id, lock_key, state=True)
return root_row.commit()
def check_and_renew_root_locks(self, root_ids: Iterable[np.uint64],
operation_id: np.uint64) -> bool:
""" Tests if the roots are locked with the provided operation_id and
renews the lock to reset the time_stamp
This is mainly used before executing a bulk write
:param root_ids: uint64
:param operation_id: uint64
an id that is unique to the process asking to lock the root node
:return: bool
success
"""
for root_id in root_ids:
if not self.check_and_renew_root_lock_single(root_id, operation_id):
print("check_and_renew_root_locks failed - %d" % root_id)
return False
return True
def check_and_renew_root_lock_single(self, root_id: np.uint64,
operation_id: np.uint64) -> bool:
""" Tests if the root is locked with the provided operation_id and
renews the lock to reset the time_stamp
This is mainly used before executing a bulk write
:param root_id: uint64
:param operation_id: uint64
an id that is unique to the process asking to lock the root node
:return: bool
success
"""
operation_id_b = serialize_uint64(operation_id)
lock_key = serialize_key("lock")
new_parents_key = serialize_key("new_parents")
# Build a column filter which tests if a lock was set (== lock column
# exists) and if the given operation_id is still the active lock holder
# and there is no new parent (== new_parents column does not exist). The latter
# is not necessary but we include it as a backup to prevent things
# from going really bad.
# column_key_filter = ColumnQualifierRegexFilter(lock_key)
# value_filter = ColumnQualifierRegexFilter(operation_id_b)
column_key_filter = ColumnRangeFilter(
column_family_id=self.family_id,
start_column=lock_key,
end_column=lock_key,
inclusive_start=True,
inclusive_end=True)
value_filter = ValueRangeFilter(
start_value=operation_id_b,
end_value=operation_id_b,
inclusive_start=True,
inclusive_end=True)
new_parents_key_filter = ColumnRangeFilter(
column_family_id=self.family_id, start_column=new_parents_key,
end_column=new_parents_key, inclusive_start=True,
inclusive_end=True)
# Chain these filters together
chained_filter = RowFilterChain([column_key_filter, value_filter])
combined_filter = ConditionalRowFilter(
base_filter=chained_filter,
true_filter=new_parents_key_filter,
false_filter=PassAllFilter(True))
# Get conditional row using the chained filter
root_row = self.table.row(serialize_uint64(root_id),
filter_=combined_filter)
# Renew the row lock if the condition returns no results (state == False)
root_row.set_cell(self.family_id, lock_key, operation_id_b, state=False)
# The lock was renewed when the commit condition evaluated to False
lock_acquired = not root_row.commit()
return lock_acquired
def get_latest_root_id(self, root_id: np.uint64) -> np.ndarray:
""" Returns the latest root id associated with the provided root id
:param root_id: uint64
:return: list of uint64s
"""
id_working_set = [root_id]
new_parent_key = serialize_key("new_parents")
latest_root_ids = []
while len(id_working_set) > 0:
next_id = id_working_set[0]
del(id_working_set[0])
r = self.table.read_row(serialize_uint64(next_id))
# Check if a new root id was attached to this root id
if new_parent_key in r.cells[self.family_id]:
id_working_set.extend(
np.frombuffer(
r.cells[self.family_id][new_parent_key][0].value,
dtype=np.uint64))
else:
latest_root_ids.append(next_id)
return np.unique(latest_root_ids)
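# Illustrative note (hypothetical ids): if root A was merged into B and B was
# later split into C and D, the "new_parents" chain is followed until rows
# without a new parent are reached, so get_latest_root_id(A) returns
# np.unique([C, D]).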
def get_future_root_ids(self, root_id: np.uint64,
time_stamp: Optional[datetime.datetime] =
datetime.datetime.max)-> np.ndarray:
""" Returns all future root ids emerging from this root
This search happens in a monotonic fashion. At no point are past root
ids of future root ids taken into account.
:param root_id: np.uint64
:param time_stamp: None or datetime
restrict search to ids created before this time_stamp
None=search whole future
:return: array of uint64
"""
if time_stamp.tzinfo is None:
time_stamp = UTC.localize(time_stamp)
id_history = []
next_ids = [root_id]
while len(next_ids):
temp_next_ids = []
for next_id in next_ids:
ids, row_time_stamp = self.read_row(next_id,
key="new_parents",
dtype=np.uint64,
get_time_stamp=True)
if ids is None:
r, row_time_stamp = self.read_row(next_id,
key="children",
dtype=np.uint64,
get_time_stamp=True)
if row_time_stamp is None:
raise Exception("Something went wrong...")
if row_time_stamp < time_stamp:
if ids is not None:
temp_next_ids.extend(ids)
if next_id != root_id:
id_history.append(next_id)
next_ids = temp_next_ids
return np.unique(np.array(id_history, dtype=np.uint64))
def get_past_root_ids(self, root_id: np.uint64,
time_stamp: Optional[datetime.datetime] =
datetime.datetime.min) -> np.ndarray:
""" Returns all future root ids emerging from this root
This search happens in a monotic fashion. At no point are future root
ids of past root ids taken into account.
:param root_id: np.uint64
:param time_stamp: None or datetime
restrict search to ids created after this time_stamp
None=search whole past
:return: array of uint64
"""
if time_stamp.tzinfo is None:
time_stamp = UTC.localize(time_stamp)
id_history = []
next_ids = [root_id]
while len(next_ids):
temp_next_ids = []
for next_id in next_ids:
ids, row_time_stamp = self.read_row(next_id,
key="former_parents",
dtype=np.uint64,
get_time_stamp=True)
if ids is None:
_, row_time_stamp = self.read_row(next_id,
key="children",
dtype=np.uint64,
get_time_stamp=True)
if row_time_stamp is None:
raise Exception("Something went wrong...")
if row_time_stamp > time_stamp:
if ids is not None:
temp_next_ids.extend(ids)
if next_id != root_id:
id_history.append(next_id)
next_ids = temp_next_ids
return np.unique(np.array(id_history, dtype=np.uint64))
def get_root_id_history(self, root_id: np.uint64,
time_stamp_past:
Optional[datetime.datetime] = datetime.datetime.min,
time_stamp_future:
Optional[datetime.datetime] = datetime.datetime.max
) -> np.ndarray:
""" Returns all future root ids emerging from this root
This search happens in a monotic fashion. At no point are future root
ids of past root ids or past root ids of future root ids taken into
account.
:param root_id: np.uint64
:param time_stamp_past: None or datetime
restrict search to ids created after this time_stamp
None=search whole past
:param time_stamp_future: None or datetime
restrict search to ids created before this time_stamp
None=search whole future
:return: array of uint64
"""
past_ids = self.get_past_root_ids(root_id=root_id,
time_stamp=time_stamp_past)
future_ids = self.get_future_root_ids(root_id=root_id,
time_stamp=time_stamp_future)
history_ids = np.concatenate([past_ids,
np.array([root_id], dtype=np.uint64),
future_ids])
return history_ids
def get_subgraph(self, agglomeration_id: np.uint64,
bounding_box: Optional[Sequence[Sequence[int]]] = None,
bb_is_coordinate: bool = False,
stop_lvl: int = 1,
get_edges: bool = False, verbose: bool = True
) -> Union[Tuple[np.ndarray, np.ndarray], np.ndarray]:
""" Returns all edges between supervoxels belonging to the specified
agglomeration id within the defined bounding box
:param agglomeration_id: int
:param bounding_box: [[x_l, y_l, z_l], [x_h, y_h, z_h]]
:param bb_is_coordinate: bool
:param stop_lvl: int
:param get_edges: bool
:param verbose: bool
:return: edge list
"""
# Helper functions for multithreading
def _handle_subgraph_children_layer2_edges_thread(
child_ids: Iterable[np.uint64]) -> Tuple[List[np.ndarray],
List[np.float32]]:
_edges = []
_affinities = []
for child_id in child_ids:
this_edges, this_affinities = self.get_subgraph_chunk(
child_id, time_stamp=time_stamp)
_edges.extend(this_edges)
_affinities.extend(this_affinities)
return _edges, _affinities
def _handle_subgraph_children_layer2_thread(
child_ids: Iterable[np.uint64]) -> None:
for child_id in child_ids:
atomic_ids.extend(self.get_children(child_id))
def _handle_subgraph_children_higher_layers_thread(
child_ids: Iterable[np.uint64]) -> None:
for child_id in child_ids:
_children = self.get_children(child_id)
if bounding_box is not None:
chunk_ids = self.get_chunk_ids_from_node_ids(_children)
chunk_ids = np.array([self.get_chunk_coordinates(c)
for c in chunk_ids])
chunk_ids = np.array(chunk_ids)
bounding_box_layer = bounding_box / self.fan_out ** np.max([0, (layer - 3)])
bound_check = np.array([
np.all(chunk_ids < bounding_box_layer[1], axis=1),
np.all(chunk_ids + 1 > bounding_box_layer[0], axis=1)]).T
bound_check_mask = np.all(bound_check, axis=1)
_children = _children[bound_check_mask]
new_childs.extend(_children)
# Make sure that edges are not requested if we should stop on an
# intermediate level
assert stop_lvl == 1 or not get_edges
if get_edges:
time_stamp = self.read_row(agglomeration_id, "children",
get_time_stamp=True)[1]
if bounding_box is not None:
if bb_is_coordinate:
bounding_box = np.array(bounding_box,
dtype=np.float32) / self.chunk_size
bounding_box[0] = np.floor(bounding_box[0])
bounding_box[1] = np.ceil(bounding_box[1])
bounding_box = bounding_box.astype(np.int)
else:
bounding_box = np.array(bounding_box, dtype=np.int)
edges = np.array([], dtype=np.uint64).reshape(0, 2)
atomic_ids = []
affinities = np.array([], dtype=np.float32)
child_ids = [agglomeration_id]
time_start = time.time()
while len(child_ids) > 0:
new_childs = []
layer = self.get_chunk_layer(child_ids[0])
if stop_lvl == layer:
atomic_ids = child_ids
break
# Use heuristic to guess the optimal number of threads
n_child_ids = len(child_ids)
this_n_threads = np.min([int(n_child_ids // 20) + 1, mu.n_cpus])
if layer == 2:
if get_edges:
edges_and_affinities = mu.multithread_func(
_handle_subgraph_children_layer2_edges_thread,
np.array_split(child_ids, this_n_threads),
n_threads=this_n_threads, debug=this_n_threads == 1)
for edges_and_affinities_pair in edges_and_affinities:
_edges, _affinities = edges_and_affinities_pair
affinities = np.concatenate([affinities, _affinities])
edges = np.concatenate([edges, _edges])
else:
mu.multithread_func(
_handle_subgraph_children_layer2_thread,
np.array_split(child_ids, this_n_threads),
n_threads=this_n_threads, debug=this_n_threads == 1)
else:
mu.multithread_func(
_handle_subgraph_children_higher_layers_thread,
np.array_split(child_ids, this_n_threads),
n_threads=this_n_threads, debug=this_n_threads == 1)
child_ids = new_childs
if verbose:
print("Layer %d: %.3fms for %d children with %d threads" %
(layer, (time.time() - time_start) * 1000, n_child_ids,
this_n_threads))
# if len(child_ids) != len(np.unique(child_ids)):
# print("N children %d - %d" % (len(child_ids), len(np.unique(child_ids))))
# # print(agglomeration_id, child_ids)
#
# assert len(child_ids) == len(np.unique(child_ids))
time_start = time.time()
atomic_ids = np.array(atomic_ids, np.uint64)
if get_edges:
return edges, affinities
else:
return atomic_ids
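# Hypothetical usage sketch (assumes a configured instance `cg`): restrict the
# subgraph query to a bounding box given in voxel coordinates and also return
# the edges with their affinities, mirroring the call in _remove_edges_mincut:
#
#   edges, affinities = cg.get_subgraph(root_id, get_edges=True,
#                                       bounding_box=[[0, 0, 0], [512, 512, 64]],
#                                       bb_is_coordinate=True)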
def get_atomic_partners(self, atomic_id: np.uint64,
include_connected_partners=True,
include_disconnected_partners=False,
time_stamp: Optional[datetime.datetime] = None
) -> Tuple[np.ndarray, np.ndarray]:
""" Extracts the atomic partners and affinities for a given timestamp
:param atomic_id: np.uint64
:param include_connected_partners: bool
:param include_disconnected_partners: bool
:param time_stamp: None or datetime
:return: list of uint64, list of float32
"""
if time_stamp is None:
time_stamp = datetime.datetime.utcnow()
if time_stamp.tzinfo is None:
time_stamp = UTC.localize(time_stamp)
edge_keys = []
affinity_keys = []
area_keys = []
if include_connected_partners:
edge_keys.append(serialize_key('atomic_connected_partners'))
affinity_keys.append(serialize_key('atomic_connected_affinities'))
area_keys.append(serialize_key('atomic_connected_areas'))
if include_disconnected_partners:
edge_keys.append(serialize_key('atomic_disconnected_partners'))
affinity_keys.append(serialize_key('atomic_disconnected_affinities'))
area_keys.append(serialize_key('atomic_disconnected_areas'))
filters = [ColumnQualifierRegexFilter(k) for k in
edge_keys + affinity_keys if k is not None]
filter_ = RowFilterUnion(filters)
partners = np.array([], dtype=np.uint64)
affinities = np.array([], dtype=np.float32)
areas = np.array([], dtype=np.uint64)
r = self.table.read_row(serialize_uint64(atomic_id),
filter_=filter_)
for edge_key, affinity_key, area_key in \
zip(edge_keys, affinity_keys, area_keys):
# Shortcut for the trivial case that there have been no changes to
# the edges of this child (only a single cell version exists):
if len(r.cells[self.family_id][edge_key]) == 1:
this_partners = \
np.frombuffer(r.cells[self.family_id][edge_key][0].value,
dtype=np.uint64)
partners = np.concatenate([partners, this_partners])
if affinity_key is None:
affinities = np.concatenate([
affinities, np.full((len(this_partners)), np.inf)])
else:
this_affinities = \
np.frombuffer(r.cells[self.family_id][affinity_key][0].value,
dtype=np.float32)
affinities = np.concatenate([affinities, this_affinities])
if area_key is None:
areas = np.concatenate([
areas, np.full((len(this_partners)), 0)])
else:
this_areas = \
np.frombuffer(r.cells[self.family_id][area_key][0].value,
dtype=np.uint64)
areas = np.concatenate([areas, this_areas])
# From new to old: Add partners that are not
# in the edge list of this child. This assures that more recent
# changes are prioritized. For each, check if the time_stamp
# is satisfied.
# Note: The creator writes one list of partners (edges) and
# affinities. Each subsequent edit only changes a few edges, hence,
# all but the oldest entry are short lists of length ~ 1-10
for i_edgelist in range(len(r.cells[self.family_id][edge_key])):
cell = r.cells[self.family_id][edge_key][i_edgelist]
if time_stamp >= cell.timestamp:
partner_batch_b = \
r.cells[self.family_id][edge_key][i_edgelist].value
partner_batch = np.frombuffer(partner_batch_b,
dtype=np.uint64)
partner_batch_m = ~np.in1d(partner_batch, partners)
this_partners = partner_batch[partner_batch_m]
partners = np.concatenate([partners, this_partners])
if affinity_key is None:
affinities = np.concatenate([
affinities, np.full((len(this_partners)), np.inf)])
else:
affinity_batch_b = \
r.cells[self.family_id][affinity_key][i_edgelist].value
affinity_batch = np.frombuffer(affinity_batch_b,
dtype=np.float32)
affinities = np.concatenate([
affinities, affinity_batch[partner_batch_m]])
# Take care of removed edges (affinity == 0)
partners_m = affinities > 0
partners = partners[partners_m]
affinities = affinities[partners_m]
return partners, affinities
def get_subgraph_chunk(self, parent_id: np.uint64, make_unique: bool = True,
time_stamp: Optional[datetime.datetime] = None
) -> Tuple[np.ndarray, np.ndarray]:
""" Takes an atomic id and returns the associated agglomeration ids
:param parent_id: np.uint64
:param make_unique: bool
:param time_stamp: None or datetime
:return: edge list
"""
def _read_atomic_partners(child_id_block: Iterable[np.uint64]
) -> Tuple[np.ndarray, np.ndarray]:
thread_edges = np.array([], dtype=np.uint64).reshape(0, 2)
thread_affinities = np.array([], dtype=np.float32)
for child_id in child_id_block:
node_edges, node_affinities = self.get_atomic_partners(
child_id, time_stamp=time_stamp,
include_connected_partners=True,
include_disconnected_partners=False)
# If we have edges add them to the chunk global edge list
if len(node_edges) > 0:
# Build n x 2 edge list from partner list
node_edges = \
np.concatenate([np.ones((len(node_edges), 1),
dtype=np.uint64) * child_id,
node_edges[:, None]], axis=1)
thread_edges = np.concatenate([thread_edges,
node_edges])
thread_affinities = np.concatenate([thread_affinities,
node_affinities])
return thread_edges, thread_affinities
if time_stamp is None:
time_stamp = datetime.datetime.utcnow()
if time_stamp.tzinfo is None:
time_stamp = UTC.localize(time_stamp)
child_ids = self.get_children(parent_id)
# Iterate through all children of this parent and retrieve their edges
edges = np.array([], dtype=np.uint64).reshape(0, 2)
affinities = np.array([], dtype=np.float32)
n_child_ids = len(child_ids)
this_n_threads = np.min([int(n_child_ids // 20) + 1, mu.n_cpus])
child_id_blocks = np.array_split(child_ids, this_n_threads)
edges_and_affinities = mu.multithread_func(_read_atomic_partners,
child_id_blocks,
n_threads=this_n_threads,
debug=this_n_threads == 1)
for edges_and_affinities_pairs in edges_and_affinities:
edges = np.concatenate([edges, edges_and_affinities_pairs[0]])
affinities = np.concatenate([affinities,
edges_and_affinities_pairs[1]])
# If requested, remove duplicate edges. Every edge is stored in each
# participating node. Hence, we have many edge pairs that look
# like [x, y], [y, x]. We solve this by sorting and calling np.unique
# row-wise
if make_unique:
edges, idx = np.unique(np.sort(edges, axis=1), axis=0,
return_index=True)
affinities = affinities[idx]
return edges, affinities
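# Small worked example of the deduplication above (illustrative values only):
# edges = [[7, 3], [3, 7], [3, 9]] -> np.sort(edges, axis=1) yields
# [[3, 7], [3, 7], [3, 9]]; np.unique(..., axis=0, return_index=True) keeps
# one copy of [3, 7] plus [3, 9], and idx selects the matching affinities.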
def add_edge(self, user_id: str, atomic_edge: Sequence[np.uint64],
affinity: Optional[np.float32] = None,
root_ids: Optional[Sequence[np.uint64]] = None,
n_tries: int = 20) -> np.uint64:
""" Adds an edge to the chunkedgraph
Multi-user safe through locking of the root node
This function acquires a lock and ensures that it still owns the
lock before executing the write.
:param user_id: str
unique id - do not just make something up, use the same id for the
same user every time
:param atomic_edge: list of two uint64s
:param affinity: float or None
will eventually be set to 1 if None
:param root_ids: list of uint64s
avoids reading the root ids again if already computed
:param n_tries: int
:return: uint64
if successful the new root id is returned,
else None
"""
# Sanity Checks
if atomic_edge[0] == atomic_edge[1]:
return None
if self.get_chunk_layer(atomic_edge[0]) != \
self.get_chunk_layer(atomic_edge[1]):
return None
# Lookup root ids
if root_ids is None:
root_ids = [self.get_root(atomic_edge[0]),
self.get_root(atomic_edge[1])]
# Get a unique id for this operation
operation_id = self.get_unique_operation_id()
i_try = 0
lock_root_ids = np.unique(root_ids)
while i_try < n_tries:
# Try to acquire lock and only continue if successful
lock_acquired, lock_root_ids = \
self.lock_root_loop(root_ids=lock_root_ids,
operation_id=operation_id)
if lock_acquired:
# Add edge and change hierarchy
new_root_id, rows, time_stamp = \
self._add_edge(operation_id=operation_id,
atomic_edge=atomic_edge, affinity=affinity)
# Add a row to the log
rows.append(self._create_merge_log_row(operation_id, user_id,
[new_root_id],
atomic_edge, time_stamp))
# Execute write (makes sure that we are still owning the lock)
if self.bulk_write(rows, lock_root_ids,
operation_id=operation_id, slow_retry=False):
return new_root_id
for lock_root_id in lock_root_ids:
self.unlock_root(lock_root_id, operation_id)
i_try += 1
print("Waiting - %d" % i_try)
time.sleep(1)
return None
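# Hypothetical usage sketch (made-up ids; assumes a configured instance `cg`):
#
#   new_root_id = cg.add_edge("user_123", [supervoxel_a, supervoxel_b],
#                             affinity=np.float32(0.9))
#   if new_root_id is None:
#       pass  # merge was rejected or the root lock could not be acquired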
def _add_edge(self, operation_id: np.uint64,
atomic_edge: Sequence[np.uint64],
affinity: Optional[np.float32] = None
) -> Tuple[np.uint64, List[bigtable.row.Row],
datetime.datetime]:
""" Adds an atomic edge to the ChunkedGraph
:param operation_id: uint64
:param atomic_edge: list of two ints
:param affinity: float
:return: int
new root id
"""
time_stamp = datetime.datetime.utcnow()
if affinity is None:
affinity = np.float32(1.0)
rows = []
assert len(atomic_edge) == 2
# Walk up the hierarchy until a parent in the same chunk is found
original_parent_ids = [self.get_all_parents(atomic_edge[0]),
self.get_all_parents(atomic_edge[1])]
original_parent_ids = np.array(original_parent_ids).T
merge_layer = None
for i_layer in range(len(original_parent_ids)):
if self.test_if_nodes_are_in_same_chunk(original_parent_ids[i_layer]):
merge_layer = i_layer
break
if merge_layer is None:
raise Exception("No parents found. Did you set is_cg_id correctly?")
original_root = original_parent_ids[-1]
# Find a new node id and update all children
# circumvented_nodes = current_parent_ids.copy()
# chunk_id = self.get_chunk_id(node_id=original_parent_ids[merge_layer][0])
new_parent_id = self.get_unique_node_id(
self.get_chunk_id(node_id=original_parent_ids[merge_layer][0]))
new_parent_id_b = np.array(new_parent_id).tobytes()
current_node_id = None
for i_layer in range(merge_layer, len(original_parent_ids)):
# If an edge connects two supervoxels that were already connected
# through another path, we will reach a point where we find the same
# parent twice.
current_parent_ids = np.unique(original_parent_ids[i_layer])
# Collect child ids of all nodes --> childs of new node
if current_node_id is None:
combined_child_ids = np.array([], dtype=np.uint64)
else:
combined_child_ids = np.array([current_node_id],
dtype=np.uint64).flatten()
for prior_parent_id in current_parent_ids:
child_ids = self.get_children(prior_parent_id)
# Exclude parent nodes from old hierarchy path
if i_layer > merge_layer:
child_ids = child_ids[~np.in1d(child_ids,
original_parent_ids)]
combined_child_ids = np.concatenate([combined_child_ids,
child_ids])
# Append new parent entry for all children
for child_id in child_ids:
val_dict = {"parents": new_parent_id_b}
rows.append(self.mutate_row(serialize_uint64(child_id),
self.family_id,
val_dict,
time_stamp=time_stamp))
# Create new parent node
val_dict = {"children": combined_child_ids.tobytes()}
current_node_id = new_parent_id # Store for later
if i_layer < len(original_parent_ids) - 1:
new_parent_id = self.get_unique_node_id(
self.get_chunk_id(
node_id=original_parent_ids[i_layer + 1][0]))
new_parent_id_b = np.array(new_parent_id).tobytes()
val_dict["parents"] = new_parent_id_b
else:
val_dict["former_parents"] = np.array(original_root).tobytes()
val_dict["operation_id"] = serialize_uint64(operation_id)
rows.append(self.mutate_row(serialize_uint64(original_root[0]),
self.family_id,
{"new_parents": new_parent_id_b},
time_stamp=time_stamp))
rows.append(self.mutate_row(serialize_uint64(original_root[1]),
self.family_id,
{"new_parents": new_parent_id_b},
time_stamp=time_stamp))
rows.append(self.mutate_row(serialize_uint64(current_node_id),
self.family_id, val_dict,
time_stamp=time_stamp))
# Read original cross chunk edges
atomic_cross_edges_b = {}
for l in range(i_layer + 2, self.n_layers):
atomic_cross_edges_b[l] = b""
for original_parent_id in original_parent_ids[i_layer]:
this_atomic_cross_edges = \
self.table.read_row(serialize_uint64(original_parent_id))
if self.cross_edge_family_id in this_atomic_cross_edges.cells:
for l in range(i_layer + 2, self.n_layers):
key = serialize_key("atomic_cross_edges_%d" % l)
if key in this_atomic_cross_edges.cells[self.cross_edge_family_id]:
atomic_cross_edges_b[l] += this_atomic_cross_edges.cells[self.cross_edge_family_id][key][0].value
val_dict = {}
for l in range(i_layer + 2, self.n_layers):
if len(atomic_cross_edges_b[l]) > 0:
val_dict["atomic_cross_edges_%d" % l] = atomic_cross_edges_b[l]
if len(val_dict):
rows.append(self.mutate_row(serialize_uint64(current_node_id),
self.cross_edge_family_id, val_dict,
time_stamp=time_stamp))
# Atomic edge
for i_atomic_id in range(2):
val_dict = \
{"atomic_connected_partners":
np.array([atomic_edge[(i_atomic_id + 1) % 2]]).tobytes(),
"atomic_connected_affinities":
np.array([affinity], dtype=np.float32).tobytes(),
"atomic_connected_areas":
np.array([1], dtype=np.uint64).tobytes(),
"atomic_disconnected_partners":
np.array([atomic_edge[(i_atomic_id + 1) % 2]]).tobytes(),
"atomic_disconnected_affinities":
np.array([0], dtype=np.float32).tobytes(),
"atomic_disconnected_areas":
np.array([0], dtype=np.uint64).tobytes()
}
rows.append(self.mutate_row(serialize_uint64(
atomic_edge[i_atomic_id]), self.family_id, val_dict,
time_stamp=time_stamp))
return new_parent_id, rows, time_stamp
def remove_edges(self,
user_id: str,
source_id: np.uint64,
sink_id: np.uint64,
source_coord: Optional[Sequence[int]] = None,
sink_coord: Optional[Sequence[int]] = None,
mincut: bool = True,
bb_offset: Tuple[int, int, int] = (240, 240, 24),
root_ids: Optional[Sequence[np.uint64]] = None,
n_tries: int = 20) -> Sequence[np.uint64]:
""" Removes edges - either directly or after applying a mincut
Multi-user safe through locking of the root node
This function acquires a lock and ensures that it still owns the
lock before executing the write.
:param user_id: str
unique id - do not just make something up, use the same id for the
same user every time
:param source_id: uint64
:param sink_id: uint64
:param source_coord: list of 3 ints
[x, y, z] coordinate of source supervoxel
:param sink_coord: list of 3 ints
[x, y, z] coordinate of sink supervoxel
:param mincut:
:param bb_offset: list of 3 ints
[x, y, z] bounding box padding beyond box spanned by coordinates
:param root_ids: list of uint64s
:param n_tries: int
:return: list of uint64s or None if no split was performed
"""
# Sanity Checks
if source_id == sink_id:
print("source == sink")
return None
if self.get_chunk_layer(source_id) != \
self.get_chunk_layer(sink_id):
print("layer(source) !== layer(sink)")
return None
if mincut:
assert source_coord is not None
assert sink_coord is not None
if root_ids is None:
root_ids = [self.get_root(source_id),
self.get_root(sink_id)]
if root_ids[0] != root_ids[1]:
print("root(source) != root(sink):", root_ids)
return None
# Get a unique id for this operation
operation_id = self.get_unique_operation_id()
i_try = 0
while i_try < n_tries:
# Try to acquire lock and only continue if successful
lock_root_ids = np.unique(root_ids)
lock_acquired, lock_root_ids = \
self.lock_root_loop(root_ids=lock_root_ids,
operation_id=operation_id)
if lock_acquired:
# (run mincut) and remove edges + update hierarchy
if mincut:
success, result = \
self._remove_edges_mincut(operation_id=operation_id,
source_id=source_id,
sink_id=sink_id,
source_coord=source_coord,
sink_coord=sink_coord,
bb_offset=bb_offset)
if success:
new_root_ids, rows, removed_edges, time_stamp = result
else:
for lock_root_id in lock_root_ids:
self.unlock_root(lock_root_id,
operation_id=operation_id)
return None
else:
success, result = \
self._remove_edges(operation_id=operation_id,
atomic_edges=[(source_id, sink_id)])
if success:
new_root_ids, rows, time_stamp = result
removed_edges = [[source_id, sink_id]]
else:
for lock_root_id in lock_root_ids:
self.unlock_root(lock_root_id,
operation_id=operation_id)
return None
# Add a row to the log
rows.append(self._create_split_log_row(operation_id, user_id,
new_root_ids,
[source_id, sink_id],
removed_edges,
time_stamp))
# Execute write (makes sure that we are still owning the lock)
if self.bulk_write(rows, lock_root_ids,
operation_id=operation_id, slow_retry=False):
return new_root_ids
for lock_root_id in lock_root_ids:
self.unlock_root(lock_root_id, operation_id=operation_id)
i_try += 1
print("Waiting - %d" % i_try)
time.sleep(1)
return None
def _remove_edges_mincut(self, operation_id: np.uint64, source_id: np.uint64,
sink_id: np.uint64, source_coord: Sequence[int],
sink_coord: Sequence[int],
bb_offset: Tuple[int, int, int] = (120, 120, 12)
) -> Tuple[
bool, # success
Optional[Tuple[
List[np.uint64], # new_roots
List[bigtable.row.Row], # rows
np.ndarray, # removed_edges
datetime.datetime]]]: # timestamp
""" Computes mincut and removes edges accordingly
:param operation_id: uint64
:param source_id: uint64
:param sink_id: uint64
:param source_coord: list of 3 ints
[x, y, z] coordinate of source supervoxel
:param sink_coord: list of 3 ints
[x, y, z] coordinate of sink supervoxel
:param bb_offset: list of 3 ints
[x, y, z] bounding box padding beyond box spanned by coordinates
:return: list of uint64s if successful, or None if no valid split
new root ids
"""
time_start = time.time() # ------------------------------------------
bb_offset = np.array(list(bb_offset))
source_coord = np.array(source_coord)
sink_coord = np.array(sink_coord)
# Decide a reasonable bounding box (NOT guaranteed to be successful!)
coords = np.concatenate([source_coord[:, None],
sink_coord[:, None]], axis=1).T
bounding_box = [np.min(coords, axis=0), np.max(coords, axis=0)]
bounding_box[0] -= bb_offset
bounding_box[1] += bb_offset
root_id_source = self.get_root(source_id)
root_id_sink = self.get_root(sink_id)
# Verify that sink and source are from the same root object
if root_id_source != root_id_sink:
print("root(source) != root(sink)")
return False, None
print("Get roots and check: %.3fms" %
((time.time() - time_start) * 1000))
time_start = time.time() # ------------------------------------------
root_id = root_id_source
# Get edges between local supervoxels
n_chunks_affected = np.product((np.ceil(bounding_box[1] / self.chunk_size)).astype(np.int) -
(np.floor(bounding_box[0] / self.chunk_size)).astype(np.int))
print("Number of affected chunks: %d" % n_chunks_affected)
print("Bounding box:", bounding_box)
print("Bounding box padding:", bb_offset)
print("Atomic ids: %d - %d" % (source_id, sink_id))
print("Root id:", root_id)
edges, affs = self.get_subgraph(root_id, get_edges=True,
bounding_box=bounding_box,
bb_is_coordinate=True)
print(
"Get edges and affs: %.3fms" % ((time.time() - time_start) * 1000))
time_start = time.time() # ------------------------------------------
# Compute mincut
atomic_edges = mincut.mincut(edges, affs, source_id, sink_id)
print("Mincut: %.3fms" % ((time.time() - time_start) * 1000))
time_start = time.time() # ------------------------------------------
if len(atomic_edges) == 0:
print("WARNING: Mincut failed. Try again...")
return False, None
# Check if any edge in the cutset is infinite (== between chunks)
# We would prevent such a cut
atomic_edges_flattened_view = atomic_edges.view(dtype='u8,u8')
edges_flattened_view = edges.view(dtype='u8,u8')
cutset_mask = np.in1d(edges_flattened_view, atomic_edges_flattened_view)
if np.any(np.isinf(affs[cutset_mask])):
print("inf in cutset")
return False, None
# Remove edges
success, result = self._remove_edges(operation_id, atomic_edges)
if not success:
print("remove edges failed")
return False, None
new_roots, rows, time_stamp = result
print("Remove edges: %.3fms" % ((time.time() - time_start) * 1000))
time_start = time.time() # ------------------------------------------
return True, (new_roots, rows, atomic_edges, time_stamp)
def _remove_edges(self, operation_id: np.uint64,
atomic_edges: Sequence[Tuple[np.uint64, np.uint64]]
) -> Tuple[bool, # success
Optional[Tuple[
List[np.uint64], # new_roots
List[bigtable.row.Row], # rows
datetime.datetime]]]: # timestamp
""" Removes atomic edges from the ChunkedGraph
:param operation_id: uint64
:param atomic_edges: list of two uint64s
:return: list of uint64s
new root ids
"""
time_stamp = datetime.datetime.utcnow()
# Make sure that we have a list of edges
if isinstance(atomic_edges[0], np.uint64):
atomic_edges = [atomic_edges]
atomic_edge_affinities = np.array([], dtype=np.float32)
for atomic_edge in atomic_edges:
atomic_edge_affinities = np.concatenate([
atomic_edge_affinities,
[self.get_latest_edge_affinity(atomic_edge)]])
if np.any(np.isinf(atomic_edge_affinities)):
return False, None
atomic_edges = np.array(atomic_edges)
u_atomic_ids = np.unique(atomic_edges)
# Get number of layers and the original root
original_parent_ids = self.get_all_parents(atomic_edges[0, 0])
original_root = original_parent_ids[-1]
# Find lowest level chunks that might have changed
chunk_ids = self.get_chunk_ids_from_node_ids(u_atomic_ids)
u_chunk_ids, u_chunk_ids_idx = np.unique(chunk_ids,
return_index=True)
involved_chunk_id_dict = dict(zip(u_chunk_ids,
u_atomic_ids[u_chunk_ids_idx]))
# Note: After removing the atomic edges, we basically need to build the
# ChunkedGraph for these chunks from the ground up.
# involved_chunk_id_dict stores a representative for each chunk that we
# can use to acquire the parent that knows about all atomic nodes in the
# chunk.
rows = []
# Remove atomic edges
# Removing edges nodewise. We cannot remove edges edgewise because that
# would add up multiple changes to each node (row). Unfortunately,
# the batch write (mutate_rows) from BigTable cannot handle multiple
# changes to the same row within a batch write and only executes
# one of them.
for u_atomic_id in np.unique(atomic_edges):
partners = np.concatenate([atomic_edges[atomic_edges[:, 0] ==
u_atomic_id][:, 1],
atomic_edges[atomic_edges[:, 1] ==
u_atomic_id][:, 0]])
val_dict = {"atomic_connected_partners": partners.tobytes(),
"atomic_connected_affinities":
np.zeros(len(partners), dtype=np.float32).tobytes(),
"atomic_disconnected_partners": partners.tobytes(),
"atomic_disconnected_affinities":
atomic_edge_affinities.tobytes(),
}
rows.append(self.mutate_row(serialize_uint64(u_atomic_id),
self.family_id, val_dict,
time_stamp=time_stamp))
# Dictionaries keeping temporary information about the ChunkedGraph
# while updates are not written to BigTable yet
new_layer_parent_dict = {}
cross_edge_dict = {}
old_id_dict = collections.defaultdict(list)
# This view of the to be removed edges helps us to compute the mask
# of the retained edges in each chunk
double_atomic_edges = np.concatenate([atomic_edges,
atomic_edges[:, ::-1]],
axis=0)
double_atomic_edges_view = double_atomic_edges.view(dtype='u8,u8')
double_atomic_edges_view = \
double_atomic_edges_view.reshape(double_atomic_edges.shape[0])
nodes_in_removed_edges = np.unique(atomic_edges)
# For each involved chunk we need to compute connected components
for chunk_id in involved_chunk_id_dict.keys():
# Get the local subgraph
node_id = involved_chunk_id_dict[chunk_id]
old_parent_id = self.get_parent(node_id)
edges, _ = self.get_subgraph_chunk(old_parent_id, make_unique=False)
# These edges still contain the removed edges.
# For consistency reasons we can only write to BigTable one time.
# Hence, we have to evict the to be removed "atomic_edges" from the
# queried edges.
retained_edges_mask =\
~np.in1d(edges.view(dtype='u8,u8').reshape(edges.shape[0]),
double_atomic_edges_view)
edges = edges[retained_edges_mask]
# The cross chunk edges are passed on to the parents to compute
# connected components in higher layers.
cross_edge_mask = self.get_chunk_ids_from_node_ids(
np.ascontiguousarray(edges[:, 1])) != \
self.get_chunk_id(node_id=node_id)
cross_edges = edges[cross_edge_mask]
edges = edges[~cross_edge_mask]
isolated_nodes = list(filter(
lambda x: x not in edges and self.get_chunk_id(x) == chunk_id,
nodes_in_removed_edges))
# Build the local subgraph and compute connected components
G = nx.from_edgelist(edges)
G.add_nodes_from(isolated_nodes)
ccs = nx.connected_components(G)
# For each connected component we create one new parent
for cc in ccs:
cc_node_ids = np.array(list(cc), dtype=np.uint64)
# Get the associated cross edges
cc_cross_edges = cross_edges[np.in1d(cross_edges[:, 0],
cc_node_ids)]
# Get a new parent id
new_parent_id = self.get_unique_node_id(
self.get_chunk_id(node_id=old_parent_id))
new_parent_id_b = np.array(new_parent_id).tobytes()
from os.path import join
import pickle
from csbdeep.models import Config, CARE
import numpy as np
import json
from scipy import ndimage
from numba import jit
@jit
def pixel_sharing_bipartite(lab1, lab2):
assert lab1.shape == lab2.shape
psg = np.zeros((lab1.max()+1, lab2.max()+1), dtype=np.int64)
for i in range(lab1.size):
psg[lab1.flat[i], lab2.flat[i]] += 1
return psg
def intersection_over_union(psg):
rsum = np.sum(psg, 0, keepdims=True)
csum = np.sum(psg, 1, keepdims=True)
return psg / (rsum + csum - psg)
def matching_iou(psg, fraction=0.5):
iou = intersection_over_union(psg)
matching = iou > fraction
matching[:,0] = False
matching[0,:] = False
return matching
def precision(lab_gt, lab, iou=0.5, partial_dataset=False):
"""
precision = TP / (TP + FP + FN) i.e. "intersection over union" for a graph matching
"""
psg = pixel_sharing_bipartite(lab_gt, lab)
matching = matching_iou(psg, fraction=iou)
assert matching.sum(0).max() < 2
assert matching.sum(1).max() < 2
n_gt = len(set(np.unique(lab_gt)) - {0})
n_hyp = len(set(np.unique(lab)) - {0})
n_matched = matching.sum()
if partial_dataset:
return n_matched , (n_gt + n_hyp - n_matched)
else:
return n_matched / (n_gt + n_hyp - n_matched)
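# Minimal self-contained sketch of the matching metric above (hypothetical
# 2x2 label images; not part of the original evaluation code).
def _example_precision():
    lab_gt = np.array([[1, 1], [0, 2]])
    lab = np.array([[1, 1], [0, 0]])
    # Object 1 is recovered exactly (IoU = 1), object 2 is missed entirely,
    # so the score is 1 matched / (2 GT + 1 predicted - 1 matched) = 0.5
    return precision(lab_gt, lab, iou=0.5)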
def normalize(img, mean, std):
zero_mean = img - mean
return zero_mean / std
def denormalize(x, mean, std):
return x * std + mean
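# Round-trip sketch for the normalization helpers (illustrative values only):
def _example_normalize_roundtrip():
    img = np.array([10.0, 20.0, 30.0])
    m, s = np.mean(img), np.std(img)
    # denormalize(normalize(x)) recovers the original intensities
    return np.allclose(denormalize(normalize(img, m, s), m, s), img)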
with open('experiment.json', 'r') as f:
exp_params = json.load(f)
train_files = np.load(join('..', '..', '..', *exp_params['train_path'].split('/')[4:]))
X_train = train_files['X_train']
mean, std = np.mean(X_train), np.std(X_train)
# Copyright (c) 2018 <NAME>
import numpy as np
from numba import njit, prange
from skimage import measure
try:
import pycuda.driver as cuda
import pycuda.autoinit
from pycuda.compiler import SourceModule
FUSION_GPU_MODE = 1
except Exception as err:
print('Warning: {}'.format(err))
print('Failed to import PyCUDA. Running fusion in CPU mode.')
FUSION_GPU_MODE = 0
class TSDFVolume:
"""Volumetric TSDF Fusion of RGB-D Images.
"""
def __init__(self, vol_bnds, voxel_size, use_gpu=True):
"""Constructor.
Args:
vol_bnds (ndarray): An ndarray of shape (3, 2). Specifies the
xyz bounds (min/max) in meters.
voxel_size (float): The volume discretization in meters.
"""
vol_bnds = np.asarray(vol_bnds)
assert vol_bnds.shape == (3, 2), "[!] `vol_bnds` should be of shape (3, 2)."
# Define voxel volume parameters
self._vol_bnds = vol_bnds
self._voxel_size = float(voxel_size)
self._trunc_margin = 5 * self._voxel_size # truncation on SDF
self._color_const = 256 * 256
# Adjust volume bounds and ensure C-order contiguous
self._vol_dim = np.ceil((self._vol_bnds[:,1]-self._vol_bnds[:,0])/self._voxel_size).copy(order='C').astype(int)
self._vol_bnds[:,1] = self._vol_bnds[:,0]+self._vol_dim*self._voxel_size
self._vol_origin = self._vol_bnds[:,0].copy(order='C').astype(np.float32)
print("Voxel volume size: {} x {} x {} - # points: {:,}".format(
self._vol_dim[0], self._vol_dim[1], self._vol_dim[2],
self._vol_dim[0]*self._vol_dim[1]*self._vol_dim[2])
)
# Initialize pointers to voxel volume in CPU memory
self._tsdf_vol_cpu = np.ones(self._vol_dim).astype(np.float32)
# for computing the cumulative moving average of observations per voxel
self._weight_vol_cpu = np.zeros(self._vol_dim).astype(np.float32)
self._color_vol_cpu = np.zeros(self._vol_dim).astype(np.float32)
self.gpu_mode = use_gpu and FUSION_GPU_MODE
# Copy voxel volumes to GPU
if self.gpu_mode:
self._tsdf_vol_gpu = cuda.mem_alloc(self._tsdf_vol_cpu.nbytes)
cuda.memcpy_htod(self._tsdf_vol_gpu,self._tsdf_vol_cpu)
self._weight_vol_gpu = cuda.mem_alloc(self._weight_vol_cpu.nbytes)
cuda.memcpy_htod(self._weight_vol_gpu,self._weight_vol_cpu)
self._color_vol_gpu = cuda.mem_alloc(self._color_vol_cpu.nbytes)
cuda.memcpy_htod(self._color_vol_gpu,self._color_vol_cpu)
# Cuda kernel function (C++)
self._cuda_src_mod = SourceModule("""
__global__ void integrate(float * tsdf_vol,
float * weight_vol,
float * color_vol,
float * vol_dim,
float * vol_origin,
float * cam_intr,
float * cam_pose,
float * other_params,
float * color_im,
float * depth_im) {
// Get voxel index
int gpu_loop_idx = (int) other_params[0];
int max_threads_per_block = blockDim.x;
int block_idx = blockIdx.z*gridDim.y*gridDim.x+blockIdx.y*gridDim.x+blockIdx.x;
int voxel_idx = gpu_loop_idx*gridDim.x*gridDim.y*gridDim.z*max_threads_per_block+block_idx*max_threads_per_block+threadIdx.x;
int vol_dim_x = (int) vol_dim[0];
int vol_dim_y = (int) vol_dim[1];
int vol_dim_z = (int) vol_dim[2];
if (voxel_idx > vol_dim_x*vol_dim_y*vol_dim_z)
return;
// Get voxel grid coordinates (note: be careful when casting)
float voxel_x = floorf(((float)voxel_idx)/((float)(vol_dim_y*vol_dim_z)));
float voxel_y = floorf(((float)(voxel_idx-((int)voxel_x)*vol_dim_y*vol_dim_z))/((float)vol_dim_z));
float voxel_z = (float)(voxel_idx-((int)voxel_x)*vol_dim_y*vol_dim_z-((int)voxel_y)*vol_dim_z);
// Voxel grid coordinates to world coordinates
float voxel_size = other_params[1];
float pt_x = vol_origin[0]+voxel_x*voxel_size;
float pt_y = vol_origin[1]+voxel_y*voxel_size;
float pt_z = vol_origin[2]+voxel_z*voxel_size;
// World coordinates to camera coordinates
float tmp_pt_x = pt_x-cam_pose[0*4+3];
float tmp_pt_y = pt_y-cam_pose[1*4+3];
float tmp_pt_z = pt_z-cam_pose[2*4+3];
float cam_pt_x = cam_pose[0*4+0]*tmp_pt_x+cam_pose[1*4+0]*tmp_pt_y+cam_pose[2*4+0]*tmp_pt_z;
float cam_pt_y = cam_pose[0*4+1]*tmp_pt_x+cam_pose[1*4+1]*tmp_pt_y+cam_pose[2*4+1]*tmp_pt_z;
float cam_pt_z = cam_pose[0*4+2]*tmp_pt_x+cam_pose[1*4+2]*tmp_pt_y+cam_pose[2*4+2]*tmp_pt_z;
// Camera coordinates to image pixels
int pixel_x = (int) roundf(cam_intr[0*3+0]*(cam_pt_x/cam_pt_z)+cam_intr[0*3+2]);
int pixel_y = (int) roundf(cam_intr[1*3+1]*(cam_pt_y/cam_pt_z)+cam_intr[1*3+2]);
// Skip if outside view frustum
int im_h = (int) other_params[2];
int im_w = (int) other_params[3];
if (pixel_x < 0 || pixel_x >= im_w || pixel_y < 0 || pixel_y >= im_h || cam_pt_z<0)
return;
// Skip invalid depth
float depth_value = depth_im[pixel_y*im_w+pixel_x];
if (depth_value == 0)
return;
// Integrate TSDF
float trunc_margin = other_params[4];
float depth_diff = depth_value-cam_pt_z;
if (depth_diff < -trunc_margin)
return;
float dist = fmin(1.0f,depth_diff/trunc_margin);
float w_old = weight_vol[voxel_idx];
float obs_weight = other_params[5];
float w_new = w_old + obs_weight;
weight_vol[voxel_idx] = w_new;
tsdf_vol[voxel_idx] = (tsdf_vol[voxel_idx]*w_old+obs_weight*dist)/w_new;
// Integrate color
float old_color = color_vol[voxel_idx];
float old_b = floorf(old_color/(256*256));
float old_g = floorf((old_color-old_b*256*256)/256);
float old_r = old_color-old_b*256*256-old_g*256;
float new_color = color_im[pixel_y*im_w+pixel_x];
float new_b = floorf(new_color/(256*256));
float new_g = floorf((new_color-new_b*256*256)/256);
float new_r = new_color-new_b*256*256-new_g*256;
new_b = fmin(roundf((old_b*w_old+obs_weight*new_b)/w_new),255.0f);
new_g = fmin(roundf((old_g*w_old+obs_weight*new_g)/w_new),255.0f);
new_r = fmin(roundf((old_r*w_old+obs_weight*new_r)/w_new),255.0f);
color_vol[voxel_idx] = new_b*256*256+new_g*256+new_r;
}""")
self._cuda_integrate = self._cuda_src_mod.get_function("integrate")
# Determine block/grid size on GPU
gpu_dev = cuda.Device(0)
self._max_gpu_threads_per_block = gpu_dev.MAX_THREADS_PER_BLOCK
n_blocks = int(np.ceil(float(np.prod(self._vol_dim))/float(self._max_gpu_threads_per_block)))
grid_dim_x = min(gpu_dev.MAX_GRID_DIM_X,int(np.floor(np.cbrt(n_blocks))))
grid_dim_y = min(gpu_dev.MAX_GRID_DIM_Y,int(np.floor(np.sqrt(n_blocks/grid_dim_x))))
grid_dim_z = min(gpu_dev.MAX_GRID_DIM_Z,int(np.ceil(float(n_blocks)/float(grid_dim_x*grid_dim_y))))
self._max_gpu_grid_dim = np.array([grid_dim_x,grid_dim_y,grid_dim_z]).astype(int)
self._n_gpu_loops = int(np.ceil(float(np.prod(self._vol_dim)) / float(self._max_gpu_threads_per_block * np.prod(self._max_gpu_grid_dim))))
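# Worked example (illustrative numbers): a 256 x 256 x 128 voxel volume has
# 8,388,608 voxels; with 1024 threads per block this needs 8192 blocks, the
# grid dimensions become (20, 20, 21), one kernel launch covers
# 1024 * 20 * 20 * 21 = 8,601,600 voxel slots, and _n_gpu_loops = 1.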
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for baseline_mechanisms."""
from absl.testing import absltest
import numpy as np
from dp_topk import baseline_mechanisms
from dp_topk.differential_privacy import NeighborType
class BaselineMechanismsTest(absltest.TestCase):
def test_sorted_top_k_returns_correct_when_input_is_ascending(self):
counts = np.arange(10)
k = 3
expected_items = np.array([9, 8, 7])
items = baseline_mechanisms.sorted_top_k(counts, k)
self.assertSequenceAlmostEqual(items, expected_items)
def test_sorted_top_k_returns_correct_when_input_is_descending(self):
counts = np.arange(10)[::-1]
k = 3
expected_items = np.arange(k)
items = baseline_mechanisms.sorted_top_k(counts, k)
self.assertSequenceAlmostEqual(items, expected_items)
def test_laplace_no_noise_add_remove(self):
counts = np.arange(2)
k = 1
c = 10
expected_items = [1]
items = baseline_mechanisms.laplace_mechanism(
counts, k, c, epsilon=1e6, neighbor_type=NeighborType.ADD_REMOVE)
self.assertSequenceAlmostEqual(items, expected_items)
def test_laplace_no_noise_swap(self):
counts = np.arange(2)
k = 1
c = 10
expected_items = [1]
items = baseline_mechanisms.laplace_mechanism(
counts, k, c, epsilon=1e6, neighbor_type=NeighborType.SWAP)
self.assertSequenceAlmostEqual(items, expected_items)
def test_laplace_with_noise_add_remove(self):
k = 1
epsilon = 1.1
trials = 100000
z1 = 1 + np.random.laplace(0, k / epsilon, size=trials)
z2 = np.random.laplace(0, k / epsilon, size=trials)
expected_correctness = sum(z1 > z2) / trials
counts = np.array([1, 0])
correct = 0
for _ in range(trials):
items = baseline_mechanisms.laplace_mechanism(
item_counts=counts,
k=k,
c=2,
epsilon=epsilon,
neighbor_type=NeighborType.ADD_REMOVE)
if items[0] == 0:
correct += 1
real_correctness = correct / trials
self.assertAlmostEqual(expected_correctness, real_correctness, places=2)
def test_laplace_with_noise_swap(self):
k = 1
epsilon = 1.1
trials = 100000
z1 = 1 + np.random.laplace(0, 2 * k / epsilon, size=trials)
z2 = np.random.laplace(0, 2 * k / epsilon, size=trials)
expected_correctness = sum(z1 > z2) / trials
counts = np.array([1, 0])
correct = 0
for _ in range(trials):
items = baseline_mechanisms.laplace_mechanism(
item_counts=counts,
k=k,
c=2,
epsilon=epsilon,
neighbor_type=NeighborType.SWAP)
if items[0] == 0:
correct += 1
real_correctness = correct / trials
self.assertAlmostEqual(expected_correctness, real_correctness, places=2)
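# Note on the two noise tests above: under ADD_REMOVE neighbors the Laplace
# scale is k / epsilon, while under SWAP it is 2 * k / epsilon, so the
# expected accuracy is estimated from the matching scale in each case.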
def test_em_epsilon_cdp_delta_zero(self):
k = 10
epsilon = 1.1
delta = 0
local_epsilon = baseline_mechanisms.em_epsilon_cdp(epsilon, delta, k)
self.assertAlmostEqual(epsilon / k, local_epsilon, places=4)
def test_em_epsilon_cdp_k_one(self):
k = 1
epsilon = 1.1
delta = 0.1
local_epsilon = baseline_mechanisms.em_epsilon_cdp(epsilon, delta, k)
self.assertAlmostEqual(epsilon, local_epsilon, places=4)
def test_em_epsilon_cdp_k_ten(self):
k = 10
epsilon = 1.1
delta = 0.1
local_epsilon = baseline_mechanisms.em_epsilon_cdp(epsilon, delta, k)
self.assertAlmostEqual(0.29264, local_epsilon, places=4)
def test_cdp_peeling_no_noise(self):
counts = np.arange(2)
k = 1
expected_items = [1]
items = baseline_mechanisms.cdp_peeling_mechanism(
counts, k, epsilon=1e6, delta=0.1)
self.assertEqual(items, expected_items)
def test_cdp_peeling_with_noise(self):
k = 1
epsilon = 1.1
trials = 100000
counts = np.array([1, 0])
probs = np.exp(epsilon * counts)
probs = probs / sum(probs)
expected_correctness = probs[0]
correct = 0
for _ in range(trials):
items = baseline_mechanisms.cdp_peeling_mechanism(
item_counts=counts, k=k, epsilon=epsilon, delta=0.1)
if items[0] == 0:
correct += 1
real_correctness = correct / trials
self.assertAlmostEqual(expected_correctness, real_correctness, places=2)
def test_pnf_peeling_no_noise(self):
counts = np.arange(2)
from cemc.mcmc import NetworkObserver
import h5py as h5
import numpy as np
from ase.visualize import view
from scipy.stats import linregress
import os
from ase.units import kB
class Mode(object):
bring_system_into_window = 0
sample_in_window = 1
equillibriate = 2
transition_path_sampling = 3
class NucleationSampler(object):
"""
Class that do the book-keeping needed for free energy calculations of
nucleation
:Keyword arguments:
* *size_window_width* Size range in each window
* *max_cluster_size* Maximum cluster size
* *merge_strategy* How to perform the actual merging (Recommended to
use the default)
* *max_one_cluster* Ensure that there is only *one* cluster present in
the system. For larger cluster sizes this should not matter.
"""
def __init__(self, **kwargs):
self.size_window_width = kwargs.pop("size_window_width")
self.max_cluster_size = kwargs.pop("max_cluster_size")
self.merge_strategy = "normalize_overlap"
self.max_one_cluster = False
self.allow_solutes = True
if "merge_strategy" in kwargs.keys():
self.merge_strategy = kwargs.pop("merge_strategy")
if "max_one_cluster" in kwargs.keys():
self.max_one_cluster = kwargs.pop("max_one_cluster")
allowed_merge_strategies = ["normalize_overlap", "fit"]
if self.merge_strategy not in allowed_merge_strategies:
msg = "Merge strategy has to be one of {}".format(
allowed_merge_strategies)
raise ValueError(msg)
chem_pot = kwargs.pop("chemical_potential")
self.n_bins = self.size_window_width
self.n_windows = int(self.max_cluster_size / self.size_window_width)
self.histograms = []
self.singlets = []
n_singlets = len(chem_pot.keys())
for i in range(self.n_windows):
if i == 0:
self.histograms.append(np.ones(self.n_bins))
self.singlets.append(np.zeros((self.n_bins, n_singlets)))
else:
self.histograms.append(np.ones(self.n_bins + 1))
self.singlets.append(np.zeros((self.n_bins + 1, n_singlets)))
self.current_window = 0
self.mode = Mode.bring_system_into_window
self.current_cluster_size = 0
def _get_window_boundaries(self, num):
"""
Return the upper and lower boundary of the windows
:param num: Window index
"""
if num == 0:
lower = 0
else:
lower = (num * self.size_window_width) - 1
if num == self.n_windows - 1:
upper = self.max_cluster_size
else:
upper = (num + 1) * self.size_window_width
return int(lower), int(upper)
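# Worked example (illustrative numbers): with size_window_width = 10 and
# max_cluster_size = 50 there are 5 windows; window 0 covers sizes [0, 10),
# window 1 covers [9, 20) (one bin of overlap with its predecessor), and the
# last window covers [39, 50).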
def is_in_window(self, network, retstat=False):
"""
Check if the current network state belongs to the current window
:param network: Instance of :py:class:`cemc.mcmc.NetworkObserver`
:param retstat: If true it will also return the network statistics
"""
network.reset()
network(None) # Explicitly call the network observer
stat = network.get_statistics()
lower, upper = self._get_window_boundaries(self.current_window)
max_size_ok = stat["max_size"] >= lower and stat["max_size"] < upper
n_clusters_ok = True
if self.max_one_cluster:
n_clusters = stat["number_of_clusters"]
n_clusters_ok = (n_clusters == 1)
if self.mode == Mode.transition_path_sampling:
max_size_ok = True # Do not restrict the window size in this case
n_clusters_ok = True
network.reset()
if retstat:
return max_size_ok and n_clusters_ok, stat
return max_size_ok and n_clusters_ok
def bring_system_into_window(self, network):
"""
Brings the system into the current window
:param network: Instance of :py:class:`cemc.mcmc.NetworkObserver`
"""
lower, upper = self._get_window_boundaries(self.current_window)
size = int(0.5 * (lower + upper) + 1)
        # TODO: Network observers no longer have a grow_cluster function;
        # this should be handled in Python instead.
network.grow_cluster(size)
network(None)
stat = network.get_statistics()
network.reset()
if stat["max_size"] != size:
msg = "Size of created cluster does not match the one requested!\n"
msg += "Size of created: {}. ".format(stat["max_size"])
msg += "Requested: {}".format(size)
raise RuntimeError(msg)
if stat["number_of_clusters"] != 1:
msg = "More than one cluster exists!\n"
msg += "Supposed to create 1 cluster, "
msg += "created {}".format(stat["number_of_clusters"])
raise RuntimeError(msg)
self.current_cluster_size = stat["max_size"]
def _get_indx(self, size):
"""
Get the corresponding bin
        :param size: Cluster size for which the corresponding bin number
            should be computed
"""
lower, upper = self._get_window_boundaries(self.current_window)
# indx = int( (size-lower)/float(upper-lower) )
indx = int(size - lower)
return indx
def update_histogram(self, mc_obj):
"""
Update the histogram
:param mc_obj: Instance of the sampler
(typically `cemc.mcmc.SGCNucleation`)
"""
stat = mc_obj.network.get_statistics()
indx = self._get_indx(stat["max_size"])
if indx < 0:
lower, upper = self._get_window_boundaries(self.current_window)
msg = "Given size: {}. ".format(stat["max_size"])
msg += "Boundaries: [{},{})".format(lower, upper)
raise ValueError(msg)
self.histograms[self.current_window][indx] += 1
if mc_obj.name == "SGCMonteCarlo":
new_singlets = np.zeros_like(mc_obj.averager.singlets)
new_singlets = mc_obj.atoms._calc.get_singlets()
self.singlets[self.current_window][indx, :] += new_singlets
def helmholtz_free_energy(self, singlets, hist):
"""
Compute the Helmholtz Free Energy barrier
:param singlets: Thermal average singlet terms
:param hist: Histogram of visits
"""
# N = len(self.atoms)
# TODO: Fix this
N = 1000
beta_gibbs = -
| np.log(hist) | numpy.log |
# pylint:disable=no-name-in-module, import-error
import aiofiles
import time
from fastapi import APIRouter, File, UploadFile, Response, status, Depends
import app.api.utils_com as utils_com
from app.api import classes
from app import crud, fileserver_requests
from app.api.dependencies import get_db
from app.api import napari_viewer
from fastapi.responses import JSONResponse
from app.api import utils_import, utils_paths, utils_export
import numpy as np
import zarr
import pathlib
from sqlalchemy.orm import Session
import warnings
import asyncio
from app.api.com.api_request_models import (ViewImageRequest, DeleteRequest, UpdateHintRequest, UpdateChannelNamesRequest, UpdateNameRequest,
ReadFromPathRequest)
router = APIRouter()
# GET
@router.get("/api/images/fetch_all", status_code=200)
async def fetch_all_images(sess: Session = Depends(get_db)):
"""
API request to return a list of all images
"""
image_list = utils_com.get_com_image_list(sess)
return image_list
@router.get("/api/images/fetch_by_id/{image_uid}", status_code=200)
async def fetch_image_by_id(
image_uid: str
):
"""
API request to return a single image by uid
"""
image_uid = int(image_uid)
image = utils_com.get_com_image_by_uid(image_uid)
return image
@router.get("/api/images/fetch_thumbnail_path/{image_uid}", status_code=200)
async def fetch_thumbnail_path(image_uid: str):
'''
    API request to return the path of an image's thumbnail
'''
path = utils_paths.make_thumbnail_path(int(image_uid))
path = "http://" + utils_paths.static_fileserver + path.as_posix()
return {"path": path}
# return utils_paths.fileserver.joinpath()
@router.get("/api/images/export_mistos_image/{image_uid}", status_code=201)
async def export_image(image_uid: str):
'''
API request to export an image to the export folder
'''
path = utils_export.export_mistos_image(int(image_uid))
return {"path": path}
# POST
@router.post("/api/images/view_by_id", status_code=200)
async def view_image_by_id(post: ViewImageRequest):
'''
API expects a json of format {
"image_id": int,
"display_result_layers": bool,
"display_background_layers":bool}.
It reads the image by id from the database and opens it with the napari viewer.
'''
c_int_image = crud.read_image_by_uid(post.image_id)
napari_viewer.view(
c_int_image,
post.display_result_layers,
post.display_background_layers
)
return JSONResponse(content={
"imageId": c_int_image.uid,
"imageClosed": True
})
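# Example request body for /api/images/view_by_id, matching the docstring above
# (the image id 1 is just a placeholder):
#   {"image_id": 1, "display_result_layers": true, "display_background_layers": false}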
@router.post("/api/images/upload", status_code=201)
async def upload_image(file: UploadFile = File(...)):
'''
API Request to upload an image.
'''
path = utils_paths.make_tmp_file_path(file.filename)
path = utils_paths.fileserver.joinpath(path).as_posix()
async with aiofiles.open(path, 'wb') as out_file:
while content := await file.read(1024): # async read chunk
await out_file.write(content) # async write chunk
image_list, metadata_dict, metadata_OMEXML = utils_import.read_image_file(
path)
metadata_dict["original_filename"] = metadata_dict["original_filename"][2:]
print(metadata_dict)
for image, i in image_list:
img_zarr = zarr.creation.array(image)
int_image = classes.IntImage(
uid=-1,
series_index=i,
name=metadata_dict["original_filename"], # .replace("\#", "_"),
metadata=metadata_dict, # This is not the finished metadata!
data=img_zarr,
metadata_omexml=metadata_OMEXML
)
int_image.on_init()
fileserver_requests.delete_file(path)
return {"Result": "OK"}
@router.post("/api/images/upload_to_group_{group_id}", status_code=201)
async def upload_images_to_group(group_id: str, file: UploadFile = File(...)):
'''
    API Request to upload an image and add it to an experiment group.
'''
path = utils_paths.make_tmp_file_path(file.filename)
path = utils_paths.fileserver.joinpath(path).as_posix()
async with aiofiles.open(path, 'wb') as out_file:
while content := await file.read(1024): # async read chunk
await out_file.write(content) # async write chunk
# open experiment group
group_id = int(group_id)
db_experiment_group = crud.read_experiment_db_group_by_uid(group_id)
image_ids = [image.uid for image in db_experiment_group.images]
image_list, metadata_dict, metadata_OMEXML = utils_import.read_image_file(
path)
metadata_dict["original_filename"] = metadata_dict["original_filename"][2:]
for image, i in image_list:
img_zarr = zarr.creation.array(image)
int_image = classes.IntImage(
uid=-1,
series_index=i,
name=metadata_dict["original_filename"], # .replace("\#", "_"),
metadata=metadata_dict, # This is not the finished metadata!
data=img_zarr,
metadata_omexml=metadata_OMEXML
)
int_image.on_init()
image_ids.append(int_image.uid)
db_experiment_group.update_images(image_ids)
fileserver_requests.delete_file(path)
return {"Result": "OK"}
@router.post("/api/images/upload_max_z_projection", status_code=201)
async def upload_image_max_z_projection(file: UploadFile = File(...)):
'''
API Request to upload an image and save its max z projection.
'''
path = utils_paths.make_tmp_file_path(file.filename)
path = utils_paths.fileserver.joinpath(path).as_posix()
async with aiofiles.open(path, 'wb') as out_file:
while content := await file.read(1024): # async read chunk
await out_file.write(content) # async write chunk
image_list, metadata_dict, metadata_OMEXML = utils_import.read_image_file(
path)
for image, i in image_list:
img_zarr = zarr.creation.array(image)
img_zarr = np.array(img_zarr).max(axis=0)
img_zarr = img_zarr[np.newaxis, ...]
int_image = classes.IntImage(
uid=-1,
series_index=i,
name=metadata_dict["original_filename"],
metadata=metadata_dict, # This is not the finished metadata!
data=img_zarr,
metadata_omexml=metadata_OMEXML
)
int_image.on_init()
fileserver_requests.delete_file(path)
return {"Result": "OK"}
# img = file.file
@router.post("/api/images/read_from_path", status_code=201)
async def read_from_path(read_image_from_path_request: ReadFromPathRequest, response: Response):
'''
API Request to import an image from a filepath
'''
path = pathlib.Path(read_image_from_path_request.path)
print(path)
if path.exists():
path = path.as_posix()
image_list, metadata_dict, metadata_OMEXML = utils_import.read_image_file(
path)
for image, i in image_list:
img_zarr = zarr.creation.array(image)
int_image = classes.IntImage(
uid=-1,
series_index=i,
name=metadata_dict["original_filename"],
metadata=metadata_dict, # This is not the finished metadata!
data=img_zarr,
metadata_omexml=metadata_OMEXML
)
int_image.on_init()
return {"Result": "OK"}
else:
response.status_code = status.HTTP_404_NOT_FOUND
return {"Result": "File not found"}
@router.post("/api/images/read_from_path_max_z_projection", status_code=201)
async def read_from_path_max_z_projection(read_image_from_path_request: ReadFromPathRequest, response: Response):
'''
API Request to import an image as max-z-projection from a filepath
'''
path = pathlib.Path(read_image_from_path_request.path)
if path.exists():
path = path.as_posix()
image_list, metadata_dict, metadata_OMEXML = utils_import.read_image_file(
path)
for image, i in image_list:
img_zarr = zarr.creation.array(image)
img_zarr =
| np.array(img_zarr) | numpy.array |
import numpy as np
from numpy.matlib import repmat
import cv2
from scipy.ndimage import map_coordinates
from lib.utils import cos_window,gaussian2d_rolled_labels
from lib.fft_tools import fft2,ifft2
from cftracker.base import BaseCF
from cftracker.feature import extract_hog_feature,extract_cn_feature,extract_cn_feature_byw2c
from skimage.feature.peak import peak_local_max
from lib.utils import APCE
def mod_one(a, b):
y = np.mod(a - 1, b) + 1
return y
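# e.g. mod_one(np.arange(1, 6), 3) -> array([1, 2, 3, 1, 2]); values wrap into the range 1..b
# (1-based modular indexing, as used for the rolled Gaussian labels below).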
def cf_confidence(response_cf):
peak_loc_indices = peak_local_max(response_cf, min_distance=1,indices=True)
max_peak_val=0
secondmax_peak_val=0
max_peak_val_indice=[0,0]
secondmax_peak_val_indice=[0,0]
    for indice in peak_loc_indices:
        val = response_cf[tuple(indice)]  # index with (row, col) rather than a 1-D array
        if val > max_peak_val:
            max_peak_val_indice = indice
            max_peak_val = val
        elif val > secondmax_peak_val:
            secondmax_peak_val_indice = indice
            secondmax_peak_val = val
    pass
def confidence_cf_apce(response_cf):
apce=APCE(response_cf)
conf=np.clip(apce/50,a_min=0,a_max=1)
return conf
# max val at the bottom right loc
def gaussian2d_rolled_labels_staple(sz, sigma):
halfx, halfy = int(np.floor((sz[0] - 1) / 2)), int(np.floor((sz[1] - 1) / 2))
x_range = np.arange(-halfx, halfx + 1)
y_range = np.arange(-halfy, halfy + 1)
i, j = np.meshgrid(y_range, x_range)
i_mod_range = mod_one(i, sz[1])
j_mod_range = mod_one(j, sz[0])
labels = np.zeros((sz[1], sz[0]))
labels[i_mod_range - 1, j_mod_range - 1] = np.exp(-(i ** 2 + j ** 2) / (2 * sigma ** 2))
return labels
def crop_filter_response(response_cf, response_sz):
h, w = response_cf.shape[:2]
half_width = int(np.floor(response_sz[0] / 2))
half_height = int(np.floor(response_sz[1] / 2))
range_i, range_j = np.arange(-half_height, half_height + 1), np.arange(-half_width, half_width + 1)
i, j = np.meshgrid(mod_one(range_i, h), mod_one(range_j, w))
new_responses = response_cf[i - 1, j - 1]
return new_responses.T
# pad [h,w] format
def pad(img,pad):
h,w=img.shape[:2]
delta=(int((pad[0]-h)/2),int((pad[1]-w)/2))
c=img.shape[2]
r=np.zeros((pad[0],pad[1],c))
idy=[delta[0],delta[0]+h]
idx=[delta[1],delta[1]+w]
r[idy[0]:idy[1], idx[0]:idx[1], :] = img
return r
def parameters_to_projective_matrix(p):
"""
:param p: [s,rot,x,y]
:return:
"""
s,rot,x,y=p
R=np.array([[np.cos(rot),-np.sin(rot)],
[np.sin(rot),np.cos(rot)]])
T=np.diag([1.,1.,1.])
T[:2,:2]=s*R
T[0,2]=x
T[1,2]=y
return T
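# e.g. parameters_to_projective_matrix([1.0, 0.0, 0.0, 0.0]) returns the 3x3 identity;
# nonzero s/rot/x/y give a scaled rotation in the upper-left 2x2 block plus a translation
# in the last column.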
def getLKcorner(warp_p,sz):
template_nx,template_ny=sz
nx=(sz[0]-1)/2
ny=(sz[1]-1)/2
tmplt_pts=np.array([[-nx,-ny],
[-nx,template_ny-ny],
[template_nx-nx,template_ny-ny],
[template_nx-nx,-ny]]).T
if warp_p.shape[0]==2:
        M=np.concatenate((warp_p,np.array([[0,0,1]])),axis=0)
M[0,0]=M[0,0]+1
M[1,1]=M[1,1]+1
else:
M=warp_p
warp_pts=M.dot(np.concatenate((tmplt_pts,np.ones((1,4))),axis=0))
c=np.array([[(1+template_nx)/2],[(1+template_ny)/2],[1]])
warp_pts=warp_pts[:2,:]
return warp_pts
def PSR(response,rate):
max_response=np.max(response)
h,w=response.shape
k=4/(h*w)
yy,xx=np.unravel_index(np.argmax(response, axis=None),response.shape)
idx=np.arange(w)-xx
idy=np.arange(h)-yy
idx=repmat(idx,h,1)
idy=repmat(idy,w,1).T
t=idx**2+idy**2
delta=1-np.exp(-k*t.astype(np.float32))
r=(max_response-response)/delta
r[np.isnan(r)]=np.inf
return np.min(r)
def get_center_likelihood(likelihood_map, sz):
h,w=likelihood_map.shape[:2]
n1= h - sz[1] + 1
n2= w - sz[0] + 1
sat=cv2.integral(likelihood_map)
i,j=np.arange(n1),
| np.arange(n2) | numpy.arange |
# --------------------------------------------------------
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
import numpy as np
import scipy.io as sio
import os
from numpy.linalg import inv
import torch
import cv2
import argparse
import IPython
import platform
PYTHON2 = True
if platform.python_version().startswith("3"):
PYTHON2 = False
# if PYTHON2:
from . import _init_paths
import PyKDL
from .kdl_parser import kdl_tree_from_urdf_model
from .urdf_parser_py.urdf import URDF
np.random.seed(233)
def rotX(rotx):
RotX = np.array(
[
[1, 0, 0, 0],
[0, np.cos(rotx), -
| np.sin(rotx) | numpy.sin |
#!/usr/bin/env python
#
# Author: <NAME> <<EMAIL>>
#
import time
import ctypes
import tempfile
import numpy
import h5py
from pyscf import lib
from pyscf.lib import logger
from pyscf import ao2mo
from pyscf.cc import ccsd
from pyscf.cc import _ccsd
#
# JCP, 95, 2623
# JCP, 95, 2639
#
def gamma1_intermediates(mycc, t1, t2, l1, l2):
nocc, nvir = t1.shape
doo =-numpy.einsum('ja,ia->ij', l1, t1)
dvv = numpy.einsum('ia,ib->ab', l1, t1)
dvo = l1.T
xtv = numpy.einsum('ie,me->im', t1, l1)
dov = t1 - numpy.einsum('im,ma->ia', xtv, t1)
#:doo -= numpy.einsum('jkab,ikab->ij', l2, theta)
#:dvv += numpy.einsum('jica,jicb->ab', l2, theta)
#:xt1 = numpy.einsum('mnef,inef->mi', l2, make_theta(t2))
#:xt2 = numpy.einsum('mnaf,mnef->ea', l2, make_theta(t2))
#:dov += numpy.einsum('imae,me->ia', make_theta(t2), l1)
#:dov -= numpy.einsum('ma,ie,me->ia', t1, t1, l1)
#:dov -= numpy.einsum('mi,ma->ia', xt1, t1)
#:dov -= numpy.einsum('ie,ae->ia', t1, xt2)
max_memory = mycc.max_memory - lib.current_memory()[0]
unit = nocc*nvir**2
blksize = max(ccsd.BLKMIN, int(max_memory*.95e6/8/unit))
for p0, p1 in prange(0, nocc, blksize):
theta = make_theta(t2[p0:p1])
doo[p0:p1] -= lib.dot(theta.reshape(p1-p0,-1), l2.reshape(nocc,-1).T)
dov[p0:p1] += numpy.einsum('imae,me->ia', theta, l1)
xt1 = lib.dot(l2.reshape(nocc,-1), theta.reshape(p1-p0,-1).T)
dov[p0:p1] -= numpy.einsum('mi,ma->ia', xt1, t1)
xt2 = lib.dot(theta.reshape(-1,nvir).T, l2[p0:p1].reshape(-1,nvir))
dov -= numpy.einsum('ie,ae->ia', t1, xt2)
dvv += lib.dot(l2[p0:p1].reshape(-1,nvir).T, theta.reshape(-1,nvir))
return doo, dov, dvo, dvv
# gamma2 intermediates in Chemist's notation
def gamma2_intermediates(mycc, t1, t2, l1, l2):
tmpfile = tempfile.NamedTemporaryFile(dir=lib.param.TMPDIR)
with h5py.File(tmpfile.name, 'w') as f:
gamma2_outcore(mycc, t1, t2, l1, l2, f)
nocc, nvir = f['dovov'].shape[:2]
nov = nocc * nvir
dovvv = numpy.empty((nocc,nvir,nvir,nvir))
ao2mo.outcore._load_from_h5g(f['dovvv'], 0, nov, dovvv.reshape(nov,-1))
dvvov = None
d2 = (f['dovov'].value, f['dvvvv'].value, f['doooo'].value,
f['doovv'].value, f['dovvo'].value, dvvov, dovvv,
f['dooov'].value)
for key in f.keys():
del(f[key])
return d2
def gamma2_outcore(mycc, t1, t2, l1, l2, h5fobj):
log = logger.Logger(mycc.stdout, mycc.verbose)
nocc, nvir = t1.shape
nov = nocc * nvir
nvir_pair = nvir * (nvir+1) //2
dovov = h5fobj.create_dataset('dovov', (nocc,nvir,nocc,nvir), 'f8')
dvvvv = h5fobj.create_dataset('dvvvv', (nvir_pair,nvir_pair), 'f8')
doooo = h5fobj.create_dataset('doooo', (nocc,nocc,nocc,nocc), 'f8')
doovv = h5fobj.create_dataset('doovv', (nocc,nocc,nvir,nvir), 'f8')
dovvo = h5fobj.create_dataset('dovvo', (nocc,nvir,nvir,nocc), 'f8')
dooov = h5fobj.create_dataset('dooov', (nocc,nocc,nocc,nvir), 'f8')
_tmpfile = tempfile.NamedTemporaryFile(dir=lib.param.TMPDIR)
fswap = h5py.File(_tmpfile.name)
mOvOv = fswap.create_dataset('mOvOv', (nocc,nvir,nocc,nvir), 'f8')
mOVov = fswap.create_dataset('mOVov', (nocc,nvir,nocc,nvir), 'f8')
moo = numpy.empty((nocc,nocc))
mvv = numpy.zeros((nvir,nvir))
max_memory = mycc.max_memory - lib.current_memory()[0]
unit = nocc*nvir**2 * 5
blksize = max(ccsd.BLKMIN, int(max_memory*.95e6/8/unit))
log.debug1('rdm intermediates pass 1: block size = %d, nocc = %d in %d blocks',
blksize, nocc, int((nocc+blksize-1)/blksize))
time1 = time.clock(), time.time()
for istep, (p0, p1) in enumerate(prange(0, nocc, blksize)):
#:theta = make_theta(t2[p0:p1])
#:pOvOv = numpy.einsum('ikca,jkcb->jbia', l2, t2[p0:p1])
#:pOVov = -numpy.einsum('ikca,jkbc->jbia', l2, t2[p0:p1])
#:pOVov += numpy.einsum('ikac,jkbc->jbia', l2, theta)
pOvOv = numpy.empty((nocc,p1-p0,nvir,nvir))
pOVov = numpy.empty((nocc,p1-p0,nvir,nvir))
t2a = numpy.empty((p1-p0,nvir,nocc,nvir))
t2b = numpy.empty((p1-p0,nvir,nocc,nvir))
theta = make_theta(t2[p0:p1])
tmp = numpy.empty_like(t2a)
for i in range(p1-p0):
t2a[i] = t2[p0+i].transpose(2,0,1)
t2b[i] = t2[p0+i].transpose(1,0,2)
tmp[i] = theta[i].transpose(1,0,2)
t2a = t2a.reshape(-1,nov)
t2b = t2b.reshape(-1,nov)
theta, tmp = tmp.reshape(-1,nov), None
for i in range(nocc):
pOvOv[i] = lib.dot(t2a, l2[i].reshape(nov,-1)).reshape(-1,nvir,nvir)
pOVov[i] = lib.dot(t2b, l2[i].reshape(nov,-1), -1).reshape(-1,nvir,nvir)
pOVov[i] += lib.dot(theta, _cp(l2[i].transpose(0,2,1).reshape(nov,-1))).reshape(-1,nvir,nvir)
theta = t2a = t2b = None
mOvOv[p0:p1] = pOvOv.transpose(1,2,0,3)
mOVov[p0:p1] = pOVov.transpose(1,2,0,3)
fswap['mvOvO/%d'%istep] = pOvOv.transpose(3,1,2,0)
fswap['mvOVo/%d'%istep] = pOVov.transpose(3,1,2,0)
moo[p0:p1] =(numpy.einsum('ljdd->jl', pOvOv) * 2
+ numpy.einsum('ljdd->jl', pOVov))
mvv +=(numpy.einsum('llbd->bd', pOvOv[p0:p1]) * 2
+ numpy.einsum('llbd->bd', pOVov[p0:p1]))
pOvOv = pOVov = None
time1 = log.timer_debug1('rdm intermediates pass1 [%d:%d]'%(p0, p1), *time1)
mia =(numpy.einsum('kc,ikac->ia', l1, t2) * 2
- numpy.einsum('kc,ikca->ia', l1, t2))
mab = numpy.einsum('kc,kb->cb', l1, t1)
mij = numpy.einsum('kc,jc->jk', l1, t1) + moo*.5
gooov = numpy.einsum('ji,ka->jkia', moo*-.5, t1)
max_memory = mycc.max_memory - lib.current_memory()[0]
unit = nocc**3 + nocc**2*nvir + nocc*nvir**2*6
blksize = max(ccsd.BLKMIN, int(max_memory*.95e6/8/unit))
log.debug1('rdm intermediates pass 2: block size = %d, nocc = %d in %d blocks',
blksize, nocc, int((nocc+blksize-1)/blksize))
for p0, p1 in prange(0, nocc, blksize):
tau = _ccsd.make_tau(t2[p0:p1], t1[p0:p1], t1)
#:goooo = numpy.einsum('ijab,klab->klij', l2, tau)*.5
goooo = lib.dot(tau.reshape(-1,nvir**2), l2.reshape(-1,nvir**2).T, .5)
goooo = goooo.reshape(-1,nocc,nocc,nocc)
h5fobj['doooo'][p0:p1] = make_theta(goooo).transpose(0,2,1,3)
#:gooov[p0:p1] -= numpy.einsum('ib,jkba->jkia', l1, tau)
#:gooov[p0:p1] -= numpy.einsum('jkba,ib->jkia', l2[p0:p1], t1)
#:gooov[p0:p1] += numpy.einsum('jkil,la->jkia', goooo, t1*2)
for i in range(p0,p1):
gooov[i] -= lib.dot(_cp(tau[i-p0].transpose(0,2,1).reshape(-1,nvir)),
l1.T).reshape(nocc,nvir,nocc).transpose(0,2,1)
gooov[i] -= lib.dot(_cp(l2[i].transpose(0,2,1).reshape(-1,nvir)),
t1.T).reshape(nocc,nvir,nocc).transpose(0,2,1)
lib.dot(goooo.reshape(-1,nocc), t1, 2, gooov[p0:p1].reshape(-1,nvir), 1)
#:goovv -= numpy.einsum('jk,ikab->ijab', mij, tau)
goovv = numpy.einsum('ia,jb->ijab', mia[p0:p1], t1)
for i in range(p1-p0):
lib.dot(mij, tau[i].reshape(nocc,-1), -1, goovv[i].reshape(nocc,-1), 1)
goovv[i] += .5 * l2[p0+i]
goovv[i] += .5 * tau[i]
#:goovv -= numpy.einsum('cb,ijac->ijab', mab, t2[p0:p1])
#:goovv -= numpy.einsum('bd,ijad->ijab', mvv*.5, tau)
lib.dot(t2[p0:p1].reshape(-1,nvir), mab, -1, goovv.reshape(-1,nvir), 1)
lib.dot(tau.reshape(-1,nvir), mvv.T, -.5, goovv.reshape(-1,nvir), 1)
tau = None
#==== mem usage nocc**3 + nocc*nvir**2
pOvOv = _cp(mOvOv[p0:p1])
pOVov = _cp(mOVov[p0:p1])
#:gooov[p0:p1,:] += numpy.einsum('jaic,kc->jkia', pOvOv, t1)
#:gooov[:,p0:p1] -= numpy.einsum('kaic,jc->jkia', pOVov, t1)
tmp = lib.dot(pOvOv.reshape(-1,nvir), t1.T).reshape(p1-p0,-1,nocc,nocc)
gooov[p0:p1,:] += tmp.transpose(0,3,2,1)
lib.dot(t1, pOVov.reshape(-1,nvir).T, 1, tmp.reshape(nocc,-1), 0)
gooov[:,p0:p1] -= tmp.reshape(nocc,p1-p0,nvir,nocc).transpose(0,1,3,2)
#:tmp = numpy.einsum('ikac,jc->jika', l2, t1[p0:p1])
#:gOvVo -= numpy.einsum('jika,kb->jabi', tmp, t1)
#:gOvvO = numpy.einsum('jkia,kb->jabi', tmp, t1) + pOvOv.transpose(0,3,1,2)
tmp = tmp.reshape(-1,nocc,nocc,nvir)
lib.dot(t1[p0:p1], l2.reshape(-1,nvir).T, 1, tmp.reshape(p1-p0,-1))
gOvVo = numpy.einsum('ia,jb->jabi', l1, t1[p0:p1])
gOvvO = numpy.empty((p1-p0,nvir,nvir,nocc))
for i in range(p1-p0):
gOvVo[i] -= lib.dot(_cp(tmp[i].transpose(0,2,1).reshape(-1,nocc)),
t1).reshape(nocc,nvir,-1).transpose(1,2,0)
gOvVo[i] += pOVov[i].transpose(2,0,1)
gOvvO[i] = lib.dot(tmp[i].reshape(nocc,-1).T,
t1).reshape(nocc,nvir,-1).transpose(1,2,0)
gOvvO[i] += pOvOv[i].transpose(2,0,1)
tmp = None
#==== mem usage nocc**3 + nocc*nvir**6
dovvo[p0:p1] = (gOvVo*2 + gOvvO).transpose(0,2,1,3)
gOvvO *= -2
gOvvO -= gOvVo
doovv[p0:p1] = gOvvO.transpose(0,3,1,2)
gOvvO = gOvVo = None
for j0, j1 in prange(0, nocc, blksize):
tau2 = _ccsd.make_tau(t2[j0:j1], t1[j0:j1], t1)
#:goovv += numpy.einsum('ijkl,klab->ijab', goooo[:,:,j0:j1], tau2)
lib.dot(goooo[:,:,j0:j1].copy().reshape((p1-p0)*nocc,-1),
tau2.reshape(-1,nvir**2), 1, goovv.reshape(-1,nvir**2), 1)
tau2 += numpy.einsum('ia,jb->ijab', t1[j0:j1], t1)
tau2 = _cp(tau2.transpose(0,3,1,2).reshape(-1,nov))
#:goovv[:,j0:j1] += numpy.einsum('ibld,jlda->ijab', pOvOv, tau2) * .5
#:goovv[:,j0:j1] -= numpy.einsum('iald,jldb->ijab', pOVov, tau2) * .5
goovv[:,j0:j1] += lib.dot(pOvOv.reshape(-1,nov), tau2.T,
.5).reshape(p1-p0,nvir,-1,nvir).transpose(0,2,3,1)
goovv[:,j0:j1] += lib.dot(pOVov.reshape(-1,nov), tau2.T,
-.5).reshape(p1-p0,nvir,-1,nvir).transpose(0,2,1,3)
tau2 = None
#==== mem usage nocc**3 + nocc*nvir**2*7
#:goovv += numpy.einsum('iald,jlbd->ijab', pOVov*2+pOvOv, t2) * .5
pOVov *= 2
pOVov += pOvOv
for j in range(nocc):
tmp = lib.dot(pOVov.reshape(-1,nov),
_cp(t2[j].transpose(0,2,1).reshape(-1,nvir)), .5)
goovv[:,j] += tmp.reshape(-1,nvir,nvir)
tmp = None
dovov[p0:p1] = make_theta(goovv).transpose(0,2,1,3)
goooo = goovv = pOvOv = pOVov = None
time1 = log.timer_debug1('rdm intermediates pass2 [%d:%d]'%(p0, p1), *time1)
h5fobj['dooov'][:] = gooov.transpose(0,2,1,3)*2 - gooov.transpose(1,2,0,3)
gooov = None
max_memory = mycc.max_memory - lib.current_memory()[0]
unit = max(nocc**2*nvir*2+nocc*nvir**2*2, nvir**3*2+nocc*nvir**2)
blksize = max(ccsd.BLKMIN, int(max_memory*.95e6/8/unit))
iobuflen = int(256e6/8/blksize)
    log.debug1('rdm intermediates pass 3: block size = %d, nvir = %d in %d blocks',
               blksize, nvir, int((nvir+blksize-1)/blksize))
h5fobj.create_group('dovvv')
for istep, (p0, p1) in enumerate(prange(0, nvir, blksize)):
pvOvO = numpy.empty((p1-p0,nocc,nvir,nocc))
pvOVo = numpy.empty((p1-p0,nocc,nvir,nocc))
ao2mo.outcore._load_from_h5g(fswap['mvOvO'], p0, p1, pvOvO)
ao2mo.outcore._load_from_h5g(fswap['mvOVo'], p0, p1, pvOVo)
#:gvovv -= numpy.einsum('aibk,kc->aibc', pvOvO, t1)
#:gvovv += numpy.einsum('aick,kb->aibc', pvOVo, t1)
gvovv = lib.dot(pvOVo.reshape(-1,nocc), t1).reshape(-1,nocc,nvir,nvir)
for i in range(p1-p0):
gvovv[i] = gvovv[i].transpose(0,2,1)
lib.dot(pvOvO.reshape(-1,nocc), t1, -1, gvovv.reshape(-1,nvir), 1)
pvOvO = pvOVo = None
#==== mem usage nocc**2*nvir*2 + nocc*nvir**2*2
l2tmp = l2[:,:,p0:p1] * .5
#:gvvvv = numpy.einsum('ijab,ijcd->abcd', l2tmp, t2)
#:jabc = numpy.einsum('ijab,ic->jabc', l2tmp, t1)
#:gvvvv += numpy.einsum('jabc,jd->abcd', jabc, t1)
gvvvv = lib.dot(l2tmp.reshape(nocc**2,-1).T, t2.reshape(nocc**2,-1))
jabc = lib.dot(l2tmp.reshape(nocc,-1).T, t1)
lib.dot(jabc.reshape(nocc,-1).T, t1, 1, gvvvv.reshape(-1,nvir), 1)
gvvvv = gvvvv.reshape(-1,nvir,nvir,nvir)
l2tmp = jabc = None
#:gvovv = numpy.einsum('ja,jibc->aibc', l1[:,p0:p1], t2)
#:gvovv += numpy.einsum('jibc,ja->aibc', l2, t1[:,p0:p1])
lib.dot(l1[:,p0:p1].copy().T, t2.reshape(nocc,-1), 1, gvovv.reshape(p1-p0,-1), 1)
lib.dot(t1[:,p0:p1].copy().T, l2.reshape(nocc,-1), 1, gvovv.reshape(p1-p0,-1), 1)
tmp = numpy.einsum('ja,jb->ab', l1[:,p0:p1], t1)
gvovv += numpy.einsum('ab,ic->aibc', tmp, t1)
gvovv += numpy.einsum('ba,ic->aibc', mvv[:,p0:p1]*.5, t1)
#:gvovv -= numpy.einsum('adbc,id->aibc', gvvvv, t1*2)
for j in range(p1-p0):
lib.dot(t1, gvvvv[j].reshape(nvir,-1), -2,
gvovv[j].reshape(nocc,-1), 1)
# symmetrize dvvvv because it is symmetrized in ccsd_grad and make_rdm2 anyway
#:dvvvv = .5*(gvvvv+gvvvv.transpose(0,1,3,2))
#:dvvvv = .5*(dvvvv+dvvvv.transpose(1,0,3,2))
# now dvvvv == dvvvv.transpose(2,3,0,1) == dvvvv.transpose(0,1,3,2) == dvvvv.transpose(1,0,3,2)
tmp = numpy.empty((nvir,nvir,nvir))
tmp1 = numpy.empty((nvir,nvir,nvir))
tmpvvvv = numpy.empty((p1-p0,nvir,nvir_pair))
for i in range(p1-p0):
make_theta(gvvvv[i:i+1], out=tmp)
tmp1[:] = tmp.transpose(1,0,2)
_ccsd.precontract(tmp1, diag_fac=2, out=tmpvvvv[i])
# tril of (dvvvv[p0:p1,p0:p1]+dvvvv[p0:p1,p0:p1].T)
for i in range(p0, p1):
for j in range(p0, i):
tmpvvvv[i-p0,j] += tmpvvvv[j-p0,i]
tmpvvvv[i-p0,i] *= 2
for i in range(p0, p1):
off = i * (i+1) // 2
if p0 > 0:
tmpvvvv[i-p0,:p0] += dvvvv[off:off+p0]
dvvvv[off:off+i+1] = tmpvvvv[i-p0,:i+1] * .25
for i in range(p1, nvir):
off = i * (i+1) // 2
dvvvv[off+p0:off+p1] = tmpvvvv[:,i]
tmp = tmp1 = tmpvvvv = None
#==== mem usage nvir**3 + nocc*nvir**2
gvvov = make_theta(gvovv).transpose(0,2,1,3)
ao2mo.outcore._transpose_to_h5g(h5fobj, 'dovvv/%d'%istep,
gvvov.reshape(-1,nov), iobuflen)
gvvvv = None
gvovv = None
time1 = log.timer_debug1('rdm intermediates pass3 [%d:%d]'%(p0, p1), *time1)
del(fswap['mOvOv'])
del(fswap['mOVov'])
del(fswap['mvOvO'])
del(fswap['mvOVo'])
fswap.close()
_tmpfile = None
return (h5fobj['dovov'], h5fobj['dvvvv'], h5fobj['doooo'], h5fobj['doovv'],
h5fobj['dovvo'], None, h5fobj['dovvv'], h5fobj['dooov'])
def make_rdm1(mycc, t1, t2, l1, l2, d1=None):
if d1 is None:
doo, dov, dvo, dvv = gamma1_intermediates(mycc, t1, t2, l1, l2)
else:
doo, dov, dvo, dvv = d1
nocc, nvir = t1.shape
nmo = nocc + nvir
dm1 = numpy.empty((nmo,nmo))
dm1[:nocc,:nocc] = doo + doo.T
dm1[:nocc,nocc:] = dov + dvo.T
dm1[nocc:,:nocc] = dm1[:nocc,nocc:].T
dm1[nocc:,nocc:] = dvv + dvv.T
for i in range(nocc):
dm1[i,i] += 2
return dm1
# rdm2 in Chemist's notation
def make_rdm2(mycc, t1, t2, l1, l2, d1=None, d2=None):
if d1 is None:
doo, dov, dvo, dvv = gamma1_intermediates(mycc, t1, t2, l1, l2)
else:
doo, dov, dvo, dvv = d1
if d2 is None:
dovov, dvvvv, doooo, doovv, dovvo, dvvov, dovvv, dooov = \
gamma2_intermediates(mycc, t1, t2, l1, l2)
else:
dovov, dvvvv, doooo, doovv, dovvo, dvvov, dovvv, dooov = d2
nocc, nvir = t1.shape
nmo = nocc + nvir
dm2 = numpy.empty((nmo,nmo,nmo,nmo))
dm2[:nocc,nocc:,:nocc,nocc:] = \
(dovov +dovov.transpose(2,3,0,1))
dm2[nocc:,:nocc,nocc:,:nocc] = \
(dovov.transpose(1,0,3,2)+dovov.transpose(3,2,1,0))
dm2[:nocc,:nocc,nocc:,nocc:] = \
(doovv.transpose(0,1,3,2)+doovv.transpose(1,0,2,3))
dm2[nocc:,nocc:,:nocc,:nocc] = \
(doovv.transpose(3,2,0,1)+doovv.transpose(2,3,1,0))
dm2[:nocc,nocc:,nocc:,:nocc] = \
(dovvo +dovvo.transpose(3,2,1,0))
dm2[nocc:,:nocc,:nocc,nocc:] = \
(dovvo.transpose(2,3,0,1)+dovvo.transpose(1,0,3,2))
dm2[nocc:,nocc:,nocc:,nocc:] = ao2mo.restore(1, dvvvv, nvir)
dm2[nocc:,nocc:,nocc:,nocc:] *= 4
dm2[:nocc,:nocc,:nocc,:nocc] =(doooo+doooo.transpose(1,0,3,2)) * 2
dm2[:nocc,nocc:,nocc:,nocc:] = dovvv
dm2[nocc:,nocc:,:nocc,nocc:] = dovvv.transpose(2,3,0,1)
dm2[nocc:,nocc:,nocc:,:nocc] = dovvv.transpose(3,2,1,0)
dm2[nocc:,:nocc,nocc:,nocc:] = dovvv.transpose(1,0,3,2)
dm2[:nocc,:nocc,:nocc,nocc:] = dooov
dm2[:nocc,nocc:,:nocc,:nocc] = dooov.transpose(2,3,0,1)
dm2[:nocc,:nocc,nocc:,:nocc] = dooov.transpose(1,0,3,2)
dm2[nocc:,:nocc,:nocc,:nocc] = dooov.transpose(3,2,1,0)
dm1 = numpy.zeros((nmo,nmo))
dm1[:nocc,:nocc] = doo + doo.T
dm1[:nocc,nocc:] = dov + dvo.T
dm1[nocc:,:nocc] = dm1[:nocc,nocc:].T
dm1[nocc:,nocc:] = dvv + dvv.T
for i in range(nocc):
dm2[i,i,:,:] += dm1 * 2
dm2[:,:,i,i] += dm1 * 2
dm2[:,i,i,:] -= dm1
dm2[i,:,:,i] -= dm1
for i in range(nocc):
for j in range(nocc):
dm2[i,i,j,j] += 4
dm2[i,j,j,i] -= 2
return dm2
def prange(start, end, step):
for i in range(start, end, step):
yield i, min(i+step, end)
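# e.g. prange(0, 10, 4) yields (0, 4), (4, 8), (8, 10) -- half-open blocks that cover [0, 10).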
def _cp(a):
return numpy.array(a, copy=False, order='C')
def make_theta(t2, out=None):
return _ccsd.make_0132(t2, t2, 2, -1, out)
if __name__ == '__main__':
from pyscf import gto
from pyscf import scf
from pyscf.cc import ccsd
from pyscf import ao2mo
mol = gto.M()
mf = scf.RHF(mol)
mcc = ccsd.CCSD(mf)
numpy.random.seed(2)
nocc = 5
nmo = 12
nvir = nmo - nocc
eri0 = numpy.random.random((nmo,nmo,nmo,nmo))
eri0 = ao2mo.restore(1, ao2mo.restore(8, eri0, nmo), nmo)
fock0 = numpy.random.random((nmo,nmo))
fock0 = fock0 + fock0.T + numpy.diag(range(nmo))*2
t1 = numpy.random.random((nocc,nvir))
t2 = numpy.random.random((nocc,nocc,nvir,nvir))
t2 = t2 + t2.transpose(1,0,3,2)
l1 = numpy.random.random((nocc,nvir))
l2 = numpy.random.random((nocc,nocc,nvir,nvir))
l2 = l2 + l2.transpose(1,0,3,2)
h1 = fock0 - (numpy.einsum('kkpq->pq', eri0[:nocc,:nocc])*2
- numpy.einsum('pkkq->pq', eri0[:,:nocc,:nocc]))
eris = lambda:None
eris.oooo = eri0[:nocc,:nocc,:nocc,:nocc].copy()
eris.ooov = eri0[:nocc,:nocc,:nocc,nocc:].copy()
eris.ovoo = eri0[:nocc,nocc:,:nocc,:nocc].copy()
eris.oovv = eri0[:nocc,:nocc,nocc:,nocc:].copy()
eris.ovov = eri0[:nocc,nocc:,:nocc,nocc:].copy()
eris.ovvo = eri0[:nocc,nocc:,nocc:,:nocc].copy()
eris.ovvv = eri0[:nocc,nocc:,nocc:,nocc:].copy()
eris.vvvv = eri0[nocc:,nocc:,nocc:,nocc:].copy()
eris.fock = fock0
doo, dov, dvo, dvv = gamma1_intermediates(mcc, t1, t2, l1, l2)
print((numpy.einsum('ij,ij', doo, fock0[:nocc,:nocc]))*2+20166.329861034799)
print((numpy.einsum('ab,ab', dvv, fock0[nocc:,nocc:]))*2-58078.964019246778)
print((numpy.einsum('ia,ia', dov, fock0[:nocc,nocc:]))*2+74994.356886784764)
print((numpy.einsum('ai,ai', dvo, fock0[nocc:,:nocc]))*2-34.010188025702391)
dovov, dvvvv, doooo, doovv, dovvo, dvvov, dovvv, dooov = \
gamma2_intermediates(mcc, t1, t2, l1, l2)
dvvvv = ao2mo.restore(1, dvvvv, nvir)
print('doooo',numpy.einsum('kilj,kilj', doooo, eris.oooo)*2-15939.9007625418)
print('dvvvv',numpy.einsum('acbd,acbd', dvvvv, eris.vvvv)*2-37581.823919588 )
print('dooov',numpy.einsum('jkia,jkia', dooov, eris.ooov)*2-128470.009687716)
print('dovvv',numpy.einsum('icba,icba', dovvv, eris.ovvv)*2+166794.225195056)
print('dovov',numpy.einsum('iajb,iajb', dovov, eris.ovov)*2+719279.812916893)
print('dovvo',numpy.einsum('jbai,jbia', dovvo, eris.ovov)*2
+numpy.einsum('jiab,jiba', doovv, eris.oovv)*2+53634.0012286654)
dm1 = make_rdm1(mcc, t1, t2, l1, l2)
dm2 = make_rdm2(mcc, t1, t2, l1, l2)
e2 =(numpy.einsum('ijkl,ijkl', doooo, eris.oooo)*2
+numpy.einsum('acbd,acbd', dvvvv, eris.vvvv)*2
+numpy.einsum('jkia,jkia', dooov, eris.ooov)*2
+numpy.einsum('icba,icba', dovvv, eris.ovvv)*2
+numpy.einsum('iajb,iajb', dovov, eris.ovov)*2
+numpy.einsum('jbai,jbia', dovvo, eris.ovov)*2
+numpy.einsum('ijab,ijab', doovv, eris.oovv)*2
+
| numpy.einsum('ij,ij', doo, fock0[:nocc,:nocc]) | numpy.einsum |
import numpy as np
from classes.model import Model
# An object with a set of methods to generate and train multiple models and compare clusters.
# Uses Monte Carlo-like methods to approximately explore the optima of the marginal
# distributions of the observed variables given the latent variable.
# Generates a given number of models, trains them starting from different initial conditions
# (define how to do this exactly), and then outputs the different results and their frequencies.
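# A hypothetical usage sketch (names are illustrative; `var_dict` and `data` are whatever the
# Model class expects from the caller):
#   coll = Collection("demo", num_models=10, var_dict=var_dict, data=data, num_clusters=3)
#   coll.train_models(n_iter=50)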
class Collection:
def __init__(self, name, num_models, var_dict, data, num_clusters, df=True, hidden_init='uniform'):
self.name = name
# Can be an int or a np.ndarray of integers with all number of clusters to explore
self.c = num_clusters
self.trained = False
self.multiple_init = False
# The initial distribution for the latent variable can either be random, uniform or defined
self.hidden_init = hidden_init
self.N = num_models
        # If it is defined, there are two possibilities:
        ## if hidden_init is a 1 x self.c array: use it for all models
        ## if it is a k x self.c array: use each distribution in the array as many times as possible given num_models
if isinstance(hidden_init, np.ndarray):
if hidden_init.shape[0] > 1:
self.multiple_init = True
                # Repeat each provided distribution row as many whole times as num_models allows
                num_series = num_models // hidden_init.shape[0]
                self.hidden_init = np.tile(hidden_init, (num_series, 1))
self.N = self.hidden_init.shape[0]
self.multiple_c = False
# Set up models
if self.multiple_init:
self.models = [Model(f'{self.name}_{n}', self.c, var_dict, data=data, df=df, hidden_init=self.hidden_init[n,:]) for n in np.arange(self.N)]
else:
if isinstance(self.c, list):
self.models = []
for cluster in self.c:
self.models.append([Model(f'{self.name}_{n}', cluster, var_dict, data=data, df=df, hidden_init=self.hidden_init) for n in np.arange(self.N)])
self.multiple_c = True
else:
self.models = [Model(f'{self.name}_{n}', self.c, var_dict, data=data, df=df, hidden_init=self.hidden_init) for n in np.arange(self.N)]
# Trains all models in self.models
def train_models(self, n_iter, sig=0):
print(f'Training {self.N} models with {self.c} clusters...')
if self.multiple_c:
for i, clusters in enumerate(self.c):
for n in np.arange(self.N):
self.models[i][n].run_EM(n_iter=n_iter)
if n % 10 == 0:
print(f'c={clusters}, model {n}...')
else:
for n in
| np.arange(self.N) | numpy.arange |
"""
MIT License
Copyright (c) 2020 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import numpy as np
from .utils import *
class MeanAveragePrecision:
""" Mean Average Precision for object detection.
Arguments:
num_classes (int): number of classes.
"""
def __init__(self, num_classes):
self.num_classes = num_classes
self._init()
def reset(self):
"""Reset stored data."""
self._init()
def add(self, preds, gt):
""" Add sample to evaluation.
Arguments:
preds (np.array): predicted boxes.
gt (np.array): ground truth boxes.
Input format:
preds: [xmin, ymin, xmax, ymax, class_id, confidence]
gt: [xmin, ymin, xmax, ymax, class_id, difficult]
"""
class_counter = np.zeros((1, self.num_classes), dtype=np.int32)
for c in range(self.num_classes):
preds_c = preds[preds[:, 4] == c]
gt_c = gt[gt[:, 4] == c]
class_counter[0, c] = gt_c.shape[0]
match_table = compute_match_table(preds_c, gt_c, self.imgs_counter)
self.match_table[c] =
| np.concatenate((self.match_table[c], match_table), axis=0) | numpy.concatenate |
import numpy as np
import pytest
from simba.utils.linalg import cosine, compute_pc, normalise_rows
@pytest.mark.parametrize(
'x, y, expected',
[
([1, 2, 3], [2, 4, 6], 1),
([1, 1, 1, 1], [-1, -1, -1, -1], -1),
([1, 1], [1, -1], 0),
([1, 1], [1, 0], np.cos(np.pi / 4)),
]
)
def test_cosine(x, y, expected):
result = cosine(x, y)
np.testing.assert_allclose(result, expected)
def test_cosine_zero_vector():
x = np.random.random(3)
y = np.zeros_like(x)
with pytest.warns(RuntimeWarning):
result = cosine(x, y)
assert np.isnan(result)
def test_compute_pc():
X =
| np.random.random((4, 5)) | numpy.random.random |
# Utilities supporting gaze calibration
import numpy as np
import cv2
import pandas as pd
import os
def onoff_from_binary(data, return_duration=True):
"""Converts a binary variable data into onsets, offsets, and optionally durations
This may yield unexpected behavior if the first value of `data` is true.
Parameters
----------
data : array-like, 1D
binary array from which onsets and offsets should be extracted
"""
    data = data.astype(float).copy()
ddata = np.hstack([[0], np.diff(data)])
(onsets,) =
| np.nonzero(ddata > 0) | numpy.nonzero |
import numpy as np
import math
from scipy.special import gamma
import scipy
import scipy.ndimage
def paired_product(new_im):
shift1 = np.roll(new_im.copy(), 1, axis=1)
shift2 = np.roll(new_im.copy(), 1, axis=0)
shift3 = np.roll(np.roll(new_im.copy(), 1, axis=0), 1, axis=1)
shift4 = np.roll(np.roll(new_im.copy(), 1, axis=0), -1, axis=1)
H_img = shift1 * new_im
V_img = shift2 * new_im
D1_img = shift3 * new_im
D2_img = shift4 * new_im
return (H_img, V_img, D1_img, D2_img)
def gen_gauss_window(lw, sigma):
sd = np.float32(sigma)
lw = int(lw)
weights = [0.0] * (2 * lw + 1)
weights[lw] = 1.0
sum = 1.0
sd *= sd
for ii in range(1, lw + 1):
tmp = np.exp(-0.5 * np.float32(ii * ii) / sd)
weights[lw + ii] = tmp
weights[lw - ii] = tmp
sum += 2.0 * tmp
for ii in range(2 * lw + 1):
weights[ii] /= sum
return weights
def estimateggdparam(vec):
gam = np.asarray([x / 1000.0 for x in range(200, 10000, 1)])
r_gam = (gamma(1.0/gam)*gamma(3.0/gam))/((gamma(2.0/gam))**2)
# print(np.mean(vec))
sigma_sq = np.mean(vec**2) #-(np.mean(vec))**2
sigma = np.sqrt(sigma_sq)
E = np.mean(np.abs(vec))
rho = sigma_sq / (E**2 + 1e-6)
array_position = (np.abs(rho - r_gam)).argmin()
alphaparam = gam[array_position]
return alphaparam, sigma
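# Moment-matching idea behind estimateggdparam: for a zero-mean generalized Gaussian with
# shape parameter gam, E[x^2] / E[|x|]^2 equals gamma(1/gam)*gamma(3/gam) / gamma(2/gam)^2,
# so the measured ratio rho is matched against the precomputed r_gam table to pick the
# closest shape value.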
def compute_image_mscn_transform(image, C=1, avg_window=None, extend_mode='constant'):
if avg_window is None:
avg_window = gen_gauss_window(3, 7.0/6.0)
assert len(np.shape(image)) == 2
h, w = np.shape(image)
mu_image = np.zeros((h, w), dtype=np.float32)
var_image =
| np.zeros((h, w), dtype=np.float32) | numpy.zeros |
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
#
# Based on:
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME>
# --------------------------------------------------------
"""blob helper functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from six.moves import cPickle as pickle
import numpy as np
import cv2
import os
import mmcv
from mask_rcnn.utils.compute_flow import flow_to_flow_img
from mask_rcnn.core.config import cfg
def get_image_blob(im, target_scale, target_max_size):
"""Convert an image into a network input.
Arguments:
im (ndarray): a color image in BGR order
Returns:
blob (ndarray): a data blob holding an image pyramid
im_scale (float): image scale (target size) / (original size)
im_info (ndarray)
"""
processed_im, im_scale = prep_im_for_blob(
im, cfg.PIXEL_MEANS, [target_scale], target_max_size
)
blob = im_list_to_blob(processed_im)
# NOTE: this height and width may be larger than actual scaled input image
# due to the FPN.COARSEST_STRIDE related padding in im_list_to_blob. We are
# maintaining this behavior for now to make existing results exactly
# reproducible (in practice using the true input image height and width
# yields nearly the same results, but they are sometimes slightly different
# because predictions near the edge of the image will be pruned more
# aggressively).
height, width = blob.shape[2], blob.shape[3]
im_info =
| np.hstack((height, width, im_scale)) | numpy.hstack |
import numpy as np
import config
class Trajectory:
def __init__(self, q_f, delay=0):
self.delay = delay
k_v = config.k_v
k_a = config.k_a
self.k_v = np.array(k_v)
self.k_a = np.array(k_a)
self.q_f = np.array(q_f)
self.num_joints = len(q_f)
self.done = False
#self.t_prev = None
def sign(self, x):
if (x == 0):
return 0
else:
return x/abs(x)
def set_initial_conditions(self, q_i, t_i):
self.q_i = np.array(q_i)
self.t_i = t_i
self.D = self.q_f - self.q_i
        self.sign_D = np.zeros(self.num_joints)
for i in range(len(self.D)):
self.sign_D[i] = self.sign(self.D[i])
#v_sign = np.vectorize(sign, otypes=[np.float])
#self.sign_D = v_sign(self.D)
self.k_v, self.k_a, self.tau, self.t_f = self.synchronize(self.k_v, self.k_a)
def synchronize(self, k_v, k_a):
mask = (abs(self.D) <= k_v**2/k_a) #This being true, means that the joint will not be able to achieve its max velocity.
k_v[mask] = np.sqrt(abs(self.D[mask]) * k_a[mask]) #With these elements, we set k_v to the maximum achievable velocity
tau = k_v / k_a
travel_time = np.zeros(self.num_joints)
mask = self.k_v != 0 #The result of correcting the k_v elements, is that some of them are 0, if D is 0
travel_time[mask] = tau[mask] + abs(self.D[mask])/k_v[mask] #The elements where k_v is not 0 is calculated normally, the others are left at 0 travel time
#print(f"Travel_time : {travel_time}\n\n")
max_travel_time = max(travel_time)
max_travel_time_index =
| np.where(travel_time == max_travel_time) | numpy.where |
import unittest
import numpy as np
import cspyce.typemap_samples as ts
def flatten(array):
return tuple(tuple(array.ravel()))
# noinspection PyTypeChecker
class test_array1_1(unittest.TestCase):
# %apply (int IN_ARRAY1[ANY]) {int arg[3]}
# cs.in_array1_1 just returns whatever 3 integers it was passed as a numpy array
def test_basic_test_tuple(self):
self.assertEqual((1, 2, 3), ts.in_array1_1((1, 2, 3)))
def test_basic_test_array(self):
array = np.arange(10, 13, dtype="int32")
self.assertEqual(tuple(array), ts.in_array1_1(array))
def test_non_contiguous_array(self):
array = np.arange(9, dtype='int32').reshape((3, 3))
self.assertEqual((0, 3, 6), ts.in_array1_1(array[:, 0]))
def test_requires_three_elements(self):
with self.assertRaises(ValueError):
ts.in_array1_1((1, 2, 3, 4))
def test_requires_integer_array(self):
with self.assertRaises(ValueError):
ts.in_array1_1(np.arange(3.0))
def test_requires_one_dimensional_array(self):
array = np.zeros((3, 3), dtype="int32")
with self.assertRaises(ValueError):
ts.in_array1_1(array)
def test_requires_non_null(self):
with self.assertRaises(TypeError):
ts.in_array1_1(None)
class test_array1_2(unittest.TestCase):
# %apply (int* IN_ARRAY1, int DIM1) {(int* arg, int dim)};
# This function returns them as a list.
def test_basic_test_tuple(self):
self.assertEqual((1, 2, 3, 4, 5), ts.in_array1_2([1, 2, 3, 4, 5]))
def test_basic_test_array(self):
arg = np.arange(1, 10, dtype='int32')
self.assertEqual(flatten(arg), ts.in_array1_2(arg))
def test_okay_to_pass_empty_list(self):
self.assertEqual((), ts.in_array1_2(()))
def test_non_contiguous_array(self):
array = np.arange(12, dtype='int32').reshape((4, 3))[:, 1]
self.assertEqual(flatten(array), ts.in_array1_2(array))
def test_requires_integer_array(self):
with self.assertRaises(ValueError):
ts.in_array1_2(np.arange(10.0))
def test_requires_one_dimensional_array(self):
array = np.zeros((3, 3), dtype="int32")
with self.assertRaises(ValueError):
ts.in_array1_2(array)
def test_requires_one_dimensional_int_array(self):
array = np.zeros((3, 3), dtype="double")
with self.assertRaises(ValueError):
ts.in_array1_2(array)
def test_requires_non_null(self):
with self.assertRaises(TypeError):
ts.in_array1_2(None)
class test_array1_3(unittest.TestCase):
# %apply (int* IN_ARRAY1, int DIM1) {(int* arg)};
# This function is exactly like test_array_1_2, except the length is passed separately
def test_basic_test_tuple(self):
self.assertEqual((1, 2, 3, 4, 5), ts.in_array1_3([1, 2, 3, 4, 5], 5))
def test_basic_test_array(self):
arg = np.arange(1, 10, dtype='int32')
self.assertEqual(flatten(arg), ts.in_array1_3(arg, len(arg)))
def test_okay_to_pass_empty_list(self):
self.assertEqual((), ts.in_array1_3((), 0))
def test_non_contiguous_array(self):
array = np.arange(12, dtype='int32').reshape((4, 3))[:, 1]
self.assertEqual(flatten(array), ts.in_array1_3(array, len(array)))
def test_requires_integer_array(self):
with self.assertRaises(ValueError):
ts.in_array1_3(np.arange(10.0), 1)
def test_requires_one_dimensional_array(self):
array = np.zeros((3, 3), dtype="int32")
with self.assertRaises(ValueError):
ts.in_array1_3(array, 9)
def test_requires_one_dimensional_int_array(self):
array = np.zeros((3, 3), dtype="double")
with self.assertRaises(ValueError):
ts.in_array1_3(array, 9)
def test_requires_non_null(self):
with self.assertRaises(TypeError):
ts.in_array1_3(None, 0)
class test_array1_01_1(unittest.TestCase):
# %apply (int *IN_ARRAY01, int DIM1) {(int *arg, int dim)};
    # cs.in_array01_1 receives either an int scalar or a sequence of integers, and
    # returns whatever it was passed.
SMALL_INT_ARRAY = np.array((4, 5, 6), dtype="int32")
SMALL_FLOAT_ARRAY = np.array((4.0, 5.0, 6.0), dtype="double")
def f(self, x):
return ts.in_array01_1(x)
def test_basic_test_scalar(self):
self.assertEqual(1, ts.in_array01_1(1))
def test_basic_test_tuple(self):
self.assertEqual((1, 2, 3), ts.in_array01_1([1, 2, 3]))
def test_basic_test_int_array(self):
arg = np.arange(1, 10, dtype='int32')
self.assertEqual(flatten(arg), ts.in_array01_1(arg))
def test_non_contiguous_array(self):
array = np.arange(12, dtype='int32').reshape((4, 3))[:, 0]
self.assertEqual(flatten(array), ts.in_array01_1(array))
def test_requires_integer_array(self):
with self.assertRaises(ValueError):
ts.in_array01_1(np.arange(20.))
def test_requires_one_dimensional_array(self):
array = np.zeros((3, 3), dtype="int32")
with self.assertRaises(ValueError):
ts.in_array01_1(array)
def test_requires_one_dimensional_int_array(self):
array = np.zeros((3, 3), dtype="double")
with self.assertRaises(ValueError):
ts.in_array01_1(array)
def test_requires_non_null(self):
with self.assertRaises(TypeError):
ts.in_array01_1(None)
class test_array2_1(unittest.TestCase):
# %apply (int IN_ARRAY2[ANY][ANY]) {int arg[3][5]};
# This function specifically requires a 3x5 int array.
# It returns the first element, and the dimensions as a tuple.
def test_basic_run(self):
array = np.arange(1000, 1015, dtype='int32').reshape(3, 5)
self.assertEqual((flatten(array), 3, 5), ts.in_array2_1(array))
def test_non_contiguous_array(self):
array = np.arange(150, dtype='int32').reshape((3, 5, 10))[..., 2]
self.assertEqual((flatten(array), 3, 5), ts.in_array2_1(array))
def test_no_other_size(self):
array = np.array(range(100, 115), dtype='int32').reshape(5, 3)
with self.assertRaises(ValueError):
ts.in_array2_1(array)
def test_no_other_data_type(self):
array = np.array(range(100, 115), dtype='int64').reshape(3, 5)
with self.assertRaises(ValueError):
ts.in_array2_1(array)
def test_no_other_dimension(self):
array = np.array(range(100, 115), dtype='int64').reshape(3, 5, 1)
with self.assertRaises(ValueError):
ts.in_array2_1(array)
def test_requires_non_null(self):
with self.assertRaises(TypeError):
ts.in_array2_1(None)
class test_array2_2(unittest.TestCase):
# %apply (int *IN_ARRAY2, int DIM1, int DIM2) {(int *arg, int dim1, int dim2)};
# This function takes any sized integer array.
# It returns the elements of the array as a tuple, and the dimensions
def test_basic_run(self):
array = np.arange(100, 200, dtype='int32').reshape(5, 20)
self.assertEqual((flatten(array), 5, 20), ts.in_array2_2(array))
self.assertEqual((flatten(array[1:]), 4, 20), ts.in_array2_2(array[1:]))
def test_non_contiguous_array(self):
array = np.arange(150, dtype='int32').reshape((3, 5, 10))[..., 2]
self.assertEqual((flatten(array), 3, 5), ts.in_array2_2(array))
def test_no_other_data_type(self):
array =
| np.arange(100., 200.) | numpy.arange |
#!/usr/bin/env python
# Analysis/plotting functions for HDX analysis
import Functions, Methods
import numpy as np
import matplotlib.pyplot as plt
import os, glob, copy, itertools, pickle
from scipy.stats import pearsonr as correl
from scipy.stats import sem as stderr
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib.ticker import MultipleLocator, MaxNLocator
from cycler import cycler
### Define defaults for matplotlib plots
plt.rc('lines', linewidth=1.5, markersize=4)
plt.rc('axes', prop_cycle=(cycler('color', ['k','b','r','orange','c','m','y','g'])), # Color cycle defaults to black
labelweight='heavy', labelsize=14, titlesize=18) # Default fontsizes for printing
plt.rc('axes.spines', top=False, right=False) # Switch off top/right axes
plt.rc('legend', fontsize=10) # Default fontsizes for printing
plt.rc('xtick', labelsize=12) # Default fontsizes for printing
plt.rc('ytick', labelsize=12) # Default fontsizes for printing
plt.rc('figure', titlesize=22, titleweight='heavy') # Default fontsizes for printing
#plt.rc('text', usetex=True)
### Classes
class Analyze():
"""Class to contain results and analysis methods for HDX predictions"""
def __init__(self, resobj, top, **extra_params):
"""Initialises Analyze object from a Method object with by-residue results"""
try:
self.residxs = resobj.reslist
self.resobj = resobj
# Analysis ignores errors so far
# Cumulative resfracs = 3D-array[chunk, resfrac, time]
self.resfracs = np.reshape(resobj.resfracs[:,:,0], (1, len(resobj.resfracs), len(resobj.resfracs[0])))
self.c_resfracs = np.copy(self.resfracs)
# Byframe PFs = 2D-array[residue, PFs]
self.pf_byframe = np.copy(resobj.pf_byframe)
if type(resobj) is Methods.Radou:
self.lnpf_byframe = np.copy(resobj.lnpf_byframe)
# Cumulative PFs = 2D-array[chunk, PFs]
self.pfs = np.reshape(resobj.pfs[:,0], (1, len(resobj.pfs)))
if type(resobj) is Methods.Radou:
self.lnpfs = np.reshape(resobj.lnpfs[:,0], (1, len(resobj.lnpfs)))
self.c_pfs = np.copy(self.pfs)
if type(resobj) is Methods.Radou:
self.c_lnpfs = np.copy(self.lnpfs)
# Cumulative n_frames = 1D-array[n_frames]
self.n_frames = np.atleast_1d(resobj.n_frames)
self.c_n_frames = np.copy(self.n_frames)
# Topology & rates
self.rates = resobj.rates
self.top = top
self.resnums = np.asarray([ self.top.residue(i).resSeq for i in self.residxs ])
except AttributeError:
raise Functions.HDX_Error("Error when copying results from prediction to analysis objects - have you made any HDX predictions yet?")
self.params = resobj.params
try:
self.params.update(extra_params)
except (TypeError, ValueError):
print("Couldn't load extra parameters for analysis (maybe they weren't provided?).\nUsing previous parameters from %s object." % resobj)
def __add__(self, other):
"""Add resfracs, pfs and n_frames from a second results object and
update cumulative sums.
Usage: __add__(self, other)"""
if isinstance(other, Analyze):
# try:
if not all((np.array_equal(self.residxs, other.residxs), np.array_equal(self.rates, other.rates))):
print("Reslist or rates of added Analyze objects differ. Not adding them!")
return self
new = copy.deepcopy(self)
# Add n_frames
new.n_frames = np.append(new.n_frames, other.n_frames)
new.c_n_frames = np.cumsum(new.n_frames)
# Calc running ave of PFs = 2D-array[chunk, PFs]
new.pf_byframe = np.concatenate((new.pf_byframe, other.pf_byframe), axis=1)
new.pfs = np.concatenate((new.pfs, other.pfs), axis=0)
_ = np.copy(new.pfs)
for frames, curr_pf in zip(new.n_frames, _):
curr_pf *= frames
new.c_pfs = np.cumsum(_, axis=0)
for tot_frames, tot_pf in zip(new.c_n_frames, new.c_pfs):
tot_pf /= tot_frames
# new.c_lnpfs should be calculated from new.lnpf_byframe
if type(self.resobj) is Methods.Radou:
new.lnpf_byframe = np.concatenate((new.lnpf_byframe, other.lnpf_byframe), axis=1)
new.lnpfs = np.concatenate((new.lnpfs, other.lnpfs), axis=0)
new.c_lnpfs = np.append(new.c_lnpfs, np.mean(new.lnpf_byframe, axis=1)[np.newaxis,:], axis=0)
# Calc running ave of resfracs = 3D-array[chunk, resfrac, time]
new.resfracs = np.concatenate((new.resfracs, other.resfracs), axis=0)
_ = np.zeros(new.resfracs[0].shape)
# Redo resfrac calculation based on running average of pfs
# N.B. Due to the exponential this is NOT just an average of the resfrac blocks
if type(self.resobj) is Methods.Radou:
for i2, t in enumerate(new.params['times']):
def _residue_fraction_lnpf(lnpf, k, time=t):
return 1 - np.exp((-k / np.exp(lnpf)) * time)
for i1, curr_frac in enumerate(map(_residue_fraction_lnpf, new.c_lnpfs[-1], new.rates)):
_[i1,i2] = curr_frac
else:
for i2, t in enumerate(new.params['times']):
def _residue_fraction(pf, k, time=t):
return 1 - np.exp((-k / pf) * time)
for i1, curr_frac in enumerate(map(_residue_fraction, new.c_pfs[-1], new.rates)):
_[i1,i2] = curr_frac
new.c_resfracs = np.concatenate((new.c_resfracs,
np.reshape(_, (1, len(new.residxs), len(new.params['times'])))),
axis=0)
return new
# except AttributeError:
# raise Functions.HDX_Error("Error when adding analysis objects - have you made any HDX predictions yet?")
else:
return self
def __getstate__(self):
"""Set state of object for pickling.
Additional attributes can be removed here"""
odict = self.__dict__.copy()
for k1 in ['resobj']: # Results object
try:
for k2 in ['top']: # topology
del odict[k1].__dict__[k2]
except KeyError:
pass
return odict
def __setstate__(self, d):
"""Set state of object after pickling.
Additional attributes can be added here"""
# This will read in a single topology for the whole analysis.
# It may have attributes that differ from those in self.top
# e.g. for cis-prolines. These should be recalculated if needed
# The pfs/rates/fracs in the results object would be correct though.
self.__dict__ = d
if os.path.exists(self.params['outprefix']+"topology.pkl"):
try:
self.resobj.top = pickle.load(open(self.params['outprefix']+"topology.pkl", 'rb'))
except (IOError, EOFError):
raise Functions.HDX_Error("Can't read cached topology file %s. "\
"Re-run calculation after removing the file." \
% (self.params['outprefix']+"topology.pkl"))
else:
self.resobj.top = self.top
def _windowed_average(self, data, window):
"""Calculate average of non-overlapping windows (size=window) of a set of data.
Usage: _windowed_average(data, window)"""
blocks = len(data)/window
aves = []
for start_i in range(int(blocks)):
aves.append(np.mean(data[(start_i * window):(start_i * window) + window]))
return np.asarray(aves)
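    # e.g. _windowed_average(np.arange(6), 3) -> array([1., 4.]): means of the two
    # non-overlapping windows [0, 1, 2] and [3, 4, 5].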
def _cumulative_average(self, data, blocksizes):
"""Calculate cumulative averages of a set of data at provided intervals
Data & blocksizes should be 1D arrays (or axis-slices of larger arrays)
Usage: _cumulative_average(data, blocksizes)"""
if not len(data) == np.sum(blocksizes):
raise Functions.HDX_Error("Unable to cumulatively average data of length %d using total blocksizes %d" \
% (len(data), int(np.sum(blocksizes))))
aves = np.zeros(len(blocksizes))
blocksum = np.cumsum(blocksizes)
for i, block in enumerate(blocksum):
aves[i] = np.mean(data[:block])
return aves
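    # e.g. _cumulative_average(np.array([1., 2., 3., 4.]), [2, 2]) -> array([1.5, 2.5]):
    # the mean of the first block, then the mean over the first two blocks combined.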
def read_segfile(self):
# segfile should contain at most 3 columns: startres, endres, chain_idx
try:
self.segres = np.loadtxt(self.params['segfile'],
dtype=[ ('segres', np.int32, (2,)), ('chain', np.int32, (1)) ]) # ResIDs will be converted to indices with dictionary in segments function
with open(self.params['logfile'], 'a') as f:
f.write("Chain indices read from segments file - segment averaging will be performed on defined chains\n")
self._single_chain = False
except IndexError:
tmp_segres = np.loadtxt(self.params['segfile'], dtype=np.int32, usecols=(0,1))
with open(self.params['logfile'], 'a') as f:
f.write("Chain indices NOT read from segments file - segment averaging will be performed on first chain\n")
self.segres = np.zeros(len(tmp_segres), dtype=[ ('segres', np.int32, (2,)), ('chain', np.int32, (1)) ])
self.segres['segres'] = tmp_segres
self._single_chain = True
except ValueError:
            raise Functions.HDX_Error("There's a problem reading the values in your segments file: %s \n"
                                      "File should contain either 2 or 3 columns of integers, separated by spaces.\n"
                                      "Format: start_residue end_residue chain_index[optional]"
                                      % self.params['segfile'])
def read_expfile(self):
"""Reads an experimental data file for comparison to predicted data.
Experimental results file should be formatted as follows:
Seg_start Seg_end Time_1 Time_2 Time_3 ... [Time_n]
This is the same format as the printout of predicted results"""
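# Hypothetical example of an experimental data file for three timepoints
# (append a chain index column after Seg_end when the segments file also defines chains):
#   10  25  0.12  0.45  0.88
#   26  40  0.05  0.30  0.75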
# Check I'm not loading in too many timepoints
try:
if self._single_chain:
expt = np.loadtxt(self.params['expfile'], dtype=[ ('segres', np.int32, (2,)),
('fracs', np.float64, (len(self.params['times']),)) ])
else:
expt = np.loadtxt(self.params['expfile'], dtype=[ ('segres', np.int32, (2,)),
('chain', np.int32, (1)), ('fracs', np.float64, (len(self.params['times']),)) ])
except ValueError as err:
raise Functions.HDX_Error("There's a problem with the experimental data file. It has too few timepoints. \n" \
"This can be caused if you've defined chain indices in the segments file but not in the experimental data file.\n" \
"The error while reading was: %s" % str(err))
# Now check I'm not loading in too few timepoints
try:
if self._single_chain:
expt = np.loadtxt(self.params['expfile'], dtype=[ ('segres', np.int32, (2,)),
('fracs', np.float64, (len(self.params['times']) + 1,)) ])
else:
expt = np.loadtxt(self.params['expfile'], dtype=[ ('segres', np.int32, (2,)),
('chain', np.int32, (1)), ('fracs', np.float64, (len(self.params['times']) + 1,)) ])
raise Functions.HDX_Error("There's a problem with the experimental data file. It has too many timepoints. \n"
"This can be caused if you've defined chain indices in the experimental data file but not in the segments file.\n")
except ValueError:
pass
# Check expt = predicted
if self._single_chain:
if np.array_equal(self.segres['segres'], expt['segres']):
self.expfracs = expt['fracs']
else:
raise Functions.HDX_Error("The experimental segments read from %s and predicted segments read from %s don't match!" % (self.params['segfile'], self.params['expfile']))
else:
if all( (np.array_equal(self.segres['segres'], expt['segres']), np.array_equal(self.segres['chain'], expt['chain'])) ):
self.expfracs = expt['fracs']
else:
raise Functions.HDX_Error("The experimental segments/chains read from %s and predicted segments/chains read from %s don't match!" % (self.params['segfile'], self.params['expfile']))
def segments(self, top):
"""Function to average residue deuterated fractions over
a given set of peptide segments. The first residue in each
segment will not be included in the averaging, as it is assumed
to be 100% back-exchanged during analysis.
Residue indices provided in the given list are converted to
residue IDs from the given trajectory's topology. Currently this
renumbering will only work for single chain topologies with sequential
numbering. If a residue in a segment is not found (e.g. a truncated
N/C terminus), the next residue is chosen as the start/end point instead.
Writes info on skipped residues to logfile "HDX_analysis.log" by default
and the segment/average deuteration information to "Segment_average_fractions.dat"
Usage: segments(top)
Returns: [n_segs, 2] 2D numpy array of segment start/end residue IDs,
[n_segs, n_times] 2D numpy array of segment deuterated fractions at each timepoint"""
res2idx = {}
with open(self.params['logfile'], 'a') as f:
f.write("Now converting residue numbers to indices for segment averaging:\n")
for idx, res in enumerate(top.residues):
if res.is_protein:
res2idx[(res.resSeq, res.chain.index)] = idx
# res2idx[res.resSeq] = idx # Only works for single chain or sequential numbers, no re-use of resnums
else:
with open(self.params['logfile'], 'a') as f:
f.write("Skipping residue: %s, not a protein residue\n" % res)
self.read_segfile()
try:
aves = np.zeros((len(self.resfracs), len(self.segres), len(self.params['times'])))
stddevs = np.zeros((len(self.resfracs), len(self.segres), len(self.params['times'])))
c_aves = np.zeros((len(self.c_resfracs), len(self.segres), len(self.params['times'])))
except TypeError:
aves = np.zeros((len(self.resfracs), len(self.segres), 1))
stddevs = np.zeros((len(self.resfracs), len(self.segres), 1))
c_aves = np.zeros((len(self.c_resfracs), len(self.segres), 1))
self.params['times'] = [self.params['times']]
# Info for 'skip_first'
if self.params['skip_first']:
for i1, (seg, chain) in enumerate(self.segres):
with open(self.params['logfile'], 'a') as f:
try:
f.write("'Skip_first' is set. Not including residue %s in averaging for segment %s-%s, chain idx %s.\n" \
% (top.residue(res2idx[(seg[0], chain)]), seg[0], seg[1], chain))
except KeyError:
_ = top.chain(chain).residue(0)
f.write("'Skip_first' is set. Not including residue %s in averaging for segment %s-%s, chain idx %s.\n" \
% (_, seg[0], seg[1], chain))
else:
for i1, (seg, chain) in enumerate(self.segres):
with open(self.params['logfile'], 'a') as f:
try:
f.write("'Skip_first' is NOT set. Including residue %s in averaging for segment %s-%s, chain idx %s.\n" \
% (top.residue(res2idx[(seg[0], chain)]), seg[0], seg[1], chain))
except KeyError:
_ = top.chain(chain).residue(0)
f.write("'Skip_first' is NOT set. Including residue %s in averaging for segment %s-%s, chain idx %s.\n" \
% (_, seg[0], seg[1], chain))
# Calc average fractions for each chunk
for i0, chunk, errchunk in zip(range(len(self.resfracs)), self.resfracs, self.resfrac_STDs):
for i2, t in enumerate(self.params['times']):
for i1, (seg, chain) in enumerate(self.segres):
try:
start = res2idx[(seg[0], chain)]
except KeyError:
with open(self.params['logfile'], 'a') as f:
f.write("Didn't find residue %s, chain %s in protein. Using residue %s, chain %s as startpoint instead.\n" \
% (seg[0], chain, top.chain(chain).residue(0), chain))
start = top.chain(chain).residue(0).index
try:
end = res2idx[(seg[1], chain)]
except KeyError:
with open(self.params['logfile'], 'a') as f:
f.write("Didn't find residue %s, chain %s in protein. Using residue %s, chain %s as endpoint instead.\n" \
% (seg[1], chain, top.chain(chain).residue(-1), chain))
end = top.chain(chain).residue(-1).index
if self.params['skip_first']:
idxs = np.where(np.logical_and( self.residxs > start, self.residxs <= end ))[0] # > start skips
else:
idxs = np.where(np.logical_and( self.residxs >= start, self.residxs <= end ))[0] # >= start incs
aves[i0, i1, i2] = np.mean(chunk[idxs, i2])
stddevs[i0, i1, i2] = np.sqrt(np.sum(errchunk[idxs, i2]**2)) / len(idxs) # propagate error over the number of residues in the segment
stderrs = np.copy(stddevs)
for i0, a in enumerate(stderrs):
a /= np.sqrt(self.n_frames[i0])
# Do the same for cumulative resfracs
for i0, cchunk in enumerate(self.c_resfracs):
for i2, t in enumerate(self.params['times']):
for i1, (seg, chain) in enumerate(self.segres):
try:
start = res2idx[(seg[0], chain)]
except KeyError:
with open(self.params['logfile'], 'a') as f:
f.write("Cumulative segment averages: "
"Didn't find residue %s, chain %s in protein. Using residue %s, chain %s as startpoint instead.\n" \
% (seg[0], chain, top.chain(chain).residue(0), chain))
start = top.chain(chain).residue(0).index
try:
end = res2idx[(seg[1], chain)]
except KeyError:
with open(self.params['logfile'], 'a') as f:
f.write("Cumulative segment averages: "
"Didn't find residue %s, chain %s in protein. Using residue %s, chain %s as endpoint instead.\n" \
% (seg[1], chain, top.chain(chain).residue(-1), chain))
end = top.chain(chain).residue(-1).index
if self.params['skip_first']:
idxs = np.where(np.logical_and( self.residxs > start, self.residxs <= end ))[0] # > start skips
else:
idxs = np.where(np.logical_and( self.residxs >= start, self.residxs <= end ))[0] # >= start incs
c_aves[i0, i1, i2] = np.mean(cchunk[idxs, i2])
# Write average fractions file for each chunk
# N.B Again, these will NOT add up to the c_segfracs value, which is recalc'd using
# the exponential decay and the mean PF at a given timepoint (not just a straight ave
# of the block averaged resfracs)
if self._single_chain:
for chunkave in aves:
if os.path.exists(self.params['outprefix']+"Segment_average_fractions.dat"):
filenum = len(glob.glob(self.params['outprefix']+"Segment_average_fractions*"))
np.savetxt(self.params['outprefix']+"Segment_average_fractions_chunk_%d.dat" % (filenum+1),
np.hstack((self.segres['segres'], chunkave)),
fmt='%6d %6d ' + '%8.5f '*len(self.params['times']), header="Res1 Res2 Times / min: %s" \
% ' '.join([ str(t) for t in self.params['times'] ]))
else:
np.savetxt(self.params['outprefix']+"Segment_average_fractions.dat", np.hstack((self.segres['segres'], chunkave)),
fmt='%6d %6d ' + '%8.5f '*len(self.params['times']), header="Res1 Res2 Times / min: %s" \
% ' '.join([ str(t) for t in self.params['times'] ]))
else:
for chunkave in aves:
if os.path.exists(self.params['outprefix']+"Segment_average_fractions.dat"):
filenum = len(glob.glob(self.params['outprefix']+"Segment_average_fractions*"))
np.savetxt(self.params['outprefix']+"Segment_average_fractions_chunk_%d.dat" % (filenum+1),
np.hstack((self.segres['segres'], self.segres['chain'].reshape((len(self.segres['segres']),1)), chunkave)),
fmt='%6d %6d %6d ' + '%8.5f '*len(self.params['times']), header="Res1 Res2 Chain Times / min: %s" \
% ' '.join([ str(t) for t in self.params['times'] ]))
else:
np.savetxt(self.params['outprefix']+"Segment_average_fractions.dat", np.hstack((self.segres['segres'],
self.segres['chain'].reshape((len(self.segres['segres']),1)), chunkave)),
fmt='%6d %6d %6d ' + '%8.5f '*len(self.params['times']), header="Res1 Res2 Chain Times / min: %s" \
% ' '.join([ str(t) for t in self.params['times'] ]))
with open(self.params['logfile'], 'a') as f:
f.write("Segment averaging complete.\n")
return aves, c_aves, stddevs, stderrs
def check_blocksize(self):
"""Evaluate convergence of standard error in the mean for PFs.
By-frame PFs are successively block averaged at every possible
block size (1 -> n_frames-1), the SEM calculated across block
averages, and saved to self.tot_SEMs"""
# self.tot_SEMs = np.zeros((len(self.pf_byframe)-1, 2), dtype=[np.int32, np.float64])
# Array(window, SEM)
valid_windows = []
for window in range(1, int((self.c_n_frames[-1] / 2)) + 1):
if self.c_n_frames[-1] % window == 0:
valid_windows.append(window)
with open(self.params['logfile'], 'a') as f:
f.write("Total frames divisible by: %s,\nEvaluating standard error in total PF at these windows.\n"\
% " ".join([ str(i) for i in valid_windows ]))
valid_windows = np.asarray(valid_windows, dtype=np.int32)
if len(valid_windows) > 0: # 1 or prime frames
self.tot_SEMs = np.zeros((len(valid_windows), 2))
else:
self.tot_SEMs = np.zeros((1, 2))
with np.errstate(invalid='ignore'): # Ignores infs in stderr calc
for i, window in enumerate(valid_windows):
self.tot_SEMs[i, 0] = window
self.tot_SEMs[i, 1] = stderr(self._windowed_average(np.sum(self.pf_byframe, axis=0), window))
self.norm_tot_SEMs = np.copy(self.tot_SEMs)
self.norm_tot_SEMs[:,1] /= np.max(self.tot_SEMs[:,1]) # Normalised to max
# Array(res, window, SEM)
if len(valid_windows) > 0: # 1 or prime frames
self.res_SEMs = np.zeros((len(self.resnums), len(valid_windows), 2))
self.res_STDs = np.zeros((len(self.resnums), len(valid_windows), 2))
else:
self.res_SEMs = np.zeros((len(self.resnums), 1, 2))
self.res_STDs = np.zeros((len(self.resnums), 1, 2))
with np.errstate(invalid='ignore'): # Ignores infs in stderr calc
for j, res in enumerate(self.resnums):
for i, window in enumerate(valid_windows):
self.res_SEMs[j,i,0] = window
self.res_SEMs[j,i,1] = stderr(self._windowed_average(self.pf_byframe[j], window))
self.res_STDs[j,i,1] = np.std(self._windowed_average(self.pf_byframe[j], window), ddof=1)
self.norm_res_SEMs = np.copy(self.res_SEMs)
for res in self.norm_res_SEMs:
res[:,1] /= np.max(res[:,1]) # Normalised to max
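# Illustrative use of the arrays filled above (a sketch, not original code):
# self.tot_SEMs[:, 0] holds the tested window sizes and self.tot_SEMs[:, 1] the SEM of the
# block-averaged total PF at each window. Plotting SEM against window size and looking for a
# plateau indicates the block size at which successive blocks are effectively uncorrelated,
# e.g. plt.plot(self.tot_SEMs[:, 0], self.norm_tot_SEMs[:, 1]) if matplotlib is available.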
def propagate_errors(self):
"""Propagate errors for individual blocks. Save as std errors for PFs, resfracs & segfracs"""
self.pf_stds = np.zeros(self.pfs.shape)
self.pf_SEMs = np.zeros(self.pfs.shape)
startframe = 0
for i, endframe in enumerate(self.c_n_frames):
self.pf_stds[i] = np.std(self.pf_byframe[:,startframe:endframe], axis=1, ddof=1)
self.pf_SEMs[i] = stderr(self.pf_byframe[:,startframe:endframe], axis=1)
startframe += self.n_frames[i]
self.resfrac_STDs = np.zeros(self.resfracs.shape)
import numpy as np, matplotlib.pyplot as plt
from matplotlib.cm import rainbow
from matplotlib.cm import YlGn as cmap_gradient
from matplotlib import colors, cm
#from PreFRBLE.convenience import *
from PreFRBLE.label import *
from PreFRBLE.likelihood import *
#from PreFRBLE.physics import *
#from PreFRBLE.parameter import *
############################################################################
############################ PLOT FIGURES ##################################
############################################################################
def PlotBayes( x=np.ones(1), bayes=np.ones(1), title=None, label=None, width=1.0, color='blue', show_values=False, ax=None, posterior=False ):
""" Barplot of bayes factor or posterior likelihood for values of parameter x """
if ax is None:
fig, ax = plt.subplots( )
ax.bar(x, bayes/bayes.max(), width, color=color )
ax.set_title( title )
ax.set_yscale('log')
ax.set_xlabel( label )
if posterior:
ax.set_ylabel(r"$P/P_{\rm max}$")
else:
ax.set_ylabel(r"$\mathcal{B}/\mathcal{B}_{\rm max}$")
# ax.set_ylabel(r"$\mathcal{B} = \prod L / L_0$")
if show_values: ## print value on top of each bar ... doesn't work
shift = bayes.max()/bayes.min()/10
for xx, b in zip( x, bayes ):
ax.text( xx, b*shift, str(b), color=color, fontweight='bold' )
### assure that there are ticks at y axis
lim = ax.get_ylim()
ax.set_ylim(lim[0]*0.5, lim[1]*2)
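# Minimal usage sketch for PlotBayes (illustrative only; the parameter values below are made up).
def _demo_PlotBayes():
    x = np.arange(5)
    bayes = np.array([1e-3, 1e-1, 1.0, 3e-1, 1e-2])
    PlotBayes(x=x, bayes=bayes, title="demo", label="parameter", width=0.8)
    plt.show()  # bars show the Bayes factor relative to its maximum, on a log scale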
def PlotBayes2D( bayes=[], dev=[], x=[], y=[], xlabel='', ylabel='', P_min=1e-5, graphs=False, plane=False, ax=None, posterior=False ):
"""
Plot 2D distribution of Bayes factors for joint analysis of two parameters x and y
Parameters
----------
x : 1D array-like
values of first parameter
y : 1D array-like
values of second parameter
bayes : 2D array-like, shape( N_y, N_x )
bayes factors for tuples of (x,y)
dev : 2D array-like, shape( N_y, N_x ), optional
deviation of log10 of bayes factors, only plotted for graphs=True
graphs : boolean
indicate whether results should be drawn as graphs
plane : boolean
indicate whether results should be drawn as plane. Do not use together with graphs
"""
if ax is None:
fig, ax = plt.subplots()
if posterior:
P_label = r"P/P_{\rm max}"
else:
P_label = r"\mathcal{B}/\mathcal{B}_{\rm max}"
# P_label = r"\mathcal{B} = \prod L / L_0"
if graphs:
for ib, (b, Y) in enumerate( zip( bayes/bayes.max(), y ) ):
if len(dev) > 0:
yerr = np.array([ b - 10.**( np.log10(b) - dev[ib]), 10.**( np.log10(b) + dev[ib]) - b ]) # lower/upper errors reconstructed from the log10 deviation (assumed symmetric in log-space)
import pygame
import numpy as np
import colorsys
from PyEvolv.assets.font import FONT, get_font
from typing import Dict, List, Tuple
def display_creature(f):
def inner(self, gameDisplay: pygame.Surface, creatures: List) -> None:
pixels_per_relative = self.display_height / self.relatives_on_screen
for creature in creatures:
type, x, y, color, food_color, size, rotation, sensor_1, sensor_2, sensor_3 = creature()
if self.relative_x <= x <= self.relative_x + self.relatives_on_screen and self.relative_y <= y <= self.relative_y + self.relatives_on_screen:
size = int(size*pixels_per_relative)
surf_size = max(size, int(self.constants["max_sensor_length"]*pixels_per_relative))
creature_surf = pygame.Surface((2*surf_size, 2*surf_size), pygame.SRCALPHA)
creature_surf = creature_surf.convert_alpha()
color = tuple(round(i * 255) for i in colorsys.hsv_to_rgb(color[0], color[1], color[2]))
food_color = tuple(round(i * 255) for i in colorsys.hsv_to_rgb(food_color[0], food_color[1], food_color[2]))
f(creature_surf, color, food_color, size, surf_size)
pygame.draw.line(creature_surf, (0,0,0), (surf_size, surf_size),
(int(surf_size + (pixels_per_relative * sensor_1[0] * np.cos(np.radians(sensor_1[1])))),
int(surf_size + (pixels_per_relative * sensor_1[0] * np.sin(np.radians(sensor_1[1])))))
)
pygame.draw.line(creature_surf, (0,0,0), (surf_size, surf_size),
(int(surf_size + (pixels_per_relative * sensor_2[0] * np.cos(np.radians(sensor_2[1])))),
int(surf_size + (pixels_per_relative * sensor_2[0] * np.sin(np.radians(sensor_2[1])))))
)
# from tqdm.notebook import tqdm as tqdm_notebook
# import os
# import glob
import pickle
import numpy as np
from src.support_class import *
from matplotlib import pyplot as plt
from matplotlib import colors as mcolors
from scipy import linalg
from codeStore import support_fun as spf
colors11 = plt.get_cmap('Blues')
colors12 = plt.get_cmap('Reds')
colors1 = np.vstack((colors11(np.linspace(1, 0.2, 256)), colors12(np.linspace(0.4, 1, 256))))
cmpBR = mcolors.LinearSegmentedColormap.from_list('my_colormap', colors1)
# generate the mobility matrix of the microswimmer from a pickle file,
# with force- and torque-free conditions,
# ignoring head-tail interaction.
def fun_m_rot(mbase, R):
ab = mbase[0:3, 0:3]
bb1 = mbase[3:6, 0:3]
bb2 = mbase[0:3, 3:6]
cb = mbase[3:6, 3:6]
m2 = np.zeros_like(mbase)
m2[0:3, 0:3] = np.dot(R, np.dot(ab, R.T))
m2[3:6, 0:3] = np.dot(R, np.dot(bb1, R.T)) * np.linalg.det(R)
m2[0:3, 3:6] = np.dot(R, np.dot(bb2, R.T)) * np.linalg.det(R)
m2[3:6, 3:6] = np.dot(R, np.dot(cb, R.T))
return m2
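# Sanity-check sketch (not part of the original module): rotating a mobility
# matrix by the identity rotation should leave it unchanged.
def _demo_fun_m_rot_identity():
    mbase = np.arange(36, dtype=float).reshape(6, 6)
    assert np.allclose(fun_m_rot(mbase, np.eye(3)), mbase)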
def cross_matrix(v):
assert v.shape == (3,)
m = np.zeros((3, 3))
m[0, 1] = -v[2]
m[0, 2] = v[1]
m[1, 0] = v[2]
m[1, 2] = -v[0]
m[2, 0] = -v[1]
m[2, 1] = v[0]
return m
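# Illustrative check (a sketch, not original code): cross_matrix(v) builds the
# skew-symmetric matrix of v, so cross_matrix(v) @ w equals np.cross(v, w).
def _demo_cross_matrix():
    v = np.array([1.0, 2.0, 3.0])
    w = np.array([-1.0, 0.5, 2.0])
    assert np.allclose(np.dot(cross_matrix(v), w), np.cross(v, w))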
def fun_rbc_rtc(rb1, rb2, ch, ph, dist_hs, tail_ini_beta, rotM):
trs = rb1 * rb2 / np.sqrt((rb1 * np.sin(tail_ini_beta)) ** 2 +
(rb2 * np.cos(tail_ini_beta)) ** 2)
tl = 2 * rb1 + ch * ph + dist_hs
rbc_base = np.array((0, 0, tl / 2 - rb1))
rtc = rbc_base - np.array((0, 0, rb1 + dist_hs + ch * ph / 2))
head_end0 = rbc_base - np.array((0, 0, trs))
rbc = np.dot(rotM.T, (rbc_base - head_end0)) + head_end0
return rbc, rtc
def fun_mfull_ufull_core(mhead_base, mtail, dist_hs, beta, rotM, wbc, wtc,
rb1, rb2, ch, ph, body_size_fct=1, tail_size_fct=1, ):
beta_norm = np.array([0, 1, 0])
rotM_beta = get_rot_matrix(norm=beta_norm, theta=-beta)
mhead = fun_m_rot(fun_m_rot(mhead_base, rotM_beta), rotM.T)
rbc, rtc = fun_rbc_rtc(rb1, rb2, ch, ph, dist_hs, beta, rotM)
rc = rbc # in the current version the center of the microswimmer is at the center of the head.
drbc = rbc - rc
drtc = rtc - rc
mhead[0:3, 0:3] = mhead[0:3, 0:3] * body_size_fct ** 1
mhead[0:3, 3:6] = mhead[0:3, 3:6] * body_size_fct ** 2
mhead[3:6, 0:3] = mhead[3:6, 0:3] * body_size_fct ** 2
mhead[3:6, 3:6] = mhead[3:6, 3:6] * body_size_fct ** 3
mtail[0:3, 0:3] = mtail[0:3, 0:3] * tail_size_fct ** 1
mtail[0:3, 3:6] = mtail[0:3, 3:6] * tail_size_fct ** 2
mtail[3:6, 0:3] = mtail[3:6, 0:3] * tail_size_fct ** 2
mtail[3:6, 3:6] = mtail[3:6, 3:6] * tail_size_fct ** 3
# generate M matrix with the force- and torque-free conditions.
mfull = np.zeros((18, 18))
mfull[0: 6, 0: 6] = mhead
mfull[6:12, 6:12] = mtail
mfull[0: 3, 12:15] = -np.eye(3)
mfull[0: 3, 15:18] = cross_matrix(drbc)
mfull[3: 6, 15:18] = -np.eye(3)
mfull[6: 9, 12:15] = -np.eye(3)
mfull[6: 9, 15:18] = cross_matrix(drtc)
mfull[9:12, 15:18] = -np.eye(3)
mfull[12:15, 0: 3] = -np.eye(3)
mfull[12:15, 6: 9] = -np.eye(3)
mfull[15:18, 0: 3] = -cross_matrix(drbc)
mfull[15:18, 3: 6] = -np.eye(3)
mfull[15:18, 6: 9] = -cross_matrix(drtc)
mfull[15:18, 9:12] = -np.eye(3)
# generate boundary conditions.
norm_head = -np.dot(rotM.T, rotM_beta)[:, 2]
norm_tail = np.array((0, 0, 1))
ufull = np.zeros(18)
ufull[0: 3] = 0
ufull[3: 6] = wbc * norm_head
ufull[6: 9] = 0
ufull[9:12] = wtc * norm_tail
mobility_kwargs = {'rbc': rbc,
'rtc': rtc,
'rc': rc,
'norm_head': norm_head,
'norm_tail': norm_tail, }
return mfull, ufull, mobility_kwargs
def fun_position_kwargs(case_kwargs):
beta_norm = np.array([0, 1, 0])
dist_hs = case_kwargs['dist_hs']
beta = case_kwargs['tail_ini_beta']
theta = case_kwargs['tail_ini_theta']
phi = case_kwargs['tail_ini_phi']
psi = case_kwargs['tail_ini_psi']
rb1 = case_kwargs['rs1']
rb2 = case_kwargs['rs2']
ch = case_kwargs['ch']
ph = case_kwargs['ph']
rotM_beta = get_rot_matrix(norm=beta_norm, theta=-beta)
rotM = Rloc2glb(theta, phi, psi)
rbc, rtc = fun_rbc_rtc(rb1, rb2, ch, ph, dist_hs, beta, rotM)
rc = rbc # in the current version the center of the microswimmer is at the center of the head.
norm_head = -np.dot(rotM.T, rotM_beta)[:, 2]
norm_tail = np.array((0, 0, 1))
position_kwargs = {'rbc': rbc,
'rtc': rtc,
'rc': rc,
'norm_head': norm_head,
'norm_tail': norm_tail, }
return position_kwargs
def fun_ut_un(u, w):
ut = np.dot(u, w) * w / (np.linalg.norm(w) ** 2)
un = u - ut
return ut, un
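# Illustrative check (sketch only): fun_ut_un splits u into its component along w
# and the remainder perpendicular to w.
def _demo_fun_ut_un():
    u = np.array([1.0, 2.0, 3.0])
    w = np.array([0.0, 0.0, 2.0])
    ut, un = fun_ut_un(u, w)
    assert np.allclose(ut + un, u)
    assert np.isclose(np.dot(un, w), 0.0)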
def mobility_pickle(pickle_dir, beta, theta, phi, psi, dist_hs, wbc, wtc,
body_size_fct=1, tail_size_fct=1, ):
with open(pickle_dir, 'rb') as handle:
tpick = pickle.load(handle)
problem_kwargs = tpick['problem_kwargs']
rb1 = problem_kwargs['rs1']
rb2 = problem_kwargs['rs2']
ch = problem_kwargs['ch']
ph = problem_kwargs['ph']
mhead_base, mtail = tpick['Mhead'], tpick['Mtail']
rotM = Rloc2glb(theta, phi, psi)
mfull, ufull, mobility_kwargs = \
fun_mfull_ufull_core(mhead_base, mtail, dist_hs, beta, rotM,
wbc, wtc, rb1, rb2, ch, ph,
body_size_fct=body_size_fct, tail_size_fct=tail_size_fct)
mobility_kwargs['rb1'] = rb1
mobility_kwargs['rb2'] = rb2
mobility_kwargs['ch'] = ch
mobility_kwargs['ph'] = ph
return mfull, ufull, mobility_kwargs
def apx_resistance_pickle(pickle_dir, beta, theta, phi, psi, dist_hs, wbc, wtc):
# decoupled method, resistance
with open(pickle_dir, 'rb') as handle:
tpick = pickle.load(handle)
problem_kwargs = tpick['problem_kwargs']
rb1 = problem_kwargs['rs1']
rb2 = problem_kwargs['rs2']
ch = problem_kwargs['ch']
ph = problem_kwargs['ph']
mhead_base, mtail = tpick['Mhead'], tpick['Mtail']
#
Rhead_base = np.linalg.inv(mhead_base)
Rhead_base = np.diagflat(np.diag(Rhead_base))
t1 = (Rhead_base[0, 0] + Rhead_base[1, 1]) / 2
Rhead_base[0, 0] = t1
Rhead_base[1, 1] = t1
t1 = (Rhead_base[3, 3] + Rhead_base[4, 4]) / 2
Rhead_base[3, 3] = t1
Rhead_base[4, 4] = t1
Rtail = np.linalg.inv(mtail)
beta_norm = np.array([0, 1, 0])
rotM_beta = get_rot_matrix(norm=beta_norm, theta=-beta)
rotM = Rloc2glb(theta, phi, psi)
Rhead = fun_m_rot(fun_m_rot(Rhead_base, rotM_beta), rotM.T)
Ab_rt = Rhead[0:3, 0:3]
Cb_rt = Rhead[3:6, 3:6]
At = np.diagflat(np.diag(Rtail[0:3, 0:3]))
t1 = (At[0, 0] + At[1, 1]) / 2
At[0, 0] = t1
At[1, 1] = t1
Bt = np.diagflat(np.diag((Rtail[0:3, 3:6] + Rtail[3:6, 0:3]) / 2))
t1 = (Bt[0, 0] + Bt[1, 1]) / 2 * 0
Bt[0, 0] = t1
Bt[1, 1] = t1
Ct = np.diagflat(np.diag(Rtail[3:6, 3:6]))
t1 = (Ct[0, 0] + Ct[1, 1]) / 2
Ct[0, 0] = t1
Ct[1, 1] = t1
#
rbc, rtc = fun_rbc_rtc(rb1, rb2, ch, ph, dist_hs, beta, rotM)
rc = rbc # in the current version the center of the microswimmer is at the center of the head.
# drbc = rbc - rc
drtc = rtc - rc
dtc = cross_matrix(drtc)
norm_head = -np.dot(rotM.T, rotM_beta)[:, 2]
norm_tail = np.array((0, 0, 1))
#
Rfull = np.zeros((6, 6))
Rfull[0:3, 0:3] = Ab_rt + At
Rfull[0:3, 3:6] = - np.dot(At, dtc)
Rfull[3:6, 0:3] = + np.dot(dtc, At)
Rfull[3:6, 3:6] = Cb_rt + Ct + np.dot(dtc, Bt) - np.dot(Bt, dtc) - np.dot(dtc, np.dot(At, dtc))
FFull = np.zeros(6)
FFull[0:3] = -np.dot(Bt, wtc * norm_tail)
FFull[3:6] = -np.dot(Cb_rt, wbc * norm_head) - \
np.dot(Ct, wtc * norm_tail) - np.dot(dtc, np.dot(Bt, wtc * norm_tail))
resistance_kwargs = {'rbc': rbc,
'rtc': rtc,
'rc': rc,
'norm_head': norm_head,
'norm_tail': norm_tail, }
return Rfull, FFull, resistance_kwargs
def fun_alpha_bctc(model, wbc, wtc):
mfull, ufull, mobility_kwargs = model.mobility_matrix(wbc, wtc)
ffull = linalg.solve(mfull, ufull)
pb, pt = mobility_kwargs['norm_head'], mobility_kwargs['norm_tail']
Uc, Wc, Wbc = ffull[12:15], ffull[15:18], wbc * pb
Wg = Wc + Wbc
alpha_b = np.arccos(np.dot(pb, Wg) / np.linalg.norm(pb) / np.linalg.norm(Wg))
alpha_b = np.pi - alpha_b if alpha_b > np.pi / 2 else alpha_b
alpha_t = np.arccos(np.dot(pt, Wg) / np.linalg.norm(pt) / np.linalg.norm(Wg))
alpha_t = np.pi - alpha_t if alpha_t > np.pi / 2 else alpha_t
return alpha_b, alpha_t
def fun_kappa_alpha(model, wbc, wtc):
alpha_b, alpha_t = fun_alpha_bctc(model, wbc, wtc)
kappa_alpha = np.abs(alpha_b / alpha_t)
return kappa_alpha
def fun_hook_torque(model, wbc, wtc):
mfull, ufull, mobility_kwargs = model.mobility_matrix(wbc, wtc)
ffull = linalg.solve(mfull, ufull)
rb1 = mobility_kwargs['rb1']
rbc = mobility_kwargs['rbc']
pb = mobility_kwargs['norm_head']
ds = rbc + rb1 * pb
hookT = ffull[3:6] - np.cross(ds, ffull[0:3])
return hookT
def plot_3D_Traj(axi, tplt, theta_list):
axi.plot(np.zeros(1), np.zeros(1), np.zeros(1), ' ')
axi.plot(tplt[:, 0], tplt[:, 1], tplt[:, 2], ' ')
spf.set_axes_equal(axi)
spf.colorline3d(tplt, theta_list / np.pi, ax0=axi, clb_title='$\\theta / \\pi$',
cmap=plt.get_cmap('viridis'))
axi.scatter(axi.get_xlim()[0], np.zeros(1), np.zeros(1), marker='.', c='k')
axi.scatter(np.zeros(1), axi.get_ylim()[1], np.zeros(1), marker='.', c='k')
axi.scatter(np.zeros(1), np.zeros(1), axi.get_zlim()[0], marker='.', c='k')
axi.plot(np.ones_like(theta_list) * axi.get_xlim()[0], tplt[:, 1], tplt[:, 2],
'--', color='grey')
axi.plot(tplt[:, 0], np.ones_like(theta_list) * axi.get_ylim()[1], tplt[:, 2],
'--', color='grey')
axi.plot(tplt[:, 0], tplt[:, 1], np.ones_like(theta_list) * axi.get_zlim()[0],
'--', color='grey')
axi.view_init(25, -60)
axi.plot(np.zeros(1), np.zeros(1), np.zeros(1), marker='s', c='k')
return True
def plot_color_line(axi, tx, ty, xlabel, ylabel, c, vmin, vmax,
cmap=cmpBR, xscale0='linear', yscale0='linear', s=4,
marker='o', label=''):
axi.plot(tx, ty, linestyle='None')
# axi.relim()
# txlim0 = axi.get_xlim()
# tylim0 = axi.get_ylim()
# print(tylim0, ty.min())
sc = axi.scatter(tx, ty, vmin=vmin, vmax=vmax, c=c, cmap=cmap, s=s,
marker=marker, label=label)
axi.set_xlabel(xlabel)
axi.set_ylabel(ylabel)
axi.set_xscale(xscale0)
axi.set_yscale(yscale0)
# axi.set_xlim(*txlim0)
# axi.set_ylim(*tylim0)
return sc
def fun_cal_kwargs(Uc, Wc, wbc, pb, pt, kappa, mdf_alpha=True):
Wbc = wbc * pb
Wg = Wc + kappa * Wbc
UcWg_t, UcWg_n = fun_ut_un(Uc, Wg)
eta = np.arccos(np.dot(Uc, Wg) / np.linalg.norm(Uc) / np.linalg.norm(Wg))
alpha_b = np.arccos(np.dot(pb, Wg) / np.linalg.norm(pb) / np.linalg.norm(Wg))
alpha_t = np.arccos(np.dot(pt, Wg) / np.linalg.norm(pt) / np.linalg.norm(Wg))
if mdf_alpha:
alpha_b = np.pi - alpha_b if alpha_b > np.pi / 2 else alpha_b
alpha_t = np.pi - alpha_t if alpha_t > np.pi / 2 else alpha_t
R = np.linalg.norm(UcWg_n) / np.linalg.norm(Wg)
uc_par = np.sign(np.dot(Uc, Wg)) * np.linalg.norm(UcWg_t)
cal_kwargs = {'Wg': Wg,
'eta': eta,
'alpha_b': alpha_b,
'alpha_t': alpha_t,
'R': R,
'uc_par': uc_par,}
return cal_kwargs
class DecouplingModel:
def __init__(self, pickle_dir, beta_norm=np.array([0, 1, 0])):
with open(pickle_dir, 'rb') as handle:
tpick = pickle.load(handle)
self._case_kwargs = tpick['problem_kwargs']
self._rb1 = self._case_kwargs['rs1']
self._rb2 = self._case_kwargs['rs2']
self._ch = self._case_kwargs['ch']
self._ph = self._case_kwargs['ph']
self._mhead_base = tpick['Mhead']
self._mtail_base = tpick['Mtail']
self._beta_norm = beta_norm
self._beta = 0
self._theta = 0
self._phi = 0
self._psi = 0
self._dist_hs = 0
self._rotM_beta = np.eye(3)
self._rotM = np.eye(3)
@property
def case_kwargs(self):
return self._case_kwargs
@property
def rb1(self):
return self._rb1
@property
def rb2(self):
return self._rb2
@property
def ch(self):
return self._ch
@property
def ph(self):
return self._ph
@property
def mhead_base(self):
return self._mhead_base
@property
def mtail_base(self):
return self._mtail_base
@property
def beta_norm(self):
return self._beta_norm
@staticmethod
def fun_ut_un(u, w):
ut = np.dot(u, w) * w / (np.linalg.norm(w) ** 2)
un = u - ut
return ut, un
@staticmethod
def fun_MR_rot(mr_base, R):
ab = mr_base[0:3, 0:3]
bb1 = mr_base[3:6, 0:3]
bb2 = mr_base[0:3, 3:6]
cb = mr_base[3:6, 3:6]
m2 = np.zeros_like(mr_base)
m2[0:3, 0:3] = np.dot(R, np.dot(ab, R.T))
m2[3:6, 0:3] = np.dot(R, np.dot(bb1, R.T)) * np.linalg.det(R)
m2[0:3, 3:6] = np.dot(R, np.dot(bb2, R.T)) * np.linalg.det(R)
m2[3:6, 3:6] = np.dot(R, np.dot(cb, R.T))
return m2
@staticmethod
def cross_matrix(v):
assert v.shape == (3,)
m = np.zeros((3, 3))
m[0, 1] = -v[2]
m[0, 2] = v[1]
m[1, 0] = v[2]
m[1, 2] = -v[0]
m[2, 0] = -v[1]
m[2, 1] = v[0]
return m
@staticmethod
def fun_rbc_rtc(rb1, rb2, ch, ph, dist_hs, tail_ini_beta, rotM):
trs = rb1 * rb2 / np.sqrt((rb1 * np.sin(tail_ini_beta)) ** 2 +
(rb2 * np.cos(tail_ini_beta)) ** 2)
tl = 2 * rb1 + ch * ph + dist_hs
rbc_base = np.array((0, 0, tl / 2 - rb1))
rtc = rbc_base - np.array((0, 0, rb1 + dist_hs + ch * ph / 2))
head_end0 = rbc_base - np.array((0, 0, trs))
rbc = np.dot(rotM.T, (rbc_base - head_end0)) + head_end0
return rbc, rtc
def fun_position_kwargs(self):
beta_norm = self.beta_norm
rb1 = self.rb1
rb2 = self.rb2
ch = self.ch
ph = self.ph
beta = self._beta
theta = self._theta
phi = self._phi
psi = self._psi
dist_hs = self._dist_hs
left_hand = self.case_kwargs['left_hand']
rotM_beta = get_rot_matrix(norm=beta_norm, theta=-beta)
rotM = Rloc2glb(theta, phi, psi)
rbc, rtc = self.fun_rbc_rtc(rb1, rb2, ch, ph, dist_hs, beta, rotM)
rc = rbc # in the current version the center of the microswimmer is at the center of the head.
if left_hand:
norm_head = np.dot(rotM.T, rotM_beta)[:, 2]
norm_tail = -np.array((0, 0, 1))
from utils.speech_featurizers import SpeechFeaturizer
from utils.text_featurizers import TextFeaturizer
import pypinyin
import numpy as np
from augmentations.augments import Augmentation
import random
import tensorflow as tf
import os
class AM_DataLoader():
def __init__(self, config_dict,training=True):
self.speech_config = config_dict['speech_config']
self.text_config = config_dict['decoder_config']
self.augment_config = config_dict['augments_config']
self.batch = config_dict['learning_config']['running_config']['batch_size']
self.speech_featurizer = SpeechFeaturizer(self.speech_config)
self.text_featurizer = TextFeaturizer(self.text_config)
self.make_file_list(self.speech_config['train_list'] if training else self.speech_config['eval_list'],training)
self.augment = Augmentation(self.augment_config)
self.init_text_to_vocab()
self.epochs = 1
self.LAS=False
self.steps = 0
def load_state(self,outdir):
try:
self.pick_index=np.load(os.path.join(outdir,'dg_state.npy')).flatten().tolist()
self.epochs=1+int(np.mean(self.pick_index))
except FileNotFoundError:
print('not found state file')
except:
print('load state failed, use init state')
def save_state(self,outdir):
np.save(os.path.join(outdir,'dg_state.npy'),np.array(self.pick_index))
def return_data_types(self):
if self.LAS:
return (tf.float32, tf.float32, tf.int32, tf.int32, tf.int32,tf.float32)
else:
return (tf.float32, tf.int32, tf.int32, tf.int32)
def return_data_shape(self):
f,c=self.speech_featurizer.compute_feature_dim()
if self.LAS:
return (
tf.TensorShape([None,None,1]) if self.speech_config['use_mel_layer'] else tf.TensorShape([None,None,f,c]),
tf.TensorShape([None,]),
tf.TensorShape([None,None]),
tf.TensorShape([None,]),
tf.TensorShape([None,None,None])
)
else:
return (
tf.TensorShape([None, None, 1]) if self.speech_config['use_mel_layer'] else tf.TensorShape(
[None, None, f, c]),
tf.TensorShape([None, ]),
tf.TensorShape([None, None]),
tf.TensorShape([None, ])
)
def get_per_epoch_steps(self):
return len(self.train_list)//self.batch
def eval_per_epoch_steps(self):
return len(self.test_list)//self.batch
def init_text_to_vocab(self):
pypinyin.load_phrases_dict({'调大': [['tiáo'], ['dà']],
'调小': [['tiáo'], ['xiǎo']],
'调亮': [['tiáo'], ['liàng']],
'调暗': [['tiáo'], ['àn']],
'肖': [['xiāo']],
'英雄传': [['yīng'], ['xióng'], ['zhuàn']],
'新传': [['xīn'], ['zhuàn']],
'外传': [['wài'], ['zhuàn']],
'正传': [['zhèng'], ['zhuàn']], '水浒传': [['shuǐ'], ['hǔ'], ['zhuàn']]
})
def text_to_vocab_func(txt):
pins=pypinyin.pinyin(txt)
pins=[i[0] for i in pins]
return pins
self.text_to_vocab = text_to_vocab_func
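# Example (illustrative): with the phrase overrides loaded above,
# self.text_to_vocab('调大') -> ['tiáo', 'dà']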
def augment_data(self, wavs, label, label_length):
if not self.augment.available():
return None
mels = []
input_length = []
label_ = []
label_length_ = []
wavs_ = []
max_input = 0
max_wav = 0
for idx, wav in enumerate(wavs):
data = self.augment.process(wav.flatten())
speech_feature = self.speech_featurizer.extract(data)
if speech_feature.shape[0] // self.speech_config['reduction_factor'] < label_length[idx]:
continue
max_input = max(max_input, speech_feature.shape[0])
max_wav = max(max_wav, len(data))
wavs_.append(data)
mels.append(speech_feature)
input_length.append(speech_feature.shape[0] // self.speech_config['reduction_factor'])
label_.append(label[idx])
label_length_.append(label_length[idx])
for i in range(len(mels)):
if mels[i].shape[0] < max_input:
pad = np.ones([max_input - mels[i].shape[0], mels[i].shape[1],mels[i].shape[2]]) * mels[i].min()
mels[i] = np.vstack((mels[i], pad))
wavs_ = self.speech_featurizer.pad_signal(wavs_, max_wav)
x = np.array(mels, 'float32')
label_ = np.array(label_, 'int32')
input_length = np.array(input_length, 'int32')
label_length_ = np.array(label_length_, 'int32')
wavs_ = np.array(np.expand_dims(wavs_, -1), 'float32')
return x, wavs_, input_length, label_, label_length_
def make_file_list(self, wav_list,training=True):
with open(wav_list, encoding='utf-8') as f:
data = f.readlines()
data=[i.strip() for i in data if i!='']
num = len(data)
if training:
self.train_list = data[:int(num * 0.99)]
self.test_list = data[int(num * 0.99):]
np.random.shuffle(self.train_list)
self.pick_index = [0.] * len(self.train_list)
else:
self.test_list=data
self.offset=0
def only_chinese(self, word):
txt=''
for ch in word:
if '\u4e00' <= ch <= '\u9fff':
txt+=ch
else:
continue
return txt
def eval_data_generator(self):
sample=self.test_list[self.offset:self.offset+self.batch]
self.offset+=self.batch
speech_features = []
input_length = []
y1 = []
label_length1 = []
max_input = 0
max_label1 = 0
for i in sample:
wp, txt = i.strip().split('\t')
try:
data = self.speech_featurizer.load_wav(wp)
except:
print('load data failed')
continue
if len(data) < 400:
continue
elif len(data) > self.speech_featurizer.sample_rate * self.speech_config['wav_max_duration']:
continue
if self.speech_config['only_chinese']:
txt= self.only_chinese(txt)
if self.speech_config['use_mel_layer']:
speech_feature = data / np.abs(data).max()
speech_feature = np.expand_dims(speech_feature, -1)
in_len = len(speech_feature) // (
self.speech_config['reduction_factor'] * (self.speech_featurizer.sample_rate / 1000) *
self.speech_config['stride_ms'])
else:
speech_feature = self.speech_featurizer.extract(data)
in_len = int(speech_feature.shape[0] // self.speech_config['reduction_factor'])
max_input = max(max_input, speech_feature.shape[0])
py = self.text_to_vocab(txt)
if not self.check_valid(py, self.text_featurizer.vocab_array):
continue
text_feature = self.text_featurizer.extract(py)
if in_len < len(text_feature):
continue
max_input = max(max_input, len(speech_feature))
max_label1 = max(max_label1, len(text_feature))
speech_features.append(speech_feature)
input_length.append(in_len)
y1.append(np.array(text_feature))
label_length1.append(len(text_feature))
if self.speech_config['use_mel_layer']:
speech_features = self.speech_featurizer.pad_signal(speech_features, max_input)
else:
for i in range(len(speech_features)):
if speech_features[i].shape[0] < max_input:
pad = np.ones([max_input - speech_features[i].shape[0], speech_features[i].shape[1],
speech_features[i].shape[2]]) * speech_features[i].min()
speech_features[i] = np.vstack((speech_features[i], pad))
for i in range(len(y1)):
if y1[i].shape[0] < max_label1:
pad = np.ones(max_label1 - y1[i].shape[0]) * self.text_featurizer.pad
y1[i] = np.hstack((y1[i], pad))
x = np.array(speech_features, 'float32')
import sys
import os
import pickle
import numpy as np
from metrics_ddie import ddie_compute_metrics
from scipy.special import softmax
from transformers import BertTokenizer
_, cv_dir, k = sys.argv
k = int(k)
tokenizer = BertTokenizer.from_pretrained('/mnt/model/scibert_scivocab_uncased', do_lower_case=True)
paths = ('cnn', 'rad1', 'desc')
#paths = ('cnn', 'desc')
for path in paths:
print(path)
fscores = []
for i in range(k):
result_path = os.path.join(cv_dir, str(i+1), path, 'eval_results.txt')
with open(result_path, 'r') as f:
fscore = f.read().strip().split('\n')[2].split()[-1]
print(i+1, fscore)
fscore = float(fscore)
fscores.append(fscore)
print(sum(fscores) / len(fscores))
#label_list = ('microF', 'Mechanism_F', 'Effect_F', 'Advise_F', 'Int._F')
label_list = ('Mechanism_F', 'Effect_F', 'Advise_F', 'Int._F')
model_list = ('cnn', 'rad1', 'desc', 'ensemble')
print_d = {'cnn': 'Text-only', 'rad1':'+ Mol (radius=1)', 'desc':'+ Desc', 'ensemble':'+ Desc + Mol (radius=1)'}
#model_list = ('cnn', 'desc')
def print_result(result_table):
for i_, x in enumerate(result_table):
print('& {} '.format(print_d[model_list[i_]]), end='')
for j_, y in enumerate(x):
if i_ == np.argmax(result_table[:, j_]):
print('& \\textbf{{{:.2f}}}'.format(y * 100), end=' ')
elif i_ != 0 and y < result_table[0,:][j_]:
print('& \\underline{{{:.2f}}}'.format(y * 100), end=' ')
else:
print('& {:.2f}'.format(y * 100), end=' ')
if i_ == len(model_list)-1:
print('\\\\\\hline')
else:
print('\\\\')
macro_result_dict = {}
micro_result_table = np.zeros((len(model_list), len(label_list)))
for model_i, model_name in enumerate(model_list):
micro_preds = None
micro_labels = None
for i in range(k):
if model_name == 'ensemble':
rad_preds = np.load(os.path.join(cv_dir, str(i+1), 'rad1', 'preds.npy'))
desc_preds = np.load(os.path.join(cv_dir, str(i+1), 'desc', 'preds.npy'))
preds = rad_preds + desc_preds
labels = np.load(os.path.join(cv_dir, str(i+1), 'rad1', 'labels.npy'))
else:
preds= np.load(os.path.join(cv_dir, str(i+1), model_name, 'preds.npy'))
labels= np.load(os.path.join(cv_dir, str(i+1), model_name, 'labels.npy'))
result = ddie_compute_metrics('ddie', np.argmax(preds, axis=1), labels) # assumed call signature: (task, preds, labels)
# pylint: disable=R0201
import platform
from unittest.mock import MagicMock
import numpy as np
import pytest
from napari.utils.colormaps import make_colorbar
from qtpy import PYQT5
from qtpy.QtCore import QPoint, Qt
from qtpy.QtGui import QImage
import PartSegData
from PartSeg.common_backend.base_settings import BaseSettings, ColormapDict, ViewSettings
from PartSeg.common_gui.channel_control import ChannelProperty, ColorComboBox, ColorComboBoxGroup
from PartSeg.common_gui.napari_image_view import ImageView
from PartSegCore.color_image.base_colors import starting_colors
from PartSegCore.image_operations import NoiseFilterType
from PartSegImage import TiffImageReader
from .utils import CI_BUILD
if PYQT5:
def array_from_image(image: QImage):
size = image.size().width() * image.size().height()
return np.frombuffer(image.bits().asstring(size * image.depth() // 8), dtype=np.uint8)
else:
def array_from_image(image: QImage):
size = image.size().width() * image.size().height()
return np.frombuffer(image.bits(), dtype=np.uint8, count=size * image.depth() // 8)
class TestChannelProperty:
def test_fail_construct(self, base_settings):
with pytest.raises(ValueError):
ChannelProperty(base_settings, start_name="")
def test_collapse(self, base_settings, qtbot):
ch_prop = ChannelProperty(base_settings, start_name="test")
qtbot.add_widget(ch_prop)
ch_prop.show()
assert not ch_prop.collapse_widget.isChecked()
assert ch_prop.minimum_value.isVisible()
ch_prop.collapse_widget.setChecked(True)
assert not ch_prop.minimum_value.isVisible()
ch_prop.hide()
def test_get_value_from_settings(self, base_settings, qtbot):
ch_prop = ChannelProperty(base_settings, start_name="test1")
base_settings.set_in_profile("test.range_0", (100, 300))
mock = MagicMock()
mock.viewer_name = "test"
ch_prop.register_widget(mock)
with pytest.raises(ValueError):
ch_prop.register_widget(mock)
assert ch_prop.minimum_value.value() == 100
assert ch_prop.maximum_value.value() == 300
base_settings.set_in_profile("test.range_0", (200, 500))
assert ch_prop.minimum_value.value() == 200
assert ch_prop.maximum_value.value() == 500
base_settings.set_in_profile("test.range_1", (20, 50))
assert ch_prop.minimum_value.value() == 200
assert ch_prop.maximum_value.value() == 500
with pytest.raises(ValueError):
ch_prop.change_current("test7", 1)
class TestColorComboBox:
def test_base(self, qtbot):
dkt = ColormapDict({})
box = ColorComboBox(0, starting_colors, dkt)
box.show()
qtbot.add_widget(box)
with qtbot.waitSignal(box.channel_visible_changed), qtbot.assertNotEmitted(box.clicked):
qtbot.mouseClick(box.check_box, Qt.LeftButton)
with qtbot.waitSignal(box.clicked, timeout=1000):
qtbot.mouseClick(box, Qt.LeftButton, pos=QPoint(5, 5))
with qtbot.waitSignal(box.clicked):
qtbot.mouseClick(box, Qt.LeftButton, pos=QPoint(box.width() - 5, 5))
index = 3
with qtbot.waitSignal(box.currentTextChanged):
box.set_color(starting_colors[index])
img = np.array(make_colorbar(dkt[starting_colors[index]][0], size=(1, 512)))
print(array_from_image(box.image), array_from_image(box.image).size)
print(img)
print(img.flatten(), img.size, img.shape)
print(dkt[starting_colors[index]][0])
print(box.image, box.image.size(), box.image.depth())
assert np.all(array_from_image(box.image) == img.flatten())
box.hide()
def test_visibility(self, qtbot):
dkt = ColormapDict({})
box = ColorComboBox(0, starting_colors, dkt, lock=True)
qtbot.add_widget(box)
box.show()
qtbot.wait(100)
assert box.lock.isVisible()
box.hide()
box = ColorComboBox(0, starting_colors, dkt, blur=NoiseFilterType.Gauss)
qtbot.add_widget(box)
box.show()
qtbot.wait(100)
assert box.blur.isVisible()
box.hide()
box = ColorComboBox(0, starting_colors, dkt, gamma=2)
qtbot.add_widget(box)
box.show()
qtbot.wait(100)
assert box.gamma.isVisible()
box.hide()
def test_show_frame_arrow(self, qtbot):
dkt = ColormapDict({})
box = ColorComboBox(0, starting_colors, dkt)
qtbot.add_widget(box)
box.show()
box.show_arrow = True
box.repaint()
qtbot.wait(100)
box.show_arrow = False
box.show_frame = True
box.repaint()
qtbot.wait(100)
box.hide()
def test_change_colors(self, qtbot):
dkt = ColormapDict({})
box = ColorComboBox(0, starting_colors, dkt)
qtbot.add_widget(box)
box.change_colors(starting_colors[:-1])
assert box.count() == len(starting_colors) - 1
box.change_colors(starting_colors[1:])
assert box.count() == len(starting_colors) - 1
class TestColorComboBoxGroup:
def test_change_channels_num(self, qtbot, image2):
settings = ViewSettings()
box = ColorComboBoxGroup(settings, "test", height=30)
qtbot.add_widget(box)
box.set_channels(1)
box.set_channels(4)
box.set_channels(10)
box.set_channels(4)
box.set_channels(10)
settings.image = image2
box.update_channels()
assert box.layout().count() == image2.channels
def test_update_colormaps(self, qtbot, base_settings):
box = ColorComboBoxGroup(base_settings, "test", height=30)
qtbot.add_widget(box)
box.set_channels(4)
assert box.current_colormaps == [base_settings.colormap_dict[x][0] for x in starting_colors[:4]]
box.update_color_list(starting_colors[1:2])
assert box.current_colors == [starting_colors[1] for _ in range(4)]
box.update_color_list()
assert box.layout().itemAt(0).widget().count() == len(starting_colors)
def test_settings_updated(self, qtbot, base_settings, monkeypatch):
box = ColorComboBoxGroup(base_settings, "test", height=30)
box.set_channels(4)
mock = MagicMock()
monkeypatch.setattr(box, "parameters_changed", mock)
base_settings.set_in_profile("test.lock_0", True)
mock.assert_called_once_with(0)
dkt = dict(**base_settings.get_from_profile("test"))
dkt["lock_0"] = False
dkt["lock_1"] = True
base_settings.set_in_profile("test", dkt)
assert mock.call_count == 5
mock.assert_called_with(3)
def test_color_combo_box_group(self, qtbot):
settings = ViewSettings()
box = ColorComboBoxGroup(settings, "test", height=30)
qtbot.add_widget(box)
box.set_channels(3)
assert len(box.current_colors) == 3
assert all(map(lambda x: isinstance(x, str), box.current_colors))
with qtbot.waitSignal(box.coloring_update):
box.layout().itemAt(0).widget().check_box.setChecked(False)
with qtbot.waitSignal(box.coloring_update):
box.layout().itemAt(0).widget().setCurrentIndex(2)
assert box.current_colors[0] is None
assert all(map(lambda x: isinstance(x, str), box.current_colors[1:]))
def test_color_combo_box_group_and_color_preview(self, qtbot):
settings = ViewSettings()
ch_property = ChannelProperty(settings, "test")
box = ColorComboBoxGroup(settings, "test", ch_property, height=30)
qtbot.add_widget(box)
qtbot.add_widget(ch_property)
box.set_channels(3)
box.set_active(1)
with qtbot.assert_not_emitted(box.coloring_update), qtbot.assert_not_emitted(box.change_channel):
ch_property.minimum_value.setValue(10)
ch_property.minimum_value.setValue(100)
def check_parameters(name, index):
return name == "test" and index == 1
with qtbot.waitSignal(box.coloring_update), qtbot.waitSignal(
box.change_channel, check_params_cb=check_parameters
):
ch_property.fixed.setChecked(True)
with qtbot.waitSignal(box.coloring_update), qtbot.waitSignal(
box.change_channel, check_params_cb=check_parameters
):
ch_property.minimum_value.setValue(10)
ch_property.maximum_value.setValue(10000)
with qtbot.waitSignal(box.coloring_update), qtbot.waitSignal(
box.change_channel, check_params_cb=check_parameters
):
ch_property.maximum_value.setValue(11000)
with qtbot.waitSignal(box.coloring_update), qtbot.waitSignal(
box.change_channel, check_params_cb=check_parameters
):
ch_property.fixed.setChecked(False)
with qtbot.waitSignal(box.coloring_update), qtbot.waitSignal(
box.change_channel, check_params_cb=check_parameters
):
ch_property.use_filter.setCurrentEnum(NoiseFilterType.Gauss)
with qtbot.waitSignal(box.coloring_update), qtbot.waitSignal(
box.change_channel, check_params_cb=check_parameters
):
ch_property.use_filter.setCurrentEnum(NoiseFilterType.Median)
ch_property.filter_radius.setValue(0.5)
with qtbot.waitSignal(box.coloring_update), qtbot.waitSignal(
box.change_channel, check_params_cb=check_parameters
):
ch_property.filter_radius.setValue(2)
with qtbot.waitSignal(box.coloring_update), qtbot.waitSignal(
box.change_channel, check_params_cb=check_parameters
):
ch_property.use_filter.setCurrentEnum(NoiseFilterType.No)
with qtbot.assert_not_emitted(box.coloring_update), qtbot.assert_not_emitted(box.change_channel):
ch_property.filter_radius.setValue(0.5)
@pytest.mark.xfail((platform.system() == "Windows") and CI_BUILD, reason="GL problem")
@pytest.mark.parametrize("filter_value", NoiseFilterType.__members__.values())
def test_image_view_integration_filter(self, qtbot, tmp_path, filter_value):
settings = BaseSettings(tmp_path)
ch_property = ChannelProperty(settings, "test")
image_view = ImageView(settings, ch_property, "test")
# image_view.show()
qtbot.addWidget(image_view)
qtbot.addWidget(ch_property)
image = TiffImageReader.read_image(PartSegData.segmentation_analysis_default_image)
with qtbot.waitSignal(image_view.image_added, timeout=10 ** 6):
settings.image = image
image_view.channel_control.set_active(1)
def check_parameters(name, index):
return name == "test" and index == 1
if filter_value is NoiseFilterType.No:
with qtbot.waitSignal(image_view.channel_control.coloring_update), qtbot.waitSignal(
image_view.channel_control.change_channel, check_params_cb=check_parameters
):
ch_property.use_filter.setCurrentEnum(NoiseFilterType.Gauss)
with qtbot.waitSignal(image_view.channel_control.coloring_update), qtbot.waitSignal(
image_view.channel_control.change_channel, check_params_cb=check_parameters
):
ch_property.use_filter.setCurrentEnum(filter_value)
image4 = image_view.viewer_widget.screenshot()
assert (filter_value != NoiseFilterType.No and np.any(image4 != 255)) or (
filter_value == NoiseFilterType.No and np.any(image4 == 255)
)
@pytest.mark.xfail((platform.system() == "Windows") and CI_BUILD, reason="GL problem")
def test_image_view_integration(self, qtbot, tmp_path):
settings = BaseSettings(tmp_path)
ch_property = ChannelProperty(settings, "test")
image_view = ImageView(settings, ch_property, "test")
# image_view.show()
qtbot.addWidget(image_view)
qtbot.addWidget(ch_property)
image = TiffImageReader.read_image(PartSegData.segmentation_analysis_default_image)
with qtbot.waitSignal(image_view.image_added, timeout=10 ** 6):
settings.image = image
channels_num = image.channels
assert image_view.channel_control.channels_count == channels_num
image_view.viewer_widget.screenshot()
image1 = image_view.viewer_widget.canvas.render()
assert np.any(image1 != 255)
image_view.channel_control.set_active(1)
ch_property.minimum_value.setValue(100)
ch_property.maximum_value.setValue(10000)
ch_property.filter_radius.setValue(0.5)
image2 = image_view.viewer_widget.canvas.render()
assert np.any(image2 != 255)
assert np.all(image1 == image2)
def check_parameters(name, index):
return name == "test" and index == 1
# Test fixed range
with qtbot.waitSignal(image_view.channel_control.coloring_update), qtbot.waitSignal(
image_view.channel_control.change_channel, check_params_cb=check_parameters
):
ch_property.fixed.setChecked(True)
image1 = image_view.viewer_widget.canvas.render()
assert np.any(image1 != 255)
with qtbot.waitSignal(image_view.channel_control.coloring_update), qtbot.waitSignal(
image_view.channel_control.change_channel, check_params_cb=check_parameters
):
ch_property.minimum_value.setValue(20)
image2 = image_view.viewer_widget.canvas.render()
assert np.any(image2 != 255)
assert np.any(image1 != image2)
with qtbot.waitSignal(image_view.channel_control.coloring_update), qtbot.waitSignal(
image_view.channel_control.change_channel, check_params_cb=check_parameters
):
ch_property.maximum_value.setValue(11000)
image3 = image_view.viewer_widget.screenshot()
assert np.any(image3 != 255)
assert np.any(image2 != image3)
assert np.any(image1 != image3)
with qtbot.waitSignal(image_view.channel_control.coloring_update), qtbot.waitSignal(
image_view.channel_control.change_channel, check_params_cb=check_parameters
):
ch_property.fixed.setChecked(False)
image1 = image_view.viewer_widget.screenshot()
assert np.any(image1 != 255)
assert np.any(image1 != image2)
assert np.any(image1 != image3)
# Test gauss
with qtbot.waitSignal(image_view.channel_control.coloring_update), qtbot.waitSignal(
image_view.channel_control.change_channel, check_params_cb=check_parameters
):
ch_property.use_filter.setCurrentEnum(NoiseFilterType.Gauss)
image4 = image_view.viewer_widget.screenshot()
assert np.any(image4 != 255)
assert np.any(image1 != image4)
assert np.any(image2 != image4)
assert np.any(image3 != image4)
with qtbot.waitSignal(image_view.channel_control.coloring_update), qtbot.waitSignal(
image_view.channel_control.change_channel, check_params_cb=check_parameters
):
ch_property.filter_radius.setValue(1)
image5 = image_view.viewer_widget.screenshot()
assert np.any(image5 != 255)
assert np.any(image1 != image5)
assert np.any(image2 != image5)
assert np.any(image3 != image5)
assert np.any(image4 != image5)
# Test gauss and fixed range
ch_property.minimum_value.setValue(100)
ch_property.maximum_value.setValue(10000)
with qtbot.waitSignal(image_view.channel_control.coloring_update), qtbot.waitSignal(
image_view.channel_control.change_channel, check_params_cb=check_parameters
):
ch_property.fixed.setChecked(True)
image1 = image_view.viewer_widget.screenshot()
with qtbot.waitSignal(image_view.channel_control.coloring_update), qtbot.waitSignal(
image_view.channel_control.change_channel, check_params_cb=check_parameters
):
ch_property.minimum_value.setValue(10)
image2 = image_view.viewer_widget.screenshot()
assert np.any(image2 != 255)
import numpy as np
import os
import parmap
import scipy
def remove_duplicates(fname_templates, fname_weights,
save_dir, CONFIG, units_in=None, units_to_process=None,
multi_processing=False, n_processors=1):
# output folder
if not os.path.exists(save_dir):
os.makedirs(save_dir)
# load weights
weights = np.load(fname_weights)
# units_in is all units if none
if units_in is None:
units_in = np.arange(len(weights))
if units_to_process is None:
units_to_process = np.copy(units_in)
# this allows units not in units_to_process to not get killed
units_to_not_process = np.arange(len(weights))
units_to_not_process = units_to_not_process[
~np.in1d(units_to_not_process, units_to_process)]
weights[units_to_not_process] = np.max(weights) + 10
# compute overlapping units
fname_units_to_compare = os.path.join(save_dir, 'units_to_compare.npy')
if os.path.exists(fname_units_to_compare):
units_to_compare = np.load(fname_units_to_compare)[()]
else:
units_to_compare = compute_units_to_compare(
fname_templates, units_in, units_to_process, CONFIG)
# save it
np.save(fname_units_to_compare,
units_to_compare)
## partition templates
#save_dir_partition = os.path.join(save_dir, 'partition')
#if not os.path.exists(save_dir_partition):
# os.makedirs(save_dir_partition)
#fnames_in = partition_templates(fname_templates,
# units_to_compare,
# save_dir_partition)
#find duplicates
#save_dir_result = os.path.join(save_dir, 'result')
#if not os.path.exists(save_dir_result):
# os.makedirs(save_dir_result)
fname_duplicates = os.path.join(save_dir, 'duplicates.npy')
if os.path.exists(fname_duplicates):
duplicates = np.load(fname_duplicates)[()]
else:
up_factor = 5
max_diff_threshold = CONFIG.clean_up.abs_max_diff
max_diff_rel_threshold = CONFIG.clean_up.rel_max_diff
# find duplicates
if multi_processing:
# divide keys
units = list(units_to_compare.keys())
n_units = len(units)
sub_units_to_compare = []
for j in range(n_processors):
sub_keys = units[slice(j, n_units, n_processors)]
sub_units_to_compare.append({k: units_to_compare[k] for k in sub_keys})
# run duplicate detector
duplicates_list = parmap.map(run_duplicate_detector,
sub_units_to_compare,
fname_templates,
up_factor,
max_diff_threshold,
max_diff_rel_threshold,
processes=n_processors)
duplicates = {}
for sub_list in duplicates_list:
for unit in sub_list:
duplicates[unit] = sub_list[unit]
else:
duplicates = run_duplicate_detector(
units_to_compare, fname_templates,
up_factor, max_diff_threshold,
max_diff_rel_threshold)
# save it
np.save(fname_duplicates, duplicates)
fname_units_killed = os.path.join(save_dir, 'units_killed.npy')
if os.path.exists(fname_units_killed):
units_killed = np.load(fname_units_killed)
else:
units_killed = kill_duplicates(duplicates, weights)
np.save(fname_units_killed, units_killed)
return np.setdiff1d(units_in, units_killed)
def compute_units_to_compare(fname_templates, units_in,
units_to_process, CONFIG):
# threshold on ptp diff
diff_threshold = CONFIG.clean_up.abs_max_diff
diff_rel_threshold = CONFIG.clean_up.rel_max_diff
# load templates
templates = np.load(fname_templates)
#templates = templates[units_in]
#n_units = templates.shape[0]
# get ptps
max_val = templates.max(1)
min_val = templates.min(1)
ptps = (max_val - min_val).max(1)
ptps_higher = np.maximum(ptps[:, None], ptps[None])
units_to_compare = {}
idx_process = np.in1d(units_in, units_to_process)
units_in_process = units_in[idx_process]
units_in_dont_process = units_in[~idx_process]
for ii, j in enumerate(units_in_process):
if ii < len(units_in_process) - 1:
# add within units_in_:
max_val_diff = np.max(np.abs(max_val[units_in_process[ii+1:]] - max_val[[j]]), axis=1)
min_val_diff = np.max(np.abs(min_val[units_in_process[ii+1:]] - min_val[[j]]), axis=1)
abs_diff = np.maximum(max_val_diff, min_val_diff)
abs_diff_rel = abs_diff/ptps_higher[j, units_in_process[ii+1:]]
units_to_compare_1 = units_in_process[ii+1:][np.logical_or(
abs_diff < diff_threshold, abs_diff_rel < diff_rel_threshold)]
else:
units_to_compare_1 = np.array(0, 'int32')
#
max_val_diff = np.max(np.abs(max_val[units_in_dont_process] - max_val[[j]]), axis=1)
min_val_diff = np.max(np.abs(min_val[units_in_dont_process] - min_val[[j]]), axis=1)
import numpy as np
import copy as cp
from scipy.linalg import expm
from . import cmanif
class ManifoldPointArray:
def __init__(self, manifold):
self._manifold = cp.deepcopy(manifold)
self._coords = np.array([])
def __str__(self):
return "Array of {num} points of the manifold: ".format(num=len(self._coords))+ str(self._manifold)
@property
def manifold(self):
return self._manifold
@property
def coords(self):
return self._coords
@coords.setter
def coords(self, coords):
self._coords = self._manifold.project_on_manifold(coords)
class ManifoldPointArrayParameterized(ManifoldPointArray):
def __init__(self, manifold):
assert manifold.parameterized
self._local_coords = np.array([])
ManifoldPointArray.__init__(self,manifold)
@property
def coords(self):
return self._coords
@coords.setter
def coords(self, coords):
ManifoldPointArray.coords.fset(self,coords)
self._local_coords = self._manifold.compute_inverse_parameterization(self._coords)
#self._local_coords = np.empty([coords.shape[0],self._manifold.local_dim])
#inverse_parameterization = self._manifold.compute_inverse_parameterization
#for i, point in enumerate(self._coords):
# self._local_coords[i] = inverse_parameterization(point)
@property
def local_coords(self):
return self._local_coords
@local_coords.setter
def local_coords(self, local_coords):
self._local_coords = np.empty(local_coords.shape)
self._local_coords[:] = local_coords
self._coords = self._manifold.compute_parameterization(local_coords)
class TangentVectorArray:
def __init__(self, manifold_point_array):
self._base_point_array = cp.deepcopy(manifold_point_array)
self._coords = np.zeros(self._base_point_array.coords.shape)
def __str__(self):
return "Array of {num} tangent vectors of the manifold: ".format(num=len(self._coords)) \
+ str(self._base_point_array.manifold)
@property
def base_point_array(self):
return self._base_point_array
@property
def manifold(self):
return self._base_point_array.manifold
@property
def coords(self):
return self._coords
@coords.setter
def coords(self, coords):
self._coords = self.manifold.project_on_tangent_space(self._base_point_array.coords,coords)
def perform_geodesic_step(self, step_length=1):
self._base_point_array._coords, self._coords = self.manifold.geodesic_step(self._base_point_array.coords, self.coords, step=step_length)
def normal_vector_coords(self):
return self.manifold.normal_vector(self._base_point_array._coords,self._coords)
def christoffel_matrix_lin_comb_mult(self, coeffs):
christoffel_lin_comb = self.manifold.christoffel_matrix_lin_comb
base_coords = self._base_point_array._coords
mult_coords = np.empty(self._coords.shape)
for i, tangent_coords in enumerate(self._coords):
matrix = christoffel_lin_comb(base_coords[i], coeffs[i])
mult_coords[i] = np.dot(matrix, tangent_coords)
return mult_coords
class TangentVectorArrayParameterized(TangentVectorArray):
def __init__(self, manifold_point_array):
assert manifold_point_array.manifold.parameterized
TangentVectorArray.__init__(self,manifold_point_array)
self._local_coords = np.zeros(self._base_point_array.local_coords.shape)
def perform_geodesic_step(self, step_length=1):
TangentVectorArray.perform_geodesic_step(self, step_length)
self._base_point_array._local_coords = self.manifold.compute_inverse_parameterization(self._base_point_array._coords)
jacobi_matrix = self.manifold.compute_jacobi_matrix(self._base_point_array._local_coords)
inverse_riemannian_matrix = self.manifold.compute_inverse_riemannian_matrix(self._base_point_array._local_coords)
jacobi_transp_dot_coords = np.zeros([self._coords.shape[0],self.manifold.local_dim])
for i in range(self.manifold.local_dim):
for j in range(self.manifold.ambient_dim):
jacobi_transp_dot_coords[:,i] += jacobi_matrix[:,j,i]*self._coords[:,j]
self._local_coords = np.zeros([self._coords.shape[0],self.manifold.local_dim])
for i in range(self.manifold.local_dim):
for j in range(self.manifold.local_dim):
self._local_coords[:,i] += inverse_riemannian_matrix[:,i,j] * jacobi_transp_dot_coords[:,j]
@property
def coords(self):
return self._coords
@coords.setter
def coords(self, coords):
TangentVectorArray.coords.fset(self,coords)
jacobi_matrix = self.manifold.compute_jacobi_matrix(self._base_point_array._local_coords)
inverse_riemannian_matrix = self.manifold.compute_inverse_riemannian_matrix(self._base_point_array._local_coords)
jacobi_transp_dot_coords = np.zeros([coords.shape[0],self.manifold.local_dim])
for i in range(self.manifold.local_dim):
for j in range(self.manifold.ambient_dim):
jacobi_transp_dot_coords[:,i] += jacobi_matrix[:,j,i]*coords[:,j]
self._local_coords = np.zeros([coords.shape[0],self.manifold.local_dim])
for i in range(self.manifold.local_dim):
for j in range(self.manifold.local_dim):
self._local_coords[:,i] += inverse_riemannian_matrix[:,i,j] * jacobi_transp_dot_coords[:,j]
@property
def local_coords(self):
return self._local_coords
@local_coords.setter
def local_coords(self, local_coords):
self._local_coords = local_coords
jacobi_matrix = self.manifold.compute_jacobi_matrix(self._base_point_array._local_coords)
for i, base_point in enumerate(self._base_point_array.local_coords):
self._coords[i] = np.dot(jacobi_matrix[i,:,:], local_coords[i])
class Manifold:
def __init__(self):
        # set to True if a parameterization is implemented (e.g. see Sphere2)
self._parameterized = False
def __str__(self):
return self._description
def __eq__(self, other):
if isinstance(other, Manifold):
return self._description == other._description
return NotImplemented
@property
def ambient_dim(self):
return self._dim
@property
def local_dim(self):
return self._local_dim
@property
def parameterized(self):
return self._parameterized
def christoffel_matrices(self, base_point_coords):
"""
Slow fallback implementation of computing christoffel matrices from normal vectors,
which should be reimplemented explicitly for performance reasons
(see for instance implementations on the Sphere, Rotation Group, or Grassmannian)
"""
dim = self._dim
cm = np.empty([dim,dim,dim])
basis = np.identity(dim)
for i in range(dim):
v_i = self.project_on_tangent_space(base_point_coords, basis[i])
n_i = self.normal_vector(base_point_coords, v_i)
cm[:,i,i] = - n_i
for j in range(i,dim):
v_j = self.project_on_tangent_space(base_point_coords, basis[j])
n_ipj = self.normal_vector(base_point_coords, v_i + v_j)
n_imj = self.normal_vector(base_point_coords, v_i - v_j)
cm[:,i,j] = (n_imj - n_ipj)/4
cm[:,j,i] = cm[:,i,j]
return cm
def christoffel_matrix_lin_comb(self, base_point_coords, coeffs):
"""
Slow fallback implementation of computing a linear combination of christoffel matrices,
which should be reimplemented explicitly for performance reasons
(see for instance implementations on the Sphere, Rotation Group, or Grassmannian)
"""
cm = self.christoffel_matrices(base_point_coords)
return np.asmatrix(np.tensordot(coeffs, cm, axes=(0,0)))
class EuclideanSpace(Manifold):
def __init__(self, d):
self._dim=d
self._local_dim=d
self._description = "Euclidean Space R^{dim}".format(dim=self._dim)
Manifold.__init__(self)
def project_on_manifold(self, vector):
return np.array(vector)
def project_on_tangent_space(self, base_point_coords, vector):
return np.array(vector)
def geodesic_step(self, base_point_coords, tangent_vector_coords, step=1.0):
new_base_point_coords = base_point_coords + step * tangent_vector_coords
new_tangent_vector_coords = np.array(tangent_vector_coords)
return new_base_point_coords, new_tangent_vector_coords
def normal_vector(self, base_point_coords, tangent_vector_coords):
return np.zeros(tangent_vector_coords.shape)
def christoffel_matrices(self, base_point_coords):
return np.zeros((self._dim,)*3)
def christoffel_matrices_lin_comb(self, base_point_coords):
return np.asmatrix(np.zeros((self._dim,)*2))
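# Illustrative check, added for clarity (not part of the original module): for
# Euclidean space, the slow fallback Manifold.christoffel_matrices defined above
# should reproduce the explicit all-zero implementation.
def _example_euclidean_christoffel_check(d=3):
    euclidean = EuclideanSpace(d)
    point = np.zeros(d)
    fallback = Manifold.christoffel_matrices(euclidean, point)
    explicit = euclidean.christoffel_matrices(point)
    return np.allclose(fallback, explicit)  # expected: True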
class Sphere(Manifold):
def __init__(self, d):
self._dim=d+1
self._local_dim = d
self._description = "Sphere S^{d_s} in R^{d_r}".format(d_s=self._local_dim, d_r=self._dim)
Manifold.__init__(self)
def project_on_manifold(self, vector):
norm = np.sqrt(np.sum(vector*vector,axis=1)).reshape([vector.shape[0],1])
return vector/norm
def project_on_tangent_space(self, base_point_coords, vector):
pv = np.sum(base_point_coords*vector,axis=1).reshape([vector.shape[0],1])
return vector - pv*base_point_coords
def geodesic_step(self, base_point_coords, tangent_vector_coords, step=1.0):
v_norm = np.sqrt(np.sum(tangent_vector_coords*tangent_vector_coords,axis=1)).reshape([tangent_vector_coords.shape[0],1])
length = step*v_norm
new_base_point_coords = np.cos(length)*base_point_coords + np.sin(length)*tangent_vector_coords/v_norm
new_tangent_vector_coords = - v_norm*np.sin(length)*base_point_coords + np.cos(length)*tangent_vector_coords
return new_base_point_coords, new_tangent_vector_coords
def normal_vector(self, base_point_coords, tangent_vector_coords):
norm2 = np.sum(tangent_vector_coords*tangent_vector_coords,axis=1)
return -base_point_coords*norm2
def christoffel_matrices(self, base_point_coords):
dim = self._dim
christoffel_matrices = np.empty((dim,)*3)
for i in range(dim):
christoffel_matrices[i,:,:] = np.eye(dim) * base_point_coords[i]
return christoffel_matrices
def christoffel_matrix_lin_comb(self, base_point_coords, coeffs):
return np.eye(self._dim)*(coeffs*base_point_coords).sum()
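# Usage sketch, added for clarity (not part of the original module): the Sphere
# methods above operate on arrays of shape (n_points, d+1); a geodesic step
# should keep every point on the unit sphere.
def _example_sphere_geodesic_step(n_points=4, step=0.3):
    sphere = Sphere(2)  # S^2 embedded in R^3
    points = sphere.project_on_manifold(np.random.randn(n_points, 3))
    tangents = sphere.project_on_tangent_space(points, np.random.randn(n_points, 3))
    new_points, _ = sphere.geodesic_step(points, tangents, step=step)
    return np.allclose(np.linalg.norm(new_points, axis=1), 1.0)  # expected: True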
class Sphere3(Sphere):
def __init__(self):
Sphere.__init__(self,3)
@staticmethod
def compute_stereographicprojection(point_coords_4d):
if point_coords_4d[0] > 0:
point_coords_3d = point_coords_4d[1:4]/(1+point_coords_4d[0])
else:
point_coords_3d = -point_coords_4d[1:4]/(1-point_coords_4d[0])
return point_coords_3d
class Sphere2(Sphere):
def __init__(self):
Sphere.__init__(self,2)
self._parameterized = True
@staticmethod
def compute_parameterization(spherical_coords):
point_coords = np.empty([spherical_coords.shape[0],3])
theta = spherical_coords[:,0]
phi = spherical_coords[:,1]
point_coords[:,0] = np.sin(theta) * np.cos(phi)
point_coords[:,1] = np.sin(theta) * np.sin(phi)
point_coords[:,2] = np.cos(theta)
return point_coords
@staticmethod
def compute_inverse_parameterization(point_coords):
local_coords = np.zeros([point_coords.shape[0],2])
# theta
local_coords[:,0] = np.arccos( point_coords[:,2] )
# phi
local_coords[:,1] = point_coords[:,1] / ( point_coords[:,0] - np.sqrt(point_coords[:,0]**2 + point_coords[:,1]**2) )
local_coords[:,1] = 2*np.arctan(local_coords[:,1]) + np.pi
mask = (point_coords[:,1]==0)*(0<=point_coords[:,0])
local_coords[mask,1] = 0
return local_coords
@staticmethod
def compute_jacobi_matrix(spherical_coords):
theta = spherical_coords[:,0]
phi = spherical_coords[:,1]
jacobi_matrix = np.empty([spherical_coords.shape[0],3,2])
        # d_theta
jacobi_matrix[:,0,0] = np.cos(theta) * np.cos(phi)
jacobi_matrix[:,1,0] = np.cos(theta) * np.sin(phi)
jacobi_matrix[:,2,0] = -np.sin(theta)
# d_phi
jacobi_matrix[:,0,1] = -np.sin(theta) * np.sin(phi)
jacobi_matrix[:,1,1] = np.sin(theta) * np.cos(phi)
jacobi_matrix[:,2,1] = 0
return jacobi_matrix
@staticmethod
def compute_inverse_riemannian_matrix(spherical_coords):
inverse_riemannian_matrix = np.empty([spherical_coords.shape[0],2,2])
theta = spherical_coords[:,0]
# theta column
inverse_riemannian_matrix[:,0,0] = 1.
inverse_riemannian_matrix[:,1,0] = 0.
# phi column
inverse_riemannian_matrix[:,0,1] = 0.
inverse_riemannian_matrix[:,1,1] = 1. / ( np.sin(theta) ** 2 )
return inverse_riemannian_matrix
@staticmethod
def compute_christoffel_matrix_lin_comb_parameterization(spherical_coords,coeffs):
theta = spherical_coords[:,0]
christoffel_matrix = np.zeros([coeffs.shape[0],2,2])
# theta_coeff * theta_ChristoffelMatrix
christoffel_matrix[:,1,1] += -coeffs[:,0] * np.sin(theta) * np.cos(theta)
# phi_coeff * phi_ChristoffelMatrix
christoffel_matrix[:,0,1] += coeffs[:,1] / np.tan(theta)
christoffel_matrix[:,1,0] += christoffel_matrix[:,0,1]
return christoffel_matrix
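# Usage sketch, added for clarity (not part of the original module): round trip
# through the spherical-coordinate parameterization of Sphere2 defined above,
# for theta in (0, pi) and phi in (0, 2*pi).
def _example_sphere2_parameterization_roundtrip():
    local = np.array([[0.4, 1.1],
                      [1.3, 4.0]])  # (theta, phi) pairs
    points = Sphere2.compute_parameterization(local)  # shape (2, 3), unit vectors
    recovered = Sphere2.compute_inverse_parameterization(points)
    return np.allclose(recovered, local)  # expected: True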
class MatrixManifold(Manifold):
@property
def matrix_size(self):
return self._matrix_size
def coords_as_matrix(self, vector):
return np.asmatrix(vector.reshape(self._matrix_size))
def coords_as_vector(self, matrix):
return np.asarray(matrix.reshape(self._dim))
class RotationGroup(MatrixManifold):
def __init__(self, d):
self._matrix_size = (d,d)
self._dim=d**2
self._local_dim=int(d*(d-1)/2)
self._description = "Rotation Group SO({d_so}) in R^{d_r}".format(d_so=self._matrix_size[0], d_r=self._dim)
Manifold.__init__(self)
def project_on_manifold(self, vector):
vector_m = vector.shape[0]
projected_vector = np.empty([vector_m,self._dim])
U, _, V = np.linalg.svd(vector.reshape([vector_m,self._matrix_size[0],self._matrix_size[1]]), full_matrices=True)
m = np.matmul(U,V)
det = np.linalg.det(m)
for i in range(vector_m):#, v in enumerate(vector):
#U, __, V = np.linalg.svd(self.coords_as_matrix(v), full_matrices=True)
#m = np.matmul(U[i],V[i])
if det[i]<0:#np.linalg.det(m) < 0:
m[i,:,[0, 1]] = m[i,:,[1, 0]]
projected_vector[i,:] = m[i,:,:].reshape(self._dim)#self.coords_as_vector(m)
return projected_vector
def project_on_tangent_space(self, base_point_coords, vector):
projected_vector = np.empty([vector.shape[0],self._dim])
m = base_point_coords.shape[0]
m_p = base_point_coords.reshape(m,self._matrix_size[0],self._matrix_size[1])
m_v = vector.reshape(m,self._matrix_size[0],self._matrix_size[1])
return ((m_v - np.matmul(m_p,np.matmul(m_v.transpose(0,2,1),m_p)))/2).reshape(m,self._dim)
#for i in range(vector.shape[0]):
# m_p = (base_point_coords[i,:]).reshape(self._matrix_size)
# m_v = (vector[i,:]).reshape(self._matrix_size)
# projected_vector[i,:] = ((m_v - m_p @ m_v.transpose() @ m_p)/2).reshape(self._dim)
#return projected_vector
def geodesic_step(self, base_point_coords, tangent_vector_coords, step=1.0):
m = base_point_coords.shape[0]
#new_base_point_coords = np.empty([base_point_coords.shape[0],self._dim])
#new_tangent_vector_coords = np.empty([tangent_vector_coords.shape[0],self._dim])
m_p = base_point_coords.reshape(m,self._matrix_size[0],self._matrix_size[1])
m_p_t = m_p.transpose(0,2,1)
m_v = tangent_vector_coords.reshape(m,self._matrix_size[0],self._matrix_size[1])
m_p_t_m_v = step*np.matmul(m_p_t,m_v)
x = (np.sqrt(np.sum(np.sum(m_p_t_m_v**2,axis=1),axis=1)/2)).reshape(m,1,1)
x[x==0]=1e-10
expm1_step = np.sin(x)/x*m_p_t_m_v + (1-np.cos(x))/x**2*np.matmul(m_p_t_m_v,m_p_t_m_v)
m_p_new = m_p + np.matmul(m_p,expm1_step)
m_v_new = m_v +
|
np.matmul(m_v,expm1_step)
|
numpy.matmul
|
import itertools
import unittest
from copy import copy
import numpy as np
import pytest
from coremltools._deps import _HAS_KERAS2_TF, _HAS_KERAS_TF
from coremltools.models.utils import _macos_version, _is_macos
np.random.seed(1377)
if _HAS_KERAS2_TF or _HAS_KERAS_TF:
import keras
from keras.models import Sequential
from keras.layers import LSTM, GRU, SimpleRNN, RepeatVector
from keras.layers.wrappers import Bidirectional
import keras.backend as K
from coremltools.converters import keras as keras_converter
"""
=============================
Utility Functions
=============================
"""
def get_recurrent_activation_name_from_keras(activation):
if activation == keras.activations.sigmoid:
activation_str = "SIGMOID"
elif activation == keras.activations.hard_sigmoid:
activation_str = "SIGMOID_HARD"
elif activation == keras.activations.tanh:
activation_str = "TANH"
elif activation == keras.activations.relu:
activation_str = "RELU"
elif activation == keras.activations.linear:
activation_str = "LINEAR"
else:
raise NotImplementedError(
"activation %s not supported for Recurrent layer." % activation
)
return activation_str
def linear(x, alpha=1, beta=0):
return alpha * x + beta
def relu(x):
return np.maximum(0, x)
def sigmoid(x):
return 1.0 / (1 + np.exp(-x))
def hard_sigmoid(x, alpha=0.2, beta=0.5):
return np.minimum(np.maximum(alpha * x + beta, 0), 1)
def tanh(x):
return np.tanh(x)
def apply_act(x, option):
if option == "TANH":
return tanh(x)
elif option == "RELU":
return relu(x)
elif option == "SIGMOID":
return sigmoid(x)
elif option == "SIGMOID_HARD":
return hard_sigmoid(x)
elif option == "LINEAR":
return linear(x)
def clip(x, threshold=50.0):
return np.maximum(np.minimum(x, threshold), -threshold)
def valid_params(params):
"""Checks if this combination of parameters is allowed by Keras"""
return not (params["input_dims"][1] == 1 and params["unroll"])
def _compute_SNR(x, y):
x = x.flatten()
y = y.flatten()
noise = x - y
noise_var = np.sum(noise ** 2) / len(noise) + 1e-7
signal_energy = np.sum(y ** 2) / len(y)
signal_energy2 = np.sum(x ** 2) / len(x)
if signal_energy < 1e-5 and signal_energy2 < 1e-5:
return 50, 50, 0
max_signal_energy = np.amax(y ** 2)
SNR = 10 * np.log10(signal_energy / noise_var)
PSNR = 10 * np.log10(max_signal_energy / noise_var)
return SNR, PSNR, signal_energy
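# Usage sketch, added for clarity (not part of the original test module):
# _compute_SNR above measures how close x is to the reference y in dB.
def _example_snr():
    y = np.sin(np.linspace(0.0, 2.0 * np.pi, 1000))
    x = y + 1e-3 * np.random.randn(1000)
    snr, psnr, _ = _compute_SNR(x, y)
    return snr, psnr  # both should be large (roughly 50 dB or more) for this small noise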
"""
=============================
Numpy implementations
=============================
"""
def get_numpy_prediction_gru(model, X):
X = X[0, :, :]
seq_len, input_size = X.shape
keras_layer = model.layers[0]
return_seq = keras_layer.return_sequences
if keras_layer.go_backwards:
X = X[::-1, :]
if _HAS_KERAS2_TF:
hidden_size = keras_layer.units
keras_W_h = keras_layer.get_weights()[1].T
R_z = keras_W_h[0 * hidden_size :][:hidden_size]
R_r = keras_W_h[1 * hidden_size :][:hidden_size]
R_o = keras_W_h[2 * hidden_size :][:hidden_size]
keras_W_x = keras_layer.get_weights()[0].T
W_z = keras_W_x[0 * hidden_size :][:hidden_size]
W_r = keras_W_x[1 * hidden_size :][:hidden_size]
W_o = keras_W_x[2 * hidden_size :][:hidden_size]
keras_b = keras_layer.get_weights()[2]
b_z = keras_b[0 * hidden_size :][:hidden_size]
b_r = keras_b[1 * hidden_size :][:hidden_size]
b_o = keras_b[2 * hidden_size :][:hidden_size]
inner_activation_str = get_recurrent_activation_name_from_keras(
keras_layer.recurrent_activation
)
activation_str = get_recurrent_activation_name_from_keras(
keras_layer.activation
)
else:
hidden_size = keras_layer.output_dim
W_z = keras_layer.get_weights()[0].T
W_r = keras_layer.get_weights()[3].T
W_o = keras_layer.get_weights()[6].T
R_z = keras_layer.get_weights()[1].T
R_r = keras_layer.get_weights()[4].T
R_o = keras_layer.get_weights()[7].T
b_z = keras_layer.get_weights()[2]
b_r = keras_layer.get_weights()[5]
b_o = keras_layer.get_weights()[8]
inner_activation_str = get_recurrent_activation_name_from_keras(
keras_layer.inner_activation
)
activation_str = get_recurrent_activation_name_from_keras(
keras_layer.activation
)
h = np.zeros((hidden_size))
c = np.zeros((hidden_size))
np_out = np.zeros((seq_len, hidden_size))
for k in range(seq_len):
x = X[k, :]
z = apply_act(clip(np.dot(W_z, x) + np.dot(R_z, h) + b_z), inner_activation_str)
r = apply_act(clip(np.dot(W_r, x) + np.dot(R_r, h) + b_r), inner_activation_str)
c = clip(h * r)
o = apply_act(clip(np.dot(W_o, x) + np.dot(R_o, c) + b_o), activation_str)
h = (1 - z) * o + z * h
np_out[k, :] = h
if return_seq:
np_out_final = np_out
else:
np_out_final = np_out[-1, :]
return np_out_final
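# For reference, the loop above implements the GRU recurrence (with
# pre-activation clipping and the reset gate applied to the previous hidden
# state before the candidate computation):
#   z_k = inner_act(W_z x_k + R_z h_{k-1} + b_z)
#   r_k = inner_act(W_r x_k + R_r h_{k-1} + b_r)
#   o_k = act(W_o x_k + R_o (r_k * h_{k-1}) + b_o)
#   h_k = (1 - z_k) * o_k + z_k * h_{k-1}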
def get_numpy_prediction_unilstm(model, X):
X = X[0, :, :]
seq_len, input_size = X.shape
keras_layer = model.layers[0]
return_seq = keras_layer.return_sequences
if keras_layer.go_backwards:
X = X[::-1, :]
if _HAS_KERAS2_TF:
hidden_size = keras_layer.units
keras_W_h = keras_layer.get_weights()[1].T
R_i = keras_W_h[0 * hidden_size :][:hidden_size]
R_f = keras_W_h[1 * hidden_size :][:hidden_size]
R_o = keras_W_h[3 * hidden_size :][:hidden_size]
R_g = keras_W_h[2 * hidden_size :][:hidden_size]
keras_W_x = keras_layer.get_weights()[0].T
W_i = keras_W_x[0 * hidden_size :][:hidden_size]
W_f = keras_W_x[1 * hidden_size :][:hidden_size]
W_o = keras_W_x[3 * hidden_size :][:hidden_size]
W_g = keras_W_x[2 * hidden_size :][:hidden_size]
keras_b = keras_layer.get_weights()[2]
b_i = keras_b[0 * hidden_size :][:hidden_size]
b_f = keras_b[1 * hidden_size :][:hidden_size]
b_o = keras_b[3 * hidden_size :][:hidden_size]
b_g = keras_b[2 * hidden_size :][:hidden_size]
inner_activation_str = get_recurrent_activation_name_from_keras(
keras_layer.recurrent_activation
)
activation_str = get_recurrent_activation_name_from_keras(
keras_layer.activation
)
else:
hidden_size = keras_layer.output_dim
R_i = keras_layer.get_weights()[1].T
R_f = keras_layer.get_weights()[7].T
R_o = keras_layer.get_weights()[10].T
R_g = keras_layer.get_weights()[4].T
W_i = keras_layer.get_weights()[0].T
W_f = keras_layer.get_weights()[6].T
W_o = keras_layer.get_weights()[9].T
W_g = keras_layer.get_weights()[3].T
b_i = keras_layer.get_weights()[2]
b_f = keras_layer.get_weights()[8]
b_o = keras_layer.get_weights()[11]
b_g = keras_layer.get_weights()[5]
inner_activation_str = get_recurrent_activation_name_from_keras(
keras_layer.inner_activation
)
activation_str = get_recurrent_activation_name_from_keras(
keras_layer.activation
)
h = np.zeros((hidden_size))
c = np.zeros((hidden_size))
np_out = np.zeros((seq_len, hidden_size))
for k in range(seq_len):
x = X[k, :]
i = apply_act(clip(np.dot(W_i, x) + np.dot(R_i, h) + b_i), inner_activation_str)
f = apply_act(clip(np.dot(W_f, x) + np.dot(R_f, h) + b_f), inner_activation_str)
g = apply_act(clip(np.dot(W_g, x) + np.dot(R_g, h) + b_g), activation_str)
c = c * f + i * g
c = clip(c, 50000.0)
o = apply_act(clip(np.dot(W_o, x) + np.dot(R_o, h) + b_o), inner_activation_str)
h = o * apply_act(c, activation_str)
np_out[k, :] = h
if return_seq:
np_out_final = np_out
else:
np_out_final = np_out[-1, :]
return np_out_final
def get_numpy_prediction_bilstm_batched(model, X):
batch, _, _ = X.shape
out = []
for i in range(batch):
out.append(
get_numpy_prediction_bilstm(model, np.expand_dims(X[i, :, :], axis=0))
)
return np.stack(out, axis=0)
def get_numpy_prediction_bilstm(model, X):
X = X[0, :, :]
seq_len, input_size = X.shape
keras_layer = model.layers[0]
return_seq = keras_layer.return_sequences
if _HAS_KERAS2_TF:
hidden_size = keras_layer.forward_layer.units
keras_W_h = keras_layer.forward_layer.get_weights()[1].T
R_i = keras_W_h[0 * hidden_size :][:hidden_size]
R_f = keras_W_h[1 * hidden_size :][:hidden_size]
R_o = keras_W_h[3 * hidden_size :][:hidden_size]
R_g = keras_W_h[2 * hidden_size :][:hidden_size]
keras_W_x = keras_layer.forward_layer.get_weights()[0].T
W_i = keras_W_x[0 * hidden_size :][:hidden_size]
W_f = keras_W_x[1 * hidden_size :][:hidden_size]
W_o = keras_W_x[3 * hidden_size :][:hidden_size]
W_g = keras_W_x[2 * hidden_size :][:hidden_size]
keras_b = keras_layer.forward_layer.get_weights()[2]
b_i = keras_b[0 * hidden_size :][:hidden_size]
b_f = keras_b[1 * hidden_size :][:hidden_size]
b_o = keras_b[3 * hidden_size :][:hidden_size]
b_g = keras_b[2 * hidden_size :][:hidden_size]
keras_W_h = keras_layer.backward_layer.get_weights()[1].T
R_i_back = keras_W_h[0 * hidden_size :][:hidden_size]
R_f_back = keras_W_h[1 * hidden_size :][:hidden_size]
R_o_back = keras_W_h[3 * hidden_size :][:hidden_size]
R_g_back = keras_W_h[2 * hidden_size :][:hidden_size]
keras_W_x = keras_layer.backward_layer.get_weights()[0].T
W_i_back = keras_W_x[0 * hidden_size :][:hidden_size]
W_f_back = keras_W_x[1 * hidden_size :][:hidden_size]
W_o_back = keras_W_x[3 * hidden_size :][:hidden_size]
W_g_back = keras_W_x[2 * hidden_size :][:hidden_size]
keras_b = keras_layer.backward_layer.get_weights()[2]
b_i_back = keras_b[0 * hidden_size :][:hidden_size]
b_f_back = keras_b[1 * hidden_size :][:hidden_size]
b_o_back = keras_b[3 * hidden_size :][:hidden_size]
b_g_back = keras_b[2 * hidden_size :][:hidden_size]
inner_activation_str = get_recurrent_activation_name_from_keras(
keras_layer.forward_layer.recurrent_activation
)
activation_str = get_recurrent_activation_name_from_keras(
keras_layer.forward_layer.activation
)
else:
hidden_size = keras_layer.forward_layer.output_dim
R_i = keras_layer.get_weights()[1].T
R_f = keras_layer.get_weights()[7].T
R_o = keras_layer.get_weights()[10].T
R_g = keras_layer.get_weights()[4].T
W_i = keras_layer.get_weights()[0].T
W_f = keras_layer.get_weights()[6].T
W_o = keras_layer.get_weights()[9].T
W_g = keras_layer.get_weights()[3].T
b_i = keras_layer.get_weights()[2]
b_f = keras_layer.get_weights()[8]
b_o = keras_layer.get_weights()[11]
b_g = keras_layer.get_weights()[5]
R_i_back = keras_layer.backward_layer.get_weights()[1].T
R_f_back = keras_layer.backward_layer.get_weights()[7].T
R_o_back = keras_layer.backward_layer.get_weights()[10].T
R_g_back = keras_layer.backward_layer.get_weights()[4].T
W_i_back = keras_layer.backward_layer.get_weights()[0].T
W_f_back = keras_layer.backward_layer.get_weights()[6].T
W_o_back = keras_layer.backward_layer.get_weights()[9].T
W_g_back = keras_layer.backward_layer.get_weights()[3].T
b_i_back = keras_layer.backward_layer.get_weights()[2]
b_f_back = keras_layer.backward_layer.get_weights()[8]
b_o_back = keras_layer.backward_layer.get_weights()[11]
b_g_back = keras_layer.backward_layer.get_weights()[5]
inner_activation_str = get_recurrent_activation_name_from_keras(
keras_layer.forward_layer.inner_activation
)
activation_str = get_recurrent_activation_name_from_keras(
keras_layer.forward_layer.activation
)
h = np.zeros((hidden_size))
c = np.zeros((hidden_size))
np_out_forward = np.zeros((seq_len, hidden_size))
for k in range(seq_len):
x = X[k, :]
i = apply_act(clip(np.dot(W_i, x) + np.dot(R_i, h) + b_i), inner_activation_str)
f = apply_act(clip(np.dot(W_f, x) + np.dot(R_f, h) + b_f), inner_activation_str)
g = apply_act(clip(np.dot(W_g, x) + np.dot(R_g, h) + b_g), activation_str)
c = c * f + i * g
c = clip(c, 50000.0)
o = apply_act(clip(np.dot(W_o, x) + np.dot(R_o, h) + b_o), inner_activation_str)
h = o * apply_act(c, activation_str)
np_out_forward[k, :] = h
h = np.zeros((hidden_size))
c = np.zeros((hidden_size))
np_out_backward = np.zeros((seq_len, hidden_size))
for k in range(seq_len):
x = X[seq_len - k - 1, :]
i = apply_act(
clip(np.dot(W_i_back, x) + np.dot(R_i_back, h) + b_i_back),
inner_activation_str,
)
f = apply_act(
clip(np.dot(W_f_back, x) + np.dot(R_f_back, h) + b_f_back),
inner_activation_str,
)
g = apply_act(
clip(np.dot(W_g_back, x) + np.dot(R_g_back, h) + b_g_back), activation_str
)
c = c * f + i * g
c = clip(c, 50000.0)
o = apply_act(
clip(
|
np.dot(W_o_back, x)
|
numpy.dot
|
# coding: utf-8
import sys, os
sys.path.append(os.pardir)  # allow importing files from the parent directory
import numpy as np
from common.functions import softmax, cross_entropy_error
from common.gradient import numerical_gradient
# np.random.seed(1)
class simpleNet:
def __init__(self):
        self.W = np.random.randn(2,3) # initialize with a standard normal distribution
self.y = []
def predict(self, x):
return
|
np.dot(x, self.W)
|
numpy.dot
|
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
from threading import Lock
import numpy as np
import sys
import array
import math
import ctypes
import pyzed.sl as sl
VERTEX_SHADER = """
# version 330 core
layout(location = 0) in vec3 in_Vertex;
layout(location = 1) in vec4 in_Color;
uniform mat4 u_mvpMatrix;
out vec4 b_color;
void main() {
b_color = in_Color;
gl_Position = u_mvpMatrix * vec4(in_Vertex, 1);
}
"""
FRAGMENT_SHADER = """
# version 330 core
in vec4 b_color;
layout(location = 0) out vec4 out_Color;
void main() {
out_Color = b_color;
}
"""
M_PI = 3.1415926
GRID_SIZE = 15.0
CLASS_COLORS = np.array([
[44, 117, 255] # People
, [255, 0, 255] # Vehicle
, [0, 0, 255]
, [0, 255, 255]
, [0, 255, 0]
, [255, 255, 255]]
, np.float32)
ID_COLORS = np.array([
[0.231, 0.909, 0.69]
, [0.098, 0.686, 0.816]
, [0.412, 0.4, 0.804]
, [1, 0.725, 0]
, [0.989, 0.388, 0.419]]
, np.float32)
def get_color_class(_idx):
_idx = min(5, _idx)
clr = [CLASS_COLORS[_idx][0], CLASS_COLORS[_idx][1], CLASS_COLORS[_idx][2], 1.0]
return np.divide(clr, 255.0)
def generate_color_id(_idx):
clr = []
if _idx < 0:
clr = [236, 184, 36, 255]
clr = np.divide(clr, 255.0)
else:
offset = _idx % 5
clr = [ID_COLORS[offset][0], ID_COLORS[offset][1], ID_COLORS[offset][2], 1]
return clr
class Shader:
def __init__(self, _vs, _fs):
self.program_id = glCreateProgram()
vertex_id = self.compile(GL_VERTEX_SHADER, _vs)
fragment_id = self.compile(GL_FRAGMENT_SHADER, _fs)
glAttachShader(self.program_id, vertex_id)
glAttachShader(self.program_id, fragment_id)
glBindAttribLocation( self.program_id, 0, "in_vertex")
glBindAttribLocation( self.program_id, 1, "in_texCoord")
glLinkProgram(self.program_id)
if glGetProgramiv(self.program_id, GL_LINK_STATUS) != GL_TRUE:
info = glGetProgramInfoLog(self.program_id)
glDeleteProgram(self.program_id)
glDeleteShader(vertex_id)
glDeleteShader(fragment_id)
raise RuntimeError('Error linking program: %s' % (info))
glDeleteShader(vertex_id)
glDeleteShader(fragment_id)
def compile(self, _type, _src):
try:
shader_id = glCreateShader(_type)
if shader_id == 0:
print("ERROR: shader type {0} does not exist".format(_type))
exit()
glShaderSource(shader_id, _src)
glCompileShader(shader_id)
if glGetShaderiv(shader_id, GL_COMPILE_STATUS) != GL_TRUE:
info = glGetShaderInfoLog(shader_id)
glDeleteShader(shader_id)
raise RuntimeError('Shader compilation failed: %s' % (info))
return shader_id
except:
glDeleteShader(shader_id)
raise
def get_program_id(self):
return self.program_id
class Simple3DObject:
"""
Class that manages simple 3D objects to render with OpenGL
"""
def __init__(self, _is_static):
self.vaoID = 0
self.drawing_type = GL_TRIANGLES
self.is_static = _is_static
self.elementbufferSize = 0
self.vertices = array.array('f')
self.colors = array.array('f')
self.normals = array.array('f')
self.indices = array.array('I')
def __del__(self):
if self.vaoID:
self.vaoID = 0
"""
Add a unique point to the list of points
"""
def add_pt(self, _pts):
for pt in _pts:
self.vertices.append(pt)
"""
Add a unique color to the list of colors
"""
def add_clr(self, _clrs):
for clr in _clrs:
self.colors.append(clr)
"""
Add a unique normal to the list of normals
"""
def add_normal(self, _normals):
for normal in _normals:
self.normals.append(normal)
"""
Add a set of points to the list of points and their corresponding color
"""
def add_points(self, _pts, _base_clr):
for i in range(len(_pts)):
pt = _pts[i]
self.add_pt(pt)
self.add_clr(_base_clr)
            current_size_index = (len(self.vertices) // 3) - 1  # integer index required by array('I')
self.indices.append(current_size_index)
self.indices.append(current_size_index+1)
"""
Add a point and its corresponding color to the list of points
"""
def add_point_clr(self, _pt, _clr):
self.add_pt(_pt)
self.add_clr(_clr)
self.indices.append(len(self.indices))
"""
Define a line from two points
"""
def add_line(self, _p1, _p2, _clr):
self.add_point_clr(_p1, _clr)
self.add_point_clr(_p2, _clr)
def add_full_edges(self, _pts, _clr):
start_id = int(len(self.vertices) / 3)
_clr[3] = 0.2
for i in range(len(_pts)):
self.add_pt(_pts[i])
self.add_clr(_clr)
box_links_top = np.array([0, 1, 1, 2, 2, 3, 3, 0])
i = 0
while i < box_links_top.size:
self.indices.append(start_id + box_links_top[i])
self.indices.append(start_id + box_links_top[i+1])
i = i + 2
box_links_bottom = np.array([4, 5, 5, 6, 6, 7, 7, 4])
i = 0
while i < box_links_bottom.size:
self.indices.append(start_id + box_links_bottom[i])
self.indices.append(start_id + box_links_bottom[i+1])
i = i + 2
def __add_single_vertical_line(self, _top_pt, _bottom_pt, _clr):
current_pts = np.array(
[_top_pt,
((GRID_SIZE - 1) * np.array(_top_pt) + np.array(_bottom_pt)) / GRID_SIZE,
((GRID_SIZE - 2) * np.array(_top_pt) + np.array(_bottom_pt) * 2) / GRID_SIZE,
(2 * np.array(_top_pt) + np.array(_bottom_pt) * (GRID_SIZE - 2)) / GRID_SIZE,
(np.array(_top_pt) + np.array(_bottom_pt) * (GRID_SIZE - 1)) / GRID_SIZE,
_bottom_pt
], np.float32)
start_id = int(len(self.vertices) / 3)
for i in range(len(current_pts)):
self.add_pt(current_pts[i])
if (i == 2 or i == 3):
_clr[3] = 0
else:
_clr[3] = 0.2
self.add_clr(_clr)
box_links = np.array([0, 1, 1, 2, 2, 3, 3, 4, 4, 5])
i = 0
while i < box_links.size:
self.indices.append(start_id + box_links[i])
self.indices.append(start_id + box_links[i+1])
i = i + 2
def add_vertical_edges(self, _pts, _clr):
self.__add_single_vertical_line(_pts[0], _pts[4], _clr)
self.__add_single_vertical_line(_pts[1], _pts[5], _clr)
self.__add_single_vertical_line(_pts[2], _pts[6], _clr)
self.__add_single_vertical_line(_pts[3], _pts[7], _clr)
def add_top_face(self, _pts, _clr):
_clr[3] = 0.25
for pt in _pts:
self.add_point_clr(pt, _clr)
def __add_quad(self, _quad_pts, _alpha1, _alpha2, _clr):
for i in range(len(_quad_pts)):
self.add_pt(_quad_pts[i])
if i < 2:
_clr[3] = _alpha1
else:
_clr[3] = _alpha2
self.add_clr(_clr)
self.indices.append(len(self.indices))
self.indices.append(len(self.indices))
self.indices.append(len(self.indices))
self.indices.append(len(self.indices))
def add_vertical_faces(self, _pts, _clr):
# For each face, we need to add 4 quads (the first 2 indexes are always the top points of the quad)
quads = [[0, 3, 7, 4] # Front face
, [3, 2, 6, 7] # Right face
, [2, 1, 5, 6] # Back face
, [1, 0, 4, 5]] # Left face
alpha = 0.25
# Create gradually fading quads
for quad in quads:
quad_pts_1 = [
_pts[quad[0]],
_pts[quad[1]],
((GRID_SIZE - 0.5) * np.array(_pts[quad[1]]) + 0.5 * np.array(_pts[quad[2]])) / GRID_SIZE,
((GRID_SIZE - 0.5) * np.array(_pts[quad[0]]) + 0.5 * np.array(_pts[quad[3]])) / GRID_SIZE
]
self.__add_quad(quad_pts_1, alpha, alpha, _clr)
quad_pts_2 = [
((GRID_SIZE - 0.5) * np.array(_pts[quad[0]]) + 0.5 * np.array(_pts[quad[3]])) / GRID_SIZE,
((GRID_SIZE - 0.5) * np.array(_pts[quad[1]]) + 0.5 * np.array(_pts[quad[2]])) / GRID_SIZE,
((GRID_SIZE - 1.0) * np.array(_pts[quad[1]]) + np.array(_pts[quad[2]])) / GRID_SIZE,
((GRID_SIZE - 1.0) * np.array(_pts[quad[0]]) + np.array(_pts[quad[3]])) / GRID_SIZE
]
self.__add_quad(quad_pts_2, alpha, 2 * alpha / 3, _clr)
quad_pts_3 = [
((GRID_SIZE - 1.0) * np.array(_pts[quad[0]]) + np.array(_pts[quad[3]])) / GRID_SIZE,
((GRID_SIZE - 1.0) * np.array(_pts[quad[1]]) + np.array(_pts[quad[2]])) / GRID_SIZE,
((GRID_SIZE - 1.5) * np.array(_pts[quad[1]]) + 1.5 * np.array(_pts[quad[2]])) / GRID_SIZE,
((GRID_SIZE - 1.5) * np.array(_pts[quad[0]]) + 1.5 * np.array(_pts[quad[3]])) / GRID_SIZE
]
self.__add_quad(quad_pts_3, 2 * alpha / 3, alpha / 3, _clr)
quad_pts_4 = [
((GRID_SIZE - 1.5) *
|
np.array(_pts[quad[0]])
|
numpy.array
|
'''
Walker-v2 solution by <NAME>
**Experimental**
https://github.com/FitMachineLearning/FitML/
https://www.youtube.com/channel/UCi7_WxajoowBl4_9P0DhzzA/featured
Using DeepQ Learning
'''
import numpy as np
import keras
import gym
import os
import h5py
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.layers import Embedding
from keras.layers import LSTM
from keras import optimizers
num_env_variables = 24
num_env_actions = 4
num_initial_observation = 1
learning_rate = 0.001
weigths_filename = "BpW1024-QL-v2-weights.h5"
apWeights_filename = "BpW1024_ap-QL-v2-weights.h5"
#range within which the SmartCrossEntropy action parameters will deviate from the
#remembered optimal policy
sce_range = 0.5
b_discount = 0.98
max_memory_len = 90000
starting_explore_prob = 0.1
training_epochs = 2
load_previous_weights = True
observe_and_train = True
save_weights = True
num_games_to_play = 1000
#One hot encoding array
possible_actions = np.arange(0,num_env_actions)
actions_1_hot = np.zeros((num_env_actions,num_env_actions))
actions_1_hot[np.arange(num_env_actions),possible_actions] = 1
#Create testing environment
env = gym.make('BipedalWalker-v2')
env.reset()
#initialize training matrix with random states and actions
dataX = np.random.random(( 5,num_env_variables+num_env_actions ))
#Only one output for the total score / reward
dataY = np.random.random((5,1))
#initialize training matrix with random states and actions
apdataX = np.random.random(( 5,num_env_variables ))
apdataY = np.random.random((5,num_env_actions))
#initialize the Reward predictor model
model = Sequential()
#model.add(Dense(num_env_variables+num_env_actions, activation='tanh', input_dim=dataX.shape[1]))
model.add(Dense(1024, activation='relu', input_dim=dataX.shape[1]))
model.add(Dense(dataY.shape[1]))
opt = optimizers.adam(lr=learning_rate)
model.compile(loss='mse', optimizer=opt, metrics=['accuracy'])
#initialize the action predictor model
action_predictor_model = Sequential()
#model.add(Dense(num_env_variables+num_env_actions, activation='tanh', input_dim=dataX.shape[1]))
action_predictor_model.add(Dense(512, activation='relu', input_dim=apdataX.shape[1]))
action_predictor_model.add(Dense(apdataY.shape[1]))
opt2 = optimizers.adam(lr=learning_rate)
action_predictor_model.compile(loss='mse', optimizer=opt2, metrics=['accuracy'])
#load previous model weights if they exist
if load_previous_weights:
dir_path = os.path.realpath(".")
fn = dir_path + "/"+weigths_filename
print("filepath ", fn)
if os.path.isfile(fn):
print("loading weights")
model.load_weights(weigths_filename)
else:
print("File ",weigths_filename," does not exis. Retraining... ")
#load previous action predictor model weights if they exist
if load_previous_weights:
dir_path = os.path.realpath(".")
fn = dir_path + "/"+ apWeights_filename
print("filepath ", fn)
if os.path.isfile(fn):
print("loading weights")
action_predictor_model.load_weights(apWeights_filename)
else:
print("File ",apWeights_filename," does not exis. Retraining... ")
#Record first 500 in a sequence and add them to the training sequence
total_steps = 0
dataX = np.zeros(shape=(1,num_env_variables+num_env_actions))
dataY = np.zeros(shape=(1,1))
memoryX = np.zeros(shape=(1,num_env_variables+num_env_actions))
memoryY = np.zeros(shape=(1,1))
apmemoryX = np.zeros(shape=(1,num_env_variables))
apmemoryY = np.zeros(shape=(1,num_env_actions))
print("dataX shape", dataX.shape)
print("dataY shape", dataY.shape)
def predictTotalRewards(qstate, action):
qs_a = np.concatenate((qstate,action), axis=0)
predX = np.zeros(shape=(1,num_env_variables+num_env_actions))
predX[0] = qs_a
#print("trying to predict reward at qs_a", predX[0])
pred = model.predict(predX[0].reshape(1,predX.shape[1]))
remembered_total_reward = pred[0][0]
return remembered_total_reward
def GetRememberedOptimalPolicy(qstate):
predX = np.zeros(shape=(1,num_env_variables))
predX[0] = qstate
#print("trying to predict reward at qs_a", predX[0])
pred = action_predictor_model.predict(predX[0].reshape(1,predX.shape[1]))
r_remembered_optimal_policy = pred[0]
return r_remembered_optimal_policy
def SmartCrossEntropy(current_optimal_policy):
sce = np.zeros(shape=(num_env_actions))
#print("current_optimal_policy", current_optimal_policy)
for i in range(num_env_actions):
sce[i] = current_optimal_policy[i] + sce_range * (
|
np.random.rand(1)
|
numpy.random.rand
|
import numpy as np
import matplotlib.pyplot as plt
from scipy import ndimage
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.image as mplimg
from matplotlib.colors import LogNorm
from numpy import fft
def get_photon_positions(image, cdf, cdf_indexes, nphot=1):
"""
Uses an inverse CDF lookup to find positions for uniform draws
:param image: The 3d voxel representation of the truth
:param cdf: CDF representation of the image. CDF should only be computed for
non-zero pixels
:param cdf_indexes: 1d indexes from image of the pixels represented in cdf
:param nphot: Number of photons to draw
:return: 3D positions of the drawn photons, about the image center
ISSUES: make sure the cdf picker is statistically correct
"""
draws = np.random.uniform(size=nphot) * cdf[-1]
insert_locations = np.searchsorted(cdf, draws)
insert_locations = cdf_indexes[insert_locations]
indexes_3d = np.unravel_index(insert_locations, image.shape)
indexes_3d = np.column_stack(indexes_3d)
jitter = np.random.uniform(size=indexes_3d.size).reshape(indexes_3d.shape)
return indexes_3d + jitter - np.array(image.shape) / 2
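# Usage sketch, added for clarity (not part of the original module): how the
# cdf / cdf_indexes arguments above can be prepared from a (non-empty) voxel
# image, following the docstring -- the CDF is the cumulative sum over the
# non-zero voxels only, and cdf_indexes maps each CDF entry back to its flat
# index in the image.
def _example_draw_photons(image, nphot=100):
    cdf_indexes = np.flatnonzero(image)
    cdf = np.cumsum(image.flat[cdf_indexes])
    return get_photon_positions(image, cdf, cdf_indexes, nphot=nphot)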
def project_by_random_matrix(photon_zyxs, distort=None, debug=False):
"""
Generate a randomized 3D-to-2D projection matrix, and project given photon
positions using it.
:param photon_zyxs: Photon positions in 3D, zyx order
:param distort: Either None, or a dictionary of vectors {'dipole': vec,
'quadrupole': vec}
:param debug: If True, return axis, rot matrix, and proj matrix instead of
transforming points
:return: Projected photon positions in 2D, yx order
"""
rand_axis = np.random.normal(size=3)
if distort is not None:
if 'quadrupole' in distort:
rand_axis *= distort['quadrupole']
if 'dipole' in distort:
rand_axis += distort['dipole']
rand_axis /= np.sqrt(np.dot(rand_axis, rand_axis))
    rand_angle = np.random.uniform(0, 2 * np.pi) + 1  # offset so the rotation angle cannot be (close to) zero
rot_matrix = angle_axis_to_matrix(rand_angle*rand_axis)
proj_matrix = rot_matrix[:, 1:3].T # first two cols (arbitrary)
if debug:
return rand_axis, rot_matrix, proj_matrix
projected_yxs = np.dot(proj_matrix, photon_zyxs.T).T
return projected_yxs
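# Usage sketch, added for clarity (not part of the original module; it assumes
# angle_axis_to_matrix is defined elsewhere in this file, as the functions
# above already rely on it): project a cloud of 3D photon positions onto a
# random plane through the origin.
def _example_random_projection(n_photons=500):
    photon_zyxs = np.random.normal(size=(n_photons, 3))
    projected_yxs = project_by_random_matrix(photon_zyxs)
    return projected_yxs.shape  # expected: (n_photons, 2)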
def random_fourier_slice(f_image_zyxis, distort=None):
rand_axis = np.random.normal(size=3)
if distort is not None:
if 'quadrupole' in distort:
rand_axis *= distort['quadrupole']
if 'dipole' in distort:
rand_axis += distort['dipole']
rand_axis /= np.sqrt(np.dot(rand_axis, rand_axis))
    rand_angle = np.random.uniform(0, 2*np.pi) + 1  # offset so the rotation angle cannot be (close to) zero
rot_matrix = angle_axis_to_matrix(rand_angle*rand_axis)
proj_matrix = rot_matrix[:, 1:3].T # project along z axis (arbitrary)
# Point-plane distance for a plane in Hessian normal form is just
# dot(n, x) + p. If the plane goes through the origin p is zero. Get voxels
# that are within 1 unit of the slicing plane
z_axis_aug = np.zeros(4)
z_axis_aug[0:3] = rot_matrix[:, 0]
slice_plane_distances = np.abs(np.dot(z_axis_aug, f_image_zyxis.T))
# TODO: be more precise about which voxels to select
dist_mask = slice_plane_distances < 1.0
fourier_slice = f_image_zyxis[dist_mask]
dist_weights = 1 - slice_plane_distances[dist_mask]
proj_matrix_aug = np.zeros((3, 4))
proj_matrix_aug[0:2, 0:3] = proj_matrix
proj_matrix_aug[2, 3] = 1
proj_yxis = np.dot(proj_matrix_aug, fourier_slice.T).T
# assert np.all(np.isclose(proj_yxis[:,2], fourier_slice[:,3]))
# TODO: Right now this sums up into pixels bins. Should really interpolate.
extents_yx = np.ceil(np.abs(proj_yxis[:, 0:2]).max(axis=0))
bins_x = np.arange(-extents_yx[1], extents_yx[1]+1)
bins_y = np.arange(-extents_yx[0], extents_yx[0]+1)
img2d = np.zeros((bins_x.size-1, bins_y.size-1))
# 2d gaussian kernel with FWHM=1 pixel
kern = np.outer((0.05554667, 0.88890666, 0.05554667),
(0.05554667, 0.88890666, 0.05554667))
shifts = [(-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 0), (0, 1), (1, -1),
(1, 0), (1, 1)]
for (shiftx, shifty), kern_weight in zip(shifts, kern.flat):
img, bx, by = np.histogram2d(
proj_yxis[:, 1]+shiftx, proj_yxis[:, 0]+shifty,
bins=(bins_x, bins_y),
weights=proj_yxis[:, 2]*dist_weights*kern_weight)
img2d += img
return img2d
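# Note, added for clarity: random_fourier_slice above follows the
# projection-slice idea -- voxels of the 3D Fourier volume lying within one
# unit of a random central plane are kept (weighted by 1 - distance), projected
# into that plane, and binned into a 2D image, with the small 3x3 kernel
# spreading each sample over neighbouring pixels.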
def make_fake_data_fft(truth, num_images=1024, rate=1., distort=None,
save_pngs=0):
"""
# inputs:
- truth: pixelized image of density
- N: number of images to take
- rate: mean number of photons per image
# notes:
- Images that get zero photons will be dropped, but N images will be
returned.
"""
n_phots = np.zeros(num_images, dtype=int)
while True:
resamp_mask = n_phots == 0
n_phots[resamp_mask] = np.random.poisson(rate, size=np.sum(resamp_mask))
if np.all(n_phots > 0):
break
nyxs = np.zeros(shape=(np.sum(n_phots), 3))
fft_truth = fft.fftshift(fft.fftn(truth))
fft_truth = np.real(fft_truth * fft_truth.conj())
inds =
|
np.arange(fft_truth.size, dtype=int)
|
numpy.arange
|
#!/usr/bin/env python3
import os.path
import numpy as np
import numpy.linalg as la
import scipy.io as sio
import matplotlib.pyplot as plt
from neml import models, elasticity, parse
import sys
sys.path.append('../../..')
from srlife import receiver, structural
class TestCase:
def __init__(self, name, T, analytic, ri = 0.9, ro = 1.0, h = 10.0,
alpha = 1.0e-5, E = 100000.0, nu = 0.3, p = 1.0):
self.name = name
self.Tfn = T
self.afn = analytic
self.ri = ri
self.ro = ro
self.h = h
self.alpha = alpha
self.E = E
self.nu = nu
self.p = p
def T(self, r):
return self.Tfn(r, self.ri, self.ro)
def exact(self, r):
return self.afn(r, self.p, self.ri, self.ro, self.E, self.nu, self.alpha)
def make_mat(self):
emodel = elasticity.IsotropicLinearElasticModel(self.E, "youngs",
self.nu, "poissons")
return models.SmallStrainElasticity(emodel, alpha = self.alpha)
def make_tube(self, dim, nr = 15, nt = 30, nz = 5):
tube = receiver.Tube(self.ro, self.ro - self.ri, self.h, nr, nt, nz)
if dim == 1:
tube.make_1D(self.h/2, 0)
elif dim == 2:
tube.make_2D(self.h/2)
times = np.array([0,1])
tube.set_times(times)
R, _, _ = tube.mesh
Ts = np.zeros((2,) + R.shape[:dim])
Ts[1] = self.T(R)
tube.add_results("temperature", Ts)
if self.p != 0:
tube.set_pressure_bc(receiver.PressureBC(times, times * self.p))
return tube
def run_comparison(self, dim, solver, axial_strain = 0, nr = 10,
nt = 20, nz = 10):
mat = self.make_mat()
tube = self.make_tube(dim, nr, nt, nz)
solver.setup_tube(tube)
state_n = solver.init_state(tube, mat)
state_np1 = solver.solve(tube, 1, state_n, axial_strain)
solver.dump_state(tube, 1, state_np1)
return tube
def get_comparison(self, tube):
if tube.ndim == 3:
z = tube.nz // 2
x_avg = np.mean(tube.results['disp_x'])
u = tube.results['disp_x'][1,:,0,z] - 2*x_avg
r = tube.mesh[0][:,0,z]
elif tube.ndim == 2:
# The displacements tend to drift, need to recenter
x_avg = np.mean(tube.results['disp_x'])
u = tube.results['disp_x'][1,:,0] - 2*x_avg
r = tube.mesh[0][:,0,0]
else:
u = tube.results['disp_x'][1]
r = tube.mesh[0][:,0,0]
return u, r
def plot_comparison(self, tube):
u, r = self.get_comparison(tube)
plt.figure()
plt.plot(r, u, 'k-')
plt.plot(r, self.exact(r), 'k--')
plt.xlabel("Radial position")
plt.ylabel("Radial displacement")
plt.title(self.name + ": " + "%iD" % tube.ndim)
plt.show()
def evaluate_comparison(self, tube):
u, r = self.get_comparison(tube)
err = np.abs(u - self.exact(r))
rel = err / np.abs(self.exact(r))
return np.max(err), np.max(rel)
def exact1(r, p, ri, ro, E, v, a):
A = p / (1.0/ro**2.0 - 1.0/ri**2.0)
C = -A/(2*ro**2.0)
res = (1.0+v)/E * (-A/r + 2.0*(1-2.0*v) * C* r)
return res
cases = [
TestCase("Inner pressure", lambda T, ri, ro: 0.0, exact1,
p = 100, ri = 8, ro = 10.0)
]
# Moose comparison stuff
# MOOSE test stuff
ramp = lambda x: np.piecewise(x,
[x < 1, x>=1],
[lambda xx: xx, lambda xx: 1.0+xx*0.0])
unit_temperature = lambda x, y, z: (np.sqrt(x**2.0+y**2.0)-9)/(10-9) * np.cos(np.arctan2(y,x))*(z/10.0 + 1)
temperature = lambda t, x, y, z: np.array([100*ramp(ti)*unit_temperature(x,y,z) for ti in t])
pressure = lambda t: 1.0 * ramp(t)
times = np.array([0, 0.5, 1, 101, 201, 301, 401, 501, 601, 701, 801, 901, 1001])
mat = parse.parse_xml(os.path.join(os.path.dirname(__file__),
'moose-verification', 'model.xml'), 'creeping')
ri = 9.0
ro = 10.0
h = 10.0
nr = 11
nt = 20
nz = 6
moose_base = os.path.join(os.path.dirname(__file__), 'moose-verification')
moose_ver = [os.path.join(moose_base, f) for f in ['1d_out.e', '2d_out.e', '3d_out.e']]
def run_reference_simulation(d, solver):
tube = receiver.Tube(ro, ro - ri, h, nr, nt, nz)
tube.set_times(times)
if d == 1:
tube.make_1D(0, 0)
elif d == 2:
tube.make_2D(0)
R, T, Z = tube.mesh
X = R * np.cos(T)
Y = R * np.sin(T)
Ts = temperature(times, X, Y, Z).reshape((len(times),) + tube.dim[:tube.ndim])
tube.add_results("temperature", Ts)
tube.set_pressure_bc(receiver.PressureBC(times, pressure(times)))
solver.setup_tube(tube)
state_n = solver.init_state(tube, mat)
for i in range(1,len(tube.times)):
state_np1 = solver.solve(tube, i, state_n, 0.0)
solver.dump_state(tube, i, state_np1)
state_n = state_np1
return tube
def load_displacements_exodus(efilename, d):
nf = sio.netcdf_file(efilename)
cnames = ['coordx', 'coordy', 'coordz']
coords = np.array([
np.copy(nf.variables[c][:]) for c in cnames[:d]])
names = [b''.join(v).decode('utf-8') for v in nf.variables['name_nod_var']]
disps = np.array([np.copy(nf.variables['vals_nod_var%i' % (names.index(nm)+1)][:]) for
nm in ['disp_x', 'disp_y', 'disp_z'][:d]])
return coords, disps
def compare_nodal_field(c1, r1, c2, r2, d, dec = 6, gtol = 1.0e-8):
c1 = np.round(c1, decimals=dec)
c2 =
|
np.round(c2, decimals=dec)
|
numpy.round
|
from sqlalchemy import true
import FinsterTab.W2020.DataForecast
import datetime as dt
from FinsterTab.W2020.dbEngine import DBEngine
import pandas as pd
import sqlalchemy as sal
import numpy
from datetime import datetime, timedelta, date
import pandas_datareader.data as dr
def get_past_data(self):
"""
Get raw data from Yahoo! Finance for SPY during Great Recession
Store data in MySQL database
:param sources: provides ticker symbols of instruments being tracked
"""
# Assume that date is 2010
now = dt.date(2009, 1, 1) # Date Variables
start = now - timedelta(days=1500) # get date value from 5 years ago
end = now
# data will be a 2D Pandas Dataframe
data = dr.DataReader('SPY', 'yahoo', start, end)
symbol = [3] * len(data) # add column to identify instrument id number
data['instrumentid'] = symbol
data = data.reset_index() # no designated index - easier to work with mysql database
    # Rename Yahoo! Finance columns to match the column names in the MySQL database.
    # Column names are kept the same to avoid any ambiguity.
# Column names are not case-sensitive.
data.rename(columns={'Date': 'date', 'High': 'high', 'Low': 'low', 'Open': 'open', 'Close': 'close',
'Adj Close': 'adj close', 'Volume': 'volume'}, inplace=True)
    data = data.sort_values(by=['date'])  # make sure data is ordered by trade date
# send data to database
# replace data each time program is run
data.to_sql('dbo_paststatistics', self.engine, if_exists=('replace'),
index=False,
dtype={'date': sal.Date, 'open': sal.FLOAT, 'high': sal.FLOAT, 'low': sal.FLOAT,
'close': sal.FLOAT, 'adj close': sal.FLOAT, 'volume': sal.FLOAT})
# Tests the accuracy of the old functions
def accuracy(self):
query = 'SELECT * FROM dbo_algorithmmaster'
algorithm_df = pd.read_sql_query(query, self.engine)
query = 'SELECT * FROM dbo_instrumentmaster'
instrument_master_df = pd.read_sql_query(query, self.engine)
# Changes algorithm code
for code in range(len(algorithm_df)):
# Dynamic range for changing instrument ID starting at 1
for ID in range(1, len(instrument_master_df) + 1):
query = 'SELECT * FROM dbo_algorithmforecast AS a, dbo_instrumentstatistics AS b WHERE a.forecastdate = b.date AND' \
' a.instrumentid = %d AND b.instrumentid = %d AND a.algorithmcode = "%s"' % (
ID, ID, algorithm_df['algorithmcode'][code])
df = pd.read_sql_query(query, self.engine)
count = 0
# Calculates accuracy
for x in range((len(df) - 1)):
# Check if upward or downward trend
if (df['close'][x + 1] > df['close'][x] and df['forecastcloseprice'][x + 1] > df['forecastcloseprice'][
x]) or \
(df['close'][x + 1] < df['close'][x] and df['forecastcloseprice'][x + 1] <
df['forecastcloseprice'][
x]):
count += 1
# Populates absolute_percent_error with the calculated percent error for a specific data point
absolute_percent_error = []
for i in range(len(df)):
absolute_percent_error.append(
abs((df['close'].loc[i] - df['forecastcloseprice'].loc[i]) / df['close'].loc[i]))
# Calculate sum of percent error and find average
average_percent_error = 0
for i in absolute_percent_error:
average_percent_error = average_percent_error + i
average_percent_error = average_percent_error / len(df)
# return the average percent error calculated above
print("Average percent error for instrument: %d and algorithm: %s " % (ID, algorithm_df['algorithmcode'][code]), average_percent_error)
#print('Algorithm:', algorithm_df['algorithmcode'][code])
#print('instrumentid: %d' % ID, instrument_master_df['instrumentname'][ID - 1])
#print('length of data is:', len(df))
#print('number correct: ', count)
d = len(df)
b = (count / d) * 100
#print('The accuracy is: %.2f%%\n' % b)
# Isolated tests for ARIMA as we were trying to determine why it was so accurate
def arima_accuracy(self):
query = 'SELECT * FROM dbo_algorithmforecast AS a, dbo_instrumentstatistics AS b WHERE a.forecastdate = b.date AND' \
' a.instrumentid = 1 AND b.instrumentid = 1 AND a.algorithmcode = "ARIMA"'
df = pd.read_sql_query(query, self.engine)
df = df.tail(10)
df = df.reset_index(drop=true)
#print(df)
arima_count = 0
for x in range((len(df) - 1)):
# Check if upward or downward trend
if df['close'][x + 1] > df['close'][x] and df['forecastcloseprice'][x + 1] > df['forecastcloseprice'][x] \
or (df['close'][x + 1] < df['close'][x] and df['forecastcloseprice'][x + 1] < df['forecastcloseprice'][x]):
arima_count += 1
#print(df['close'], df['forecastcloseprice'])
#print(arima_count)
#print(arima_count/len(df))
# Accuracy test for the new function MSF1
def MSF1_accuracy(self):
    # Queries the database to grab all of the Macro Economic Variable codes
query = "SELECT macroeconcode FROM dbo_macroeconmaster WHERE activecode = 'A'"
id = pd.read_sql_query(query, self.engine)
id = id.reset_index(drop=True)
# Queries the database to grab all of the instrument IDs
query = 'SELECT instrumentid FROM dbo_instrumentmaster'
id2 = pd.read_sql_query(query, self.engine)
id2 = id2.reset_index(drop=True)
# These are the date ranges we are working with
# start_date represents the starting date for the forecasts and the end of the training dates
start_date = "'2018-01-01'"
# end_date represents the date for which the forecasting ends
end_date = "'2020-01-01'"
# train_date represents the date we start collecting the instrument statistics used to forecast prices
train_date = "'2016-01-01'"
# Bool to determine whether we append to dbo_tempvisualize or replace the values
to_append = False
# Create a for loop to iterate through all of the instrument ids
for v in id2['instrumentid']:
        # Initializes a list in which we will eventually store all data to add to the macroeconalgorithm database table
data = []
# Data1 will be used to store the forecastdate, instrumentid, forecastprice, and algorithm code
# It will be used to graph our backtested forecast against the actual instrument prices
data1 = []
# Getting Dates for Future Forecast as well as actual close prices for instrumentID#
# We chose 2018 - 2020, to alter this date range simply change the dates in the 3rd line of the query for the dates you want to test on
# Make sure they are valid dates as some instruments only have statistics that go back so far, check the instrument statistic table to figure out how far back each instrument goes
query = "SELECT date, close FROM ( SELECT date, close, instrumentID, ROW_NUMBER() OVER " \
"(PARTITION BY YEAR(date), MONTH(date) ORDER BY DAY(date) DESC) AS rowNum FROM " \
"dbo_instrumentstatistics WHERE instrumentid = {} AND date BETWEEN {} AND {} ) z " \
"WHERE rowNum = 1 AND ( MONTH(z.date) = 3 OR MONTH(z.date) = 6 OR MONTH(z.date) = 9 OR " \
"MONTH(z.date) = 12)".format(v, start_date, end_date)
# instrument_stats will hold the closing prices and the dates for the dates we are forecasting for
instrument_stats = pd.read_sql_query(query, self.engine)
# We isolate the dates and closing prices into individual arrays to make them easier to work with
date = []
close = []
for i in instrument_stats['date']:
date.append(i)
for i in instrument_stats['close']:
close.append(i)
        # n will always correspond to the number of dates, since the number of dates is the number of data points being compared
n = len(date)
# Median_forecast will be a dictionary where the key is the date and the value is a list of forecasted prices
median_forecast = {}
        # This dictionary will be used to easily combine all of the forecasts for different dates to determine the median forecast value
for i in date:
temp = {i: []}
median_forecast.update(temp)
# This query will grab quarterly instrument prices from between 2014 and the current date to be used in the forecasting
query = "SELECT date, close, instrumentid FROM ( SELECT date, close, instrumentid, ROW_NUMBER() OVER " \
"(PARTITION BY YEAR(date), MONTH(date) ORDER BY DAY(date) DESC) AS rowNum FROM " \
"dbo_instrumentstatistics WHERE instrumentid = {} AND date BETWEEN {} AND {} ) z " \
"WHERE rowNum = 1 AND ( MONTH(z.date) = 3 OR MONTH(z.date) = 6 OR MONTH(z.date) = 9 OR " \
"MONTH(z.date) = 12)".format(v, train_date, start_date)
# Executes the query and stores the result in a dataframe variable
df2 = pd.read_sql_query(query, self.engine)
# This for loop iterates through the different macro economic codes to calculate the percent change for each macroeconomic variable
for x in id['macroeconcode']:
# Retrieves the most recent macro economic statistics prior to the date for which we are testing our algorithm
query = "SELECT * FROM dbo_macroeconstatistics WHERE macroeconcode = {} and date <= {} ".format('"' + str(x) + '"', start_date)
df = pd.read_sql_query(query, self.engine)
macro = df.tail(n)
SP = df2.tail(n)
temp = df.tail(n + 1)
temp = temp.reset_index()
            # Converts macro variables to percent change
macroPercentChange = macro
macro = macro.reset_index(drop=True)
SP = SP.reset_index(drop=True)
macroPercentChange = macroPercentChange.reset_index(drop=True)
for i in range(0, n):
if (i == 0):
macrov = (macro['statistics'][i] - temp['statistics'][i]) / temp['statistics'][i]
macroPercentChange['statistics'].iloc[i] = macrov * 100
else:
macrov = (macro['statistics'][i] - macro['statistics'][i - 1]) / macro['statistics'][i - 1]
macroPercentChange['statistics'].iloc[i] = macrov * 100
# Algorithm for forecast price
S = calc(self, macroPercentChange, SP,n) # Calculates the average GDP and S&P values for the given data points over n days and performs operations on GDP average
# isFirst will determine whether or not this is the first calculation being done
# If it is true then we use the most recent instrument statistic to forecast the first pricepoint
# IF it is false then we use the previous forecast price to predict the next forecast price
isFirst = True
# temp_price will be used to hold the previous forecast price for the next prediction
temp_price = 0
# Setup a for loop to calculate the final forecast price and add data to the list variable data
for i in range(n):
if isFirst:
if x in [2, 3, 4]:
temp_price = ((S * (SP['close'].iloc[n-1])) + (SP['close'].iloc[n-1]))
isFirst = False
else:
temp_price = ((S * SP['close'].iloc[n-1]) + SP['close'].iloc[n-1])
isFirst = False
else:
if x in [2, 3, 4]:
temp_price = ((S * temp_price) + temp_price)
else:
temp_price = ((S * temp_price) + temp_price)
# Once the forecast price is calculated append it to median_forecast list
median_forecast[date[i]].append(temp_price)
# Calculates the median value for each date using a list of prices forecasted by each individual macro economic variable
forecast_prices = []
for i in date:
# Sort the forecasted prices based on date
sorted_prices = sorted(median_forecast[i])
# calculate the median forecasted price for each date
if len(sorted_prices) % 2 == 0:
center = int(len(sorted_prices) / 2)
forecast_prices.append((sorted_prices[center] + sorted_prices[center - 1]) / 2)
else:
center = int(len(sorted_prices) / 2)
forecast_prices.append(sorted_prices[center])
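# Note: the block above is a hand-rolled median; an equivalent using the standard
# library would be (a sketch, assuming median_forecast[i] holds plain numbers):
#   import statistics
#   forecast_prices.append(statistics.median(median_forecast[i]))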
# Set up a for loop to construct a list using variables associated with macroeconalgorithm database table
for i in range(len(forecast_prices)):
data.append([date[i], v, 'ALL', forecast_prices[i], close[i], 'MSF1', 0])
data1.append([date[i], v, forecast_prices[i], 'MSF1'])
# Convert data list to dataframe variable
df = pd.DataFrame(data, columns=['forecastdate', 'instrumentid', 'macroeconcode',
'forecastcloseprice', 'close', 'algorithmcode', 'prederror'])
df1 = pd.DataFrame(data1, columns=['forecastdate', 'instrumentid', 'forecastcloseprice', 'algorithmcode'])
df1.to_sql('dbo_tempvisualize', self.engine, if_exists=('replace' if not to_append else 'append'), index=False)
to_append = True
# Populates absolute_percent_error with the calculated percent error for a specific data point
absolute_percent_error = []
for i in range(n):
absolute_percent_error.append(abs((df['close'].loc[i] - df['forecastcloseprice'].loc[i]) / df['close'].loc[i]))
# Calculate sum of percent error and find average
average_percent_error = 0
for i in absolute_percent_error:
average_percent_error = average_percent_error + i
average_percent_error = average_percent_error / n
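# Equivalent vectorized form (a sketch, assuming the first n rows of df belong to
# the instrument currently being evaluated):
#   err = ((df['close'].head(n) - df['forecastcloseprice'].head(n)) / df['close'].head(n)).abs()
#   average_percent_error = err.mean()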
count = 0
# Calculates trend accuracy
for x in range((len(df) - 1)):
# Check if upward or downward trend
if (df['close'][x + 1] > df['close'][x] and df['forecastcloseprice'][x + 1] > df['forecastcloseprice'][
x]) or \
(df['close'][x + 1] < df['close'][x] and df['forecastcloseprice'][x + 1] <
df['forecastcloseprice'][
x]):
count += 1
length = len(df)
trend_error = (count / length) * 100
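# A vectorized sketch of the same trend check (assuming numpy is imported, as it
# is for the weighting search further below):
#   actual_dir = numpy.sign(numpy.diff(df['close'].to_numpy()))
#   forecast_dir = numpy.sign(numpy.diff(df['forecastcloseprice'].to_numpy()))
#   count = int(((actual_dir == forecast_dir) & (actual_dir != 0)).sum())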
print("Trend accuracy for %s for instrument %d is %.2f%%" % ('MSF1', v, trend_error))
print("The average percent error for %s for instrument %d is %.2f%%" % ('MSF1', v, average_percent_error * 100))
# return the average percent error calculated above
# This function is not currently used, it can be used to check the accuracy of MSF2 but will need set weightings
# The functions below this one will test the accuracy using a variety of weightings and choose the weightings with the best results
def MSF2_accuracy(self):
n = 8
#Gets the macroeconomic variable codes and names to loop through the individual macro variables
query = "SELECT macroeconcode, macroeconname FROM dbo_macroeconmaster WHERE activecode = 'A'"
data = pd.read_sql_query(query, self.engine)
macrocodes = []
indicators = {}
for i in range(len(data['macroeconcode'])):
macrocodes.append(data['macroeconcode'].loc[i])
d = {data['macroeconcode'].loc[i]: []}
indicators.update(d)
#Gets the instrument ids to loop through the individual instruments
query = 'SELECT instrumentid, instrumentname FROM dbo_instrumentmaster'
data = pd.read_sql_query(query, self.engine)
instrumentids = []
for i in data['instrumentid']:
instrumentids.append(i)
# These are the date ranges we are working with
# start_date represents the starting date for the forecasts and the end of the training dates
start_date = "'2018-01-01'"
# end_date represents the date for which the forecasting ends
end_date = "'2020-01-01'"
# train_date represents the date we start collecting the instrument statistics used to forecast prices
train_date = "'2016-01-01'"
#Loops through each instrument id to perform error calculations one instrument at a time
for i in instrumentids:
#Gets the instrument statistics to run through the function
query = "SELECT date, close, instrumentid FROM ( SELECT date, close, instrumentID, ROW_NUMBER() OVER " \
"(PARTITION BY YEAR(date), MONTH(date) ORDER BY DAY(date) DESC) AS rowNum FROM " \
"dbo_instrumentstatistics WHERE instrumentid = {} AND date BETWEEN {} AND {} ) z " \
"WHERE rowNum = 1 AND ( MONTH(z.date) = 3 OR MONTH(z.date) = 6 OR MONTH(z.date) = 9 OR " \
"MONTH(z.date) = 12)".format(i, train_date, start_date)
train_data = pd.read_sql_query(query, self.engine)
#Gets the instrument statistics to check against the forecast prices
query = "SELECT date, close, instrumentid FROM ( SELECT date, close, instrumentID, ROW_NUMBER() OVER " \
"(PARTITION BY YEAR(date), MONTH(date) ORDER BY DAY(date) DESC) AS rowNum FROM " \
"dbo_instrumentstatistics WHERE instrumentid = {} AND date BETWEEN {} AND {} ) z " \
"WHERE rowNum = 1 AND ( MONTH(z.date) = 3 OR MONTH(z.date) = 6 OR MONTH(z.date) = 9 OR " \
"MONTH(z.date) = 12)".format(i, start_date, end_date)
check_data = pd.read_sql_query(query, self.engine)
#Gets the dates for the future forecast prices so they match the instrument statistics
dates = []
for l in check_data['date']:
dates.append(str(l))
#Loops through the macro economic variable codes to calculate percent change
for j in macrocodes:
#Retrieves macro economic statistics for each macro variables
query = "SELECT date, statistics, macroeconcode FROM dbo_macroeconstatistics WHERE macroeconcode = {} AND date <= {}".format('"' + j + '"', start_date)
data = pd.read_sql_query(query, self.engine)
# For loop to retrieve macro statistics and calculate percent change
for k in range(n):
temp = data.tail(n + 1)
data = data.tail(n)
if k == 0:
macrov = (data['statistics'].iloc[k] - temp['statistics'].iloc[0]) / temp['statistics'].iloc[0]
indicators[j].append(macrov)
else:
macrov = (data['statistics'].iloc[k] - data['statistics'].iloc[k - 1]) / data['statistics'].iloc[
k - 1]
indicators[j].append(macrov)
#Performs the actual calculations and stores them in an array called calculated_forecast
calculated_forecast = []
for k in range(n):
stat = indicators['GDP'][k] * 1 - (indicators['UR'][k] * 0 + indicators['IR'][k] * .5) - (
indicators['MI'][k] * indicators['MI'][k])
stat = (stat * train_data['close'].iloc[n-1]) + train_data['close'].iloc[n-1]
calculated_forecast.append(stat)
#Creates and inserts the forecast dates, instrument ids, calculated forecast prices, and actual close prices into an array
results = []
for k in range(n):
results.append([dates[k], i, calculated_forecast[k], check_data['close'].loc[k]])
#Creates a dataframe out of the array created above
df = pd.DataFrame(results, columns=['forecastdate', 'instrumentid', 'forecastcloseprice', 'close'])
#print(df)
count = 0
# Calculates accuracy
percent_error = []
temp_error = 0
for x in range((len(df) - 1)):
# Check if upward or downward trend
if (df['close'][x + 1] > df['close'][x] and df['forecastcloseprice'][x + 1] > df['forecastcloseprice'][x]) or \
(df['close'][x + 1] < df['close'][x] and df['forecastcloseprice'][x + 1] < df['forecastcloseprice'][x]):
count += 1
temp_error = abs(df['close'][x] - df['forecastcloseprice'][x]) / df['close'][x]
#Percent Error calculation
temp_error = (df['close'] - df['forecastcloseprice']) / df['close']
absolute_percent_error = [abs(ele) for ele in temp_error]
percent_error.append(absolute_percent_error)
if df['instrumentid'][i] == 1:
gm_temp_error = (df['close'] - df['forecastcloseprice']) / df['close']
gm_absolute_percent_error = [abs(ele) for ele in gm_temp_error]
#Calculate sum of percent error and find average
gm_average_percent_error = sum(gm_absolute_percent_error) / 8
#print("Average percent error of MSF2 on GM stock is: ", gm_average_percent_error * 100, "%")
if df['instrumentid'][i] == 2:
pfe_temp_error = (df['close'] - df['forecastcloseprice']) / df['close']
pfe_absolute_percent_error = [abs(ele) for ele in pfe_temp_error]
#Calculate sum of percent error and find average
pfe_average_percent_error = sum(pfe_absolute_percent_error) / 8
#print("Average percent error of MSF2 on PFE stock is: ", pfe_average_percent_error * 100, "%")
if df['instrumentid'][i] == 3:
spy_temp_error = (df['close'] - df['forecastcloseprice']) / df['close']
spy_absolute_percent_error = [abs(ele) for ele in spy_temp_error]
#Calculate sum of percent error and find average
spy_average_percent_error = sum(spy_absolute_percent_error) / 8
#print("Average percent error of MSF2 on S&P 500 stock is: ", spy_average_percent_error * 100, "%")
if df['instrumentid'][i] == 4:
xph_temp_error = (df['close'] - df['forecastcloseprice']) / df['close']
xph_absolute_percent_error = [abs(ele) for ele in xph_temp_error]
#Calculate sum of percent error and find average
xph_average_percent_error = sum(xph_absolute_percent_error) / 8
#print("Average percent error of MSF2 on XPH stock is: ", xph_average_percent_error * 100, "%")
if df['instrumentid'][i] == 5:
carz_temp_error = (df['close'] - df['forecastcloseprice']) / df['close']
carz_absolute_percent_error = [abs(ele) for ele in carz_temp_error]
#Calculate sum of percent error and find average
carz_average_percent_error = sum(carz_absolute_percent_error) / 8
#print("Average percent error of MSF2 on CARZ index stock is: ", carz_average_percent_error * 100, "%")
if df['instrumentid'][i] == 6:
tyx_temp_error = (df['close'] - df['forecastcloseprice']) / df['close']
tyx_absolute_percent_error = [abs(ele) for ele in tyx_temp_error]
#Calculate sum of percent error and find average
tyx_average_percent_error = sum(tyx_absolute_percent_error) / 8
#print("Average percent error of MSF2 on TYX 30-YR bond is: ", tyx_average_percent_error * 100, "%")
d = len(df)
b = (count / d) * 100
#Prints the trend accuracy
#print('The accuracy for instrument %d: %.2f%%\n' % (i, b))
#Create weightings MSF2 runs the MSF2 algorithm for past dates and compares them to actual instrument prices, generating a percent error calculation
#We then iterate through several different weightings and we compare each percent error for each instrument and determine the weightings with the lowest percent error
def create_weightings_MSF2(self, setWeightings):
# Query to grab the macroeconcodes and macroeconnames from the macroeconmaster database table
query = "SELECT macroeconcode, macroeconname FROM dbo_macroeconmaster WHERE activecode = 'A'"
data = pd.read_sql_query(query, self.engine)
# Query to grab the instrumentid and instrument name from the instrumentmaster database table
query = 'SELECT instrumentid, instrumentname FROM dbo_instrumentmaster'
data1 = pd.read_sql_query(query, self.engine)
# Keys is a dictionary that will be used to store the macro econ code for each macro econ name
keys = {}
for i in range(len(data)):
keys.update({data['macroeconname'].iloc[i]: data['macroeconcode'].iloc[i]})
# ikeys is a dictionary that will be used to store instrument ids for each instrument name
ikeys = {}
for x in range(len(data1)):
ikeys.update({data1['instrumentname'].iloc[x]: data1['instrumentid'].iloc[x]})
#Vars is a dictionary used to store the macro economic variable percent change for each macro economic code
vars = {}
#Vars is only populated with the relevant macro economic variables (GDP, UR, IR, and MI)
for i in data['macroeconcode']:
if (i == 'GDP' or i == 'UR' or i == 'IR' or i == 'MI'):
d = {i: []}
vars.update(d)
#Weightings is used to store the best weightings for each instrument id which is returned to dataforecast and used for actual prediction
weightings = {}
#n represents the number of datapoints we are working with (represented in quarters)
n = 8
# These are the date ranges we are working with
# start_date represents the starting date for the forecasts and the end of the training dates
start_date = "'2018-01-01'"
# end_date represents the date for which the forecasting ends
end_date = "'2020-01-01'"
# train_date represents the date we start collecting the instrument statistics used to forecast prices
train_date = "'2016-01-01'"
# For loop to loop through the macroeconomic codes to calculate the macro economic variable percent change
for i in keys:
# Check to make sure the macroeconcode we are working with is one of the relevant ones
if keys[i] in vars:
# Query to grab the macroeconomic statistics from the database using the relevant macro economic codes
query = "SELECT date, statistics, macroeconcode FROM dbo_macroeconstatistics WHERE macroeconcode = {} AND date <= {}".format(
'"' + keys[i] + '"', start_date)
data = pd.read_sql_query(query, self.engine)
# For loop to retrieve macro statistics and calculate percent change
for j in range(n):
# This will grab the n+1 statistic to use to calculate the percent change to the n statistic
temp = data.tail(n + 1)
# This will grab the most recent n statistics from the query, as we are working only with n points
data = data.tail(n)
# For the first iteration we need to use the n+1th statistic to calculate percent change on the oldest point
if j == 0:
macrov = (data['statistics'].iloc[j] - temp['statistics'].iloc[0]) / temp['statistics'].iloc[0]
vars[keys[i]].append(macrov)
else:
macrov = (data['statistics'].iloc[j] - data['statistics'].iloc[j - 1]) / \
data['statistics'].iloc[j - 1]
vars[keys[i]].append(macrov)
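# Worked example (sketch): if the last three statistics for a code are 100, 110
# and 121 and n = 2, the loop above stores the fractional changes
#   j = 0: (110 - 100) / 100 = 0.10
#   j = 1: (121 - 110) / 110 = 0.10
# in vars[keys[i]].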
# If you are not using set weightings then this if statement will run and create the best fit weightings
if not setWeightings:
# We now iterate through the instrument ids
for x in ikeys:
# This query will grab the quarterly instrument statistics from 2016 to 2018
query = "SELECT date, close, instrumentid FROM ( SELECT date, close, instrumentid, ROW_NUMBER() OVER " \
"(PARTITION BY YEAR(date), MONTH(date) ORDER BY DAY(date) DESC) AS rowNum FROM " \
"dbo_instrumentstatistics WHERE instrumentid = {} AND date BETWEEN {} AND {} ) z " \
"WHERE rowNum = 1 AND ( MONTH(z.date) = 3 OR MONTH(z.date) = 6 OR MONTH(z.date) = 9 OR " \
"MONTH(z.date) = 12)".format(ikeys[x], train_date, start_date)
# Then we execute the query and store the returned values in instrumentStats, and grab the last n stats from the dataframe as we are only using n datapoints
instrumentStats = pd.read_sql_query(query, self.engine)
instrumentStats = instrumentStats.tail(n)
#Best weightings will be used to store the best weightings for each instrument
best_weightings = [0, 0, 0]
#Best avg error will be used to store the best average percent error for each instrument
best_avg_error = -1
#Best trend error will be used to store the best trend error for each instrument
best_trend_error = -1
#Best forecast prices will be used to store the forecast prices for the best weightings to store them in a database for visual comparison later
best_forecast_prices = []
# We now iterate through all 3 different possible weightings
for weight in numpy.arange(-5.7, 2.8, .25):
for uweight in numpy.arange(-3.7, 3.6, .25):
# In order to manipulate the array
import numpy as np
# In order to load mat file
from scipy.io import loadmat
# In order to import the libsvm format dataset
from sklearn.datasets import load_svmlight_file
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import Imputer
from sklearn.preprocessing import Binarizer
from collections import Counter
from fetch.coil_2000 import fetch_coil_2000
from process.coil_2000 import convert_coil_2000
def abalone_19():
# Abalone dataset - Convert the ring = 19 to class 1 and the other to class 0
filename = '../../data/raw/mldata/uci-20070111-abalone.mat'
matfile = loadmat(filename)
sex_array = np.zeros(np.ravel(matfile['int1']).shape[0])
sex_array[np.nonzero(np.ravel(matfile['Sex']) == 'M')] = 0
sex_array[np.nonzero(np.ravel(matfile['Sex']) == 'F')] = 1
sex_array[np.nonzero(np.ravel(matfile['Sex']) == 'I')] = 2
data = np.zeros((np.ravel(matfile['int1']).shape[0], 8))
data[:, 0] = sex_array
data[:, 1::] = matfile['double0'].T
label = np.zeros((np.ravel(matfile['int1']).shape[0], ), dtype=(int))
label[np.nonzero(np.ravel(matfile['int1']) == 19)] = 1
np.savez('../../data/clean/uci-abalone-19.npz', data=data, label=label)
def abalone_7():
# Abalone dataset - Convert the ring = 7 to class 1 and the other to class 0
filename = '../../data/raw/mldata/uci-20070111-abalone.mat'
matfile = loadmat(filename)
sex_array = np.zeros(np.ravel(matfile['int1']).shape[0])
sex_array[np.nonzero(np.ravel(matfile['Sex']) == 'M')] = 0
sex_array[np.nonzero(np.ravel(matfile['Sex']) == 'F')] = 1
sex_array[np.nonzero(np.ravel(matfile['Sex']) == 'I')] = 2
data = np.zeros((np.ravel(matfile['int1']).shape[0], 8))
data[:, 0] = sex_array
data[:, 1::] = matfile['double0'].T
label = np.zeros((np.ravel(matfile['int1']).shape[0], ), dtype=(int))
label[np.nonzero(np.ravel(matfile['int1']) == 7)] = 1
np.savez('../../data/clean/uci-abalone-7.npz', data=data, label=label)
def adult():
# Adult dataset
filename = '../../data/raw/mldata/adult'
tmp_input = np.loadtxt(filename, delimiter = ',', usecols = (0, 2, 4, 10, 11, 12, 14))
data = tmp_input[:, :-1]
label = tmp_input[:, -1].astype(int)
np.savez('../../data/clean/uci-adult.npz', data=data, label=label)
def ecoli():
# ecoli dataset
filename = '../../data/raw/mldata/ecoli.data'
# Load only the 7 continuous attributes
data = np.loadtxt(filename, usecols = (1, 2, 3, 4, 5, 6, 7), dtype=float)
# Get the label
tmp_label = np.loadtxt(filename, usecols = (8, ), dtype=str)
label = np.zeros(tmp_label.shape, dtype=int)
label[np.nonzero(tmp_label == 'imU')] = 1
np.savez('../../data/clean/uci-ecoli.npz', data=data, label=label)
def optical_digits():
# optical digits dataset
filename = '../../data/raw/mldata/optdigits'
# Load the 64 continuous attributes
data = np.loadtxt(filename, delimiter = ',', usecols = tuple(range(64)), dtype=float)
# Get the label
tmp_label = np.loadtxt(filename, delimiter = ',', usecols = (64, ), dtype=int)
label = np.zeros(tmp_label.shape, dtype=int)
label[np.nonzero(tmp_label == 8)] = 1
np.savez('../../data/clean/uci-optical-digits.npz', data=data, label=label)
def sat_image():
# sat image dataset
filename = '../../data/raw/mldata/satimage.scale'
tmp_data, tmp_label = load_svmlight_file(filename)
data = tmp_data.toarray()
label = np.zeros(tmp_label.shape, dtype=int)
label[np.nonzero(tmp_label == 4)] = 1
np.savez('../../data/clean/uci-sat-image.npz', data=data, label=label)
def pen_digits():
# pen digits dataset
filename = '../../data/raw/mldata/pendigits'
tmp_data, tmp_label = load_svmlight_file(filename)
data = tmp_data.toarray()
label = np.zeros(tmp_label.shape, dtype=int)
label[np.nonzero(tmp_label == 5)] = 1
np.savez('../../data/clean/uci-pen-digits.npz', data=data, label=label)
def spectrometer():
# spectrometer dataset
filename = '../../data/raw/mldata/lrs.data'
# Load the 93 continuous attributes (columns 10-102)
data = np.loadtxt(filename, usecols = tuple(range(10, 103)), dtype=float)
# Get the label
tmp_label = np.loadtxt(filename, usecols = (1, ), dtype=int)
label = np.zeros(tmp_label.shape, dtype=int)
label[np.nonzero(tmp_label == 44)] = 1
np.savez('../../data/clean/uci-spectrometer.npz', data=data, label=label)
def balance():
# balance dataset
filename = '../../data/raw/mldata/balance-scale.data'
# Load the 4 continuous attributes
data = np.loadtxt(filename, delimiter= ',', usecols = tuple(range(1, 5)), dtype=float)
# Get the label
tmp_label = np.loadtxt(filename, delimiter = ',', usecols = (0, ), dtype=str)
label = np.zeros(tmp_label.shape, dtype=int)
label[np.nonzero(tmp_label == 'B')] = 1
np.savez('../../data/clean/uci-balance.npz', data=data, label=label)
def car_eval_34():
# car eval dataset
filename = '../../data/raw/mldata/car.data'
tmp_data = np.loadtxt(filename, delimiter = ',', dtype=str)
tmp_label = tmp_data[:, -1]
tmp_data_2 = np.zeros((tmp_data.shape), dtype=int)
# Encode each label with an integer
for f_idx in range(tmp_data.shape[1]):
le = LabelEncoder()
tmp_data_2[:, f_idx] = le.fit_transform(tmp_data[:, f_idx])
# initialise the data
data = np.zeros((tmp_data.shape[0], tmp_data.shape[1] - 1), dtype=float)
label = np.zeros((tmp_data.shape[0], ), dtype=int)
# Push the data
data = tmp_data_2[:, :-1]
label[np.nonzero(tmp_label == 'good')] = 1
label[np.nonzero(tmp_label == 'vgood')] = 1
np.savez('../../data/clean/uci-car-eval-34.npz', data=data, label=label)
def car_eval_4():
# car eval dataset
filename = '../../data/raw/mldata/car.data'
tmp_data = np.loadtxt(filename, delimiter = ',', dtype=str)
tmp_label = tmp_data[:, -1]
tmp_data_2 = np.zeros((tmp_data.shape), dtype=int)
# Encode each label with an integer
for f_idx in range(tmp_data.shape[1]):
le = LabelEncoder()
tmp_data_2[:, f_idx] = le.fit_transform(tmp_data[:, f_idx])
# initialise the data
data = np.zeros((tmp_data.shape[0], tmp_data.shape[1] - 1), dtype=float)
label = np.zeros((tmp_data.shape[0], ), dtype=int)
# Push the data
data = tmp_data_2[:, :-1]
label[np.nonzero(tmp_label == 'vgood')] = 1
np.savez('../../data/clean/uci-car-eval-4.npz', data=data, label=label)
def isolet():
# isolet dataset
filename = '../../data/raw/mldata/isolet.data'
data = np.loadtxt(filename, delimiter = ',', usecols = tuple(range(617)), dtype=float)
tmp_label = np.loadtxt(filename, delimiter = ',', usecols = (617, ), dtype=float)
label = np.zeros(tmp_label.shape, dtype=int)
label[np.nonzero(tmp_label == 1.)] = 1
label[np.nonzero(tmp_label == 2.)] = 1
np.savez('../../data/clean/uci-isolet.npz', data=data, label=label)
import copy as cp
import numpy as np
from scipy.linalg import pinv, eigh
from sklearn.base import TransformerMixin
from mne import EvokedArray
def shrink(cov, alpha):
n = len(cov)
shrink_cov = (1 - alpha) * cov + alpha * np.trace(cov) * np.eye(n) / n
return shrink_cov
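# Sanity-check sketch for the shrinkage above: alpha = 0 returns cov unchanged,
# alpha = 1 returns (trace(cov) / n) * I. For example:
#   c = np.array([[2., 0.], [0., 4.]])
#   shrink(c, 1.0)   # -> [[3., 0.], [0., 3.]], since trace(c) / 2 == 3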
def fstd(y):
y = y.astype(np.float32)
y -= y.mean(axis=0)
y /= y.std(axis=0)
return y
def _get_scale(X, scale):
if scale == 'auto':
scale = 1 / np.mean([[np.trace(y) for y in x] for x in X])
return scale
class ProjIdentitySpace(TransformerMixin):
def __init__(self):
return None
def fit(self, X, y):
return self
def transform(self, X):
return X
class ProjLWSpace(TransformerMixin):
def __init__(self, shrink):
self.shrink = shrink
def fit(self, X, y):
return self
def transform(self, X):
n_sub, n_fb, p, _ = X.shape
Xout = np.empty((n_sub, n_fb, p, p))
for fb in range(n_fb):
for sub in range(n_sub):
Xout[sub, fb] = shrink(X[sub, fb], self.shrink)
return Xout # (sub , fb, compo, compo)
class ProjRandomSpace(TransformerMixin):
def __init__(self, n_compo=71):
self.n_compo = n_compo
def fit(self, X, y):
n_sub, n_fb, n_chan, _ = X.shape
self.filters_ = []
for fb in range(n_fb):
U = np.linalg.svd(np.random.rand(n_chan, n_chan))[0][:self.n_compo]
self.filters_.append(U) # (fb, compo, chan) row vec
return self
def transform(self, X):
n_sub, n_fb, _, _ = X.shape
Xout = np.empty((n_sub, n_fb, self.n_compo, self.n_compo))
for fb in range(n_fb):
filters = self.filters_[fb] # (compo, chan)
for sub in range(n_sub):
Xout[sub, fb] = filters @ X[sub, fb] @ filters.T
return Xout # (sub , fb, compo, compo)
class ProjCommonWassSpace(TransformerMixin):
def __init__(self, n_compo=71):
self.n_compo = n_compo
def fit(self, X, y):
n_sub, n_fb, _, _ = X.shape
self.filters_ = []
for fb in range(n_fb):
covsfb = X[:, fb]
C = mean_covs(covsfb, self.n_compo)
eigvals, eigvecs = eigh(C)
ix = np.argsort(np.abs(eigvals))
"""
Library of simple image processing effects that can be applied to source
images or video
"""
from __future__ import print_function
from __future__ import division
import cv2
import numpy as np
from vidviz.utils import SmoothNoise
class Effect(object):
"""Base class for vid-viz effects"""
def __init__(self, style='effect'):
"""
Args:
style (str): 'effect' | 'postproc'
"""
# set attributes common to all effects
self.name = None
self.type = style
self.props = None
self.max_num_styles = 1
self.auto_play = False
self.style = 0
self.reinitialize = False
self.random_walk = False
self.chan_vec_pos = np.zeros((1, 1))
self.noise = SmoothNoise(
num_samples=1,
num_channels=self.chan_vec_pos.size)
self.update_output = False # boolean for updating screen output
self.inf = 1000
self.none_dict = {
'desc': 'unassigned',
'name': '',
'val': 0,
'init': 0,
'min': 0,
'max': 1,
'mod': self.inf,
'step': 1,
'inc': False,
'dec': False}
def _process_io(self, key_list):
self.update_output = -1
if key_list[ord('-')]:
key_list[ord('-')] = False
self.props[0]['dec'] = True
self.update_output = 0
elif key_list[ord('=')]:
key_list[ord('=')] = False
self.props[0]['inc'] = True
self.update_output = 0
elif key_list[ord('[')]:
key_list[ord('[')] = False
self.props[1]['dec'] = True
self.update_output = 1
elif key_list[ord(']')]:
key_list[ord(']')] = False
self.props[1]['inc'] = True
self.update_output = 1
elif key_list[ord(';')]:
key_list[ord(';')] = False
self.props[2]['dec'] = True
self.update_output = 2
elif key_list[ord('\'')]:
key_list[ord('\'')] = False
self.props[2]['inc'] = True
self.update_output = 2
elif key_list[ord(',')]:
key_list[ord(',')] = False
self.props[3]['dec'] = True
self.update_output = 3
elif key_list[ord('.')]:
key_list[ord('.')] = False
self.props[3]['inc'] = True
self.update_output = 3
elif key_list[ord('R')]:
key_list[ord('R')] = False
self.props[4]['dec'] = True
self.update_output = 4
elif key_list[ord('T')]:
key_list[ord('T')] = False
self.props[4]['inc'] = True
self.update_output = 4
elif key_list[ord('Q')]:
key_list[ord('Q')] = False
self.props[5]['dec'] = True
self.update_output = 5
elif key_list[ord('S')]:
key_list[ord('S')] = False
self.props[5]['inc'] = True
self.update_output = 5
elif key_list[ord('/')]:
key_list[ord('/')] = False
self.reinitialize = True
elif key_list[ord('t')]:
key_list[ord('t')] = False
self.style = (self.style + 1) % self.max_num_styles
# self.reinitialize = True
# elif key_list[ord('a')]:
# key_list[ord('a')] = False
# self.auto_play = not self.auto_play
elif key_list[ord('w')]:
key_list[ord('w')] = False
self.random_walk = not self.random_walk
self.chan_vec_pos = np.zeros(self.chan_vec_pos.shape)
self.noise.reinitialize()
# process options
for index, _ in enumerate(self.props):
if self.props[index]['dec']:
self.props[index]['dec'] = False
self.props[index]['val'] -= self.props[index]['step']
if self.props[index]['inc']:
self.props[index]['inc'] = False
self.props[index]['val'] += self.props[index]['step']
if self.props[index]['mod'] != self.inf:
self.props[index]['val'] = np.mod(
self.props[index]['val'],
self.props[index]['mod'])
self.props[index]['val'] = np.clip(
self.props[index]['val'],
self.props[index]['min'],
self.props[index]['max'])
def process(self, frame, key_list):
raise NotImplementedError
def print_update(self, force=False):
"""Print effect settings to console"""
if self.update_output > -1 or force:
print()
print()
print()
print('%s effect settings' % self.name)
print('keys | min | cur | max | description')
print('-----|-------|-------|-------|-------------')
for index in range(6):
if index == 0:
keys = '-/+'
elif index == 1:
keys = '{/}'
elif index == 2:
keys = ";/'"
elif index == 3:
keys = '</>'
elif index == 4:
keys = 'u/d'
elif index == 5:
keys = 'l/r'
print(' %s | %5g | %5g | %5g | %s' %
(keys,
self.props[index]['min'],
self.props[index]['val'],
self.props[index]['max'],
self.props[index]['desc']))
# print extra updates
print('t - toggle between effect types')
print('w - toggle random walk')
print('/ - reset effect parameters')
print('q - quit %s effect' % self.name)
if self.type == 'effect':
print('~ - enable post-processing edit mode')
print('spacebar - cycle through sources')
elif self.type == 'postproc':
print('tab - reverse processing order')
print('backspace - quit post-processing edit mode')
def reset(self):
for index, _ in enumerate(self.props):
self.props[index]['val'] = self.props[index]['init']
self.style = 0
self.auto_play = False
self.reinitialize = False
self.chan_vec_pos = np.zeros(self.chan_vec_pos.shape)
self.noise.reinitialize()
class Border(Effect):
"""
Manipulate image borders
KEYBOARD INPUTS:
t - toggle between border styles
-/+ - decrease/increase border padding
[/] - decrease/increase zoom
;/' - rotate image left/right
,/. - None
lrud arrows - translate image
/ - reset parameters
backspace - quit border effect
"""
def __init__(self, style='effect'):
super(Border, self).__init__(style=style)
self.name = 'border'
# user option constants
MULT_FACTOR = {
'desc': 'shrink frame and fill border',
'name': 'mult_factor',
'val': 1.0,
'init': 1.0,
'min': 0.01,
'max': 1.0,
'mod': self.inf,
'step': 0.05,
'inc': False,
'dec': False}
ZOOM_FACTOR = {
'desc': 'zoom on original frame',
'name': 'zoom_factor',
'val': 1.0,
'init': 1.0,
'min': 1.0,
'max': 10.0,
'mod': self.inf,
'step': 0.05,
'inc': False,
'dec': False}
ROT_ANGLE = {
'desc': 'rotation on original frame',
'name': 'rot_angle',
'val': 0,
'init': 0,
'min': -self.inf,
'max': self.inf,
'mod': 360,
'step': 5,
'inc': False,
'dec': False}
SHIFT_PIX_VERT = {
'desc': 'vertical shift on original frame',
'name': 'shift_vert',
'val': 0,
'init': 0,
'min': -500,
'max': 500,
'mod': self.inf,
'step': 10,
'inc': False,
'dec': False}
SHIFT_PIX_HORZ = {
'desc': 'horizontal shift on original frame',
'name': 'shift_horz',
'val': 0,
'init': 0,
'min': -500,
'max': 500,
'mod': self.inf,
'step': 10,
'inc': False,
'dec': False}
self.max_num_styles = 3
# combine dicts into a list for easy general access
self.props = [
MULT_FACTOR,
ZOOM_FACTOR,
ROT_ANGLE,
self.none_dict,
SHIFT_PIX_VERT,
SHIFT_PIX_HORZ]
def process(self, frame, key_list, key_lock=False):
# process keyboard input
if not key_lock:
self._process_io(key_list)
if self.reinitialize:
self.reinitialize = False
for index, _ in enumerate(self.props):
self.props[index]['val'] = self.props[index]['init']
# human-readable names
mult_factor = self.props[0]['val']
zoom_factor = self.props[1]['val']
rot_angle = self.props[2]['val']
shift_vert = self.props[4]['val']
shift_horz = self.props[5]['val']
# process image
if len(frame.shape) == 3:
[im_height, im_width, _] = frame.shape
elif len(frame.shape) == 2:
[im_height, im_width] = frame.shape
# rotate
if rot_angle != 0:
rot_mat = cv2.getRotationMatrix2D(
(im_width / 2, im_height / 2),
rot_angle,
1.0)
frame = cv2.warpAffine(
frame,
rot_mat,
(im_width, im_height))
# translate
if shift_horz != 0 or shift_vert != 0:
frame = cv2.warpAffine(
frame,
np.float32([[1, 0, shift_horz],
[0, 1, shift_vert]]),
(im_width, im_height))
# zoom
if zoom_factor > 1.0:
frame = cv2.getRectSubPix(
frame,
(int(im_width / zoom_factor),
int(im_height / zoom_factor)),
(im_width / 2, im_height / 2))
frame = cv2.resize(frame, (im_width, im_height))
# add borders
if self.style == 1:
# resize frame
frame = cv2.resize(
frame, None,
fx=mult_factor,
fy=mult_factor,
interpolation=cv2.INTER_LINEAR)
if mult_factor < 1.0:
# top, bottom, left, right
frame = cv2.copyMakeBorder(
frame,
int(im_height * (1.0 - mult_factor) / 2),
int(im_height * (1.0 - mult_factor) / 2),
int(im_width * (1.0 - mult_factor) / 2),
int(im_width * (1.0 - mult_factor) / 2),
cv2.BORDER_WRAP)
elif self.style == 2:
# resize frame
frame = cv2.resize(
frame, None,
fx=mult_factor,
fy=mult_factor,
interpolation=cv2.INTER_LINEAR)
if mult_factor < 1.0:
# top, bottom, left, right
frame = cv2.copyMakeBorder(
frame,
int(im_height * (1.0 - mult_factor) / 2),
int(im_height * (1.0 - mult_factor) / 2),
int(im_width * (1.0 - mult_factor) / 2),
int(im_width * (1.0 - mult_factor) / 2),
cv2.BORDER_REFLECT)
return frame
class Cell(object):
"""Helper class for Grating class"""
def __init__(self, num_pix_cell, num_pix_cell_half, border_prop,
center, vel, frame_size=[0, 0], use_full_frame=False):
self.num_pix_cell = num_pix_cell
self.num_pix_cell_half = num_pix_cell_half
self.center = center
self.vel = vel
self.num_pix_img_half = None
self.border_prop = None
self.update_border_prop(border_prop)
self.frame_size = frame_size
self.use_full_frame = use_full_frame
def update_border_prop(self, border_prop):
self.border_prop = border_prop
self.num_pix_img_half = \
[int((self.num_pix_cell[0] * (1 - self.border_prop) // 2)),
int((self.num_pix_cell[1] * (1 - self.border_prop) // 2))]
def update_position_lazy(self):
self.center[0] += self.vel[0]
self.center[1] += self.vel[1]
if self.center[0] + self.num_pix_cell_half[0] >= self.frame_size[0]:
self.center[0] = self.num_pix_cell_half[0] + 1
elif self.center[0] - self.num_pix_cell_half[0] <= 0:
self.center[0] = self.frame_size[0] - self.num_pix_cell_half[0] - 1
if self.center[1] + self.num_pix_cell_half[1] >= self.frame_size[1]:
self.center[1] = self.num_pix_cell_half[1] + 1
elif self.center[1] - self.num_pix_cell_half[1] <= 0:
self.center[1] = self.frame_size[1] - self.num_pix_cell_half[1] - 1
def update_position(self):
self.center[0] += self.vel[0]
self.center[1] += self.vel[1]
if self.center[0] + self.num_pix_cell_half[0] >= self.frame_size[0]:
# set position to border
self.center[0] = self.frame_size[0] - self.num_pix_cell_half[0]
# reverse vertical velocity
self.vel[0] *= -1
elif self.center[0] - self.num_pix_cell_half[0] <= 0:
self.center[0] = self.num_pix_cell_half[0] + 1
self.vel[0] *= -1
if self.center[1] + self.num_pix_cell_half[1] >= self.frame_size[1]:
self.center[1] = self.frame_size[1] - self.num_pix_cell_half[1]
self.vel[1] *= -1
elif self.center[1] - self.num_pix_cell_half[1] <= 0:
self.center[1] = self.num_pix_cell_half[1] + 1
self.vel[1] *= -1
def draw(self, frame, background):
if self.use_full_frame:
# render full frame in cell
cell = frame
else:
# render part of frame in cell
cell = cv2.getRectSubPix(
frame,
(self.num_pix_cell[1], self.num_pix_cell[0]),
(self.center[1], self.center[0]))
cell = cv2.resize(
cell,
(2 * self.num_pix_img_half[1] + 1,
2 * self.num_pix_img_half[0] + 1),
interpolation=cv2.INTER_LINEAR)
background[
self.center[0] - self.num_pix_img_half[0]:
self.center[0] + self.num_pix_img_half[0] + 1,
self.center[1] - self.num_pix_img_half[1]:
self.center[1] + self.num_pix_img_half[1] + 1,
:] = cell
return background
class Grating(Effect):
"""
Render image in rectangular cells, the aspect ratio and border thickness of
which are controllable parameters
KEYBOARD INPUTS:
-/+ - decrease/increase border proportion
[/] - None
;/' - None
,/. - None
lrud arrows - decrease/increase number of cells in horz/vert direction
"""
def __init__(self, style='effect'):
super(Grating, self).__init__(style=style)
self.name = 'grating'
border_prop = {
'desc': 'proportion of cell used for border',
'name': 'border_prop',
'val': 0.1,
'init': 0.2,
'min': 0.0,
'max': 1.0,
'mod': self.inf,
'step': 0.02,
'inc': False,
'dec': False}
cells_horz = {
'desc': 'number of cells in horizontal direction',
'name': 'num_cells_horz',
'val': 10,
'init': 10,
'min': 1,
'max': 200,
'mod': self.inf,
'step': 1,
'inc': False,
'dec': False}
cells_vert = {
'desc': 'number of cells in vertical direction',
'name': 'num_cells_vert',
'val': 5,
'init': 5,
'min': 1,
'max': 200,
'mod': self.inf,
'step': 1,
'inc': False,
'dec': False}
self.max_num_styles = 3
# combine dicts into a list for easy general access
self.props = [
border_prop,
self.none_dict,
self.none_dict,
self.none_dict,
cells_vert,
cells_horz]
# user options
self.prev_border_prop = self.props[0]['val']
self.cells = []
self.cell_index = -1 # index into list of cells
def process(self, frame, key_list, key_lock=False):
# update if blur kernel toggled
if key_list[ord('t')]:
self.reinitialize = True
else:
self.reinitialize = False
# process keyboard input
if not key_lock:
self._process_io(key_list)
if self.reinitialize:
self.reinitialize = False
for index, _ in enumerate(self.props):
self.props[index]['val'] = self.props[index]['init']
self.cells = []
# human-readable names
border_prop = self.props[0]['val']
num_cells = [self.props[4]['val'], self.props[5]['val']]
# process image
if len(frame.shape) == 3:
[im_height, im_width, _] = frame.shape
elif len(frame.shape) == 2:
[im_height, im_width] = frame.shape
if self.style == -1:
# original grating style; static vertical and horizontal black bars
# reinitialize cells if number has changed
if num_cells[0] * num_cells[1] != len(self.cells):
# get initial values for cells
num_pix_cell_half = [
(im_height / num_cells[0]) // 2,
(im_width / num_cells[1]) // 2]
num_pix_cell = [
int(2 * num_pix_cell_half[0] + 1),
int(2 * num_pix_cell_half[1] + 1)]
centers = [
[int(val * num_pix_cell[0] + num_pix_cell_half[0] + 1)
for val in range(num_cells[0])],
[int(val * num_pix_cell[1] + num_pix_cell_half[1] + 1)
for val in range(num_cells[1])]]
# shift center points at end
centers[0][-1] = int(im_height - num_pix_cell_half[0] - 1)
centers[1][-1] = int(im_width - num_pix_cell_half[1] - 1)
# build cells
self.cells = []
for h in range(num_cells[0]):
for w in range(num_cells[1]):
self.cells.append(Cell(
num_pix_cell, num_pix_cell_half, border_prop,
[centers[0][h], centers[1][w]], [0, 0]))
# update cells if border prop has changed
if self.prev_border_prop != border_prop:
self.prev_border_prop = border_prop
for _, cell in enumerate(self.cells):
cell.update_border_prop(border_prop)
# update background with frame info
if border_prop == 0.0:
background = frame
elif border_prop == 1.0:
background = np.zeros(shape=frame.shape, dtype=np.uint8)
else:
background = np.zeros(shape=frame.shape, dtype=np.uint8)
# tile background array with image
for _, cell in enumerate(self.cells):
# tile background array with image
background = cell.draw(frame, background)
elif self.style == 0 or self.style == 1 or self.style == 2:
# random horizontal translation effect
# add cells if necessary
while len(self.cells) < int(num_cells[1]):
# get initial values for cells
if self.style == 0:
# render portion of frame in each cell
use_full_frame = False
# num_pix_cell_half = [np.random.randint(10, 30),
# np.random.randint(10, 30)]
num_pix_cell_half = [
np.random.randint(30, 50),
np.random.randint(30, 50)]
# velocity = [0, np.random.randint(2, 20)]
velocity = [
np.random.randint(-20, 20),
np.random.randint(-20, 20)]
elif self.style == 1:
# render full frame in each (larger) cell
use_full_frame = True
num_pix_cell_half = [
np.random.randint(50, 80),
np.random.randint(70, 90)]
velocity = [0, np.random.randint(-10, 10)]
elif self.style == 2:
use_full_frame = True
# num_pix_cell_half = [np.random.randint(50, 80),
# np.random.randint(70, 90)]
num_pix_cell_half = [100, int(100 * 16 / 9)]
# velocity = [np.random.randint(-5, 5),
# np.random.randint(-5, 5)]
velocity = [
np.random.randint(-8, 8),
np.random.randint(-8, 8)]
num_pix_cell = [
int(2 * num_pix_cell_half[0] + 1),
int(2 * num_pix_cell_half[1] + 1)]
# use random portion of frame
lower_height = num_pix_cell_half[0] + 1
upper_height = im_height - num_pix_cell_half[0] - 1
lower_width = num_pix_cell_half[1] + 1
upper_width = im_width - num_pix_cell_half[1] - 1
centers = [np.random.randint(lower_height, upper_height),
np.random.randint(lower_width, upper_width)]
self.cells.append(Cell(
num_pix_cell, num_pix_cell_half, border_prop,
centers, velocity, frame_size=[im_height, im_width],
use_full_frame=use_full_frame))
# delete cells if necessary
while len(self.cells) > int(num_cells[1]):
del self.cells[-1]
# update cells if border prop has changed
if self.prev_border_prop != border_prop:
self.prev_border_prop = border_prop
for _, cell in enumerate(self.cells):
cell.update_border_prop(border_prop)
# update background with frame info
background = np.zeros(shape=frame.shape, dtype=np.uint8)
# tile background array with image
for _, cell in enumerate(self.cells):
# update positions of cells
if self.style == 0 or self.style == 1:
cell.update_position_lazy()
elif self.style == 2:
cell.update_position()
# tile background array with image
background = cell.draw(frame, background)
else:
raise NotImplementedError
return background
def reset(self):
super(Grating, self).reset()
self.cells = []
class AdaptiveThreshold(Effect):
"""
Threshold individual channels in RGB frame
KEYBOARD INPUTS:
t - toggle between threshold types
-/+ - decrease/increase adaptive threshold kernel size
[/] - decrease/increase adaptive threshold offset value
;/' - None
,/. - None
r/g/b - select red/green/blue channel for further processing
0 - selected channel uses original values
1 - selected channel uses all pixels as 0
2 - selected channel uses all pixels as 255
3 - selected channel uses threshold effect
/ - reset parameters
q - quit threshold effect
"""
def __init__(self, style='effect'):
super(AdaptiveThreshold, self).__init__(style=style)
self.name = 'threshold'
# user option constants
THRESH_KERNEL = {
'desc': 'kernel size for adaptive thresholding',
'name': 'kernel_size',
'val': 21,
'init': 21,
'min': 3,
'max': 71,
'mod': self.inf,
'step': 2,
'inc': False,
'dec': False}
THRESH_OFFSET = {
'desc': 'offset constant for adaptive thresholding',
'name': 'offset',
'val': 4,
'init': 4,
'min': 0,
'max': 30,
'mod': self.inf,
'step': 1,
'inc': False,
'dec': False}
self.max_num_styles = 2
# combine dicts into a list for easy general access
self.props = [
THRESH_KERNEL,
THRESH_OFFSET,
self.none_dict,
self.none_dict,
self.none_dict,
self.none_dict]
# user options
self.style = 0
self.reinitialize = False
self.random_walk = True
self.chan_vec_pos = np.zeros((3, 2))
self.noise = SmoothNoise(
num_samples=10,
num_channels=self.chan_vec_pos.size)
# other user options
self.optimize = 1 # skips a smoothing step
self.use_chan = [False, False, False] # rgb channel selector
self.chan_style = [0, 0, 0] # effect selector for each chan
self.MAX_NUM_CHAN_STYLES = 4
# opencv parameters
self.THRESH_TYPE = cv2.THRESH_BINARY
self.ADAPTIVE_THRESH_TYPE = cv2.ADAPTIVE_THRESH_MEAN_C
def process(self, frame, key_list, key_lock=False):
# process keyboard input
if not key_lock:
self._process_io(key_list)
if key_list[ord('b')]:
key_list[ord('b')] = False
self.use_chan[0] = True
self.use_chan[1] = False
self.use_chan[2] = False
elif key_list[ord('g')]:
key_list[ord('g')] = False
self.use_chan[0] = False
self.use_chan[1] = True
self.use_chan[2] = False
elif key_list[ord('r')]:
key_list[ord('r')] = False
self.use_chan[0] = False
self.use_chan[1] = False
self.use_chan[2] = True
if self.reinitialize:
self.reinitialize = False
self.chan_vec_pos = np.zeros((3, 2))
self.noise.reinitialize()
for index, _ in enumerate(self.props):
self.props[index]['val'] = self.props[index]['init']
# human-readable names
kernel_size = self.props[0]['val']
offset = self.props[1]['val']
for chan in range(3):
if self.use_chan[chan]:
for chan_style in range(self.MAX_NUM_CHAN_STYLES):
if key_list[ord(str(chan_style))]:
self.chan_style[chan] = chan_style
key_list[ord(str(chan_style))] = False
# process image
frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
frame_gray = cv2.medianBlur(frame_gray, 11)
frame_thresh = cv2.adaptiveThreshold(
frame_gray,
255,
self.ADAPTIVE_THRESH_TYPE,
self.THRESH_TYPE,
kernel_size,
offset)
if self.style == 0:
self.THRESH_TYPE = cv2.THRESH_BINARY
elif self.style == 1:
self.THRESH_TYPE = cv2.THRESH_BINARY_INV
for chan in range(3):
if self.chan_style[chan] == 1:
frame[:, :, chan] = 0
elif self.chan_style[chan] == 2:
frame[:, :, chan] = 255
elif self.chan_style[chan] == 3:
frame[:, :, chan] = frame_thresh
if not self.optimize:
frame = cv2.medianBlur(frame, 11)
return frame
def reset(self):
super(AdaptiveThreshold, self).reset()
self.use_chan = [False, False, False] # rgb channel selector
self.chan_style = [0, 0, 0] # effect selector for each chan
class SimpleThreshold(Effect):
"""
Threshold individual channels in RGB frame
KEYBOARD INPUTS:
t - toggle threshold type (apply inverse threshold)
-/+ - decrease/increase threshold
[/] - None
;/' - None
,/. - None
r/g/b/a - select red/green/blue/all channels for further processing
/ - reset parameters
q - quit soft threshold effect
"""
def __init__(self, style='effect'):
super(SimpleThreshold, self).__init__(style=style)
self.name = 'soft-threshold'
# user option constants
THRESHOLD = {
'desc': 'threshold value',
'name': 'threshold',
'val': 128,
'init': 128,
'min': 0,
'max': 255,
'mod': self.inf,
'step': 1,
'inc': False,
'dec': False}
self.max_num_styles = 2 # thresh_binary, thresh_binary_inv
# combine dicts into a list for easy general access
self.props = [
THRESHOLD,
self.none_dict,
self.none_dict,
self.none_dict,
self.none_dict,
self.none_dict]
# user options
self.style = 0
self.reinitialize = False
self.random_walk = False
self.chan_vec_pos = np.zeros((3, 2))
self.noise = SmoothNoise(
num_samples=10,
num_channels=self.chan_vec_pos.size)
# other user options
self.use_chan = [False, False, False] # rgb channel selector
self.chan_style = [0, 0, 0] # effect selector for each chan
self.MAX_NUM_CHAN_STYLES = 5
# opencv parameters
self.THRESH_TYPE = cv2.THRESH_BINARY
def process(self, frame, key_list, key_lock=False):
# process keyboard input
if not key_lock:
self._process_io(key_list)
if key_list[ord('b')]:
key_list[ord('b')] = False
self.use_chan[0] = True
self.use_chan[1] = False
self.use_chan[2] = False
elif key_list[ord('g')]:
key_list[ord('g')] = False
self.use_chan[0] = False
self.use_chan[1] = True
self.use_chan[2] = False
elif key_list[ord('r')]:
key_list[ord('r')] = False
self.use_chan[0] = False
self.use_chan[1] = False
self.use_chan[2] = True
elif key_list[ord('a')]:
key_list[ord('a')] = False
self.use_chan[0] = True
self.use_chan[1] = True
self.use_chan[2] = True
if self.reinitialize:
self.reinitialize = False
self.chan_vec_pos = np.zeros((3, 2))
self.noise.reinitialize()
for index, _ in enumerate(self.props):
self.props[index]['val'] = self.props[index]['init']
# human-readable names
threshold = self.props[0]['val']
for chan_style in range(self.MAX_NUM_CHAN_STYLES):
if key_list[ord(str(chan_style))]:
for chan in range(3):
if self.use_chan[chan]:
self.chan_style[chan] = chan_style
key_list[ord(str(chan_style))] = False
# process image
frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
if self.style == 0:
self.THRESH_TYPE = cv2.THRESH_BINARY
elif self.style == 1:
self.THRESH_TYPE = cv2.THRESH_BINARY_INV
_, frame_thresh = cv2.threshold(
frame_gray,
threshold,
255,
self.THRESH_TYPE)
for chan in range(3):
if self.chan_style[chan] == 1:
frame[:, :, chan] = 0
elif self.chan_style[chan] == 2:
frame[:, :, chan] = 255
elif self.chan_style[chan] == 3:
frame[:, :, chan] = frame_thresh
elif self.chan_style[chan] == 4:
frame[:, :, chan] = frame_gray
return frame
def reset(self):
super(SimpleThreshold, self).reset()
self.use_chan = [False, False, False] # rgb channel selector
self.chan_style = [0, 0, 0] # effect selector for each chan
class PowerThreshold(Effect):
"""
Threshold individual channels in RGB frame
KEYBOARD INPUTS:
t - toggle threshold type (apply inverse threshold)
-/+ - decrease/increase threshold
[/] - None
;/' - None
,/. - None
r/g/b/a - select red/green/blue/all channels for further processing
/ - reset parameters
q - quit soft threshold effect
"""
def __init__(self, style='effect'):
super(PowerThreshold, self).__init__(style=style)
self.name = 'power-threshold'
# user option constants
THRESHOLD = {
'desc': 'threshold value',
'name': 'threshold',
'val': 128,
'init': 128,
'min': 0,
'max': 255,
'mod': self.inf,
'step': 5,
'inc': False,
'dec': False}
THRESHOLD_POWER = {
'desc': 'threshold power',
'name': 'threshold_power',
'val': 1,
'init': 1,
'min': 1,
'max': 5,
'mod': self.inf,
'step': 0.05,
'inc': False,
'dec': False}
self.max_num_styles = 2 # thresh_binary, thresh_binary_inv
# combine dicts into a list for easy general access
self.props = [
THRESHOLD,
THRESHOLD_POWER,
self.none_dict,
self.none_dict,
self.none_dict,
self.none_dict]
# user options
self.style = 0
self.reinitialize = False
self.random_walk = False # TODO - walk through power space
self.chan_vec_pos = np.zeros((3, 2))
self.noise = SmoothNoise(
num_samples=10,
num_channels=self.chan_vec_pos.size)
# other user options
self.use_chan = [False, False, False] # rgb channel selector
self.chan_style = [0, 0, 0] # effect selector for each chan
self.MAX_NUM_CHAN_STYLES = 5
# opencv parameters
self.THRESH_TYPE = cv2.THRESH_BINARY
def process(self, frame, key_list, key_lock=False):
# process keyboard input
if not key_lock:
self._process_io(key_list)
if key_list[ord('b')]:
key_list[ord('b')] = False
self.use_chan[0] = True
self.use_chan[1] = False
self.use_chan[2] = False
elif key_list[ord('g')]:
key_list[ord('g')] = False
self.use_chan[0] = False
self.use_chan[1] = True
self.use_chan[2] = False
elif key_list[ord('r')]:
key_list[ord('r')] = False
self.use_chan[0] = False
self.use_chan[1] = False
self.use_chan[2] = True
elif key_list[ord('a')]:
key_list[ord('a')] = False
self.use_chan[0] = True
self.use_chan[1] = True
self.use_chan[2] = True
if self.reinitialize:
self.reinitialize = False
self.chan_vec_pos = np.zeros((3, 2))
import sys
import open3d as o3d
from model import *
from utils import *
import argparse
import random
import numpy as np
import torch
import os
import visdom
sys.path.append("./emd/")
import emd_module as emd
parser = argparse.ArgumentParser()
parser.add_argument('--model', type=str, default = './trained_model/network.pth', help='optional reload model path')
parser.add_argument('--num_points', type=int, default = 8192, help='number of points')
parser.add_argument('--n_primitives', type=int, default = 16, help='number of primitives in the atlas')
parser.add_argument('--env', type=str, default ="MSN_VAL" , help='visdom environment')
opt = parser.parse_args()
print (opt)
network = MSN(num_points = opt.num_points, n_primitives = opt.n_primitives)
network.cuda()
network.apply(weights_init)
vis = visdom.Visdom(port = 8097, env=opt.env) # set your port
if opt.model != '':
network.load_state_dict(torch.load(opt.model))
print("Previous weight loaded ")
network.eval()
with open(os.path.join('./data/val.list')) as file:
model_list = [line.strip().replace('/', '_') for line in file]
partial_dir = "./data/val/"
gt_dir = "./data/complete/"
vis = visdom.Visdom(port = 8097, env=opt.env) # set your port
def resample_pcd(pcd, n):
"""Drop or duplicate points so that pcd has exactly n points"""
idx = np.random.permutation(pcd.shape[0])
if idx.shape[0] < n:
idx = np.concatenate([idx, np.random.randint(pcd.shape[0], size = n - pcd.shape[0])])
return pcd[idx[:n]]
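# Example (sketch): with n = 5, a 3-point cloud keeps all 3 points and pads with
# 2 random duplicates; with n = 2 it keeps a random 2-point subset. This is what
# guarantees fixed-size tensors when stacking clouds below.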
EMD = emd.emdModule()
labels_generated_points = torch.Tensor(range(1, (opt.n_primitives+1)*(opt.num_points//opt.n_primitives)+1)).view(opt.num_points//opt.n_primitives,(opt.n_primitives+1)).transpose(0,1)
labels_generated_points = (labels_generated_points)%(opt.n_primitives+1)
labels_generated_points = labels_generated_points.contiguous().view(-1)
with torch.no_grad():
for i, model in enumerate(model_list):
print(model)
partial = torch.zeros((50, 5000, 3), device='cuda')
gt = torch.zeros((50, opt.num_points, 3), device='cuda')
for j in range(50):
pcd = o3d.io.read_point_cloud(os.path.join(partial_dir, model + '_' + str(j) + '_denoised.pcd'))
partial[j, :, :] = torch.from_numpy(resample_pcd(np.array(pcd.points), 5000))
pcd = o3d.io.read_point_cloud(os.path.join(gt_dir, model + '.pcd'))
gt[j, :, :] = torch.from_numpy(resample_pcd(np.array(pcd.points), opt.num_points))
"""
Classes holding information on global DOFs and mapping of all DOFs -
equations (active DOFs).
Helper functions for the equation mapping.
"""
import numpy as nm
import scipy.sparse as sp
from sfepy.base.base import assert_, Struct, basestr
from sfepy.discrete.functions import Function
from sfepy.discrete.conditions import get_condition_value, EssentialBC, \
PeriodicBC, DGPeriodicBC, DGEssentialBC
def expand_nodes_to_dofs(nods, n_dof_per_node):
"""
Expand DOF node indices into DOFs given a constant number of DOFs
per node.
"""
dofs = nm.repeat(nods, n_dof_per_node)
dofs.shape = (nods.shape[0], n_dof_per_node)
idof = nm.arange(n_dof_per_node, dtype=nm.int32)
dofs = n_dof_per_node * dofs + idof
return dofs
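# Example (sketch): for nods = nm.array([2, 5]) and n_dof_per_node = 3 this
# returns
#   [[ 6,  7,  8],
#    [15, 16, 17]]
# i.e. node i owns DOFs 3*i, 3*i + 1 and 3*i + 2.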
def expand_nodes_to_equations(nods, dof_names, all_dof_names):
"""
Expand vector of node indices to equations (DOF indices) based on
the DOF-per-node count.
DOF names must be already canonized.
Returns
-------
eq : array
The equations/DOF indices in the node-by-node order.
"""
dpn = len(all_dof_names)
nc = len(dof_names)
eq = nm.empty(len(nods) * nc, dtype=nm.int32)
for ii, dof in enumerate(dof_names):
idof = all_dof_names.index(dof)
eq[ii::nc] = dpn * nods + idof
return eq
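# Example (sketch): with all_dof_names = ['u.0', 'u.1', 'u.2'] (so dpn = 3),
# dof_names = ['u.0', 'u.2'] and nods = nm.array([3, 7]), the result is
#   [ 9, 11, 21, 23]
# i.e. the selected DOF indices of node 3 followed by those of node 7.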
def resolve_chains(master_slave, chains):
"""
Resolve EPBC chains - e.g. in corner nodes.
"""
for chain in chains:
slave = chain[-1]
master_slave[chain[:-1]] = slave + 1
master_slave[slave] = - chain[0] - 1 # Any of masters...
def group_chains(chain_list):
"""
Group EPBC chains.
"""
chains = []
while len(chain_list):
chain = set(chain_list.pop(0))
## print ':', chain
ii = 0
while ii < len(chain_list):
c1 = sorted(chain_list[ii])
## print '--', ii, c1, chain
is0 = c1[0] in chain
is1 = c1[1] in chain
if is0 and is1:
chain_list.pop(ii)
elif is0 or is1:
chain.update(c1)
chain_list.pop(ii)
ii = 0
else:
ii += 1
## print ii, chain, chain_list
## print '->', chain
## print chain_list
chains.append(list(chain))
## print 'EPBC chain groups:', chains
aux = {}
for chain in chains:
aux.setdefault(len(chain), [0])[0] += 1
## print 'EPBC chain counts:', aux
return chains
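# Example (sketch): group_chains([[1, 2], [2, 3], [5, 6]]) merges the first two
# pairs through the shared node 2 and returns [[1, 2, 3], [5, 6]] (element order
# inside each group is not guaranteed, since groups are built as sets).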
class DofInfo(Struct):
"""
Global DOF information, i.e. ordering of DOFs of the state (unknown)
variables in the global state vector.
"""
def __init__(self, name):
Struct.__init__(self, name=name)
self.n_var = 0
self.var_names = []
self.n_dof = {}
self.ptr = [0]
self.indx = {}
self.details = {}
def _update_after_append(self, name):
self.ptr.append(self.ptr[-1] + self.n_dof[name])
ii = self.n_var
self.indx[name] = slice(int(self.ptr[ii]), int(self.ptr[ii+1]))
self.n_var += 1
def append_variable(self, var, active=False):
"""
Append DOFs of the given variable.
Parameters
----------
var : Variable instance
The variable to append.
active : bool, optional
When True, only active (non-constrained) DOFs are considered.
"""
name = var.name
if name in self.var_names:
raise ValueError('variable %s already present!' % name)
self.var_names.append(name)
self.n_dof[name], self.details[name] = var.get_dof_info(active=active)
self._update_after_append(name)
def append_raw(self, name, n_dof):
"""
Append raw DOFs.
Parameters
----------
name : str
The name of variable the DOFs correspond to.
n_dof : int
The number of DOFs.
"""
if name in self.var_names:
raise ValueError('variable %s already present!' % name)
self.var_names.append(name)
self.n_dof[name], self.details[name] = n_dof, None
self._update_after_append(name)
def update(self, name, n_dof):
"""
Set the number of DOFs of the given variable.
Parameters
----------
name : str
The name of variable the DOFs correspond to.
n_dof : int
The number of DOFs.
"""
if not name in self.var_names:
raise ValueError('variable %s is not present!' % name)
ii = self.var_names.index(name)
delta = n_dof - self.n_dof[name]
self.n_dof[name] = n_dof
for iv, nn in enumerate(self.var_names[ii:]):
self.ptr[ii+iv+1] += delta
self.indx[nn] = slice(self.ptr[ii+iv], self.ptr[ii+iv+1])
def get_info(self, var_name):
"""
Return information on DOFs of the given variable.
Parameters
----------
var_name : str
The name of the variable.
"""
return Struct(name='%s_dof_info' % var_name,
var_name=var_name,
n_dof=self.n_dof[var_name],
indx=self.indx[var_name],
details=self.details[var_name])
def get_subset_info(self, var_names):
"""
Return global DOF information for selected variables
only. Silently ignores non-existing variable names.
Parameters
----------
var_names : list
The names of the selected variables.
"""
di = DofInfo(self.name + ':subset')
for var_name in var_names:
if var_name not in self.var_names:
continue
di.append_raw(var_name, self.n_dof[var_name])
return di
def get_n_dof_total(self):
"""
Return the total number of DOFs of all state variables.
"""
return self.ptr[-1]
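# Usage sketch (hypothetical names): appending two raw DOF blocks
#   di = DofInfo('di')
#   di.append_raw('u', 6)
#   di.append_raw('p', 2)
# yields di.ptr == [0, 6, 8], di.indx['p'] == slice(6, 8) and
# di.get_n_dof_total() == 8.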
def is_active_bc(bc, ts=None, functions=None):
"""
Check whether the given boundary condition is active in the current
time.
Returns
-------
active : bool
True if the condition `bc` is active.
"""
if (bc.times is None) or (ts is None):
active = True
elif isinstance(bc.times, list):
for tt in bc.times:
if tt[0] <= ts.time < tt[1]:
active = True
break
else:
active = False
else:
if isinstance(bc.times, basestr):
if functions is not None:
fun = functions[bc.times]
else:
raise ValueError('no functions given for bc %s!' % bc.name)
elif isinstance(bc.times, Function):
fun = bc.times
else:
raise ValueError('unknown times type! (%s)'
% type(bc.times))
active = fun(ts)
return active
class EquationMap(Struct):
"""
Map all DOFs to equations for active DOFs.
"""
def __init__(self, name, dof_names, var_di):
Struct.__init__(self, name=name, dof_names=dof_names, var_di=var_di)
self.dpn = len(self.dof_names)
self.eq = nm.arange(var_di.n_dof, dtype=nm.int32)
self.n_dg_ebc = 0
self.dg_ebc_names = {}
self.dg_ebc = {}
self.dg_ebc_val = {}
self.n_dg_epbc = 0
self.dg_epbc_names = []
self.dg_epbc = []
def _init_empty(self, field):
self.val_ebc = nm.empty((0,), dtype=field.dtype)
if field.get('unused_dofs') is None:
self.eqi = nm.arange(self.var_di.n_dof, dtype=nm.int32)
else:
self._mark_unused(field)
self.eqi = nm.compress(self.eq >= 0, self.eq)
self.eq[self.eqi] = nm.arange(self.eqi.shape[0], dtype=nm.int32)
self.eq_ebc = nm.empty((0,), dtype=nm.int32)
self.master = nm.empty((0,), dtype=nm.int32)
self.slave = nm.empty((0,), dtype=nm.int32)
self.n_eq = self.eqi.shape[0]
self.n_ebc = self.eq_ebc.shape[0]
self.n_epbc = self.master.shape[0]
def _mark_unused(self, field):
unused_dofs = field.get('unused_dofs')
if unused_dofs is not None:
unused = expand_nodes_to_equations(field.unused_dofs,
self.dof_names, self.dof_names)
self.eq[unused] = -3
def map_equations(self, bcs, field, ts, functions, problem=None,
warn=False):
"""
Create the mapping of active DOFs from/to all DOFs.
Parameters
----------
bcs : Conditions instance
The Dirichlet or periodic boundary conditions (single
condition instances). The dof names in the conditions must
already be canonized.
field : Field instance
The field of the variable holding the DOFs.
ts : TimeStepper instance
The time stepper.
functions : Functions instance
The registered functions.
problem : Problem instance, optional
The problem that can be passed to user functions as a context.
warn : bool, optional
If True, warn about BC on non-existent nodes.
Returns
-------
active_bcs : set
The set of boundary conditions active in the current time.
Notes
-----
- Periodic bc: master and slave DOFs must belong to the same
field (variables can differ, though).
"""
if bcs is None:
self._init_empty(field)
return set()
eq_ebc = nm.zeros((self.var_di.n_dof,), dtype=nm.int32)
val_ebc = nm.zeros((self.var_di.n_dof,), dtype=field.dtype)
master_slave = nm.zeros((self.var_di.n_dof,), dtype=nm.int32)
chains = []
active_bcs = set()
for bc in bcs:
# Skip conditions that are not active in the current time.
if not is_active_bc(bc, ts=ts, functions=functions):
continue
active_bcs.add(bc.key)
if isinstance(bc, DGEssentialBC):
ntype = "DGEBC"
region = bc.region
elif isinstance(bc, DGPeriodicBC):
ntype = "DGEPBC"
region = bc.regions[0]
elif isinstance(bc, EssentialBC):
ntype = 'EBC'
region = bc.region
elif isinstance(bc, PeriodicBC):
ntype = 'EPBC'
region = bc.regions[0]
if warn:
clean_msg = ('warning: ignoring nonexistent %s node (%s) in '
% (ntype, self.var_di.var_name))
else:
clean_msg = None
# Get master region nodes.
master_nod_list = field.get_dofs_in_region(region)
if len(master_nod_list) == 0:
continue
if ntype == 'EBC': # EBC.
dofs, val = bc.dofs
##
# Evaluate EBC values.
fun = get_condition_value(val, functions, 'EBC', bc.name)
if isinstance(fun, Function):
aux = fun
fun = lambda coors: aux(ts, coors,
bc=bc, problem=problem)
nods, vv = field.set_dofs(fun, region, len(dofs), clean_msg)
eq = expand_nodes_to_equations(nods, dofs, self.dof_names)
# Duplicates removed here...
eq_ebc[eq] = 1
if vv is not None: val_ebc[eq] = nm.ravel(vv)
elif ntype == "DGEBC":
dofs, val = bc.dofs
##
# Evaluate EBC values.
fun = get_condition_value(val, functions, 'EBC', bc.name)
if isinstance(fun, Function):
aux = fun
fun = lambda coors: aux(ts, coors,
bc=bc, problem=problem)
values = field.get_bc_facet_values(fun, region, diff=bc.diff)
bc2bfi = field.get_bc_facet_idx(region)
self.dg_ebc_val.setdefault(bc.diff, []).append(values)
self.dg_ebc.setdefault(bc.diff, []).append(bc2bfi)
self.n_dg_ebc += 1
elif ntype == "DGEPBC":
# ensure matching boundaries?
master_bc2bfi = field.get_bc_facet_idx(region)
slave_bc2bfi = field.get_bc_facet_idx(bc.regions[1])
self.dg_epbc.append((master_bc2bfi, slave_bc2bfi))
self.n_dg_epbc += 1
else: # EPBC.
region = bc.regions[1]
slave_nod_list = field.get_dofs_in_region(region)
nmaster = nm.unique(master_nod_list)
# Treat fields not covering the whole domain.
if nmaster[0] == -1:
nmaster = nmaster[1:]
nslave = nm.unique(slave_nod_list)
# Treat fields not covering the whole domain.
if nslave[0] == -1:
nslave = nslave[1:]
## print nmaster + 1
## print nslave + 1
if nmaster.shape != nslave.shape:
msg = 'EPBC list lengths do not match!\n(%s,\n %s)' %\
(nmaster, nslave)
raise ValueError(msg)
if (nmaster.shape[0] == 0) and (nslave.shape[0] == 0):
continue
mcoor = field.get_coor(nmaster)
scoor = field.get_coor(nslave)
fun = get_condition_value(bc.match, functions, 'EPBC', bc.name)
if isinstance(fun, Function):
i1, i2 = fun(mcoor, scoor)
else:
i1, i2 = fun
## print nm.c_[mcoor[i1], scoor[i2]]
## print nm.c_[nmaster[i1], nslave[i2]] + 1
meq = expand_nodes_to_equations(nmaster[i1], bc.dofs[0],
self.dof_names)
seq = expand_nodes_to_equations(nslave[i2], bc.dofs[1],
self.dof_names)
m_assigned =
|
nm.where(master_slave[meq] != 0)
|
numpy.where
|
# -*- coding: utf-8 -*-
########################################################
### estimate mutual information (dependency) between ###
### feature vectors with different search radius for ###
### local feature estimation and target ###
########################################################
import numpy as np
from sklearn import preprocessing
from sklearn.feature_selection import mutual_info_classif
import os
import matplotlib.pyplot as plt
base_path = "/media/shao/TOSHIBA EXT/data_object_velodyne/feature_matrix_with_label/train"
# calculate and save statistics about every label #
def get_samples_num ():
read_path = os.path.join(base_path, "data")
filelist = os.listdir(read_path)
for file in filelist:
read_file = os.path.join(read_path, file)
if os.path.isdir(read_file):
continue
dataset =
|
np.loadtxt(read_file)
|
numpy.loadtxt
|
# general libraries
import warnings
import numpy as np
# image processing libraries
from scipy import ndimage, interpolate, fft, signal
from scipy.optimize import fsolve
from skimage.feature import match_template
from skimage.transform import radon
from skimage.measure import ransac
from sklearn.cluster import KMeans
from ..generic.filtering_statistical import make_2D_Gaussian, mad_filtering
from ..generic.handler_im import get_grad_filters
from ..preprocessing.shadow_transforms import pca
from .matching_tools import get_integer_peak_location
def is_estimate_away_from_border(C, i, j, ds=1):
if np.abs(i)+ds >= (C.shape[0]+1) // 2 or \
np.abs(j)+ds >= (C.shape[1]+1) // 2:
verdict = False
else:
verdict = True
return verdict
# sub-pixel localization of the correlation peak
def get_top_moment(C, ds=1, top=np.array([])):
""" find location of highest score through bicubic fitting
Parameters
----------
C : np.array, size=(_,_)
similarity surface
ds : integer, default=1
size of the radius to use neighboring information
top : np.array, size=(1,2)
location of the maximum score
Returns
-------
ddi : float
estimated subpixel location on the vertical axis of the peak
ddj : float
estimated subpixel location on the horizontal axis of the peak
i_int : integer
location of highest score on the vertical axis
j_int : integer
location of highest score on the horizontal axis
Notes
-----
[1] <NAME> et al. "A subpixel registration algorithm for low PSNR images"
IEEE international conference on advanced computational intelligence,
pp. 626-630, 2012.
[2] Messerli & Grinstad, "Image georectification and feature tracking
toolbox: ImGRAFT" Geoscientific instrumentation, methods and data systems,
vol. 4(1) pp. 23-34, 2015.
"""
(subJ,subI) = np.meshgrid(np.linspace(-ds,+ds, 2*ds+1), np.linspace(-ds,+ds, 2*ds+1))
subI = subI.ravel()
subJ = subJ.ravel()
if top.size==0:
# find highest score
di,dj,max_corr,snr = get_integer_peak_location(C)
else:
di, dj = top[0], top[1]
i_int,j_int = np.copy(di), np.copy(dj)
if is_estimate_away_from_border(C,di,dj,ds): # estimate sub-pixel top
idx_mid = int(np.floor((2.*ds+1)**2/2))
i_sub = C.shape[0]//2 + np.arange(-ds,+ds+1) + di
j_sub = C.shape[1]//2 + np.arange(-ds,+ds+1) + dj
try:
Csub = C[i_sub[:,None], j_sub[None,:]].ravel()
except IndexError:
print('something went wrong while extracting the sub-window')
Csub = Csub - np.mean(np.hstack((Csub[0:idx_mid],Csub[idx_mid+1:])))
IN = Csub>0
m = np.array([ np.divide(np.sum(subI[IN]*Csub[IN]), np.sum(Csub[IN])) ,
np.divide(np.sum(subJ[IN]*Csub[IN]), np.sum(Csub[IN]))])
ddi, ddj = m[0], m[1]
else: # top at the border
ddi, ddj = 0, 0
return ddi,ddj, i_int,j_int
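# Note (editorial): the moment estimate above is the intensity-weighted mean
# offset m = sum(s * C(s)) / sum(C(s)) over the (2*ds+1)**2 window, taken after
# subtracting the mean of the neighbours and keeping only positive residuals.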
def get_top_blue(C, ds=1): # todo
""" find top of correlation peak through best linear unbiased estimation
"""
(subJ,subI) = np.meshgrid(np.linspace(-ds,+ds, 2*ds+1), np.linspace(-ds,+ds, 2*ds+1))
subI = subI.ravel()
subJ = subJ.ravel()
# find highest score
y,x,max_corr,snr = get_integer_peak_location(C)
# estimate Jacobian
H_x = np.array([[-17., 0., 17.],
[-61., 0., 61.],
[-17., 0., 17.]]) / 95
# estimate Hessian
H_xx = 8 / np.array([[105, -46, 105],
[ 50, -23, 50],
[ 105, -46, 105]] )
H_xy = 11 / np.array([[-114, np.inf, +114],
[np.inf, np.inf, np.inf],
[+114, np.inf, -114]] )
# estimate sub-pixel top
Csub = C[y-ds:y+ds+1,x-ds:x+ds+1]
Jac = np.array([[Csub*H_x], [Csub*H_x.T]])
Hes = np.array([[Csub*H_xx , Csub*H_xy],
[Csub*H_xy.T, Csub*H_xx.T]]
)
m0 = np.array([[x], [y]]) - np.linalg.inv(Hes) * Jac
return m0[0], m0[1]
def get_top_gaussian(C, top=np.array([])):
""" find location of highest score through 1D gaussian fit
Parameters
----------
C : np.array, size=(_,_)
similarity surface
top : np.array, size=(1,2)
location of the maximum score
Returns
-------
ddi : float
estimated subpixel location on the vertical axis of the peak
ddj : float
estimated subpixel location on the horizontal axis of the peak
i_int : integer
location of highest score on the vertical axis
j_int : integer
location of highest score on the horizontal axis
Notes
-----
[1] Willert & Gharib, "Digital particle image velocimetry", Experiments
in fluids, vol.10 pp.181-193, 1991.
[2] <NAME>, "A Study of sub-pixel motion estimation using
phase correlation" Proceeding of the British machine vision conference,
pp.387-396, 2006.
[3] Raffel et al. "Particle Image Velocimetry" Ch.6 pp.184 2018.
"""
if top.size==0: # find highest score
di,dj,max_corr,snr = get_integer_peak_location(C)
else:
di, dj = top[0], top[1]
i_int,j_int = np.copy(di), np.copy(dj)
di += C.shape[0]//2 # using a central coordinate system
dj += C.shape[1]//2
if is_estimate_away_from_border(C, i_int, j_int): # estimate sub-pixel along each axis
ddi = (np.log(C[di+1,dj]) - np.log(C[di-1,dj])) / \
(2*( (2*np.log(C[di,dj])) -np.log(C[di-1,dj]) -np.log(C[di+1,dj])))
ddj = (np.log(C[di,dj+1]) - np.log(C[di,dj-1])) / \
(2*( (2*np.log(C[di,dj])) -np.log(C[di,dj-1]) -np.log(C[di,dj+1])))
else:
ddi, ddj = 0, 0
return ddi,ddj, i_int,j_int
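# Worked example (illustrative numbers only): for samples C[-1], C[0], C[+1] =
# 0.5, 1.0, 0.9 the Gaussian fit gives
#   dd = (ln 0.9 - ln 0.5) / (2*(2*ln 1.0 - ln 0.5 - ln 0.9)) ~ +0.37,
# i.e. the peak is shifted towards the larger neighbour, as expected.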
def get_top_centroid(C, top=np.array([])):
""" find location of highest score through 1D centorid fit
Parameters
----------
C : np.array, size=(_,_)
similarity surface
top : np.array, size=(1,2)
location of the maximum score
Returns
-------
ddi : float
estimated subpixel location on the vertical axis of the peak
ddj : float
estimated subpixel location on the horizontal axis of the peak
i_int : integer
location of highest score on the vertical axis
j_int : integer
location of highest score on the horizontal axis
Notes
-----
[1] Raffel et al. "Particle Image Velocimetry" Ch.6 pp.184, 2018.
"""
if top.size==0: # find highest score
di,dj,max_corr,snr = get_integer_peak_location(C)
else:
di, dj = top[0], top[1]
i_int,j_int = np.copy(di), np.copy(dj)
di += C.shape[0]//2 # using a central coordinate system
dj += C.shape[1]//2
if is_estimate_away_from_border(C, i_int, j_int): # estimate sub-pixel along each axis
ddi = ((i_int-1)*C[di-1,dj] + i_int*C[di,dj] + (i_int+1)*C[di+1,dj]) / \
(C[di-1,dj] + C[di,dj] + C[di+1,dj])
ddj = ((j_int-1)*C[di,dj-1] + j_int*C[di,dj] + (j_int+1)*C[di,dj+1]) / \
(C[di,dj-1] + C[di,dj] + C[di,dj+1])
ddi -= i_int
ddj -= j_int
else:
ddi, ddj = 0, 0
return ddi,ddj, i_int,j_int
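# Worked example (illustrative numbers only): with C[-1], C[0], C[+1] =
# 0.5, 1.0, 0.9 the centroid reduces to (C[+1] - C[-1]) / (C[-1] + C[0] + C[+1])
# = 0.4 / 2.4 ~ +0.17, a more conservative shift than the Gaussian fit above.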
def get_top_mass(C, top=np.array([])):
""" find location of highest score through 1D center of mass
Parameters
----------
C : np.array, size=(_,_)
similarity surface
top : np.array, size=(1,2)
location of the maximum score
Returns
-------
ddi : float
estimated subpixel location on the vertical axis of the peak
ddj : float
estimated subpixel location on the horizontal axis of the peak
i_int : integer
location of highest score on the vertical axis
j_int : integer
location of highest score on the horizontal axis
Notes
-----
[1] Fisher & Naidu, "A Comparison of algorithms for subpixel peak
detection" in Image Technology - Advances in image processing, multimedia
and machine vision pp.385-404, 1996.
"""
if top.size==0: # find highest score
di,dj,_,_ = get_integer_peak_location(C)
else:
di, dj = top[0], top[1]
i_int,j_int = np.copy(di), np.copy(dj)
di += C.shape[0]//2 # using a central coordinate system
dj += C.shape[1]//2
if is_estimate_away_from_border(C, i_int, j_int): # estimate sub-pixel along each axis
ddi = (C[di+1,dj] - C[di-1,dj]) / \
( C[di-1,dj] + C[di,dj] + C[di+1,dj])
ddj = (C[di,dj+1] - C[di,dj-1]) / \
( C[di,dj-1] + C[di,dj] + C[di,dj+1])
else:
ddi, ddj = 0, 0
return ddi,ddj, i_int,j_int
def get_top_blais(C, top=np.array([])):
""" find location of highest score through forth order filter
Parameters
----------
C : np.array, size=(_,_)
similarity surface
top : np.array, size=(1,2)
location of the maximum score
Returns
-------
ddi : float
estimated subpixel location on the vertical axis of the peak
ddj : float
estimated subpixel location on the horizontal axis of the peak
i_int : integer
location of highest score on the vertical axis
j_int : integer
location of highest score on the horizontal axis
Notes
-----
[1] Blais & Rioux, "Real-time numerical peak detector" Signal processing
vol.11 pp.145-155, 1986.
"""
if top.size==0: # find highest score
di,dj,_,_ = get_integer_peak_location(C)
else:
di, dj = top[0], top[1]
i_int,j_int = np.copy(di), np.copy(dj)
di += C.shape[0]//2 # using a central coordinate system
dj += C.shape[1]//2
if is_estimate_away_from_border(C, i_int, j_int, ds=3): # estimate sub-pixel along each axis
if C[di+1,dj]>C[di-1,dj]:
gx_0 = C[di-2,dj] + C[di-1,dj] - C[di+1,dj] - C[di+2,dj]
gx_1 = C[di-1,dj] + C[di-0,dj] - C[di+2,dj] - C[di+3,dj]
ddi = (gx_0/(gx_0 - gx_1))
else:
gx_0 = C[di-2,dj] + C[di-1,dj] - C[di+1,dj] - C[di+2,dj]
gx_1 = C[di-3,dj] + C[di-2,dj] - C[di+0,dj] - C[di+1,dj]
ddi = (gx_1/(gx_1 - gx_0)) -1
if C[di,dj+1]>C[di,dj-1]:
gx_0 = C[di,dj-2] + C[di,dj-1] - C[di,dj+1] - C[di,dj+2]
gx_1 = C[di,dj-1] + C[di,dj-0] - C[di,dj+2] - C[di,dj+3]
ddj = (gx_0/(gx_0 - gx_1))
else:
gx_0 = C[di,dj-2] + C[di,dj-1] - C[di,dj+1] - C[di,dj+2]
gx_1 = C[di,dj-3] + C[di,dj-2] - C[di,dj+0] - C[di,dj+1]
ddj = (gx_1/(gx_1 - gx_0)) -1
else:
ddi, ddj = 0, 0
return ddi,ddj, i_int,j_int
def get_top_parabolic(C, top=np.array([])):
""" find location of highest score through 1D parabolic fit
Parameters
----------
C : np.array, size=(_,_)
similarity surface
top : np.array, size=(1,2)
location of the maximum score
Returns
-------
ddi : float
estimated subpixel location on the vertical axis of the peak
ddj : float
estimated subpixel location on the horizontal axis of the peak
i_int : integer
location of highest score on the vertical axis
j_int : integer
location of highest score on the horizontal axis
Notes
-----
[1] Argyriou & Vlachos, "A Study of sub-pixel motion estimation using
phase correlation" Proceeding of the British machine vision conference,
pp. 387-396), 2006.
[2] Raffel et al. "Particle Image Velocimetry" Ch.6 pp.184 2018.
"""
if top.size==0: # find highest score
di,dj,_,_ = get_integer_peak_location(C)
else:
di, dj = top[0], top[1]
i_int,j_int = np.copy(di), np.copy(dj)
di += C.shape[0]//2 # using a central coordinate system
dj += C.shape[1]//2
if is_estimate_away_from_border(C, i_int, j_int): # estimate sub-pixel along each axis
ddi = (C[di+1,dj] - C[di-1,dj]) / \
(2*( (2*C[di,dj]) -C[di-1,dj] -C[di+1,dj]))
ddj = (C[di,dj+1] - C[di,dj-1]) / \
(2*( (2*C[di,dj]) -C[di,dj-1] -C[di,dj+1]))
else:
ddi, ddj = 0, 0
return ddi,ddj, i_int,j_int
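# Worked example (illustrative numbers only): with C[-1], C[0], C[+1] =
# 0.5, 1.0, 0.9 the parabola vertex lies at
#   dd = (0.9 - 0.5) / (2*(2*1.0 - 0.5 - 0.9)) = 0.4 / 1.2 ~ +0.33.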
def get_top_equiangular(C, top=np.array([])):
""" find location of highest score along each axis by equiangular line
Parameters
----------
C : np.array, size=(_,_)
similarity surface
top : np.array, size=(1,2)
location of the crossing
Returns
-------
ddi : float
estimated subpixel location on the vertical axis of the peak
ddj : float
estimated subpixel location on the horizontal axis of the peak
i_int : integer
location of highest score on the vertical axis
j_int : integer
location of highest score on the horizontal axis
Notes
-----
[1] Shimizu & Okutomi. "Sub-pixel estimation error cancellation on
area-based matching" International journal of computer vision, vol. 63(3),
pp.207–224, 2005.
"""
if top.size==0: # find highest score
di,dj,_,_ = get_integer_peak_location(C)
else:
di, dj = top[0], top[1]
i_int,j_int = np.copy(di), np.copy(dj)
di += C.shape[0]//2 # using a central coordinate system
dj += C.shape[1]//2
if is_estimate_away_from_border(C, i_int, j_int): # estimate sub-pixel along each axis
if C[di+1,dj]<C[di-1,dj]:
ddi = .5* (C[di+1,dj]-C[di-1,dj])/(C[di,dj]-C[di-1,dj])
else:
ddi = .5* (C[di+1,dj]-C[di-1,dj])/(C[di,dj]-C[di+1,dj])
if C[di,dj+1]<C[di,dj-1]:
ddj = .5* (C[di,dj+1]-C[di,dj-1])/(C[di,dj]-C[di,dj-1])
else:
ddj = .5* (C[di,dj+1]-C[di,dj-1])/(C[di,dj]-C[di,dj+1])
else:
ddi, ddj = 0, 0
return ddi, ddj, i_int,j_int
def get_top_birchfield(C, top=np.array([])):
""" find location of highest score along each axis
Parameters
----------
C : np.array, size=(_,_)
similarity surface
top : np.array, size=(1,2)
location of the maximum score
Returns
-------
ddi : float
estimated subpixel location on the vertical axis of the peak
ddj : float
estimated subpixel location on the horizontal axis of the peak
i_int : integer
location of highest score on the vertical axis
j_int : integer
location of highest score on the horizontal axis
Notes
-----
[1] Birchfield & Tomasi. "Depth discontinuities by pixel-to-pixel stereo"
International journal of computer vision, vol. 35(3), pp. 269-293, 1999.
"""
if top.size==0: # find highest score
di,dj,_,_ = get_integer_peak_location(C)
else:
di, dj = top[0], top[1]
i_int,j_int = np.copy(di), np.copy(dj)
di += C.shape[0]//2 # using a central coordinate system
dj += C.shape[1]//2
if is_estimate_away_from_border(C, i_int, j_int): # estimate sub-pixel along each axis
I_m,I_p = .5*(C[di-1,dj] + C[di,dj]), .5*(C[di+1,dj] + C[di,dj])
I_min = np.amin([I_m, I_p, C[di,dj]])
I_max = np.amax([I_m, I_p, C[di,dj]])
# swapped, since Birchfield uses dissimilarity
ddi = np.amax([0, I_max-C[di,dj], C[di,dj]-I_min])
I_m,I_p = .5*(C[di,dj-1] + C[di,dj]), .5*(C[di,dj+1] + C[di,dj])
I_min = np.amin([I_m, I_p, C[di,dj]])
I_max = np.amax([I_m, I_p, C[di,dj]])
ddj = np.amax([0, I_max-C[di,dj], C[di,dj]-I_min])
else:
ddi, ddj = 0, 0
return ddi, ddj, i_int,j_int
def get_top_ren(C, top=np.array([])):
""" find location of highest score
Parameters
----------
C : np.array, size=(_,_)
similarity surface
top : np.array, size=(1,2)
location of the maximum score
Returns
-------
ddi : float
estimated subpixel location on the vertical axis of the peak
ddj : float
estimated subpixel location on the horizontal axis of the peak
i_int : integer
location of highest score on the vertical axis
j_int : integer
location of highest score on the horizontal axis
Notes
-----
[1] Ren et al. "High-accuracy sub-pixel motion estimation from noisy
images in Fourier domain." IEEE transactions on image processing,
vol. 19(5) pp. 1379-1384, 2010.
"""
if top.size==0: # find highest score
di,dj,_,_ = get_integer_peak_location(C)
else:
di, dj = top[0], top[1]
i_int,j_int = np.copy(di), np.copy(dj)
di += C.shape[0]//2 # using a central coordinate system
dj += C.shape[1]//2
if is_estimate_away_from_border(C, i_int, j_int): # estimate sub-pixel along each axis
D_i = C[di+1,dj] - C[di-1,dj]
ddi = np.sign(D_i)/(1 + ( C[di,dj] / np.abs(D_i) ))
D_j = C[di,dj+1] - C[di,dj-1]
ddj = np.sign(D_j)/(1 + ( C[di,dj] / np.abs(D_j) ))
else:
ddi, ddj = 0, 0
return ddi,ddj, i_int,j_int
def get_top_triangular(C, top=np.array([])):
""" find location of highest score through triangular fit
Parameters
----------
C : np.array, size=(_,_)
similarity surface
top : np.array, size=(1,2)
location of the maximum score
Returns
-------
ddi : float
estimated subpixel location on the vertical axis of the peak
ddj : float
estimated subpixel location on the horizontal axis of the peak
i_int : integer
location of highest score on the vertical axis
j_int : integer
location of highest score on the horizontal axis
Notes
-----
[1] <NAME>, "Real-time vergence control for binocular robots"
International journal of computer vision, vol. 7(1), pp. 67-89, 1991.
"""
if top.size==0: # find highest score
di,dj,_,_ = get_integer_peak_location(C)
else:
di, dj = top[0], top[1]
i_int,j_int = np.copy(di), np.copy(dj)
di += C.shape[0]//2 # using a central coordinate system
dj += C.shape[1]//2
if is_estimate_away_from_border(C, i_int, j_int): # estimate sub-pixel along each axis
I_m,I_p = C[di-1,dj], C[di+1,dj]
I_min,I_max = np.amin([I_m, I_p]), np.amax([I_m, I_p])
I_sign = 2*(I_p>I_m)-1
ddi = I_sign * (1- (I_max-I_min)/(C[di,dj]-I_min) )
I_m,I_p = C[di,dj-1], C[di,dj+1]
I_min,I_max = np.amin([I_m, I_p]), np.amax([I_m, I_p])
I_sign = 2*(I_p>I_m)-1
ddj = I_sign * (1- (I_max-I_min)/(C[di,dj]-I_min) )
else:
ddi, ddj = 0, 0
return ddi, ddj, i_int,j_int
def get_top_esinc(C, ds=1, top=np.array([])):
'''find location of highest score using exponential esinc function
Parameters
----------
C : np.array, size=(_,_)
similarity surface
top : np.array, size=(1,2)
location of the maximum score
Returns
-------
ddi : float
estimated subpixel location on the vertical axis of the peak
ddj : float
estimated subpixel location on the horizontal axis of the peak
i_int : integer
location of highest score on the vertical axis
j_int : integer
location of highest score on the horizontal axis
Notes
-----
[1] Argyriou & Vlachos, "A study of sub-pixel motion estimation using
phase correlation", proceedings of the British machine vision conference,
2006
'''
if top.size==0: # find highest score
di,dj,_,_ = get_integer_peak_location(C)
else:
di, dj = top[0], top[1]
i_int,j_int = np.copy(di), np.copy(dj)
di += C.shape[0]//2 # using a central coordinate system
dj += C.shape[1]//2
if is_estimate_away_from_border(C, i_int, j_int, ds): # estimate sub-pixel along each axis
# estimate sub-pixel per axis
Cj = C[di,dj-ds:dj+ds+1].ravel()
def funcJ(x):
a, b, c = x
return [(Cj[0] - a*np.exp(-(b*(-1-c))**2)* \
( np.sin(np.pi*(-1-c))/ (np.pi*(-1-c))) )**2,
(Cj[1] - a*np.exp(-(b*(+0-c))**2)* \
( np.sin(np.pi*(+0-c))/ (np.pi*(+0-c))) )**2,
(Cj[2] - a*np.exp(-(b*(+1-c))**2)* \
( np.sin(np.pi*(+1-c))/ (np.pi*(+1-c))) )**2]
jA, jB, jC = fsolve(funcJ, (1.0, 1.0, 0.1))
Ci = C[di-ds:di+ds+1,dj].ravel()
def funcI(x):
a, b, c = x
return [(Ci[0] - a*np.exp(-(b*(-1-c))**2)* \
( np.sin(np.pi*(-1-c))/ (np.pi*(-1-c))) )**2,
(Ci[1] - a*np.exp(-(b*(+0-c))**2)* \
( np.sin(np.pi*(+0-c))/ (np.pi*(+0-c))) )**2,
(Ci[2] - a*np.exp(-(b*(+1-c))**2)* \
( np.sin(np.pi*(+1-c))/ (np.pi*(+1-c))) )**2]
iA, iB, iC = fsolve(funcI, (1.0, 1.0, 0.1))
else:
iC, jC = 0, 0
return iC,jC, i_int,j_int
def get_top_2d_gaussian(C, top=np.array([])):
'''find location of highest score using 2D Gaussian
Parameters
----------
C : np.array, size=(_,_)
similarity surface
top : np.array, size=(1,2)
location of the maximum score
Returns
-------
ddi : float
estimated subpixel location on the vertical axis of the peak
ddj : float
estimated subpixel location on the horizontal axis of the peak
i_int : integer
location of highest score on the vertical axis
j_int : integer
location of highest score on the horizontal axis
Notes
-----
[1] Nobach & Honkanen, "Two-dimensional Gaussian regression for sub-pixel
displacement estimation in particle image velocimetry or particle
position estimation in particle tracking velocimetry", Experiments in
Fluids, vol.38 pp.511-515, 2005
'''
(Jsub,Isub) = np.meshgrid(np.linspace(-1,+1, 3), np.linspace(-1,+1, 3))
Isub = Isub.ravel()
Jsub = Jsub.ravel()
if top.size==0: # find highest score
di,dj,_,_ = get_integer_peak_location(C)
else:
di, dj = top[0], top[1]
i_int,j_int = np.copy(di), np.copy(dj)
di += C.shape[0]//2 # using a central coordinate system
dj += C.shape[1]//2
if is_estimate_away_from_border(C, i_int, j_int): # estimate sub-pixel along both axes
i_sub =
|
np.arange(-1,+2)
|
numpy.arange
|
import math
import os
import time
import xml.etree.ElementTree as ET
from xml.dom import minidom
import multiprocessing as mp
import cv2
import matplotlib.pyplot as plt
import numpy as np
import openslide
from PIL import Image
import pdb
import h5py
import math
from wsi_core.wsi_utils import savePatchIter_bag_hdf5, initialize_hdf5_bag, coord_generator, save_hdf5, sample_indices, screen_coords, isBlackPatch, isWhitePatch, to_percentiles
import itertools
from wsi_core.util_classes import isInContourV1, isInContourV2, isInContourV3_Easy, isInContourV3_Hard, Contour_Checking_fn
# from utils.file_utils import load_pkl, save_pkl
Image.MAX_IMAGE_PIXELS = 933120000
class WholeSlideImage(object):
def __init__(self, path):
"""
Args:
path (str): fullpath to WSI file
"""
self.name = ".".join(path.split("/")[-1].split('.')[:-1])
self.wsi = openslide.open_slide(path)
self.level_downsamples = self._assertLevelDownsamples()
self.level_dim = self.wsi.level_dimensions
self.contours_tissue = None
self.contours_tumor = None
self.hdf5_file = None
def getOpenSlide(self):
return self.wsi
def initXML(self, xml_path):
def _createContour(coord_list):
return np.array([[[int(float(coord.attributes['X'].value)),
int(float(coord.attributes['Y'].value))]] for coord in coord_list], dtype = 'int32')
xmldoc = minidom.parse(xml_path)
annotations = [anno.getElementsByTagName('Coordinate') for anno in xmldoc.getElementsByTagName('Annotation')]
self.contours_tumor = [_createContour(coord_list) for coord_list in annotations]
self.contours_tumor = sorted(self.contours_tumor, key=cv2.contourArea, reverse=True)
def initTxt(self,annot_path):
def _create_contours_from_dict(annot):
all_cnts = []
for idx, annot_group in enumerate(annot):
contour_group = annot_group['coordinates']
if annot_group['type'] == 'Polygon':
for idx, contour in enumerate(contour_group):
contour = np.array(contour).astype(np.int32).reshape(-1,1,2)
all_cnts.append(contour)
else:
for idx, sgmt_group in enumerate(contour_group):
contour = []
for sgmt in sgmt_group:
contour.extend(sgmt)
contour = np.array(contour).astype(np.int32).reshape(-1,1,2)
all_cnts.append(contour)
return all_cnts
with open(annot_path, "r") as f:
annot = f.read()
annot = eval(annot)
self.contours_tumor = _create_contours_from_dict(annot)
self.contours_tumor = sorted(self.contours_tumor, key=cv2.contourArea, reverse=True)
def initSegmentation(self, mask_file):
# load segmentation results from pickle file
import pickle
with open(mask_file, 'rb') as f:
asset_dict = pickle.load(f)
self.holes_tissue = asset_dict['holes']
self.contours_tissue = asset_dict['tissue']
def saveSegmentation(self, mask_file):
# save segmentation results using pickle
import pickle
asset_dict = {'holes': self.holes_tissue, 'tissue': self.contours_tissue}
with open(mask_file, 'wb') as f:
pickle.dump(asset_dict, f)
def segmentTissue(self, seg_level=0, sthresh=20, sthresh_up = 255, mthresh=7, close = 0, use_otsu=False,
filter_params={'a_t':100}, ref_patch_size=512, exclude_ids=[], keep_ids=[]):
"""
Segment the tissue via HSV -> Median thresholding -> Binary threshold
"""
def _filter_contours(contours, hierarchy, filter_params):
"""
Filter contours by: area.
"""
filtered = []
# find indices of foreground contours (parent == -1)
hierarchy_1 = np.flatnonzero(hierarchy[:,1] == -1)
all_holes = []
# loop through foreground contour indices
for cont_idx in hierarchy_1:
# actual contour
cont = contours[cont_idx]
# indices of holes contained in this contour (children of parent contour)
holes =
|
np.flatnonzero(hierarchy[:, 1] == cont_idx)
|
numpy.flatnonzero
|
import json
import os
import pickle
from os import listdir
from os.path import isfile, join
import fire
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
from skimage import feature
# data_list = [
# "/Users/sschickler/Code_Devel/LSSC-python/plotting_functions/demo_files/roi_list1.json",
# "/Users/sschickler/Code_Devel/LSSC-python/plotting_functions/demo_files/roi_list2.json"]
# background_image_path = "/Users/sschickler/Code_Devel/LSSC-python/plotting_functions/demo_files/embedding_norm_image.png"
# eigen_path = "/Users/sschickler/Code_Devel/LSSC-python/plotting_functions/demo_files/eigen_vectors"
# data_list = [
# "/Users/sschickler/Code_Devel/LSSC-python/plotting_functions/kdan/KDA79_A_keep121.json",
# "/Users/sschickler/Code_Devel/LSSC-python/plotting_functions/kdan/roi_list.json"
# ]
# background_image_path = "/Users/sschickler/Code_Devel/LSSC-python/plotting_functions/kdan/embedding_norm_image.png"
# eigen_path = "/Users/sschickler/Code_Devel/LSSC-python/plotting_functions/kdan/eigen_vectors"
# data_list = [
# "/Users/sschickler/Code_Devel/LSSC-python/plotting_functions/kdan/KDA79_A_keep121.json",
# "/Users/sschickler/Code_Devel/LSSC-python/plotting_functions/kdan/roi_list.json"
# ]
# background_image_path = "/Users/sschickler/Code_Devel/LSSC-python/plotting_functions/kdan/embedding_norm_image.png"
# eigen_path = "/Users/sschickler/Code_Devel/LSSC-python/plotting_functions/neurofinder2.0"
def create_image_from_eigen_vectors(path, shape):
onlyfiles = [f for f in listdir(path) if isfile(join(path, f))]
vectors = []
for x in onlyfiles:
with open(os.path.join(path, x), "rb") as file:
vectors.append(pickle.load(file)[:, 1:])
all_vectors = np.hstack(vectors)
all_vectors_sum = np.power(np.sum(np.power(all_vectors, 2), axis=1), .5)
all_vectors_shaped = np.reshape(all_vectors_sum, shape)
all_vectors_shaped[all_vectors_shaped < 0] = 0
# if all_vectors_shaped.min()<0:
# all_vectors_shaped+=all_vectors_shaped.min()*-1
return all_vectors_shaped * 255 / (all_vectors_shaped.max())
def create_roi_image(size, color, path, blobs=True, offset=0):
image = np.zeros((size[0], size[1], 3), dtype="int")
with open(path, "r") as json_true:
json_b_actual = json.load(json_true)
for num, x in enumerate(json_b_actual):
cords = x["coordinates"]
if len(cords) < 600:
image_temp = np.zeros((size[0], size[1], 3), dtype="int")
for pixel in cords:
try:
image_temp[pixel[0] + offset, pixel[1] + offset] = color
except IndexError:
print("bad pixel location: " + str([x + offset for x in pixel]))
if not blobs:
edge = feature.canny(
np.sum(image_temp, axis=2) / np.max(np.sum(image_temp, axis=2)))
image[edge] = color
else:
image[image_temp != 0] = image_temp[image_temp != 0]
return image
def create_graph(bg_path="", shape=None, e_dir="", data_1="", data_2="", data_3="",
data_4="", out_file="",
percent=99, blobs=True, pad=(0, 0),
color_1=(234, 32, 39), color_2=(247, 159, 31), color_3=(6, 82, 221),
color_4=(217, 128, 250), overlap_c=(211, 84, 0),
offset=0):
if bg_path != "":
if ".npy" in bg_path:
background_image = np.load(bg_path)
else:
background_image = mpimg.imread(bg_path) * 200 / 255
if pad[0] != 0:
background_image = np.pad(background_image,
[(pad[0], pad[0] + 1), (pad[1], pad[1] + 1)])
# background_image = gaussian_filter(background_image, .02)
if shape is None:
shape = background_image.shape
if e_dir != "":
background_image = (255 / 255 * create_image_from_eigen_vectors(e_dir,
shape))
background_image_temp = background_image.copy()
background_image[background_image < 0] = 0
background_image = (((background_image - np.percentile(background_image, 1)) / (
np.percentile(background_image, percent) - np.percentile(
background_image, 1))))
if np.percentile(background_image, 30) > .25:
background_image = (
((background_image_temp - np.percentile(background_image_temp, 10)) / (
|
np.percentile(background_image_temp, percent)
|
numpy.percentile
|
try:
from vrep import*
except:
print ('--------------------------------------------------------------')
print ('"vrep.py" could not be imported. This means very probably that')
print ('either "vrep.py" or the remoteApi library could not be found.')
print ('Make sure both are in the same folder as this file,')
print ('or appropriately adjust the file "vrep.py"')
print ('--------------------------------------------------------------')
print ('')
import time
import os
import sys
import numpy.random as random
import numpy as np
import math
from collections import defaultdict
import PIL.Image as Image
import array
import json
import shapely
from shapely.geometry import Polygon
import cv2 as cv
class Camera(object):
"""
# kinect camera in simulation
"""
def __init__(self, clientID):
"""
Initialize the Camera in simulation
"""
self.RAD2EDG = 180 / math.pi
self.EDG2RAD = math.pi / 180
self.Save_IMG = True
self.Save_PATH_COLOR = r'./color'
self.Save_PATH_DEPTH = r'./depth'
self.Dis_FAR = 10
self.depth_scale = 1000
self.Img_WIDTH = 224
self.Img_HEIGHT = 224
self.border_pos = [120,375,100,430]# [68,324,112,388] #up down left right of the box
self.theta = 70
self.Camera_NAME = r'kinect'
self.Camera_RGB_NAME = r'kinect_rgb'
self.Camera_DEPTH_NAME = r'kinect_depth'
self.clientID = clientID
self._setup_sim_camera()
self._mkdir_save(self.Save_PATH_COLOR)
self._mkdir_save(self.Save_PATH_DEPTH)
def _mkdir_save(self, path_name):
if not os.path.isdir(path_name):
os.mkdir(path_name)
def _euler2rotm(self,theta):
"""
-- Get rotation matrix from euler angles
"""
R_x = np.array([[1, 0, 0 ],
[0, math.cos(theta[0]), -math.sin(theta[0]) ],
[0, math.sin(theta[0]), math.cos(theta[0]) ]
])
R_y = np.array([[math.cos(theta[1]), 0, math.sin(theta[1]) ],
[0, 1, 0 ],
[-math.sin(theta[1]), 0, math.cos(theta[1]) ]
])
R_z = np.array([[math.cos(theta[2]), -math.sin(theta[2]), 0],
[math.sin(theta[2]), math.cos(theta[2]), 0],
[0, 0, 1]
])
R = np.dot(R_z, np.dot( R_y, R_x ))
return R
def _setup_sim_camera(self):
"""
-- Get handles and parameters from the simulation scene
and set the necessary camera parameters
"""
# Get handle to camera
_, self.cam_handle = simxGetObjectHandle(self.clientID, self.Camera_NAME, simx_opmode_oneshot_wait)
_, self.kinectRGB_handle = simxGetObjectHandle(self.clientID, self.Camera_RGB_NAME, simx_opmode_oneshot_wait)
_, self.kinectDepth_handle = simxGetObjectHandle(self.clientID, self.Camera_DEPTH_NAME, simx_opmode_oneshot_wait)
# Get camera pose and intrinsics in simulation
_, self.cam_position = simxGetObjectPosition(self.clientID, self.cam_handle, -1, simx_opmode_oneshot_wait)
_, cam_orientation = simxGetObjectOrientation(self.clientID, self.cam_handle, -1, simx_opmode_oneshot_wait)
self.cam_trans = np.eye(4,4)
self.cam_trans[0:3,3] = np.asarray(self.cam_position)
self.cam_orientation = [-cam_orientation[0], -cam_orientation[1], -cam_orientation[2]]
self.cam_rotm = np.eye(4,4)
self.cam_rotm[0:3,0:3] = np.linalg.inv(self._euler2rotm(cam_orientation))
self.cam_pose = np.dot(self.cam_trans, self.cam_rotm) # Compute rigid transformation representing camera pose
self._intri_camera()
def _intri_camera(self): # the intrinsic parameters of the camera
"""
Calculate the intrinsic parameters of the camera
"""
fx = -self.Img_WIDTH/(2.0 * math.tan(self.theta * self.EDG2RAD / 2.0))
fy = fx
u0 = self.Img_HEIGHT/ 2
v0 = self.Img_WIDTH / 2
self.intri = np.array([[fx, 0, u0],
[0, fy, v0],
[0, 0, 1]])
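# Editorial note (pinhole model, assuming the intrinsics above): a camera-frame
# point (x, y, z) projects to pixel coordinates via
#   u = fx * x / z + u0,    v = fy * y / z + v0,
# which is the relation inverted by pixel2world() / pixel2ur5() further below.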
def get_camera_data(self):
"""
-- Read images data from vrep and convert into np array
"""
# Get color image from simulation
res, resolution, raw_image = simxGetVisionSensorImage(self.clientID, self.kinectRGB_handle, 0, simx_opmode_oneshot_wait)
# self._error_catch(res)
color_img = np.array(raw_image, dtype=np.uint8)
color_img.shape = (resolution[1], resolution[0], 3)
color_img = color_img.astype(np.float64)/255
color_img[color_img < 0] += 1
color_img *= 255
color_img = np.flipud(color_img)
color_img = color_img.astype(np.uint8)
# Get depth image from simulation
res, resolution, depth_buffer = simxGetVisionSensorDepthBuffer(self.clientID, self.kinectDepth_handle, simx_opmode_oneshot_wait)
# self._error_catch(res)
depth_img = np.array(depth_buffer)
#print(depth_img)
depth_img.shape = (resolution[1], resolution[0])
depth_img = np.flipud(depth_img)
depth_img[depth_img < 0] = 0
depth_img[depth_img > 1] = 0.9999
depth_img = depth_img * self.Dis_FAR * self.depth_scale
self.cur_depth = depth_img
return depth_img, color_img
def save_image(self, cur_depth, cur_color, img_idx):
"""
-- Save Color&Depth images
"""
img = Image.fromarray(cur_color.astype('uint8')).convert('RGB')
img_path = os.path.join(self.Save_PATH_COLOR, str(img_idx) + '_Rgb.png')
img.save(img_path)
depth_img = Image.fromarray(cur_depth.astype(np.uint32),mode='I')
depth_path = os.path.join(self.Save_PATH_DEPTH, str(img_idx) + '_Depth.png')
depth_img.save(depth_path)
return depth_path, img_path
def _error_catch(self, res):
"""
-- Deal with unexpected errors
"""
if res == simx_return_ok:
print ("--- Image Exist!!!")
elif res == simx_return_novalue_flag:
print ("--- No image yet")
else:
print ("--- Error Raise")
def pixel2ur5(self, u, v, ur5_position, push_depth, depth = 0.0, is_dst = True):
"""
from pixel u,v and corresponding depth z -> coordinates in the ur5 frame (x,y,z)
"""
if is_dst == False:
depth = self.cur_depth[int(u)][int(v)] / self.depth_scale
x = depth * (u - self.intri[0][2]) / self.intri[0][0]
y = depth * (v - self.intri[1][2]) / self.intri[1][1]
camera_coor = np.array([x, y, depth - push_depth])
"""
from camera coor to ur5 coor
Notice the camera faces the plane directly, so we needn't convert the depth to the real z
"""
camera_coor[2] = - camera_coor[2]
location = camera_coor + self.cam_position - np.asarray(ur5_position)
return location, depth
def world2pixel(self,location):
"""
from coor in world coordinate (x,y,z) to pixel u.v
"""
x=location[0]
y=location[1]
z=0.75
# extrinsic parameter
if x<0.0 or x> 1.0 or abs(y)>0.7 or z<0.7:
return [1024,1024]
#print(self.cam_position)
z_1 = self.cam_position[2]-z
x_1 = x-self.cam_position[0]
y_1 = y-self.cam_position[1]
# internal parameter
u = int((x_1 / z_1)*self.intri[0][0] +self.intri[0][2]) #u x_1
v = int((y_1 / z_1)*self.intri[1][1] +self.intri[1][2]) #v y_1
if u<0 or v<0:
return [1024,1024]
return [u,v]
'''
x=location[0]
y=location[1]
z=0.75
# extrinsic parameter
if x<0.0 or x> 1.0 or abs(y)>0.7 or z<0.7:
return [1024,1024]
z_1 = self.cam_position[2]-z
x_1 = x-self.cam_position[0]
y_1 = y-self.cam_position[1]
f= -self.Img_WIDTH/(2.0 * math.tan(self.theta * self.EDG2RAD / 2.0))
u = f/z_1 * y_1 + self.intri[0][2]
v = f/z_1 * x_1 + self.intri[1][2]
'''
def pixel2world(self, u, v, push_depth = 0):
"""
from pixel u,v and corresponding depth z -> coordinates in the world frame (x,y,z)
"""
if u >0:
u = int(u)%224
else:
u = 0
if v>0:
v = int(v)%224
else:
v= 0
depth = self.cur_depth[int(u)][int(v)] / self.depth_scale
x = depth * (u - self.intri[0][2]) / self.intri[0][0]
y = depth * (v - self.intri[1][2]) / self.intri[1][1]
camera_coor = np.array([x, y, depth-push_depth])
"""
from camera coor to world coor
Notice the camera faces the plane directly, so we needn't convert the depth to the real z
"""
camera_coor[2] = - camera_coor[2]
location = camera_coor + self.cam_position
return location
class UR5(object):
def __init__(self,testing_file='table-00-scene-00.txt',obj_num=20):
#test
self.testing_file = testing_file
self.targetPosition = np.zeros(3,dtype = np.float64)
self.targetQuaternion = np.array([0.707,0,0.707,0])
self.baseName = r'UR5'
self.IkName = r'UR5_ikTip'
table_file = 'data/box.txt'
bound_dir = "data/boundary_size.json"
bound_file = open(bound_dir,encoding='utf-8')
self.bound_dic = json.load(bound_file)
file = open(table_file, 'r')
file_content = file.readlines()
file.close()
self.table_para = file_content[0].split()
self.workspace_limits = np.asarray([[float(self.table_para[0]), float(self.table_para[1])], [float(self.table_para[2]), float(self.table_para[3])] ])
self.drop_height =0.1
self.color_space = np.asarray([[78.0, 121.0, 167.0], # blue
[89.0, 161.0, 79.0], # green
[156, 117, 95], # brown
[242, 142, 43], # orange
[237.0, 201.0, 72.0], # yellow
[186, 176, 172], # gray
[255.0, 87.0, 89.0], # red
[176, 122, 161], # purple
[118, 183, 178], # cyan
[255, 157, 167]]) / 255.0 # pink
# Read files in object mesh directory
self.test_file_dir = os.path.abspath('data/test_cases/')
self.test_preset_file = os.path.join(self.test_file_dir, self.testing_file)
self.obj_mesh_dir= os.path.abspath('data/mesh/')
self.obj_num = obj_num
self.obj_dict = defaultdict(dict)
simxFinish(-1) # just in case, close all opened connections
self.clientID = simxStart('127.0.0.1', 19997, True, True, 5000, 5) # Connect to V-REP
if self.clientID != -1:
print ('Connected to remote API server')
# If testing, read object meshes and poses from test case file
scene_file = open(self.test_preset_file, 'r')
file_content = scene_file.readlines()
self.test_obj_mesh_files = []
self.test_obj_name = []
self.test_obj_type = []
self.test_obj_mesh_colors = []
self.test_obj_positions = []
self.test_obj_orientations = []
for i in range(self.obj_num):
file_content_curr_object = file_content[i].split()
self.test_obj_mesh_files.append(os.path.join(self.obj_mesh_dir, file_content_curr_object[0]))
self.test_obj_name.append(file_content_curr_object[0])
self.test_obj_type.append(file_content_curr_object[0][:-5])
self.test_obj_positions.append(
[float(file_content_curr_object[1]), float(file_content_curr_object[2]),
float(file_content_curr_object[3])])
self.test_obj_orientations.append(
[float(file_content_curr_object[4]), float(file_content_curr_object[5]),
float(file_content_curr_object[6])])
scene_file.close()
simxStartSimulation(self.clientID, simx_opmode_blocking)
#self.add_objects()
else:
print ('Failed connecting to remote API server')
_, self.ur5_handle = simxGetObjectHandle(self.clientID,self.baseName,simx_opmode_oneshot_wait)
_, self.ur5_position = simxGetObjectPosition(self.clientID,self.ur5_handle,-1,simx_opmode_oneshot_wait)
self.Ik_handle = simxGetObjectHandle(self.clientID,self.IkName,simx_opmode_oneshot_wait)
self.add_objects()
self.ankleinit()
def ankleinit(self):
"""
# initial the ankle angle for ur5
"""
simxSynchronousTrigger(self.clientID)
simxPauseCommunication(self.clientID, True)
simxSetIntegerSignal(self.clientID, 'ICECUBE_0', 11, simx_opmode_oneshot)
simxPauseCommunication(self.clientID, False)
simxSynchronousTrigger(self.clientID)
simxGetPingTime(self.clientID)
# pause for 1s
time.sleep(1)
def disconnect(self):
"""
# disconnect from v-rep
# and stop simulation
"""
simxStopSimulation(self.clientID,simx_opmode_oneshot)
time.sleep(2)
simxFinish(self.clientID)
print ('Simulation ended!')
def get_clientID(self):
return self.clientID
def ur5push(self, move_begin, move_to):
"""
The action of the ur5 in a single push action including:
Get to push beginning
Push to the destination
Return to the init pose
"""
time.sleep(1)
self.ur5moveto(move_begin)
time.sleep(0.5)
self.ur5moveto(move_to)
time.sleep(0.5)
# Return to the initial pose
self.ankleinit()
time.sleep(0.5)
def ur5moveto(self, dst_location):
"""
Push the ur5 hand to the location of dst_location
"""
simxSynchronousTrigger(self.clientID)
self.targetPosition = dst_location
simxPauseCommunication(self.clientID, True)
simxSetIntegerSignal(self.clientID, 'ICECUBE_0', 21, simx_opmode_oneshot)
for i in range(3):
simxSetFloatSignal(self.clientID, 'ICECUBE_'+str(i+1),self.targetPosition[i],simx_opmode_oneshot)
for i in range(4):
simxSetFloatSignal(self.clientID, 'ICECUBE_'+str(i+4),self.targetQuaternion[i], simx_opmode_oneshot)
simxPauseCommunication(self.clientID, False)
simxSynchronousTrigger(self.clientID)
simxGetPingTime(self.clientID)
def add_objects(self):
# Add objects to robot workspace at x,y location and orientation
self.object_handles = []
for i in range(self.obj_num):
curr_shape_name = 'shape'+str(i)
curr_mesh_file = self.test_obj_mesh_files[i]
curr_obj_type = self.test_obj_type[i]
curr_type = 't'
object_position = [self.test_obj_positions[i][0], self.test_obj_positions[i][1], self.test_obj_positions[i][2]]
object_orientation = [self.test_obj_orientations[i][0]*np.pi/180, self.test_obj_orientations[i][1]*np.pi/180, self.test_obj_orientations[i][2]*np.pi/180]
#print (object_position + object_orientation, [curr_mesh_file, curr_shape_name])
ret_resp,ret_ints,_,ret_strings,_ = simxCallScriptFunction(self.clientID, 'remoteApiCommandServer',sim_scripttype_childscript,'importShape',[0,0,255,0], object_position + object_orientation, [curr_mesh_file, curr_shape_name,curr_type], bytearray(), simx_opmode_blocking)
time.sleep(1)
if ret_resp == 8:
print('Failed to add new objects to simulation. Please restart.')
print (ret_strings)
exit()
curr_shape_handle = ret_ints[0]
self.object_handles.append(curr_shape_handle)
def get_obj_positions_and_orientations(self):
for i in range(self.obj_num):
obj_handle = self.object_handles[i]
self.obj_dict[i]['handle'] = obj_handle
_, object_position = simxGetObjectPosition(self.clientID, obj_handle, -1, simx_opmode_blocking)
_, object_orientation = simxGetObjectOrientation(self.clientID, obj_handle, -1, simx_opmode_blocking)
self.obj_dict[i]['position'] = object_position
self.obj_dict[i]['orientation'] = object_orientation
object_matrix = self.euler2rotm(object_orientation, object_position)
# object_matrix = self.euler2rotm_1(i)
self.obj_dict[i]['matrix'] = object_matrix
obj_name = self.test_obj_name[i]
self.obj_dict[i]['name'] = obj_name
self.obj_dict[i]['boundary_size'] = self.bound_dic[obj_name]
self.obj_dict[i]['rect'],self.obj_dict[i]['boundary']= self.caculate_projection_rect(object_matrix,self.bound_dic[obj_name])
#print(obj_name)
#print(object_position)
#print(object_orientation)
#print(self.obj_dict[i]['boundary'])
return self.obj_dict
def adjust_obj_positions_and_oritentations(self):
for i in range(self.obj_num):
drop_x = (self.workspace_limits[0][1] - self.workspace_limits[0][0] - 0.2) * np.random.random_sample() + self.workspace_limits[0][0] + 0.1
drop_y = (self.workspace_limits[1][1] - self.workspace_limits[1][0] - 0.2) * np.random.random_sample() + self.workspace_limits[1][0] + 0.1
object_position = [drop_x, drop_y, 1]
object_orientation = [2*np.pi*np.random.random_sample(), 2*np.pi*np.random.random_sample(), 2*np.pi*np.random.random_sample()]
handle = self.object_handles[i]
simxPauseCommunication(self.clientID,True)
simxSetObjectOrientation(self.clientID,handle,-1,object_orientation,simx_opmode_oneshot)
simxPauseCommunication(self.clientID,False)
simxPauseCommunication(self.clientID,True)
simxSetObjectPosition(self.clientID,handle,-1,object_position,simx_opmode_oneshot)
simxPauseCommunication(self.clientID,False)
time.sleep(1)
def euler2rotm(self,theta,position):
"""
-- Get rotation matrix from euler angles
"""
R_x = np.array([[1, 0, 0 ],
[0, math.cos(theta[0]), -math.sin(theta[0]) ],
[0, math.sin(theta[0]), math.cos(theta[0]) ]
])
R_y = np.array([[math.cos(theta[1]), 0, math.sin(theta[1]) ],
[0, 1, 0 ],
[-math.sin(theta[1]), 0, math.cos(theta[1]) ]
])
R_z = np.array([[math.cos(theta[2]), -math.sin(theta[2]), 0],
[math.sin(theta[2]), math.cos(theta[2]), 0],
[0, 0, 1]
])
R = np.dot(R_z, np.dot( R_y, R_x ))
position_get = np.array([position])
R1 = np.vstack((R,position_get))
return R1
def caculate_projection_rect(self,object_matrix,boundary_size):
obj_points =np.array( [
[boundary_size[0]/2,boundary_size[0]/2,-boundary_size[0]/2,-boundary_size[0]/2,
boundary_size[0]/2,boundary_size[0]/2,-boundary_size[0]/2,-boundary_size[0]/2],
[boundary_size[1]/2,-boundary_size[1]/2,boundary_size[1]/2,-boundary_size[1]/2,
boundary_size[1]/2,-boundary_size[1]/2,boundary_size[1]/2,-boundary_size[1]/2],
[boundary_size[2]/2,boundary_size[2]/2,boundary_size[2]/2,boundary_size[2]/2,
-boundary_size[2]/2,-boundary_size[2]/2,-boundary_size[2]/2,-boundary_size[2]/2],
[1,1,1,1,1,1,1,1]
])
obj_points = obj_points.T
obj_points_transform = np.dot(obj_points,object_matrix)
obj_points_transform = np.array(obj_points_transform)
obj_points_transform = obj_points_transform.reshape(8,3)
obj_x_array = obj_points_transform[:,0].T
obj_y_array = obj_points_transform[:,1].T
x_max_point = np.where(obj_x_array == np.max(obj_x_array))[0][0]
x_min_point = np.where(obj_x_array ==
|
np.min(obj_x_array)
|
numpy.min
|
import numpy as np
import pandas as pa
import requests, sys
import json
from Bio.Seq import Seq
import os
class TF3DScan:
def __init__(self,genes,PWM_directory,seqs=None):
self.gene_names=genes
self.PWM_dir=PWM_directory
self.seq=None
self.PWM=None
self.weights=None
self.proteins=None
self.initialize()
def initialize(self):
self.seq=self.get_seq_by_name(self.gene_names)
self.PWM=self.convolutional_filter_for_each_TF(self.PWM_dir)
self.weights, self.proteins= self.get_Weights(self.PWM)
return
def softmax(self,x):
e_x = np.exp(x - np.max(x))
return (e_x / e_x.sum(axis=0))
def convolutional_filter_for_each_TF(self,PWM_directory):
path = PWM_directory
#print(path)
filelist = os.listdir(path)
TF_kernel_PWM={}
for file in filelist:
TF_kernel_PWM[file.split("_")[0]] = pa.read_csv(path+file, sep="\t", skiprows=[0], header=None)
return TF_kernel_PWM
def get_reverse_scaning_weights(self, weight):
return np.flipud(weight[:,[3,2,1,0]])
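# Editorial note on get_reverse_scaning_weights (assumes PWM columns are ordered
# A, C, G, T): reverse-complement scanning flips the matrix top-to-bottom (reads
# the motif in reverse) and swaps the columns to T, G, C, A (complements each
# base), so e.g. a strong 'A' at the first position becomes a strong 'T' at the
# last position of the reversed filter.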
def get_Weights(self, filter_PWM_human):
#forward and reverse scanning matrix with reverse complement
#forward_and_reverse_direction_filter_list=[{k:np.dstack((filter_PWM_human[k],self.get_reverse_scaning_weights(np.array(filter_PWM_human[k]))))} for k in filter_PWM_human.keys()]
#forward and reverse scanning with same matrix
forward_and_reverse_direction_filter_list=[{k:np.dstack((filter_PWM_human[k],filter_PWM_human[k]))} for k in filter_PWM_human.keys()]
forward_and_reverse_direction_filter_dict=dict(j for i in forward_and_reverse_direction_filter_list for j in i.items())
unequefilter_shape=pa.get_dummies([filter_PWM_human[k].shape for k in filter_PWM_human])
TF_with_common_dimmention=[{i:list(unequefilter_shape.loc[list(unequefilter_shape[i]==1),:].index)} for i in unequefilter_shape.columns]
filterr={}
for i in TF_with_common_dimmention:
#print(list(i.keys()))
aa=[list(forward_and_reverse_direction_filter_list[i].keys()) for i in list(i.values())[0]]
aa=sum(aa,[])
#print(aa)
xx=[forward_and_reverse_direction_filter_dict[j] for j in aa]
#print(xx)
xxx=np.stack(xx,axis=-1)
#xxx=xxx.reshape(xxx.shape[1],xxx.shape[2],xxx.shape[3],xxx.shape[0])
filterr["-".join(aa)]=xxx
weights=[v for k,v in filterr.items()]
protein_names=[k for k,v in filterr.items()]
protein_names=[n.split("-") for n in protein_names]
return (weights,protein_names)
def get_sequenceBy_Id(self, EnsemblID,content="application/json",expand_5prime=2000, formatt="fasta",
species="homo_sapiens",typee="genomic"):
server = "http://rest.ensembl.org"
ext="/sequence/id/"+EnsemblID+"?expand_5prime="+str(expand_5prime)+";format="+formatt+";species="+species+";type="+typee
r = requests.get(server+ext, headers={"Content-Type" : content})
_=r
if not r.ok:
r.raise_for_status()
sys.exit()
return(r.json()['seq'][0:int(expand_5prime)+2000])
def seq_to3Darray(self, sequence):
seq3Darray=pa.get_dummies(list(sequence))
myseq=Seq(sequence)
myseq=str(myseq.reverse_complement())
reverseseq3Darray=pa.get_dummies(list(myseq))
array3D=np.dstack((seq3Darray,reverseseq3Darray))
return array3D
def get_seq_by_name(self, target_genes):
promoter_inhancer_sequence=list(map(self.get_sequenceBy_Id, target_genes))
threeD_sequence=list(map(self.seq_to3Darray, promoter_inhancer_sequence))
input_for_convolutional_scan=np.stack((threeD_sequence)).astype('float32')
return input_for_convolutional_scan
def from_2DtoSeq(self, twoD_seq):
indToSeq={0:"A",1:"C",2:"G",3:"T"}
seq=str(''.join([indToSeq[i] for i in np.argmax(twoD_seq, axis=1)]))
return seq
def conv_single_step(self, seq_slice, W):
s = seq_slice*W
# Sum over all entries of the volume s.
Z = np.sum(s)
return Z
def conv_single_filter(self, seq,W,stridev,strideh):
(fv, fh, n_C_prev, n_C) = W.shape
m=seq.shape[0]
pad=0
n_H = int((((seq.shape[1]-fv)+(2*pad))/stridev)+1)
n_W = int((((seq.shape[2]-fh)+(2*pad))/strideh)+1)
Z = np.zeros((m, n_H, n_W ,n_C_prev, n_C))
for i in range(m):
for h in range(int(n_H)):
vert_start = h*stridev
vert_end = stridev*h+fv
for w in range(int(n_W)):
horiz_start = w*strideh
horiz_end = strideh*w+fh
for c in range(int(n_C)):
a_slice_prev = seq[i,vert_start:vert_end,horiz_start:horiz_end,:]
# Convolve the 2-D slice for channel d with the matching filter slice to get one output value.
for d in range(n_C_prev):
Z[i, h, w,d, c] = self.conv_single_step(a_slice_prev[:,:,d], W[:,:,d,c])
Z=self.softmax(Z)
return Z
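# Editorial note: with no padding the output length follows the usual convolution
# size formula n_H = (H - f)/stride + 1; e.g. a hypothetical 4000-base input
# scanned with an 11-row PWM at stride 1 yields 3990 positions per strand.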
def conv_total_filter(self, Weights, seqs,stridev,strideh):
return [self.conv_single_filter(seqs,i,stridev,strideh) for i in Weights]
def single_sigmoid_pool(self, motif_score):
n=sum(motif_score>.5)
score=[motif_score[i] for i in np.argsort(motif_score)[::-1][:n]]
index=[j for j in
|
np.argsort(motif_score)
|
numpy.argsort
|
# Copyright (c) 2020 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import numpy as np
def linear1(x_matrix, is_benefit_x):
"""Python implementation of the Linear1 normalization method.
For more information, see the following publications:
* <NAME> and <NAME>, Multiple attribute decision making,
ser. Lecture Notes in Economics and Mathematical Systems.
Springer-Verlag Berlin Heidelberg, 1981, vol. 186,
ISBN: 9783540105589.
* <NAME>, <NAME>, and <NAME>, "An extension of TOPSIS for
group decision making," Mathematical and Computer Modelling,
vol. 45, no. 7--8, pp. 801--813, 2007. DOI: 10.1016/j.mcm.2006.03.023.
"""
# Make sure that the decision matrix is a float64 NumPy array
x_matrix = np.array(x_matrix, dtype=np.float64)
# Sanity check
if len(is_benefit_x) != x_matrix.shape[1]:
raise ValueError("The number of variables in the list that "
"determines whether each criterion is a benefit "
"or a cost criterion does not match the number "
"of columns in the decision matrix")
# Make sure that it does not contain any negative numbers
if np.sum(np.less(x_matrix, 0.0)) > 0:
raise ValueError("The decision matrix must not contain any "
"negative numbers in order to apply the "
"Linear1 normalization method")
# Construct the normalized decision matrix
z_matrix = np.zeros(x_matrix.shape, dtype=np.float64)
for j in range(x_matrix.shape[1]):
if is_benefit_x[j]:
max_value =
|
np.amax(x_matrix[:, j])
|
numpy.amax
|
import os
import unittest
import time
from datetime import datetime
try:
import torch
GPU = torch.cuda.is_available() and not os.environ.get("USE_CPU")
TORCH_INSTALLED = True
except ModuleNotFoundError:
GPU = False
TORCH_INSTALLED = False
class TestModule(unittest.TestCase):
def test_hbb(self):
from deep_sort_realtime.deepsort_tracker import DeepSort
import numpy as np
today = datetime.now().date()
if TORCH_INSTALLED:
embedder = 'mobilenet'
embeds = None
else:
embedder = None
tracker = DeepSort(
max_age=30,
nn_budget=100,
nms_max_overlap=1.0,
embedder=embedder,
today=today,
embedder_gpu=GPU,
)
tic = time.perf_counter()
print()
print("FRAME1")
frame1 = np.ones((1080, 1920, 3), dtype=np.uint8) * 255
detections1 = [
([0, 0, 50, 50], 0.5, "person"),
([50, 50, 50, 50], 0.5, "person"),
]
if embedder is None:
embeds = [np.array([0.1, 0.1, 0.1, 0.1]), np.array([-1.0, 1.0, 0.5, -0.5])]
tracks = tracker.update_tracks(
detections1, frame=frame1, today=datetime.now().date(), embeds=embeds
)
for track in tracks:
print(track.track_id)
print(track.to_tlwh())
print()
print("FRAME2")
# assume new frame
frame2 = frame1
detections2 = [
([10, 10, 60, 60], 0.8, "person"),
([60, 50, 50, 50], 0.7, "person"),
]
if embedder is None:
embeds = [np.array([0.1, 0.1, 0.1, 0.1]), np.array([-1.1, 1.0, 0.5, -0.5])]
tracks = tracker.update_tracks(
detections2, frame=frame2, today=datetime.now().date(), embeds=embeds
)
for track in tracks:
print(track.track_id)
print(track.to_tlwh())
print()
print("FRAME3")
# assume new frame
frame3 = frame1
detections3 = [
([20, 20, 70, 70], 0.8, "person"),
([70, 50, 50, 50], 0.7, "person"),
]
if embedder is None:
embeds = [np.array([0.1, 0.1, 0.1, 0.1]), np.array([-1.1, 1.0, 0.5, -0.5])]
tracks = tracker.update_tracks(
detections3, frame=frame3, today=datetime.now().date(), embeds=embeds
)
for track in tracks:
print(track.track_id)
print(track.to_tlwh())
print()
print("FRAME4")
# assume new frame
frame4 = frame1
detections4 = [([10, 10, 60, 60], 0.8, "person")]
if embedder is None:
embeds = [np.array([0.1, 0.1, 0.1, 0.1])]
tracks = tracker.update_tracks(detections4, frame=frame4, embeds=embeds)
for track in tracks:
print(track.track_id)
print(track.to_tlwh())
toc = time.perf_counter()
print(f"Avrg Duration per update: {(toc-tic)/4}")
return True
def test_obb(self):
from deep_sort_realtime.deepsort_tracker import DeepSort
import numpy as np
if TORCH_INSTALLED:
embedder = 'mobilenet'
embeds = None
else:
embedder = None
embeds = [np.array([0., 0., 0., 0.]), np.array([0.1, 0.1, 0.1, 0.1])]
tracker = DeepSort(
max_age=30,
nn_budget=100,
nms_max_overlap=1.0,
embedder=embedder,
polygon=True,
embedder_gpu=GPU,
)
tic = time.perf_counter()
print()
print("FRAME1")
frame1 = np.ones((1080, 1920, 3), dtype=np.uint8) * 255
detections1 = [
[[0, 0, 10, 0, 10, 10, 0, 10], [20, 20, 30, 20, 30, 30, 20, 30]],
[0, 1],
[0.5, 0.5],
]
tracks = tracker.update_tracks(detections1, frame=frame1, embeds=embeds)
correct_ans = [
np.array([0.0, 0.0, 11.0, 11.0]),
np.array([20.0, 20.0, 11.0, 11.0]),
]
for track, ans in zip(tracks, correct_ans):
print(track.track_id)
ltwh = track.to_ltwh()
print(ltwh)
np.testing.assert_allclose(ltwh, ans)
print()
print("FRAME2")
# assume new frame
frame2 = frame1
detections2 = [
[[0, 0, 10, 0, 15, 10, 0, 15], [25, 20, 30, 20, 30, 30, 25, 30]],
[0, 1],
[0.5, 0.6],
]
tracks = tracker.update_tracks(detections2, frame=frame2, embeds=embeds)
correct_ans = [
|
np.array([0.0, 0.0, 15.33884298, 15.33884298])
|
numpy.array
|
from collections import defaultdict
import copy
import numpy as np
ACTIVE = 1
INACTIVE = 0
def day17a(input_path):
num_cycles = 6
dimension = Dimension(input_path, 3)
print(dimension.total)
for step in range(num_cycles):
dimension.step()
return dimension.total
def test17a():
assert 112 == day17a('test_input.txt')
class Dimension:
def __init__(self, input_path, n_dims):
self.n_dims = n_dims
self.state = defaultdict(int)
lines = np.array([list(line.strip()) for line in open(input_path)])
lines[lines == '#'] = 1
lines[lines == '.'] = 0
init_slice = 0
x, y = np.meshgrid(np.arange(lines.shape[0]), np.arange(lines.shape[1]))
x = x.flatten().tolist()
y = y.flatten().tolist()
for ix, iy in zip(x, y):
self.state[(ix, iy) + (n_dims - 2) * (0,)] = int(lines[ix, iy])
meshgrid_input = n_dims * ((-1, 0, 1),)
self.deltas = np.stack(np.meshgrid(*meshgrid_input))
self.deltas = np.reshape(np.transpose(self.deltas), (-1, n_dims))
all_zeros = np.all(self.deltas == 0, axis=1)
self.deltas = self.deltas[~all_zeros, :]
@property
def total(self):
return sum(self.state.values())
def step(self):
next_state = copy.deepcopy(self.state)
current_keys = list(self.state.keys())
mins =
|
np.array(current_keys)
|
numpy.array
|
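The prompt above is cut at mins =; the sketch below is a hypothetical continuation of step() (not the author's code), showing how the precomputed deltas could drive a Conway-cubes update over the bounding box of the tracked cells.
import numpy as np
from collections import defaultdict

ACTIVE, INACTIVE = 1, 0  # mirrors the module-level constants in the snippet above

def step_sketch(state, deltas, n_dims):
    # Hypothetical update consistent with the Dimension class above:
    # scan the bounding box (padded by one cell) and apply the rules.
    next_state = defaultdict(int)
    keys = np.array(list(state.keys()))
    mins = keys.min(axis=0) - 1
    maxs = keys.max(axis=0) + 1
    grids = np.meshgrid(*[np.arange(lo, hi + 1) for lo, hi in zip(mins, maxs)])
    cells = np.stack(grids).reshape(n_dims, -1).T
    for cell in map(tuple, cells):
        n_active = sum(state.get(tuple(np.add(cell, d)), 0) for d in deltas)
        if state.get(cell, 0) == ACTIVE:
            next_state[cell] = ACTIVE if n_active in (2, 3) else INACTIVE
        else:
            next_state[cell] = ACTIVE if n_active == 3 else INACTIVE
    return next_state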
'''
-------------------------
SETUP THE MODEL
-------------------------
A) Environmental parameters
B) True wind angle and velocity range
C) Initial guess for solving the VPP
D) Delft coefficients for resistance estimation
E) Derived elementary dimensions
-------------------------
VPP MAIN ROUTINE
-------------------------
1 PRE-CALCULATION
2 LIFT FORCES AND MOMENTS
A) Keel lift force
B) Bulb lift force
C) Rudder lift force
D) Canoe body side force
E) GZ estimation
F) Righting Moment
G) Munk moment
H) Centre of Effort (CE)
3 RESISTANCE CALCULATION
3.1 Viscous Resistance
A) Parameters
B) Canoe body resistance
C) Keel and bulb resistance
D) Rudder resistance
E) Total viscous resistance
3.2 Residual Resistance
A) Canoe body resistance
B) Keel residual resistance
C) Total residual resistance
3.3 Induced resistance
A) Canoe body induced resistance
B) Keel induced resistance
C) Rudder induced resistance
D) Total induced resistance
3.4 Resistance increase due to heel
A) Canoe body viscous resistance
B) Canoe body residual resistance
C) Keel residual resistance
D) Total heel resistance
3.5 Added resistance in waves
3.6 Total resistance
4) AERODYNAMIC MODELING
4.1) Sail area and centre of effort
A) Main sail
B) Jib and foretriangle
C) Spinnaker
D) Mizzen
E) Nominal area
F) Centre of effort above deck line
4.2) Lift and drag coefficients for each sail
A) Main sail
B) Jib sail
C) Spinnaker
D) Mast drag coefficient
E) Lift and drag for all sails combined
4.3) Lift and drag forces and centre of efforts
5 FORCES AND MOMENTS IN GLOBAL COORDINATES [X, Y, Z]
5.1 Coordinates matrix
A) Leeway
B) Heel angle
C) Keel angle attack due to heel
D) Rudder angle due to heel
E) Rudder angle attack due to rudder angle
F) Rudder angle attack due to rudder with no lift
G) Sail angle attack due to heel
5.2 Forces
A) Total resistance
B) Lift force keel
C) Lift force rudder
D) Lift force rudder for delta = 0
E) Bulb side force
F) Canoe body side force
G) Sail lift force
H) Sail drag force
5.3 Centre of effort
A) Aerodynamic CE
B) Hydrodynamic CE
C) Rudder hydrodynamic CE
5.4 Moments
A) Munk Moment
B) Righting Moment
C) Aerodynamic moment
D) Hydrodynamic moment
E) Rudder moment
5.5 Resulting forces and moments
5.6 Equilibrium system to be solved
-------------------------
NOMENCLATURE & ACRONYMS
-------------------------
- Variable names are composed of "attribute" + "_" + "object", e.g. velocity_boat, angle_rudder
- Quantities are given in radians and meters per second unless a suffix indicates otherwise, e.g. angle_tw_deg
tw: true wind
aw: apparent wind
cb: canoe body
deg: degrees
temp: temporary
avg: average
-------------------------
REFERENCES
-------------------------
<NAME>. Sailing yacht performance in calm water and in waves. The 11th Chesapeake Sailing Yacht Symposium SNAME, Jan. 1993.
<NAME>., and <NAME>. Approximation of the hydrodynamic forces on a sailing yacht based on the 'Delft Systematic Yacht Hull Series'. Delft University of Technology, Faculty of Mechanical Engineering and Marine Technology, Ship Hydromechanics Laboratory, 1998.
<NAME>., <NAME>, and <NAME>. An approximation method for the added resistance in waves of a sailing yacht. 2nd International Symposium on Design and Production of Motor and Sailing Yachts MDY ‘06, Madrid, Spain. 2006.
<NAME>. Predicting the speed of sailing yachts, 1993.
<NAME>. A concept exploration model for sailing yachts. Transactions of RINA, p. 17–28, 2003.
<NAME>. Basic Naval Architecture: Ship Stability. Springer, 2018.
'''
### IMPORT PACKAGES
import numpy as np # high-level mathematical functions
from scipy import optimize # optimization functions to solve the VPP's system of equations
from scipy import interpolate # interpolation methods applied to obtain the Delft coefficients
import csv # export the results as CSV
import codecs, json # export the results as JSON
import re, os, os.path # auxiliary packages to build the JSON files
from _ctypes import PyObj_FromPtr # see https://stackoverflow.com/a/15012814/355230
def vpp_solve(sailset, loa, lwl, boa, bwl, tc, lcb, lcf, cb, cm, cp, cwp, lat_surface_cb, KG, free_board, lead_rudder, lead_sail, \
mass_crew, height_mainsail, base_mainsail, height_foretriangle, base_foretriangle, boom_heigth_deck, length_spinnaker, \
perpendicular_jib, span_rudder, tip_chord_rudder, root_chord_rudder, tip_thickness_rudder, root_thickness_rudder, \
sweep_rudder_deg, span_keel, tip_chord_keel, root_chord_keel, tip_thickness_keel, root_thickness_keel, sweep_keel_deg, \
naca_keel, naca_rudder, height_mast, diameter_mast, height_surface_rudder, height_mizzen, base_mizzen, boom_height_mizzen, \
chord_bulb_keel, diameter_bulb, surface_area_bulb, minimum_tw_knots, maximum_tw_knots, minimum_tw_angle, maximum_tw_angle):
### SETUP THE MODEL
# A) Environmental parameters
density_air = 1.3 # air density [kg/m3]
density_water = 1025 # water density [kg/m3]
viscosity_water = 1e-6 # water kinematic viscosity [m2/s]
gravity = 9.80665 # gravity acceleration [m/s2]
# B) True wind angle and velocity range
pi = np.pi # pi number
step_angle = 30 # true wind angle step [degrees]
step_velocity = 1.02889 # true wind speed step [m/s] equivalent to 2 knots
minimum_tw = minimum_tw_knots*0.514444 # true wind speed range [m/s]
maximum_tw = maximum_tw_knots*0.514444
if (maximum_tw - minimum_tw) < step_velocity: # in case the range of wind speed is lower than its step
step_velocity = (maximum_tw - minimum_tw)*0.99
# Arrays for true wind angle and velocity
angle_tw_deg = np.arange(minimum_tw_angle, maximum_tw_angle, step_angle) # polar diagram range
angle_tw = np.radians(angle_tw_deg)
velocity_tw = np.arange(minimum_tw, maximum_tw, step_velocity)
# Matrix to store the boat velocity for each true wind angle and speed
angle_tw_matrix = np.zeros((np.size(velocity_tw), np.size(angle_tw_deg)))
velocity_boat_matrix = np.zeros((np.size(velocity_tw), np.size(angle_tw_deg)))
vmg_matrix = np.zeros((np.size(velocity_tw), np.size(angle_tw_deg)))
# C) Initial guess for solving the VPP
# Velocity [m/s], leeway [rad], heel [rad], rudder angle [rad])
initial_guess = np.array([4, np.radians(5), np.radians(15), np.radians(-4)])
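# The four entries correspond to the four unknowns of the equilibrium system in
# section 5.6 (presumably driving force, side force, heel moment and yaw moment
# balances); a root finder such as scipy.optimize.fsolve applied to
# vpp_solve_main (defined below) is the usual way to drive those residuals to
# zero, although the actual call sits outside this excerpt.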
# D) Delft coefficients for resistance estimation (Keuning et al, 1998)
# Residual resistance - bare hull
coefficient_residual_hull = [
['FroudeNo', 'a0', 'a1', 'a2', 'a3', 'a4', 'a5', 'a6', 'a7', 'a8'],
[0.10, -0.0014, 0.0403, 0.047, -0.0227, -0.0119, 0.0061, -0.0086, -0.0307, -0.0553],
[0.15, 0.0004, -0.1808, 0.1793, -0.0004, 0.0097, 0.0118, -0.0055, 0.1721, -0.1728],
[0.20, 0.0014, -0.1071, 0.0637, 0.009, 0.0153, 0.0011, 0.0012,0.1021, -0.0648],
[0.25, 0.0027, 0.0463, -0.1263, 0.015, 0.0274, -0.0299, 0.011, -0.0595, 0.122],
[0.30, 0.0056, -0.8005, 0.4891, 0.0269, 0.0519, -0.0313, 0.0292, 0.7314, -0.3619],
[0.35, 0.0032, -0.1011, -0.0813, -0.0382, 0.032, -0.1481, 0.0837, 0.0233, 0.1587],
[0.40, -0.0064, 2.3095, -1.5152, 0.0751, -0.0858, -0.5349, 0.1715, -2.455, 1.1865],
[0.45, -0.0171, 3.4017, -1.9862, 0.3242, -0.145, -0.8043, 0.2952, -3.5284, 1.3575],
[0.50, -0.0201, 7.1576, -6.3304, 0.5829, 0.163, -0.3966, 0.5023, -7.1579, 5.2534],
[0.55, 0.0495, 1.5618, -6.0661, 0.8641, 1.1702, 1.761, 0.9176, -2.1191, 5.4281],
[0.60, 0.0808, -5.3233, -1.1513, 0.9663, 1.6084, 2.7459, 0.8491, 4.7129, 1.1089]
]
# Residual resistance - keel
coefficient_residual_keel = [
['FroudeNo', 'A0', 'A1', 'A2', 'A3'],
[0.2, -0.00104, 0.00172, 0.00117, -0.00008],
[0.25, -0.0055, 0.00597, 0.0039, -0.00009],
[0.3, -0.0111, 0.01421, 0.00069, 0.00021],
[0.35, -0.00713, 0.02632, -0.00232, 0.00039],
[0.4, -0.03581, 0.08649, 0.00999, 0.00017],
[0.45, -0.0047, 0.11592, -0.00064, 0.00035],
[0.5, 0.00553, 0.07371, 0.05991, -0.00114],
[0.55, 0.04822, 0.0066, 0.07048, -0.00035],
[0.6, 0.01021, 0.14173, 0.06409, -0.00192]
]
# Residual resistance increase due to heel
coefficient_residual_heel = [
['FroudeNo', 'u0', 'u1', 'u2', 'u3', 'u4', 'u5'],
[0.25, -0.0268, -0.0014, -0.0057, 0.0016, -0.007, -0.0017],
[0.30, 0.6628, -0.0632, -0.0699, 0.0069, 0.0459, -0.0004],
[0.35, 1.6433, -0.2144, -0.164, 0.0199, -0.054, -0.0268],
[0.40,-0.8659, -0.0354, 0.2226, 0.0188, -0.58, -0.1133],
[0.45, -3.2715, 0.1372, 0.5547, 0.0268, -1.0064, 0.2026],
[0.50, -0.1976, -0.148, -0.6593, 0.1862, -0.7489, -0.1648],
[0.55, 1.5873, -0.3749, -0.7105, 0.2146, -0.4818, -0.1174]
]
# Viscous resistance increase due to heel
coefficient_viscous_heel = [
['phi', 's0', 's1', 's2', 's3'],
[5, -4.112, 0.054, -0.027, 6.329],
[10, -4.522, -0.132, -0.077, 8.738],
[15, -3.291, -0.389, -0.118, 8.949],
[20, 1.85, -1.2, -0.109, 5.364],
[25, 6.51, -2.305, -0.066, 3.443],
[30, 12.334, -3.911, 0.024, 1.767],
[35, 14.648, -5.182, 0.102, 3.497]
]
# Added resistance in waves for a = 100 and wavelength/lwl = 1
coefficient_waves = [
['FroudeNo', 'a0', 'a1', 'a2', 'a3', 'a4', 'a5', 'a6', 'a7', 'a8', 'a9'],
[0.20, 0.135971706, -0.079712707, 0.011040044, -0.000512737, 0.00177368, -0.000207076, 0.000133095 , 0.252647483, -0.359794615, 0.14069324],
[0.25, 0.144740648, -0.087875806, 0.0121882, -0.000563218, 0.00256833, -0.00033302, 5.17839E-05, 0.223659113, -0.160193869, -0.073440481],
[0.30, 0.125369414, -0.092281743, 0.012800398, -0.000592109, 0.00119098, -0.000139619, 8.14003E-05, 0.357822779, -0.327040392, -0.020221069],
[0.35, 0.139011133, -0.108178384, 0.01491584, -0.000692116, 0.004351508, -0.000336362, 0.000360906, 0.319067432, -0.031271366, -0.332228687],
[0.40, 0.125891281, -0.120856359, 0.01672588, -0.0007783, 0.003887939, -0.000272325, 0.00038914, 0.481253166, -0.176587773, -0.344081072],
[0.45, 0.139240049, -0.142907914, 0.019939832, -0.000934437, 0.006308615, -0.000543945, 0.000457244, 0.578174665, -0.22452672, -0.390073693]
]
# E) Derived elementary dimensions
# Displacement [m3]
disp = cb*lwl*bwl*tc
# Waterplane area [m2]
awp = cwp*lwl*bwl
# awp = lwl*bwl*(1.313*cp - 0.0857*cp*lwl/disp**(1/3) + 0.0371*lwl/disp**(1/3))
# Rudder and keel sweep angle [rad]
sweep_rudder = np.radians(sweep_rudder_deg)
sweep_keel = np.radians(sweep_keel_deg)
# Rudder and keel average chord [m]
avg_chord_keel = (root_chord_keel + tip_chord_keel)/2
avg_chord_rudder = (root_chord_rudder + tip_chord_rudder)/2
# Surface area for canoe body, keel, and rudder [m2]
surface_area_cb = (1.97 + 0.171*bwl/tc)*(0.65/cm)**(1/3)*(disp*lwl)**0.5 # Gerritsma et al (1992)
lat_surface_keel = avg_chord_keel*span_keel
surface_area_keel = 2*lat_surface_keel
lat_surface_rudder = avg_chord_rudder*span_rudder
surface_area_rudder = 2*lat_surface_rudder
# Rudder and keel average thickness [m]
avg_thickness_rudder = (tip_thickness_rudder + root_thickness_rudder)/2
avg_thickness_keel = (tip_thickness_keel + root_thickness_keel)/2
# Taper ratio: ratio of the chord length at the tip to that at the root [-]
taper_ratio_keel = tip_chord_keel/root_chord_keel
taper_ratio_rudder = tip_chord_rudder/root_chord_rudder
# Effective aspect ratio [-]
aspect_ratio_keel = 2*(span_keel+diameter_bulb/5)**2/lat_surface_keel
# Canoe body ratio [-]
ratio_cb = 2*tc/(0.75*lwl)
# Keel displacement and volumetric centre, if not provided [m3]
kb_keel = span_keel*(2*tip_chord_keel + root_chord_keel)/(3*(tip_chord_keel + root_chord_keel))
disp_keel = 0.6*span_keel*avg_thickness_keel*avg_chord_keel**2
# LCB and LCF measured from the stern's perpendicular [m]
LCBfpp = lwl/2 + lcb
LCFfpp = lwl/2 + lcf
# Form coefficient [-]
form_coeff_cb = 0.09
if naca_keel == '6digit':
form_coeff_keel = 2*(avg_thickness_keel) + 60*(avg_thickness_keel)**4
visc_keel = 1 # effect of viscosity on the slope of the 2D lift curve
else:
form_coeff_keel = 1.2*(avg_thickness_keel) + 70*(avg_thickness_keel)**4
visc_keel = 0.9
if naca_rudder == '6digit':
form_coeff_rudder = 2*(avg_thickness_rudder) + 60*(avg_thickness_rudder)**4
visc_rudder = 1
else:
form_coeff_rudder = 1.2*(avg_thickness_rudder) + 70*(avg_thickness_rudder)**4
visc_rudder = 0.9
# Cross-flow drag coefficient [-]
crossflow_coeff_rudder = 0.1 + 0.7*taper_ratio_rudder
if chord_bulb_keel > 0:
crossflow_coeff_keel = 0
else:
crossflow_coeff_keel = 0.1 + 0.7*taper_ratio_keel # faired tip
# crossflow_coeff_keel = 0.1 + 1.6*taper_ratio_keel # squared tip
### VELOCITY PREDICTION PROGRAMME
def vpp_solve_main(solution):
### 1 PRE-CALCULATION
# The solution for the VPP routine optimization is given in terms of four parameters
velocity_boat = solution[0] # Boat velocity [m/s]
leeway = solution[1] # Leeway angle [radians]
heel = solution[2] # Heel angle [radians]
angle_rudder = solution[3] # Rudder angle [radians]
# Velocity and leeway will be positive
velocity_boat = abs(velocity_boat)
leeway = abs(leeway)
# Froude number
Fn = (velocity_boat)/(gravity * lwl)**0.5 # Froude number hull [-]
Fn_rudder = velocity_boat/(gravity*avg_chord_rudder)**0.5 # Froude number rudder [-]
# Apparent wind calculation
velocity_aw = (velocity_boat**2 + velocity_tw[t]**2 - 2*velocity_boat*velocity_tw[t]*np.cos(pi - leeway - angle_tw[u]))**0.5
angle_aw = np.arctan2((velocity_tw[t]*np.sin(angle_tw[u]) - velocity_boat*np.sin(leeway)), (velocity_tw[t]*np.cos(angle_tw[u]) + velocity_boat*np.cos(leeway)))
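# The two lines above are the vector sum of boat speed and true wind (law of
# cosines for the magnitude, arctan2 of the summed components for the angle).
# Worked check: velocity_boat = 4 m/s, velocity_tw = 5 m/s, angle_tw = pi/2,
# leeway = 0 gives velocity_aw = sqrt(16 + 25) ~= 6.4 m/s and
# angle_aw = arctan2(5, 4) ~= 0.896 rad (about 51 deg).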
# Heel and leeway shall have opposite signs
if leeway > 0 and heel > 0:
heel = -heel
if leeway < 0 and heel < 0:
heel = -heel
### 2 LIFT FORCES AND MOMENTS
# Lift forces calculated according to Oossanen, 1993, page 27
# A) Keel lift force [N]
angle_keel = np.arctan2(np.cos(heel)*np.sin(leeway), np.cos(leeway))
# Keel lift force influencing factors
keel_linear_factor = 2*pi*visc_keel*aspect_ratio_keel/(2*visc_keel + np.cos(sweep_keel)*(4 + aspect_ratio_keel**2/(np.cos(sweep_keel))**4)**0.5)
keel_quad_factor = crossflow_coeff_keel/aspect_ratio_keel
keel_tip_factor = 1 - 0.135/aspect_ratio_keel**(2/3) # faired tip
#keel_tip_factor = 1 # squared tip
keel_cb_factor = 1
keel_bulb_factor = (1 + 0.4*diameter_bulb/span_keel)**2
keel_factor_total = keel_linear_factor*keel_tip_factor*keel_cb_factor*keel_bulb_factor + keel_quad_factor*abs(angle_keel)
# Keel lift force
lift_keel = - 0.5*density_water*(velocity_boat**2)*angle_keel*lat_surface_keel*keel_factor_total
# B) Bulb lift force [N]
if chord_bulb_keel > 0 and surface_area_bulb > 0:
side_force_bulb = - 0.5*density_water*(velocity_boat**2)*(pi*diameter_bulb**2/2 + 1.8*surface_area_bulb*abs(angle_keel))*angle_keel
else:
side_force_bulb = 0
# C) Rudder lift force [N]
# Free-surface influence factor of the rudder
fsr = 1 - 4*height_surface_rudder/(avg_chord_rudder)
if fsr > 0:
if Fn_rudder <= 0.5:
factor_aspect_ratio_rudder = 2
elif Fn_rudder > 0.5 and Fn_rudder < 0.6413:
factor_aspect_ratio_rudder = 4.246*Fn_rudder - 0.1230
else:
factor_aspect_ratio_rudder = 1 + 0.422/(Fn_rudder)**3
aspect_ratio_rudder = fsr*factor_aspect_ratio_rudder*(span_rudder**2/lat_surface_rudder)
else:
aspect_ratio_rudder = 2*(span_rudder**2/lat_surface_rudder)
# Taylor wake fraction [-]
w = 0.10 + 4.5*(tc/(height_surface_rudder + span_rudder))*cb*cp*bwl/(lwl*(7*cwp - 6*cb)*(2.8 - 1.8*cp))
# Induced flow angle at the keel [rad]
angle_induced_keel = 1.5*(lead_rudder/(3*avg_chord_keel))**0.25*keel_factor_total*angle_keel/(pi*aspect_ratio_keel)
# Induced flow angle at the rudder due to the downwash of the keel [rad]
angle_rudder_attack = - angle_induced_keel + np.arctan2(-np.cos(leeway)*np.sin(angle_rudder) + np.cos(heel)*np.sin(leeway)*np.cos(angle_rudder), np.cos(leeway)*np.cos(angle_rudder) + np.cos(heel)*np.sin(leeway)*np.sin(angle_rudder))
angle_rudder_delta0 = angle_keel - angle_induced_keel
# Rudder lift force influencing factors
rudder_linear_factor = 2*pi*visc_rudder*aspect_ratio_rudder/(2*visc_rudder + np.cos(sweep_rudder)*(4 + aspect_ratio_rudder**2/(np.cos(sweep_rudder))**4)**0.5)
rudder_quad_factor = crossflow_coeff_rudder/aspect_ratio_rudder
rudder_tip_factor = 1 - 0.135/aspect_ratio_rudder**(2/3)
rudder_factor_total = rudder_linear_factor*rudder_tip_factor + rudder_quad_factor*abs(angle_rudder_attack)
rudder_factor_delta0_total = rudder_linear_factor*rudder_tip_factor + rudder_quad_factor*abs(angle_rudder_delta0)
# Rudder lift force
lift_rudder = - 0.5*density_water*((1 - w)*velocity_boat)**2*lat_surface_rudder*rudder_factor_total*angle_rudder_attack
lift_rudder_delta0 = - 0.5 * density_water * ((1 - w) * velocity_boat)**2 * lat_surface_rudder * rudder_factor_delta0_total * angle_rudder_delta0
# D) Canoe body side force [N]
side_force_cb = - 0.5*density_water*(velocity_boat**2)*(0.5*pi*tc**2 + 1.8*lat_surface_cb*abs(angle_keel))*angle_keel
# E) GZ estimation (Oossanen, 2003)
# coefficients b0 and b1 built as fitting polynomials from the plot provided in the paper above
b0 = 8*10**(-10)*np.degrees(heel)**4 - 3*10**(-7)*np.degrees(heel)**3 + 4*10**(-5)*np.degrees(heel)**2 - 0.002*np.degrees(heel) + 0.0754
b1 = 5*10**(-12)*np.degrees(heel)**5 - 4*10**(-9)*np.degrees(heel)**4 + 9*10**(-7)*np.degrees(heel)**3 + 9*10**(-5)*np.degrees(heel)**2 + 0.0038*np.degrees(heel) + 0.0153
c1 = b0 + b1*(boa/bwl)**2*(tc/bwl)/cb
BM = c1*bwl**2/tc
KB = tc*(5/6-cb/(3*cwp)) # Wilson, 2018
GZ = (KB + BM - KG)*np.sin(heel)
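# KB + BM - KG is the upright metacentric height GM, so this is the small-angle
# approximation GZ ~= GM*sin(heel), with c1 adjusting BM for hull form; e.g.
# GM = 1.0 m at 15 deg of heel gives GZ ~= 0.26 m.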
# F) Righting moment
# Tranversal righting moment [N*m]
M_hull_trans = GZ*disp*density_water*gravity
# Righting moment of crew sitting on weather rail [N*m]
factor_esc = 1
if abs(angle_tw[u]) <= (pi/3):
arm_crew = factor_esc*(0.475*boa - 0.305)
elif abs(angle_tw[u]) < (2*pi/3):
arm_crew = factor_esc*(0.475*boa - 0.305)*np.cos(abs(angle_tw[u])*3/2 - pi/2)
else:
arm_crew = 0
M_crew = np.sign(M_hull_trans)*abs(mass_crew*gravity*arm_crew*np.cos(heel))
# Total righting moment [N*m]
M_righting = M_hull_trans + M_crew
# G) Munk moment [N*m]
# Slender bodies in near-axial flow experience a destabilising moment
M_munk = - 0.9*disp*density_water*leeway*velocity_boat**2
# H) Centre of Effort (CE)
# Rudder hydrodynamic CE [m]
CE_rudder_x = -lwl/2 - root_chord_rudder/4 - np.tan(sweep_rudder)*span_rudder/3*(1 + 2*taper_ratio_rudder)/(1 + taper_ratio_rudder)
CE_rudder_z = height_surface_rudder + span_rudder/3*(1 + 2*taper_ratio_rudder)/(1 + taper_ratio_rudder)
# Global hydrodynamic CE [m]
CE_hydro_x = CE_rudder_x + lead_rudder
CE_hydro_z = 0.45*(span_keel + tc)
### 3 RESISTANCE CALCULATION
### 3.1 Viscous Resistance
# A) Parameters
# Reynolds number [-]
reynolds_cb = (velocity_boat*0.7*lwl)/viscosity_water
reynolds_keel = (velocity_boat*avg_chord_keel)/viscosity_water
reynolds_rudder = (velocity_boat*avg_chord_rudder)/viscosity_water
reynolds_bulb = (velocity_boat*chord_bulb_keel)/viscosity_water
# Friction coefficient [-]
friction_coeff_cb = (0.075/((np.log(reynolds_cb)/np.log(10)) - 2)**2) - (1800/reynolds_cb)
friction_coeff_keel = (0.075/((np.log(reynolds_keel)/np.log(10)) - 2)**2) - (1800/reynolds_keel)
friction_coeff_rudder = (0.075/((np.log(reynolds_rudder)/np.log(10)) - 2)**2) - (1800/reynolds_rudder)
if chord_bulb_keel > 0:
friction_coeff_bulb = (0.075/((np.log(reynolds_bulb)/np.log(10)) - 2)**2) - (1800/reynolds_bulb)
else:
friction_coeff_bulb = 0
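# The 0.075/(log10(Re) - 2)**2 term is the ITTC-57 correlation line; the
# -1800/Re term appears to be a low-Reynolds (transition) correction used with
# the Delft series method. Worked check: Re = 1e6 gives 0.075/16 - 0.0018 ~= 0.0029.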
# B) Canoe body resistance [N]
Rv_cb = 0.5*density_water*velocity_boat**2*friction_coeff_cb*(1 + form_coeff_cb)*surface_area_cb
# C) Keel and bulb resistance [N]
Rv_keel = 0.5*density_water*velocity_boat**2*friction_coeff_keel*(1 + form_coeff_keel)*surface_area_keel
Rv_bulb = 0.5*density_water*velocity_boat**2*friction_coeff_bulb*surface_area_bulb
# D) Rudder resistance [N]
Rv_rudder = 0.5*density_water*velocity_boat**2*friction_coeff_rudder*(1 + form_coeff_rudder)*surface_area_rudder
# E) Total viscous resistance [N]
R_viscous = Rv_cb + Rv_keel + Rv_rudder + Rv_bulb
### 3.2 Residual resistance
# A) Canoe Body resistance [N]
vector_res_cb = np.zeros(9)
if Fn > 0.6:
Fn_temp = 0.6
elif Fn < 0.1:
Fn_temp = 0.1
else:
Fn_temp = Fn
for k in range (1, 10, 1):
if float(coefficient_residual_hull[k][0]) <= Fn_temp and Fn_temp <= float(coefficient_residual_hull[k + 1][0]):
CC = [float(coefficient_residual_hull[k][0]), float(coefficient_residual_hull[k + 1][0])]
for j in range (1, 10, 1):
DD = [float(coefficient_residual_hull[k][j]), float(coefficient_residual_hull[k + 1][j])]
vector_res_cb[j - 1] = interpolate.interp1d(CC, DD)(Fn_temp)
if Fn_temp < 0.1:
Rr_cb = 0
else:
Rr_cb = disp*density_water*gravity*(vector_res_cb[0] + (vector_res_cb[1]*LCBfpp/lwl + vector_res_cb[2]*cp + vector_res_cb[3]*disp**(2/3)/awp + \
vector_res_cb[4]*bwl/lwl)*disp**(1/3)/lwl + (vector_res_cb[5]*disp**(2/3)/surface_area_cb + vector_res_cb[6]*LCBfpp/LCFfpp + \
vector_res_cb[7]*(LCBfpp/lwl)**2 + vector_res_cb[8]*cp**2)*disp**(1/3)/lwl)
# B) Keel residual resistance [N]
vector_res_keel = np.zeros(4)
if Fn > 0.6:
Fn3 = 0.6
elif Fn < 0.2:
Fn3 = 0.2
else:
Fn3 = Fn
for k in range (1, 9, 1):
if float(coefficient_residual_keel[k][0]) <= Fn3 and Fn3 <= float(coefficient_residual_keel[k + 1][0]):
GG = [float(coefficient_residual_keel[k][0]), float(coefficient_residual_keel[k + 1][0])]
for j in range (1, 5, 1):
HH = [float(coefficient_residual_keel[k][j]), float(coefficient_residual_keel[k + 1][j])]
vector_res_keel[j - 1] = interpolate.interp1d(GG, HH)(Fn3)
if Fn3 < 0.2:
Rr_keel = 0
else:
Rr_keel = disp_keel*density_water*gravity*(vector_res_keel[0] + vector_res_keel[1]*((tc + span_keel)/bwl) + \
vector_res_keel[2]*((tc + kb_keel)**3/disp_keel) + vector_res_keel[3]*(disp/disp_keel))
# C) Total residual resistance
R_residual = Rr_cb + Rr_keel
### 3.3 Induced resistance
# Correction factor for foils with non-elliptical load distribution
tr_cb = 0.3 # hull taper ratio
sig_keel = aspect_ratio_keel*(0.012 - 0.057*taper_ratio_keel + 0.095*taper_ratio_keel**2 - 0.04*taper_ratio_keel**3)
sig_rudder = aspect_ratio_rudder*(0.012 - 0.057*taper_ratio_rudder + 0.095*taper_ratio_rudder**2 - 0.04*taper_ratio_rudder**3)
sig_cb = ratio_cb*(0.012 - 0.057*tr_cb + 0.095*tr_cb**2 - 0.04*tr_cb**3)
# A) Canoe body induced resistance [N]
Ri_cb = (side_force_cb/np.cos(heel))**2*(1 + sig_cb)/(0.5*density_water*velocity_boat**2*lat_surface_cb*pi*ratio_cb)
# B) Keel induced resistance [N]
Ri_keel = ((lift_keel**2)*(1 + sig_keel))/(0.5*density_water*(velocity_boat**2)*lat_surface_keel*pi*aspect_ratio_keel)
# C) Rudder induced resistance [N]
Ri_rudder = ((lift_rudder**2)*(1 + sig_rudder))/(0.5*density_water*(velocity_boat**2)*lat_surface_rudder*pi*aspect_ratio_rudder)
# D) Total induced resistance [N]
R_induced = Ri_cb + Ri_keel + Ri_rudder
### 3.4 Resistance increase due to heel
# A) Canoe body viscous resistance [N]
vector_heel_visc = np.zeros(4)
if (abs(heel)) > 0.35:
heel_temp = 0.35
else:
heel_temp = abs(heel)
heel_temp = np.degrees(heel_temp)
for k in range (1, 7, 1):
if float(coefficient_viscous_heel[k][0]) <= heel_temp and heel_temp <= float(coefficient_viscous_heel[k + 1][0]):
AA = [float(coefficient_viscous_heel[k][0]), float(coefficient_viscous_heel[k + 1][0])]
for j in range (1, 5, 1):
BB = [float(coefficient_viscous_heel[k][j]), float(coefficient_viscous_heel[k + 1][j])]
vector_heel_visc[j - 1] = interpolate.interp1d(AA, BB)(heel_temp)
if abs(heel) < np.radians(5):
surface_area_cb_incl = surface_area_cb
else:
surface_area_cb_incl = surface_area_cb*(1 + 1/100*(vector_heel_visc[0] + vector_heel_visc[1]*bwl/tc + vector_heel_visc[2]*(bwl/tc)**2 + vector_heel_visc[3]*cm))
Rv_heel_cb = 0.5*density_water*velocity_boat**2*friction_coeff_cb*surface_area_cb_incl - Rv_cb
# B) Canoe body residual resistance increase [N]
vector_heel_cb = np.zeros(6)
if Fn > 0.55:
Fn_temp = 0.55
elif Fn < 0.25:
Fn_temp = 0.25
else:
Fn_temp = Fn
for k in range (1, 7 , 1):
if float(coefficient_residual_heel[k][0]) <= Fn_temp and Fn_temp <= float(coefficient_residual_heel[k + 1][0]):
EE = [float(coefficient_residual_heel[k][0]), float(coefficient_residual_heel[k + 1][0])]
for j in range(1, 7, 1):
FF = [float(coefficient_residual_heel[k][j]), float(coefficient_residual_heel[k + 1][j])]
vector_heel_cb[j - 1] = interpolate.interp1d(EE, FF)(Fn_temp)
vector_heel_cb[:] = [x / 1000 for x in vector_heel_cb]
# Extrapolating resistance from 20 degrees to the real heel angle
if Fn_temp < 0.25:
Rincli20 = 0
else:
Rincli20 = disp*density_water*gravity*(vector_heel_cb[0] + vector_heel_cb[1]*lwl/bwl + vector_heel_cb[2]*bwl/tc + \
vector_heel_cb[3]*(bwl/tc)**2 + vector_heel_cb[4]*lcb + vector_heel_cb[5]*lcb**2)
Rr_heel_cb = Rincli20*6*abs(heel)**1.7
# C) Keel residual resistance increase [N]
CH = -3.5837*(tc/(tc + span_keel)) - 0.0518*(bwl/tc) + 0.5958*(bwl/(tc+span_keel)) + 0.2055*(lwl/disp**(1/3))
Rr_heel_keel = density_water*gravity*disp_keel*Fn**2*abs(heel)*CH
# D) Total heel resistance [N]
R_heel = Rv_heel_cb + Rr_heel_cb + Rr_heel_keel
### 3.5 Added resistance in waves (Keuning et al, 2006)
vector_addwave = np.zeros(10)
if Fn > 0.45:
Fn_temp = 0.45
elif Fn < 0.20:
Fn_temp = 0.20
else:
Fn_temp = Fn
for k in range(1, 6, 1):
if float(coefficient_waves[k][0]) <= Fn_temp and Fn_temp <= float(coefficient_waves[k + 1][0]):
GG = [float(coefficient_waves[k][0]), float(coefficient_waves[k + 1][0])]
for j in range(1, 11, 1):
HH = [float(coefficient_waves[k][j]), float(coefficient_waves[k + 1][j])]
vector_addwave[j - 1] = interpolate.interp1d(GG, HH)(Fn_temp)
# Added resistance in waves for Froude > 0.25
if Fn_temp < 0.25:
R_addwaves = 0
else:
R_addwaves = vector_addwave[0]+vector_addwave[1]*(lwl/disp**(1/3)) + vector_addwave[2]*(lwl/disp**(1/3))**2 + \
vector_addwave[3]*(lwl/disp**(1/3))**3 + vector_addwave[4]*(lwl/bwl) + vector_addwave[5]*(lwl/bwl)**2 + \
vector_addwave[6]*(bwl/tc) + vector_addwave[7]*cp + vector_addwave[8]*cp**2 + vector_addwave[9]*cp**3
wave_amplitude = 0.3
R_addwaves = R_addwaves*gravity*density_water*lwl*wave_amplitude**2
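# The polynomial above gives the non-dimensional added resistance for the
# reference condition of the coefficient table (a = 100, wavelength/lwl = 1);
# the last line re-dimensionalises it with rho*g*lwl and an assumed wave
# amplitude of 0.3 m.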
### 3.6 Total resistance [N]
R_total = abs(R_viscous) + abs(R_residual) + abs(R_induced) + abs(R_heel) + abs(R_addwaves)
### 4 AERODYNAMIC MODELING
### 4.1 Sail area and centre of effort
# A) Main sail (m)
Am = 0.5*height_mainsail*base_mainsail
CEm = (0.39*height_mainsail) + boom_heigth_deck
# B) Jib (j) and Foretriangle (f)
if sailset == 'main+genoa' or sailset == 'main+genoa+spinnaker':
Aj = perpendicular_jib*(height_foretriangle**2 + base_foretriangle**2)**0.5/2
CEj = 0.39*height_foretriangle
Af = 0.5*height_foretriangle*base_foretriangle
else:
Aj = 0
CEj = 0
Af = 0
# C) Spinnaker (s)
if (sailset == 'main+spinnaker' and angle_tw[u] > (2*pi/3)) or (sailset == 'main+genoa+spinnaker' and angle_tw[u] > (2*pi/3)):
As = 1.15*length_spinnaker*base_foretriangle
CEs = 0.59*height_foretriangle
else:
As = 0
CEs = 0
# D) Mizzen (mz)
Amz = 0.5*height_mizzen*base_mizzen
Cbase_mizzen = 0.39*height_mizzen + boom_height_mizzen
# E) Nominal area
An = Af + Am/1.16
# F) Centre of effort above deck line
CE_sail = (CEm*Am + CEj*Aj + CEs*As + Cbase_mizzen*Amz)/An
### 4.2 Lift and drag coefficients for each sail
angle_sail = np.arctan2(np.cos(heel)*np.sin(angle_aw), np.cos(angle_aw))
# A) Main sail (full)
# coefficients of clm (y) and cdm (z)
x = [0.0, 0.12211111, 0.157, 0.20933333, 1.04666667, 1.57, 2.09333333, 2.61666667, 3.14]
y = [0, 1.15, 1.4,1.55, 1.44, 0.96, 0.58, 0.25, -0.1]
z = [0.027, 0.027, 0.027, 0.027, 0.103, 0.275, 0.671, 1.11, 1.2]
Clm = np.interp(angle_sail, x, y)
Cdm = np.interp(angle_sail, x, z)
# B) Jib
# coefficients of clj (y) and cdj (z)
x = [0.12211111, 0.26166667, 0.34888889, 0.471, 0.87222222, 1.04666667, 1.74444444, 2.61666667, 3.14]
y = [0.0, 1.0, 1.375, 1.45, 1.43, 1.25, 0.4, 0.0, -0.1]
z = [0.05, 0.023, 0.031, 0.037, 0.25, 0.35, 0.73, 0.95, 0.9]
Clj = np.interp(angle_sail, x, y)
Cdj = np.interp(angle_sail, x, z)
# C) Spinnaker
# coefficients of cls (y) and cds (z)
x = [0.48844444, 0.72, 0.8, 1.05, 1.31, 1.74, 2.27, 2.62, 3.14]
y = [0.0, 1.31, 1.56, 1.71, 1.69, 1.4, 0.83, 0.5, 0.0]
z = [0.1, 0.15, 0.2, 0.4, 0.7, 1.0, 1.1, 1.1, 1.1]
Cls = np.interp(angle_sail, x, y)
Cds = np.interp(angle_sail, x, z)
# Separation drag coefficients
KPm = 0.016
KPj = 0.016
KPs = 0.019
# D) Mast drag coefficient
coeff_drag_mast = 1.13*((boa*free_board) + (height_mast*diameter_mast))/An
# E) Lift and drag for all sails combined
# Sail aspect ratio
if (angle_tw[u]) < (pi/3):
aspect_ratio_sail = (1.1*(height_mast + free_board))**2/An
else:
aspect_ratio_sail = (1.1*height_mast)**2/An
if sailset == 'main' or sailset == 'main+genoa' or (sailset == 'main+spinnaker' and angle_tw[u] < (2*pi/3)) or (sailset == 'main+genoa+spinnaker' and angle_tw[u] < (2*pi/3)):
coeff_lift = (Clm*Am + Clj*Aj)/An
coeff_drag_par = (Cdm*Am + Cdj*Aj)/An
coeff_drag_ind = (Clm**2*Am + Clj**2*Aj)/(An*pi*aspect_ratio_sail)
coeff_drag_sep = (Clm**2*Am*KPm + Clj**2*Aj*KPj)/An
coeff_drag_global = coeff_drag_par + coeff_drag_ind + coeff_drag_sep + coeff_drag_mast
elif (sailset == 'main+spinnaker' or sailset == 'main+genoa+spinnaker') and angle_tw[u] > (2*pi/3):
coeff_lift = (Clm*Am + Cls*As)/An
coeff_drag_par = (Cdm*Am + Cds*As)/An
coeff_drag_ind = (Clm**2*Am + Cls**2*As)/(An*pi*aspect_ratio_sail)
coeff_drag_sep = (Clm**2*Am*KPm + Cls**2*As*KPs)/An
coeff_drag_global = coeff_drag_par + coeff_drag_ind + coeff_drag_sep + coeff_drag_mast
### 4.3 Lift and drag forces and centres of effort
# Forces
lift_force_sail = 0.5*density_air*velocity_aw**2*(1 - np.sin(heel)**2*np.sin(angle_aw)**2)*coeff_lift*An*np.sign(angle_sail)
drag_force_sail = - 0.5*density_air*velocity_aw**2*(1 - np.sin(heel)**2*np.sin(angle_aw)**2)*coeff_drag_global*An
# Centre of effort
CE_aero_x = CE_hydro_x + lead_sail
CE_aero_z = - CE_sail - free_board
heel = abs(heel)
### 5 FORCES AND MOMENTS IN GLOBAL COORDINATES [X, Y, Z]
### 5.1 Coordinates matrix
# A) Leeway
Mrot_leeway = np.matrix([
[np.cos(leeway), -np.sin(leeway), 0],
[np.sin(leeway), np.cos(leeway), 0],
[0, 0, 1]
])
# B) Heel angle
Mrot_heel = np.matrix([
[1, 0, 0],
[0, np.cos(heel), - np.sin(heel)],
[0, np.sin(heel), np.cos(heel)]
])
# C) Keel angle attack due to heel
Mrot_keel_heel = np.matrix([
[np.cos(angle_keel), -np.sin(angle_keel), 0],
[np.sin(angle_keel), np.cos(angle_keel), 0],
[0, 0, 1]
])
# D) Rudder angle due to heel
Mrot_rudder_heel = np.matrix([
[np.cos(angle_rudder), -np.sin(angle_rudder), 0],
[np.sin(angle_rudder), np.cos(angle_rudder), 0],
[0, 0, 1]
])
# E) Rudder angle attack due to rudder angle
Mrot_rudder_attack = np.matrix([
[np.cos(angle_rudder_attack), -np.sin(angle_rudder_attack), 0],
[np.sin(angle_rudder_attack), np.cos(angle_rudder_attack), 0],
[0, 0, 1]
])
# F) Rudder angle attack due to rudder with no lift
Mrot_rudder_delta0 = np.matrix([
[np.cos(angle_rudder_delta0), -np.sin(angle_rudder_delta0), 0],
[np.sin(angle_rudder_delta0), np.cos(angle_rudder_delta0), 0],
[0, 0, 1]
])
# G) Sail angle attack due to heel
Mrot_sail_heel = np.matrix([
[
|
np.cos(angle_sail)
|
numpy.cos
|
#Author: <NAME>
import numpy as np
import matplotlib.pyplot as plt
#perform experiments
def main():
training_data = read_training_data("train-images-idx3-ubyte")
training_data = np.divide(training_data, 255)
training_label = read_training_label("train-labels-idx1-ubyte")
test_data = read_test_data("t10k-images-idx3-ubyte")
test_data = np.divide(test_data, 255)
test_label = read_test_label("t10k-labels-idx1-ubyte")
experiment1(training_data, training_label, test_data, test_label)
experiment2(training_data, training_label, test_data, test_label)
experiment3(training_data, training_label, test_data, test_label)
def experiment1(training_data, training_label, test_data, test_label):
hidden_units = 20
input_weights = np.random.uniform(low=-.05, high=.05, size=(hidden_units,784))
input_bias = np.random.uniform(low=-.05, high=.05, size=(hidden_units))
hidden_weights = np.random.uniform(low=-.05, high=.05, size=(10,hidden_units))
hidden_bias = np.random.uniform(low=-.05, high=.05, size=(10))
momentum = 0.9
learning_rate = 0.1
x = np.linspace(0,50,51)
training_20_nodes = np.zeros(51)
training_50_nodes = np.zeros(51)
training_100_nodes = np.zeros(51)
test_20_nodes = np.zeros(51)
test_50_nodes = np.zeros(51)
test_100_nodes = np.zeros(51)
#train model with 20 nodes
for i in range(51):
permutation = np.random.permutation(training_data.shape[0])
training_data = training_data[permutation]
training_label = training_label[permutation]
training_20_nodes[i] = network_accuracy(input_weights, input_bias, hidden_weights, hidden_bias, hidden_units, training_data, training_label)
test_20_nodes[i] = network_accuracy(input_weights, input_bias, hidden_weights, hidden_bias, hidden_units, test_data, test_label)
if (i == 50):
break
train_network(input_weights, input_bias, hidden_weights, hidden_bias, hidden_units, learning_rate, momentum, training_data, training_label)
matrix_1 = confusion_matrix(input_weights, input_bias, hidden_weights, hidden_bias, hidden_units, test_data, test_label)
hidden_units = 50
input_weights = np.random.uniform(low=-.05, high=.05, size=(hidden_units,784))
input_bias = np.random.uniform(low=-.05, high=.05, size=(hidden_units))
hidden_weights = np.random.uniform(low=-.05, high=.05, size=(10,hidden_units))
hidden_bias = np.random.uniform(low=-.05, high=.05, size=(10))
#train model with 50 nodes
for i in range(51):
permutation = np.random.permutation(training_data.shape[0])
training_data = training_data[permutation]
training_label = training_label[permutation]
training_50_nodes[i] = network_accuracy(input_weights, input_bias, hidden_weights, hidden_bias, hidden_units, training_data, training_label)
test_50_nodes[i] = network_accuracy(input_weights, input_bias, hidden_weights, hidden_bias, hidden_units, test_data, test_label)
if (i == 50):
break
train_network(input_weights, input_bias, hidden_weights, hidden_bias, hidden_units, learning_rate, momentum, training_data, training_label)
matrix_2 = confusion_matrix(input_weights, input_bias, hidden_weights, hidden_bias, hidden_units, test_data, test_label)
hidden_units = 100
input_weights = np.random.uniform(low=-.05, high=.05, size=(hidden_units,784))
input_bias = np.random.uniform(low=-.05, high=.05, size=(hidden_units))
hidden_weights = np.random.uniform(low=-.05, high=.05, size=(10,hidden_units))
hidden_bias = np.random.uniform(low=-.05, high=.05, size=(10))
#train model with 100 nodes
for i in range(51):
permutation = np.random.permutation(training_data.shape[0])
training_data = training_data[permutation]
training_label = training_label[permutation]
training_100_nodes[i] = network_accuracy(input_weights, input_bias, hidden_weights, hidden_bias, hidden_units, training_data, training_label)
test_100_nodes[i] = network_accuracy(input_weights, input_bias, hidden_weights, hidden_bias, hidden_units, test_data, test_label)
if (i == 50):
break
train_network(input_weights, input_bias, hidden_weights, hidden_bias, hidden_units, learning_rate, momentum, training_data, training_label)
matrix_3 = confusion_matrix(input_weights, input_bias, hidden_weights, hidden_bias, hidden_units, test_data, test_label)
#print confusion matrices
print("20 nodes:")
print(matrix_1)
print("50 nodes:")
print(matrix_2)
print("100 nodes:")
print(matrix_3)
#print graph
plt.figure()
plt.axis([0,50,0,1])
plt.plot(x, training_20_nodes, 'r', label='20 nodes')
plt.plot(x,test_20_nodes, 'r--')
plt.plot(x, training_50_nodes, 'g', label='50 nodes')
plt.plot(x,test_50_nodes, 'g--')
plt.plot(x,training_100_nodes, 'b', label='100 nodes')
plt.plot(x, test_100_nodes, 'b--')
plt.legend()
plt.show()
def experiment2(training_data, training_label, test_data, test_label):
hidden_units = 100
input_weights = np.random.uniform(low=-.05, high=.05, size=(hidden_units,784))
input_bias = np.random.uniform(low=-.05, high=.05, size=(hidden_units))
hidden_weights = np.random.uniform(low=-.05, high=.05, size=(10,hidden_units))
hidden_bias = np.random.uniform(low=-.05, high=.05, size=(10))
momentum = 0
learning_rate = 0.1
x = np.linspace(0,50,51)
training_0_momentum = np.zeros(51)
training_25_momentum = np.zeros(51)
training_50_momentum = np.zeros(51)
test_0_momentum = np.zeros(51)
test_25_momentum = np.zeros(51)
test_50_momentum = np.zeros(51)
#train model with 0 momentum
for i in range(51):
permutation =
|
np.random.permutation(training_data.shape[0])
|
numpy.random.permutation
|
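The helpers referenced above (read_training_data, network_accuracy, train_network, confusion_matrix) fall outside this excerpt; the sketch below is one plausible sigmoid forward pass and accuracy count compatible with the weight shapes used here, with every name hypothetical.
import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def forward_sketch(input_weights, input_bias, hidden_weights, hidden_bias, sample):
    # Two-layer network matching the (hidden_units, 784) and (10, hidden_units) shapes.
    hidden = sigmoid(np.dot(input_weights, sample) + input_bias)
    return sigmoid(np.dot(hidden_weights, hidden) + hidden_bias)

def accuracy_sketch(input_weights, input_bias, hidden_weights, hidden_bias, data, labels):
    # Fraction of samples whose arg-max output matches the label.
    correct = sum(
        np.argmax(forward_sketch(input_weights, input_bias, hidden_weights, hidden_bias, s)) == y
        for s, y in zip(data, labels)
    )
    return correct / len(data)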
import unittest
import numpy as np
from io import BytesIO
import h5py
from exetera.core import session
from exetera.core import fields
from exetera.core import persistence as per
from exetera.core import operations as ops
from exetera.core import utils
class TestOpsUtils(unittest.TestCase):
def test_chunks(self):
lc_it = iter(ops.chunks(54321, 10000))
self.assertTupleEqual(next(lc_it), (0, 10000))
self.assertTupleEqual(next(lc_it), (10000, 20000))
self.assertTupleEqual(next(lc_it), (20000, 30000))
self.assertTupleEqual(next(lc_it), (30000, 40000))
self.assertTupleEqual(next(lc_it), (40000, 50000))
self.assertTupleEqual(next(lc_it), (50000, 54321))
with self.assertRaises(StopIteration):
next(lc_it)
actual = list(ops.chunks(54321, 10000))
self.assertListEqual(actual,
[(0, 10000), (10000, 20000), (20000, 30000), (30000, 40000),
(40000, 50000), (50000, 54321)])
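# For reference, a generator consistent with the assertions above (a sketch,
# not the actual exetera.core.operations implementation) would be:
# def chunks(length, chunksize):
#     for start in range(0, length, chunksize):
#         yield (start, min(start + chunksize, length))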
def test_count_back(self):
self.assertEqual(ops.count_back(np.asarray([1, 2, 3, 4, 5], dtype=np.int32)), 4)
self.assertEqual(ops.count_back(np.asarray([1, 2, 3, 4, 4], dtype=np.int32)), 3)
self.assertEqual(ops.count_back(np.asarray([1, 2, 3, 3, 3], dtype=np.int32)), 2)
self.assertEqual(ops.count_back(np.asarray([1, 2, 2, 2, 2], dtype=np.int32)), 1)
self.assertEqual(ops.count_back(np.asarray([1, 1, 1, 1, 1], dtype=np.int32)), 0)
def test_next_chunk(self):
self.assertTupleEqual(ops.next_chunk(0, 4, 3), (0, 3))
self.assertTupleEqual(ops.next_chunk(0, 4, 4), (0, 4))
self.assertTupleEqual(ops.next_chunk(0, 4, 5), (0, 4))
self.assertTupleEqual(ops.next_chunk(4, 8, 3), (4, 7))
self.assertTupleEqual(ops.next_chunk(4, 8, 4), (4, 8))
self.assertTupleEqual(ops.next_chunk(4, 8, 5), (4, 8))
def test_calculate_chunk_decomposition(self):
def _impl(indices, chunk_size, expected):
actual = list()
ops.calculate_chunk_decomposition(0, len(indices)-1, indices, chunk_size, actual)
self.assertListEqual(actual, expected)
indices = [0, 500, 1000, 1500, 2000, 2500, 3000, 3500, 4000]
_impl(indices, 5000, [(0, 8)])
_impl(indices, 4000, [(0, 8)])
_impl(indices, 3999, [(0, 4), (4, 8)])
_impl(indices, 2000, [(0, 4), (4, 8)])
_impl(indices, 1999, [(0, 2), (2, 4), (4, 6), (6, 8)])
_impl(indices, 1000, [(0, 2), (2, 4), (4, 6), (6, 8)])
_impl(indices, 999, [(0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7), (7, 8)])
_impl(indices, 500, [(0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7), (7, 8)])
_impl(indices, 499, [(0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7), (7, 8)])
indices = [0, 1000, 2000, 2500, 3000, 3500, 4000]
_impl(indices, 5000, [(0, 6)])
_impl(indices, 4000, [(0, 6)])
_impl(indices, 3999, [(0, 3), (3, 6)])
_impl(indices, 2000, [(0, 1), (1, 3), (3, 6)])
_impl(indices, 1999, [(0, 1), (1, 3), (3, 6)])
_impl(indices, 1000, [(0, 1), (1, 2), (2, 3), (3, 4), (4, 6)])
_impl(indices, 999, [(0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6)])
_impl(indices, 500, [(0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6)])
_impl(indices, 499, [(0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6)])
indices = [0, 0, 0, 0, 1000, 1000, 1000]
_impl(indices, 1000, [(0, 6)])
_impl(indices, 999, [(0, 3), (3, 4), (4, 6)])
def test_get_valid_value_extents(self):
chunk = np.asarray([-1, -1, -1, -1, -1], dtype=np.int32)
first, last = ops.get_valid_value_extents(chunk, 0, len(chunk), -1)
self.assertListEqual([first, last], [-1, -1])
chunk = np.asarray([-1], dtype=np.int32)
first, last = ops.get_valid_value_extents(chunk, 0, len(chunk), -1)
self.assertListEqual([first, last], [-1, -1])
chunk = np.asarray([3], dtype=np.int32)
first, last = ops.get_valid_value_extents(chunk, 0, len(chunk), -1)
self.assertListEqual([first, last], [3, 3])
chunk = np.asarray([-1, -1, 3], dtype=np.int32)
first, last = ops.get_valid_value_extents(chunk, 0, len(chunk), -1)
self.assertListEqual([first, last], [3, 3])
chunk = np.asarray([3, -1, -1], dtype=np.int32)
first, last = ops.get_valid_value_extents(chunk, 0, len(chunk), -1)
self.assertListEqual([first, last], [3, 3])
chunk = np.asarray([3, -1, -1, 3], dtype=np.int32)
first, last = ops.get_valid_value_extents(chunk, 0, len(chunk), -1)
self.assertListEqual([first, last], [3, 3])
chunk = np.asarray([-1, 2, 3, -1, 4, 5, -1], dtype=np.int32)
first, last = ops.get_valid_value_extents(chunk, 0, len(chunk), -1)
self.assertListEqual([first, last], [2, 5])
chunk = np.asarray([2, 3, -1, 4, 5], dtype=np.int32)
first, last = ops.get_valid_value_extents(chunk, 0, len(chunk), -1)
self.assertListEqual([first, last], [2, 5])
chunk = np.asarray([2, 3, 4, 5], dtype=np.int32)
first, last = ops.get_valid_value_extents(chunk, 0, len(chunk), -1)
self.assertListEqual([first, last], [2, 5])
def test_next_map_subchunk(self):
map_values = np.asarray([-1, -1, -1, -1, -1, -1])
result = ops.next_map_subchunk(map_values, 0, -1, 4)
self.assertEqual(result, 6)
map_values = np.asarray([-1, 1, 10, 10, 11])
result = ops.next_map_subchunk(map_values, 0, -1, 4)
self.assertEqual(result, 2)
result = ops.next_map_subchunk(map_values, result, -1, 4)
self.assertEqual(result, 5)
map_values = np.asarray([0, 10, 10])
result = ops.next_map_subchunk(map_values, 0, -1, 4)
self.assertEqual(result, 1)
result = ops.next_map_subchunk(map_values, result, -1, 4)
self.assertEqual(result, 3)
map_values = np.asarray([0, 0, 10])
result = ops.next_map_subchunk(map_values, 0, -1, 4)
self.assertEqual(result, 2)
result = ops.next_map_subchunk(map_values, result, -1, 4)
self.assertEqual(result, 3)
map_values = np.asarray([0, 0, 0])
result = ops.next_map_subchunk(map_values, 0, -1, 4)
self.assertEqual(result, 3)
result = ops.next_map_subchunk(map_values, result, -1, 4)
self.assertEqual(result, 3)
map_values = np.asarray([1, 2, 3, 4])
result = ops.next_map_subchunk(map_values, 0, -1, 4)
self.assertEqual(result, 4)
class TestSafeMap(unittest.TestCase):
def _impl_safe_map_index_values(self, indices, values, map_indices,
expected_indices, expected_values, empty_value):
map_filter = map_indices != ops.INVALID_INDEX
actual_indices, actual_values =\
ops.safe_map_indexed_values(indices, values, map_indices, map_filter, empty_value)
self.assertTrue(np.array_equal(actual_indices, expected_indices))
self.assertTrue(np.array_equal(actual_values, expected_values))
def test_safe_map_index_values(self):
self._impl_safe_map_index_values(
np.asarray([0, 1, 3, 6, 10, 15, 15, 20, 24, 27, 29, 30], dtype=np.int32),
np.frombuffer(b'abbcccddddeeeeeggggghhhhiiijjk', dtype='S1'),
np.asarray([0, 4, 10, ops.INVALID_INDEX, 8, 2, 1, ops.INVALID_INDEX, 6, 5, 9]),
np.asarray([0, 1, 6, 7, 8, 11, 14, 16, 17, 22, 22, 24]),
np.frombuffer(b'aeeeeekxiiicccbbxgggggjj', dtype='S1'), b'x')
def test_safe_map_index_values_zero_empty(self):
self._impl_safe_map_index_values(
np.asarray([0, 1, 3, 6, 10, 15, 15, 20, 24, 27, 29, 30], dtype=np.int32),
np.frombuffer(b'abbcccddddeeeeeggggghhhhiiijjk', dtype='S1'),
np.asarray([0, 4, 10, ops.INVALID_INDEX, 8, 2, 1, ops.INVALID_INDEX, 6, 5, 9]),
np.asarray([0, 1, 6, 7, 7, 10, 13, 15, 15, 20, 20, 22]),
np.frombuffer(b'aeeeeekiiicccbbgggggjj', dtype='S1'), b'')
def _impl_safe_map_values(self, values, map_indices, expected_values, empty_value):
map_filter = map_indices != ops.INVALID_INDEX
actual_values = ops.safe_map_values(values, map_indices, map_filter, empty_value)
self.assertTrue(np.array_equal(actual_values, expected_values))
def test_safe_map_values(self):
self._impl_safe_map_values(
np.asarray([1, 3, 6, 10, 15, 21, 28, 36, 45, 55]),
np.asarray([1, 8, 2, 7, ops.INVALID_INDEX, 0, 9, 1, 8]),
np.asarray([3, 45, 6, 36, -1, 1, 55, 3, 45]), -1)
def test_safe_map_values_zero_empty(self):
self._impl_safe_map_values(
np.asarray([1, 3, 6, 10, 15, 21, 28, 36, 45, 55]),
np.asarray([1, 8, 2, 7, ops.INVALID_INDEX, 0, 9, 1, 8]),
np.asarray([3, 45, 6, 36, 0, 1, 55, 3, 45]), 0)
class TestAggregation(unittest.TestCase):
def test_apply_spans_indexed_field(self):
indices = np.asarray([0, 2, 4, 7, 10, 12, 14, 16, 18, 20, 22, 24], dtype=np.int32)
values = np.frombuffer(b'a1a2a2ab2ab2b1c1c2d2d1e1', dtype=np.int8)
spans = np.asarray([0, 3, 6, 8, 10, 11], dtype=np.int32)
dest = np.zeros(len(spans)-1, dtype=np.int32)
ops.apply_spans_index_of_min_indexed(spans, indices, values, dest)
self.assertListEqual(dest.tolist(), [0, 5, 6, 9, 10])
ops.apply_spans_index_of_max_indexed(spans, indices, values, dest)
self.assertListEqual(dest.tolist(), [2, 3, 7, 8, 10])
def test_non_indexed_apply_spans(self):
values = np.asarray([1, 2, 3, 3, 2, 1, 1, 2, 2, 1, 1], dtype=np.int32)
spans = np.asarray([0, 3, 6, 8, 10, 11], dtype=np.int32)
dest = np.zeros(len(spans)-1, dtype=np.int32)
ops.apply_spans_index_of_min(spans, values, dest)
self.assertTrue(np.array_equal(dest, np.asarray([0, 5, 6, 9, 10], dtype=np.int32)))
ops.apply_spans_index_of_max(spans, values, dest)
self.assertTrue(np.array_equal(dest, np.asarray([2, 3, 7, 8, 10], dtype=np.int32)))
ops.apply_spans_index_of_first(spans, dest)
self.assertTrue(np.array_equal(dest, np.asarray([0, 3, 6, 8, 10], dtype=np.int32)))
ops.apply_spans_index_of_last(spans, dest)
self.assertTrue(np.array_equal(dest, np.asarray([2, 5, 7, 9, 10], dtype=np.int32)))
def test_non_indexed_apply_spans_filter(self):
values = np.asarray([1, 2, 3, 3, 2, 1, 1, 2, 2, 1, 1], dtype=np.int32)
spans = np.asarray([0, 3, 6, 8, 10, 11], dtype=np.int32)
dest = np.zeros(len(spans)-1, dtype=np.int32)
flt = np.zeros(len(spans)-1, dtype=np.int32)
ops.apply_spans_index_of_min_filter(spans, values, dest, flt)
self.assertTrue(np.array_equal(dest, np.asarray([0, 5, 6, 9, 10], dtype=np.int32)))
self.assertTrue(np.array_equal(flt, np.asarray([1, 1, 1, 1, 1], dtype=bool)))
ops.apply_spans_index_of_max_filter(spans, values, dest, flt)
self.assertTrue(np.array_equal(dest, np.asarray([2, 3, 7, 8, 10], dtype=np.int32)))
self.assertTrue(np.array_equal(flt, np.asarray([1, 1, 1, 1, 1], dtype=bool)))
ops.apply_spans_index_of_first_filter(spans, dest, flt)
self.assertTrue(np.array_equal(dest, np.asarray([0, 3, 6, 8, 10], dtype=np.int32)))
self.assertTrue(np.array_equal(flt, np.asarray([1, 1, 1, 1, 1], dtype=bool)))
ops.apply_spans_index_of_last_filter(spans, dest, flt)
self.assertTrue(np.array_equal(dest, np.asarray([2, 5, 7, 9, 10], dtype=np.int32)))
self.assertTrue(np.array_equal(flt, np.asarray([1, 1, 1, 1, 1], dtype=bool)))
spans = np.asarray([0, 3, 3, 6, 8, 8, 10, 11], dtype=np.int32)
dest = np.zeros(len(spans)-1, dtype=np.int32)
flt = np.zeros(len(spans)-1, dtype=np.int32)
ops.apply_spans_index_of_min_filter(spans, values, dest, flt)
self.assertTrue(np.array_equal(dest, np.asarray([0, 0, 5, 6, 0, 9, 10], dtype=np.int32)))
self.assertTrue(np.array_equal(flt, np.asarray([1, 0, 1, 1, 0, 1, 1], dtype=bool)))
ops.apply_spans_index_of_max_filter(spans, values, dest, flt)
self.assertTrue(np.array_equal(dest, np.asarray([2, 0, 3, 7, 0, 8, 10], dtype=np.int32)))
self.assertTrue(np.array_equal(flt, np.asarray([1, 0, 1, 1, 0, 1, 1], dtype=bool)))
ops.apply_spans_index_of_first_filter(spans, dest, flt)
self.assertTrue(np.array_equal(dest, np.asarray([0, 0, 3, 6, 0, 8, 10], dtype=np.int32)))
self.assertTrue(np.array_equal(flt, np.asarray([1, 0, 1, 1, 0, 1, 1], dtype=bool)))
ops.apply_spans_index_of_last_filter(spans, dest, flt)
self.assertTrue(np.array_equal(dest, np.asarray([2, 0, 5, 7, 0, 9, 10], dtype=np.int32)))
self.assertTrue(np.array_equal(flt, np.asarray([1, 0, 1, 1, 0, 1, 1], dtype=bool)))
class TestOrderedMap(unittest.TestCase):
def test_ordered_map_valid_stream_old(self):
bio = BytesIO()
with session.Session() as s:
dst = s.open_dataset(bio, 'r+', 'dst')
hf = dst.create_dataframe('hf')
map_field = np.asarray([0, 0, 0, 1, 1, 3, 3, 3, 3, 5, 5, 5, 5,
-1, -1, 7, 7, 7],
dtype=np.int64)
data_field = np.asarray([-1, -2, -3, -4, -5, -6, -8, -9], dtype=np.int32)
f_map_field = s.create_numeric(hf, "map_field", "int64")
f_map_field.data.write(map_field)
f_data_field = s.create_numeric(hf, "data_field", "int32")
f_data_field.data.write(data_field)
result_field = np.zeros(len(map_field), dtype=np.int32)
ops.ordered_map_valid_stream_old(f_data_field, f_map_field, result_field, -1, chunksize=4)
expected = np.asarray([-1, -1, -1, -2, -2, -4, -4, -4, -4, -6, -6, -6, -6, 0, 0, -9, -9, -9],
dtype=np.int32)
self.assertTrue(np.array_equal(result_field, expected))
def test_ordered_map_valid_indexed(self):
s = session.Session()
src = fields.IndexedStringMemField(s)
src.data.write(['a', 'bb', 'ccc', 'dddd', 'eeeee'])
map = fields.NumericMemField(s, 'int32')
map.data.write(np.asarray([0, 2, 2, -1, 4, 4]))
map_ = map.data[0:4]
src_indices_ = src.indices[0:5]
src_values_ = src.values[src_indices_[0]:src_indices_[-1]]
result_i = np.zeros(4, dtype=np.int32)
result_v = np.zeros(32, dtype=np.uint8)
i_off, m_off, i, m, ri, rv, r_accum = 0, 0, 0, 0, 0, 0, 0
ops.ordered_map_valid_indexed_partial(map_, 0, 4, src_indices_, 0, 4, src_values_, 0,
result_i, result_v, -1,
m, ri, rv, r_accum)
# streaming - map right to left - neither unique
def test_ordered_map_right_to_left_partial(self):
i_off, j_off, i, j, r, ii, jj, iimax, jjmax, inner = 0, 0, 0, 0, 0, 0, 0, -1, -1, False
left = np.asarray([10, 20, 30, 40, 40, 50, 50], dtype=np.int32)
right = np.asarray([20, 30, 40, 40, 40, 60, 70], dtype=np.int32)
l_results = np.zeros(8, dtype=np.int32)
r_results = np.zeros(8, dtype=np.int32)
res = ops.generate_ordered_map_to_left_partial(
left, len(left), right, len(right), l_results, r_results,
-1, i_off, j_off, i, j, r, ii, jj, iimax, jjmax, inner)
self.assertTupleEqual(res, (3, 2, 8, 1, 2, 2, 3, True))
self.assertListEqual(r_results.tolist(), [-1, 0, 1, 2, 3, 4, 2, 3])
l_results = np.zeros(8, dtype=np.int32)
r_results = np.zeros(8, dtype=np.int32)
res = ops.generate_ordered_map_to_left_partial(
left, len(left), right, len(right), l_results, r_results,
-1, 0, 0, 3, 2, 0, 1, 2, 2, 3, True)
def test_generate_ordered_map_right_to_left_streaming_right_final(self):
left = fields.NumericMemField(None, 'int32')
left.data.write(np.asarray([10, 20, 30, 40, 40, 50, 50], dtype=np.int32))
right = fields.NumericMemField(None, 'int32')
right.data.write(np.asarray([20, 30, 40, 40, 40, 60, 70], dtype=np.int32))
l_result = fields.NumericMemField(None, 'int32')
r_result = fields.NumericMemField(None, 'int32')
l_expected = [0, 1, 2, 3, 3, 3, 4, 4, 4, 5, 6]
r_expected = [-1, 0, 1, 2, 3, 4, 2, 3, 4, -1, -1]
ops.generate_ordered_map_to_left_streamed(left, right, l_result, r_result, -1, chunksize=4)
self.assertListEqual(l_result.data[:].tolist(), l_expected)
self.assertListEqual(r_result.data[:].tolist(), r_expected)
def test_generate_ordered_map_right_to_left_streaming_left_final(self):
left = fields.NumericMemField(None, 'int32')
left.data.write(np.asarray([10, 20, 30, 40, 40, 50, 80], dtype=np.int32))
right = fields.NumericMemField(None, 'int32')
right.data.write(np.asarray([20, 30, 40, 40, 40, 60, 70], dtype=np.int32))
l_result = fields.NumericMemField(None, 'int32')
r_result = fields.NumericMemField(None, 'int32')
l_expected = [0, 1, 2, 3, 3, 3, 4, 4, 4, 5, 6]
r_expected = [-1, 0, 1, 2, 3, 4, 2, 3, 4, -1, -1]
ops.generate_ordered_map_to_left_streamed(left, right, l_result, r_result, -1, chunksize=4)
self.assertListEqual(l_result.data[:].tolist(), l_expected)
self.assertListEqual(r_result.data[:].tolist(), r_expected)
def test_generate_ordered_map_right_to_left_streaming_left_final_multi(self):
left = fields.NumericMemField(None, 'int32')
left.data.write(np.asarray([10, 20, 30, 40, 40, 50, 80, 90, 100, 110, 120, 130, 140, 150],
dtype=np.int32))
right = fields.NumericMemField(None, 'int32')
right.data.write(np.asarray([20, 30, 40, 40, 40, 60, 70], dtype=np.int32))
l_result = fields.NumericMemField(None, 'int32')
r_result = fields.NumericMemField(None, 'int32')
l_expected = [0, 1, 2, 3, 3, 3, 4, 4, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
r_expected = [-1, 0, 1, 2, 3, 4, 2, 3, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1]
ops.generate_ordered_map_to_left_streamed(left, right, l_result, r_result, -1, chunksize=4)
self.assertListEqual(l_result.data[:].tolist(), l_expected)
self.assertListEqual(r_result.data[:].tolist(), r_expected)
# streaming - map right to left - left unique
# -------------------------------------------
def test_generate_ordered_map_right_to_left_left_unique_streaming_right_final(self):
left = fields.NumericMemField(None, 'int32')
left.data.write(np.asarray([10, 20, 30, 40, 50], dtype=np.int32))
right = fields.NumericMemField(None, 'int32')
right.data.write(np.asarray([20, 20, 30, 40, 40, 40, 60, 60, 70], dtype=np.int32))
l_result = fields.NumericMemField(None, 'int32')
r_result = fields.NumericMemField(None, 'int32')
l_expected = [0, 1, 1, 2, 3, 3, 3, 4]
r_expected = [-1, 0, 1, 2, 3, 4, 5, -1]
ops.generate_ordered_map_to_left_left_unique_streamed(left, right,
l_result, r_result, -1, chunksize=4)
self.assertListEqual(l_result.data[:].tolist(), l_expected)
self.assertListEqual(r_result.data[:].tolist(), r_expected)
def test_generate_ordered_map_right_to_left_left_unique_streaming_left_final(self):
left = fields.NumericMemField(None, 'int32')
left.data.write(np.asarray([10, 20, 30, 40, 50, 80], dtype=np.int32))
right = fields.NumericMemField(None, 'int32')
right.data.write(np.asarray([20, 20, 30, 40, 40, 40, 60, 60, 70], dtype=np.int32))
l_result = fields.NumericMemField(None, 'int32')
r_result = fields.NumericMemField(None, 'int32')
l_expected = [0, 1, 1, 2, 3, 3, 3, 4, 5]
r_expected = [-1, 0, 1, 2, 3, 4, 5, -1, -1]
ops.generate_ordered_map_to_left_left_unique_streamed(left, right,
l_result, r_result, -1, chunksize=4)
self.assertListEqual(l_result.data[:].tolist(), l_expected)
self.assertListEqual(r_result.data[:].tolist(), r_expected)
def test_generate_ordered_map_right_to_left_left_unique_streaming_left_final_multi(self):
left = fields.NumericMemField(None, 'int32')
left.data.write(np.asarray([10, 20, 30, 40, 50, 80, 90, 100, 110, 120, 130, 140],
dtype=np.int32))
right = fields.NumericMemField(None, 'int32')
right.data.write(np.asarray([20, 20, 30, 40, 40, 40, 60, 60, 70], dtype=np.int32))
l_result = fields.NumericMemField(None, 'int32')
r_result = fields.NumericMemField(None, 'int32')
l_expected = [0, 1, 1, 2, 3, 3, 3, 4, 5, 6, 7, 8, 9, 10, 11]
r_expected = [-1, 0, 1, 2, 3, 4, 5, -1, -1, -1, -1, -1, -1, -1, -1]
ops.generate_ordered_map_to_left_left_unique_streamed(left, right,
l_result, r_result, -1, chunksize=4)
self.assertListEqual(l_result.data[:].tolist(), l_expected)
self.assertListEqual(r_result.data[:].tolist(), r_expected)
# streaming - map right to left - right unique
# --------------------------------------------
def test_generate_ordered_map_right_to_left_right_unique_streaming_right_final(self):
left = fields.NumericMemField(None, 'int32')
left.data.write(np.asarray([10, 20, 30, 40, 40, 50, 50], dtype=np.int32))
right = fields.NumericMemField(None, 'int32')
right.data.write(np.asarray([20, 30, 40, 60, 70], dtype=np.int32))
r_result = fields.NumericMemField(None, 'int32')
r_expected = [-1, 0, 1, 2, 2, -1, -1]
ops.generate_ordered_map_to_left_right_unique_streamed(left, right,
r_result, -1, chunksize=4)
self.assertListEqual(r_result.data[:].tolist(), r_expected)
def test_generate_ordered_map_right_to_left_right_unique_streaming_left_final(self):
left = fields.NumericMemField(None, 'int32')
left.data.write(np.asarray([10, 20, 30, 40, 40, 50, 50, 80], dtype=np.int32))
right = fields.NumericMemField(None, 'int32')
right.data.write(np.asarray([20, 30, 40, 60, 70], dtype=np.int32))
r_result = fields.NumericMemField(None, 'int32')
r_expected = [-1, 0, 1, 2, 2, -1, -1, -1]
ops.generate_ordered_map_to_left_right_unique_streamed(left, right,
r_result, -1, chunksize=4)
self.assertListEqual(r_result.data[:].tolist(), r_expected)
def test_generate_ordered_map_right_to_left_right_unique_streaming_left_final_multi(self):
left = fields.NumericMemField(None, 'int32')
left.data.write(np.asarray([10, 20, 30, 40, 40, 50, 50, 80, 90, 100, 110, 120, 130, 140, 150],
dtype=np.int32))
right = fields.NumericMemField(None, 'int32')
right.data.write(np.asarray([20, 30, 40, 60, 70], dtype=np.int32))
r_result = fields.NumericMemField(None, 'int32')
r_expected = [-1, 0, 1, 2, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1]
ops.generate_ordered_map_to_left_right_unique_streamed(left, right,
r_result, -1, chunksize=4)
self.assertListEqual(r_result.data[:].tolist(), r_expected)
# streaming - map right to left - both unique
# -------------------------------------------
def test_generate_ordered_map_right_to_left_both_unique_streaming_right_final(self):
left = fields.NumericMemField(None, 'int32')
left.data.write(np.asarray([10, 20, 30, 40, 50], dtype=np.int32))
right = fields.NumericMemField(None, 'int32')
right.data.write(np.asarray([20, 30, 40, 60, 70], dtype=np.int32))
l_result = fields.NumericMemField(None, 'int32')
r_result = fields.NumericMemField(None, 'int32')
# l_expected = [0, 1, 2, 3, 3, 3, 4, 4, 4, 5, 6]
r_expected = [-1, 0, 1, 2, -1]
ops.generate_ordered_map_to_left_both_unique_streamed(left, right,
r_result, -1, chunksize=4)
# self.assertListEqual(l_result.data[:].tolist(), l_expected)
self.assertListEqual(r_result.data[:].tolist(), r_expected)
def test_generate_ordered_map_right_to_left_both_unique_streaming_left_final(self):
left = fields.NumericMemField(None, 'int32')
left.data.write(np.asarray([10, 20, 30, 40, 50, 80], dtype=np.int32))
right = fields.NumericMemField(None, 'int32')
right.data.write(np.asarray([20, 30, 40, 60, 70], dtype=np.int32))
l_result = fields.NumericMemField(None, 'int32')
r_result = fields.NumericMemField(None, 'int32')
# l_expected = [0, 1, 2, 3, 3, 3, 4, 4, 4, 5, 6]
r_expected = [-1, 0, 1, 2, -1, -1]
ops.generate_ordered_map_to_left_both_unique_streamed(left, right,
r_result, -1, chunksize=4)
# self.assertListEqual(l_result.data[:].tolist(), l_expected)
self.assertListEqual(r_result.data[:].tolist(), r_expected)
def test_generate_ordered_map_right_to_left_both_unique_streaming_left_final_multi(self):
left = fields.NumericMemField(None, 'int32')
left.data.write(np.asarray([10, 20, 30, 40, 50, 80, 90, 100, 110, 120, 130, 140],
dtype=np.int32))
right = fields.NumericMemField(None, 'int32')
right.data.write(np.asarray([20, 30, 40, 60, 70], dtype=np.int32))
l_result = fields.NumericMemField(None, 'int32')
r_result = fields.NumericMemField(None, 'int32')
# l_expected = [0, 1, 2, 3, 3, 3, 4, 4, 4, 5, 6]
r_expected = [-1, 0, 1, 2, -1, -1, -1, -1, -1, -1, -1, -1]
ops.generate_ordered_map_to_left_both_unique_streamed(left, right,
r_result, -1, chunksize=4)
# self.assertListEqual(l_result.data[:].tolist(), l_expected)
self.assertListEqual(r_result.data[:].tolist(), r_expected)
# non-streaming - map right to left - both unique
# -----------------------------------------------
def test_ordered_map_to_right_both_unique(self):
raw_ids = [0, 1, 2, 3, 5, 6, 7, 9]
a_ids = np.asarray(raw_ids, dtype=np.int64)
b_ids = np.asarray([1, 2, 3, 4, 5, 7, 8, 9], dtype=np.int64)
results = np.zeros(len(b_ids), dtype=np.int64)
ops.generate_ordered_map_to_left_both_unique(b_ids, a_ids, results, -1)
expected = np.array([1, 2, 3, -1, 4, 6, -1, 7],
dtype=np.int64)
self.assertTrue(np.array_equal(results, expected))
def test_ordered_map_to_right_right_unique(self):
raw_ids = [0, 1, 2, 3, 5, 6, 7, 9]
a_ids = np.asarray(raw_ids, dtype=np.int64)
b_ids = np.asarray([1, 2, 3, 4, 5, 7, 8, 9], dtype=np.int64)
results = np.zeros(len(b_ids), dtype=np.int64)
ops.generate_ordered_map_to_left_right_unique(b_ids, a_ids, results, -1)
expected = np.array([1, 2, 3, -1, 4, 6, -1, 7],
dtype=np.int64)
self.assertTrue(np.array_equal(results, expected))
def test_ordered_map_to_right_right_unique_2(self):
a_ids = np.asarray([10, 20, 30, 40, 50], dtype=np.int64)
b_ids = np.asarray([20, 20, 30, 30, 60], dtype=np.int64)
results = np.zeros(len(b_ids), dtype=np.int64)
ops.generate_ordered_map_to_left_right_unique(b_ids, a_ids, results, -1)
expected = np.array([1, 1, 2, 2, -1],
dtype=np.int64)
self.assertListEqual(results.tolist(), expected.tolist())
def test_ordered_map_to_right_right_unique_3(self):
a_ids = np.asarray([10, 20, 30, 40, 60], dtype=np.int64)
b_ids = np.asarray([20, 20, 30, 30, 50], dtype=np.int64)
results = np.zeros(len(b_ids), dtype=np.int64)
ops.generate_ordered_map_to_left_right_unique(b_ids, a_ids, results, -1)
expected = np.array([1, 1, 2, 2, -1],
dtype=np.int64)
self.assertListEqual(results.tolist(), expected.tolist())
def test_ordered_map_to_right_left_unique_streamed(self):
bio = BytesIO()
with session.Session() as s:
dst = s.open_dataset(bio, 'r+', 'dst')
hf = dst.create_dataframe('hf')
a_ids = np.asarray([0, 1, 2, 3, 5, 6, 7, 8, 10, 11, 12, 13, 15, 16, 17, 18],
dtype=np.int64)
b_ids = np.asarray([0, 1, 1, 2, 4, 5, 5, 6, 8, 9, 9, 10, 12, 13, 13, 14,
16, 17, 17, 18], dtype=np.int64)
a_ids_f = s.create_numeric(hf, 'a_ids', 'int64')
a_ids_f.data.write(a_ids)
b_ids_f = s.create_numeric(hf, 'b_ids', 'int64')
b_ids_f.data.write(b_ids)
left_to_right_result = s.create_numeric(hf, 'left_result', 'int64')
ops.generate_ordered_map_to_left_right_unique_streamed_old(a_ids_f, b_ids_f,
left_to_right_result)
expected = np.asarray([0, 1, 3, -1, 5, 7, -1, 8, 11, -1, 12, 13, -1, 16, 17, 19])
self.assertTrue(np.array_equal(left_to_right_result.data[:], expected))
# streaming - inner - neither unique
def test_ordered_inner_map_streamed_right_final(self):
left = fields.NumericMemField(None, 'int32')
left.data.write(np.asarray([10, 20, 30, 30, 40, 40, 50, 50], dtype=np.int32))
right = fields.NumericMemField(None, 'int32')
right.data.write(np.asarray([20, 30, 40, 40, 40, 60, 70], dtype=np.int32))
l_result = fields.NumericMemField(None, 'int32')
r_result = fields.NumericMemField(None, 'int32')
l_expected = [1, 2, 3, 4, 4, 4, 5, 5, 5]
r_expected = [0, 1, 1, 2, 3, 4, 2, 3, 4]
ops.generate_ordered_map_to_inner_streamed(left, right, l_result, r_result, chunksize=4)
self.assertListEqual(l_result.data[:].tolist(), l_expected)
self.assertListEqual(r_result.data[:].tolist(), r_expected)
def test_ordered_inner_map_streaming_left_final(self):
left = fields.NumericMemField(None, 'int32')
left.data.write(np.asarray([10, 20, 30, 30, 40, 40, 50, 80], dtype=np.int32))
right = fields.NumericMemField(None, 'int32')
right.data.write(np.asarray([20, 30, 40, 40, 40, 60, 70], dtype=np.int32))
l_result = fields.NumericMemField(None, 'int32')
r_result = fields.NumericMemField(None, 'int32')
l_expected = [1, 2, 3, 4, 4, 4, 5, 5, 5]
r_expected = [0, 1, 1, 2, 3, 4, 2, 3, 4]
ops.generate_ordered_map_to_inner_streamed(left, right, l_result, r_result, chunksize=4)
self.assertListEqual(l_result.data[:].tolist(), l_expected)
self.assertListEqual(r_result.data[:].tolist(), r_expected)
# streaming - inner - left unique
def test_ordered_inner_map_left_unique_streamed_left_final(self):
left = fields.NumericMemField(None, 'int32')
left.data.write(np.asarray([10, 20, 30, 40, 50], dtype=np.int32))
right = fields.NumericMemField(None, 'int32')
right.data.write(np.asarray([20, 30, 40, 40, 40, 60, 70], dtype=np.int32))
l_result = fields.NumericMemField(None, 'int32')
r_result = fields.NumericMemField(None, 'int32')
l_expected = [1, 2, 3, 3, 3]
r_expected = [0, 1, 2, 3, 4]
ops.generate_ordered_map_to_inner_left_unique_streamed(left, right, l_result, r_result, -1, chunksize=4)
self.assertListEqual(l_result.data[:].tolist(), l_expected)
self.assertListEqual(r_result.data[:].tolist(), r_expected)
def test_ordered_inner_map_left_unique_streaming_right_final(self):
left = fields.NumericMemField(None, 'int32')
left.data.write(np.asarray([10, 20, 30, 40, 50, 80], dtype=np.int32))
right = fields.NumericMemField(None, 'int32')
right.data.write(np.asarray([20, 30, 40, 40, 40, 60, 70], dtype=np.int32))
l_result = fields.NumericMemField(None, 'int32')
r_result = fields.NumericMemField(None, 'int32')
l_expected = [1, 2, 3, 3, 3]
r_expected = [0, 1, 2, 3, 4]
ops.generate_ordered_map_to_inner_left_unique_streamed(left, right, l_result, r_result, -1, chunksize=4)
self.assertListEqual(l_result.data[:].tolist(), l_expected)
self.assertListEqual(r_result.data[:].tolist(), r_expected)
# streaming - inner - right unique
def test_ordered_inner_map_right_unique_streamed_left_final(self):
left = fields.NumericMemField(None, 'int32')
left.data.write(np.asarray([10, 20, 30, 30, 30, 40, 40, 50, 50], dtype=np.int32))
right = fields.NumericMemField(None, 'int32')
right.data.write(np.asarray([20, 30, 40, 60, 70], dtype=np.int32))
l_result = fields.NumericMemField(None, 'int32')
r_result = fields.NumericMemField(None, 'int32')
l_expected = [1, 2, 3, 4, 5, 6]
r_expected = [0, 1, 1, 1, 2, 2]
ops.generate_ordered_map_to_inner_right_unique_streamed(left, right, l_result, r_result, -1, chunksize=4)
self.assertListEqual(l_result.data[:].tolist(), l_expected)
self.assertListEqual(r_result.data[:].tolist(), r_expected)
def test_ordered_inner_map_right_unique_streaming_right_final(self):
left = fields.NumericMemField(None, 'int32')
left.data.write(np.asarray([10, 20, 30, 30, 30, 40, 40, 50, 80], dtype=np.int32))
right = fields.NumericMemField(None, 'int32')
right.data.write(np.asarray([20, 30, 40, 60, 70], dtype=np.int32))
l_result = fields.NumericMemField(None, 'int32')
r_result = fields.NumericMemField(None, 'int32')
l_expected = [1, 2, 3, 4, 5, 6]
r_expected = [0, 1, 1, 1, 2, 2]
ops.generate_ordered_map_to_inner_right_unique_streamed(left, right, l_result, r_result, -1, chunksize=4)
self.assertListEqual(l_result.data[:].tolist(), l_expected)
self.assertListEqual(r_result.data[:].tolist(), r_expected)
# streaming - inner - right unique
def test_ordered_inner_map_right_unique_streamed_left_final(self):
left = fields.NumericMemField(None, 'int32')
left.data.write(np.asarray([10, 20, 30, 40, 50, 50], dtype=np.int32))
right = fields.NumericMemField(None, 'int32')
right.data.write(np.asarray([20, 30, 40, 60, 70], dtype=np.int32))
l_result = fields.NumericMemField(None, 'int32')
r_result = fields.NumericMemField(None, 'int32')
l_expected = [1, 2, 3]
r_expected = [0, 1, 2]
ops.generate_ordered_map_to_inner_right_unique_streamed(left, right, l_result, r_result, -1, chunksize=4)
self.assertListEqual(l_result.data[:].tolist(), l_expected)
self.assertListEqual(r_result.data[:].tolist(), r_expected)
def test_ordered_inner_map_right_unique_streamed_left_final_2(self):
left = fields.NumericMemField(None, 'int32')
left.data.write(np.asarray([10, 20, 30, 50, 50, 60], dtype=np.int32))
right = fields.NumericMemField(None, 'int32')
right.data.write(np.asarray([20, 30, 40, 60, 70], dtype=np.int32))
l_result = fields.NumericMemField(None, 'int32')
r_result = fields.NumericMemField(None, 'int32')
l_expected = [1, 2, 5]
r_expected = [0, 1, 3]
ops.generate_ordered_map_to_inner_right_unique_streamed(left, right, l_result, r_result, -1, chunksize=4)
self.assertListEqual(l_result.data[:].tolist(), l_expected)
self.assertListEqual(r_result.data[:].tolist(), r_expected)
def test_ordered_inner_map_right_unique_streaming_right_final(self):
left = fields.NumericMemField(None, 'int32')
left.data.write(np.asarray([10, 20, 30, 40, 50, 80], dtype=np.int32))
right = fields.NumericMemField(None, 'int32')
right.data.write(np.asarray([20, 30, 40, 60, 70], dtype=np.int32))
l_result = fields.NumericMemField(None, 'int32')
r_result = fields.NumericMemField(None, 'int32')
l_expected = [1, 2, 3]
r_expected = [0, 1, 2]
ops.generate_ordered_map_to_inner_right_unique_streamed(left, right, l_result, r_result, -1, chunksize=4)
self.assertListEqual(l_result.data[:].tolist(), l_expected)
self.assertListEqual(r_result.data[:].tolist(), r_expected)
def test_ordered_inner_map_right_unique_streaming_right_final_2(self):
left = fields.NumericMemField(None, 'int32')
left.data.write(np.asarray([10, 20, 30, 50, 50, 60, 80], dtype=np.int32))
right = fields.NumericMemField(None, 'int32')
right.data.write(np.asarray([20, 30, 40, 60, 70], dtype=np.int32))
l_result = fields.NumericMemField(None, 'int32')
r_result = fields.NumericMemField(None, 'int32')
l_expected = [1, 2, 5]
r_expected = [0, 1, 3]
ops.generate_ordered_map_to_inner_right_unique_streamed(left, right, l_result, r_result, -1, chunksize=4)
self.assertListEqual(l_result.data[:].tolist(), l_expected)
self.assertListEqual(r_result.data[:].tolist(), r_expected)
# streaming - inner - both unique
def test_ordered_inner_map_both_unique_streamed_left_final(self):
left = fields.NumericMemField(None, 'int32')
left.data.write(np.asarray([10, 20, 30, 40, 50, 50], dtype=np.int32))
right = fields.NumericMemField(None, 'int32')
right.data.write(np.asarray([20, 30, 40, 60, 70], dtype=np.int32))
l_result = fields.NumericMemField(None, 'int32')
r_result = fields.NumericMemField(None, 'int32')
l_expected = [1, 2, 3]
r_expected = [0, 1, 2]
ops.generate_ordered_map_to_inner_both_unique_streamed(left, right, l_result, r_result, -1, chunksize=4)
self.assertListEqual(l_result.data[:].tolist(), l_expected)
self.assertListEqual(r_result.data[:].tolist(), r_expected)
def test_ordered_inner_map_both_unique_streamed_left_final_2(self):
left = fields.NumericMemField(None, 'int32')
left.data.write(np.asarray([10, 20, 30, 50, 50, 60], dtype=np.int32))
right = fields.NumericMemField(None, 'int32')
right.data.write(np.asarray([20, 30, 40, 60, 70], dtype=np.int32))
l_result = fields.NumericMemField(None, 'int32')
r_result = fields.NumericMemField(None, 'int32')
l_expected = [1, 2, 5]
r_expected = [0, 1, 3]
ops.generate_ordered_map_to_inner_both_unique_streamed(left, right, l_result, r_result, -1, chunksize=4)
self.assertListEqual(l_result.data[:].tolist(), l_expected)
self.assertListEqual(r_result.data[:].tolist(), r_expected)
def test_ordered_inner_map_both_unique_streaming_right_final(self):
left = fields.NumericMemField(None, 'int32')
left.data.write(np.asarray([10, 20, 30, 40, 50, 80], dtype=np.int32))
right = fields.NumericMemField(None, 'int32')
right.data.write(np.asarray([20, 30, 40, 60, 70], dtype=np.int32))
l_result = fields.NumericMemField(None, 'int32')
r_result = fields.NumericMemField(None, 'int32')
l_expected = [1, 2, 3]
r_expected = [0, 1, 2]
ops.generate_ordered_map_to_inner_both_unique_streamed(left, right, l_result, r_result, -1, chunksize=4)
self.assertListEqual(l_result.data[:].tolist(), l_expected)
self.assertListEqual(r_result.data[:].tolist(), r_expected)
def test_ordered_inner_map_both_unique_streaming_right_final_2(self):
left = fields.NumericMemField(None, 'int32')
left.data.write(np.asarray([10, 20, 30, 50, 50, 60, 80], dtype=np.int32))
right = fields.NumericMemField(None, 'int32')
right.data.write(np.asarray([20, 30, 40, 60, 70], dtype=np.int32))
l_result = fields.NumericMemField(None, 'int32')
r_result = fields.NumericMemField(None, 'int32')
l_expected = [1, 2, 5]
r_expected = [0, 1, 3]
ops.generate_ordered_map_to_inner_both_unique_streamed(left, right, l_result, r_result, -1, chunksize=4)
self.assertListEqual(l_result.data[:].tolist(), l_expected)
self.assertListEqual(r_result.data[:].tolist(), r_expected)
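# Reference sketch (hypothetical helper, not part of exetera.ops or this test
# class): a plain-Python O(n*m) inner map that reproduces the (left, right)
# index pairs the streamed inner-join tests above assert, in left-major order.
# The streamed implementations walk both sorted inputs chunk by chunk instead
# of comparing every pair.
def ordered_inner_map_ref(left, right):
    l_out, r_out = [], []
    for i, lv in enumerate(left):
        for j, rv in enumerate(right):
            if lv == rv:
                l_out.append(i)
                r_out.append(j)
    return l_out, r_out
# ordered_inner_map_ref([10, 20, 30, 30, 40, 40, 50, 50], [20, 30, 40, 40, 40, 60, 70])
# -> ([1, 2, 3, 4, 4, 4, 5, 5, 5], [0, 1, 1, 2, 3, 4, 2, 3, 4])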
# old inner / outer map functionality
# ===========================
def test_ordered_inner_map_result_size(self):
a_ids = np.asarray([0, 1, 2, 2, 3, 5, 5, 5, 6, 8], dtype=np.int64)
b_ids = np.asarray([1, 1, 2, 3, 5, 5, 6, 7, 8, 8, 8], dtype=np.int64)
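# Reference sketch (hypothetical helper, not exetera's implementation) for the
# semantics the *_to_left_*_right_unique tests above assert: each entry of
# `left` receives the index of its match in the unique, ordered `right`, or
# `invalid` where no match exists.
def map_to_left_right_unique_ref(left, right, invalid=-1):
    pos = np.searchsorted(right, left)  # candidate positions in `right`
    result = np.full(len(left), invalid, dtype=np.int64)
    in_range = pos < len(right)
    hit = in_range & (right[np.minimum(pos, len(right) - 1)] == left)
    result[hit] = pos[hit]
    return result
# map_to_left_right_unique_ref(np.asarray([20, 20, 30, 30, 60]),
#                              np.asarray([10, 20, 30, 40, 50]))  -> [1, 1, 2, 2, -1]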
'''
measure/prepare.py
TODO:
- data fitting
- data evaluation/interpolation
'''
import os
import sys
import time
import numpy as np
import scipy.linalg as linalg
import matplotlib.pyplot as plt
def weighted_average_filter(a, w, count=1,
overwrite_a=False, overwrite_w=False):
'''Weighted mean filter along the first dimension.
Parameters
----------
a : array_like (nD)
Data to filter along the first dimension.
w : array_like (1D)
Filter weights; the length must be odd. Normalized to sum to one.
count : int, optional
Number of times the filter is applied.
Returns
-------
a : numpy.ndarray (nD)
The array after the filtering has been applied `count` times.
'''
a = np.array(a, float, ndmin=1, copy=not overwrite_a)
w = np.array(w, float, ndmin=1, copy=not overwrite_w)
if len(w) % 2 != 1:
raise ValueError('Number of weights (`len(w)`) must be an odd number.')
a_tmp = []
w /= w.sum()
i0 = (len(w)-1)//2
for _ in range(count):
for i in range(i0, len(a)-i0):
a_tmp.append(w.dot(a[i-i0:i+i0+1]))
a[i0:len(a)-i0] = a_tmp; a_tmp.clear()
return a
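# Usage sketch (illustrative values only): a 3-point weighted mean turns each
# interior sample into a 0.25/0.5/0.25 blend of its neighbours and leaves the
# two edge samples untouched.
# >>> weighted_average_filter(np.array([0.0, 1.0, 0.0, 1.0, 0.0, 1.0]), [1.0, 2.0, 1.0])
# array([0. , 0.5, 0.5, 0.5, 0.5, 1. ])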
class ForceMeasurement:
def __init__(self, uk, fk):
'''
Parameters
----------
uk : 1D or 2D array of floats
Displacement vector (row) of a point at each time.
fk : 1D or 2D array of floats
Corresponding force vector (row) for each time.
Returns
-------
None
'''
uk = np.asarray(uk, float)
# ----------------------------------------------------------------------------
# - Open3D: www.open3d.org -
# ----------------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2018-2021 www.open3d.org
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# ----------------------------------------------------------------------------
import open3d as o3d
import numpy as np
import pytest
import sys
import os
sys.path.append(os.path.dirname(os.path.realpath(__file__)) + "/..")
from open3d_test import list_devices
@pytest.mark.parametrize("device", list_devices())
@pytest.mark.parametrize("dtype", [
o3d.core.Dtype.Int32, o3d.core.Dtype.Int64, o3d.core.Dtype.Float32,
o3d.core.Dtype.Float64
])
def test_matmul(device, dtype):
# Shape takes tuple, list or o3d.core.SizeVector
a = o3d.core.Tensor([[1, 2.5, 3], [4, 5, 6.2]], dtype=dtype, device=device)
b = o3d.core.Tensor([[7.5, 8, 9, 10], [11, 12, 13, 14], [15, 16, 17.8, 18]],
dtype=dtype,
device=device)
c = o3d.core.matmul(a, b)
assert c.shape == o3d.core.SizeVector([2, 4])
c_numpy = a.cpu().numpy() @ b.cpu().numpy()
np.testing.assert_allclose(c.cpu().numpy(), c_numpy, 1e-6)
# Non-contiguous test
a = a[:, 1:]
b = b[[0, 2], :]
c = a.matmul(b)
assert c.shape == o3d.core.SizeVector([2, 4])
c_numpy = a.cpu().numpy() @ b.cpu().numpy()
np.testing.assert_allclose(c.cpu().numpy(), c_numpy, 1e-6)
# Incompatible shape test
with pytest.raises(RuntimeError) as excinfo:
a = o3d.core.Tensor.zeros((3, 4, 5), dtype=dtype)
b = o3d.core.Tensor.zeros((4, 5), dtype=dtype)
c = a @ b
assert 'Tensor A must be 2D' in str(excinfo.value)
with pytest.raises(RuntimeError) as excinfo:
a = o3d.core.Tensor.zeros((3, 4), dtype=dtype)
b = o3d.core.Tensor.zeros((4, 5, 6), dtype=dtype)
c = a @ b
assert 'Tensor B must be 1D (vector) or 2D (matrix)' in str(
excinfo.value)
with pytest.raises(RuntimeError) as excinfo:
a = o3d.core.Tensor.zeros((3, 4), dtype=dtype)
b = o3d.core.Tensor.zeros((3, 7), dtype=dtype)
c = a @ b
assert 'mismatch with' in str(excinfo.value)
for shapes in [((0, 0), (0, 0)), ((2, 0), (0, 3)), ((0, 2), (2, 0)),
((2, 0), (0, 0))]:
with pytest.raises(RuntimeError) as excinfo:
a_shape, b_shape = shapes
a = o3d.core.Tensor.zeros(a_shape, dtype=dtype, device=device)
b = o3d.core.Tensor.zeros(b_shape, dtype=dtype, device=device)
c = a @ b
assert 'dimensions with zero' in str(excinfo.value)
@pytest.mark.parametrize("device", list_devices())
@pytest.mark.parametrize("dtype", [
o3d.core.Dtype.Int32, o3d.core.Dtype.Int64, o3d.core.Dtype.Float32,
o3d.core.Dtype.Float64
])
def test_det(device, dtype):
a = o3d.core.Tensor([[-5, 0, -1], [1, 2, -1], [-3, 4, 1]],
dtype=dtype,
device=device)
if dtype in [o3d.core.Dtype.Int32, o3d.core.Dtype.Int64]:
with pytest.raises(RuntimeError) as excinfo:
a.det()
assert 'Only tensors with Float32 or Float64 are supported' in str(
excinfo.value)
return
np.testing.assert_allclose(a.det(), np.linalg.det(a.cpu().numpy()))
# Non-2D
for shape in [(), [1], (3, 4, 5)]:
with pytest.raises(RuntimeError) as excinfo:
a = o3d.core.Tensor.zeros(shape, dtype=dtype, device=device)
a.det()
assert 'must be 2D' in str(excinfo.value)
# Non-square
with pytest.raises(RuntimeError) as excinfo:
a = o3d.core.Tensor.zeros((2, 3), dtype=dtype, device=device)
a.det()
assert 'must be square' in str(excinfo.value)
@pytest.mark.parametrize("device", list_devices())
@pytest.mark.parametrize("dtype", [
o3d.core.Dtype.Int32, o3d.core.Dtype.Int64, o3d.core.Dtype.Float32,
o3d.core.Dtype.Float64
])
def test_lu(device, dtype):
a = o3d.core.Tensor([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]],
dtype=dtype,
device=device)
if dtype in [o3d.core.Dtype.Int32, o3d.core.Dtype.Int64]:
with pytest.raises(RuntimeError) as excinfo:
o3d.core.lu(a)
assert 'Only tensors with Float32 or Float64 are supported' in str(
excinfo.value)
return
p, l, u = o3d.core.lu(a)
np.testing.assert_allclose(a.cpu().numpy(), (p @ l @ u).cpu().numpy(),
rtol=1e-5,
atol=1e-5)
p, l2, u2 = o3d.core.lu(a, True)
np.testing.assert_allclose(a.cpu().numpy(), (l2 @ u2).cpu().numpy(),
rtol=1e-5,
atol=1e-5)
# Non-2D
for shape in [(), [1], (3, 4, 5)]:
with pytest.raises(RuntimeError) as excinfo:
a_ = o3d.core.Tensor.zeros(shape, dtype=dtype, device=device)
o3d.core.lu(a_)
assert 'must be 2D' in str(excinfo.value)
@pytest.mark.parametrize("device", list_devices())
@pytest.mark.parametrize("dtype", [
o3d.core.Dtype.Int32, o3d.core.Dtype.Int64, o3d.core.Dtype.Float32,
o3d.core.Dtype.Float64
])
def test_lu_ipiv(device, dtype):
a = o3d.core.Tensor([[2, 3, 1], [3, 3, 1], [2, 4, 1]],
dtype=dtype,
device=device)
if dtype in [o3d.core.Dtype.Int32, o3d.core.Dtype.Int64]:
with pytest.raises(RuntimeError) as excinfo:
o3d.core.lu_ipiv(a)
assert 'Only tensors with Float32 or Float64 are supported' in str(
excinfo.value)
return
ipiv, a_lu = o3d.core.lu_ipiv(a)
a_lu_ = o3d.core.Tensor(
[[3.0, 3.0, 1.0], [0.666667, 2.0, 0.333333], [0.666667, 0.5, 0.166667]],
dtype=dtype)
ipiv_ = o3d.core.Tensor([2, 3, 3], dtype=dtype)
np.testing.assert_allclose(a_lu.cpu().numpy(),
a_lu_.numpy(),
rtol=1e-5,
atol=1e-5)
np.testing.assert_allclose(ipiv.cpu().numpy(), ipiv_.numpy(), 1e-6)
# Non-2D
for shape in [(), [1], (3, 4, 5)]:
with pytest.raises(RuntimeError) as excinfo:
a_ = o3d.core.Tensor.zeros(shape, dtype=dtype, device=device)
o3d.core.lu_ipiv(a_)
assert 'must be 2D' in str(excinfo.value)
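# Cross-check sketch (assumes SciPy is available; not part of the Open3D API):
# scipy.linalg.lu_factor on the same matrix returns the combined LU factors in a
# single array matching a_lu_ above, with 0-based pivot indices where the ipiv
# returned by o3d.core.lu_ipiv is 1-based.
# from scipy.linalg import lu_factor
# lu, piv = lu_factor(np.array([[2, 3, 1], [3, 3, 1], [2, 4, 1]], dtype=float))
# np.testing.assert_allclose(lu, a_lu_.numpy(), rtol=1e-5, atol=1e-5)
# assert (piv + 1 == ipiv.cpu().numpy()).all()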
@pytest.mark.parametrize("device", list_devices())
@pytest.mark.parametrize("dtype", [
o3d.core.Dtype.Int32, o3d.core.Dtype.Int64, o3d.core.Dtype.Float32,
o3d.core.Dtype.Float64
])
def test_inverse(device, dtype):
a = o3d.core.Tensor([[7, 2, 1], [0, 3, -1], [-3, 4, 2]],
dtype=dtype,
device=device)
if dtype in [o3d.core.Dtype.Int32, o3d.core.Dtype.Int64]:
with pytest.raises(RuntimeError) as excinfo:
o3d.core.inv(a)
assert 'Only tensors with Float32 or Float64 are supported' in str(
excinfo.value)
return
a_inv = o3d.core.inv(a)
a_inv_numpy = np.linalg.inv(a.cpu().numpy())
np.testing.assert_allclose(a_inv.cpu().numpy(),
a_inv_numpy,
rtol=1e-5,
atol=1e-5)
# Non-2D
for shape in [(), [1], (3, 4, 5)]:
with pytest.raises(RuntimeError) as excinfo:
a = o3d.core.Tensor.zeros(shape, dtype=dtype, device=device)
a.inv()
assert 'must be 2D' in str(excinfo.value)
# Non-square
with pytest.raises(RuntimeError) as excinfo:
a = o3d.core.Tensor.zeros((2, 3), dtype=dtype, device=device)
a.inv()
assert 'must be square' in str(excinfo.value)
a = o3d.core.Tensor([[1]], dtype=dtype, device=device)
np.testing.assert_allclose(a.cpu().numpy(), a.inv().cpu().numpy(), 1e-6)
# Singular condition
for a in [
o3d.core.Tensor([[0, 0], [0, 1]], dtype=dtype, device=device),
o3d.core.Tensor([[0]], dtype=dtype, device=device)
]:
with pytest.raises(RuntimeError) as excinfo:
a_inv = a.inv()
assert 'singular condition' in str(excinfo.value)
with pytest.raises(np.linalg.LinAlgError) as excinfo:
a_inv = np.linalg.inv(a.cpu().numpy())
assert 'Singular matrix' in str(excinfo.value)
@pytest.mark.parametrize("device", list_devices())
@pytest.mark.parametrize("dtype", [
o3d.core.Dtype.Int32, o3d.core.Dtype.Int64, o3d.core.Dtype.Float32,
o3d.core.Dtype.Float64
])
def test_svd(device, dtype):
a = o3d.core.Tensor([[2, 4], [1, 3], [0, 0], [0, 0]],
dtype=dtype,
device=device)
if dtype in [o3d.core.Dtype.Int32, o3d.core.Dtype.Int64]:
with pytest.raises(RuntimeError) as excinfo:
o3d.core.svd(a)
assert 'Only tensors with Float32 or Float64 are supported' in str(
excinfo.value)
return
u, s, vt = o3d.core.svd(a)
assert u.shape == o3d.core.SizeVector([4, 4])
assert s.shape == o3d.core.SizeVector([2])
assert vt.shape == o3d.core.SizeVector([2, 2])
# u and vt are orthogonal matrices
uut = u @ u.T()
eye_uut = o3d.core.Tensor.eye(4, dtype=dtype)
np.testing.assert_allclose(uut.cpu().numpy(),
eye_uut.cpu().numpy(),
atol=1e-6)
vvt = vt.T() @ vt
eye_vvt = o3d.core.Tensor.eye(2, dtype=dtype)
np.testing.assert_allclose(vvt.cpu().numpy(),
eye_vvt.cpu().numpy(),
atol=1e-6)
usvt = u[:, :2] @ o3d.core.Tensor.diag(s) @ vt
# The accuracy of svd over Float32 is very low...
np.testing.assert_allclose(a.cpu().numpy(), usvt.cpu().numpy(), atol=1e-5)
u = u.cpu().numpy()
s = s.cpu().numpy()
vt = vt.cpu().numpy()
u_numpy, s_numpy, vt_numpy = np.linalg.svd(a.cpu().numpy())
# u, vt can be different by several signs
np.testing.assert_allclose(np.abs(u), np.abs(u_numpy), 1e-6)
np.testing.assert_allclose(np.abs(vt), np.abs(vt_numpy), 1e-6)
np.testing.assert_allclose(s, s_numpy, 1e-6)
"""ResNet50 model for Keras.
Adapted from tf.keras.applications.resnet50.ResNet50().
This is ResNet model version 1.5.
Related papers/blogs:
- https://arxiv.org/abs/1512.03385
- https://arxiv.org/pdf/1603.05027v2.pdf
- http://torch.ch/blog/2016/02/04/resnets.html
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.python.keras import backend
from tensorflow.python.keras import initializers
from tensorflow.python.keras import models
from tensorflow.python.keras import regularizers
layers = tf.keras.layers
def _gen_l2_regularizer(use_l2_regularizer=True, l2_weight_decay=1e-4):
return regularizers.l2(l2_weight_decay) if use_l2_regularizer else None
def fixed_padding(inputs, kernel_size, data_format='channels_last'):
"""Pads the input along the spatial dimensions independently of input size.
Args:
inputs: `Tensor` of size `[batch, channels, height, width]` or
`[batch, height, width, channels]` depending on `data_format`.
kernel_size: `int` kernel size to be used for `conv2d` or `max_pool2d`
operations. Should be a positive integer.
data_format: `str` either "channels_first" for `[batch, channels, height,
width]` or "channels_last" for `[batch, height, width, channels]`.
Returns:
A padded `Tensor` of the same `data_format` with size either intact
(if `kernel_size == 1`) or padded (if `kernel_size > 1`).
"""
pad_total = kernel_size - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
if data_format == 'channels_first':
padded_inputs = tf.pad(inputs, [[0, 0], [0, 0],
[pad_beg, pad_end], [pad_beg, pad_end]])
else:
padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end],
[pad_beg, pad_end], [0, 0]])
return padded_inputs
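# Worked check of the padding arithmetic above (values illustrative): a
# kernel_size of 7 gives pad_total = 6, split as pad_beg = 3 / pad_end = 3 on
# each spatial dimension, so a channels_last input of shape [1, 224, 224, 3]
# comes out as [1, 230, 230, 3].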
def identity_block(input_tensor,
kernel_size,
filters,
stage,
block,
use_l2_regularizer=True,
batch_norm_decay=0.9,
batch_norm_epsilon=1e-5,
batch_norm_trainable=True):
"""The identity block is the block that has no conv layer at shortcut.
Args:
input_tensor: input tensor
kernel_size: default 3, the kernel size of middle conv layer at main path
filters: list of integers, the filters of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
use_l2_regularizer: whether to use L2 regularizer on Conv layer.
batch_norm_decay: Momentum of batch norm layers.
batch_norm_epsilon: Epsilon of batch norm layers.
Returns:
Output tensor for the block.
"""
filters1, filters2, filters3 = filters
if backend.image_data_format() == 'channels_last':
bn_axis = 3
else:
bn_axis = 1
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
# Kernel: 1
# Strides: 1
x = layers.Conv2D(
filters1,
kernel_size=(1, 1),
strides=(1, 1),
use_bias=False,
kernel_initializer='he_normal',
kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
name=conv_name_base + '2a')(
input_tensor)
x = layers.BatchNormalization(
axis=bn_axis,
momentum=batch_norm_decay,
epsilon=batch_norm_epsilon,
trainable=batch_norm_trainable,
name=bn_name_base + '2a')(
x)
x = layers.Activation('relu')(x)
# Kernel: 3
# Strides: 1
x = layers.Conv2D(
filters2,
kernel_size=kernel_size,
strides=(1,1),
padding='same',
use_bias=False,
kernel_initializer='he_normal',
kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
name=conv_name_base + '2b')(x)
x = layers.BatchNormalization(
axis=bn_axis,
momentum=batch_norm_decay,
epsilon=batch_norm_epsilon,
trainable=batch_norm_trainable,
name=bn_name_base + '2b')(x)
x = layers.Activation('relu')(x)
# Kernel: 1
# Strides: 1
x = layers.Conv2D(
filters3,
kernel_size=(1, 1),
strides=(1, 1),
use_bias=False,
kernel_initializer='he_normal',
kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
name=conv_name_base + '2c')(x)
x = layers.BatchNormalization(
axis=bn_axis,
momentum=batch_norm_decay,
epsilon=batch_norm_epsilon,
trainable=batch_norm_trainable,
name=bn_name_base + '2c')(x)
x = layers.add([x, input_tensor])
x = layers.Activation('relu')(x)
return x
def conv_block(input_tensor,
kernel_size,
filters,
stage,
block,
strides=(2, 2),
use_l2_regularizer=True,
batch_norm_decay=0.9,
batch_norm_epsilon=1e-5,
batch_norm_trainable=True):
"""A block that has a conv layer at shortcut.
Note that from stage 3,
the second conv layer at main path is with strides=(2, 2)
And the shortcut should have strides=(2, 2) as well
Args:
input_tensor: input tensor
kernel_size: default 3, the kernel size of middle conv layer at main path
filters: list of integers, the filters of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
strides: Strides for the second conv layer in the block.
use_l2_regularizer: whether to use L2 regularizer on Conv layer.
batch_norm_decay: Momentum of batch norm layers.
batch_norm_epsilon: Epsilon of batch norm layers.
Returns:
Output tensor for the block.
"""
filters1, filters2, filters3 = filters
if backend.image_data_format() == 'channels_last':
bn_axis = 3
else:
bn_axis = 1
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
# Kernel: 1
# Stride: Dynamic
shortcut = layers.Conv2D(
filters3,
kernel_size=(1, 1),
strides=strides,
use_bias=False,
kernel_initializer='he_normal',
kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
name=conv_name_base + '1')(input_tensor)
shortcut = layers.BatchNormalization(
axis=bn_axis,
momentum=batch_norm_decay,
epsilon=batch_norm_epsilon,
trainable=batch_norm_trainable,
name=bn_name_base + '1')(shortcut)
# Kernel: 1
# Stride: 1
x = layers.Conv2D(
filters1,
kernel_size=(1, 1),
strides=(1, 1),
use_bias=False,
kernel_initializer='he_normal',
kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
name=conv_name_base + '2a')(input_tensor)
x = layers.BatchNormalization(
axis=bn_axis,
momentum=batch_norm_decay,
epsilon=batch_norm_epsilon,
trainable=batch_norm_trainable,
name=bn_name_base + '2a')(x)
x = layers.Activation('relu')(x)
# Kernel: 3
# Strides: Dynamic
if strides[0] > 1:
x = fixed_padding(x, kernel_size, data_format=backend.image_data_format())
padding = 'valid'
else:
padding = 'same'
x = layers.Conv2D(
filters2,
kernel_size=kernel_size,
strides=strides,
padding=padding,
use_bias=False,
kernel_initializer='he_normal',
kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
name=conv_name_base + '2b')(x)
x = layers.BatchNormalization(
axis=bn_axis,
momentum=batch_norm_decay,
epsilon=batch_norm_epsilon,
trainable=batch_norm_trainable,
name=bn_name_base + '2b')(x)
x = layers.Activation('relu')(x)
# Kernel: 1
# Stride: 1
x = layers.Conv2D(
filters3,
kernel_size=(1, 1),
strides=(1, 1),
use_bias=False,
kernel_initializer='he_normal',
kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
name=conv_name_base + '2c')(x)
x = layers.BatchNormalization(
axis=bn_axis,
momentum=batch_norm_decay,
epsilon=batch_norm_epsilon,
trainable=batch_norm_trainable,
name=bn_name_base + '2c')(x)
x = layers.add([x, shortcut])
x = layers.Activation('relu')(x)
return x
def resnet50(
include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
batch_size=None,
use_l2_regularizer=True,
rescale_inputs=False,
batch_norm_decay=0.9,
batch_norm_epsilon=1e-5,
batch_norm_trainable=True,
width_multiplier=1):
"""Instantiates the ResNet50 architecture.
Args:
classes: `int` number of classes for image classification.
batch_size: Size of the batches for each step.
use_l2_regularizer: whether to use L2 regularizer on Conv/Dense layer.
rescale_inputs: whether to rescale inputs from 0 to 1.
batch_norm_decay: Momentum of batch norm layers.
batch_norm_epsilon: Epsilon of batch norm layers.
Returns:
A Keras model instance.
"""
if input_tensor is None:
img_input = layers.Input(shape=input_shape, batch_size=batch_size)
else:
if not tf.keras.backend.is_keras_tensor(input_tensor):
img_input = layers.Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
x = img_input
if backend.image_data_format() == 'channels_first':
x = layers.Permute((3, 1, 2))(x)
bn_axis = 1
else: # channels_last
bn_axis = 3
block_config = dict(
use_l2_regularizer=use_l2_regularizer,
batch_norm_decay=batch_norm_decay,
batch_norm_epsilon=batch_norm_epsilon,
batch_norm_trainable=batch_norm_trainable)
#x = layers.ZeroPadding2D(padding=(3, 3), name='conv1_pad')(x)
x = fixed_padding(x, 7, backend.image_data_format())
x = layers.Conv2D(
64 * width_multiplier,
kernel_size=(7, 7),
strides=(2, 2),
padding='valid',
use_bias=False,
kernel_initializer='he_normal',
kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
name='conv1')(x)
x = layers.BatchNormalization(
axis=bn_axis,
momentum=batch_norm_decay,
epsilon=batch_norm_epsilon,
trainable=batch_norm_trainable,
name='bn_conv1')(x)
x = layers.Activation('relu')(x)
x = layers.MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
x = conv_block(x, 3, np.array([64, 64, 256]) * width_multiplier, stage=2, block='a', strides=(1, 1), **block_config)
x = identity_block(x, 3, np.array([64, 64, 256]) * width_multiplier, stage=2, block='b', **block_config)
x = identity_block(x, 3, np.array([64, 64, 256]) * width_multiplier, stage=2, block='c', **block_config)
x = conv_block(x, 3, np.array([128, 128, 512]) * width_multiplier, stage=3, block='a', **block_config)
x = identity_block(x, 3, np.array([128, 128, 512]) * width_multiplier, stage=3, block='b', **block_config)
x = identity_block(x, 3, np.array([128, 128, 512]) * width_multiplier, stage=3, block='c', **block_config)
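# Spatial-size bookkeeping for the layers built so far, assuming a 224x224
# input: conv1 (stride 2) -> 112, the 3x3 max-pool (stride 2) -> 56, stage 2
# (strides=(1, 1)) stays at 56, and stage 3 (default strides=(2, 2)) -> 28.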
import numpy as np
import PIL, PIL.Image
import math
def imbounds(width, height, transform):
# calc output bounds by transforming source image pixel edges and diagonal distance, a la GDAL
# TODO: alternatively base this on an internal grid or simply all the pixels
# see https://github.com/OSGeo/gdal/blob/60d8a9ca09c466225508cb82e30a64aefa899a41/gdal/alg/gdaltransformer.cpp#L135
# NOTE: uses the forward transform to calc output bounds, and the backward transform for resampling,
# but for polynomial order >1 the backward transform is re-estimated on the points (the inverse doesn't work)
# and can be noticeably different from the forward transform, thus miscalculating the bounds
# TODO: maybe need a fix somehow...
# get sample pixels at intervals
imw,imh = width,height
cols = np.linspace(0, imw-1, 100)
rows = np.linspace(0, imh-1, 100)
cols,rows = np.meshgrid(cols, rows)
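# Hedged sketch of how such bounds are typically computed from the sample grid
# (illustrative only; `transform.predict` is a hypothetical forward-transform
# call, not a confirmed API):
# xs, ys = transform.predict(cols.flatten(), rows.flatten())
# xmin, ymin, xmax, ymax = xs.min(), ys.min(), xs.max(), ys.max()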
import cv2,torch
import numpy as np
from PIL import Image
import torchvision.transforms as T
import torch.nn.functional as F
import scipy.signal
mse2psnr = lambda x : -10. * torch.log(x) / torch.log(torch.Tensor([10.]))
def visualize_depth_numpy(depth, minmax=None, cmap=cv2.COLORMAP_JET):
"""
depth: (H, W)
"""
x = np.nan_to_num(depth) # change nan to 0
if minmax is None:
mi = np.min(x[x>0]) # get minimum positive depth (ignore background)
ma = np.max(x)
else:
mi,ma = minmax
x = (x-mi)/(ma-mi+1e-8) # normalize to 0~1
x = (255*x).astype(np.uint8)
x_ = cv2.applyColorMap(x, cmap)
return x_, [mi,ma]
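# Usage sketch (synthetic ramp, not a real depth map): colour-code a (H, W)
# depth array and keep the value range that was used for the normalization.
# depth = np.tile(np.linspace(0.5, 5.0, 64), (48, 1))
# coloured, (mi, ma) = visualize_depth_numpy(depth)   # uint8 BGR image, range [0.5, 5.0]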
def init_log(log, keys):
for key in keys:
log[key] = torch.tensor([0.0], dtype=float)
return log
def visualize_depth(depth, minmax=None, cmap=cv2.COLORMAP_JET):
"""
depth: (H, W)
"""
if type(depth) is not np.ndarray:
depth = depth.cpu().numpy()
x = np.nan_to_num(depth) # change nan to 0
if minmax is None:
mi = np.min(x[x>0]) # get minimum positive depth (ignore background)
import numpy as np
from math import *
from pylab import *
from matplotlib import pyplot as plt
mRatio = 2.0 #0.223/0.203 #Mass ratio M_1/M_2 = M_1 (because M_2 = 1)
L=4.5 # Distance between nuclei times number of nuclei (for lattice constant a=1)
#xMass = range(1,int(L))
xMass = np.arange(0.5,L,0.5)
#n = 2 # Normal mode number n => (1,..,L-1)
mass=0.203
def getK(n):
return float(n)*np.pi/float(L) # Constant k = (n*pi)/(L*a)
period = 0.72116470588235293
C = 0.04*(2.0*np.pi/period)**2
omega0 = np.sqrt(C*(1+mRatio)/mass)
def getAmplitudes(n,xMass):
k = getK(n)
AmplRatio = getAmplRatio(n)
fjMassa = len(xMass)
Ampl = [0]*fjMassa # Initialize array of amplitudes (one for each mass).
g=0
while g < fjMassa: #Calculate the actual amplitudes of each mass
Ampl[g] = AmplRatio*np.sin(k*xMass[g])
if (g+1) < fjMassa:
Ampl[g+1] = np.sin(k*xMass[g+1])
#<NAME>
#(cc) <EMAIL>
import numpy as np
import pandas as pd
# standard parameters after Carsel & Parrish 1988
carsel=pd.DataFrame(
[[ 'C', 30., 15., 55., 0.068, 0.38, 0.008*100., 1.09, 0.200/360000.],
[ 'CL', 37., 30., 33., 0.095, 0.41, 0.019*100., 1.31, 0.258/360000.],
[ 'L', 40., 40., 20., 0.078, 0.43, 0.036*100., 1.56, 1.042/360000.],
[ 'LS', 13., 81., 6., 0.057, 0.43, 0.124*100., 2.28, 14.592/360000.],
[ 'S', 4., 93., 3., 0.045, 0.43, 0.145*100., 2.68, 29.700/360000.],
[ 'SC', 11., 48., 41., 0.100, 0.38, 0.027*100., 1.23, 0.121/360000.],
[ 'SCL', 19., 54., 27., 0.100, 0.39, 0.059*100., 1.48, 1.308/360000.],
[ 'SI', 85., 6., 9., 0.034, 0.46, 0.016*100., 1.37, 0.250/360000.],
[ 'SIC', 48., 6., 46., 0.070, 0.36, 0.005*100., 1.09, 0.021/360000.],
['SICL', 59., 8., 33., 0.089, 0.43, 0.010*100., 1.23, 0.071/360000.],
[ 'SIL', 65., 17., 18., 0.067, 0.45, 0.020*100., 1.41, 0.450/360000.],
[ 'SL', 26., 63., 11., 0.065, 0.41, 0.075*100., 1.89, 4.421/360000.]],
columns=['Typ','Silt','Sand','Clay','thr','ths','alpha','n','ks'],index=np.arange(12).astype(int)+1)
#DEBUG: type errors occur in the calculations.
#This should be fixed at some point. For now, I override the warnings:
np.seterr(all='ignore')
# conversions
def ku_psi(psi, ks, alpha, n, m=None, l=0.5):
#Calculate unsaturated hydraulic conductivity (ku) from matrix head (psi)
if m is None:
m=1.-1./n
v = 1. + (alpha*np.abs(psi))**n
ku = ks* v**(-1.*m*l) * (1. - (1. - 1/v)**m)**2
return ku
def ku_thst(thst, ks, alpha, n, m=None, l=0.5):
#Calculate unsaturated hydraulic conductivity (ku) from relative saturation (theta*)
if m is None:
m=1.-1./n
ku = ks*thst**l * (1 - (1-thst**(1/m))**m)**2#
return ku
def ku_theta(theta, ths, thr, ks, alpha, n, m=None):
#Calculate unsaturated hydraulic conductivity (ku) from soil moisture (theta)
if m is None:
m=1.-1./n
th_star=thst_theta(theta,ths,thr)
ku = ku_thst(th_star,ks, alpha, n, m)
return ku
def thst_theta(theta,ths,thr):
#Calculate relative saturation (theta*) from soil moisture (theta)
th_star=(theta-thr)/(ths-thr) #
return th_star
def theta_thst(th_star,ths,thr):
#Calculate soil moisture (theta) from relative saturation (theta*)
theta=th_star*(ths-thr)+thr
return theta
def theta_psi(psi,ths,thr,alpha,n,m=None):
#Calculate soil moisture (theta) from matrix head (psi)
if m is None:
m=1.-1./n
theta=theta_thst(thst_psi(psi,alpha,n,m),ths,thr)
return theta
def psi_thst(th_star,alpha,n,m=None):
#Calculate matrix head (psi) from relative saturation (theta*)
if m is None:
m=1.-1./n
psi = -1./alpha * ( (1-th_star**(1/m))/(th_star**(1/m)) )**(1./n)
if (np.iterable(psi)):
if any(np.isinf(psi)):
if type(alpha)==float:
psi[np.isinf(psi)]=(-1./alpha * ( (1-0.98**(1/m))/(0.98**(1/m)) )**(1./n))
else:
psi[np.isinf(psi)]=(-1./alpha * ( (1-0.98**(1/m))/(0.98**(1/m)) )**(1./n))[np.isinf(psi)]
return psi
def psi_theta(theta,ths,thr,alpha,n,m=None):
#Calculate matrix head (psi) from soil moisture (theta)
if m is None:
m=1.-1./n
th_star=thst_theta(theta,ths,thr)
psi= -1. * ( (1 - th_star**(1./m)) / (th_star**(1./m)) )**(1./n) / alpha
if (np.iterable(psi)):
if any(np.isinf(psi)):
if type(alpha)==float:
psi[np.isinf(psi)]=(-1./alpha * ( (1-0.98**(1/m))/(0.98**(1/m)) )**(1./n))
else:
psi[np.isinf(psi)]=(-1./alpha * ( (1-0.98**(1/m))/(0.98**(1/m)) )**(1./n))[np.isinf(psi)]
return psi
def thst_psi(psi,alpha,n,m=None):
#Calculate relative saturation (theta*) from matrix head (psi)
if m is None:
m=1.-1./n
th_star = (1./(1.+(alpha*np.abs(psi))**n))**m
return th_star
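# Usage sketch (parameter row chosen purely for illustration): soil moisture of
# the Carsel & Parrish loam ('L') at a matric head of -100, in the same length
# unit as 1/alpha, via the van Genuchten retention functions above.
# loam = carsel[carsel.Typ == 'L'].iloc[0]
# theta = theta_psi(-100., loam['ths'], loam['thr'], loam['alpha'], loam['n'])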
import numpy as np
import pandas as pd
import h5py
from numpy.lib.arraysetops import isin
from scipy.special import erf
from scipy.special import erf
from scipy.signal import find_peaks, convolve
from math import floor, ceil
import time
import matplotlib.pyplot as plt
import multiprocessing as mp
#================================= Wavelet functions =================================#
def ricker(scale=10, N=1, window=1, dt=1):
resolution = scale/dt
# print(resolution)
length = int((10*window)*resolution)
a = resolution/1.25187536
t = np.arange(length)
s = 2/(np.sqrt(3*a)*np.pi**(1/4))*(1-(t-length/2)**2/a**2)\
*np.exp(-(t-length/2)**2/(2*a**2))
s_square_norm = np.trapz(s**2, dx=1)
s -= np.mean(s)
# return s*(s_square_norm**2)
return s/np.sqrt(s_square_norm)
def morlet(scale=10, N=6, window=1, is_complex=False, dt=1):
resolution = scale/dt
length = int(2*(N+4)*window*resolution)
t = np.arange(length)
sigma = length/(10*window)
s_exp = np.exp(-(t-length/2)**2/(2*sigma**2))
if (is_complex):
s_sin = np.exp(1j*(2*np.pi/resolution*(t-length/2)-np.pi*(0.75-N%2)))
else:
s_sin = np.sin((2*np.pi/resolution*(t-length/2)-np.pi*(0.5-N%2)))
s = s_exp*s_sin
s -= np.mean(s)
s_square_norm = np.trapz(np.abs(s)**2, dx=1)
return s/np.sqrt(s_square_norm)
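# Normalization note: both generators divide by sqrt(np.trapz(|s|**2, dx=1)), so
# the returned wavelets carry (approximately) unit energy. morlet removes the
# mean before computing the norm, so np.trapz(np.abs(w)**2) is 1 by construction;
# ricker removes the mean after the norm is computed, so its returned energy is
# only approximately 1.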
def skew_normal(x, mu, sigma, alpha=0):
# mean = mu - sigma*alpha/np.sqrt(1+alpha**2)*np.sqrt(2/np.pi)
delta = alpha/(np.sqrt(1+alpha**2))
mu_z = np.sqrt(2/np.pi)*delta
sigma_z = np.sqrt(1-mu_z**2)
gamma_1 = (4-np.pi)/2*(delta*np.sqrt(2/np.pi))**3/((1-2*delta**2/np.pi)**(3/2))
if alpha == 0:
m_0 = 0
else:
m_0 = mu_z - gamma_1*sigma_z/2 - np.sign(alpha)/2*np.exp(-2*np.pi/np.abs(alpha))
mode = mu + sigma*m_0
xi = mu - sigma*m_0
phi = 1/np.sqrt(2*np.pi)
# Copyright 2019 <NAME>, <NAME> and <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import scipy.sparse as sp
from numpy.testing import (
run_module_suite, assert_equal, assert_almost_equal
)
import normalized_matrix as nm
class TestNormalizedMatrix(object):
s = np.matrix([[1.0, 2.0], [4.0, 3.0], [5.0, 6.0], [8.0, 7.0], [9.0, 1.0]])
k = [np.array([0, 1, 1, 0, 1])]
r = [np.matrix([[1.1, 2.2], [3.3, 4.4]])]
m = np.matrix([[1.0, 2.0, 1.1, 2.2],
[4.0, 3.0, 3.3, 4.4],
[5.0, 6.0, 3.3, 4.4],
[8.0, 7.0, 1.1, 2.2],
[9.0, 1.0, 3.3, 4.4]])
n_matrix = nm.NormalizedMatrix(s, r, k)
def test_add(self):
n_matrix = self.n_matrix
local_matrix = n_matrix + 2
assert_equal(local_matrix.b, 2)
local_matrix = 3 + n_matrix
assert_equal(local_matrix.b, 3)
def test_sub(self):
n_matrix = self.n_matrix
local_matrix = n_matrix - 2
assert_equal(local_matrix.b, -2)
local_matrix = 3 - n_matrix
assert_equal(local_matrix.a, -1)
assert_equal(local_matrix.b, 3)
def test_mul(self):
n_matrix = self.n_matrix
local_matrix = n_matrix * 2
assert_equal(local_matrix.a, 2)
local_matrix = 3 * n_matrix
assert_equal(local_matrix.a, 3)
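# Reading of the assertions above (an inference from this test file, not from
# the normalized_matrix source): scalar arithmetic appears to be recorded lazily
# as an affine transform a * M + b on the factorized matrix, so `n_matrix * 2`
# stores a == 2 and `3 - n_matrix` stores a == -1, b == 3 without materializing
# the joined matrix m.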
# -*- coding: utf-8 -*-
"""
Academy Color Encoding System - Log Encodings
=============================================
Defines the *Academy Color Encoding System* (ACES) log encodings:
- :func:`colour.models.log_encoding_ACESproxy`
- :func:`colour.models.log_decoding_ACESproxy`
- :func:`colour.models.log_encoding_ACEScc`
- :func:`colour.models.log_decoding_ACEScc`
- :func:`colour.models.log_encoding_ACEScct`
- :func:`colour.models.log_decoding_ACEScct`
References
----------
- :cite:`TheAcademyofMotionPictureArtsandSciences2014q` : The Academy of
Motion Picture Arts and Sciences, Science and Technology Council, & Academy
Color Encoding System (ACES) Project Subcommittee. (2014). Technical
Bulletin TB-2014-004 - Informative Notes on SMPTE ST 2065-1 - Academy Color
Encoding Specification (ACES). Retrieved from
https://github.com/ampas/aces-dev/tree/master/documents
- :cite:`TheAcademyofMotionPictureArtsandSciences2014r` : The Academy of
Motion Picture Arts and Sciences, Science and Technology Council, & Academy
Color Encoding System (ACES) Project Subcommittee. (2014). Technical
Bulletin TB-2014-012 - Academy Color Encoding System Version 1.0 Component
Names. Retrieved from
https://github.com/ampas/aces-dev/tree/master/documents
- :cite:`TheAcademyofMotionPictureArtsandSciences2014s` : The Academy of
Motion Picture Arts and Sciences, Science and Technology Council, & Academy
Color Encoding System (ACES) Project Subcommittee. (2014). Specification
S-2013-001 - ACESproxy, an Integer Log Encoding of ACES Image Data.
Retrieved from https://github.com/ampas/aces-dev/tree/master/documents
- :cite:`TheAcademyofMotionPictureArtsandSciences2014t` : The Academy of
Motion Picture Arts and Sciences, Science and Technology Council, & Academy
Color Encoding System (ACES) Project Subcommittee. (2014). Specification
S-2014-003 - ACEScc, A Logarithmic Encoding of ACES Data for use within
Color Grading Systems. Retrieved from
https://github.com/ampas/aces-dev/tree/master/documents
- :cite:`TheAcademyofMotionPictureArtsandSciences2016c` : The Academy of
Motion Picture Arts and Sciences, Science and Technology Council, & Academy
Color Encoding System (ACES) Project. (2016). Specification S-2016-001 -
ACEScct, A Quasi-Logarithmic Encoding of ACES Data for use within Color
Grading Systems. Retrieved October 10, 2016, from
https://github.com/ampas/aces-dev/tree/v1.0.3/documents
- :cite:`TheAcademyofMotionPictureArtsandSciencese` : The Academy of Motion
Picture Arts and Sciences, Science and Technology Council, & Academy Color
Encoding System (ACES) Project Subcommittee. (n.d.). Academy Color Encoding
System. Retrieved February 24, 2014, from
http://www.oscars.org/science-technology/council/projects/aces.html
"""
from __future__ import division, unicode_literals
import numpy as np
from colour.utilities import (Structure, as_float, as_int, from_range_1,
to_domain_1)
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2020 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '<EMAIL>'
__status__ = 'Production'
__all__ = [
'ACES_PROXY_10_CONSTANTS', 'ACES_PROXY_12_CONSTANTS',
'ACES_PROXY_CONSTANTS', 'ACES_CCT_CONSTANTS', 'log_encoding_ACESproxy',
'log_decoding_ACESproxy', 'log_encoding_ACEScc', 'log_decoding_ACEScc',
'log_encoding_ACEScct', 'log_decoding_ACEScct'
]
ACES_PROXY_10_CONSTANTS = Structure(
CV_min=64,
CV_max=940,
steps_per_stop=50,
mid_CV_offset=425,
mid_log_offset=2.5)
"""
*ACESproxy* 10 bit colourspace constants.
ACES_PROXY_10_CONSTANTS : Structure
"""
ACES_PROXY_12_CONSTANTS = Structure(
CV_min=256,
CV_max=3760,
steps_per_stop=200,
mid_CV_offset=1700,
mid_log_offset=2.5)
"""
*ACESproxy* 12 bit colourspace constants.
ACES_PROXY_12_CONSTANTS : Structure
"""
ACES_PROXY_CONSTANTS = {
10: ACES_PROXY_10_CONSTANTS,
12: ACES_PROXY_12_CONSTANTS
}
"""
Aggregated *ACESproxy* colourspace constants.
ACES_PROXY_CONSTANTS : dict
**{10, 12}**
"""
ACES_CCT_CONSTANTS = Structure(
X_BRK=0.0078125,
Y_BRK=0.155251141552511,
A=10.5402377416545,
B=0.0729055341958355)
"""
*ACEScct* colourspace constants.
ACES_CCT_CONSTANTS : Structure
"""
# pylint: disable=W0102
def log_encoding_ACESproxy(lin_AP1,
bit_depth=10,
out_int=False,
constants=ACES_PROXY_CONSTANTS):
"""
Defines the *ACESproxy* colourspace log encoding curve / opto-electronic
transfer function.
Parameters
----------
lin_AP1 : numeric or array_like
*lin_AP1* value.
bit_depth : int, optional
**{10, 12}**,
*ACESproxy* bit depth.
out_int : bool, optional
Whether to return value as integer code value or float equivalent of a
code value at a given bit depth.
constants : Structure, optional
*ACESproxy* constants.
Returns
-------
numeric or ndarray
*ACESproxy* non-linear value.
Notes
-----
+----------------+-----------------------+---------------+
| **Domain \\*** | **Scale - Reference** | **Scale - 1** |
+================+=======================+===============+
| ``lin_AP1`` | [0, 1] | [0, 1] |
+----------------+-----------------------+---------------+
+----------------+-----------------------+---------------+
| **Range \\*** | **Scale - Reference** | **Scale - 1** |
+================+=======================+===============+
| ``ACESproxy`` | [0, 1] | [0, 1] |
+----------------+-----------------------+---------------+
\\* This definition has an output integer switch, thus the domain-range
scale information is only given for the floating point mode.
References
----------
:cite:`TheAcademyofMotionPictureArtsandSciences2014q`,
:cite:`TheAcademyofMotionPictureArtsandSciences2014r`,
:cite:`TheAcademyofMotionPictureArtsandSciences2014s`,
:cite:`TheAcademyofMotionPictureArtsandSciencese`
Examples
--------
>>> log_encoding_ACESproxy(0.18) # doctest: +ELLIPSIS
0.4164222...
>>> log_encoding_ACESproxy(0.18, out_int=True)
426
"""
lin_AP1 = to_domain_1(lin_AP1)
constants = constants[bit_depth]
CV_min = np.resize(constants.CV_min, lin_AP1.shape)
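# Hedged sketch of how the encoding continues (not the library's exact code):
# per S-2013-001, linear AP1 values above 2**-9.72 map to
#     CV = mid_CV_offset + (log2(lin_AP1) + mid_log_offset) * steps_per_stop
# rounded and clamped to [CV_min, CV_max]; smaller inputs collapse to CV_min.
# Worked example with the 10-bit constants and the doctest input 0.18:
# (log2(0.18) + 2.5) * 50 + 425 ~= 426.3 -> code value 426, and
# 426 / (2**10 - 1) ~= 0.4164222, matching the doctest above.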
# Copyright (C) 2022 <NAME>
#
# SPDX-License-Identifier: MIT
import numpy as np
import pytest
import ufl
from dolfinx.cpp.mesh import to_type
from dolfinx.io import XDMFFile
import dolfinx.fem as _fem
from dolfinx.graph import create_adjacencylist
from dolfinx.mesh import (CellType, create_mesh, locate_entities_boundary, meshtags)
from mpi4py import MPI
import dolfinx_contact
import dolfinx_contact.cpp
def create_functionspaces(ct, gap, delta):
''' This is a helper function to create the two-element function spaces
for custom assembly using quads, triangles, hexes and tetrahedra'''
cell_type = to_type(ct)
if cell_type == CellType.quadrilateral:
x_1 = np.array([[0, 0], [0.8, 0], [0.1, 1.3], [0.7, 1.2]])
x_2 = np.array([[0, 0], [0.8, 0], [-0.1, -1.2], [0.8, -1.1]])
for point in x_2:
point[0] += delta
point[1] -= gap
x_3 = np.array([x_2[0].copy() + [1.6, 0], x_2[2].copy() + [1.6, 0]])
x = np.vstack([x_1, x_2, x_3])
cells = np.array([[0, 1, 2, 3], [4, 5, 6, 7], [5, 8, 7, 9]], dtype=np.int32)
elif cell_type == CellType.triangle:
x = np.array([[0, 0, 0], [0.8, 0, 0], [0.3, 1.3, 0.0], [
0 + delta, -gap, 0], [0.8 + delta, -gap, 0], [0.4 + delta, -1.2 - gap, 0.0]])
for point in x:
point[2] = 3 * point[0] + 2 * point[1] # plane given by z = 3x +2y
cells = np.array([[0, 1, 2], [3, 4, 5]], dtype=np.int32)
elif cell_type == CellType.tetrahedron:
x = np.array([[0, 0, 0], [1.1, 0, 0], [0.3, 1.0, 0], [1, 1.2, 1.5], [
0 + delta, 0, -gap], [1.1 + delta, 0, -gap], [0.3 + delta, 1.0, -gap], [0.8 + delta, 1.2, -1.6 - gap]])
cells = np.array([[0, 1, 2, 3], [4, 5, 6, 7]], dtype=np.int32)
elif cell_type == CellType.hexahedron:
x_1 = np.array([[0, 0, 0], [1.1, 0, 0], [0.1, 1, 0], [1, 1.2, 0],
[0, 0, 1.2], [1.0, 0, 1], [0, 1, 1], [1, 1, 1]])
x_2 = np.array([[0, 0, -1.2], [1.0, 0, -1.3], [0.1, 1, -1], [1, 1, -1],
[0, 0, 0], [1.1, 0, 0], [0.1, 1, 0], [1, 1.2, 0]])
for point in x_2:
point[0] += delta
point[2] -= gap
x_3 = np.array([x_2[0].copy() + [2.0, 0, 0], x_2[2].copy() + [2.0, 0, 0],
x_2[4].copy() + [2.0, 0, 0], x_2[6].copy() + [2.0, 0, 0]])
x = np.vstack([x_1, x_2, x_3])
cells = np.array([[0, 1, 2, 3, 4, 5, 6, 7], [8, 9, 10, 11, 12, 13, 14, 15],
[9, 16, 10, 17, 13, 18, 15, 19]], dtype=np.int32)
else:
raise ValueError(f"Unsupported mesh type {ct}")
cell = ufl.Cell(ct, geometric_dimension=x.shape[1])
domain = ufl.Mesh(ufl.VectorElement("Lagrange", cell, 1))
mesh = create_mesh(MPI.COMM_WORLD, cells, x, domain)
el = ufl.VectorElement("CG", mesh.ufl_cell(), 1)
V = _fem.FunctionSpace(mesh, el)
with XDMFFile(mesh.comm, "test_mesh.xdmf", "w") as xdmf:
xdmf.write_mesh(mesh)
return V
@pytest.mark.parametrize("ct", ["quadrilateral", "triangle", "tetrahedron", "hexahedron"])
@pytest.mark.parametrize("gap", [0.5, -0.5])
@pytest.mark.parametrize("q_deg", [1, 2, 3])
@pytest.mark.parametrize("delta", [0.0, -0.5])
@pytest.mark.parametrize("surface", [0, 1])
def test_pack_test_fn(ct, gap, q_deg, delta, surface):
# Create function space
V = create_functionspaces(ct, gap, delta)
# Retrieve mesh and mesh data
mesh = V.mesh
tdim = mesh.topology.dim
gdim = mesh.geometry.dim
cmap = mesh.geometry.cmap
geometry_dofmap = mesh.geometry.dofmap
# locate facets
facets1 = locate_entities_boundary(mesh, tdim - 1, lambda x: np.isclose(x[tdim - 1], 0))
facets2 = locate_entities_boundary(mesh, tdim - 1, lambda x: np.isclose(x[tdim - 1], -gap))
facets = [facets1, facets2]
# create meshtags
val0 = np.full(len(facets1), 0, dtype=np.int32)
val1 = np.full(len(facets2), 1, dtype=np.int32)
values = np.hstack([val0, val1])
indices = np.concatenate([facets1, facets2])
sorted_facets = np.argsort(indices)
facet_marker = meshtags(mesh, tdim - 1, indices[sorted_facets], values[sorted_facets])
def func(x):
vals = np.zeros((gdim, x.shape[1]))
vals[0] = 0.1 * x[0]
vals[1] = 0.23 * x[1]
return vals
# Compute function that is known on each side
u = _fem.Function(V)
u.interpolate(func)
# create contact class
opposites = [1, 0]
s = surface
o = opposites[surface]
data = np.array([0, 1], dtype=np.int32)
offsets = np.array([0, 2], dtype=np.int32)
surfaces = create_adjacencylist(data, offsets)
contact = dolfinx_contact.cpp.Contact([facet_marker], surfaces, [(s, o)], V._cpp_object, quadrature_degree=q_deg)
contact.update_submesh_geometry(u._cpp_object)
contact.create_distance_map(0)
# Pack gap on surface, pack test functions and jacobian on opposite surface
gap = contact.pack_gap(0)
test_fn = contact.pack_test_functions(0, gap)
u_packed = contact.pack_u_contact(0, u._cpp_object, gap)
# Retrieve surface facets
s_facets = np.sort(facets[s])
lookup = contact.facet_map(0)
# Create facet to cell connectivity
mesh.topology.create_connectivity(tdim - 1, tdim)
mesh.topology.create_connectivity(tdim, tdim - 1)
f_to_c = mesh.topology.connectivity(tdim - 1, tdim)
# loop over facets in surface
for f in range(len(s_facets)):
# Compute evaluation points
qp_phys = contact.qp_phys(s, f)
num_q_points = qp_phys.shape[0]
points = np.zeros((num_q_points, 3))
points[:, :gdim] = qp_phys[:, :gdim] + \
gap[f].reshape((num_q_points, gdim)) - u_packed[f].reshape((num_q_points, gdim))
# retrieve connected facets
connected_facets = lookup.links(f)
unique_facets = np.unique(np.sort(connected_facets))
# loop over unique connected facets
for link, facet_o in enumerate(unique_facets):
# retrieve cell index and cell dofs for facet_o
cell = f_to_c.links(facet_o)
dofs = V.dofmap.cell_dofs(cell)
# find quadrature points linked to facet_o
q_indices = np.argwhere(connected_facets == facet_o)
zero_ind = np.argwhere(connected_facets != facet_o)
# retrieve cell geometry and compute pull back of physical points to reference cell
gdofs = geometry_dofmap.links(cell)
xg = mesh.geometry.x[gdofs]
x_ref = cmap.pull_back(points, xg)
bs = V.dofmap.index_map_bs
for i, dof in enumerate(dofs):
for k in range(bs):
# Create fem function that is identical with desired test function
v = _fem.Function(V)
v.x.array[:] = 0
v.x.array[dof * bs + k] = 1
                    # Create expression for evaluating the test function and evaluate it
expr = _fem.Expression(v, x_ref)
expr_vals = expr.eval([cell])
# compare values of test functions
offset = link * num_q_points * len(dofs) * bs + i * num_q_points * bs
assert(np.allclose(expr_vals[0][q_indices * bs + k], test_fn[f][offset + q_indices * bs + k]))
# ensure values are zero if q not connected to quadrature point
offset = link * num_q_points * len(dofs) * bs + i * num_q_points * bs
assert(np.allclose(0, test_fn[f][offset + zero_ind * bs + k]))
@pytest.mark.parametrize("ct", ["quadrilateral", "triangle", "tetrahedron", "hexahedron"])
@pytest.mark.parametrize("gap", [0.5, -0.5])
@pytest.mark.parametrize("q_deg", [1, 2, 3])
@pytest.mark.parametrize("delta", [0.0, -0.5])
@pytest.mark.parametrize("surface", [0, 1])
def test_pack_u(ct, gap, q_deg, delta, surface):
# Create function space
V = create_functionspaces(ct, gap, delta)
# Retrieve mesh and mesh data
mesh = V.mesh
tdim = mesh.topology.dim
gdim = mesh.geometry.dim
cmap = mesh.geometry.cmap
geometry_dofmap = mesh.geometry.dofmap
bs = V.dofmap.index_map_bs
# locate facets
facets1 = locate_entities_boundary(mesh, tdim - 1, lambda x:
|
np.isclose(x[tdim - 1], 0)
|
numpy.isclose
|
import unittest
import mapf_gym as MAPF_Env
import numpy as np
# Agent 1
num_agents1 = 1
world1 = [[ 1, 0, 0, -1, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[-1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
goals1 = [[ 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
# Agent 1
num_agents2 = 1
world2 = [[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, -1, 0, 0, 0, 0, 0, 0],
[ 0, 0, -1, 1, -1, 0, 0, 0, 0, 0],
[ 0, 0, 0, -1, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
goals2 = [[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
# Agent 1 and 2
num_agents3 = 2
world3 = [[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, -1, -1, 0, 0, 0, 0, 0],
[ 0, 0, -1, 1, 2, -1, 0, 0, 0, 0],
[ 0, 0, 0, -1, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
goals3 = [[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 2, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
# Agent 1 and 2
num_agents4 = 2
world4 = [[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, -1, -1, -1, 0, 0, 0, 0],
[ 0, 0, -1, 1, 2, -1, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
goals4 = [[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 2, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
# action: {0:NOP, 1:MOVE_NORTH, 2:MOVE_EAST, 3:MOVE_SOUTH, 4:MOVE_WEST}
# MAPF_Env.ACTION_COST, MAPF_Env.IDLE_COST, MAPF_Env.GOAL_REWARD, MAPF_Env.COLLISION_REWARD
FULL_HELP = False
class MAPFTests(unittest.TestCase):
# Bruteforce tests
def test_validActions1(self):
# MAPF_Env.MAPFEnv(self, num_agents=1, world0=None, goals0=None, DIAGONAL_MOVEMENT=False, SIZE=10, PROB=.2, FULL_HELP=False)
gameEnv1 = MAPF_Env.MAPFEnv(num_agents1, world0=np.array(world1), goals0=np.array(goals1), DIAGONAL_MOVEMENT=False)
validActions1 = gameEnv1._listNextValidActions(1)
self.assertEqual(validActions1, [0,1,2])
# With diagonal actions
gameEnv1 = MAPF_Env.MAPFEnv(num_agents1, world0=np.array(world1), goals0=np.array(goals1), DIAGONAL_MOVEMENT=True)
validActions1 = gameEnv1._listNextValidActions(1)
self.assertEqual(validActions1, [0,1,2,5])
def test_validActions2(self):
gameEnv2 = MAPF_Env.MAPFEnv(num_agents2, world0=np.array(world2), goals0=np.array(goals2), DIAGONAL_MOVEMENT=False)
validActions2 = gameEnv2._listNextValidActions(1)
self.assertEqual(validActions2, [0])
# With diagonal actions
gameEnv2 = MAPF_Env.MAPFEnv(num_agents2, world0=np.array(world2), goals0=np.array(goals2), DIAGONAL_MOVEMENT=True)
validActions2 = gameEnv2._listNextValidActions(1)
self.assertEqual(validActions2, [0,5,6,7,8])
def test_validActions3(self):
gameEnv3 = MAPF_Env.MAPFEnv(num_agents3, world0=np.array(world3), goals0=np.array(goals3), DIAGONAL_MOVEMENT=False)
validActions3a = gameEnv3._listNextValidActions(1)
validActions3b = gameEnv3._listNextValidActions(2)
self.assertEqual(validActions3a, [0])
self.assertEqual(validActions3b, [0,2])
# With diagonal actions
gameEnv3 = MAPF_Env.MAPFEnv(num_agents3, world0=np.array(world3), goals0=np.array(goals3), DIAGONAL_MOVEMENT=True)
validActions3a = gameEnv3._listNextValidActions(1)
validActions3b = gameEnv3._listNextValidActions(2)
self.assertEqual(validActions3a, [0,5,6,7])
self.assertEqual(validActions3b, [0,2,5,8])
def test_validActions4(self):
gameEnv4 = MAPF_Env.MAPFEnv(num_agents4, world0=np.array(world4), goals0=np.array(goals4),DIAGONAL_MOVEMENT=False)
validActions4a = gameEnv4._listNextValidActions(1)
validActions4b = gameEnv4._listNextValidActions(2)
self.assertEqual(validActions4a, [0,2])
self.assertEqual(validActions4b, [0,2])
# With diagonal actions
gameEnv4 = MAPF_Env.MAPFEnv(num_agents4, world0=np.array(world4), goals0=np.array(goals4),DIAGONAL_MOVEMENT=True)
validActions4a = gameEnv4._listNextValidActions(1)
validActions4b = gameEnv4._listNextValidActions(2)
self.assertEqual(validActions4a, [0,2,5,6,7])
self.assertEqual(validActions4b, [0,2,5,6])
def testIdle1(self):
gameEnv1 = MAPF_Env.MAPFEnv(num_agents1, world0=np.array(world1), goals0=np.array(goals1))
s0 = gameEnv1.world.state.copy()
# return state, reward, done, nextActions, on_goal, blocking, valid_action
s1, r, d, _, o_g, _, _ = gameEnv1.step((1,0))
s2 = gameEnv1.world.state.copy()
self.assertEqual(r, MAPF_Env.IDLE_COST)
self.assertFalse(d)
self.assertFalse(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
def testIdle2(self):
gameEnv2 = MAPF_Env.MAPFEnv(num_agents2, world0=np.array(world2), goals0=np.array(goals2))
s0 = gameEnv2.world.state.copy()
s1, r, d, _, o_g, _, _ = gameEnv2.step((1,0))
s2 = gameEnv2.world.state.copy()
self.assertEqual(r, MAPF_Env.GOAL_REWARD)
self.assertTrue(d)
self.assertTrue(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
def testIdle3(self):
gameEnv3 = MAPF_Env.MAPFEnv(num_agents3, world0=np.array(world3), goals0=np.array(goals3))
s0 = gameEnv3.world.state.copy()
# Agent 1
s1, r, d, _, o_g, _, _ = gameEnv3.step((1,0))
s2 = gameEnv3.world.state.copy()
self.assertEqual(r, MAPF_Env.GOAL_REWARD)
self.assertFalse(d)
self.assertTrue(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
# Agent 2
s1, r, d, _, o_g, _, _ = gameEnv3.step((2,0))
s2 = gameEnv3.world.state.copy()
self.assertEqual(r, MAPF_Env.IDLE_COST)
self.assertFalse(d)
self.assertFalse(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
def testIdle4(self):
gameEnv4 = MAPF_Env.MAPFEnv(num_agents4, world0=np.array(world4), goals0=np.array(goals4),DIAGONAL_MOVEMENT=False)
s0 = gameEnv4.world.state.copy()
# Agent 1
s1, r, d, _, o_g, _, _ = gameEnv4.step((1,0))
s2 = gameEnv4.world.state.copy()
self.assertEqual(r, MAPF_Env.GOAL_REWARD)
self.assertFalse(d)
self.assertTrue(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
# Agent 2
s1, r, d, _, o_g, _, _ = gameEnv4.step((2,0))
s2 = gameEnv4.world.state.copy()
self.assertEqual(r, MAPF_Env.IDLE_COST)
self.assertFalse(d)
self.assertFalse(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
def test_move_east1(self):
gameEnv1 = MAPF_Env.MAPFEnv(num_agents1, world0=np.array(world1), goals0=np.array(goals1))
s0 = gameEnv1.world.state.copy()
        # return state, reward, done, nextActions, on_goal, blocking, valid_action
s1, r, d, _, o_g, _, _ = gameEnv1.step((1,1))
s2 = gameEnv1.world.state.copy()
self.assertEqual(r, MAPF_Env.GOAL_REWARD)
self.assertTrue(d)
self.assertTrue(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
def test_move_east2(self):
gameEnv2 = MAPF_Env.MAPFEnv(num_agents2, world0=np.array(world2), goals0=np.array(goals2))
s0 = gameEnv2.world.state.copy()
s1, r, d, _, o_g, _, _ = gameEnv2.step((1,1))
s2 = gameEnv2.world.state.copy()
self.assertEqual(r, MAPF_Env.COLLISION_REWARD)
self.assertTrue(d)
self.assertTrue(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
def test_move_east3a(self):
gameEnv3 = MAPF_Env.MAPFEnv(num_agents3, world0=np.array(world3), goals0=np.array(goals3))
s0 = gameEnv3.world.state.copy()
# Agent 1
s1, r, d, _, o_g, _, _ = gameEnv3.step((1,1))
s2 = gameEnv3.world.state.copy()
self.assertEqual(r, MAPF_Env.COLLISION_REWARD)
self.assertFalse(d)
self.assertTrue(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
# Agent 2
s1, r, d, _, o_g, _, _ = gameEnv3.step((2,1))
s2 = gameEnv3.world.state.copy()
self.assertEqual(r, MAPF_Env.COLLISION_REWARD)
self.assertFalse(d)
self.assertFalse(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
def test_move_east3b(self):
gameEnv3 = MAPF_Env.MAPFEnv(num_agents3, world0=np.array(world3), goals0=np.array(goals3))
s0 = gameEnv3.world.state.copy()
# Agent 2
s1, r, d, _, o_g, _, _ = gameEnv3.step((2,1))
s2 = gameEnv3.world.state.copy()
self.assertEqual(r, MAPF_Env.COLLISION_REWARD)
self.assertFalse(d)
self.assertFalse(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
# Agent 1
s1, r, d, _, o_g, _, _ = gameEnv3.step((1,1))
s2 = gameEnv3.world.state.copy()
self.assertEqual(r, MAPF_Env.COLLISION_REWARD)
self.assertFalse(d)
self.assertTrue(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
def test_move_east4a(self):
gameEnv4 = MAPF_Env.MAPFEnv(num_agents4, world0=np.array(world4), goals0=np.array(goals4))
s0 = gameEnv4.world.state.copy()
# Agent 1
s1, r, d, _, o_g, _, _ = gameEnv4.step((1,1))
s2 = gameEnv4.world.state.copy()
self.assertEqual(r, MAPF_Env.COLLISION_REWARD)
self.assertFalse(d)
self.assertTrue(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
# Agent 2
s1, r, d, _, o_g, _, _ = gameEnv4.step((2,1))
s2 = gameEnv4.world.state.copy()
self.assertEqual(r, MAPF_Env.COLLISION_REWARD)
self.assertFalse(d)
self.assertFalse(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
def test_move_east4b(self):
gameEnv4 = MAPF_Env.MAPFEnv(num_agents4, world0=np.array(world4), goals0=np.array(goals4))
s0 = gameEnv4.world.state.copy()
# Agent 2
s1, r, d, _, o_g, _, _ = gameEnv4.step((2,1))
s2 = gameEnv4.world.state.copy()
self.assertEqual(r, MAPF_Env.COLLISION_REWARD)
self.assertFalse(d)
self.assertFalse(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
# Agent 1
s1, r, d, _, o_g, _, _ = gameEnv4.step((1,1))
s2 = gameEnv4.world.state.copy()
self.assertEqual(r, MAPF_Env.COLLISION_REWARD)
self.assertFalse(d)
self.assertTrue(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
def test_move_north1(self):
gameEnv1 = MAPF_Env.MAPFEnv(num_agents1, world0=np.array(world1), goals0=np.array(goals1))
s0 = gameEnv1.world.state.copy()
        # return state, reward, done, nextActions, on_goal, blocking, valid_action
s1, r, d, _, o_g, _, _ = gameEnv1.step((1,2))
s2 = gameEnv1.world.state.copy()
self.assertEqual(r, MAPF_Env.ACTION_COST)
self.assertFalse(d)
self.assertFalse(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
def test_move_north2(self):
gameEnv2 = MAPF_Env.MAPFEnv(num_agents2, world0=np.array(world2), goals0=np.array(goals2))
s0 = gameEnv2.world.state.copy()
s1, r, d, _, o_g, _, _ = gameEnv2.step((1,2))
s2 = gameEnv2.world.state.copy()
self.assertEqual(r, MAPF_Env.COLLISION_REWARD)
self.assertTrue(d)
self.assertTrue(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
def test_move_north3a(self):
gameEnv3 = MAPF_Env.MAPFEnv(num_agents3, world0=np.array(world3), goals0=np.array(goals3))
s0 = gameEnv3.world.state.copy()
# Agent 1
s1, r, d, _, o_g, _, _ = gameEnv3.step((1,2))
s2 = gameEnv3.world.state.copy()
self.assertEqual(r, MAPF_Env.COLLISION_REWARD)
self.assertFalse(d)
self.assertTrue(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
# Agent 2
s1, r, d, _, o_g, _, _ = gameEnv3.step((2,2))
s2 = gameEnv3.world.state.copy()
self.assertEqual(r, MAPF_Env.GOAL_REWARD)
self.assertTrue(d)
self.assertTrue(o_g)
self.assertEqual(np.sum(s0), np.sum(s2))
def test_move_north3b(self):
gameEnv3 = MAPF_Env.MAPFEnv(num_agents3, world0=
|
np.array(world3)
|
numpy.array
|
# -*- mode: python; coding: utf-8 -*
# Copyright (c) 2018 Radio Astronomy Software Group
# Licensed under the 2-clause BSD License
"""Commonly used utility functions."""
from __future__ import absolute_import, division, print_function
import numpy as np
import six
import warnings
import copy
from scipy.spatial.distance import pdist, squareform
from astropy.time import Time
from astropy.coordinates import Angle
from astropy.utils import iers
if six.PY2:
from collections import Iterable
else:
from collections.abc import Iterable
# parameters for transforming between xyz & lat/lon/alt
gps_b = 6356752.31424518
gps_a = 6378137
e_squared = 6.69437999014e-3
e_prime_squared = 6.73949674228e-3
if six.PY2:
def _str_to_bytes(s):
return s
def _bytes_to_str(b):
return b
else:
def _str_to_bytes(s):
return s.encode('utf8')
def _bytes_to_str(b):
return b.decode('utf8')
# polarization constants
# maps polarization strings to polarization integers
POL_STR2NUM_DICT = {'pI': 1, 'pQ': 2, 'pU': 3, 'pV': 4,
'I': 1, 'Q': 2, 'U': 3, 'V': 4, # support straight stokes names
'rr': -1, 'll': -2, 'rl': -3, 'lr': -4,
'xx': -5, 'yy': -6, 'xy': -7, 'yx': -8}
# maps polarization integers to polarization strings
POL_NUM2STR_DICT = {1: 'pI', 2: 'pQ', 3: 'pU', 4: 'pV',
-1: 'rr', -2: 'll', -3: 'rl', -4: 'lr',
-5: 'xx', -6: 'yy', -7: 'xy', -8: 'yx'}
# maps how polarizations change when antennas are swapped
CONJ_POL_DICT = {'xx': 'xx', 'yy': 'yy', 'xy': 'yx', 'yx': 'xy',
'ee': 'ee', 'nn': 'nn', 'en': 'ne', 'ne': 'en',
'rr': 'rr', 'll': 'll', 'rl': 'lr', 'lr': 'rl',
'I': 'I', 'Q': 'Q', 'U': 'U', 'V': 'V',
'pI': 'pI', 'pQ': 'pQ', 'pU': 'pU', 'pV': 'pV'}
# maps jones matrix element strings to jones integers
JONES_STR2NUM_DICT = {'Jxx': -5, 'Jyy': -6, 'Jxy': -7, 'Jyx': -8,
'xx': -5, 'x': -5, 'yy': -6, 'y': -6, 'xy': -7, 'yx': -8, # Allow shorthand
'Jrr': -1, 'Jll': -2, 'Jrl': -3, 'Jlr': -4,
'rr': -1, 'r': -1, 'll': -2, 'l': -2, 'rl': -3, 'lr': -4}
# maps jones integers to jones matrix element strings
JONES_NUM2STR_DICT = {-1: 'Jrr', -2: 'Jll', -3: 'Jrl', -4: 'Jlr',
-5: 'Jxx', -6: 'Jyy', -7: 'Jxy', -8: 'Jyx'}
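# Hedged usage sketch (not part of the original module): the mapping dictionaries above
# can be used directly for simple lookups, e.g.
#     POL_STR2NUM_DICT['xx']    -> -5
#     POL_NUM2STR_DICT[-5]      -> 'xx'
#     CONJ_POL_DICT['xy']       -> 'yx'   # polarization string after swapping the antenna pair
#     JONES_STR2NUM_DICT['Jxy'] -> -7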
def LatLonAlt_from_XYZ(xyz, check_acceptability=True):
"""
Calculate lat/lon/alt from ECEF x,y,z.
Parameters
----------
xyz : ndarray of float
numpy array, shape (Npts, 3), with ECEF x,y,z coordinates.
check_acceptability : bool
Flag to check XYZ coordinates are reasonable.
Returns
-------
latitude : ndarray or float
latitude, numpy array (if Npts > 1) or value (if Npts = 1) in radians
longitude : ndarray or float
longitude, numpy array (if Npts > 1) or value (if Npts = 1) in radians
altitude : ndarray or float
altitude, numpy array (if Npts > 1) or value (if Npts = 1) in meters
"""
# convert to a numpy array
xyz = np.array(xyz)
if xyz.ndim > 1 and xyz.shape[1] != 3:
if xyz.shape[0] == 3:
warnings.warn('The expected shape of ECEF xyz array is (Npts, 3). '
'Support for arrays shaped (3, Npts) will go away in '
'version 1.5', DeprecationWarning)
xyz_use = xyz.T
else:
raise ValueError('The expected shape of ECEF xyz array is (Npts, 3).')
else:
xyz_use = xyz
if xyz.shape == (3, 3):
warnings.warn('The xyz array in LatLonAlt_from_XYZ is being '
'interpreted as (Npts, 3). Historically this function '
'has supported (3, Npts) arrays, please verify that '
'array ordering is as expected. This warning will be '
'removed in version 1.5', DeprecationWarning)
if xyz_use.ndim == 1:
xyz_use = xyz_use[np.newaxis, :]
# checking for acceptable values
if check_acceptability:
if (np.any(np.linalg.norm(xyz_use, axis=1) < 6.35e6)
or np.any(np.linalg.norm(xyz_use, axis=1) > 6.39e6)):
raise ValueError(
'xyz values should be ECEF x, y, z coordinates in meters')
# see wikipedia geodetic_datum and Datum transformations of
# GPS positions PDF in docs/references folder
gps_p = np.sqrt(xyz_use[:, 0]**2 + xyz_use[:, 1]**2)
gps_theta = np.arctan2(xyz_use[:, 2] * gps_a, gps_p * gps_b)
latitude = np.arctan2(xyz_use[:, 2] + e_prime_squared * gps_b
* np.sin(gps_theta)**3, gps_p - e_squared * gps_a
* np.cos(gps_theta)**3)
longitude = np.arctan2(xyz_use[:, 1], xyz_use[:, 0])
gps_N = gps_a / np.sqrt(1 - e_squared * np.sin(latitude)**2)
altitude = ((gps_p / np.cos(latitude)) - gps_N)
if xyz.ndim == 1:
longitude = longitude[0]
latitude = latitude[0]
altitude = altitude[0]
return latitude, longitude, altitude
def XYZ_from_LatLonAlt(latitude, longitude, altitude):
"""
Calculate ECEF x,y,z from lat/lon/alt values.
Parameters
----------
latitude : ndarray or float
latitude, numpy array (if Npts > 1) or value (if Npts = 1) in radians
longitude : ndarray or float
longitude, numpy array (if Npts > 1) or value (if Npts = 1) in radians
altitude : ndarray or float
altitude, numpy array (if Npts > 1) or value (if Npts = 1) in meters
Returns
-------
xyz : ndarray of float
numpy array, shape (Npts, 3), with ECEF x,y,z coordinates.
"""
latitude = np.array(latitude)
longitude = np.array(longitude)
altitude = np.array(altitude)
Npts = latitude.size
if longitude.size != Npts:
raise ValueError(
'latitude, longitude and altitude must all have the same length')
if altitude.size != Npts:
raise ValueError(
'latitude, longitude and altitude must all have the same length')
# see wikipedia geodetic_datum and Datum transformations of
# GPS positions PDF in docs/references folder
gps_N = gps_a / np.sqrt(1 - e_squared * np.sin(latitude)**2)
xyz = np.zeros((Npts, 3))
xyz[:, 0] = ((gps_N + altitude) * np.cos(latitude) * np.cos(longitude))
xyz[:, 1] = ((gps_N + altitude) * np.cos(latitude) * np.sin(longitude))
xyz[:, 2] = ((gps_b**2 / gps_a**2 * gps_N + altitude) * np.sin(latitude))
xyz = np.squeeze(xyz)
return xyz
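# Hedged usage sketch (example values assumed, not part of the original module): a simple
# round trip through the two converters above. LatLonAlt_from_XYZ uses the standard
# closed-form approximation, so the recovered values agree with the inputs to well within
# default numpy tolerances for points near the Earth's surface.
def _example_latlonalt_roundtrip():
    lat_in = np.deg2rad(-30.72)   # example latitude [rad]
    lon_in = np.deg2rad(21.43)    # example longitude [rad]
    alt_in = 1051.7               # example altitude [m]
    xyz = XYZ_from_LatLonAlt(lat_in, lon_in, alt_in)
    lat_out, lon_out, alt_out = LatLonAlt_from_XYZ(xyz)
    return np.allclose([lat_out, lon_out], [lat_in, lon_in]) and np.isclose(alt_out, alt_in)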
def rotECEF_from_ECEF(xyz, longitude):
"""
Get rotated ECEF positions such that the x-axis goes through the longitude.
Miriad and uvfits expect antenna positions in this frame
(with longitude of the array center/telescope location)
Parameters
----------
xyz : ndarray of float
numpy array, shape (Npts, 3), with ECEF x,y,z coordinates.
longitude : float
longitude in radians to rotate coordinates to
(usually the array center/telescope location).
Returns
-------
ndarray of float
Rotated ECEF coordinates, shape (Npts, 3).
"""
angle = -1 * longitude
rot_matrix = np.array([[np.cos(angle), -1 * np.sin(angle), 0],
[np.sin(angle), np.cos(angle), 0],
[0, 0, 1]])
return rot_matrix.dot(xyz.T).T
def ECEF_from_rotECEF(xyz, longitude):
"""
Calculate ECEF from a rotated ECEF (Inverse of rotECEF_from_ECEF).
Parameters
----------
xyz : ndarray of float
numpy array, shape (Npts, 3), with rotated ECEF x,y,z coordinates.
longitude : float
longitude in radians giving the x direction of the rotated coordinates
(usually the array center/telescope location).
Returns
-------
ndarray of float
ECEF coordinates, shape (Npts, 3).
"""
angle = longitude
rot_matrix = np.array([[np.cos(angle), -1 * np.sin(angle), 0],
[np.sin(angle), np.cos(angle), 0],
[0, 0, 1]])
return rot_matrix.dot(xyz.T).T
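# Note: rotECEF_from_ECEF and ECEF_from_rotECEF apply inverse rotations about the z-axis
# (by -longitude and +longitude, respectively), so
#     ECEF_from_rotECEF(rotECEF_from_ECEF(xyz, lon), lon)
# recovers the original xyz up to floating-point round-off.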
def ENU_from_ECEF(xyz, latitude, longitude, altitude):
"""
Calculate local ENU (east, north, up) coordinates from ECEF coordinates.
Parameters
----------
xyz : ndarray of float
numpy array, shape (Npts, 3), with ECEF x,y,z coordinates.
latitude : float
Latitude of center of ENU coordinates in radians.
longitude : float
Longitude of center of ENU coordinates in radians.
altitude : float
        Altitude of center of ENU coordinates in meters.
Returns
-------
ndarray of float
numpy array, shape (Npts, 3), with local ENU coordinates
"""
xyz = np.array(xyz)
if xyz.ndim > 1 and xyz.shape[1] != 3:
if xyz.shape[0] == 3:
warnings.warn('The expected shape of ECEF xyz array is (Npts, 3). '
'Support for arrays shaped (3, Npts) will go away in '
'version 1.5', DeprecationWarning)
xyz_in = xyz.T
transpose = True
else:
raise ValueError('The expected shape of ECEF xyz array is (Npts, 3).')
else:
xyz_in = xyz
transpose = False
if xyz.shape == (3, 3):
warnings.warn('The xyz array in ENU_from_ECEF is being '
'interpreted as (Npts, 3). Historically this function '
'has supported (3, Npts) arrays, please verify that '
'array ordering is as expected. This warning will be '
'removed in version 1.5', DeprecationWarning)
if xyz_in.ndim == 1:
xyz_in = xyz_in[np.newaxis, :]
# check that these are sensible ECEF values -- their magnitudes need to be
# on the order of Earth's radius
ecef_magnitudes = np.linalg.norm(xyz_in, axis=1)
sensible_radius_range = (6.35e6, 6.39e6)
if (np.any(ecef_magnitudes <= sensible_radius_range[0])
or np.any(ecef_magnitudes >= sensible_radius_range[1])):
raise ValueError(
'ECEF vector magnitudes must be on the order of the radius of the earth')
xyz_center = XYZ_from_LatLonAlt(latitude, longitude, altitude)
xyz_use = np.zeros_like(xyz_in)
xyz_use[:, 0] = xyz_in[:, 0] - xyz_center[0]
xyz_use[:, 1] = xyz_in[:, 1] - xyz_center[1]
xyz_use[:, 2] = xyz_in[:, 2] - xyz_center[2]
enu = np.zeros_like(xyz_use)
enu[:, 0] = (-np.sin(longitude) * xyz_use[:, 0]
+ np.cos(longitude) * xyz_use[:, 1])
enu[:, 1] = (-np.sin(latitude) * np.cos(longitude) * xyz_use[:, 0]
- np.sin(latitude) * np.sin(longitude) * xyz_use[:, 1]
+ np.cos(latitude) * xyz_use[:, 2])
enu[:, 2] = (np.cos(latitude) * np.cos(longitude) * xyz_use[:, 0]
+ np.cos(latitude) * np.sin(longitude) * xyz_use[:, 1]
+ np.sin(latitude) * xyz_use[:, 2])
if len(xyz.shape) == 1:
enu = np.squeeze(enu)
elif transpose:
return enu.T
return enu
def ECEF_from_ENU(enu, latitude, longitude, altitude):
"""
Calculate ECEF coordinates from local ENU (east, north, up) coordinates.
Parameters
----------
enu : ndarray of float
numpy array, shape (Npts, 3), with local ENU coordinates.
latitude : float
Latitude of center of ENU coordinates in radians.
longitude : float
Longitude of center of ENU coordinates in radians.
altitude : float
        Altitude of center of ENU coordinates in meters.
Returns
-------
xyz : ndarray of float
numpy array, shape (Npts, 3), with ECEF x,y,z coordinates.
"""
enu = np.array(enu)
if enu.ndim > 1 and enu.shape[1] != 3:
if enu.shape[0] == 3:
warnings.warn('The expected shape of the ENU array is (Npts, 3). '
'Support for arrays shaped (3, Npts) will go away in '
'version 1.5', DeprecationWarning)
enu_use = enu.T
transpose = True
else:
            raise ValueError('The expected shape of the ENU array is (Npts, 3).')
else:
enu_use = enu
transpose = False
if enu.shape == (3, 3):
warnings.warn('The enu array in ECEF_from_ENU is being '
'interpreted as (Npts, 3). Historically this function '
'has supported (3, Npts) arrays, please verify that '
'array ordering is as expected. This warning will be '
'removed in version 1.5', DeprecationWarning)
if enu_use.ndim == 1:
enu_use = enu_use[np.newaxis, :]
xyz = np.zeros_like(enu_use)
xyz[:, 0] = (-np.sin(latitude) * np.cos(longitude) * enu_use[:, 1]
- np.sin(longitude) * enu_use[:, 0]
+ np.cos(latitude) * np.cos(longitude) * enu_use[:, 2])
xyz[:, 1] = (-np.sin(latitude) * np.sin(longitude) * enu_use[:, 1]
+ np.cos(longitude) * enu_use[:, 0]
+ np.cos(latitude) * np.sin(longitude) * enu_use[:, 2])
xyz[:, 2] = (np.cos(latitude) * enu_use[:, 1]
+ np.sin(latitude) * enu_use[:, 2])
xyz_center = XYZ_from_LatLonAlt(latitude, longitude, altitude)
xyz[:, 0] = xyz[:, 0] + xyz_center[0]
xyz[:, 1] = xyz[:, 1] + xyz_center[1]
xyz[:, 2] = xyz[:, 2] + xyz_center[2]
if len(enu.shape) == 1:
xyz = np.squeeze(xyz)
elif transpose:
return xyz.T
return xyz
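# Hedged usage sketch (example values assumed, not part of the original module):
# ENU_from_ECEF and ECEF_from_ENU are exact inverses about the same reference point,
# so a round trip reproduces the input. The reference location below is arbitrary.
def _example_enu_roundtrip():
    lat0, lon0, alt0 = np.deg2rad(34.1), np.deg2rad(-107.6), 2100.0  # example reference point
    enu_in = np.array([[10.0, -25.0, 1.5], [0.0, 0.0, 0.0]])         # two example positions [m]
    xyz = ECEF_from_ENU(enu_in, lat0, lon0, alt0)
    enu_out = ENU_from_ECEF(xyz, lat0, lon0, alt0)
    return np.allclose(enu_in, enu_out)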
def phase_uvw(ra, dec, initial_uvw):
"""
Calculate phased uvws/positions from unphased ones in an icrs or gcrs frame.
This code expects input uvws or positions relative to the telescope
location in the same frame that ra/dec are in (e.g. icrs or gcrs) and
returns phased ones in the same frame.
Note that this code is nearly identical to ENU_from_ECEF, except that it
uses an arbitrary phasing center rather than a coordinate center.
Parameters
----------
ra : float
Right ascension of phase center.
dec : float
Declination of phase center.
initial_uvw : ndarray of float
Unphased uvws or positions relative to the array center,
shape (Nlocs, 3).
Returns
-------
uvw : ndarray of float
uvw array in the same frame as initial_uvws, ra and dec.
"""
if initial_uvw.ndim == 1:
initial_uvw = initial_uvw[np.newaxis, :]
uvw = np.zeros_like(initial_uvw)
uvw[:, 0] = (-np.sin(ra) * initial_uvw[:, 0]
+ np.cos(ra) * initial_uvw[:, 1])
uvw[:, 1] = (-np.sin(dec) * np.cos(ra) * initial_uvw[:, 0]
- np.sin(dec) * np.sin(ra) * initial_uvw[:, 1]
+ np.cos(dec) * initial_uvw[:, 2])
uvw[:, 2] = (np.cos(dec) * np.cos(ra) * initial_uvw[:, 0]
+ np.cos(dec) * np.sin(ra) * initial_uvw[:, 1]
+ np.sin(dec) * initial_uvw[:, 2])
return(uvw)
def unphase_uvw(ra, dec, uvw):
"""
Calculate unphased uvws/positions from phased ones in an icrs or gcrs frame.
This code expects phased uvws or positions in the same frame that ra/dec
are in (e.g. icrs or gcrs) and returns unphased ones in the same frame.
Parameters
----------
ra : float
Right ascension of phase center.
dec : float
Declination of phase center.
uvw : ndarray of float
Phased uvws or positions relative to the array center,
shape (Nlocs, 3).
Returns
-------
unphased_uvws : ndarray of float
Unphased uvws or positions relative to the array center,
shape (Nlocs, 3).
"""
if uvw.ndim == 1:
uvw = uvw[np.newaxis, :]
unphased_uvws = np.zeros_like(uvw)
unphased_uvws[:, 0] = (-np.sin(ra) * uvw[:, 0]
- np.sin(dec) *
|
np.cos(ra)
|
numpy.cos
|
import ctypes as ct
import numpy as np
import numpy.ctypeslib as ctl
from .base import NullableFloatArrayType
from smlmlib.context import Context
from smlmlib.calib import sCMOS_Calib
import scipy.stats
class PSF:
def __init__(self, ctx:Context, psfInst):
self.inst = psfInst
self.ctx = ctx
lib = ctx.smlm.lib
self.copyROI = False
InstancePtrType = ct.c_void_p
self._PSF_Delete = lib.PSF_Delete
self._PSF_Delete.argtypes = [InstancePtrType]
self._PSF_ThetaSize = lib.PSF_ThetaSize
self._PSF_ThetaSize.restype = ct.c_int32
self._PSF_ThetaSize.argtypes = [InstancePtrType]
self._PSF_ThetaFormat = lib.PSF_ThetaFormat
self._PSF_ThetaFormat.restype = ct.c_char_p
self._PSF_ThetaFormat.argtypes = [InstancePtrType]
self._PSF_NumConstants = lib.PSF_NumConstants
self._PSF_NumConstants.restype = ct.c_int32
self._PSF_NumConstants.argtypes = [InstancePtrType]
self._PSF_NumDiag = lib.PSF_NumDiag
self._PSF_NumDiag.restype = ct.c_int32
self._PSF_NumDiag.argtypes = [InstancePtrType]
self._PSF_SampleSize = lib.PSF_SampleSize
self._PSF_SampleSize.argtypes = [InstancePtrType, ct.c_int32]
self._PSF_SampleSize.restype = ct.c_int32
self._PSF_SampleIndexDims = lib.PSF_SampleIndexDims
self._PSF_SampleIndexDims.restype = ct.c_int32
self._PSF_SampleIndexDims.argtypes = [InstancePtrType]
self._PSF_SampleCount = lib.PSF_SampleCount
self._PSF_SampleCount.restype = ct.c_int32
self._PSF_SampleCount.argtypes = [InstancePtrType]
# CDLL_EXPORT void PSF_ComputeExpectedValue(PSF* psf, int numspots, const float* theta, const float* constants, const int* spotpos, float* ev);
self._PSF_ComputeExpectedValue = lib.PSF_ComputeExpectedValue
self._PSF_ComputeExpectedValue.argtypes = [
InstancePtrType,
ct.c_int32,
ctl.ndpointer(np.float32, flags="aligned, c_contiguous"), #theta
ctl.ndpointer(np.float32, flags="aligned, c_contiguous"), # constants
ctl.ndpointer(np.int32, flags="aligned, c_contiguous"), # spotpos
ctl.ndpointer(np.float32, flags="aligned, c_contiguous"), # ev
]
# CDLL_EXPORT void PSF_ComputeFisherMatrix(PSF* psf, int numspots, const float* theta, const float* constants,const int* spotpos, float* fi);
self._PSF_ComputeFisherMatrix = lib.PSF_ComputeFisherMatrix
self._PSF_ComputeFisherMatrix.argtypes = [
InstancePtrType,
ct.c_int32,
ctl.ndpointer(np.float32, flags="aligned, c_contiguous"),
ctl.ndpointer(np.float32, flags="aligned, c_contiguous"),
ctl.ndpointer(np.int32, flags="aligned, c_contiguous"), # spotpos
ctl.ndpointer(np.float32, flags="aligned, c_contiguous"), #fi
]
# CDLL_EXPORT void PSF_ComputeDerivatives(PSF* psf, int numspots, const float* theta, const float* constants, const int* spotpos, float* derivatives, float* ev);
self._PSF_ComputeDerivatives = lib.PSF_ComputeDerivatives
self._PSF_ComputeDerivatives.argtypes = [
InstancePtrType,
ct.c_int32,
ctl.ndpointer(np.float32, flags="aligned, c_contiguous"),
ctl.ndpointer(np.float32, flags="aligned, c_contiguous"),
ctl.ndpointer(np.int32, flags="aligned, c_contiguous"), # spotpos
ctl.ndpointer(np.float32, flags="aligned, c_contiguous"),
ctl.ndpointer(np.float32, flags="aligned, c_contiguous"),
]
self.fitInfoDType = np.dtype([('likelihood', '<f4'), ('iterations', '<i4')])
# CDLL_EXPORT void PSF_ComputeMLE(PSF* psf, int numspots, const float* sample, const float* constants,const int* spotpos,
# const float* initial, float* theta, int* iterations, int maxiterations, float levmarAlpha, float* trace, int traceBufLen);
self._PSF_ComputeMLE = lib.PSF_ComputeMLE
self._PSF_ComputeMLE.argtypes = [
InstancePtrType,
ct.c_int32,
ctl.ndpointer(np.float32, flags="aligned, c_contiguous"), # sample
ctl.ndpointer(np.float32, flags="aligned, c_contiguous"), # constants
ctl.ndpointer(np.int32, flags="aligned, c_contiguous"), # spotpos
NullableFloatArrayType, # initial
ctl.ndpointer(np.float32, flags="aligned, c_contiguous"), # theta
ctl.ndpointer(np.float32, flags="aligned, c_contiguous"), # diagnostics
ctl.ndpointer(np.int32, flags="aligned, c_contiguous"), # iterations
ctl.ndpointer(np.float32, flags="aligned, c_contiguous"), # tracebuf [theta*numspots*tracebuflen]
ct.c_int32,
]
self.thetasize = self._PSF_ThetaSize(self.inst)
self.numconst = self._PSF_NumConstants(self.inst)
self.samplecount = self._PSF_SampleCount(self.inst)
self.indexdims = self._PSF_SampleIndexDims(self.inst)
self.samplesize = [0]*self.indexdims
for i in range(self.indexdims):
self.samplesize[i] = self._PSF_SampleSize(self.inst, i)
self.numdiag = self._PSF_NumDiag(self.inst)
self.fmt = self.ThetaFormat()
self.colnames = self.fmt.split(',')
#print(f"Created PSF Parameters: {self.fmt}, #const={self.numconst}. #diag={self.numdiag} samplesize={self.samplesize}." )
def ThetaColNames(self):
return self.colnames
def _checkparams(self, theta, constants, roipos):
theta = np.array(theta)
if len(theta.shape) == 1:
theta = [theta]
theta = np.ascontiguousarray(theta, dtype=np.float32)
numspots = len(theta)
if constants is None:
constants = np.zeros((numspots, self.numconst), dtype=np.float32)
else:
constants = np.ascontiguousarray(constants, dtype=np.float32)
assert(np.array_equal(constants.shape, [numspots, self.numconst]))
if roipos is None:
roipos = np.zeros((numspots, self.indexdims), dtype=np.int32)
else:
roipos = np.ascontiguousarray(roipos, dtype=np.int32)
assert(np.array_equal( roipos.shape, [numspots, self.indexdims]))
if theta.shape[1] != self.thetasize:
print(f"{self.fmt} expected, {theta.shape[1]} parameters given")
assert(theta.shape[1]==self.thetasize)
return theta,constants,roipos
def ExpectedValue(self, theta, roipos=None, constants=None):
theta,constants,roipos=self._checkparams(theta,constants,roipos)
ev = np.zeros((len(theta), *self.samplesize), dtype=np.float32)
self._PSF_ComputeExpectedValue(self.inst, len(theta), theta, constants, roipos, ev)
return ev
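    # Hedged usage sketch: theta is an array of shape (numspots, thetasize) laid out
    # according to self.fmt, and ExpectedValue returns the expected photon counts with
    # shape (numspots, *samplesize). GenerateSample below draws a Poisson-distributed
    # sample around this expected value.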
def GenerateSample(self, theta, roipos=None, constants=None):
ev = self.ExpectedValue(theta, roipos, constants)
return
|
np.random.poisson(ev)
|
numpy.random.poisson
|
import numpy as np
from .Track import Track
# Track descriptor information:
# type: 0 - straight, 1 - corner
# direction: 0 - left, 1 - right (straight segments use -1)
# each row is [type, length/sweep, radius, direction]
segments = np.array(
[
[0, 150, 0, -1],
[1, np.pi / 2, 50, 0],
[0, 100, 0, -1],
[1, np.pi / 2, 90, 0],
[0, 300, 0, -1],
[1, np.pi / 2, 50, 0],
[0, 100, 0, -1],
[1, np.pi / 2, 90, 0],
[0, 155, 0, -1],
]
)
ovaltrack = Track(segments)
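# Example reading of the rows above: [0, 150, 0, -1] is a 150 m straight (radius and
# direction unused), while [1, np.pi / 2, 50, 0] is a corner sweeping pi/2 rad on a
# 50 m radius turning left (direction 0 = left, 1 = right, per the header comment).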
segments = np.array(
[
[0, 550, 0, -1],
[1, 2.7, 12, 1],
[0, 300, 0, -1],
[1, 0.2, 160, 1],
[0, 340, 0, -1],
[1, 0.3, 120, 0],
[0, 40, 0, -1],
[1, 0.8, 120, 1],
[0, 40, 0, -1],
[1, 0.4, 120, 0],
[0, 260, 0, -1],
[1, 0.4, 160, 1],
[0, 750, 0, -1],
[1, 1.2, 30, 1],
[0, 30, 0, -1],
[1, 1.1, 40, 0],
[0, 80, 0, -1],
[1, 1.4, 55, 1],
[0, 310, 0, -1],
[1, 3.14, 55, 1],
[0, 140, 0, -1],
[1, 1.45, 50, 0],
[0, 440, 0, -1],
[1, 1.4, 70, 0],
[0, 60, 0, -1],
[1, 1.2, 130, 0],
[0, 340, 0, -1],
[1, 1.45, 66, 1],
[0, 60, 0, -1],
[1, 1.35, 66, 0],
[0, 200, 0, -1],
[1, 1.57, 50, 1],
[0, 140, 0, -1],
[1, 1.6, 140, 1],
[0, 200, 0, -1],
[1, 0.7, 150, 1],
[0, 270, 0, -1],
[1, 0.5, 160, 0],
[0, 220, 0, -1],
[1, 1.1, 140, 0],
[0, 481.920, 0, -1],
[1, 1.90, 10, 1],
[0, 23.9815, 0, -1],
[1, 1.94, 17, 0],
]
)
Spa = Track(segments)
segments = np.array(
[
[0, 927, 0, -1],
[1, 1.48, 40, 1],
[0, 32, 0, -1],
[1, 1.04, 50, 0],
[0, 87, 0, -1],
[1, 1.10, 104, 1],
[0, 30, 0, -1],
[1, 1.60, 150, 1],
[0, 263, 0, -1],
[1, 1.48, 44, 1],
[0, 10, 0, -1],
[1, 1.56, 100, 1],
[0, 177, 0, -1],
[1, 2.54, 42, 0],
[0, 130, 0, -1],
[1, 0.55, 150, 0],
[0, 115, 0, -1],
[1, 1.48, 52, 0],
[0, 45, 0, -1],
[1, 0.4, 100, 1],
[0, 181, 0, -1],
[1, 1.72, 85, 1],
[0, 505, 0, -1],
[1, 2.60, 34, 0],
[0, 100, 0, -1],
[1, 0.8, 30, 0],
[0, 10, 0, -1],
[1, 3.20, 60, 1],
[0, 135, 0, -1],
[1, 1.40, 30, 1],
[0, 100, 0, -1],
[1, 1.65, 25, 0],
[0, 10, 0, -1],
[1, 1.50, 33, 1],
[0, 74.84, 0, -1],
[1, 1.503, 90, 1],
[0, 142.0, 0, -1],
]
)
Barcelona = Track(segments)
segments = np.array(
[
[0, 483.85, 0, -1],
[1, 1.50, 20, 1],
[0, 367, 0, -1],
[1, 2.3, 92, 0],
[0, 40, 0, -1],
[1, 1.75, 30, 1],
[0, 161, 0, -1],
[1, 2.4, 25, 1],
[0, 10, 0, -1],
[1, 0.4, 30, 0],
[0, 60, 0, -1],
[1, 3.2, 11, 0],
[0, 55, 0, -1],
[1, 1.6, 17, 1],
[0, 52, 0, -1],
[1, 1.9, 17, 1],
[0, 180, 0, -1],
[1, 1.05, 160, 1],
[0, 100, 0, -1],
[1, 0.4, 150, 1],
[0, 100, 0, -1],
[1, 1.54, 12, 0],
[0, 1, 0, -1],
[1, 1.54, 12, 1],
[0, 20, 0, -1],
[1, 0.9, 30, 1],
[0, 1, 0, -1],
[1, 0.9, 40, 0],
[0, 140, 0, -1],
[1, 1.5, 40, 0],
[0, 111, 0, -1],
[1, 1.0, 40, 0],
[0, 1, 0, -1],
[1, 0.90, 40, 1],
[0, 125, 0, -1],
[1, 1.4, 20, 1],
[0, 1, 0, -1],
[1, 1.5, 30, 0],
[0, 92, 0, -1],
[1, 0.7, 70, 0],
[0, 44, 0, -1],
[1, 2.6, 18, 1],
[0, 40, 0, -1],
[1, 1.4, 30, 1],
[0, 5, 0, -1],
[1, 0.4, 20, 0],
[0, 51.6, 0, -1],
[1, 0.4, 80, 1],
]
)
Monaco = Track(segments)
segments = np.array(
[
[0, 1042, 0, -1],
[1, 1.54, 16, 1],
[0, 15, 0, -1],
[1, 1.80, 19, 0],
[0, 101, 0, -1],
[1, 0.26, 140, 1],
[0, 102, 0, -1],
[1, 1.40, 200, 1],
[0, 400, 0, -1],
[1, 1.30, 15, 0],
[0, 19, 0, -1],
[1, 1.20, 15, 1],
[0, 280, 0, -1],
[1, 1.70, 67, 1],
[0, 226, 0, -1],
[1, 1.30, 40, 1],
[0, 285, 0, -1],
[1, 0.25, 160, 0],
[0, 441, 0, -1],
[1, 1.1, 28, 0],
[0, 40, 0, -1],
[1, 0.99, 86, 1],
[0, 20, 0, -1],
[1, 0.8, 35, 0],
[0, 921.55, 0, -1],
[1, 1.56, 70, 1],
[0, 0.13, 0, -1],
[1, 1.58, 200, 1],
]
)
Monza = Track(segments)
segments = np.array([[0, 200, 0, -1], [1, np.pi / 2, 20, 1], [0, 200, 0, -1]])
singleturn = Track(segments)
segments = np.array(
[
[0, 60, 0, -1],
[1, np.pi / 2, 30, 1],
[0, 60, 0, -1],
[1, np.pi / 2, 100, 0],
[0, 60, 0, -1],
]
)
doubleturn = Track(segments)
segments = np.array([[0, 120, 0, -1], [1, np.pi, 30, 1], [0, 120, 0, -1]])
uturn = Track(segments)
segments = np.array(
[
[0, 150, 0, -1],
[1, np.pi / 4, 50, 0],
[0, 10, 0, -1],
[1, np.pi / 4, 50, 1],
[0, 50, 0, -1],
[1, np.pi, 25, 0],
[0, 100, 0, -1],
]
)
multiturn = Track(segments)
segments = np.array([[0, 100, 0, -1]])
straight = Track(segments)
segments = np.array(
[
[0, 450, 0, -1],
[1, np.deg2rad(133), 17, 1],
[0, 67, 0, -1],
[1, np.deg2rad(64), 21, 0],
[0, 80, 0, -1],
[1, np.deg2rad(28.6), 90, 1],
[0, 530, 0, -1],
[1, np.deg2rad(117), 35, 1],
[0, 1, 0, -1],
[1, np.deg2rad(25), 150, 1],
[0, 1, 0, -1],
[1, np.deg2rad(16), 100, 0],
[0, 120, 0, -1],
[1, np.deg2rad(30), 100, 0],
[0, 20, 0, -1],
[1, np.deg2rad(82.5), 58, 1],
[0, 55, 0, -1],
[1, np.deg2rad(60), 60, 0],
[0, 198, 0, -1],
[1, np.deg2rad(150), 20, 1],
[0, 300, 0, -1],
[1, np.deg2rad(60), 95, 0],
[0, 28, 0, -1],
[1, np.deg2rad(121), 15, 0],
[0, 120, 0, -1],
[1, np.deg2rad(5.35), 120, 0],
[0, 553, 0, -1],
[1, np.deg2rad(80), 35, 0],
[0, 1, 0, -1],
[1, np.deg2rad(83), 135, 0],
[0, 60, 0, -1],
[1, np.deg2rad(98), 125, 1],
[0, 107, 0, -1],
[1, np.deg2rad(30), 70, 1],
[0, 20, 0, -1],
[1, np.deg2rad(93.5), 40, 1],
[0, 735, 0, -1],
[1, np.deg2rad(100), 30, 1],
[0, 47.3, 0, -1],
[1,
|
np.deg2rad(21.5)
|
numpy.deg2rad
|
import config
from utils import SumTree
import numpy as np
import random
import torch
TD_INIT = config.td_init
EPSILON = config.epsilon
ALPHA = config.alpha
class Replay_buffer:
'''
basic replay buffer
'''
def __init__(self, capacity = int(1e6), batch_size = None):
self.capacity = capacity
self.memory = [None for _ in range(capacity)] #list to save tuples
self.ind_max = 0 #how many transitions have been stored
def remember(self, state, action, reward, next_state, done):
ind = self.ind_max % self.capacity
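        # ring-buffer behaviour: once ind_max exceeds capacity, the index wraps around
        # and the oldest transitions are overwritten in place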
self.memory[ind] = (state, action, reward, next_state, done)
self.ind_max += 1
def sample(self, k):
'''
        Return k sampled transitions; the caller must ensure at least k transitions are stored.
'''
index_set = random.sample(list(range(len(self))), k)
states = torch.from_numpy(
|
np.vstack([self.memory[ind][0] for ind in index_set])
|
numpy.vstack
|
# coding=utf-8
"""
Module to apply a previously trained model to estimate the epigenome
for a specific cell type in a different species
"""
import os as os
import pandas as pd
import numpy as np
import numpy.random as rng
import operator as op
import multiprocessing as mp
import json as json
import pickle as pck
from scipy.interpolate import LSQUnivariateSpline as kspline
from crplib.auxiliary.seq_parsers import get_twobit_seq
from crplib.auxiliary.hdf_ops import load_masked_sigtrack, get_valid_hdf5_groups, get_mapindex_groups
from crplib.auxiliary.file_ops import create_filepath
from crplib.auxiliary.modeling import select_dataset_subset, load_model, \
load_model_metadata, get_scorer, load_ml_dataset, determine_scoring_method, apply_preprocessor
from crplib.mlfeat.featdef import feat_mapsig, get_online_version
from crplib.auxiliary.constants import CHROMOSOME_BOUNDARY
from crplib.metadata.md_signal import MD_SIGNAL_COLDEFS
from crplib.metadata.md_signal import gen_obj_and_md as gen_sigobj
from crplib.metadata.md_regions import MD_REGION_COLDEFS
from crplib.metadata.md_regions import gen_obj_and_md as genregobj
def load_dataset(fpath, groups, features, subset='', ycol='', ytype=None):
"""
:param fpath:
:param groups:
:param features:
:param subset:
:param ycol:
:param ytype:
:return:
"""
with pd.HDFStore(fpath, 'r') as hdf:
if isinstance(groups, (list, tuple)):
dataset = pd.concat([hdf[grp] for grp in sorted(groups)], ignore_index=True)
else:
dataset = hdf[groups]
dataset = select_dataset_subset(dataset, subset)
y_depvar = None
if ytype is not None:
y_depvar = dataset.loc[:, ycol].astype(ytype, copy=True)
name_col = [cn for cn in dataset.columns if cn in ['name', 'source']]
sample_names = []
if name_col:
sample_names = dataset.loc[:, name_col[0]].tolist()
predictors = dataset.loc[:, features]
return predictors, sample_names, y_depvar
def run_permutation_test(data, output, model, numperm, scorer, extra_scorer=''):
"""
:param data:
:param output:
:param model:
:param numperm:
:param scorer:
:return:
"""
perm_scores = []
extra_scores = []
if extra_scorer:
extra_name = extra_scorer
extra_scorer = get_scorer(extra_scorer)
for _ in range(numperm):
perm_out = rng.permutation(output)
perm_scores.append(scorer(model, data, perm_out))
if extra_scorer:
extra_scores.append(extra_scorer(model, data, perm_out))
# the float here for later JSONification
assert len(perm_scores) == numperm, \
'Permutation failed, generated only {} permutation scores, needed {}'.format(len(perm_scores), numperm)
perm_stats = {'perm_mean': float(np.mean(perm_scores)),
'perm_median': float(np.median(perm_scores)),
'perm_min': float(np.min(perm_scores)),
'perm_std': float(np.std(perm_scores)),
'perm_var': float(np.var(perm_scores)),
'perm_max': float(np.max(perm_scores)),
'perm_95pct': float(np.percentile(perm_scores, 95)),
'perm_5pct': float(np.percentile(perm_scores, 5))}
if extra_scorer:
extra_stats = {'perm_mean': float(np.mean(extra_scores)),
'perm_median': float(np.median(extra_scores)),
'perm_min': float(np.min(extra_scores)),
'perm_std': float(np.std(extra_scores)),
'perm_var': float(np.var(extra_scores)),
'perm_max': float(np.max(extra_scores)),
'perm_95pct': float(np.percentile(extra_scores, 95)),
'perm_5pct': float(np.percentile(extra_scores, 5))}
perm_stats['perm_extra_{}'.format(extra_name)] = extra_stats
return perm_stats
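# Interpretation note: the statistics above form a null distribution of scores obtained
# with shuffled targets; a model score well above perm_95pct (or below perm_5pct for
# loss-like scorers) indicates performance unlikely to arise by chance.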
############################
def smooth_signal_estimate(signal, res):
"""
:param signal:
:param res:
:return:
"""
# for default resolution of 25, spans roughly one nucleosome
window = res * 10
for pos in range(CHROMOSOME_BOUNDARY, len(signal), window):
try:
smoother = kspline(range(pos, pos+window), # x-axis
signal[pos:pos+window], # y-axis
t=[j+res for j in range(pos, pos + window - res, res)], # knots
k=3) # degree of polynomial, cubic is default
signal[pos:pos+window] = smoother(range(pos, pos+window))
except Exception as err:
if pos + window > len(signal):
break
else:
raise err
signal = np.clip(signal, 0, signal.max())
return signal
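# Note: the smoother above fits a cubic least-squares spline independently in consecutive
# windows of res * 10 positions, with interior knots every res positions, and clips the
# result to be non-negative.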
def make_signal_estimate(params):
"""
:param params:
:return:
"""
chrom = params['chrom']
model = pck.load(open(params['modelfile'], 'rb'))
seq = get_twobit_seq(params['seqfile'], params['chrom'])
chromlen = len(seq)
res = params['resolution']
index_groups = get_mapindex_groups(params['targetindex'], '')
with pd.HDFStore(params['targetindex'], 'r') as idx:
mask = idx[index_groups[chrom]['mask']]
map_sig = load_masked_sigtrack(params['inputfile'], '',
params['inputgroup'], chrom, chromlen, mask=mask)
mapfeat = feat_mapsig
est_sig = np.zeros(chromlen, dtype=np.float64)
lolim = CHROMOSOME_BOUNDARY
hilim = int(chromlen // res * res)
comp_seqfeat = get_online_version(params['features'], params['kmers'])
get_values = op.itemgetter(*tuple(params['feature_order']))
chunks = []
positions = []
for pos in range(lolim, hilim, res):
chunk = {'seq': seq[pos:pos+res]}
positions.append(pos)
chunk.update(mapfeat(map_sig[pos:pos+res]))
chunk = comp_seqfeat(chunk)
chunks.append(get_values(chunk))
if len(chunks) >= 10000:
y_hat = model.predict(np.array(chunks))
for idx, val in zip(positions, y_hat):
est_sig[idx:idx+res] = val
chunks = []
positions = []
if len(chunks) > 0:
y_hat = model.predict(np.array(chunks))
for idx, val in zip(positions, y_hat):
est_sig[idx:idx+res] = val
if not params['nosmooth']:
est_sig = smooth_signal_estimate(est_sig, res)
return chrom, est_sig
def assemble_params_estsig(args):
"""
:param args:
:return:
"""
all_groups = get_valid_hdf5_groups(args.inputfile, args.inputgroup)
model_md = load_model_metadata(args)
commons = {'modelfile': args.modelfile, 'resolution': int(model_md['resolution']),
'seqfile': args.seqfile, 'targetindex': args.targetindex, 'inputfile': args.inputfile,
'inputgroup': args.inputgroup, 'features': model_md['features'],
'kmers': model_md['kmers'], 'feature_order': model_md['feature_order'],
'nosmooth': args.nosmooth}
arglist = []
for g in all_groups:
chrom = g.rsplit('/', 1)[1]
tmp = dict(commons)
tmp['chrom'] = chrom
arglist.append(tmp)
return arglist
def run_estimate_signal(logger, args):
"""
:param logger:
:param args:
:return:
"""
logger.debug('Assembling worker parameters')
arglist = assemble_params_estsig(args)
logger.debug('Created parameter list of size {}'.format(len(arglist)))
with pd.HDFStore(args.outputfile, 'w', complevel=9, complib='blosc', encoding='utf-8') as hdfout:
metadata = pd.DataFrame(columns=MD_SIGNAL_COLDEFS)
with mp.Pool(args.workers) as pool:
logger.debug('Start processing...')
resit = pool.imap_unordered(make_signal_estimate, arglist, chunksize=1)
for chrom, valobj in resit:
logger.debug('Processed chromosome {}'.format(chrom))
group, valobj, metadata = gen_sigobj(metadata, args.outputgroup, chrom, args.inputfile, valobj)
hdfout.put(group, valobj, format='fixed')
hdfout.flush()
logger.debug('Estimated signal data saved')
logger.debug('Saving metadata')
hdfout.put('metadata', metadata)
hdfout.flush()
logger.debug('All chromosomes processed')
return 0
def build_output_dataframe(dset, pred, probs, classes, merge, reduce):
"""
:param dset:
:param pred:
:param probs:
:param classes:
:param merge:
:return:
"""
dset = dset.assign(class_pred=pred)
class_cols = ['class_prob_' + cls for cls in map(str, list(map(int, classes)))]
dset = pd.concat([dset, pd.DataFrame(probs, columns=class_cols)], axis='columns')
if reduce:
dset = dset.loc[dset.class_pred.isin(reduce), :]
if merge:
assert len(reduce) == 1, 'Merging overlapping regions in dataset w/o reducing to single class not supported'
dset.drop([col for col in dset.columns if col.startswith('ft')], axis='columns', inplace=True)
dset.sort_values(by=['start', 'end'], axis='index', ascending=True, inplace=True)
dset.index = np.arange(dset.shape[0])
# TODO
# in spare time, find out if there is a more
# native way in Pandas to merge overlapping intervals...
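        # A possible pandas-native alternative (untested sketch; assumes rows already
        # sorted by 'start', as done above):
        #     grp = (dset['start'] > dset['end'].shift().cummax()).cumsum()
        #     merged = dset.groupby(grp).agg({'start': 'min', 'end': 'max'})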
new_rows = []
cur_start = dset.loc[0, 'start']
cur_end = dset.loc[0, 'end']
cur_probs = []
cur_names = set()
get_name = 'name' if 'name' in dset.columns else 'source'
assert get_name in dset.columns, 'No naming column exists for regions'
class_prob = 'class_prob_' + str(reduce[0])
for row in dset.itertuples(index=False):
if row.start <= cur_end:
cur_end = row.end
cur_probs.append(row.__getattribute__(class_prob))
cur_names.add(row.__getattribute__(get_name))
else:
regname = cur_names.pop() # return any or unique
new_rows.append([cur_start, cur_end, regname, np.average(cur_probs)])
cur_names = set()
cur_probs = []
cur_start = row.start
cur_end = row.end
cur_probs.append(row.__getattribute__(class_prob))
cur_names.add(row.__getattribute__(get_name))
new_cols = ['start', 'end', 'name', 'class_prob']
dset = pd.DataFrame(new_rows, columns=new_cols)
return dset
def model_scan_regions(params):
"""
:param params:
:return:
"""
model_md = json.load(open(params['modelmetadata'], 'r'))
model = pck.load(open(params['modelfile'], 'rb'))
feat_order = model_md['feature_order']
featdata, _, _ = load_region_data(params['inputfile'], params['inputgroup'], feat_order, params['classlabels'])
y_pred = model.predict(featdata)
y_prob = model.predict_proba(featdata)
class_order = model.classes_
with pd.HDFStore(params['inputfile'], 'r') as hdf:
full_dataset = hdf[params['inputgroup']]
df = build_output_dataframe(full_dataset, y_pred, y_prob, class_order, params['merge'], params['reduce'])
return params['chrom'], df
def assemble_params_scnreg(args, logger):
"""
:return:
"""
all_groups = get_valid_hdf5_groups(args.inputfile, args.inputgroup)
logger.debug('Identified {} valid groups in input file'.format(len(all_groups)))
merge_regions = False
with pd.HDFStore(args.inputfile, 'r') as hdf:
md = hdf['metadata']
res = md.loc[0, 'resolution']
if res > 0:
merge_regions = True
logger.debug('Detected - merge regions: {}'.format(merge_regions))
if not args.modelmetadata:
fpath_md = args.modelfile.rsplit('.', 1)[0] + '.json'
else:
fpath_md = args.modelmetadata
commons = vars(args)
del commons['module_logger']
del commons['execute']
commons['modelmetadata'] = fpath_md
arglist = []
for g in all_groups:
tmp = dict(commons)
tmp['inputgroup'] = g
_, tmp['chrom'] = os.path.split(g)
tmp['merge'] = merge_regions
arglist.append(tmp)
    logger.debug('Built argument list of size {} to process'.format(len(arglist)))
return arglist
def run_scan_regions(logger, args):
"""
:param logger:
:param args:
:return:
"""
arglist = assemble_params_scnreg(args, logger)
with pd.HDFStore(args.outputfile, 'w') as hdf:
metadata = pd.DataFrame(columns=MD_REGION_COLDEFS)
with mp.Pool(args.workers) as pool:
resit = pool.imap_unordered(model_scan_regions, arglist, chunksize=1)
for chrom, dataobj in resit:
logger.debug('Received data for chromosome {}'.format(chrom))
grp, dataobj, metadata = genregobj(metadata, args.outputgroup, chrom, [args.inputfile, args.modelfile], dataobj)
hdf.put(grp, dataobj, format='fixed')
hdf.flush()
logger.debug('Flushed data to file')
hdf.put('metadata', metadata, format='table')
return 0
def run_regression_testdata(args, model, model_md, dataset, y_true, logger):
"""
:param args:
:param model:
:param model_md:
:param dataset:
:param y_true:
:return:
"""
logger.debug('Making prediction for test dataset')
scoring_method = determine_scoring_method(args, model_md, logger)
scorer = get_scorer(scoring_method)
y_pred = model.predict(dataset)
model_perf = scorer(model, dataset, y_true)
out_metadata = {}
if args.numperm > 0:
logger.debug('Running permutation test')
perf_score, permscores, permstats, permparams = run_permutation_test(dataset, y_true, model, args.cvperm,
args.numperm, args.workers, scorer)
if not
|
np.isclose(model_perf, perf_score, rtol=1e-05, atol=1e-05)
|
numpy.isclose
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2018 <NAME> <<EMAIL>>
#
# Distributed under terms of the GNU-License license.
"""
"""
import numpy as np
import numpy.linalg as LA
import scipy.stats as scistats
import matplotlib.pyplot as plt
import sklearn.gaussian_process as skgp
from sklearn.gaussian_process.kernels import DotProduct, WhiteKernel, RBF
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, WhiteKernel
def _hermite_moment_nonlinear(u,mu,sigma,mu3,mu4,fisher=True):
"""
Hermite moment models for nonlinear responses
"""
if fisher:
mu4 = mu4 + 3
if mu4 > 3 :
h3 = mu3/6
h4 = (mu4-3)/24
c4 = (np.sqrt(1+36*h4)-1)/18
c3 = h3/(1+6*c4)
k = 1/(np.sqrt(1+2*c3**2+6*c4**2))
x = mu + k*sigma*(u+c3*(u**2-1) + c4*(u**3-3*u))
elif mu4 < 3:
h4 = (mu4-3) / 24
h3 = mu3 / 6
a = h3/(3*h4)
b = -1/(3*h4)
k = (b - 1 - a**2)**3
c = 1.5*b*(a+u) - a**3
        x = mu + sigma * ((np.sqrt(c**2+k)+c)**(1/3) - (np.sqrt(c**2+k)-c)**(1/3) - a)  # cube roots
else:
x = mu + sigma * u
return x
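# Hedged usage sketch (example values assumed): map a standard-normal quantile to the
# corresponding non-Gaussian response quantile with given mean, standard deviation,
# skewness and excess kurtosis via the Hermite moment model above.
def _example_hermite_moment():
    u = 1.645                    # ~95% standard-normal quantile
    mu, sigma = 0.0, 1.0         # response mean and standard deviation
    mu3, mu4 = 0.3, 1.2          # skewness and excess kurtosis (fisher=True)
    return _hermite_moment_nonlinear(u, mu, sigma, mu3, mu4, fisher=True)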
def _mean_up_crossing_rate(data, dt=1):
"""
    Calculate the mean up-crossing rate for data of shape [n_realizations, n_timesteps], sampled with time step dt
"""
data_shape = data.shape
data_mean = np.mean(data,axis=0)
data = data - data_mean
data1 = data[:,0:-1]
data2 = data[:,1:]
cross_points = data1 * data2
up_points = data2 - data1
mucr = []
for i in np.arange(data.shape[0]):
candidatex = np.arange(data1.shape[1])
condition1 = cross_points[i,:] < 0
condition2 = up_points[i,:] > 0
condition = condition1 & condition2
up_cross_idx = candidatex[np.where(condition)]
        T = (up_cross_idx[-1] - up_cross_idx[0]) * dt ## Only counting complete cycles
mucr.append((len(up_cross_idx)-1)/T)
return np.array(mucr).reshape(-1,1)
np.set_printoptions(precision=3)
filename = 'GQ/GQ9_'
datax = []
datay = []
time_ramp = 50
for i in np.arange(10):
_datax = np.load(filename+'x{:d}.npy'.format(i))
_datay = np.load(filename+'y{:d}.npy'.format(i))
datax.append(np.squeeze(_datax[0])) ## quadrature, (coords, weights)
dt = _datay[0,0,1,0] - _datay[0,0,0,0]
time_ramp_idx = int(time_ramp/dt)
_datay = _datay[:,:,time_ramp_idx:,:]
y_max = np.max(_datay[:,:,:,1], axis=2)
y_std = np.std(_datay[:,:,:,1], axis=2)
y_mu3 = scistats.skew(_datay[:,:,:,1], axis=2)
y_mu4 = scistats.kurtosis(_datay[:,:,:,1], axis=2)
y_mucr=_mean_up_crossing_rate(np.squeeze(_datay[:,:,:,1]), dt=dt)
print('Skewness:{} '.format(y_mu3.T))
print('Kurtosis:{} '.format(y_mu4.T))
# print(_datay[0,0,1,0] , _datay[0,0,0,0])
# datay.append([np.squeeze(y_max), np.squeeze(y_std), np.squeeze(y_mu3), np.squeeze(y_mu4)])
datay.append([np.squeeze(y_max), np.squeeze(y_std), np.squeeze(y_mu3), np.squeeze(y_mu4), np.squeeze(y_mucr)])
datax = np.array(datax)
datay = np.array(datay)
print('*******')
print(np.array(datax).shape, np.array(datay).shape)
# filename = 'MC2500'
# datax = np.load('MC2500x.npy')
# datay = np.load('MC2500y.npy')
# X = datax.T
# y = np.max(datay[:,:,:,1],axis=2)
# print(X.shape,y.shape)
# plt.plot(datax.T,datay.T,'-.o')
# plt.xlabel(r'Spectrum parameter $c$')
# plt.ylabel(r'Max$(Y_t)$')
# plt.savefig('Max_vs_c_observation.eps')
# X = datax.reshape((datax.size,1 ))
# y = datay.reshape((datay.size,1 ))
X = np.mean(datax,axis=0).reshape(datax.shape[1],-1)
y_max = np.squeeze(datay[:,0,:])
# print(y_max)
y_max_mean= np.mean(y_max, axis=0)
# print(y_max_mean)
y_max = y_max - y_max_mean
# print(y_max)
y_max_std =
|
np.std(y_max, axis=0,ddof=1 )
|
numpy.std
|
import numpy as np
import numpy.linalg as lg
class atom:
def __init__(self,name,pos):
self.type=name
self.pos=pos
self.rank=0
self.q=0
self.d=np.zeros(3)
self.quad=np.zeros(5)
self.pol=np.zeros([3,3])
def setmultipoles(self,q,d,quad,pol):
self.q=q
self.d=d
self.quad=quad
self.detrank()
self.pol=pol
def CartesianQuadrupoles(self):
cartesian=np.zeros([3,3])
sqrt3=np.sqrt(3)
        cartesian[0][0] = 0.5 * (sqrt3 * self.quad[3] - self.quad[0])
        cartesian[1][1] = -0.5 * (sqrt3 * self.quad[3] + self.quad[0])
        cartesian[2][2] = self.quad[0]
        cartesian[0][1] = 0.5 * sqrt3 * self.quad[4]
        cartesian[1][0] = cartesian[0][1]
        cartesian[0][2] = 0.5 * sqrt3 * self.quad[1]
        cartesian[2][0] = cartesian[0][2]
        cartesian[1][2] = 0.5 * sqrt3 * self.quad[2]
        cartesian[2][1] = cartesian[1][2]
return cartesian
def detrank(self):
rank=0
if any(self.d!=0):
rank=1
if any(self.quad!=0):
rank=2
self.rank=rank
def shift(self,shift):
#print self.pos
#print shift
self.pos+=shift
#print self.pos
def xyzline(self):
return "{:<3s} {:.6f} {:.6f} {:.6f}\n".format(self.type,self.pos[0],self.pos[1],self.pos[2])
def splitup(self,spacing=0.01):
multipolelist=[]
m=atom(self.type,self.pos)
m.setmultipoles(self.q,np.zeros(3),np.zeros(5),self.pol)
multipolelist.append(m)
if self.rank>0 and any(self.d!=0):
cartesian=np.array([self.d[1],self.d[2],self.d[0]])
norm=cartesian/np.linalg.norm(self.d)
posA=self.pos+0.5*spacing*norm
posB=self.pos-0.5*spacing*norm
qA=np.linalg.norm(self.d)/spacing
if np.absolute(qA) >1.e-9:
qB=-qA
                d1 = atom("D", posA)
                d1.setmultipoles(qA, np.zeros(3), np.zeros(5), np.zeros([3, 3]))
                d2 = atom("D", posB)
                d2.setmultipoles(qB, np.zeros(3), np.zeros(5), np.zeros([3, 3]))
multipolelist.append(d1)
multipolelist.append(d2)
if self.rank>1:
cartesian=self.CartesianQuadrupoles()
eigenval,eigenvec=np.linalg.eigh(cartesian)
fullcharge=0
for val,vec in zip(eigenval,eigenvec.T):
q=1/3.0*val/(spacing**2)
if np.absolute(q)<1.e-9:
continue
vecA=self.pos+spacing*vec
vecB = self.pos-spacing * vec
q1 = atom("Q", vecA);
q1.setmultipoles(q, np.zeros(3), np.zeros(5), np.zeros([3, 3]))
q2 = atom("Q", vecB);
q2.setmultipoles(q, np.zeros(3),
|
np.zeros(5)
|
numpy.zeros
|
import numpy as np
from numpy.testing import assert_array_equal, assert_raises
from nilabels.tools.aux_methods.morpological_operations import get_morphological_patch, get_morphological_mask, \
get_values_below_patch, get_circle_shell_for_given_radius
# TEST aux_methods.morphological.py
def test_get_morpological_patch():
    expected = np.ones([3, 3]).astype(bool)
expected[0, 0] = False
expected[0, 2] = False
expected[2, 0] = False
expected[2, 2] = False
assert_array_equal(get_morphological_patch(2, 'circle'), expected)
assert_array_equal(get_morphological_patch(2, 'square'),
|
np.ones([3, 3])
|
numpy.ones
|
"""
Collection of environment classes that are based on rai-python
"""
import sys
import os
import time
import tqdm
import numpy as np
import matplotlib.pyplot as plt
sys.path.append(os.getenv("HOME") + '/git/rai-python/rai/rai/ry')
if os.getenv("HOME") + '/git/rai-python/rai/rai/ry' in sys.path:
import libry as ry
class DiskEnv():
"""
Wrapper class for the disk-on-a-table environment,
simulated using PhysX
"""
def __init__(
self,
action_duration=0.5,
action_length=0.1,
break_pos_thres=0.03,
floor_level=0.65,
finger_relative_level=0.14,
tau=.01,
safety_distance=0.1,
spherically_symmetric_neighbours=False,
file=None,
display=False
):
self.action_duration = action_duration
self.action_length = action_length
self.break_pos_thres = break_pos_thres
self.floor_level = floor_level
self.finger_relative_level = finger_relative_level
self.tau = tau
self.safety_distance = safety_distance
self.spherically_symmetric_neighbours = spherically_symmetric_neighbours
self.n_steps = int(self.action_duration/self.tau)
self.proportion_per_step = 1/self.n_steps
self.config = ry.Config()
if file is not None:
self.config.addFile(file)
else:
self.config.addFile(os.getenv("HOME") +
'/git/ryenv/ryenv/z.push_default.g')
self.config.makeObjectsFree(['finger'])
self.config.setJointState([0.3, 0.3, 0.15, 1., 0., 0., 0.])
self.finger_radius = self.config.frame('finger').info()['size'][0]
self.simulation = self.config.simulation(
ry.SimulatorEngine.physx, display)
self.reset_disk()
self.disk_dimensions = [0.2, 0.25]
self.reset([0.3, 0.3])
def view(self):
"""
Create view of current configuration
"""
return self.config.view()
def add_and_show_target(self, target_state):
"""
Add target state and visualize it in view
"""
target = self.config.addFrame(name="target")
target.setShape(ry.ST.cylinder, size=self.disk_dimensions)
target.setColor([1, 1, 0, 0.4])
self.set_frame_state(
target_state,
"target"
)
def get_disk_state(self):
"""
Get the current position of the disk
"""
return np.array(self.config.frame('box').getPosition()[:2])
def reset_disk(self, coords=(0, 0)):
"""
Reset the disk to an arbitrary position
"""
# always reset box to the center
self.set_frame_state(
coords,
'box'
)
state_now = self.config.getFrameState()
self.simulation.setState(state_now, np.zeros((state_now.shape[0], 6)))
def allowed_state(
self,
finger_position,
disk_position=np.array([0, 0])
):
"""
Return whether a state of the finger is within the allowed area or not
"""
return np.linalg.norm(
finger_position - disk_position
) > self.disk_dimensions[0] + self.safety_distance
def reset(
self,
finger_position,
disk_position=(0, 0)
):
"""
Reset the state (i.e. the finger state) to an arbitrary position
"""
assert self.allowed_state(
finger_position,
disk_position=np.array(disk_position)
)
joint_q = np.array([
*finger_position,
self.finger_relative_level,
1.,
0.,
0.,
0.
])
self.config.setJointState(joint_q)
self.simulation.step(u_control=[], tau=self.tau)
self.reset_disk(coords=disk_position)
def evolve(
self,
n_steps=1000,
fps=None
):
"""
Evolve the simulation for n_steps time steps of length self.tau
"""
for _ in range(n_steps):
self.simulation.step(u_control=[], tau=self.tau)
if fps is not None:
time.sleep(1/fps)
def set_frame_state(
self,
state,
frame_name
):
"""
        Set an arbitrary frame of the configuration to
        an arbitrary state
"""
self.config.frame(frame_name).setPosition([
*state[:2],
self.floor_level
])
def transition(
self,
action,
fps=None
):
"""
Simulate the system's transition under an action
"""
# gradual pushing movement
joint_q = self.config.getJointState()
for _ in range(self.n_steps):
joint_q[0] += self.proportion_per_step * action[0]
joint_q[1] += self.proportion_per_step * action[1]
self.config.setJointState(joint_q)
self.simulation.step(u_control=[], tau=self.tau)
if fps is not None:
time.sleep(1/fps)
change = np.array(
self.config.frame('box').getPosition()[:2]
)
return change
def get_state(self):
"""
Get the current state, i.e. position of the finger
"""
return self.config.getJointState()[:2]
def get_relative_finger_state(self):
""""
Get the current state (position of the finger) relative to
the position of the disk
"""
disk = self.get_disk_state()
finger = self.get_state()
finger_shifted = finger - disk
return finger_shifted
def sample_random_goals(self, n_goals):
"""
This function samples uniformly from the goal distribution
"""
angle_dir = np.pi*(2*np.random.rand(n_goals)-1)
return np.stack((
np.cos(angle_dir),
np.sin(angle_dir)
), axis=-1)
def calculate_thresholded_change(self, change):
"""Apply threshold to change in order to avoid giving rewards fors numerical noise"""
change_thresholded = change.copy()
if np.linalg.norm(change_thresholded) < self.break_pos_thres:
change_thresholded[0] = 0
change_thresholded[1] = 0
return change_thresholded
def calculate_reward(self, change, goal):
"""calculate reward from intended goal and actual change of disk coordinates"""
change = self.calculate_thresholded_change(change)
direction_changed = not sum(change) == 0
if direction_changed:
direction_cosine = np.sum(change[:2]*goal[:2])/np.linalg.norm(
change[:2]
)/np.linalg.norm(goal[:2])
if direction_cosine > 0.9:
return 1
return -1
return 0
def find_near_neighbours(
self,
states,
goals,
state,
goal,
scale
):
"""
        This function does a rapid pre-choice of possible near neighbours only by
putting constraints on single-coordinate differences on the 5 coordinates
state_x,state_y,goal_dir_x,goal_dir_y,goal_orientation.
This greatly reduces the number of pairs the actual distance has to be
calculated for.
"""
# only consider samples who have a smaller difference than action_length
# in all of their state coordinates...
subset = np.where(
np.abs(
states[:, 0] - state[0]
) < self.action_length * scale
)[0]
subset = subset[
np.abs(
states[subset, 1] - state[1]
) < self.action_length * scale
]
# ...and who have a smaller difference than 0.1
# in both of the goal direction coordinates
subset = subset[
np.abs(
goals[subset, 0] - goal[0]
) < 0.1 * scale
]
subset = subset[
np.abs(
goals[subset, 1] - goal[1]
) < 0.1 * scale
]
if self.spherically_symmetric_neighbours:
# angle-dependent cut-out in goal space
subset = subset[
np.sum(
goals[subset, :] * goal[None, :],
axis=-1
) > np.cos(0.1 * scale)
]
# circular cut-out in state-space
subset = subset[
np.linalg.norm(
states[subset, :] - state[None, :],
axis=-1
) < self.action_length * scale
]
return subset
def get_augmented_targets(self, states, targets):
"""
Create handcrafted targets for the values of some of the states
"""
targets[
np.linalg.norm(
states,
axis=-1
) > 2
] = 0
def visualize_states(self, states, save_name=None):
"""
Helper function to visualize a collection of states
"""
plt.plot(
states[:, 0], states[:, 1], '*'
)
plt.xlim(-1, 1)
plt.ylim(-1, 1)
if save_name is not None:
plt.savefig(save_name + '.png')
plt.show()
def test_controller(
self,
controller,
n_of_n_splits=(0, 1),
n_trial_numbers=20,
rollout_length=50
):
"""
Create data for a circular plot of the performance of
the controller in this environment
"""
direction_angles_all = np.linspace(0, 2*np.pi, 16, endpoint=False)
direction_angles = np.split(
direction_angles_all,
n_of_n_splits[1]
)[n_of_n_splits[0]]
all_rewards = []
for direction_angle in tqdm.tqdm(direction_angles):
goal = np.array([
np.cos(direction_angle),
np.sin(direction_angle)
])
rewards = []
trial_number = 0
while trial_number < n_trial_numbers:
possible_finger_state = 1*(np.random.rand(2)-0.5)
if self.allowed_state(possible_finger_state) and (
sum(goal*possible_finger_state)/np.linalg.norm(
goal)/np.linalg.norm(possible_finger_state) < 0
):
trial_number += 1
self.reset(
possible_finger_state,
disk_position=[0, 0]
)
for __ in range(rollout_length):
action = controller.get_action(
self.get_state(), goal
)
if any(np.isnan(action)):
raise Exception('action is nan')
change = self.transition(action)
if np.sum(np.abs(
self.calculate_thresholded_change(change)
)) != 0:
break
if np.sum(np.abs(
self.calculate_thresholded_change(change)
)) == 0:
reward = -10
else:
reward = np.sum(np.array(change)*np.array(
goal))/np.linalg.norm(change)/np.linalg.norm(goal)
print(goal, self.calculate_thresholded_change(change), reward)
rewards.append(reward)
all_rewards.append(rewards)
return all_rewards
class DiskMazeEnv():
"""
Wrapper class for the disk-on-a-table environment,
simulated using PhysX
"""
def __init__(
self,
action_duration=0.5,
action_length=0.1,
floor_level=0.075,
wall_height=0.1,
wall_thickness=0.01,
finger_relative_level=0.075,
tau=.01,
file=None,
display=False
):
self.action_duration = action_duration
self.action_length = action_length
self.floor_level = floor_level
self.wall_height = wall_height
self.wall_thickness = wall_thickness
self.finger_relative_level = finger_relative_level
self.tau = tau
self.n_steps = int(self.action_duration/self.tau)
self.proportion_per_step = 1/self.n_steps
self.config = ry.Config()
if file is not None:
self.config.addFile(file)
else:
self.config.addFile(os.getenv("HOME") +
'/git/ryenv/ryenv/z.push_maze.g')
self.config.makeObjectsFree(['finger'])
self.simulation = self.config.simulation(
ry.SimulatorEngine.physx, display)
self.wall_num = 0
self.reset([0, -0.1])
def view(self):
"""
Create view of current configuration
"""
return self.config.view()
def get_disk_state(self):
"""
Get the current state of the disk
"""
return np.array(self.config.frame('disk').getPosition()[:2])
def get_finger_state(self):
"""
Get the current state of the finger
"""
# for some reason, the finger has the middle of the table
# as reference
return self.config.getJointState()[:2] + np.array([
0.5, 0.5
])
def get_relative_finger_state(self):
""""
Get the current state of the finger relative to
the state of the disk
"""
disk = self.get_disk_state()
finger = self.get_finger_state()
finger_shifted = finger - disk
return finger_shifted
def get_state(self):
"""
Get the current state of both finger and disk
"""
return np.concatenate((
self.get_finger_state(),
self.get_disk_state()
))
def reset_disk(self, coords=(0, 0)):
"""
Reset the disk to an arbitrary position
"""
# reset disk
disk = self.config.frame('disk')
disk.setPosition([
*coords,
self.floor_level
])
disk.setQuaternion([
1., 0., 0., 0.
])
state_now = self.config.getFrameState()
self.simulation.setState(state_now, np.zeros((state_now.shape[0], 6)))
def reset(
self,
finger_position,
disk_position=(0.1, 0.1)
):
"""
Reset the state (i.e. the finger state) to an arbitrary position
"""
finger_position_relative_to_table = np.array(
finger_position
) - np.array([0.5, 0.5])
joint_q = np.array([
*finger_position_relative_to_table,
self.finger_relative_level,
1.,
0.,
0.,
0.
])
self.config.setJointState(joint_q)
self.simulation.step(u_control=[], tau=self.tau)
self.reset_disk(coords=disk_position)
def transition(
self,
action,
fps=None
):
"""
Simulate the system's transition under an action
"""
pos_before = np.array(
self.config.frame('disk').getPosition()[:2]
)
# gradual pushing movement
joint_q = self.config.getJointState()
for _ in range(self.n_steps):
joint_q[0] += self.proportion_per_step * action[0]
joint_q[1] += self.proportion_per_step * action[1]
self.config.setJointState(joint_q)
self.simulation.step(u_control=[], tau=self.tau)
if fps is not None:
time.sleep(1/fps)
change = np.array(
self.config.frame('disk').getPosition()[:2]
) - pos_before
return change
def add_wall(self, start_end):
"""
Add a wall to the maze based on start and end position
"""
start, end = start_end
# make sure the wall extends into exactly one direction
assert sum(start == end) == 1
box_position = (end+start)/2
box_position = np.append(
box_position,
(self.floor_level + self.wall_height)/2
)
xy_dim = self.wall_thickness*(start == end) + np.abs(end-start)
wall = self.config.frame('wall_'+str(self.wall_num))
wall.setShape(ry.ST.box, [
xy_dim[0], xy_dim[1], self.wall_height, 0.0
])
wall.setPosition(box_position)
wall.setQuaternion([1, 0, 0, 0])
wall.setContact(-1)
wall.setColor([1, 1, 0])
self.wall_num += 1
def remove_remaining_walls(self):
"""
Remove non-used walls from .g file
"""
while self.wall_num < 30:
wall = self.config.delFrame('wall_'+str(self.wall_num))
self.wall_num += 1
def add_maze(self, maze_array):
"""
Translate the maze array to a physical maze
in the simulation
"""
walls_left = maze_array
n_dim, m_dim = walls_left.shape
start_ends = []
for i in range(n_dim):
for j in range(m_dim):
                if i == 0 or i == n_dim-1 or j == 0 or j == m_dim-1:
walls_left[i, j] = 0
if walls_left[i, j]:
for neighbour in [
(1, 0),
(0, 1),
(-1, 0),
(0, -1)
]:
length = 0
while (
i + (length+1) * neighbour[0] >= 0
and i + (length+1) * neighbour[0] <= n_dim-1
and j + (length+1) * neighbour[1] >= 0
and j + (length+1) * neighbour[1] <= m_dim-1
and walls_left[
i + (length+1) * neighbour[0],
j + (length+1) * neighbour[1]
]
):
walls_left[
i + (length+1) * neighbour[0],
j + (length+1) * neighbour[1]
] = 0
length += 1
if length > 0:
start_ends.append([
[
i,
j
],
[
i + length * neighbour[0],
j + length * neighbour[1]
]
])
start_ends = np.array(start_ends) / (
np.array(
maze_array.shape
)[None, None, :] - 1
)
for start_end in start_ends:
self.add_wall(
start_end
)
self.remove_remaining_walls()
def visualize_states(self, states, save_name=None):
"""
Helper function to visualize a collection of states
"""
plt.plot(
states[:, 0], states[:, 1], '*'
)
plt.xlim(0, 1)
plt.ylim(0, 1)
if save_name is not None:
plt.savefig(save_name + '.png')
plt.show()
class BoxEnv():
"""
    This env is meant as the open source version of the 'FetchPush-v1'
    environment in the OpenAI Gym baselines
"""
def __init__(
self,
action_duration=0.5,
floor_level=0.65,
finger_relative_level=0.14,
tau=.01,
file=None,
display=False
):
self.action_duration = action_duration
self.floor_level = floor_level
self.finger_relative_level = finger_relative_level
self.tau = tau
self.n_steps = int(self.action_duration/self.tau)
self.proportion_per_step = 1/self.n_steps
self.target_tolerance = 0.1
self.config = ry.Config()
if file is not None:
self.config.addFile(file)
else:
self.config.addFile(os.getenv("HOME") +
'/git/ryenv/ryenv/z.push_box.g')
self.config.makeObjectsFree(['finger'])
self.config.setJointState([0.3, 0.3, 0.15, 1., 0., 0., 0.])
self.finger_radius = self.config.frame('finger').info()['size'][0]
self.simulation = self.config.simulation(
ry.SimulatorEngine.physx, display)
self.reset_box()
self.box_dimensions = [0.4, 0.4, 0.2, 0.05]
self.reset([0.3, 0.3])
self.maximum_xy_for_finger = 1.7
self.minimum_rel_z_for_finger = 0.05 + 0.03
self.maximum_rel_z_for_finger = 1
self.config.frame('floor').setColor(
np.array((200, 200, 200))/255,
)
rgb = [93, 87, 94]
self.config.frame('finger').setColor(np.array([
*rgb, 255
])/255)
self.config.frame('box').setColor(np.array([
*rgb, 255
])/255)
def view(self):
"""
Create view of current configuration
"""
return self.config.view()
def add_and_show_target(self, target_state):
"""
Add target state and visualize it in view
"""
self.config.delFrame('target')
target = self.config.addFrame(name="target")
target.setShape(ry.ST.sphere, size=[self.target_tolerance])
target.setColor([1, 1, 0, 0.4])
self.set_frame_state(
target_state,
"target"
)
self.config.frame('target').setColor(
np.array((81, 203, 32, 130))/255
)
def reset_box(self, coords=(0, 0)):
"""
Reset the box to an arbitrary position
"""
print('Test for collision here')
# always reset box to the center
self.set_frame_state(
coords,
'box'
)
state_now = self.config.getFrameState()
self.simulation.setState(state_now,
|
np.zeros((state_now.shape[0], 6))
|
numpy.zeros
|
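DiskEnv.calculate_reward in the row above scores an action by the cosine between the disk's (thresholded) displacement and the goal direction: +1 if the disk moved roughly toward the goal (cosine > 0.9), -1 if it moved but in the wrong direction, 0 if it did not move. A small stand-alone sketch of that rule (function name and test values are made up for illustration):

import numpy as np

def sketch_reward(change, goal, break_pos_thres=0.03):
    if np.linalg.norm(change) < break_pos_thres:
        return 0                                    # movement below threshold counts as noise
    cosine = np.dot(change, goal) / (np.linalg.norm(change) * np.linalg.norm(goal))
    return 1 if cosine > 0.9 else -1

print(sketch_reward(np.array([0.10, 0.01]), np.array([1.0, 0.0])))   # -> 1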
import numpy as np
import astropy.units as u
import astropy.time as at
import astropy.coordinates as coord
import scipy.interpolate as interp
import scipy.ndimage as img
import scipy.sparse
import numpy.random as random
def dict_from_h5(hf,data):
import h5py
for key in hf.keys():
if key == 'obs_times':
try:
print(hf[key])
data[key] = at.Time(
|
np.array(hf[key])
|
numpy.array
|
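The truncated row above converts an HDF5 dataset of observation times into an astropy Time object. A hedged sketch of that conversion (the time format is an assumption, since the original call is cut off before its remaining arguments, and the values below are invented):

import numpy as np
import astropy.time as at

obs_times_mjd = np.array([59000.0, 59000.5, 59001.0])   # hypothetical MJD values
obs_times = at.Time(obs_times_mjd, format='mjd')
print(obs_times.iso[0])                                  # ISO string for the first epoch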
import tensorflow as tf
import numpy as np
import sac_dev.util.tf_util as TFUtil
import sac_dev.util.mpi_util as MPIUtil
from sac_dev.util.logger import Logger
class MPISolver():
CHECK_SYNC_ITERS = 1000
def __init__(self, sess, optimizer, vars):
self._vars = vars
self._sess = sess
self._optimizer = optimizer
self._build_grad_feed(vars)
self._update = optimizer.apply_gradients(zip(self._grad_ph_list, self._vars))
self._set_flat_vars = TFUtil.SetFromFlat(sess, self._vars)
self._get_flat_vars = TFUtil.GetFlat(sess, self._vars)
self._iters = 0
grad_dim = self._calc_grad_dim()
self._flat_grad = np.zeros(grad_dim, dtype=np.float32)
self._global_flat_grad =
|
np.zeros(grad_dim, dtype=np.float32)
|
numpy.zeros
|
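MPISolver above keeps per-variable gradients in one contiguous float32 buffer of length grad_dim (the summed element count of all variables), presumably so a single MPI reduction can average them across workers. A small sketch of that flattening bookkeeping with made-up shapes:

import numpy as np

shapes = [(3, 2), (2,), (4, 4)]                          # hypothetical variable shapes
grad_dim = int(np.sum([np.prod(s) for s in shapes]))     # 6 + 2 + 16 = 24
grads = [np.random.randn(*s).astype(np.float32) for s in shapes]
flat_grad = np.concatenate([g.ravel() for g in grads])   # one buffer for communication
assert flat_grad.shape == (grad_dim,)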
#coding:utf-8
import numpy as np
import time
from videocore.assembler import qpu
from videocore.driver import Driver
def mask(idx):
values = [1]*16
values[idx] = 0
return values
@qpu
def piadd(asm):
    A_ADDR=0 # index
B_ADDR=1
C_ADDR=2
IO_ITER=3
THR_ID=4
THR_NM=5
    COMPLETED=0 # for the semaphore
    ldi(null,mask(A_ADDR),set_flags=True) # store the uniforms into r2
mov(r2,uniform,cond='zs')
ldi(null,mask(B_ADDR),set_flags=True)
mov(r2,uniform,cond='zs')
ldi(null,mask(C_ADDR),set_flags=True)
mov(r2,uniform,cond='zs')
ldi(null,mask(IO_ITER),set_flags=True)
mov(r2,uniform,cond='zs')
ldi(null,mask(THR_ID),set_flags=True)
mov(r2,uniform,cond='zs')
ldi(null,mask(THR_NM),set_flags=True)
mov(r2,uniform,cond='zs')
imul24(r3,element_number,4)
rotate(broadcast,r2,-A_ADDR)
iadd(r0,r5,r3)
rotate(broadcast,r2,-B_ADDR)
iadd(r1,r5,r3)
#r0:A_ADDR
#r1:B_ADDR
L.loop
ldi(broadcast,16*4)
for i in range(32):
#ra
mov(tmu0_s,r0)
mov(tmu1_s,r1)
nop(sig='load tmu0')
iadd(r0,r0,r5)
iadd(r1,r1,r5)
mov(r3,r4,sig='load tmu1')
fadd(ra[i],r3,r4)
#rb
mov(tmu0_s,r0)
mov(tmu1_s,r1)
nop(sig='load tmu0')
iadd(r0,r0,r5)
iadd(r1,r1,r5)
mov(r3,r4,sig='load tmu1')
fadd(rb[i],r3,r4)
ldi(r3,64*16*4)
mutex_acquire()
rotate(broadcast,r2,-C_ADDR)
setup_vpm_write(mode='32bit horizontal',Y=0,X=0)
for i in range(32):
mov(vpm,ra[i])
mov(vpm,rb[i])
setup_dma_store(mode='32bit horizontal',Y=0,nrows=64)
start_dma_store(r5)
wait_dma_store()
mutex_release()
ldi(null,mask(IO_ITER),set_flags=True)
isub(r2,r2,1,cond='zs')
jzc(L.loop)
ldi(null,mask(C_ADDR),set_flags=True)
iadd(r2,r2,r3,cond='zs')
nop()
#====semaphore=====
sema_up(COMPLETED)
rotate(broadcast,r2,-THR_ID)
iadd(null,r5,-1,set_flags=True)
jzc(L.skip_fin)
nop()
nop()
nop()
rotate(broadcast,r2,-THR_NM)
iadd(r0, r5, -1,set_flags=True)
L.sem_down
jzc(L.sem_down)
    sema_down(COMPLETED) # wait until all threads have finished
nop()
iadd(r0, r0, -1)
interrupt()
L.skip_fin
exit(interrupt=False)
with Driver() as drv:
H=1920
W=1088
n_threads=12
SIMD=16
R=64
    th_H=int(H/n_threads) # rows handled by one thread
    th_ele=th_H*W # elements handled by one thread
    io_iter=int(th_ele/(R*SIMD)) # number of DMA transfers per thread
A=drv.alloc((H,W),'float32')
B=drv.alloc((H,W),'float32')
C=drv.alloc((H,W),'float32')
C[:]=0.0
A[:]=np.random.randn(H,W)
B[:]=np.random.randn(H,W)
start = time.time()
    CC=A+B # matrix sum on the CPU (reference result)
elapsed_cpu = time.time() - start
uniforms=drv.alloc((n_threads,6),'uint32')
for th in range(n_threads):
uniforms[th,0]=A.addresses()[int(th_H*th),0]
uniforms[th,1]=B.addresses()[int(th_H*th),0]
uniforms[th,2]=C.addresses()[int(th_H*th),0]
uniforms[:,3]=int(io_iter)
uniforms[:,4]=np.arange(1,(n_threads+1))
uniforms[:,5]=n_threads
code=drv.program(piadd)
elapsed_gpu=0
iter=1
for i in range(iter):
start = time.time()
drv.execute(
n_threads=n_threads,
program=code,
uniforms=uniforms
)
elapsed_gpu += time.time() - start
elapsed_gpu=elapsed_gpu/iter
print ("GPU:elapsed_time:{0}".format(elapsed_gpu*1000) + "[msec]")
print ("CPU:elapsed_time:{0}".format(elapsed_cpu*1000) + "[msec]")
print('maximum absolute error: {:.4e}'.format(
float(np.max(
|
np.abs(C - CC)
|
numpy.abs
|
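A quick worked check of the work-partitioning arithmetic in the script above, using the constants it defines (each thread handles a contiguous block of rows and moves them in fixed-size transfers):

H, W, n_threads, SIMD, R = 1920, 1088, 12, 16, 64
th_H = H // n_threads            # 160 rows per thread
th_ele = th_H * W                # 174080 elements per thread
io_iter = th_ele // (R * SIMD)   # 170 transfer iterations per thread (64 rows of 16 floats each)
print(th_H, th_ele, io_iter)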
import os
import numpy as np
import pyccl as ccl
# Set cosmology
cosmo = ccl.Cosmology(Omega_c=0.25, Omega_b=0.05, Omega_g=0, Omega_k=0,
h=0.7, sigma8=0.8, n_s=0.96, Neff=0, m_nu=0.0,
w0=-1, wa=0, T_CMB=2.7255,
transfer_function='eisenstein_hu')
# Read data
dirdat = os.path.join(os.path.dirname(__file__), 'data')
# Redshifts
zs = np.array([0., 0.5, 1.])
def test_hmf_despali16():
hmd = ccl.halos.MassDef('vir', 'critical')
mf = ccl.halos.MassFuncDespali16(cosmo, hmd)
d_hmf = np.loadtxt(os.path.join(dirdat, 'hmf_despali16.txt'),
unpack=True)
m = d_hmf[0]
for iz, z in enumerate(zs):
nm_d = d_hmf[iz+1]
nm_h = mf.get_mass_function(cosmo, m, 1. / (1 + z))
assert np.all(np.fabs(nm_h / nm_d - 1) < 0.01)
def test_hmf_bocquet16():
hmd = ccl.halos.MassDef200c()
mf = ccl.halos.MassFuncBocquet16(cosmo, hmd)
d_hmf = np.loadtxt(os.path.join(dirdat, 'hmf_bocquet16.txt'),
unpack=True)
m = d_hmf[0]
for iz, z in enumerate(zs):
nm_d = d_hmf[iz+1]
nm_h = mf.get_mass_function(cosmo, m, 1. / (1 + z))
assert np.all(np.fabs(nm_h / nm_d - 1) < 0.01)
def test_hmf_watson13():
mf = ccl.halos.MassFuncWatson13(cosmo)
d_hmf = np.loadtxt(os.path.join(dirdat, 'hmf_watson13.txt'),
unpack=True)
m = d_hmf[0]
for iz, z in enumerate(zs):
nm_d = d_hmf[iz+1]
nm_h = mf.get_mass_function(cosmo, m, 1. / (1 + z))
assert np.all(np.fabs(nm_h / nm_d - 1) < 0.01)
def test_hmf_tinker08():
mf = ccl.halos.MassFuncTinker08(cosmo)
d_hmf = np.loadtxt(os.path.join(dirdat, 'hmf_tinker08.txt'),
unpack=True)
m = d_hmf[0]
for iz, z in enumerate(zs):
nm_d = d_hmf[iz+1]
nm_h = mf.get_mass_function(cosmo, m, 1. / (1 + z))
assert np.all(np.fabs(nm_h / nm_d - 1) < 0.01)
def test_hmf_press74():
mf = ccl.halos.MassFuncPress74(cosmo)
d_hmf = np.loadtxt(os.path.join(dirdat, 'hmf_press74.txt'),
unpack=True)
m = d_hmf[0]
for iz, z in enumerate(zs):
nm_d = d_hmf[iz+1]
nm_h = mf.get_mass_function(cosmo, m, 1. / (1 + z))
assert np.all(np.fabs(nm_h / nm_d - 1) < 0.01)
def test_hmf_angulo12():
mf = ccl.halos.MassFuncAngulo12(cosmo)
d_hmf = np.loadtxt(os.path.join(dirdat, 'hmf_angulo12.txt'),
unpack=True)
m = d_hmf[0]
for iz, z in enumerate(zs):
nm_d = d_hmf[iz+1]
nm_h = mf.get_mass_function(cosmo, m, 1. / (1 + z))
assert np.all(np.fabs(nm_h / nm_d - 1) < 0.01)
def test_hmf_sheth99():
mf = ccl.halos.MassFuncSheth99(cosmo)
d_hmf = np.loadtxt(os.path.join(dirdat, 'hmf_sheth99.txt'),
unpack=True)
m = d_hmf[0]
for iz, z in enumerate(zs):
nm_d = d_hmf[iz+1]
nm_h = mf.get_mass_function(cosmo, m, 1. / (1 + z))
assert np.all(
|
np.fabs(nm_h / nm_d - 1)
|
numpy.fabs
|
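Each test above follows the same pattern: evaluate the mass function at scale factor a = 1/(1+z) and require the prediction to match the tabulated value to better than 1% in a fractional sense. A tiny illustration of that acceptance criterion with invented numbers:

import numpy as np

zs = np.array([0., 0.5, 1.])
a = 1.0 / (1.0 + zs)             # scale factors [1.0, 0.667, 0.5]
nm_h, nm_d = 1.003, 1.000        # hypothetical predicted vs. tabulated value
assert np.fabs(nm_h / nm_d - 1) < 0.01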
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 18 11:45:32 2018
Empirical Wavelet Transform implementation for 1D signals
Original paper:
<NAME>., 2013. Empirical Wavelet Transform. IEEE Transactions on Signal Processing, 61(16), pp.3999–4010.
Available at: http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6522142.
Original Matlab toolbox: https://www.mathworks.com/matlabcentral/fileexchange/42141-empirical-wavelet-transforms
@author: <NAME>
Programa de pós graduação em engenharia elétrica - PPGEE UFMG
Universidade Federal de Minas Gerais - Belo Horizonte, Brazil
Núcleo de Neurociências - NNC
"""
import numpy as np
#%EWT functions
def EWT1D(f, N = 5, log = 0,detect = "locmax", completion = 0, reg = 'average', lengthFilter = 10,sigmaFilter = 5):
"""
=========================================================================
ewt, mfb ,boundaries = EWT1D(f, N = 5, log = 0,detect = "locmax", completion = 0, reg = 'average', lengthFilter = 10,sigmaFilter = 5):
Perform the Empirical Wavelet Transform of f over N scales. See
also the documentation of EWT_Boundaries_Detect for more details about
the available methods and their parameters.
Inputs:
-f: the 1D input signal
Optional Inputs:
-log: 0 or 1 to indicate if we want to work with
the log spectrum
    -detect: 'locmax','locmaxmin','locmaxminf'
-reg: 'none','gaussian','average'
-lengthFilter: width of the above filters (Gaussian or average)
-sigmaFilter: standard deviation of the above Gaussian filter
-N: maximum number of supports (modes or signal components)
-completion: 0 or 1 to indicate if we try to complete
or not the number of modes if the detection
find a lower number of mode than N
Outputs:
-ewt: contains first the low frequency component and
        then the successive frequency subbands
-mfb: contains the filter bank (in the Fourier domain)
-boundaries: vector containing the set of boundaries corresponding
to the Fourier line segmentation (normalized between
0 and Pi)
Original MATLAB Version:
Author: <NAME>
Institution: UCLA - Department of Mathematics
Year: 2013
Version: 2.0
Python Version: <NAME> - <EMAIL>
Universidade Federal de Minas Gerais - Brasil
Núcleo de Neurociências
% =========================================================================
"""
#signal spectrum
ff = np.fft.fft(f)
ff = abs(ff[0:int(np.ceil(ff.size/2))])#one-sided magnitude
#extract boundaries of Fourier Segments
boundaries = EWT_Boundaries_Detect(ff,log,detect,N,reg,lengthFilter,sigmaFilter)
boundaries = boundaries*np.pi/round(ff.size)
if completion == 1 and len(boundaries)<N-1:
boundaries = EWT_Boundaries_Completion(boundaries,N-1)
#Filtering
#extend the signal by mirroring to deal with boundaries
ltemp = int(np.ceil(f.size/2)) #to behave the same as matlab's round
fMirr = np.append(np.flip(f[0:ltemp-1],axis = 0),f)
fMirr = np.append(fMirr,np.flip(f[-ltemp-1:-1],axis = 0))
ffMirr = np.fft.fft(fMirr)
#build the corresponding filter bank
mfb=EWT_Meyer_FilterBank(boundaries,ffMirr.size)
#filter the signal to extract each subband
ewt = np.zeros(mfb.shape)
for k in range(mfb.shape[1]):
ewt[:,k] = np.real(np.fft.ifft(np.conjugate(mfb[:,k])*ffMirr))
ewt = ewt[ltemp-1:-ltemp,:]
return ewt, mfb ,boundaries
def EWT_Boundaries_Detect(ff,log,detect, N, reg, lengthFilter,sigmaFilter):
"""This function segments f into a certain amount of supports by using different technics:
- middle point between consecutive local maxima (default),
- lowest minima between consecutive local maxima (locmaxmin),
- lowest minima between consecutive local maxima of original spectrum (locmaxminf),
Regularized version of the spectrum can be obtained by the
following methods:
- Gaussian filtering (its parameters are filter of width
        lengthFilter and standard deviation sigmaFilter)
- Average filtering (its parameters are filter of width
lengthFilter)
Note: the detected boundaries are given in term of indices
Inputs:
-f: the function to segment
Optional parameters:
-log: 0 or 1 to indicate if we want to work with
the log of the ff
-reg: 'none','gaussian','average'
-lengthFilter: width of the above filters (Gaussian or average)
-sigmaFilter: standard deviation of the above Gaussian filter
-N: maximum number of supports (modes or signal components)
-completion: 0 or 1 to indicate if we try to complete
or not the number of modes if the detection
find a lower number of mode than N
Outputs:
-boundaries: list of detected boundaries
TODO Preprocessing steps not yet implemented
Original MATLAB version:
Author: <NAME> + <NAME>
Institution: UCLA - Department of Mathematics
Year: 2013
Version: 2.0
Python Version: <NAME> - <EMAIL>
Universidade Federal de Minas Gerais - Brasil
Núcleo de Neurociências
"""
from scipy.ndimage.filters import gaussian_filter
#apply log if needed
if log == 1:
ff = np.log(ff)
#Global trend removal - TODO
#Regularization
if reg == 'average':
regFilter = np.ones(lengthFilter)/lengthFilter
        presig = np.convolve(ff,regFilter,mode = 'same') #for even length, numpy's convolve is shifted when compared with MATLAB's
elif reg == 'gaussian':
regFilter = np.zeros(lengthFilter)
regFilter[regFilter.size//2] = 1 #prefer odd filter lengths - otherwise the gaussian is skewed
presig = np.convolve(ff,gaussian_filter(regFilter,sigmaFilter),mode = 'same')
else:
presig = ff
#Boundaries detection
if detect == "locmax":#Mid-point between two consecutive local maxima computed on the regularized spectrum
boundaries = LocalMax(presig,N)
elif detect == "locmaxmin":#extract the lowest local minima between two selected local maxima
boundaries = LocalMaxMin(presig,N)
elif detect == "locmaxminf":#We extract the lowest local minima on the original spectrum between
#two local maxima selected on the regularized signal
boundaries = LocalMaxMin(presig,N,fm = ff)
#elif detect == "adaptivereg": #TODO
return boundaries+1
def LocalMax(ff, N):
"""
================================================================
bound = LocalMax(f,N)
This function segments f into a maximum of N supports by taking
the middle point between the N largest local maxima.
Note: the detected boundaries are given in term of indices
Inputs:
-f: the function to segment
-N: maximal number of bands
Outputs:
-bound: list of detected boundaries
Original MATLAB version:
Author: <NAME> + <NAME>
Institution: UCLA - Department of Mathematics
Year: 2013
Version: 1.0
Python Version: <NAME> - <EMAIL>
Universidade Federal de Minas Gerais - Brasil
Núcleo de Neurociências
%===============================================================
"""
N=N-1
locmax = np.zeros(ff.size)
locmin = max(ff)*np.ones(ff.size)
for i in np.arange(1,ff.size-1):
if ff[i-1]<ff[i] and ff[i]>ff[i+1]:
locmax[i] = ff[i]
if ff[i-1]> ff[i] and ff[i] <= ff[i+1]:
locmin[i] = ff[i]
N = min(N,locmax.size)
#keep the N-th highest maxima
maxidxs = np.sort(locmax.argsort()[::-1][:N])
#middle point between consecutive maxima
bound = np.zeros(N)
for i in range(N):
if i == 0:
a = 0
else:
a = maxidxs[i-1]
bound[i] = (a + maxidxs[i])/2
return bound
def LocalMaxMin(f,N,fm = 0):
"""
===================================================================
bound = LocalMaxMin(f,N,fm)
This function segments f into a maximum of N supports by detecting
the lowest local minima between the N largest local maxima. If the
input fm is provided then the local maxima are computed on f and
the local minima on fm otherwise both are computed on f (this is
useful if you want to compute the maxima on a regularized version
of your signal while detecting the "true" minima).
Note: the detected boundaries are given in term of indices
Inputs:
-f: the function to segment
-N: maximal number of bands
Optional input:
-fm: function on which the local minima will be computed
Outputs:
-bound: list of detected boundaries
Author: <NAME>
Institution: UCLA - Department of Mathematics
Year: 2013
Version: 2.0
Python Version: <NAME> - <EMAIL>
Universidade Federal de Minas Gerais - Brasil
Núcleo de Neurociências
===================================================================
"""
locmax= np.zeros(f.size)
if type(fm) == int:
f2 = f
else:
f2 = fm
locmin = max(f2)*np.ones(f2.size)
#detect local minima and maxima
for i in np.arange(1,f.size-1):
if ((f[i-1]<f[i]) and (f[i]>f[i+1])):
locmax[i] = f[i]
if ((f2[i-1]>f2[i]) and (f2[i]<f2[i+1])):
locmin[i] = f2[i]
#keep the N-th highest maxima and their index
if N!=-1:
N = N-1
#keep the N-th highest maxima
Imax = np.sort(locmax.argsort()[::-1][:N])
#detect the lowest minima between two consecutive maxima
bound = np.zeros(N)
for i in range(N):
if i == 0:
a = 1
else:
a = Imax[i-1]
lmin = np.sort(locmin[a:Imax[i]])
ind = np.argsort(locmin[a:Imax[i]])
tmpp = lmin[0]
n = 0
if n < len(lmin):
n = 1
while ((n<len(lmin)) and (tmpp==lmin[n])):
n = n+1
bound[i] = a + ind[n//2]-1
else:
k = 0
        for i in range(len(locmin)):
if locmin[i]<max(f2):
bound[k] = i-1
k = k+1
return bound
#TODO def Adaptive_Bounds_Adapt():
def EWT_Boundaries_Completion(boundaries,NT):
"""
======================================================================
boundaries=EWT_Boundaries_Completion(boundaries,NT)
    This function completes the boundaries vector to get a
total of NT boundaries by equally splitting the last band (highest
frequencies)
Inputs:
-boundaries: the boundaries vector you want to complete
-NT: the total number of boundaries wanted
Output:
-boundaries: the completed boundaries vector
Author: <NAME>
Institution: UCLA - Department of Mathematics
Year: 2013
Version: 1.0
Python Version: Vinícius Rezende Carvalho - <EMAIL>
Universidade Federal de Minas Gerais - Brasil
Núcleo de Neurociências
%======================================================================
"""
Nd=NT-len(boundaries)
deltaw=(np.pi-boundaries[-1])/(Nd+1)
for k in range(Nd):
boundaries =
|
np.append(boundaries,boundaries[-1]+deltaw)
|
numpy.append
|
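EWT_Boundaries_Completion in the row above fills in missing boundaries by splitting the interval between the last detected boundary and pi into Nd+1 equal parts and appending the Nd interior points. A short stand-alone sketch of that rule (the starting boundaries and NT are invented):

import numpy as np

boundaries = np.array([0.4, 1.1])        # hypothetical detected boundaries (rad)
NT = 5                                   # total number of boundaries wanted
Nd = NT - len(boundaries)                # three boundaries still missing
deltaw = (np.pi - boundaries[-1]) / (Nd + 1)
completed = np.append(boundaries, boundaries[-1] + deltaw * np.arange(1, Nd + 1))
print(completed)                         # the appended values step toward pi in equal increments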
#!/usr/bin/env python
from datetime import datetime
import copy
import traceback
import os, subprocess, time, signal
#from cv_bridge import CvBridge
import gym
import math
import random
# u
import numpy as np
import cv2 as cv
import rospy
# Brings in the SimpleActionClient
import actionlib
# Brings in the .action file and messages used by the move base action
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
from sensor_msgs.msg import LaserScan
from sensor_msgs.msg import Image
from gazebo_msgs.msg import ModelState
from geometry_msgs.msg import Point
from geometry_msgs.msg import Point32
from geometry_msgs.msg import TransformStamped
from rosgraph_msgs.msg import Clock
from costmap_converter.msg import ObstacleArrayMsg
from costmap_converter.msg import ObstacleMsg
from gazebo_msgs.msg import ModelStates
from geometry_msgs.msg import Twist
from gazebo_msgs.srv import SetModelState
from gym.utils import seeding
import threading
import _thread
from squaternion import Quaternion
from simple_pid import PID
import pickle
import logging
logger = logging.getLogger(__name__)
# Environment Parameters
class EnvConfig:
# Boolean to make robots spawn at constant locations
USE_TESTING = False
# If False, Moves obstacles out of the way
USE_OBSTACLES = False
# Pattern to init obstacles
# 0: Places obstacles between robot and person
# 1: Places obstacles randomly within circle
OBSTACLE_MODE = 1
# Radius(meters) away from person robot for random placement(mode 1) of objects
OBSTACLE_RADIUS_AWAY = 3
# Obstacle size
OBSTACLE_SIZE = 0.5
# Allows/Denies Robot TEB Local Planner to avoid obstacles
SEND_TEB_OBSTACLES = True
# Gets person robot to use move base
PERSON_USE_MB = True
# Episode Length
EPISODE_LEN = 15
# Returns Human State only in get_observations if True
RETURN_HINN_STATE = False # was True
# Size to reduce laser scan to
SCAN_REDUCTION_SIZE = 20
# If True, calls init_simulator() on set_agent() call
INIT_SIM_ON_AGENT = False
# If True, moves jackal bot out of the way and puts obstacles around person
TRAIN_HINN = False
# For NON-HINN OUTPUT ONLY: Outputs laser scan if true
OUTPUT_OBSTACLES_IN_STATE = True
# Evaluation Mode, Removes stochasticity when initializing environment
EVALUATION_MODE = True
class History():
def __init__(self, window_size, update_rate, save_rate=10):
self.idx = 0
self.update_rate = update_rate
self.save_rate = save_rate
self.lock = threading.Lock()
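        # keep enough raw samples (added at save_rate) so that get_elemets()
        # can later return window_size elements spaced at the coarser update_rate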
self.memory_size = int(math.ceil(save_rate/update_rate*window_size)+1)
self.data = [None for x in range(self.memory_size)]
self.prev_add_time = rospy.Time.now().to_sec() - 1
self.window_size = window_size
self.avg_frame_rate = None
self.time_data_ = []
def add_element(self, element):
"""
element: the data that we put inside the history data array
"""
if abs(rospy.Time.now().to_sec() - self.prev_add_time) < 1./self.save_rate:
return
with self.lock:
self.idx = (self.idx + 1) % self.window_size
self.prev_add_time = rospy.Time.now().to_sec()
if self.data[self.idx] is None:
for idx in range(self.memory_size):
self.data[idx] = element
self.data[self.idx] = element
if not len(self.time_data_) > 50:
self.time_data_.append(self.prev_add_time)
if len(self.time_data_) > 3:
prev_t = self.time_data_[0]
time_intervals = []
for t in self.time_data_[1:]:
time_intervals.append(t - prev_t)
prev_t = t
self.avg_frame_rate = 1.0 / np.average(time_intervals)
def get_elemets(self):
return_data = []
while self.avg_frame_rate is None:
time.sleep(0.1)
skip_frames = -int(math.ceil(self.avg_frame_rate / self.update_rate))
with self.lock:
index = self.idx # (self.idx - 1)% self.window_size
if self.window_size * abs(skip_frames) >= self.memory_size:
rospy.logerr("error in get element memory not enough update rate{} avg_frame_rate{} mem_size {} skipf: {}".format(self.update_rate, self.avg_frame_rate, self.memory_size, skip_frames))
for i in range(self.window_size):
return_data.append(self.data[index])
index = (index + skip_frames) % self.window_size
return return_data
def get_latest(self):
with self.lock:
return self.data[self.idx]
class Robot():
def __init__(self, name, max_angular_speed=1, max_linear_speed=1, relative=None, agent_num=None, use_goal=False, use_movebase=False, use_jackal=False, window_size=10, is_testing=False):
self.name = name
self.use_jackal = use_jackal
self.init_node = False
self.alive = True
self.prev_call_gazeboros_ = None
if relative is None:
relative = self
self.relative = relative
self.is_testing = is_testing
if self.is_testing:
self.all_pose_ = []
self.last_time_added = rospy.Time.now().to_sec()
self.log_history = []
self.agent_num = agent_num
self.init_node = True
self.deleted = False
self.update_rate_states = 2.0
self.window_size_history = window_size
self.current_vel_ = Twist()
self.goal = {"pos": None, "orientation": None}
self.use_goal = use_goal
self.use_movebase = use_movebase
self.max_angular_vel = max_angular_speed
self.max_linear_vel = max_linear_speed
self.max_rel_pos_range = 5.0 # meter
self.width_laserelement_image = 100
self.height_laser_image = 50
self.state_ = {'position': (None, None),
'orientation': None}
if self.use_jackal:
self.cmd_vel_pub = rospy.Publisher(
'/{}/jackal_velocity_controller/cmd_vel'.format(name), Twist, queue_size=1)
else:
self.cmd_vel_pub = rospy.Publisher(
'/{}/cmd_vel'.format(name), Twist, queue_size=1)
if "tb3" in self.name and self.use_movebase:
# Create an action client called "move_base" with action definition file "MoveBaseAction"
self.action_client_ = actionlib.SimpleActionClient(
'/move_base_{}'.format(self.agent_num), MoveBaseAction)
# Waits until the action server has started up and started listening for goals.
self.action_client_.wait_for_server(rospy.rostime.Duration(0.4))
else:
self.action_client_ = None
if "person" == self.name:
self.angular_pid = PID(0.5, 0, 0.03, setpoint=0)
self.linear_pid = PID(1.0, 0, 0.05, setpoint=0)
else:
self.angular_pid = PID(2.5, 0, 0.03, setpoint=0)
self.linear_pid = PID(2.5, 0, 0.05, setpoint=0)
self.pos_history = History(
self.window_size_history, self.update_rate_states)
self.orientation_history = History(
self.window_size_history, self.update_rate_states)
self.velocity_history = History(
self.window_size_history, self.update_rate_states)
self.is_collided = False
self.is_pause = False
self.reset = False
self.scan_image = None
def calculate_ahead(self, distance):
x = self.state_['position'][0] + \
math.cos(self.state_["orientation"]) * distance
y = self.state_['position'][1] + \
math.sin(self.state_["orientation"]) * distance
return (x, y)
def movebase_cancel_goals(self):
self.action_client_.cancel_all_goals()
self.stop_robot()
def movebase_client_goal(self, goal_pos, goal_orientation):
# Creates a new goal with the MoveBaseGoal constructor
move_base_goal = MoveBaseGoal()
move_base_goal.target_pose.header.frame_id = "tb3_{}/odom".format(self.agent_num)
move_base_goal.target_pose.header.stamp = rospy.Time.now()
move_base_goal.target_pose.pose.position.x = goal_pos[0]
move_base_goal.target_pose.pose.position.y = goal_pos[1]
quaternion_rotation = Quaternion.from_euler(0, goal_orientation, 0)
move_base_goal.target_pose.pose.orientation.x = quaternion_rotation[3]
move_base_goal.target_pose.pose.orientation.y = quaternion_rotation[1]
move_base_goal.target_pose.pose.orientation.z = quaternion_rotation[2]
move_base_goal.target_pose.pose.orientation.w = quaternion_rotation[0]
# Sends the move_base_goal to the action server.
self.action_client_.send_goal(move_base_goal)
# Waits for the server to finish performing the action.
#wait = self.action_client_.wait_for_result(rospy.rostime.Duration(0.4))
# If the result doesn't arrive, assume the Server is not available
# if not wait:
# rospy.logerr("Action server not available!")
# else:
# # Result of executing the action
# return self.action_client_.get_result()
def get_pos(self):
counter_problem = 0
while self.state_['position'] is None:
if self.reset:
return (None, None)
if counter_problem > 20:
rospy.logdebug("waiting for pos to be available {}/{}".format(counter_problem/10, 20))
time.sleep(0.001)
counter_problem += 1
if counter_problem > 200:
                raise Exception('Probable shared memory issue happened')
return self.state_['position']
def get_orientation(self):
counter_problem = 0
while self.state_['orientation'] is None:
if self.reset:
return None
if counter_problem > 20:
rospy.logdebug("waiting for pos to be available {}/{}".format(counter_problem/10, 20))
time.sleep(0.001)
counter_problem += 1
if counter_problem > 200:
                raise Exception('Probable shared memory issue happened')
return self.state_['orientation']
def is_current_state_ready(self):
return (self.state_['position'][0] is not None)
def is_observation_ready(self):
return (self.pos_history.avg_frame_rate is not None and
self.orientation_history.avg_frame_rate is not None and
self.velocity_history.avg_frame_rate is not None)
def update(self, init_pose):
self.alive = True
self.goal = {"pos": None, "orientation": None}
if "person" == self.name:
self.angular_pid = PID(0.5, 0, 0.03, setpoint=0)
self.linear_pid = PID(1.0, 0, 0.05, setpoint=0)
else:
self.angular_pid = PID(2.5, 0, 0.03, setpoint=0)
self.linear_pid = PID(2.5, 0, 0.05, setpoint=0)
self.pos_history = History(self.window_size_history, self.update_rate_states)
self.orientation_history = History(self.window_size_history, self.update_rate_states)
self.velocity_history = History(self.window_size_history, self.update_rate_states)
self.velocity_history.add_element((0,0))
self.pos_history.add_element((init_pose["pos"][0],init_pose["pos"][1]))
self.orientation_history.add_element(init_pose["orientation"])
self.log_history = []
if self.is_testing:
self.all_pose_ = []
#self.prev_call_gazeboros_ = None
#self.is_collided = False
self.is_pause = False
self.reset = False
def add_log(self, log):
self.log_history.append(log)
def remove(self):
self.reset = True
def set_state(self, state):
self.state_["position"] = state["position"]
self.state_["orientation"] = state["orientation"]
self.state_["velocity"] = state["velocity"]
self.orientation_history.add_element(state["orientation"])
self.pos_history.add_element(state["position"])
self.velocity_history.add_element(state["velocity"])
if self.is_testing and abs(rospy.Time.now().to_sec() - self.last_time_added) > 0.01:
self.all_pose_.append(self.state_.copy())
self.last_time_added = rospy.Time.now().to_sec()
def get_state(self):
return self.state_
def get_velocity(self):
return self.velocity_history.get_latest()
def pause(self):
self.is_pause = True
self.stop_robot()
def resume(self):
self.is_pause = False
def take_action(self, action, target_orientation=None):
if self.is_pause:
return
if self.use_goal:
if "person" in self.name:
pose = self.get_pos()
pos_global = [pose[0]+action[0], pose[1]+action[1]]
else:
pos = GazeborosEnv.denormalize(action[0:2], self.max_rel_pos_range)
pos_global = GazeborosEnv.get_global_position(pos, self.relative)
if target_orientation:
self.goal["orientation"] = target_orientation
else:
self.goal["orientation"] = self.get_orientation()
self.goal["pos"] = pos_global
if self.use_movebase:
self.movebase_client_goal(pos_global, self.goal["orientation"])
else:
linear_vel = max(min(action[0]*self.max_linear_vel, self.max_linear_vel), -self.max_linear_vel)
angular_vel = max(min(action[1]*self.max_angular_vel, self.max_angular_vel), -self.max_angular_vel)
cmd_vel = Twist()
cmd_vel.linear.x = linear_vel #float(self.current_vel_.linear.x -(self.current_vel_.linear.x - linear_vel)*0.9)
cmd_vel.angular.z = angular_vel #-float(self.current_vel_.angular.z - (self.current_vel_.angular.z - angular_vel)*0.9)
self.current_vel_ = cmd_vel
self.cmd_vel_pub.publish(cmd_vel)
def stop_robot(self):
self.cmd_vel_pub.publish(Twist())
def angle_distance_to_point(self, pos):
current_pos = self.get_pos()
if current_pos[0] is None:
return None, None
angle = math.atan2(pos[1] - current_pos[1], pos[0] - current_pos[0])
distance = math.hypot(pos[0] - current_pos[0], pos[1] - current_pos[1])
angle = (angle - self.state_["orientation"] + math.pi) % (math.pi * 2) - math.pi
return angle, distance
def publish_cmd_vel(self, linear, angular):
cmd_vel = Twist()
angular_vel = min(max(angular, -self.max_angular_vel), self.max_angular_vel)
linear_vel = min(max(linear, 0), self.max_linear_vel)
cmd_vel.linear.x = float(linear_vel)
cmd_vel.angular.z = float(angular_vel)
self.cmd_vel_pub.publish(cmd_vel)
def use_selected_person_mod(self, person_mode):
while person_mode <= 6:
if self.is_pause:
self.stop_robot()
return
if self.reset:
self.stop_robot()
return
angular_vel = 0
linear_vel = 0
if person_mode == 0:
linear_vel = self.max_linear_vel
if person_mode == 1:
#linear_vel = self.max_linear_vel * random.random()
linear_vel = self.max_linear_vel * 0.35
elif person_mode == 2:
linear_vel = self.max_linear_vel/2
angular_vel = self.max_angular_vel/6
elif person_mode == 3:
linear_vel = self.max_linear_vel/2
angular_vel = -self.max_angular_vel/6
elif person_mode == 4:
linear_vel, angular_vel = self.get_velocity()
linear_vel = linear_vel - (linear_vel - (random.random()/2 + 0.5))/2.
angular_vel = -self.max_angular_vel/6
elif person_mode == 5:
linear_vel, angular_vel = self.get_velocity()
linear_vel = linear_vel - (linear_vel - (random.random()/2 + 0.5))/2.
angular_vel = self.max_angular_vel/6
elif person_mode == 6:
linear_vel, angular_vel = self.get_velocity()
linear_vel = linear_vel - (linear_vel - (random.random()/2 + 0.5))/2.
angular_vel = angular_vel - (angular_vel - (random.random()-0.5)*2)/2.
self.publish_cmd_vel(linear_vel, angular_vel)
time.sleep(0.002)
def go_to_goal(self):
while True:
if self.reset:
return
while self.goal["pos"] is None:
time.sleep(0.1)
continue
diff_angle, distance = self.angle_distance_to_point(self.goal["pos"])
time_prev = rospy.Time.now().to_sec()
while not distance < 0.1 and abs(rospy.Time.now().to_sec() - time_prev) < 5:
if self.is_pause:
self.stop_robot()
return
if self.reset:
self.stop_robot()
return
diff_angle, distance = self.angle_distance_to_point(self.goal["pos"])
if distance is None:
return
if self.reset:
return
angular_vel = -min(max(self.angular_pid(diff_angle), -self.max_angular_vel), self.max_angular_vel)
linear_vel = min(max(self.linear_pid(-distance), 0), self.max_linear_vel)
linear_vel = linear_vel * math.pow((abs(math.pi - abs(diff_angle))/math.pi), 1.5)
self.publish_cmd_vel(linear_vel, angular_vel)
time.sleep(0.01)
self.stop_robot()
def go_to_pos(self, pos, stop_after_getting=False):
if self.is_pause:
self.stop_robot()
return
if self.reset:
return
diff_angle, distance = self.angle_distance_to_point(pos)
if distance is None:
print(self.get_pos())
return
time_prev = rospy.Time.now().to_sec()
while not distance < 0.2 and abs(rospy.Time.now().to_sec() - time_prev) < 5:
if self.is_pause:
self.stop_robot()
return
if self.reset:
return
diff_angle, distance = self.angle_distance_to_point(pos)
if distance is None:
return
if self.reset:
return
angular_vel = -min(max(self.angular_pid(diff_angle), -self.max_angular_vel),self.max_angular_vel)
linear_vel = min(max(self.linear_pid(-distance), 0), self.max_linear_vel)
linear_vel = linear_vel * math.pow((abs(math.pi - abs(diff_angle))/math.pi), 2)
self.publish_cmd_vel(linear_vel, angular_vel)
time.sleep(0.01)
if stop_after_getting:
self.stop_robot()
def get_goal(self):
counter_problem = 0
while self.goal["pos"] is None:
if self.reset:
return (None, None)
if counter_problem > 20:
rospy.logwarn("waiting for goal to be available {}/{}".format(counter_problem/10, 20))
time.sleep(0.01)
counter_problem += 1
if counter_problem > 200:
                raise Exception('Probable shared memory issue happened')
# if not self.use_movebase:
# pos = GazeborosEnv.get_global_position(self.goal["pos"], self)
# goal = {"pos":pos, "orientation":None}
# else:
# goal = self.goal
return self.goal
def get_laser_image(self):
return np.expand_dims(self.scan_image, axis=2)
class GazeborosEnv(gym.Env):
def __init__(self, is_evaluation=False):
self.is_evaluation_ = is_evaluation
# self.bridge = CvBridge()
# self.image_pub = rospy.Publisher("image_observation", Image)
# self.image_pub_gt = rospy.Publisher("image_observation_gt", Image)
self.is_reseting = True
self.use_path = True
self.use_jackal = True
self.lock = _thread.allocate_lock()
self.path_follower_test_settings = {0:(0,0, "straight",False), 1:(2,0, "right", False), 2:(3,0, "left", False),\
3:(1,4, "straight_Behind", False), 4:(2,3, "right_behind", False), 5:(3,3, "left_behind", False), 6:(7,2, "traj_1", True, True),\
7:(7, 12, "traj_2", True, True), 8:(7, 43, "traj_3", True),\
9:(2,1, "right_left", False), 10:(2,2, "right_right", False),\
11:(3,1, "left_left", False), 12:(3,2, "left_right", False)\
}
#self.path_follower_test_settings = {0:(7, 43, "traj_3", True)#(7,2, "traj_1", True, True), 1:(7, 12, "traj_2", True, True)}
self.is_testing = EnvConfig.USE_TESTING
self.small_window_size = False
self.use_predifined_mode_person = True
self.use_goal = True
self.use_orientation_in_observation = True
self.collision_distance = 0.3
self.best_distance = 1.5
self.robot_mode = 0
self.window_size = 10
self.use_movebase = True
self.use_reachability = False
self.use_obstacles = EnvConfig.USE_OBSTACLES
self.obstacle_mode = EnvConfig.OBSTACLE_MODE
self.obstacle_names = []
self.person_scan = [1000.0 for i in range(EnvConfig.SCAN_REDUCTION_SIZE)]
self.person_use_move_base = EnvConfig.PERSON_USE_MB
self.person_mode = 0
self.position_thread = None
self.eval_x = -4
self.eval_y = -4
self.eval_orientation = 0
self.robot_eval_x = -1
self.robot_eval_y = -1
self.path_follower_current_setting_idx = 0
self.use_supervise_action = False
self.mode_person = 0
self.use_noise = True
self.is_use_test_setting = False
self.use_reverse = True
if self.small_window_size:
self.window_size = 5
if self.is_testing:
self.use_noise = False
self.use_reverse = False
self.is_use_test_setting = True
self.fallen = False
self.is_max_distance = False
self.use_random_around_person_ = False
self.max_mod_person_ = 7
self.wait_observation_ = 0
# being use for observation visualization
self.center_pos_ = (0, 0)
self.colors_visualization = cv.cvtColor(cv.applyColorMap(np.arange(0, 255, dtype=np.uint8), cv.COLORMAP_WINTER), cv.COLOR_BGR2RGB).reshape(255, 3).tolist()
self.color_index = 0
self.first_call_observation = True
self.test_simulation_ = False
self.person_scan = [1000.0 for i in range(EnvConfig.SCAN_REDUCTION_SIZE)]
self.person_use_move_base = EnvConfig.PERSON_USE_MB
self.person_mode = 0
self.position_thread = None
self.eval_x = -4
self.eval_y = -4
self.eval_orientation = 0
self.robot_eval_x = -1
self.robot_eval_y = -1
self.min_distance = 1
self.max_distance = 2.5
if self.test_simulation_ or self.is_evaluation_:
self.max_numb_steps = 80
elif self.is_use_test_setting:
self.max_numb_steps = 100
else:
self.max_numb_steps = 80
self.reward_range = [-1, 1]
self.reachabilit_value = None
if self.use_reachability:
with open('data/reachability.pkl', 'rb') as f:
self.reachabilit_value = pickle.load(f)
def get_person_pos(self):
theta = self.person.get_orientation()
xy = self.person.get_pos()
return [xy[0], xy[1], theta]
def get_system_velocities(self):
robot_state = self.robot.get_state()
person_state = self.person.get_state()
robot_lin_velocity = robot_state["velocity"][0]
robot_angular_velocity = robot_state["velocity"][1]
robot_orientation = robot_state["orientation"]
person_lin_velocity = person_state["velocity"][0]
person_angular_velocity = person_state["velocity"][1]
x_distance_between = person_state["position"][0] - robot_state["position"][0]
y_distance_between = person_state["position"][1] - robot_state["position"][1]
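        # combine both agents' linear/angular velocities with the current offset
        # to obtain the relative rates returned below (formulas kept as in the original)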
dx_dt = -person_lin_velocity + robot_lin_velocity * math.cos(robot_orientation) + person_angular_velocity * y_distance_between
dy_dt = robot_lin_velocity * math.sin(robot_orientation) - person_angular_velocity * x_distance_between
da_dt = robot_angular_velocity - person_angular_velocity
return (dx_dt, dy_dt, da_dt)
def get_test_path_number(self):
rospy.loginfo("current path idx: {}".format(self.path_follower_current_setting_idx))
return self.path_follower_test_settings[self.path_follower_current_setting_idx][2]
def use_test_setting(self):
self.is_use_test_setting = True
def set_person_mode(self, setting):
self.person_mode = setting
def set_use_obstacles(self, setting):
self.use_obstacles = setting
def set_agent(self, agent_num):
try:
self.node = rospy.init_node('gym_gazeboros_{}'.format(agent_num))
except Exception as e:
rospy.logerr("probably already init in another node {}".format(e))
rospy.wait_for_service('/gazebo/set_model_state')
self.set_model_state_sp = rospy.ServiceProxy('/gazebo/set_model_state', SetModelState)
date_time = datetime.now().strftime("%m_%d_%Y_%H_%M_%S")
self.agent_num = agent_num
self.obstacle_pub_ = rospy.Publisher('/move_base_node_tb3_{}/TebLocalPlannerROS/obstacles'.format(self.agent_num), ObstacleArrayMsg, queue_size=1)
self.person_obstacle_pub_ = rospy.Publisher('/move_base_node_person_{}/TebLocalPlannerROS/obstacles'.format(self.agent_num), ObstacleArrayMsg, queue_size=1)
self.create_robots()
self.path = {}
self.paths = []
self.log_file = None
try:
with open('data/person_trajectories_rl.pkl', 'rb') as f:
paths = pickle.load(f)
for path in paths:
angle_person = path['start_person']['orientation']
for angle in [x for x in range(0, 360, 10)]:
for angle_robot_person in [x for x in range(0, 360, 90)]:
path_angle = path.copy()
angle_from_person = np.deg2rad(angle) + angle_person
angle_person_robot = np.deg2rad(angle_robot_person) + angle_person
path_angle['start_robot']['pos'] = (path_angle['start_person']['pos'][0] + math.cos(angle_from_person)*2, path_angle['start_person']['pos'][1] + math.sin(angle_from_person)*2)
path_angle['start_robot']['orientation'] = angle_person_robot
path_angle['name'] = path['name'] + " " + str(angle) +" " + str(angle_robot_person)
self.paths.append(path_angle)
self.path_idx = -1
self.path = self.paths[self.path_idx]
except Exception as e:
print("error happend in writing {}".format(e))
self.agent_num = agent_num
self.state_cb_prev_time = None
self.model_states_sub = rospy.Subscriber("/gazebo/model_states", ModelStates, self.model_states_cb)
self.scan_sub = rospy.Subscriber("/person_{}/scan".format(self.agent_num), LaserScan, self.scan_cb)
if EnvConfig.INIT_SIM_ON_AGENT:
with self.lock:
self.init_simulator()
def scan_cb(self, msg):
reduced_size = EnvConfig.SCAN_REDUCTION_SIZE
large_n = 1000.0
div = int(len(msg.ranges)/reduced_size)
reduced_scan = []
count = 0
a_size = 0
avg = 0
# Reduce from 720 to reduced size
for r in msg.ranges:
if r > 0 and r < 20:
avg += r
a_size += 1
count += 1
if count == div:
if a_size != 0:
avg /= a_size
else:
avg = large_n
reduced_scan.append(avg)
count = 0
a_size = 0
avg = 0
self.person_scan = reduced_scan
pass
def create_obstacle_msg(self, name, pose):
obstacle_msg = ObstacleMsg()
obstacle_msg.id = 1
point = Point32()
point.x = pose.position.x
point.y = pose.position.y
point.z = pose.position.z
obstacle_msg.polygon.points.append(point)
# TODO probably needs some tweaking but works for regular cyn/box
# - I think the robot could be ok to get closer to the obstacles?
# TODO polygon for box instead of using a circle
obstacle_msg.radius = EnvConfig.OBSTACLE_SIZE/2
obstacle_msg.orientation.x = pose.orientation.x
obstacle_msg.orientation.y = pose.orientation.y
obstacle_msg.orientation.z = pose.orientation.z
obstacle_msg.orientation.w = pose.orientation.w
obstacle_msg.velocities.twist.linear.x = 0
obstacle_msg.velocities.twist.angular.z = 0
return obstacle_msg
def model_states_cb(self, states_msg):
# Grab Obstacle Names for Agent
if not self.obstacle_names:
for name in states_msg.name:
if "obstacle" in name:
for char in name:
if char.isdigit():
if int(char) == self.agent_num:
self.obstacle_names.append(name)
obstacle_msg_array = ObstacleArrayMsg()
obstacle_msg_array.header.stamp = rospy.Time.now()
obstacle_msg_array.header.frame_id = "tb3_{}/odom".format(self.agent_num)
person_obs_msg_array = ObstacleArrayMsg()
person_obs_msg_array.header.stamp = rospy.Time.now()
person_obs_msg_array.header.frame_id = "person_{}/odom".format(self.agent_num)
for model_idx in range(len(states_msg.name)):
found = False
for robot in [self.robot, self.person]:
if states_msg.name[model_idx] == robot.name:
found = True
break
elif "obstacle" in states_msg.name[model_idx] and EnvConfig.SEND_TEB_OBSTACLES:
obstacle_msg_array.obstacles.append(
self.create_obstacle_msg(
states_msg.name[model_idx], states_msg.pose[model_idx]
)
)
person_obs_msg_array.obstacles.append(
self.create_obstacle_msg(
states_msg.name[model_idx], states_msg.pose[model_idx]
)
)
if not found:
continue
pos = states_msg.pose[model_idx]
euler = Quaternion(w=pos.orientation.w, x=pos.orientation.x, y=pos.orientation.y, z=pos.orientation.z).to_euler()
if EnvConfig.PERSON_USE_MB:
orientation = euler[2]
else:
# Preserve how Payam had it setup...
orientation = euler[0]
fall_angle = np.deg2rad(90)
if abs(abs(euler[1]) - fall_angle)< 0.1 or abs(abs(euler[2]) - fall_angle)<0.1:
self.fallen = True
# get velocity
twist = states_msg.twist[model_idx]
linear_vel = twist.linear.x
angular_vel = twist.angular.z
pos_x = pos.position.x
pos_y = pos.position.y
state = {}
state["velocity"] = (linear_vel, angular_vel)
state["position"] = (pos_x, pos_y)
state["orientation"] = orientation
robot.set_state(state)
if self.use_movebase:
obstacle_msg = ObstacleMsg()
obstacle_msg.id = 0
for x in range (5):
for y in range (5):
point = Point32()
point.x = pos.position.x + (x-2)*0.1
point.y = pos.position.y + (y-2)*0.1
point.z = pos.position.z
obstacle_msg.polygon.points.append(point)
obstacle_msg.orientation.x = pos.orientation.x
obstacle_msg.orientation.y = pos.orientation.y
obstacle_msg.orientation.z = pos.orientation.z
obstacle_msg.orientation.w = pos.orientation.w
obstacle_msg.velocities.twist.linear.x = twist.linear.x
                obstacle_msg.velocities.twist.angular.z = twist.angular.z
if robot.name == self.person.name:
obstacle_msg.header = obstacle_msg_array.header
obstacle_msg_array.obstacles.append(obstacle_msg)
else:
obstacle_msg.header = person_obs_msg_array.header
person_obs_msg_array.obstacles.append(obstacle_msg)
self.obstacle_pub_.publish(obstacle_msg_array)
self.person_obstacle_pub_.publish(person_obs_msg_array)
def create_robots(self):
self.person = Robot('person_{}'.format(self.agent_num),
max_angular_speed=1, max_linear_speed=.6, agent_num=self.agent_num, window_size=self.window_size, is_testing=self.is_testing, use_goal=self.use_goal, use_movebase=self.use_movebase)
relative = self.person
self.robot = Robot('tb3_{}'.format(self.agent_num),
max_angular_speed=1.8, max_linear_speed=0.8, relative=relative, agent_num=self.agent_num, use_goal=self.use_goal, use_movebase=self.use_movebase ,use_jackal=self.use_jackal, window_size=self.window_size, is_testing=self.is_testing)
    def find_random_point_in_circle(self, radius, min_distance, around_point):
        r = (radius - min_distance) * math.sqrt(random.random()) + min_distance
theta = random.random() * 2 * math.pi
x = around_point[0] + r * math.cos(theta)
y = around_point[1] + r * math.sin(theta)
return (x, y)
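    # Note: weighting the radius by sqrt(random.random()) spreads samples roughly
    # uniformly over area rather than clustering them near around_point; with
    # min_distance = 0 this is the standard uniform-in-a-disc sampling trick.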
def set_mode_person_based_on_episode_number(self, episode_number):
if episode_number < 500:
self.mode_person = 0
elif episode_number < 510:
self.mode_person = 1
elif episode_number < 700:
self.mode_person = 3
elif episode_number < 900:
self.mode_person = 5
elif episode_number < 1000:
self.mode_person = 6
else:
#self.mode_person = 7
if random.random() > 0.5:
self.mode_person = 7
else:
self.mode_person = random.randint(0, 6)
def get_init_pos_robot_person(self):
if self.is_evaluation_:
idx_start = 0
elif self.is_use_test_setting:
idx_start = self.path_follower_test_settings[self.path_follower_current_setting_idx][1]
else:
idx_start = random.randint(0, len(self.path["points"]) - 20)
self.current_path_idx = idx_start
if not self.is_use_test_setting and self.use_reverse and random.random() > 0.5:
self.path["points"].reverse()
if self.person_use_move_base:
if EnvConfig.EVALUATION_MODE:
if self.eval_x > 4:
self.eval_x = -4
self.eval_y = -4
self.eval_orientation = 0
self.robot_eval_x = -1
self.robot_eval_y = -1
if self.robot_eval_x > 1:
self.robot_eval_x = -1
self.robot_eval_y = 1
init_pos_person = {"pos": (self.eval_x, self.eval_y), "orientation":self.eval_orientation}
init_pos_robot = {"pos": (self.robot_eval_x, self.robot_eval_y), "orientation":self.eval_orientation}
self.eval_x += 1
self.eval_y += 1
self.eval_orientation += math.pi/4
self.robot_eval_x += 2
self.robot_eval_y += 2
return init_pos_robot, init_pos_person
else:
x = random.uniform(-3,3)
y = random.uniform(-3,3)
init_pos_person = {"pos": (x, y), "orientation":random.uniform(0, math.pi)}
random_pos_robot = self.find_random_point_in_circle(1.5, 2.5, init_pos_person["pos"])
init_pos_robot = {"pos": random_pos_robot, "orientation":random.uniform(0, math.pi)}
return init_pos_robot, init_pos_person
if self.is_evaluation_:
init_pos_person = self.path["start_person"]
init_pos_robot = self.path["start_robot"]
elif self.is_use_test_setting and not self.path_follower_test_settings[self.path_follower_current_setting_idx][3]:
init_pos_person = {"pos": (0, 0), "orientation": 0}
mode = self.path_follower_test_settings[self.path_follower_current_setting_idx][1]
if mode == 0:
                orientation_person_rob = 0
            elif mode == 1:
                orientation_person_rob = -math.pi / 4.
            elif mode == 2:
                orientation_person_rob = math.pi / 4.
            elif mode == 3:
                orientation_person_rob = -math.pi
            else:
                orientation_person_rob = math.pi/8*7
            pos_robot = (1.5*math.cos(orientation_person_rob), 1.5*math.sin(orientation_person_rob))
init_pos_robot = {"pos": pos_robot, "orientation": 0}
elif not self.use_path:
init_pos_person = {"pos": (0, 0), "orientation": random.random()*2*math.pi - math.pi}
ahead_person = (init_pos_person['pos'][0] + math.cos(init_pos_person["orientation"]) * 2, init_pos_person['pos'][1] + math.sin(init_pos_person["orientation"]) * 2)
random_pos_robot = self.find_random_point_in_circle(1.5, 2.5, init_pos_person["pos"])
init_pos_robot = {"pos": random_pos_robot,\
"orientation": init_pos_person["orientation"]}#random.random()*2*math.pi - math.pi}#self.calculate_angle_using_path(idx_start)}
elif self.use_random_around_person_:
init_pos_person = {"pos": self.path["points"][idx_start], "orientation": self.calculate_angle_using_path(idx_start)}
init_pos_robot = {"pos": self.find_random_point_in_circle(1.5, 1, self.path["points"][idx_start]),\
"orientation": random.random()*2*math.pi - math.pi}#self.calculate_angle_using_path(idx_start)}
else:
init_pos_person = {"pos": self.path["points"][idx_start], "orientation": self.calculate_angle_using_path(idx_start)}
if self.is_use_test_setting and len(self.path_follower_test_settings[self.path_follower_current_setting_idx])>4 and self.path_follower_test_settings[self.path_follower_current_setting_idx][4] :
                orientation_person_rob = math.pi/2.2
                pos_robot = (self.path["points"][idx_start][0] + 2*math.cos(orientation_person_rob+init_pos_person["orientation"]), self.path["points"][idx_start][1] + 2*math.sin(orientation_person_rob+init_pos_person["orientation"]))
init_pos_robot = {"pos": pos_robot, "orientation":self.calculate_angle_using_path(idx_start+5)}
else:
idx_robot = idx_start + 1
while (math.hypot(self.path["points"][idx_robot][1] - self.path["points"][idx_start][1],
self.path["points"][idx_robot][0] - self.path["points"][idx_start][0]) < 1.6):
idx_robot += 1
init_pos_robot = {"pos": self.path["points"][idx_robot],\
"orientation": self.calculate_angle_using_path(idx_robot)}
if not self.is_testing:
init_pos_robot["pos"] = (init_pos_robot["pos"][0]+ random.random()-0.5, \
init_pos_robot["pos"][1]+ random.random()-0.5)
init_pos_robot["orientation"] = GazeborosEnv.wrap_pi_to_pi(init_pos_robot["orientation"] + random.random()-0.5)
return init_pos_robot, init_pos_person
def set_marker_pose(self, xy):
pose = {"pos": (xy[0], xy[1]), "orientation": 0}
self.set_pos("marker", pose)
def set_pos(self, name, pose):
set_model_msg = ModelState()
set_model_msg.model_name = name
self.prev_action = (0, 0)
quaternion_rotation = Quaternion.from_euler(0, pose["orientation"], 0)
set_model_msg.pose.orientation.x = quaternion_rotation[3]
set_model_msg.pose.orientation.y = quaternion_rotation[1]
set_model_msg.pose.orientation.z = quaternion_rotation[2]
set_model_msg.pose.orientation.w = quaternion_rotation[0]
if self.use_jackal and "tb3" in name:
set_model_msg.pose.position.z = 2.6 * self.agent_num + 0.1635
elif "marker" in name:
set_model_msg.pose.position.z = 1.6
else:
set_model_msg.pose.position.z = 2.6 * self.agent_num + 0.099
set_model_msg.pose.position.x = pose["pos"][0]
set_model_msg.pose.position.y = pose["pos"][1]
rospy.wait_for_service('/gazebo/set_model_state')
self.set_model_state_sp(set_model_msg)
def get_obstacle_init_pos(self, init_pos_robot, init_pos_person):
num_obstacles = len(self.obstacle_names)
out_of_the_way_pose = {"pos": (15,15), "orientation":0}
if not self.use_obstacles:
return [out_of_the_way_pose for i in range(num_obstacles)]
elif self.obstacle_mode == 0:
# Place obstacles between robot and person
# Calculate distance between robots, subtract some buffer room
x_range = abs(init_pos_robot["pos"][0] - init_pos_person["pos"][0])
y_range = abs(init_pos_robot["pos"][1] - init_pos_person["pos"][1])
if x_range != 0:
x_range -= EnvConfig.OBSTACLE_SIZE
if y_range != 0:
y_range -= EnvConfig.OBSTACLE_SIZE
# Check if we have enough space for obstacles between robots
x_buffer_space = y_buffer_space = -1
num_obs_to_place = num_obstacles + 1
while x_buffer_space < 0 and y_buffer_space < 0:
num_obs_to_place -= 1
x_buffer_space = x_range - (EnvConfig.OBSTACLE_SIZE * num_obs_to_place)
y_buffer_space = y_range - ((EnvConfig.OBSTACLE_SIZE * num_obs_to_place))
if num_obs_to_place == 0:
# No space for obstacles so put them away
rospy.logwarn("Not enough space for obstacles between robots.")
return [out_of_the_way_pose for i in range(num_obstacles)]
x_spacing = x_range / num_obs_to_place
y_spacing = y_range / num_obs_to_place
if init_pos_robot["pos"][0] < init_pos_person["pos"][0]:
base_x = init_pos_robot["pos"][0]
else:
base_x = init_pos_person["pos"][0]
if init_pos_robot["pos"][1] < init_pos_person["pos"][1]:
base_y = init_pos_robot["pos"][1]
else:
base_y = init_pos_person["pos"][1]
# Place obstacles on line between robot and person
obstacle_positions = []
for i in range(num_obs_to_place):
base_x += x_spacing
base_y += y_spacing
obstacle_positions.append({"pos": (base_x, base_y), "orientation":0})
obstacle_positions.extend([out_of_the_way_pose for i in range(num_obstacles - num_obs_to_place)])
return obstacle_positions
elif self.obstacle_mode == 1:
# Put obstacles randomly within area
obstacle_radius = EnvConfig.OBSTACLE_RADIUS_AWAY
min_distance_away_from_robot = EnvConfig.OBSTACLE_SIZE * 1.25
obstacle_positions = []
if EnvConfig.EVALUATION_MODE:
x_diff = -1
y_diff = -1
count = 0
for obs_idx in range(num_obstacles):
p_xy = init_pos_robot["pos"]
point = (p_xy[0] + x_diff*1.25, p_xy[1] + y_diff*1.25)
point = self.prevent_overlap(init_pos_person["pos"], point, min_distance_away_from_robot)
point = self.prevent_overlap(init_pos_robot["pos"], point, min_distance_away_from_robot)
obstacle_positions.append({"pos": point, "orientation":0})
if count % 2 == 0:
x_diff += 1
else:
y_diff += 1
x_diff -= 0.5
count += 1
else:
for obs_idx in range(num_obstacles):
random_point = self.find_random_point_in_circle(obstacle_radius, min_distance_away_from_robot, init_pos_robot["pos"])
random_point = self.prevent_overlap(init_pos_person["pos"], random_point, min_distance_away_from_robot)
obstacle_positions.append({"pos": random_point, "orientation":0})
return obstacle_positions
# Prevent point b from overlapping point a
def prevent_overlap(self, point_a, point_b, min_distance):
x = point_b[0]
y = point_b[1]
if abs(point_b[0] - point_a[0]) < min_distance:
x += min_distance
if abs(point_b[1] - point_a[1]) < min_distance:
y += min_distance
return (x, y)
def set_obstacle_pos(self, init_pos_robot, init_pos_person):
obs_positions = self.get_obstacle_init_pos(init_pos_robot, init_pos_person)
for obs_idx in range(len(self.obstacle_names)):
self.set_pos(self.obstacle_names[obs_idx], obs_positions[obs_idx])
def init_simulator(self):
self.number_of_steps = 0
rospy.loginfo("init simulation called")
self.is_pause = True
init_pos_robot, init_pos_person = self.get_init_pos_robot_person()
self.center_pos_ = init_pos_person["pos"]
self.color_index = 0
self.fallen = False
self.is_max_distance = False
self.first_call_observation = True
rospy.loginfo("Waiting for path follower to die")
if self.position_thread:
self.position_thread.join()
rospy.loginfo("Done waiting")
self.current_obsevation_image_.fill(255)
if self.use_movebase:
self.robot.movebase_cancel_goals()
if self.person_use_move_base:
self.person.movebase_cancel_goals()
rospy.sleep(0.5)
self.person.stop_robot()
self.robot.stop_robot()
# if self.use_movebase:
# self.prev_action = (0,0, 0)
# else:
self.prev_action = (0, 0)
if EnvConfig.TRAIN_HINN:
init_pos_robot = {"pos": (30,30), "orientation": 0}
# Set positions of robots and obstacles
self.set_pos(self.robot.name, init_pos_robot)
self.set_pos(self.person.name, init_pos_person)
if EnvConfig.TRAIN_HINN:
self.set_obstacle_pos(init_pos_person, init_pos_robot)
else:
self.set_obstacle_pos(init_pos_robot, init_pos_person)
self.robot.update(init_pos_robot)
self.person.update(init_pos_person)
self.path_finished = False
self.position_thread = threading.Thread(target=self.path_follower, args=(self.current_path_idx, self.robot, init_pos_person,))
self.position_thread.daemon = True
self.is_reseting = False
self.position_thread.start()
self.wait_observation_ = 0
self.is_reseting = False
self.robot.reset = False
self.person.reset = False
# self.resume_simulator()
rospy.loginfo("init simulation finished")
self.is_pause = False
def pause(self):
self.is_pause = True
self.person.pause()
self.robot.pause()
def resume_simulator(self):
rospy.loginfo("resume simulator")
self.is_pause = False
self.person.resume()
self.robot.resume()
rospy.loginfo("resumed simulator")
def calculate_angle_using_path(self, idx):
return math.atan2(self.path["points"][idx+1][1] - self.path["points"][idx][1], self.path["points"][idx+1][0] - self.path["points"][idx][0])
@staticmethod
def denormalize(value, max_val):
if type(value) == tuple or type(value) == list:
norm_val = [float(x) * max_val for x in value]
else:
norm_val = value * float(max_val)
return norm_val
@staticmethod
def normalize(value, max_val, zero_to_one=None):
if type(value) == tuple or type(value) == list:
norm_val = [x/float(max_val) for x in value]
else:
norm_val = value/float(max_val)
if zero_to_one is not None:
if type(value) == tuple or type(value) == list:
norm_val = [(x + 1)/2 for x in norm_val]
else:
norm_val = (norm_val + 1)/2.
return norm_val
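    # Examples (illustrative): normalize(2.0, 4) == 0.5,
    # normalize((2, -2), 4, zero_to_one=True) == [0.75, 0.25],
    # and denormalize(0.5, 4) == 2.0.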
@staticmethod
def get_global_position(pos_goal, center):
while not center.is_current_state_ready():
if center.reset:
rospy.logwarn("reseting so return none in rel pos rel: {} center".format(relative.is_current_state_ready(), center.is_current_state_ready()))
return (None, None)
time.sleep(0.01)
rospy.logwarn("waiting for observation to be ready")
#relative_orientation = relative.state_['orientation']
center_pos = np.asarray(center.state_['position'])
center_orientation = center.state_['orientation']
#pos = [x * 5 for x in pos_goal]
relative_pos = np.asarray(pos_goal)
        # transform the goal from the center's frame to the global coordinate frame
rotation_matrix = np.asarray([[np.cos(center_orientation), np.sin(center_orientation)], [-np.sin(center_orientation), np.cos(center_orientation)]])
relative_pos = np.matmul(relative_pos, rotation_matrix)
global_pos = np.asarray(relative_pos + center_pos)
return global_pos
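    # Worked example (illustrative): with the center at (1, 0) facing +pi/2 and
    # pos_goal = (1, 0), i.e. one unit ahead in the center's frame, the row-vector
    # product above gives (0, 1), so the returned global position is (1, 1).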
@staticmethod
def get_global_position_orientation(pos_goal, orientation_goal, center):
while not center.is_current_state_ready():
if center.reset:
rospy.logwarn("reseting so return none in rel pos rel: {} center".format(relative.is_current_state_ready(), center.is_current_state_ready()))
return (None, None)
time.sleep(0.01)
rospy.logwarn("waiting for observation to be ready")
#relative_orientation = relative.state_['orientation']
center_pos = np.asarray(center.state_['position'])
center_orientation = center.state_['orientation']
#pos = [x * 5 for x in pos_goal]
relative_pos = np.asarray(pos_goal)
relative_pos2 = np.asarray((relative_pos[0] + math.cos(orientation_goal), relative_pos[1] + math.sin(orientation_goal)))
        # transform the goal from the center's frame to the global coordinate frame
rotation_matrix = np.asarray([[np.cos(center_orientation), np.sin(center_orientation)], [-np.sin(center_orientation), np.cos(center_orientation)]])
relative_pos = np.matmul(relative_pos, rotation_matrix)
relative_pos2 = np.matmul(relative_pos2, rotation_matrix)
global_pos = np.asarray(relative_pos + center_pos)
global_pos2 = np.asarray(relative_pos2 + center_pos)
new_orientation = np.arctan2(global_pos2[1]-global_pos[1], global_pos2[0]-global_pos[0])
return global_pos, new_orientation
@staticmethod
def wrap_pi_to_pi(angle):
while angle > math.pi:
angle -= 2*math.pi
while angle < - math.pi:
angle += 2*math.pi
return angle
@staticmethod
def get_relative_heading_position(relative, center):
while not relative.is_current_state_ready() or not center.is_current_state_ready():
if relative.reset:
rospy.logwarn("reseting so return none in rel pos rel: {} center".format(relative.is_current_state_ready(), center.is_current_state_ready()))
return (None, None)
time.sleep(0.1)
rospy.loginfo("waiting for observation to be ready heading pos")
relative_orientation = relative.state_['orientation']
center_pos = np.asarray(center.state_['position'])
center_orientation = center.state_['orientation']
        # transform the relative position into the center's coordinate frame
relative_pos = np.asarray(relative.state_['position'] - center_pos)
relative_pos2 = np.asarray((relative_pos[0] + math.cos(relative_orientation) , relative_pos[1] + math.sin(relative_orientation)))
        rotation_matrix = np.asarray([[np.cos(-center_orientation), np.sin(-center_orientation)], [-np.sin(-center_orientation), np.cos(-center_orientation)]])
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 11 16:19:39 2014
"""
import os
import sys
import imp
# Put location of the ODYM modules on the search path
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..\\..')) + '\\modules') # add ODYM module directory to system path
#NOTE: The hidden variable __file__ must be known to the script for the directory structure to work.
# Therefore: When first using the model, run the entire script with F5 so that the __file__ variable can be created.
import dynamic_stock_model as dsm # remove and import the class manually if this unit test is run as standalone script
imp.reload(dsm)
import numpy as np
import unittest
import scipy
###############################################################################
"""My Input for fixed lifetime"""
Time_T_FixedLT = np.arange(0,10)
Time_T_30 = np.arange(0,30)
Inflow_T_FixedLT = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
lifetime_FixedLT = {'Type': 'Fixed', 'Mean': np.array([5])}
lifetime_FixedLT0 = {'Type': 'Fixed', 'Mean': np.array([0])}
#lifetime_FixedLT = {'Type': 'Fixed', 'Mean': np.array([5,5,5,5,5,5,5,5,5,5])}
lifetime_NormLT = {'Type': 'Normal', 'Mean': np.array([5]), 'StdDev': np.array([1.5])}
lifetime_NormLT0 = {'Type': 'Normal', 'Mean': np.array([0]), 'StdDev': np.array([1.5])}
lifetime_NormLT8 = {'Type': 'Normal', 'Mean': np.array([8]), 'StdDev': np.array([3])}
###############################################################################
"""My Output for fixed lifetime"""
Outflow_T_FixedLT = np.array([0, 0, 0, 0, 0, 1, 2, 3, 4, 5])
Outflow_TC_FixedLT = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 2, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 3, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 4, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 5, 0, 0, 0, 0, 0]])
Stock_T_FixedLT = np.array([1, 3, 6, 10, 15, 20, 25, 30, 35, 40])
StockChange_T_FixedLT = np.array([1, 2, 3, 4, 5, 5, 5, 5, 5, 5])
Stock_TC_FixedLT = np.array([[1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2, 3, 0, 0, 0, 0, 0, 0, 0],
[1, 2, 3, 4, 0, 0, 0, 0, 0, 0],
[1, 2, 3, 4, 5, 0, 0, 0, 0, 0],
[0, 2, 3, 4, 5, 6, 0, 0, 0, 0],
[0, 0, 3, 4, 5, 6, 7, 0, 0, 0],
[0, 0, 0, 4, 5, 6, 7, 8, 0, 0],
[0, 0, 0, 0, 5, 6, 7, 8, 9, 0],
[0, 0, 0, 0, 0, 6, 7, 8, 9, 10]])
Bal = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
Bal9 = np.zeros((9))
Bal30 = np.zeros((30))
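# Illustrative sketch (not part of the reference data): with a fixed 5-year lifetime
# every cohort leaves exactly 5 years after it entered, so the expected arrays above
# follow directly from the inflow. Names prefixed with "_" are ad hoc.
_inflow_fixed = np.array(Inflow_T_FixedLT, dtype=float)
_outflow_fixed = np.concatenate((np.zeros(5), _inflow_fixed[:-5]))   # == Outflow_T_FixedLT
_stock_fixed = np.cumsum(_inflow_fixed - _outflow_fixed)             # == Stock_T_FixedLT
_stockchange_fixed = np.concatenate(([_stock_fixed[0]], np.diff(_stock_fixed)))  # == StockChange_T_FixedLT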
"""My Output for normally distributed lifetime"""
Stock_TC_NormLT = np.array([[ 9.99570940e-01, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00],
[ 9.96169619e-01, 1.99914188e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00],
[ 9.77249868e-01, 1.99233924e+00, 2.99871282e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00],
[ 9.08788780e-01, 1.95449974e+00, 2.98850886e+00,
3.99828376e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00],
[ 7.47507462e-01, 1.81757756e+00, 2.93174960e+00,
3.98467848e+00, 4.99785470e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00],
[ 5.00000000e-01, 1.49501492e+00, 2.72636634e+00,
3.90899947e+00, 4.98084810e+00, 5.99742564e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00],
[ 2.52492538e-01, 1.00000000e+00, 2.24252239e+00,
3.63515512e+00, 4.88624934e+00, 5.97701772e+00,
6.99699658e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00],
[ 9.12112197e-02, 5.04985075e-01, 1.50000000e+00,
2.99002985e+00, 4.54394390e+00, 5.86349921e+00,
6.97318734e+00, 7.99656752e+00, 0.00000000e+00,
0.00000000e+00],
[ 2.27501319e-02, 1.82422439e-01, 7.57477613e-01,
2.00000000e+00, 3.73753731e+00, 5.45273268e+00,
6.84074908e+00, 7.96935696e+00, 8.99613846e+00,
0.00000000e+00],
[ 3.83038057e-03, 4.55002639e-02, 2.73633659e-01,
1.00997015e+00, 2.50000000e+00, 4.48504477e+00,
6.36152146e+00, 7.81799894e+00, 8.96552657e+00,
9.99570940e+00]])
Stock_T_NormLT = np.array([ 0.99957094, 2.9953115 , 5.96830193, 9.85008113,
14.4793678 , 19.60865447, 24.99043368, 30.46342411,
35.95916467, 41.45873561])
Stock_SDM_NegInflow_NormLT = np.array([0.0,0,1,2,3,5,6,8,8,6,6,6,6,6,8,8,8,9,10,12,9,9,9,9,9,9,9,9.0,9,9])
Stock_SDM_NegInflow_NormLTNeg = np.array([0.0,0,0,0,0,0,0,0,8,6,6,6,6,6,3,3,3,3,4,5,6,7,8,9,9,9,7.1,8,8.0,8])
Stock_SDM_PosInflow_NormLTNeg = np.array([0.0,0,0,0,0,0,0,0,8,8,8,8.2,9,9,8.5,8,7.5,7,6.5,6,5.5,7,8,9,8.8,8.5,8.1,8,8.0,8])
Outflow_T_NormLT = np.array([ 4.29060333e-04, 4.25944090e-03, 2.70095728e-02,
1.18220793e-01, 3.70713330e-01, 8.70713330e-01,
1.61822079e+00, 2.52700957e+00, 3.50425944e+00,
4.50042906e+00])
Outflow_TC_NormLT = np.array([[ 4.29060333e-04, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00],
[ 3.40132023e-03, 8.58120666e-04, -0.00000000e+00,
-0.00000000e+00, -0.00000000e+00, -0.00000000e+00,
-0.00000000e+00, -0.00000000e+00, -0.00000000e+00,
-0.00000000e+00],
[ 1.89197514e-02, 6.80264047e-03, 1.28718100e-03,
-0.00000000e+00, -0.00000000e+00, -0.00000000e+00,
-0.00000000e+00, -0.00000000e+00, -0.00000000e+00,
-0.00000000e+00],
[ 6.84610878e-02, 3.78395028e-02, 1.02039607e-02,
1.71624133e-03, -0.00000000e+00, -0.00000000e+00,
-0.00000000e+00, -0.00000000e+00, -0.00000000e+00,
-0.00000000e+00],
[ 1.61281318e-01, 1.36922176e-01, 5.67592541e-02,
1.36052809e-02, 2.14530167e-03, -0.00000000e+00,
-0.00000000e+00, -0.00000000e+00, -0.00000000e+00,
-0.00000000e+00],
[ 2.47507462e-01, 3.22562636e-01, 2.05383263e-01,
7.56790055e-02, 1.70066012e-02, 2.57436200e-03,
-0.00000000e+00, -0.00000000e+00, -0.00000000e+00,
-0.00000000e+00],
[ 2.47507462e-01, 4.95014925e-01, 4.83843953e-01,
2.73844351e-01, 9.45987569e-02, 2.04079214e-02,
3.00342233e-03, -0.00000000e+00, -0.00000000e+00,
-0.00000000e+00],
[ 1.61281318e-01, 4.95014925e-01, 7.42522387e-01,
6.45125271e-01, 3.42305439e-01, 1.13518508e-01,
2.38092416e-02, 3.43248267e-03, -0.00000000e+00,
-0.00000000e+00],
[ 6.84610878e-02, 3.22562636e-01, 7.42522387e-01,
9.90029850e-01, 8.06406589e-01, 4.10766527e-01,
1.32438260e-01, 2.72105619e-02, 3.86154300e-03,
-0.00000000e+00],
[ 1.89197514e-02, 1.36922176e-01, 4.83843953e-01,
9.90029850e-01, 1.23753731e+00, 9.67687907e-01,
4.79227614e-01, 1.51358011e-01, 3.06118821e-02,
4.29060333e-03]])
StockChange_T_NormLT = np.array([ 0.99957094, 1.99574056, 2.97299043, 3.88177921, 4.62928667,
5.12928667, 5.38177921, 5.47299043, 5.49574056, 5.49957094])
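# Cross-check sketch (assumption: the normal-lifetime reference values above follow the
# survival function of N(5, 1.5) evaluated at whole-year ages; "_sf_norm" is ad hoc):
import scipy.stats
_sf_norm = scipy.stats.norm.sf(np.arange(10), loc=5, scale=1.5)
# _sf_norm[0] ~= 0.99957094 and _sf_norm[5] == 0.5, i.e. the first column of
# Stock_TC_NormLT (the first cohort has inflow 1); the year-on-year drop in survival
# (starting from 1.0) matches the first column of Outflow_TC_NormLT.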
"""My Output for Weibull-distributed lifetime"""
Stock_TC_WeibullLT = np.array([[1, 0, 0, 0, 0, 0, 0, 0, 0, 0], # computed with Excel and taken from there
[0.367879441, 2, 0, 0, 0, 0, 0, 0, 0, 0],
[0.100520187, 0.735758882, 3, 0, 0, 0, 0, 0, 0, 0],
[0.023820879, 0.201040373, 1.103638324, 4, 0, 0, 0, 0, 0, 0],
[0.005102464, 0.047641758, 0.30156056, 1.471517765,5, 0, 0, 0, 0, 0],
[0.001009149, 0.010204929, 0.071462637, 0.402080746,1.839397206, 6, 0, 0, 0, 0],
[0.000186736, 0.002018297, 0.015307393, 0.095283516, 0.502600933, 2.207276647, 7, 0, 0, 0],
[3.26256E-05, 0.000373472, 0.003027446, 0.020409858, 0.119104394, 0.60312112, 2.575156088, 8, 0, 0],
[5.41828E-06, 6.52513E-05, 0.000560208, 0.004036594, 0.025512322, 0.142925273, 0.703641306, 2.943035529, 9, 0],
[8.59762E-07, 1.08366E-05, 9.78769E-05, 0.000746944, 0.005045743, 0.030614786, 0.166746152, 0.804161493, 3.310914971, 10]])
Stock_T_WeibullLT = np.array([1,2.367879441,3.836279069,5.328499576,6.825822547,8.324154666,9.822673522,11.321225,12.8197819,14.31833966])
Outflow_T_WeibullLT = np.array([0,0.632120559,1.531600372,2.507779493,3.502677029,4.50166788,5.501481144,6.501448519,7.5014431,8.501442241])
Outflow_TC_WeibullLT = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0.632120559, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0.267359255, 1.264241118, 0, 0, 0, 0, 0, 0, 0, 0],
[0.076699308, 0.534718509, 1.896361676, 0, 0, 0, 0, 0, 0, 0],
[0.018718414, 0.153398615, 0.802077764, 2.528482235, 0, 0, 0, 0, 0, 0],
[0.004093316, 0.037436829, 0.230097923, 1.069437018, 3.160602794, 0, 0, 0, 0, 0],
[0.000822413, 0.008186632, 0.056155243, 0.306797231, 1.336796273, 3.792723353, 0, 0, 0, 0],
[0.00015411, 0.001644825, 0.012279947, 0.074873658, 0.383496539, 1.604155527, 4.424843912, 0, 0, 0],
[2.72074E-05, 0.000308221, 0.002467238, 0.016373263, 0.093592072, 0.460195846, 1.871514782, 5.056964471, 0, 0],
[4.55852E-06, 5.44147E-05 , 0.000462331 , 0.00328965, 0.020466579, 0.112310487, 0.536895154, 2.138874037, 5.689085029, 0]])
StockChange_T_WeibullLT = np.array([1,1.367879441,1.468399628,1.492220507,1.497322971,1.49833212,1.498518856,1.498551481,1.4985569,1.498557759])
lifetime_WeibullLT = {'Type': 'Weibull', 'Shape': np.array([1.2]), 'Scale': np.array([1])}
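# Hedged cross-check: with Shape 1.2 and Scale 1 the Weibull survival exp(-(t/Scale)**Shape)
# at whole-year ages reproduces the first column of Stock_TC_WeibullLT, e.g.
# exp(-(1.0**1.2)) ~= 0.367879441 and exp(-(2.0**1.2)) ~= 0.100520187.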
InitialStock_WB = np.array([0.01, 0.01, 0.08, 0.2, 0.2, 2, 2, 3, 4, 7.50])
Inflow_WB = np.array([11631.1250671964, 1845.6048709861, 2452.0593141014, 1071.0305279511, 198.1868742385, 391.9674590243, 83.9599583940, 29.8447516023, 10.8731273138, 7.5000000000])
# We need 10 digits AFTER the decimal point to get a 9-decimal-place overlap with np.testing.
# The total number of digits is higher, because there are up to 5 digits before the decimal point.
# For the stock-driven model with initial stock, calculated with Excel
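# A typical comparison (sketch; "computed" is a placeholder for the model output):
# np.testing.assert_array_almost_equal(computed, Sc_InitialStock_2_Ref, decimal=9)
# checks agreement to 9 places after the decimal point, hence the 10-digit constants.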
Sc_InitialStock_2_Ref = np.array([[ 3.29968072, 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ],
[ 3.28845263, 5.1142035 , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ],
[ 3.2259967 , 5.09680099, 2.0068288 , 0. , 0. ,
0. , 0. , 0. , 0. ],
[ 3. , 5. , 2. , 4. , 0. ,
0. , 0. , 0. , 0. ],
[ 2.46759471, 4.64972578, 1.962015 , 3.98638888, 4.93427563,
0. , 0. , 0. , 0. ],
[ 1.65054855, 3.82454624, 1.82456634, 3.91067739, 4.91748538,
3.8721761 , 0. , 0. , 0. ],
[ 0.83350238, 2.55819937, 1.50076342, 3.63671549, 4.82409004,
3.85899993, 2.78772936, 0. , 0. ],
[ 0.30109709, 1.2918525 , 1.00384511, 2.9913133 , 4.48613916,
3.78570788, 2.77824333, 3.36180162, 0. ],
[ 0.07510039, 0.46667297, 0.5069268 , 2.00085849, 3.68999109,
3.5205007 , 2.72547754, 3.35036215, 3.66410986]])
Sc_InitialStock_2_Ref_Sum = np.array([ 3.29968072, 8.40265614, 10.32962649, 14. ,
18. , 20. , 20. , 20. , 20. ])
Oc_InitialStock_2_Ref = np.array([[ 1.41636982e-03, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00],
[ 1.12280883e-02, 2.19524375e-03, -0.00000000e+00,
-0.00000000e+00, -0.00000000e+00, -0.00000000e+00,
-0.00000000e+00, -0.00000000e+00, -0.00000000e+00],
[ 6.24559363e-02, 1.74025106e-02, 8.61420234e-04,
-0.00000000e+00, -0.00000000e+00, -0.00000000e+00,
-0.00000000e+00, -0.00000000e+00, -0.00000000e+00],
[ 2.25996698e-01, 9.68009922e-02, 6.82879736e-03,
1.71697802e-03, -0.00000000e+00, -0.00000000e+00,
-0.00000000e+00, -0.00000000e+00, -0.00000000e+00],
[ 5.32405289e-01, 3.50274224e-01, 3.79849998e-02,
1.36111209e-02, 2.11801070e-03, -0.00000000e+00,
-0.00000000e+00, -0.00000000e+00, -0.00000000e+00],
[ 8.17046165e-01, 8.25179532e-01, 1.37448656e-01,
7.57114903e-02, 1.67902556e-02, 1.66211031e-03,
-0.00000000e+00, -0.00000000e+00, -0.00000000e+00],
[ 8.17046165e-01, 1.26634687e+00, 3.23802924e-01,
2.73961897e-01, 9.33953405e-02, 1.31761643e-02,
1.19661751e-03, -0.00000000e+00, -0.00000000e+00],
[ 5.32405289e-01, 1.26634687e+00, 4.96918311e-01,
6.45402188e-01, 3.37950879e-01, 7.32920558e-02,
9.48603036e-03, 1.44303487e-03, -0.00000000e+00],
[ 2.25996698e-01, 8.25179532e-01, 4.96918311e-01,
9.90454815e-01, 7.96148072e-01, 2.65207178e-01,
5.27657861e-02, 1.14394721e-02, 1.57279902e-03]])
I_InitialStock_2_Ref = np.array([ 3.30109709, 5.11639875, 2.00769022, 4.00171698, 4.93639364, 3.87383821, 2.78892598, 3.36324466, 3.66568266])
""" Test case with fixed lifetime for initial stock"""
Time_T_FixedLT_X = np.arange(1, 9, 1)
lifetime_FixedLT_X = {'Type': 'Fixed', 'Mean': np.array([5])}
InitialStock_X = np.array([0, 0, 0, 7, 5, 4, 3, 2])
Inflow_X = np.array([0, 0, 0, 7, 5, 4, 3, 2])
Time_T_FixedLT_XX = np.arange(1, 11, 1)
lifetime_NormLT_X = {'Type': 'Normal', 'Mean': np.array([5]), 'StdDev': np.array([1.5])}
InitialStock_XX = np.array([0.01, 0.01, 0.08, 0.2, 0.2, 2, 2, 3, 4, 7.50])
Inflow_XX = np.array([ 2.61070664, 0.43955789, 0.87708508, 0.79210262, 0.4,
2.67555857, 2.20073139, 3.06983925, 4.01538044, 7.50321933])
""" Test case with normally distributed lifetime for initial stock and stock-driven model"""
Time_T_FixedLT_2 = np.arange(1, 10, 1)
Time_T_FixedLT_3 = np.arange(0, 30, 1)
lifetime_NormLT_2 = {'Type': 'Normal', 'Mean': np.array([5]), 'StdDev': np.array([1.5])}
lifetime_NormLT_3 = {'Type': 'LogNormal', 'Mean': np.array([6]), 'StdDev': np.array([3])}
lifetime_NormLT_4 = {'Type': 'Normal', 'Mean': np.array([7]), 'StdDev': np.array([2])}
InitialStock_2 = np.array([3,5,2,4])
# Imported and adapted from Trimesh
#
# Copyright (c) 2019 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# https://github.com/mikedh/trimesh/blob/510b4468d65ecb52759c9c660bf5c4791361d63f/trimesh/intersections.py#L429-L617
# https://github.com/mikedh/trimesh/blob/510b4468d65ecb52759c9c660bf5c4791361d63f/trimesh/constants.py
import logging
import numpy as np
from ..tri import FACE_DTYPE, quads_to_tris
log = logging.getLogger("polliwog.plane._trimesh_intersections")
class ToleranceMesh(object):
"""
ToleranceMesh objects hold tolerance information about meshes.
Parameters
----------------
tol.merge : float
When merging vertices, consider vertices closer than this
to be the same vertex. Here we use the same value (1e-8)
as SolidWorks uses, according to their documentation.
"""
def __init__(self, **kwargs):
# vertices closer than this should be merged
self.merge = 1e-8
tol = ToleranceMesh()
def unique_bincount(values, minlength=0):
"""
For arrays of integers find unique values using bin counting.
Roughly 10x faster for correct input than np.unique
Parameters
--------------
values : (n,) int
Values to find unique members of
minlength : int
Maximum value that will occur in values (values.max())
Returns
------------
unique : (m,) int
Unique values in original array
inverse : (n,) int, optional
An array such that unique[inverse] == values
"""
values = np.asanyarray(values)
if len(values.shape) != 1 or values.dtype.kind != "i": # pragma: no cover
raise ValueError("input must be 1D integers!")
# count the number of occurrences of each value
counts = np.bincount(values, minlength=minlength)
# which bins are occupied at all
# counts are integers so this works
unique_bin = counts.astype(bool)
# which values are unique
# indexes correspond to original values
unique = np.where(unique_bin)[0]
# find the inverse to reconstruct original
inverse = (np.cumsum(unique_bin) - 1)[values]
return unique, inverse
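# Quick illustration (not part of the original module): unique[inverse] reconstructs
# the input, e.g. unique_bincount(np.array([3, 1, 3, 0])) returns
# (array([0, 1, 3]), array([2, 1, 2, 0])), and unique[inverse] == [3, 1, 3, 0].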
def slice_faces_plane(
vertices,
faces,
plane_normal,
plane_origin,
face_index=None,
cached_dots=None,
return_face_mapping=False,
):
"""
Slice a mesh (given as a set of faces and vertices) with a plane, returning a
new mesh (again as a set of faces and vertices) that is the
portion of the original mesh to the positive normal side of the plane.
Parameters
---------
vertices : (n, 3) float
Vertices of source mesh to slice
faces : (n, 3) int
Faces of source mesh to slice
plane_normal : (3,) float
Normal vector of plane to intersect with mesh
plane_origin : (3,) float
Point on plane to intersect with mesh
face_index : ((m,) int)
Indexes of faces to slice. When no mask is provided, the
default is to slice all faces.
cached_dots : (n, 3) float
If an external function has stored dot
products pass them here to avoid recomputing
return_face_mapping : bool
When True, return the indices of the old faces to which the new faces
correspond.
Returns
----------
new_vertices : (n, 3) float
Vertices of sliced mesh
new_faces : (n, 3) int
Faces of sliced mesh
"""
if len(vertices) == 0:
empty = (vertices, faces)
if return_face_mapping:
return (*empty, np.arange(len(faces)))
else:
return empty
# Construct a mask for the faces to slice.
if face_index is None:
        mask = np.ones(len(faces), dtype=bool)
    else:
        mask = np.zeros(len(faces), dtype=bool)
mask[face_index] = True
if cached_dots is not None: # pragma: no cover
dots = cached_dots
else:
        # dot product of (vertex - plane_origin) with the plane normal,
        # one value per vertex; regrouped per face below via signs[faces]
dots = np.einsum("i,ij->j", plane_normal, (vertices - plane_origin).T)
# Find vertex orientations w.r.t. faces for all triangles:
# -1 -> vertex "inside" plane (positive normal direction)
# 0 -> vertex on plane
# 1 -> vertex "outside" plane (negative normal direction)
signs = np.zeros(len(vertices), dtype=np.int8)
signs[dots < -tol.merge] = 1
signs[dots > tol.merge] = -1
signs = signs[faces]
# Find all triangles that intersect this plane
# onedge <- indices of all triangles intersecting the plane
# inside <- indices of all triangles "inside" the plane (positive normal)
signs_sum = signs.sum(axis=1, dtype=np.int8)
signs_asum = np.abs(signs).sum(axis=1, dtype=np.int8)
# Cases:
# (0,0,0), (-1,0,0), (-1,-1,0), (-1,-1,-1) <- inside
# (1,0,0), (1,1,0), (1,1,1) <- outside
# (1,0,-1), (1,-1,-1), (1,1,-1) <- onedge
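    # For example (illustrative): signs (-1, -1, 1) give signs_sum == -1 and
    # signs_asum == 3, so the face is "onedge"; (-1, 0, 0) gives -1 and 1, so it is
    # "inside"; (1, 1, 0) gives 2 and 2, so it is dropped as "outside".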
onedge = np.logical_and(
np.logical_and(signs_asum >= 2, np.abs(signs_sum) <= 1), mask
)
inside = np.logical_or((signs_sum == -signs_asum), ~mask)
# Automatically include all faces that are "inside"
new_faces = faces[inside]
if return_face_mapping:
new_face_mapping = inside.nonzero()[0]
# Separate faces on the edge into two cases: those which will become
# quads (two vertices inside plane) and those which will become triangles
# (one vertex inside plane)
onedge_quad = np.logical_and(onedge, signs_sum < 0).nonzero()[0]
cut_faces_quad = faces[onedge_quad]
cut_signs_quad = signs[onedge_quad]
onedge_tri = np.logical_and(onedge, signs_sum >= 0).nonzero()[0]
cut_faces_tri = faces[onedge_tri]
cut_signs_tri = signs[onedge_tri]
# If no faces to cut, the surface is not in contact with this plane.
# Thus, return a mesh with only the inside faces
if len(cut_faces_quad) + len(cut_faces_tri) == 0:
if len(new_faces) == 0:
# if no new faces at all return empty arrays
empty = (
np.zeros((0, 3), dtype=np.float64),
np.zeros((0, 3), dtype=FACE_DTYPE),
)
if return_face_mapping:
return (*empty, new_face_mapping)
else:
return empty
# Renumber vertices, dropping any which have been orphaned.
unique, inverse = unique_bincount(new_faces.ravel())
final = (
vertices[unique],
inverse.reshape((-1, 3)),
)
if return_face_mapping:
return (*final, new_face_mapping)
else:
return final
# Extract the intersections of each triangle's edges with the plane
o = vertices[faces][onedge] # origins
    d = np.roll(o, -1, axis=1)
# -*- coding: utf-8 -*-
"""
Test nematusLL for consistency with nematus
"""
import os
import unittest
import sys
import numpy as np
import logging
import Pyro4
nem_path = os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../'))
sys.path.insert(1, nem_path)
from nematus.pyro_utils import setup_remotes, get_random_key, get_unused_port
from nematus.util import load_dict
from unit_test_utils import initialize
GPU_ID = 0
VOCAB_SIZE = 90000
SRC = 'ro'
TGT = 'en'
DATA_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'test_data')
model_options = dict(factors=1, # input factors
dim_per_factor=None,
# list of word vector dimensionalities (one per factor): [250,200,50] for dimensionality of 500
encoder='gru',
decoder='gru_cond',
patience=10, # early stopping patience
max_epochs=5000,
finish_after=10000000, # finish after this many updates
map_decay_c=0., # L2 regularization penalty towards original weights
alpha_c=0., # alignment regularization
shuffle_each_epoch=True,
finetune=False,
finetune_only_last=False,
sort_by_length=True,
use_domain_interpolation=False,
domain_interpolation_min=0.1,
domain_interpolation_inc=0.1,
domain_interpolation_indomain_datasets=['indomain.en', 'indomain.fr'],
maxibatch_size=20, # How many minibatches to load at one time
model_version=0.1, # store version used for training for compatibility
pyro_key=None, # pyro hmac key
pyro_port=None, # pyro nameserver port
pyro_name=None, # if None, will import instead of assuming a server is running
saveto='model.npz',
reload_=True,
dim_word=500,
dim=1024,
n_words=VOCAB_SIZE,
n_words_src=VOCAB_SIZE,
decay_c=0.,
clip_c=1.,
lrate=0.0001,
optimizer='adadelta',
maxlen=50,
batch_size=80,
valid_batch_size=80,
datasets=[DATA_DIR + '/corpus.bpe.' + SRC, DATA_DIR + '/corpus.bpe.' + TGT],
valid_datasets=[DATA_DIR + '/newsdev2016.bpe.' + SRC, DATA_DIR + '/newsdev2016.bpe.' + TGT],
dictionaries=[DATA_DIR + '/corpus.bpe.' + SRC + '.json',
DATA_DIR + '/corpus.bpe.' + TGT + '.json'],
validFreq=10000,
dispFreq=10,
saveFreq=30000,
sampleFreq=10000,
use_dropout=False,
dropout_embedding=0.2, # dropout for input embeddings (0: no dropout)
dropout_hidden=0.2, # dropout for hidden layers (0: no dropout)
dropout_source=0.1, # dropout source words (0: no dropout)
dropout_target=0.1, # dropout target words (0: no dropout)
overwrite=False,
external_validation_script='./validate.sh',
)
x0 = np.array([[[3602],
[8307],
[7244],
[7],
[58],
[9],
[5893],
[62048],
[11372],
[4029],
[25],
[34],
[2278],
[5],
[4266],
[11],
[2852],
[3],
[2298],
[2],
[23912],
[6],
[16358],
[3],
[730],
[2328],
[5],
[28],
[353],
[4],
[0], ]]) # 0 = EOS
xx0 = np.tile(x0, [1, 1, 2])
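# Note (sketch): x0 above has shape (1, 31, 1), so np.tile(x0, [1, 1, 2]) repeats it
# along the last axis and xx0 has shape (1, 31, 2) -- the same sentence twice
# (the last axis is presumably the sample/batch axis).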
# AUTO GENERATED. DO NOT CHANGE!
from ctypes import *
import numpy as np
class MJCONTACT(Structure):
_fields_ = [
("dist", c_double),
("pos", c_double * 3),
("frame", c_double * 9),
("includemargin", c_double),
("friction", c_double * 5),
("solref", c_double * 2),
("solimp", c_double * 3),
("mu", c_double),
("coef", c_double * 5),
("zone", c_int),
("dim", c_int),
("geom1", c_int),
("geom2", c_int),
("exclude", c_int),
("efc_address", c_int),
]
class MJRRECT(Structure):
_fields_ = [
("left", c_int),
("bottom", c_int),
("width", c_int),
("height", c_int),
]
class MJVCAMERAPOSE(Structure):
_fields_ = [
("head_pos", c_double * 3),
("head_right", c_double * 3),
("window_pos", c_double * 3),
("window_right", c_double * 3),
("window_up", c_double * 3),
("window_normal", c_double * 3),
("window_size", c_double * 2),
("scale", c_double),
("ipd", c_double),
]
class MJROPTION(Structure):
_fields_ = [
("stereo", c_ubyte),
("flags", c_ubyte * 6),
]
class MJRCONTEXT(Structure):
_fields_ = [
("linewidth", c_float),
("znear", c_float),
("zfar", c_float),
("shadowclip", c_float),
("shadowscale", c_float),
("shadowsize", c_int),
("offwidth", c_uint),
("offheight", c_uint),
("offFBO", c_uint),
("offColor", c_uint),
("offDepthStencil", c_uint),
("shadowFBO", c_uint),
("shadowTex", c_uint),
("ntexture", c_uint),
("texture", c_int * 100),
("textureType", c_int * 100),
("basePlane", c_uint),
("baseMesh", c_uint),
("baseHField", c_uint),
("baseBuiltin", c_uint),
("baseFontNormal", c_uint),
("baseFontBack", c_uint),
("baseFontBig", c_uint),
("rangePlane", c_int),
("rangeMesh", c_int),
("rangeHField", c_int),
("rangeBuiltin", c_int),
("rangeFont", c_int),
("charWidth", c_int * 127),
("charWidthBig", c_int * 127),
("charHeight", c_int),
("charHeightBig", c_int),
("glewInitialized", c_int),
]
class MJVCAMERA(Structure):
_fields_ = [
("fovy", c_double),
("camid", c_int),
("trackbodyid", c_int),
("lookat", c_double * 3),
("azimuth", c_double),
("elevation", c_double),
("distance", c_double),
("pose", MJVCAMERAPOSE),
("VR", c_ubyte),
]
class MJVOPTION(Structure):
_fields_ = [
("label", c_int),
("frame", c_int),
("geomgroup", c_ubyte * 5),
("sitegroup", c_ubyte * 5),
("flags", c_ubyte * 18),
]
class MJVGEOM(Structure):
_fields_ = [
("type", c_int),
("dataid", c_int),
("objtype", c_int),
("objid", c_int),
("category", c_int),
("texid", c_int),
("texuniform", c_int),
("texrepeat", c_float * 2),
("size", c_float * 3),
("pos", c_float * 3),
("mat", c_float * 9),
("rgba", c_float * 4),
("emission", c_float),
("specular", c_float),
("shininess", c_float),
("reflectance", c_float),
("label", c_char * 100),
("camdist", c_float),
("rbound", c_float),
("transparent", c_ubyte),
]
class MJVLIGHT(Structure):
_fields_ = [
("pos", c_float * 3),
("dir", c_float * 3),
("attenuation", c_float * 3),
("cutoff", c_float),
("exponent", c_float),
("ambient", c_float * 3),
("diffuse", c_float * 3),
("specular", c_float * 3),
("headlight", c_ubyte),
("directional", c_ubyte),
("castshadow", c_ubyte),
]
class MJVOBJECTS(Structure):
_fields_ = [
("nlight", c_int),
("ngeom", c_int),
("maxgeom", c_int),
("lights", MJVLIGHT * 8),
("geoms", POINTER(MJVGEOM)),
("geomorder", POINTER(c_int)),
]
class MJOPTION(Structure):
_fields_ = [
("timestep", c_double),
("apirate", c_double),
("tolerance", c_double),
("impratio", c_double),
("gravity", c_double * 3),
("wind", c_double * 3),
("magnetic", c_double * 3),
("density", c_double),
("viscosity", c_double),
("o_margin", c_double),
("o_solref", c_double * 2),
("o_solimp", c_double * 3),
("mpr_tolerance", c_double),
("mpr_iterations", c_int),
("integrator", c_int),
("collision", c_int),
("impedance", c_int),
("reference", c_int),
("solver", c_int),
("iterations", c_int),
("disableflags", c_int),
("enableflags", c_int),
]
class MJVISUAL(Structure):
class ANON_GLOBAL(Structure):
_fields_ = [
("fovy", c_float),
("ipd", c_float),
("linewidth", c_float),
("glow", c_float),
("offwidth", c_int),
("offheight", c_int),
]
class ANON_QUALITY(Structure):
_fields_ = [
("shadowsize", c_int),
("numSlices", c_int),
("numStacks", c_int),
("numArrows", c_int),
("numQuads", c_int),
]
class ANON_HEADLIGHT(Structure):
_fields_ = [
("ambient", c_float * 3),
("diffuse", c_float * 3),
("specular", c_float * 3),
("active", c_int),
]
class ANON_MAP(Structure):
_fields_ = [
("stiffness", c_float),
("force", c_float),
("torque", c_float),
("alpha", c_float),
("fogstart", c_float),
("fogend", c_float),
("znear", c_float),
("zfar", c_float),
("shadowclip", c_float),
("shadowscale", c_float),
]
class ANON_SCALE(Structure):
_fields_ = [
("forcewidth", c_float),
("contactwidth", c_float),
("contactheight", c_float),
("connect", c_float),
("com", c_float),
("camera", c_float),
("light", c_float),
("selectpoint", c_float),
("jointlength", c_float),
("jointwidth", c_float),
("actuatorlength", c_float),
("actuatorwidth", c_float),
("framelength", c_float),
("framewidth", c_float),
("constraint", c_float),
("slidercrank", c_float),
]
class ANON_RGBA(Structure):
_fields_ = [
("fog", c_float * 4),
("force", c_float * 4),
("inertia", c_float * 4),
("joint", c_float * 4),
("actuator", c_float * 4),
("com", c_float * 4),
("camera", c_float * 4),
("light", c_float * 4),
("selectpoint", c_float * 4),
("connect", c_float * 4),
("contactpoint", c_float * 4),
("contactforce", c_float * 4),
("contactfriction", c_float * 4),
("contacttorque", c_float * 4),
("constraint", c_float * 4),
("slidercrank", c_float * 4),
("crankbroken", c_float * 4),
]
_fields_ = [
("global_", ANON_GLOBAL),
("quality", ANON_QUALITY),
("headlight", ANON_HEADLIGHT),
("map_", ANON_MAP),
("scale", ANON_SCALE),
("rgba", ANON_RGBA),
]
class MJSTATISTIC(Structure):
_fields_ = [
("meanmass", c_double),
("meansize", c_double),
("extent", c_double),
("center", c_double * 3),
]
class MJDATA(Structure):
_fields_ = [
("nstack", c_int),
("nbuffer", c_int),
("pstack", c_int),
("maxstackuse", c_int),
("ne", c_int),
("nf", c_int),
("nefc", c_int),
("ncon", c_int),
("nwarning", c_int * 8),
("warning_info", c_int * 8),
("timer_duration", c_double * 14),
("timer_ncall", c_double * 14),
("mocaptime", c_double * 3),
("time", c_double),
("energy", c_double * 2),
("solverstat", c_double * 4),
("solvertrace", c_double * 200),
("buffer", POINTER(c_ubyte)),
("stack", POINTER(c_double)),
("qpos", POINTER(c_double)),
("qvel", POINTER(c_double)),
("act", POINTER(c_double)),
("ctrl", POINTER(c_double)),
("qfrc_applied", POINTER(c_double)),
("xfrc_applied", POINTER(c_double)),
("qacc", POINTER(c_double)),
("act_dot", POINTER(c_double)),
("mocap_pos", POINTER(c_double)),
("mocap_quat", POINTER(c_double)),
("userdata", POINTER(c_double)),
("sensordata", POINTER(c_double)),
("xpos", POINTER(c_double)),
("xquat", POINTER(c_double)),
("xmat", POINTER(c_double)),
("xipos", POINTER(c_double)),
("ximat", POINTER(c_double)),
("xanchor", POINTER(c_double)),
("xaxis", POINTER(c_double)),
("geom_xpos", POINTER(c_double)),
("geom_xmat", POINTER(c_double)),
("site_xpos", POINTER(c_double)),
("site_xmat", POINTER(c_double)),
("cam_xpos", POINTER(c_double)),
("cam_xmat", POINTER(c_double)),
("light_xpos", POINTER(c_double)),
("light_xdir", POINTER(c_double)),
("com_subtree", POINTER(c_double)),
("cdof", POINTER(c_double)),
("cinert", POINTER(c_double)),
("ten_wrapadr", POINTER(c_int)),
("ten_wrapnum", POINTER(c_int)),
("ten_length", POINTER(c_double)),
("ten_moment", POINTER(c_double)),
("wrap_obj", POINTER(c_int)),
("wrap_xpos", POINTER(c_double)),
("actuator_length", POINTER(c_double)),
("actuator_moment", POINTER(c_double)),
("crb", POINTER(c_double)),
("qM", POINTER(c_double)),
("qLD", POINTER(c_double)),
("qLDiagInv", POINTER(c_double)),
("qLDiagSqrtInv", POINTER(c_double)),
("contact", POINTER(MJCONTACT)),
("efc_type", POINTER(c_int)),
("efc_id", POINTER(c_int)),
("efc_rownnz", POINTER(c_int)),
("efc_rowadr", POINTER(c_int)),
("efc_colind", POINTER(c_int)),
("efc_rownnz_T", POINTER(c_int)),
("efc_rowadr_T", POINTER(c_int)),
("efc_colind_T", POINTER(c_int)),
("efc_solref", POINTER(c_double)),
("efc_solimp", POINTER(c_double)),
("efc_margin", POINTER(c_double)),
("efc_frictionloss", POINTER(c_double)),
("efc_pos", POINTER(c_double)),
("efc_J", POINTER(c_double)),
("efc_J_T", POINTER(c_double)),
("efc_diagApprox", POINTER(c_double)),
("efc_D", POINTER(c_double)),
("efc_R", POINTER(c_double)),
("efc_AR", POINTER(c_double)),
("e_ARchol", POINTER(c_double)),
("fc_e_rect", POINTER(c_double)),
("fc_AR", POINTER(c_double)),
("ten_velocity", POINTER(c_double)),
("actuator_velocity", POINTER(c_double)),
("cvel", POINTER(c_double)),
("cdof_dot", POINTER(c_double)),
("qfrc_bias", POINTER(c_double)),
("qfrc_passive", POINTER(c_double)),
("efc_vel", POINTER(c_double)),
("efc_aref", POINTER(c_double)),
("actuator_force", POINTER(c_double)),
("qfrc_actuator", POINTER(c_double)),
("qfrc_unc", POINTER(c_double)),
("qacc_unc", POINTER(c_double)),
("efc_b", POINTER(c_double)),
("fc_b", POINTER(c_double)),
("efc_force", POINTER(c_double)),
("qfrc_constraint", POINTER(c_double)),
("qfrc_inverse", POINTER(c_double)),
("cacc", POINTER(c_double)),
("cfrc_int", POINTER(c_double)),
("cfrc_ext", POINTER(c_double)),
]
class MJMODEL(Structure):
_fields_ = [
("nq", c_int),
("nv", c_int),
("nu", c_int),
("na", c_int),
("nbody", c_int),
("njnt", c_int),
("ngeom", c_int),
("nsite", c_int),
("ncam", c_int),
("nlight", c_int),
("nmesh", c_int),
("nmeshvert", c_int),
("nmeshface", c_int),
("nmeshgraph", c_int),
("nhfield", c_int),
("nhfielddata", c_int),
("ntex", c_int),
("ntexdata", c_int),
("nmat", c_int),
("npair", c_int),
("nexclude", c_int),
("neq", c_int),
("ntendon", c_int),
("nwrap", c_int),
("nsensor", c_int),
("nnumeric", c_int),
("nnumericdata", c_int),
("ntext", c_int),
("ntextdata", c_int),
("nkey", c_int),
("nuser_body", c_int),
("nuser_jnt", c_int),
("nuser_geom", c_int),
("nuser_site", c_int),
("nuser_tendon", c_int),
("nuser_actuator", c_int),
("nuser_sensor", c_int),
("nnames", c_int),
("nM", c_int),
("nemax", c_int),
("njmax", c_int),
("nconmax", c_int),
("nstack", c_int),
("nuserdata", c_int),
("nmocap", c_int),
("nsensordata", c_int),
("nbuffer", c_int),
("opt", MJOPTION),
("vis", MJVISUAL),
("stat", MJSTATISTIC),
("buffer", POINTER(c_ubyte)),
("qpos0", POINTER(c_double)),
("qpos_spring", POINTER(c_double)),
("body_parentid", POINTER(c_int)),
("body_rootid", POINTER(c_int)),
("body_weldid", POINTER(c_int)),
("body_mocapid", POINTER(c_int)),
("body_jntnum", POINTER(c_int)),
("body_jntadr", POINTER(c_int)),
("body_dofnum", POINTER(c_int)),
("body_dofadr", POINTER(c_int)),
("body_geomnum", POINTER(c_int)),
("body_geomadr", POINTER(c_int)),
("body_pos", POINTER(c_double)),
("body_quat", POINTER(c_double)),
("body_ipos", POINTER(c_double)),
("body_iquat", POINTER(c_double)),
("body_mass", POINTER(c_double)),
("body_inertia", POINTER(c_double)),
("body_invweight0", POINTER(c_double)),
("body_user", POINTER(c_double)),
("jnt_type", POINTER(c_int)),
("jnt_qposadr", POINTER(c_int)),
("jnt_dofadr", POINTER(c_int)),
("jnt_bodyid", POINTER(c_int)),
("jnt_limited", POINTER(c_ubyte)),
("jnt_solref", POINTER(c_double)),
("jnt_solimp", POINTER(c_double)),
("jnt_pos", POINTER(c_double)),
("jnt_axis", POINTER(c_double)),
("jnt_stiffness", POINTER(c_double)),
("jnt_range", POINTER(c_double)),
("jnt_margin", POINTER(c_double)),
("jnt_user", POINTER(c_double)),
("dof_bodyid", POINTER(c_int)),
("dof_jntid", POINTER(c_int)),
("dof_parentid", POINTER(c_int)),
("dof_Madr", POINTER(c_int)),
("dof_frictional", POINTER(c_ubyte)),
("dof_solref", POINTER(c_double)),
("dof_solimp", POINTER(c_double)),
("dof_frictionloss", POINTER(c_double)),
("dof_armature", POINTER(c_double)),
("dof_damping", POINTER(c_double)),
("dof_invweight0", POINTER(c_double)),
("geom_type", POINTER(c_int)),
("geom_contype", POINTER(c_int)),
("geom_conaffinity", POINTER(c_int)),
("geom_condim", POINTER(c_int)),
("geom_bodyid", POINTER(c_int)),
("geom_dataid", POINTER(c_int)),
("geom_matid", POINTER(c_int)),
("geom_group", POINTER(c_int)),
("geom_solmix", POINTER(c_double)),
("geom_solref", POINTER(c_double)),
("geom_solimp", POINTER(c_double)),
("geom_size", POINTER(c_double)),
("geom_rbound", POINTER(c_double)),
("geom_pos", POINTER(c_double)),
("geom_quat", POINTER(c_double)),
("geom_friction", POINTER(c_double)),
("geom_margin", POINTER(c_double)),
("geom_gap", POINTER(c_double)),
("geom_user", POINTER(c_double)),
("geom_rgba", POINTER(c_float)),
("site_type", POINTER(c_int)),
("site_bodyid", POINTER(c_int)),
("site_matid", POINTER(c_int)),
("site_group", POINTER(c_int)),
("site_size", POINTER(c_double)),
("site_pos", POINTER(c_double)),
("site_quat", POINTER(c_double)),
("site_user", POINTER(c_double)),
("site_rgba", POINTER(c_float)),
("cam_mode", POINTER(c_int)),
("cam_bodyid", POINTER(c_int)),
("cam_targetbodyid", POINTER(c_int)),
("cam_pos", POINTER(c_double)),
("cam_quat", POINTER(c_double)),
("cam_poscom0", POINTER(c_double)),
("cam_pos0", POINTER(c_double)),
("cam_mat0", POINTER(c_double)),
("cam_fovy", POINTER(c_double)),
("cam_ipd", POINTER(c_double)),
("light_mode", POINTER(c_int)),
("light_bodyid", POINTER(c_int)),
("light_targetbodyid", POINTER(c_int)),
("light_directional", POINTER(c_ubyte)),
("light_castshadow", POINTER(c_ubyte)),
("light_active", POINTER(c_ubyte)),
("light_pos", POINTER(c_double)),
("light_dir", POINTER(c_double)),
("light_poscom0", POINTER(c_double)),
("light_pos0", POINTER(c_double)),
("light_dir0", POINTER(c_double)),
("light_attenuation", POINTER(c_float)),
("light_cutoff", POINTER(c_float)),
("light_exponent", POINTER(c_float)),
("light_ambient", POINTER(c_float)),
("light_diffuse", POINTER(c_float)),
("light_specular", POINTER(c_float)),
("mesh_faceadr", POINTER(c_int)),
("mesh_facenum", POINTER(c_int)),
("mesh_vertadr", POINTER(c_int)),
("mesh_vertnum", POINTER(c_int)),
("mesh_graphadr", POINTER(c_int)),
("mesh_vert", POINTER(c_float)),
("mesh_normal", POINTER(c_float)),
("mesh_face", POINTER(c_int)),
("mesh_graph", POINTER(c_int)),
("hfield_size", POINTER(c_double)),
("hfield_nrow", POINTER(c_int)),
("hfield_ncol", POINTER(c_int)),
("hfield_adr", POINTER(c_int)),
("hfield_data", POINTER(c_float)),
("tex_type", POINTER(c_int)),
("tex_height", POINTER(c_int)),
("tex_width", POINTER(c_int)),
("tex_adr", POINTER(c_int)),
("tex_rgb", POINTER(c_ubyte)),
("mat_texid", POINTER(c_int)),
("mat_texuniform", POINTER(c_ubyte)),
("mat_texrepeat", POINTER(c_float)),
("mat_emission", POINTER(c_float)),
("mat_specular", POINTER(c_float)),
("mat_shininess", POINTER(c_float)),
("mat_reflectance", POINTER(c_float)),
("mat_rgba", POINTER(c_float)),
("pair_dim", POINTER(c_int)),
("pair_geom1", POINTER(c_int)),
("pair_geom2", POINTER(c_int)),
("pair_signature", POINTER(c_int)),
("pair_solref", POINTER(c_double)),
("pair_solimp", POINTER(c_double)),
("pair_margin", POINTER(c_double)),
("pair_gap", POINTER(c_double)),
("pair_friction", POINTER(c_double)),
("exclude_signature", POINTER(c_int)),
("eq_type", POINTER(c_int)),
("eq_obj1id", POINTER(c_int)),
("eq_obj2id", POINTER(c_int)),
("eq_active", POINTER(c_ubyte)),
("eq_solref", POINTER(c_double)),
("eq_solimp", POINTER(c_double)),
("eq_data", POINTER(c_double)),
("tendon_adr", POINTER(c_int)),
("tendon_num", POINTER(c_int)),
("tendon_matid", POINTER(c_int)),
("tendon_limited", POINTER(c_ubyte)),
("tendon_frictional", POINTER(c_ubyte)),
("tendon_width", POINTER(c_double)),
("tendon_solref_lim", POINTER(c_double)),
("tendon_solimp_lim", POINTER(c_double)),
("tendon_solref_fri", POINTER(c_double)),
("tendon_solimp_fri", POINTER(c_double)),
("tendon_range", POINTER(c_double)),
("tendon_margin", POINTER(c_double)),
("tendon_stiffness", POINTER(c_double)),
("tendon_damping", POINTER(c_double)),
("tendon_frictionloss", POINTER(c_double)),
("tendon_lengthspring", POINTER(c_double)),
("tendon_length0", POINTER(c_double)),
("tendon_invweight0", POINTER(c_double)),
("tendon_user", POINTER(c_double)),
("tendon_rgba", POINTER(c_float)),
("wrap_type", POINTER(c_int)),
("wrap_objid", POINTER(c_int)),
("wrap_prm", POINTER(c_double)),
("actuator_trntype", POINTER(c_int)),
("actuator_dyntype", POINTER(c_int)),
("actuator_gaintype", POINTER(c_int)),
("actuator_biastype", POINTER(c_int)),
("actuator_trnid", POINTER(c_int)),
("actuator_ctrllimited", POINTER(c_ubyte)),
("actuator_forcelimited", POINTER(c_ubyte)),
("actuator_dynprm", POINTER(c_double)),
("actuator_gainprm", POINTER(c_double)),
("actuator_biasprm", POINTER(c_double)),
("actuator_ctrlrange", POINTER(c_double)),
("actuator_forcerange", POINTER(c_double)),
("actuator_gear", POINTER(c_double)),
("actuator_cranklength", POINTER(c_double)),
("actuator_invweight0", POINTER(c_double)),
("actuator_length0", POINTER(c_double)),
("actuator_lengthrange", POINTER(c_double)),
("actuator_user", POINTER(c_double)),
("sensor_type", POINTER(c_int)),
("sensor_objid", POINTER(c_int)),
("sensor_dim", POINTER(c_int)),
("sensor_adr", POINTER(c_int)),
("sensor_scale", POINTER(c_double)),
("sensor_user", POINTER(c_double)),
("numeric_adr", POINTER(c_int)),
("numeric_size", POINTER(c_int)),
("numeric_data", POINTER(c_double)),
("text_adr", POINTER(c_int)),
("text_data", POINTER(c_char)),
("key_time", POINTER(c_double)),
("key_qpos", POINTER(c_double)),
("key_qvel", POINTER(c_double)),
("key_act", POINTER(c_double)),
("name_bodyadr", POINTER(c_int)),
("name_jntadr", POINTER(c_int)),
("name_geomadr", POINTER(c_int)),
("name_siteadr", POINTER(c_int)),
("name_camadr", POINTER(c_int)),
("name_lightadr", POINTER(c_int)),
("name_meshadr", POINTER(c_int)),
("name_hfieldadr", POINTER(c_int)),
("name_texadr", POINTER(c_int)),
("name_matadr", POINTER(c_int)),
("name_eqadr", POINTER(c_int)),
("name_tendonadr", POINTER(c_int)),
("name_actuatoradr", POINTER(c_int)),
("name_sensoradr", POINTER(c_int)),
("name_numericadr", POINTER(c_int)),
("name_textadr", POINTER(c_int)),
("names", POINTER(c_char)),
]
class MjContactWrapper(object):
def __init__(self, wrapped, size_src=None):
self._wrapped = wrapped
self._size_src = size_src
@property
def ptr(self):
return self._wrapped
@property
def obj(self):
return self._wrapped.contents
@property
def dist(self):
return self._wrapped.contents.dist
@dist.setter
def dist(self, value):
self._wrapped.contents.dist = value
@property
def pos(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.pos, dtype=np.double, count=(3)), (3, ))
arr.setflags(write=False)
return arr
@pos.setter
def pos(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.pos, val_ptr, 3 * sizeof(c_double))
@property
def frame(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.frame, dtype=np.double, count=(9)), (9, ))
arr.setflags(write=False)
return arr
@frame.setter
def frame(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.frame, val_ptr, 9 * sizeof(c_double))
@property
def includemargin(self):
return self._wrapped.contents.includemargin
@includemargin.setter
def includemargin(self, value):
self._wrapped.contents.includemargin = value
@property
def friction(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.friction, dtype=np.double, count=(5)), (5, ))
arr.setflags(write=False)
return arr
@friction.setter
def friction(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.friction, val_ptr, 5 * sizeof(c_double))
@property
def solref(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.solref, dtype=np.double, count=(2)), (2, ))
arr.setflags(write=False)
return arr
@solref.setter
def solref(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.solref, val_ptr, 2 * sizeof(c_double))
@property
def solimp(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.solimp, dtype=np.double, count=(3)), (3, ))
arr.setflags(write=False)
return arr
@solimp.setter
def solimp(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.solimp, val_ptr, 3 * sizeof(c_double))
@property
def mu(self):
return self._wrapped.contents.mu
@mu.setter
def mu(self, value):
self._wrapped.contents.mu = value
@property
def coef(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.coef, dtype=np.double, count=(5)), (5, ))
arr.setflags(write=False)
return arr
@coef.setter
def coef(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.coef, val_ptr, 5 * sizeof(c_double))
@property
def zone(self):
return self._wrapped.contents.zone
@zone.setter
def zone(self, value):
self._wrapped.contents.zone = value
@property
def dim(self):
return self._wrapped.contents.dim
@dim.setter
def dim(self, value):
self._wrapped.contents.dim = value
@property
def geom1(self):
return self._wrapped.contents.geom1
@geom1.setter
def geom1(self, value):
self._wrapped.contents.geom1 = value
@property
def geom2(self):
return self._wrapped.contents.geom2
@geom2.setter
def geom2(self, value):
self._wrapped.contents.geom2 = value
@property
def exclude(self):
return self._wrapped.contents.exclude
@exclude.setter
def exclude(self, value):
self._wrapped.contents.exclude = value
@property
def efc_address(self):
return self._wrapped.contents.efc_address
@efc_address.setter
def efc_address(self, value):
self._wrapped.contents.efc_address = value
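# A minimal usage sketch (hypothetical; `contact_ptr` stands for a ctypes pointer
# to the underlying contact struct obtained from the bound MuJoCo API):
#
#     c = MjContactWrapper(contact_ptr)
#     c.dist                # scalar read, proxied straight from the struct
#     p = c.pos             # read-only (3,) NumPy copy of the contact position
#     c.pos = [0., 0., 1.]  # setter memmoves three doubles into the C struct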
class MjrRectWrapper(object):
def __init__(self, wrapped, size_src=None):
self._wrapped = wrapped
self._size_src = size_src
@property
def ptr(self):
return self._wrapped
@property
def obj(self):
return self._wrapped.contents
@property
def left(self):
return self._wrapped.contents.left
@left.setter
def left(self, value):
self._wrapped.contents.left = value
@property
def bottom(self):
return self._wrapped.contents.bottom
@bottom.setter
def bottom(self, value):
self._wrapped.contents.bottom = value
@property
def width(self):
return self._wrapped.contents.width
@width.setter
def width(self, value):
self._wrapped.contents.width = value
@property
def height(self):
return self._wrapped.contents.height
@height.setter
def height(self, value):
self._wrapped.contents.height = value
class MjvCameraPoseWrapper(object):
def __init__(self, wrapped, size_src=None):
self._wrapped = wrapped
self._size_src = size_src
@property
def ptr(self):
return self._wrapped
@property
def obj(self):
return self._wrapped.contents
@property
def head_pos(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.head_pos, dtype=np.double, count=(3)), (3, ))
arr.setflags(write=False)
return arr
@head_pos.setter
def head_pos(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.head_pos, val_ptr, 3 * sizeof(c_double))
@property
def head_right(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.head_right, dtype=np.double, count=(3)), (3, ))
arr.setflags(write=False)
return arr
@head_right.setter
def head_right(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.head_right, val_ptr, 3 * sizeof(c_double))
@property
def window_pos(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.window_pos, dtype=np.double, count=(3)), (3, ))
arr.setflags(write=False)
return arr
@window_pos.setter
def window_pos(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.window_pos, val_ptr, 3 * sizeof(c_double))
@property
def window_right(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.window_right, dtype=np.double, count=(3)), (3, ))
arr.setflags(write=False)
return arr
@window_right.setter
def window_right(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.window_right, val_ptr, 3 * sizeof(c_double))
@property
def window_up(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.window_up, dtype=np.double, count=(3)), (3, ))
arr.setflags(write=False)
return arr
@window_up.setter
def window_up(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.window_up, val_ptr, 3 * sizeof(c_double))
@property
def window_normal(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.window_normal, dtype=np.double, count=(3)), (3, ))
arr.setflags(write=False)
return arr
@window_normal.setter
def window_normal(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.window_normal, val_ptr, 3 * sizeof(c_double))
@property
def window_size(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.window_size, dtype=np.double, count=(2)), (2, ))
arr.setflags(write=False)
return arr
@window_size.setter
def window_size(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.window_size, val_ptr, 2 * sizeof(c_double))
@property
def scale(self):
return self._wrapped.contents.scale
@scale.setter
def scale(self, value):
self._wrapped.contents.scale = value
@property
def ipd(self):
return self._wrapped.contents.ipd
@ipd.setter
def ipd(self, value):
self._wrapped.contents.ipd = value
class MjrOptionWrapper(object):
def __init__(self, wrapped, size_src=None):
self._wrapped = wrapped
self._size_src = size_src
@property
def ptr(self):
return self._wrapped
@property
def obj(self):
return self._wrapped.contents
@property
def stereo(self):
return self._wrapped.contents.stereo
@stereo.setter
def stereo(self, value):
self._wrapped.contents.stereo = value
@property
def flags(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.flags, dtype=np.uint8, count=(6)), (6, ))
arr.setflags(write=False)
return arr
@flags.setter
def flags(self, value):
val_ptr = np.array(value, dtype=np.uint8).ctypes.data_as(POINTER(c_ubyte))
memmove(self._wrapped.contents.flags, val_ptr, 6 * sizeof(c_ubyte))
class MjrContextWrapper(object):
def __init__(self, wrapped, size_src=None):
self._wrapped = wrapped
self._size_src = size_src
@property
def ptr(self):
return self._wrapped
@property
def obj(self):
return self._wrapped.contents
@property
def linewidth(self):
return self._wrapped.contents.linewidth
@linewidth.setter
def linewidth(self, value):
self._wrapped.contents.linewidth = value
@property
def znear(self):
return self._wrapped.contents.znear
@znear.setter
def znear(self, value):
self._wrapped.contents.znear = value
@property
def zfar(self):
return self._wrapped.contents.zfar
@zfar.setter
def zfar(self, value):
self._wrapped.contents.zfar = value
@property
def shadowclip(self):
return self._wrapped.contents.shadowclip
@shadowclip.setter
def shadowclip(self, value):
self._wrapped.contents.shadowclip = value
@property
def shadowscale(self):
return self._wrapped.contents.shadowscale
@shadowscale.setter
def shadowscale(self, value):
self._wrapped.contents.shadowscale = value
@property
def shadowsize(self):
return self._wrapped.contents.shadowsize
@shadowsize.setter
def shadowsize(self, value):
self._wrapped.contents.shadowsize = value
@property
def offwidth(self):
return self._wrapped.contents.offwidth
@offwidth.setter
def offwidth(self, value):
self._wrapped.contents.offwidth = value
@property
def offheight(self):
return self._wrapped.contents.offheight
@offheight.setter
def offheight(self, value):
self._wrapped.contents.offheight = value
@property
def offFBO(self):
return self._wrapped.contents.offFBO
@offFBO.setter
def offFBO(self, value):
self._wrapped.contents.offFBO = value
@property
def offColor(self):
return self._wrapped.contents.offColor
@offColor.setter
def offColor(self, value):
self._wrapped.contents.offColor = value
@property
def offDepthStencil(self):
return self._wrapped.contents.offDepthStencil
@offDepthStencil.setter
def offDepthStencil(self, value):
self._wrapped.contents.offDepthStencil = value
@property
def shadowFBO(self):
return self._wrapped.contents.shadowFBO
@shadowFBO.setter
def shadowFBO(self, value):
self._wrapped.contents.shadowFBO = value
@property
def shadowTex(self):
return self._wrapped.contents.shadowTex
@shadowTex.setter
def shadowTex(self, value):
self._wrapped.contents.shadowTex = value
@property
def ntexture(self):
return self._wrapped.contents.ntexture
@ntexture.setter
def ntexture(self, value):
self._wrapped.contents.ntexture = value
@property
def texture(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.texture, dtype=np.int32, count=(100)), (100, ))
arr.setflags(write=False)
return arr
@texture.setter
def texture(self, value):
val_ptr = np.array(value, dtype=np.int32).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.texture, val_ptr, 100 * sizeof(c_int))
@property
def textureType(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.textureType, dtype=np.int32, count=(100)), (100, ))
arr.setflags(write=False)
return arr
@textureType.setter
def textureType(self, value):
val_ptr = np.array(value, dtype=np.int32).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.textureType, val_ptr, 100 * sizeof(c_int))
@property
def basePlane(self):
return self._wrapped.contents.basePlane
@basePlane.setter
def basePlane(self, value):
self._wrapped.contents.basePlane = value
@property
def baseMesh(self):
return self._wrapped.contents.baseMesh
@baseMesh.setter
def baseMesh(self, value):
self._wrapped.contents.baseMesh = value
@property
def baseHField(self):
return self._wrapped.contents.baseHField
@baseHField.setter
def baseHField(self, value):
self._wrapped.contents.baseHField = value
@property
def baseBuiltin(self):
return self._wrapped.contents.baseBuiltin
@baseBuiltin.setter
def baseBuiltin(self, value):
self._wrapped.contents.baseBuiltin = value
@property
def baseFontNormal(self):
return self._wrapped.contents.baseFontNormal
@baseFontNormal.setter
def baseFontNormal(self, value):
self._wrapped.contents.baseFontNormal = value
@property
def baseFontBack(self):
return self._wrapped.contents.baseFontBack
@baseFontBack.setter
def baseFontBack(self, value):
self._wrapped.contents.baseFontBack = value
@property
def baseFontBig(self):
return self._wrapped.contents.baseFontBig
@baseFontBig.setter
def baseFontBig(self, value):
self._wrapped.contents.baseFontBig = value
@property
def rangePlane(self):
return self._wrapped.contents.rangePlane
@rangePlane.setter
def rangePlane(self, value):
self._wrapped.contents.rangePlane = value
@property
def rangeMesh(self):
return self._wrapped.contents.rangeMesh
@rangeMesh.setter
def rangeMesh(self, value):
self._wrapped.contents.rangeMesh = value
@property
def rangeHField(self):
return self._wrapped.contents.rangeHField
@rangeHField.setter
def rangeHField(self, value):
self._wrapped.contents.rangeHField = value
@property
def rangeBuiltin(self):
return self._wrapped.contents.rangeBuiltin
@rangeBuiltin.setter
def rangeBuiltin(self, value):
self._wrapped.contents.rangeBuiltin = value
@property
def rangeFont(self):
return self._wrapped.contents.rangeFont
@rangeFont.setter
def rangeFont(self, value):
self._wrapped.contents.rangeFont = value
@property
def charWidth(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.charWidth, dtype=np.int32, count=(127)), (127, ))
arr.setflags(write=False)
return arr
@charWidth.setter
def charWidth(self, value):
val_ptr = np.array(value, dtype=np.int32).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.charWidth, val_ptr, 127 * sizeof(c_int))
@property
def charWidthBig(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.charWidthBig, dtype=np.int32, count=(127)), (127, ))
arr.setflags(write=False)
return arr
@charWidthBig.setter
def charWidthBig(self, value):
val_ptr = np.array(value, dtype=np.int32).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.charWidthBig, val_ptr, 127 * sizeof(c_int))
@property
def charHeight(self):
return self._wrapped.contents.charHeight
@charHeight.setter
def charHeight(self, value):
self._wrapped.contents.charHeight = value
@property
def charHeightBig(self):
return self._wrapped.contents.charHeightBig
@charHeightBig.setter
def charHeightBig(self, value):
self._wrapped.contents.charHeightBig = value
@property
def glewInitialized(self):
return self._wrapped.contents.glewInitialized
@glewInitialized.setter
def glewInitialized(self, value):
self._wrapped.contents.glewInitialized = value
class MjvCameraWrapper(object):
def __init__(self, wrapped, size_src=None):
self._wrapped = wrapped
self._size_src = size_src
@property
def ptr(self):
return self._wrapped
@property
def obj(self):
return self._wrapped.contents
@property
def fovy(self):
return self._wrapped.contents.fovy
@fovy.setter
def fovy(self, value):
self._wrapped.contents.fovy = value
@property
def camid(self):
return self._wrapped.contents.camid
@camid.setter
def camid(self, value):
self._wrapped.contents.camid = value
@property
def trackbodyid(self):
return self._wrapped.contents.trackbodyid
@trackbodyid.setter
def trackbodyid(self, value):
self._wrapped.contents.trackbodyid = value
@property
def lookat(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.lookat, dtype=np.double, count=(3)), (3, ))
arr.setflags(write=False)
return arr
@lookat.setter
def lookat(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.lookat, val_ptr, 3 * sizeof(c_double))
@property
def azimuth(self):
return self._wrapped.contents.azimuth
@azimuth.setter
def azimuth(self, value):
self._wrapped.contents.azimuth = value
@property
def elevation(self):
return self._wrapped.contents.elevation
@elevation.setter
def elevation(self, value):
self._wrapped.contents.elevation = value
@property
def distance(self):
return self._wrapped.contents.distance
@distance.setter
def distance(self, value):
self._wrapped.contents.distance = value
@property
def pose(self):
return self._wrapped.contents.pose
@pose.setter
def pose(self, value):
self._wrapped.contents.pose = value
@property
def VR(self):
return self._wrapped.contents.VR
@VR.setter
def VR(self, value):
self._wrapped.contents.VR = value
class MjvOptionWrapper(object):
def __init__(self, wrapped, size_src=None):
self._wrapped = wrapped
self._size_src = size_src
@property
def ptr(self):
return self._wrapped
@property
def obj(self):
return self._wrapped.contents
@property
def label(self):
return self._wrapped.contents.label
@label.setter
def label(self, value):
self._wrapped.contents.label = value
@property
def frame(self):
return self._wrapped.contents.frame
@frame.setter
def frame(self, value):
self._wrapped.contents.frame = value
@property
def geomgroup(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.geomgroup, dtype=np.uint8, count=(5)), (5, ))
arr.setflags(write=False)
return arr
@geomgroup.setter
def geomgroup(self, value):
val_ptr = np.array(value, dtype=np.uint8).ctypes.data_as(POINTER(c_ubyte))
memmove(self._wrapped.contents.geomgroup, val_ptr, 5 * sizeof(c_ubyte))
@property
def sitegroup(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.sitegroup, dtype=np.uint8, count=(5)), (5, ))
arr.setflags(write=False)
return arr
@sitegroup.setter
def sitegroup(self, value):
val_ptr = np.array(value, dtype=np.uint8).ctypes.data_as(POINTER(c_ubyte))
memmove(self._wrapped.contents.sitegroup, val_ptr, 5 * sizeof(c_ubyte))
@property
def flags(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.flags, dtype=np.uint8, count=(18)), (18, ))
arr.setflags(write=False)
return arr
@flags.setter
def flags(self, value):
val_ptr = np.array(value, dtype=np.uint8).ctypes.data_as(POINTER(c_ubyte))
memmove(self._wrapped.contents.flags, val_ptr, 18 * sizeof(c_ubyte))
class MjvGeomWrapper(object):
def __init__(self, wrapped, size_src=None):
self._wrapped = wrapped
self._size_src = size_src
@property
def ptr(self):
return self._wrapped
@property
def obj(self):
return self._wrapped.contents
@property
def type(self):
return self._wrapped.contents.type
@type.setter
def type(self, value):
self._wrapped.contents.type = value
@property
def dataid(self):
return self._wrapped.contents.dataid
@dataid.setter
def dataid(self, value):
self._wrapped.contents.dataid = value
@property
def objtype(self):
return self._wrapped.contents.objtype
@objtype.setter
def objtype(self, value):
self._wrapped.contents.objtype = value
@property
def objid(self):
return self._wrapped.contents.objid
@objid.setter
def objid(self, value):
self._wrapped.contents.objid = value
@property
def category(self):
return self._wrapped.contents.category
@category.setter
def category(self, value):
self._wrapped.contents.category = value
@property
def texid(self):
return self._wrapped.contents.texid
@texid.setter
def texid(self, value):
self._wrapped.contents.texid = value
@property
def texuniform(self):
return self._wrapped.contents.texuniform
@texuniform.setter
def texuniform(self, value):
self._wrapped.contents.texuniform = value
@property
def texrepeat(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.texrepeat, dtype=np.float32, count=(2)), (2, ))
arr.setflags(write=False)
return arr
@texrepeat.setter
def texrepeat(self, value):
val_ptr = np.array(value, dtype=np.float32).ctypes.data_as(POINTER(c_float))
memmove(self._wrapped.contents.texrepeat, val_ptr, 2 * sizeof(c_float))
@property
def size(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.size, dtype=np.float32, count=(3)), (3, ))
arr.setflags(write=False)
return arr
@size.setter
def size(self, value):
val_ptr = np.array(value, dtype=np.float32).ctypes.data_as(POINTER(c_float))
memmove(self._wrapped.contents.size, val_ptr, 3 * sizeof(c_float))
@property
def pos(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.pos, dtype=np.float32, count=(3)), (3, ))
arr.setflags(write=False)
return arr
@pos.setter
def pos(self, value):
val_ptr = np.array(value, dtype=np.float32).ctypes.data_as(POINTER(c_float))
memmove(self._wrapped.contents.pos, val_ptr, 3 * sizeof(c_float))
@property
def mat(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.mat, dtype=np.float32, count=(9)), (9, ))
arr.setflags(write=False)
return arr
@mat.setter
def mat(self, value):
val_ptr = np.array(value, dtype=np.float32).ctypes.data_as(POINTER(c_float))
memmove(self._wrapped.contents.mat, val_ptr, 9 * sizeof(c_float))
@property
def rgba(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.rgba, dtype=np.float32, count=(4)), (4, ))
arr.setflags(write=False)
return arr
@rgba.setter
def rgba(self, value):
val_ptr = np.array(value, dtype=np.float32).ctypes.data_as(POINTER(c_float))
memmove(self._wrapped.contents.rgba, val_ptr, 4 * sizeof(c_float))
@property
def emission(self):
return self._wrapped.contents.emission
@emission.setter
def emission(self, value):
self._wrapped.contents.emission = value
@property
def specular(self):
return self._wrapped.contents.specular
@specular.setter
def specular(self, value):
self._wrapped.contents.specular = value
@property
def shininess(self):
return self._wrapped.contents.shininess
@shininess.setter
def shininess(self, value):
self._wrapped.contents.shininess = value
@property
def reflectance(self):
return self._wrapped.contents.reflectance
@reflectance.setter
def reflectance(self, value):
self._wrapped.contents.reflectance = value
@property
def label(self):
return self._wrapped.contents.label
@label.setter
def label(self, value):
self._wrapped.contents.label = value
@property
def camdist(self):
return self._wrapped.contents.camdist
@camdist.setter
def camdist(self, value):
self._wrapped.contents.camdist = value
@property
def rbound(self):
return self._wrapped.contents.rbound
@rbound.setter
def rbound(self, value):
self._wrapped.contents.rbound = value
@property
def transparent(self):
return self._wrapped.contents.transparent
@transparent.setter
def transparent(self, value):
self._wrapped.contents.transparent = value
class MjvLightWrapper(object):
def __init__(self, wrapped, size_src=None):
self._wrapped = wrapped
self._size_src = size_src
@property
def ptr(self):
return self._wrapped
@property
def obj(self):
return self._wrapped.contents
@property
def pos(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.pos, dtype=np.float32, count=(3)), (3, ))
arr.setflags(write=False)
return arr
@pos.setter
def pos(self, value):
val_ptr = np.array(value, dtype=np.float32).ctypes.data_as(POINTER(c_float))
memmove(self._wrapped.contents.pos, val_ptr, 3 * sizeof(c_float))
@property
def dir(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.dir, dtype=np.float32, count=(3)), (3, ))
arr.setflags(write=False)
return arr
@dir.setter
def dir(self, value):
val_ptr = np.array(value, dtype=np.float32).ctypes.data_as(POINTER(c_float))
memmove(self._wrapped.contents.dir, val_ptr, 3 * sizeof(c_float))
@property
def attenuation(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.attenuation, dtype=np.float32, count=(3)), (3, ))
arr.setflags(write=False)
return arr
@attenuation.setter
def attenuation(self, value):
val_ptr = np.array(value, dtype=np.float32).ctypes.data_as(POINTER(c_float))
memmove(self._wrapped.contents.attenuation, val_ptr, 3 * sizeof(c_float))
@property
def cutoff(self):
return self._wrapped.contents.cutoff
@cutoff.setter
def cutoff(self, value):
self._wrapped.contents.cutoff = value
@property
def exponent(self):
return self._wrapped.contents.exponent
@exponent.setter
def exponent(self, value):
self._wrapped.contents.exponent = value
@property
def ambient(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.ambient, dtype=np.float32, count=(3)), (3, ))
arr.setflags(write=False)
return arr
@ambient.setter
def ambient(self, value):
val_ptr = np.array(value, dtype=np.float32).ctypes.data_as(POINTER(c_float))
memmove(self._wrapped.contents.ambient, val_ptr, 3 * sizeof(c_float))
@property
def diffuse(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.diffuse, dtype=np.float32, count=(3)), (3, ))
arr.setflags(write=False)
return arr
@diffuse.setter
def diffuse(self, value):
val_ptr = np.array(value, dtype=np.float32).ctypes.data_as(POINTER(c_float))
memmove(self._wrapped.contents.diffuse, val_ptr, 3 * sizeof(c_float))
@property
def specular(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.specular, dtype=np.float32, count=(3)), (3, ))
arr.setflags(write=False)
return arr
@specular.setter
def specular(self, value):
val_ptr = np.array(value, dtype=np.float32).ctypes.data_as(POINTER(c_float))
memmove(self._wrapped.contents.specular, val_ptr, 3 * sizeof(c_float))
@property
def headlight(self):
return self._wrapped.contents.headlight
@headlight.setter
def headlight(self, value):
self._wrapped.contents.headlight = value
@property
def directional(self):
return self._wrapped.contents.directional
@directional.setter
def directional(self, value):
self._wrapped.contents.directional = value
@property
def castshadow(self):
return self._wrapped.contents.castshadow
@castshadow.setter
def castshadow(self, value):
self._wrapped.contents.castshadow = value
class MjvObjectsWrapper(object):
def __init__(self, wrapped, size_src=None):
self._wrapped = wrapped
self._size_src = size_src
@property
def ptr(self):
return self._wrapped
@property
def obj(self):
return self._wrapped.contents
@property
def nlight(self):
return self._wrapped.contents.nlight
@nlight.setter
def nlight(self, value):
self._wrapped.contents.nlight = value
@property
def ngeom(self):
return self._wrapped.contents.ngeom
@ngeom.setter
def ngeom(self, value):
self._wrapped.contents.ngeom = value
@property
def maxgeom(self):
return self._wrapped.contents.maxgeom
@maxgeom.setter
def maxgeom(self, value):
self._wrapped.contents.maxgeom = value
@property
def lights(self):
return self._wrapped.contents.lights
@lights.setter
def lights(self, value):
self._wrapped.contents.lights = value
class MjOptionWrapper(object):
def __init__(self, wrapped, size_src=None):
self._wrapped = wrapped
self._size_src = size_src
@property
def ptr(self):
return self._wrapped
@property
def obj(self):
return self._wrapped.contents
@property
def timestep(self):
return self._wrapped.contents.timestep
@timestep.setter
def timestep(self, value):
self._wrapped.contents.timestep = value
@property
def apirate(self):
return self._wrapped.contents.apirate
@apirate.setter
def apirate(self, value):
self._wrapped.contents.apirate = value
@property
def tolerance(self):
return self._wrapped.contents.tolerance
@tolerance.setter
def tolerance(self, value):
self._wrapped.contents.tolerance = value
@property
def impratio(self):
return self._wrapped.contents.impratio
@impratio.setter
def impratio(self, value):
self._wrapped.contents.impratio = value
@property
def gravity(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.gravity, dtype=np.double, count=(3)), (3, ))
arr.setflags(write=False)
return arr
@gravity.setter
def gravity(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.gravity, val_ptr, 3 * sizeof(c_double))
@property
def wind(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.wind, dtype=np.double, count=(3)), (3, ))
arr.setflags(write=False)
return arr
@wind.setter
def wind(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.wind, val_ptr, 3 * sizeof(c_double))
@property
def magnetic(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.magnetic, dtype=np.double, count=(3)), (3, ))
arr.setflags(write=False)
return arr
@magnetic.setter
def magnetic(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.magnetic, val_ptr, 3 * sizeof(c_double))
@property
def density(self):
return self._wrapped.contents.density
@density.setter
def density(self, value):
self._wrapped.contents.density = value
@property
def viscosity(self):
return self._wrapped.contents.viscosity
@viscosity.setter
def viscosity(self, value):
self._wrapped.contents.viscosity = value
@property
def o_margin(self):
return self._wrapped.contents.o_margin
@o_margin.setter
def o_margin(self, value):
self._wrapped.contents.o_margin = value
@property
def o_solref(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.o_solref, dtype=np.double, count=(2)), (2, ))
arr.setflags(write=False)
return arr
@o_solref.setter
def o_solref(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.o_solref, val_ptr, 2 * sizeof(c_double))
@property
def o_solimp(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.o_solimp, dtype=np.double, count=(3)), (3, ))
arr.setflags(write=False)
return arr
@o_solimp.setter
def o_solimp(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.o_solimp, val_ptr, 3 * sizeof(c_double))
@property
def mpr_tolerance(self):
return self._wrapped.contents.mpr_tolerance
@mpr_tolerance.setter
def mpr_tolerance(self, value):
self._wrapped.contents.mpr_tolerance = value
@property
def mpr_iterations(self):
return self._wrapped.contents.mpr_iterations
@mpr_iterations.setter
def mpr_iterations(self, value):
self._wrapped.contents.mpr_iterations = value
@property
def integrator(self):
return self._wrapped.contents.integrator
@integrator.setter
def integrator(self, value):
self._wrapped.contents.integrator = value
@property
def collision(self):
return self._wrapped.contents.collision
@collision.setter
def collision(self, value):
self._wrapped.contents.collision = value
@property
def impedance(self):
return self._wrapped.contents.impedance
@impedance.setter
def impedance(self, value):
self._wrapped.contents.impedance = value
@property
def reference(self):
return self._wrapped.contents.reference
@reference.setter
def reference(self, value):
self._wrapped.contents.reference = value
@property
def solver(self):
return self._wrapped.contents.solver
@solver.setter
def solver(self, value):
self._wrapped.contents.solver = value
@property
def iterations(self):
return self._wrapped.contents.iterations
@iterations.setter
def iterations(self, value):
self._wrapped.contents.iterations = value
@property
def disableflags(self):
return self._wrapped.contents.disableflags
@disableflags.setter
def disableflags(self, value):
self._wrapped.contents.disableflags = value
@property
def enableflags(self):
return self._wrapped.contents.enableflags
@enableflags.setter
def enableflags(self, value):
self._wrapped.contents.enableflags = value
class MjVisualWrapper(object):
def __init__(self, wrapped, size_src=None):
self._wrapped = wrapped
self._size_src = size_src
@property
def ptr(self):
return self._wrapped
@property
def obj(self):
return self._wrapped.contents
@property
def global_(self):
return self._wrapped.contents.global_
@global_.setter
def global_(self, value):
self._wrapped.contents.global_ = value
@property
def quality(self):
return self._wrapped.contents.quality
@quality.setter
def quality(self, value):
self._wrapped.contents.quality = value
@property
def headlight(self):
return self._wrapped.contents.headlight
@headlight.setter
def headlight(self, value):
self._wrapped.contents.headlight = value
@property
def map_(self):
return self._wrapped.contents.map_
@map_.setter
def map_(self, value):
self._wrapped.contents.map_ = value
@property
def scale(self):
return self._wrapped.contents.scale
@scale.setter
def scale(self, value):
self._wrapped.contents.scale = value
@property
def rgba(self):
return self._wrapped.contents.rgba
@rgba.setter
def rgba(self, value):
self._wrapped.contents.rgba = value
class MjStatisticWrapper(object):
def __init__(self, wrapped, size_src=None):
self._wrapped = wrapped
self._size_src = size_src
@property
def ptr(self):
return self._wrapped
@property
def obj(self):
return self._wrapped.contents
@property
def meanmass(self):
return self._wrapped.contents.meanmass
@meanmass.setter
def meanmass(self, value):
self._wrapped.contents.meanmass = value
@property
def meansize(self):
return self._wrapped.contents.meansize
@meansize.setter
def meansize(self, value):
self._wrapped.contents.meansize = value
@property
def extent(self):
return self._wrapped.contents.extent
@extent.setter
def extent(self, value):
self._wrapped.contents.extent = value
@property
def center(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.center, dtype=np.double, count=(3)), (3, ))
arr.setflags(write=False)
return arr
@center.setter
def center(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.center, val_ptr, 3 * sizeof(c_double))
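# Unlike the fixed-size wrappers above, MjDataWrapper sizes its array properties
# from the `size_src` object passed at construction (nq, nv, nbody, njmax, ...),
# so it should be created together with the model that owns the mjData buffer.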
class MjDataWrapper(object):
def __init__(self, wrapped, size_src=None):
self._wrapped = wrapped
self._size_src = size_src
@property
def ptr(self):
return self._wrapped
@property
def obj(self):
return self._wrapped.contents
@property
def nstack(self):
return self._wrapped.contents.nstack
@nstack.setter
def nstack(self, value):
self._wrapped.contents.nstack = value
@property
def nbuffer(self):
return self._wrapped.contents.nbuffer
@nbuffer.setter
def nbuffer(self, value):
self._wrapped.contents.nbuffer = value
@property
def pstack(self):
return self._wrapped.contents.pstack
@pstack.setter
def pstack(self, value):
self._wrapped.contents.pstack = value
@property
def maxstackuse(self):
return self._wrapped.contents.maxstackuse
@maxstackuse.setter
def maxstackuse(self, value):
self._wrapped.contents.maxstackuse = value
@property
def ne(self):
return self._wrapped.contents.ne
@ne.setter
def ne(self, value):
self._wrapped.contents.ne = value
@property
def nf(self):
return self._wrapped.contents.nf
@nf.setter
def nf(self, value):
self._wrapped.contents.nf = value
@property
def nefc(self):
return self._wrapped.contents.nefc
@nefc.setter
def nefc(self, value):
self._wrapped.contents.nefc = value
@property
def ncon(self):
return self._wrapped.contents.ncon
@ncon.setter
def ncon(self, value):
self._wrapped.contents.ncon = value
@property
def nwarning(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.nwarning, dtype=np.int32, count=(8)), (8, ))
arr.setflags(write=False)
return arr
@nwarning.setter
def nwarning(self, value):
val_ptr = np.array(value, dtype=np.int32).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.nwarning, val_ptr, 8 * sizeof(c_int))
@property
def warning_info(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.warning_info, dtype=np.int32, count=(8)), (8, ))
arr.setflags(write=False)
return arr
@warning_info.setter
def warning_info(self, value):
val_ptr = np.array(value, dtype=np.int32).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.warning_info, val_ptr, 8 * sizeof(c_int))
@property
def timer_duration(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.timer_duration, dtype=np.double, count=(14)), (14, ))
arr.setflags(write=False)
return arr
@timer_duration.setter
def timer_duration(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.timer_duration, val_ptr, 14 * sizeof(c_double))
@property
def timer_ncall(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.timer_ncall, dtype=np.double, count=(14)), (14, ))
arr.setflags(write=False)
return arr
@timer_ncall.setter
def timer_ncall(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.timer_ncall, val_ptr, 14 * sizeof(c_double))
@property
def mocaptime(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.mocaptime, dtype=np.double, count=(3)), (3, ))
arr.setflags(write=False)
return arr
@mocaptime.setter
def mocaptime(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.mocaptime, val_ptr, 3 * sizeof(c_double))
@property
def time(self):
return self._wrapped.contents.time
@time.setter
def time(self, value):
self._wrapped.contents.time = value
@property
def energy(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.energy, dtype=np.double, count=(2)), (2, ))
arr.setflags(write=False)
return arr
@energy.setter
def energy(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.energy, val_ptr, 2 * sizeof(c_double))
@property
def solverstat(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.solverstat, dtype=np.double, count=(4)), (4, ))
arr.setflags(write=False)
return arr
@solverstat.setter
def solverstat(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.solverstat, val_ptr, 4 * sizeof(c_double))
@property
def solvertrace(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.solvertrace, dtype=np.double, count=(200)), (200, ))
arr.setflags(write=False)
return arr
@solvertrace.setter
def solvertrace(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.solvertrace, val_ptr, 200 * sizeof(c_double))
@property
def buffer(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.buffer, dtype=np.uint8, count=(self.nbuffer)), (self.nbuffer, ))
arr.setflags(write=False)
return arr
@buffer.setter
def buffer(self, value):
val_ptr = np.array(value, dtype=np.uint8).ctypes.data_as(POINTER(c_ubyte))
memmove(self._wrapped.contents.buffer, val_ptr, self.nbuffer * sizeof(c_ubyte))
@property
def stack(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.stack, dtype=np.double, count=(self.nstack)), (self.nstack, ))
arr.setflags(write=False)
return arr
@stack.setter
def stack(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.stack, val_ptr, self.nstack * sizeof(c_double))
@property
def qpos(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.qpos, dtype=np.double, count=(self._size_src.nq*1)), (self._size_src.nq, 1, ))
arr.setflags(write=False)
return arr
@qpos.setter
def qpos(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.qpos, val_ptr, self._size_src.nq*1 * sizeof(c_double))
@property
def qvel(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.qvel, dtype=np.double, count=(self._size_src.nv*1)), (self._size_src.nv, 1, ))
arr.setflags(write=False)
return arr
@qvel.setter
def qvel(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.qvel, val_ptr, self._size_src.nv*1 * sizeof(c_double))
@property
def act(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.act, dtype=np.double, count=(self._size_src.na*1)), (self._size_src.na, 1, ))
arr.setflags(write=False)
return arr
@act.setter
def act(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.act, val_ptr, self._size_src.na*1 * sizeof(c_double))
@property
def ctrl(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.ctrl, dtype=np.double, count=(self._size_src.nu*1)), (self._size_src.nu, 1, ))
arr.setflags(write=False)
return arr
@ctrl.setter
def ctrl(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.ctrl, val_ptr, self._size_src.nu*1 * sizeof(c_double))
@property
def qfrc_applied(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.qfrc_applied, dtype=np.double, count=(self._size_src.nv*1)), (self._size_src.nv, 1, ))
arr.setflags(write=False)
return arr
@qfrc_applied.setter
def qfrc_applied(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.qfrc_applied, val_ptr, self._size_src.nv*1 * sizeof(c_double))
@property
def xfrc_applied(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.xfrc_applied, dtype=np.double, count=(self._size_src.nbody*6)), (self._size_src.nbody, 6, ))
arr.setflags(write=False)
return arr
@xfrc_applied.setter
def xfrc_applied(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.xfrc_applied, val_ptr, self._size_src.nbody*6 * sizeof(c_double))
@property
def qacc(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.qacc, dtype=np.double, count=(self._size_src.nv*1)), (self._size_src.nv, 1, ))
arr.setflags(write=False)
return arr
@qacc.setter
def qacc(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.qacc, val_ptr, self._size_src.nv*1 * sizeof(c_double))
@property
def act_dot(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.act_dot, dtype=np.double, count=(self._size_src.na*1)), (self._size_src.na, 1, ))
arr.setflags(write=False)
return arr
@act_dot.setter
def act_dot(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.act_dot, val_ptr, self._size_src.na*1 * sizeof(c_double))
@property
def mocap_pos(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.mocap_pos, dtype=np.double, count=(self._size_src.nmocap*3)), (self._size_src.nmocap, 3, ))
arr.setflags(write=False)
return arr
@mocap_pos.setter
def mocap_pos(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.mocap_pos, val_ptr, self._size_src.nmocap*3 * sizeof(c_double))
@property
def mocap_quat(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.mocap_quat, dtype=np.double, count=(self._size_src.nmocap*4)), (self._size_src.nmocap, 4, ))
arr.setflags(write=False)
return arr
@mocap_quat.setter
def mocap_quat(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.mocap_quat, val_ptr, self._size_src.nmocap*4 * sizeof(c_double))
@property
def userdata(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.userdata, dtype=np.double, count=(self._size_src.nuserdata*1)), (self._size_src.nuserdata, 1, ))
arr.setflags(write=False)
return arr
@userdata.setter
def userdata(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.userdata, val_ptr, self._size_src.nuserdata*1 * sizeof(c_double))
@property
def sensordata(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.sensordata, dtype=np.double, count=(self._size_src.nsensordata*1)), (self._size_src.nsensordata, 1, ))
arr.setflags(write=False)
return arr
@sensordata.setter
def sensordata(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.sensordata, val_ptr, self._size_src.nsensordata*1 * sizeof(c_double))
@property
def xpos(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.xpos, dtype=np.double, count=(self._size_src.nbody*3)), (self._size_src.nbody, 3, ))
arr.setflags(write=False)
return arr
@xpos.setter
def xpos(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.xpos, val_ptr, self._size_src.nbody*3 * sizeof(c_double))
@property
def xquat(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.xquat, dtype=np.double, count=(self._size_src.nbody*4)), (self._size_src.nbody, 4, ))
arr.setflags(write=False)
return arr
@xquat.setter
def xquat(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.xquat, val_ptr, self._size_src.nbody*4 * sizeof(c_double))
@property
def xmat(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.xmat, dtype=np.double, count=(self._size_src.nbody*9)), (self._size_src.nbody, 9, ))
arr.setflags(write=False)
return arr
@xmat.setter
def xmat(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.xmat, val_ptr, self._size_src.nbody*9 * sizeof(c_double))
@property
def xipos(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.xipos, dtype=np.double, count=(self._size_src.nbody*3)), (self._size_src.nbody, 3, ))
arr.setflags(write=False)
return arr
@xipos.setter
def xipos(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.xipos, val_ptr, self._size_src.nbody*3 * sizeof(c_double))
@property
def ximat(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.ximat, dtype=np.double, count=(self._size_src.nbody*9)), (self._size_src.nbody, 9, ))
arr.setflags(write=False)
return arr
@ximat.setter
def ximat(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.ximat, val_ptr, self._size_src.nbody*9 * sizeof(c_double))
@property
def xanchor(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.xanchor, dtype=np.double, count=(self._size_src.njnt*3)), (self._size_src.njnt, 3, ))
arr.setflags(write=False)
return arr
@xanchor.setter
def xanchor(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.xanchor, val_ptr, self._size_src.njnt*3 * sizeof(c_double))
@property
def xaxis(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.xaxis, dtype=np.double, count=(self._size_src.njnt*3)), (self._size_src.njnt, 3, ))
arr.setflags(write=False)
return arr
@xaxis.setter
def xaxis(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.xaxis, val_ptr, self._size_src.njnt*3 * sizeof(c_double))
@property
def geom_xpos(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.geom_xpos, dtype=np.double, count=(self._size_src.ngeom*3)), (self._size_src.ngeom, 3, ))
arr.setflags(write=False)
return arr
@geom_xpos.setter
def geom_xpos(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.geom_xpos, val_ptr, self._size_src.ngeom*3 * sizeof(c_double))
@property
def geom_xmat(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.geom_xmat, dtype=np.double, count=(self._size_src.ngeom*9)), (self._size_src.ngeom, 9, ))
arr.setflags(write=False)
return arr
@geom_xmat.setter
def geom_xmat(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.geom_xmat, val_ptr, self._size_src.ngeom*9 * sizeof(c_double))
@property
def site_xpos(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.site_xpos, dtype=np.double, count=(self._size_src.nsite*3)), (self._size_src.nsite, 3, ))
arr.setflags(write=False)
return arr
@site_xpos.setter
def site_xpos(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.site_xpos, val_ptr, self._size_src.nsite*3 * sizeof(c_double))
@property
def site_xmat(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.site_xmat, dtype=np.double, count=(self._size_src.nsite*9)), (self._size_src.nsite, 9, ))
arr.setflags(write=False)
return arr
@site_xmat.setter
def site_xmat(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.site_xmat, val_ptr, self._size_src.nsite*9 * sizeof(c_double))
@property
def cam_xpos(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.cam_xpos, dtype=np.double, count=(self._size_src.ncam*3)), (self._size_src.ncam, 3, ))
arr.setflags(write=False)
return arr
@cam_xpos.setter
def cam_xpos(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.cam_xpos, val_ptr, self._size_src.ncam*3 * sizeof(c_double))
@property
def cam_xmat(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.cam_xmat, dtype=np.double, count=(self._size_src.ncam*9)), (self._size_src.ncam, 9, ))
arr.setflags(write=False)
return arr
@cam_xmat.setter
def cam_xmat(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.cam_xmat, val_ptr, self._size_src.ncam*9 * sizeof(c_double))
@property
def light_xpos(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.light_xpos, dtype=np.double, count=(self._size_src.nlight*3)), (self._size_src.nlight, 3, ))
arr.setflags(write=False)
return arr
@light_xpos.setter
def light_xpos(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.light_xpos, val_ptr, self._size_src.nlight*3 * sizeof(c_double))
@property
def light_xdir(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.light_xdir, dtype=np.double, count=(self._size_src.nlight*3)), (self._size_src.nlight, 3, ))
arr.setflags(write=False)
return arr
@light_xdir.setter
def light_xdir(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.light_xdir, val_ptr, self._size_src.nlight*3 * sizeof(c_double))
@property
def com_subtree(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.com_subtree, dtype=np.double, count=(self._size_src.nbody*3)), (self._size_src.nbody, 3, ))
arr.setflags(write=False)
return arr
@com_subtree.setter
def com_subtree(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.com_subtree, val_ptr, self._size_src.nbody*3 * sizeof(c_double))
@property
def cdof(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.cdof, dtype=np.double, count=(self._size_src.nv*6)), (self._size_src.nv, 6, ))
arr.setflags(write=False)
return arr
@cdof.setter
def cdof(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.cdof, val_ptr, self._size_src.nv*6 * sizeof(c_double))
@property
def cinert(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.cinert, dtype=np.double, count=(self._size_src.nbody*10)), (self._size_src.nbody, 10, ))
arr.setflags(write=False)
return arr
@cinert.setter
def cinert(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.cinert, val_ptr, self._size_src.nbody*10 * sizeof(c_double))
@property
def ten_wrapadr(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.ten_wrapadr, dtype=np.int32, count=(self._size_src.ntendon*1)), (self._size_src.ntendon, 1, ))
arr.setflags(write=False)
return arr
@ten_wrapadr.setter
def ten_wrapadr(self, value):
val_ptr = np.array(value, dtype=np.int32).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.ten_wrapadr, val_ptr, self._size_src.ntendon*1 * sizeof(c_int))
@property
def ten_wrapnum(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.ten_wrapnum, dtype=np.int32, count=(self._size_src.ntendon*1)), (self._size_src.ntendon, 1, ))
arr.setflags(write=False)
return arr
@ten_wrapnum.setter
def ten_wrapnum(self, value):
val_ptr = np.array(value, dtype=np.int32).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.ten_wrapnum, val_ptr, self._size_src.ntendon*1 * sizeof(c_int))
@property
def ten_length(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.ten_length, dtype=np.double, count=(self._size_src.ntendon*1)), (self._size_src.ntendon, 1, ))
arr.setflags(write=False)
return arr
@ten_length.setter
def ten_length(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.ten_length, val_ptr, self._size_src.ntendon*1 * sizeof(c_double))
@property
def ten_moment(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.ten_moment, dtype=np.double, count=(self._size_src.ntendon*self._size_src.nv)), (self._size_src.ntendon, self._size_src.nv, ))
arr.setflags(write=False)
return arr
@ten_moment.setter
def ten_moment(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.ten_moment, val_ptr, self._size_src.ntendon*self._size_src.nv * sizeof(c_double))
@property
def wrap_obj(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.wrap_obj, dtype=np.int32, count=(self._size_src.nwrap*2)), (self._size_src.nwrap, 2, ))
arr.setflags(write=False)
return arr
@wrap_obj.setter
def wrap_obj(self, value):
val_ptr = np.array(value, dtype=np.int32).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.wrap_obj, val_ptr, self._size_src.nwrap*2 * sizeof(c_int))
@property
def wrap_xpos(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.wrap_xpos, dtype=np.double, count=(self._size_src.nwrap*6)), (self._size_src.nwrap, 6, ))
arr.setflags(write=False)
return arr
@wrap_xpos.setter
def wrap_xpos(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.wrap_xpos, val_ptr, self._size_src.nwrap*6 * sizeof(c_double))
@property
def actuator_length(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.actuator_length, dtype=np.double, count=(self._size_src.nu*1)), (self._size_src.nu, 1, ))
arr.setflags(write=False)
return arr
@actuator_length.setter
def actuator_length(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.actuator_length, val_ptr, self._size_src.nu*1 * sizeof(c_double))
@property
def actuator_moment(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.actuator_moment, dtype=np.double, count=(self._size_src.nu*self._size_src.nv)), (self._size_src.nu, self._size_src.nv, ))
arr.setflags(write=False)
return arr
@actuator_moment.setter
def actuator_moment(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.actuator_moment, val_ptr, self._size_src.nu*self._size_src.nv * sizeof(c_double))
@property
def crb(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.crb, dtype=np.double, count=(self._size_src.nbody*10)), (self._size_src.nbody, 10, ))
arr.setflags(write=False)
return arr
@crb.setter
def crb(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.crb, val_ptr, self._size_src.nbody*10 * sizeof(c_double))
@property
def qM(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.qM, dtype=np.double, count=(self._size_src.nM*1)), (self._size_src.nM, 1, ))
arr.setflags(write=False)
return arr
@qM.setter
def qM(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.qM, val_ptr, self._size_src.nM*1 * sizeof(c_double))
@property
def qLD(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.qLD, dtype=np.double, count=(self._size_src.nM*1)), (self._size_src.nM, 1, ))
arr.setflags(write=False)
return arr
@qLD.setter
def qLD(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.qLD, val_ptr, self._size_src.nM*1 * sizeof(c_double))
@property
def qLDiagInv(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.qLDiagInv, dtype=np.double, count=(self._size_src.nv*1)), (self._size_src.nv, 1, ))
arr.setflags(write=False)
return arr
@qLDiagInv.setter
def qLDiagInv(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.qLDiagInv, val_ptr, self._size_src.nv*1 * sizeof(c_double))
@property
def qLDiagSqrtInv(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.qLDiagSqrtInv, dtype=np.double, count=(self._size_src.nv*1)), (self._size_src.nv, 1, ))
arr.setflags(write=False)
return arr
@qLDiagSqrtInv.setter
def qLDiagSqrtInv(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.qLDiagSqrtInv, val_ptr, self._size_src.nv*1 * sizeof(c_double))
@property
def efc_type(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.efc_type, dtype=np.int32, count=(self._size_src.njmax*1)), (self._size_src.njmax, 1, ))
arr.setflags(write=False)
return arr
@efc_type.setter
def efc_type(self, value):
val_ptr = np.array(value, dtype=np.int32).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.efc_type, val_ptr, self._size_src.njmax*1 * sizeof(c_int))
@property
def efc_id(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.efc_id, dtype=np.int32, count=(self._size_src.njmax*1)), (self._size_src.njmax, 1, ))
arr.setflags(write=False)
return arr
@efc_id.setter
def efc_id(self, value):
val_ptr = np.array(value, dtype=np.int32).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.efc_id, val_ptr, self._size_src.njmax*1 * sizeof(c_int))
@property
def efc_rownnz(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.efc_rownnz, dtype=np.int32, count=(self._size_src.njmax*1)), (self._size_src.njmax, 1, ))
arr.setflags(write=False)
return arr
@efc_rownnz.setter
def efc_rownnz(self, value):
val_ptr = np.array(value, dtype=np.int32).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.efc_rownnz, val_ptr, self._size_src.njmax*1 * sizeof(c_int))
@property
def efc_rowadr(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.efc_rowadr, dtype=np.int32, count=(self._size_src.njmax*1)), (self._size_src.njmax, 1, ))
arr.setflags(write=False)
return arr
@efc_rowadr.setter
def efc_rowadr(self, value):
val_ptr = np.array(value, dtype=np.int32).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.efc_rowadr, val_ptr, self._size_src.njmax*1 * sizeof(c_int))
@property
def efc_colind(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.efc_colind, dtype=np.int, count=(self._size_src.njmax*self._size_src.nv)), (self._size_src.njmax, self._size_src.nv, ))
arr.setflags(write=False)
return arr
@efc_colind.setter
def efc_colind(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.efc_colind, val_ptr, self._size_src.njmax*self._size_src.nv * sizeof(c_int))
@property
def efc_rownnz_T(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.efc_rownnz_T, dtype=np.int, count=(self._size_src.nv*1)), (self._size_src.nv, 1, ))
arr.setflags(write=False)
return arr
@efc_rownnz_T.setter
def efc_rownnz_T(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.efc_rownnz_T, val_ptr, self._size_src.nv*1 * sizeof(c_int))
@property
def efc_rowadr_T(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.efc_rowadr_T, dtype=np.int, count=(self._size_src.nv*1)), (self._size_src.nv, 1, ))
arr.setflags(write=False)
return arr
@efc_rowadr_T.setter
def efc_rowadr_T(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.efc_rowadr_T, val_ptr, self._size_src.nv*1 * sizeof(c_int))
@property
def efc_colind_T(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.efc_colind_T, dtype=np.int, count=(self._size_src.nv*self._size_src.njmax)), (self._size_src.nv, self._size_src.njmax, ))
arr.setflags(write=False)
return arr
@efc_colind_T.setter
def efc_colind_T(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.efc_colind_T, val_ptr, self._size_src.nv*self._size_src.njmax * sizeof(c_int))
@property
def efc_solref(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.efc_solref, dtype=np.double, count=(self._size_src.njmax*2)), (self._size_src.njmax, 2, ))
arr.setflags(write=False)
return arr
@efc_solref.setter
def efc_solref(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.efc_solref, val_ptr, self._size_src.njmax*2 * sizeof(c_double))
@property
def efc_solimp(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.efc_solimp, dtype=np.double, count=(self._size_src.njmax*3)), (self._size_src.njmax, 3, ))
arr.setflags(write=False)
return arr
@efc_solimp.setter
def efc_solimp(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.efc_solimp, val_ptr, self._size_src.njmax*3 * sizeof(c_double))
@property
def efc_margin(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.efc_margin, dtype=np.double, count=(self._size_src.njmax*1)), (self._size_src.njmax, 1, ))
arr.setflags(write=False)
return arr
@efc_margin.setter
def efc_margin(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.efc_margin, val_ptr, self._size_src.njmax*1 * sizeof(c_double))
@property
def efc_frictionloss(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.efc_frictionloss, dtype=np.double, count=(self._size_src.njmax*1)), (self._size_src.njmax, 1, ))
arr.setflags(write=False)
return arr
@efc_frictionloss.setter
def efc_frictionloss(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.efc_frictionloss, val_ptr, self._size_src.njmax*1 * sizeof(c_double))
@property
def efc_pos(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.efc_pos, dtype=np.double, count=(self._size_src.njmax*1)), (self._size_src.njmax, 1, ))
arr.setflags(write=False)
return arr
@efc_pos.setter
def efc_pos(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.efc_pos, val_ptr, self._size_src.njmax*1 * sizeof(c_double))
@property
def efc_J(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.efc_J, dtype=np.double, count=(self._size_src.njmax*self._size_src.nv)), (self._size_src.njmax, self._size_src.nv, ))
arr.setflags(write=False)
return arr
@efc_J.setter
def efc_J(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.efc_J, val_ptr, self._size_src.njmax*self._size_src.nv * sizeof(c_double))
@property
def efc_J_T(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.efc_J_T, dtype=np.double, count=(self._size_src.nv*self._size_src.njmax)), (self._size_src.nv, self._size_src.njmax, ))
arr.setflags(write=False)
return arr
@efc_J_T.setter
def efc_J_T(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.efc_J_T, val_ptr, self._size_src.nv*self._size_src.njmax * sizeof(c_double))
@property
def efc_diagApprox(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.efc_diagApprox, dtype=np.double, count=(self._size_src.njmax*1)), (self._size_src.njmax, 1, ))
arr.setflags(write=False)
return arr
@efc_diagApprox.setter
def efc_diagApprox(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.efc_diagApprox, val_ptr, self._size_src.njmax*1 * sizeof(c_double))
@property
def efc_D(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.efc_D, dtype=np.double, count=(self._size_src.njmax*1)), (self._size_src.njmax, 1, ))
arr.setflags(write=False)
return arr
@efc_D.setter
def efc_D(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.efc_D, val_ptr, self._size_src.njmax*1 * sizeof(c_double))
@property
def efc_R(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.efc_R, dtype=np.double, count=(self._size_src.njmax*1)), (self._size_src.njmax, 1, ))
arr.setflags(write=False)
return arr
@efc_R.setter
def efc_R(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.efc_R, val_ptr, self._size_src.njmax*1 * sizeof(c_double))
@property
def efc_AR(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.efc_AR, dtype=np.double, count=(self._size_src.njmax*self._size_src.njmax)), (self._size_src.njmax, self._size_src.njmax, ))
arr.setflags(write=False)
return arr
@efc_AR.setter
def efc_AR(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.efc_AR, val_ptr, self._size_src.njmax*self._size_src.njmax * sizeof(c_double))
@property
def e_ARchol(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.e_ARchol, dtype=np.double, count=(self._size_src.nemax*self._size_src.nemax)), (self._size_src.nemax, self._size_src.nemax, ))
arr.setflags(write=False)
return arr
@e_ARchol.setter
def e_ARchol(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.e_ARchol, val_ptr, self._size_src.nemax*self._size_src.nemax * sizeof(c_double))
@property
def fc_e_rect(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.fc_e_rect, dtype=np.double, count=(self._size_src.njmax*self._size_src.nemax)), (self._size_src.njmax, self._size_src.nemax, ))
arr.setflags(write=False)
return arr
@fc_e_rect.setter
def fc_e_rect(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.fc_e_rect, val_ptr, self._size_src.njmax*self._size_src.nemax * sizeof(c_double))
@property
def fc_AR(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.fc_AR, dtype=np.double, count=(self._size_src.njmax*self._size_src.njmax)), (self._size_src.njmax, self._size_src.njmax, ))
arr.setflags(write=False)
return arr
@fc_AR.setter
def fc_AR(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.fc_AR, val_ptr, self._size_src.njmax*self._size_src.njmax * sizeof(c_double))
@property
def ten_velocity(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.ten_velocity, dtype=np.double, count=(self._size_src.ntendon*1)), (self._size_src.ntendon, 1, ))
arr.setflags(write=False)
return arr
@ten_velocity.setter
def ten_velocity(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.ten_velocity, val_ptr, self._size_src.ntendon*1 * sizeof(c_double))
@property
def actuator_velocity(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.actuator_velocity, dtype=np.double, count=(self._size_src.nu*1)), (self._size_src.nu, 1, ))
arr.setflags(write=False)
return arr
@actuator_velocity.setter
def actuator_velocity(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.actuator_velocity, val_ptr, self._size_src.nu*1 * sizeof(c_double))
@property
def cvel(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.cvel, dtype=np.double, count=(self._size_src.nbody*6)), (self._size_src.nbody, 6, ))
arr.setflags(write=False)
return arr
@cvel.setter
def cvel(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.cvel, val_ptr, self._size_src.nbody*6 * sizeof(c_double))
@property
def cdof_dot(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.cdof_dot, dtype=np.double, count=(self._size_src.nv*6)), (self._size_src.nv, 6, ))
arr.setflags(write=False)
return arr
@cdof_dot.setter
def cdof_dot(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.cdof_dot, val_ptr, self._size_src.nv*6 * sizeof(c_double))
@property
def qfrc_bias(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.qfrc_bias, dtype=np.double, count=(self._size_src.nv*1)), (self._size_src.nv, 1, ))
arr.setflags(write=False)
return arr
@qfrc_bias.setter
def qfrc_bias(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.qfrc_bias, val_ptr, self._size_src.nv*1 * sizeof(c_double))
@property
def qfrc_passive(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.qfrc_passive, dtype=np.double, count=(self._size_src.nv*1)), (self._size_src.nv, 1, ))
arr.setflags(write=False)
return arr
@qfrc_passive.setter
def qfrc_passive(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.qfrc_passive, val_ptr, self._size_src.nv*1 * sizeof(c_double))
@property
def efc_vel(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.efc_vel, dtype=np.double, count=(self._size_src.njmax*1)), (self._size_src.njmax, 1, ))
arr.setflags(write=False)
return arr
@efc_vel.setter
def efc_vel(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.efc_vel, val_ptr, self._size_src.njmax*1 * sizeof(c_double))
@property
def efc_aref(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.efc_aref, dtype=np.double, count=(self._size_src.njmax*1)), (self._size_src.njmax, 1, ))
arr.setflags(write=False)
return arr
@efc_aref.setter
def efc_aref(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.efc_aref, val_ptr, self._size_src.njmax*1 * sizeof(c_double))
@property
def actuator_force(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.actuator_force, dtype=np.double, count=(self._size_src.nu*1)), (self._size_src.nu, 1, ))
arr.setflags(write=False)
return arr
@actuator_force.setter
def actuator_force(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.actuator_force, val_ptr, self._size_src.nu*1 * sizeof(c_double))
@property
def qfrc_actuator(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.qfrc_actuator, dtype=np.double, count=(self._size_src.nv*1)), (self._size_src.nv, 1, ))
arr.setflags(write=False)
return arr
@qfrc_actuator.setter
def qfrc_actuator(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.qfrc_actuator, val_ptr, self._size_src.nv*1 * sizeof(c_double))
@property
def qfrc_unc(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.qfrc_unc, dtype=np.double, count=(self._size_src.nv*1)), (self._size_src.nv, 1, ))
arr.setflags(write=False)
return arr
@qfrc_unc.setter
def qfrc_unc(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.qfrc_unc, val_ptr, self._size_src.nv*1 * sizeof(c_double))
@property
def qacc_unc(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.qacc_unc, dtype=np.double, count=(self._size_src.nv*1)), (self._size_src.nv, 1, ))
arr.setflags(write=False)
return arr
@qacc_unc.setter
def qacc_unc(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.qacc_unc, val_ptr, self._size_src.nv*1 * sizeof(c_double))
@property
def efc_b(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.efc_b, dtype=np.double, count=(self._size_src.njmax*1)), (self._size_src.njmax, 1, ))
arr.setflags(write=False)
return arr
@efc_b.setter
def efc_b(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.efc_b, val_ptr, self._size_src.njmax*1 * sizeof(c_double))
@property
def fc_b(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.fc_b, dtype=np.double, count=(self._size_src.njmax*1)), (self._size_src.njmax, 1, ))
arr.setflags(write=False)
return arr
@fc_b.setter
def fc_b(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.fc_b, val_ptr, self._size_src.njmax*1 * sizeof(c_double))
@property
def efc_force(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.efc_force, dtype=np.double, count=(self._size_src.njmax*1)), (self._size_src.njmax, 1, ))
arr.setflags(write=False)
return arr
@efc_force.setter
def efc_force(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.efc_force, val_ptr, self._size_src.njmax*1 * sizeof(c_double))
@property
def qfrc_constraint(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.qfrc_constraint, dtype=np.double, count=(self._size_src.nv*1)), (self._size_src.nv, 1, ))
arr.setflags(write=False)
return arr
@qfrc_constraint.setter
def qfrc_constraint(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.qfrc_constraint, val_ptr, self._size_src.nv*1 * sizeof(c_double))
@property
def qfrc_inverse(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.qfrc_inverse, dtype=np.double, count=(self._size_src.nv*1)), (self._size_src.nv, 1, ))
arr.setflags(write=False)
return arr
@qfrc_inverse.setter
def qfrc_inverse(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.qfrc_inverse, val_ptr, self._size_src.nv*1 * sizeof(c_double))
@property
def cacc(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.cacc, dtype=np.double, count=(self._size_src.nbody*6)), (self._size_src.nbody, 6, ))
arr.setflags(write=False)
return arr
@cacc.setter
def cacc(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.cacc, val_ptr, self._size_src.nbody*6 * sizeof(c_double))
@property
def cfrc_int(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.cfrc_int, dtype=np.double, count=(self._size_src.nbody*6)), (self._size_src.nbody, 6, ))
arr.setflags(write=False)
return arr
@cfrc_int.setter
def cfrc_int(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.cfrc_int, val_ptr, self._size_src.nbody*6 * sizeof(c_double))
@property
def cfrc_ext(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.cfrc_ext, dtype=np.double, count=(self._size_src.nbody*6)), (self._size_src.nbody, 6, ))
arr.setflags(write=False)
return arr
@cfrc_ext.setter
def cfrc_ext(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.cfrc_ext, val_ptr, self._size_src.nbody*6 * sizeof(c_double))
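# Illustrative usage sketch for the generated properties above (the wrapper and handle
# names in this comment are assumptions for the example, not taken from this file):
#
#   wrapper = SomeDataWrapper(data_ptr, size_src=size_struct)   # hypothetical handles
#   frc = wrapper.efc_force                 # read-only NumPy view, shape (njmax, 1)
#   wrapper.efc_force = np.zeros_like(frc)  # the setter memmoves new values into the struct
#
# Notes on the generated pattern:
#   * the np.int / np.float dtype aliases used by some getters were removed in NumPy 1.24,
#     so on current NumPy they would need np.int32 / np.float32 (or np.intc / np.single);
#   * every setter stages the value as a float64 array before reinterpreting the buffer as
#     the target C type, so non-double fields should arguably be cast to the matching dtype
#     before the memmove.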
class MjModelWrapper(object):
def __init__(self, wrapped, size_src=None):
self._wrapped = wrapped
self._size_src = size_src
@property
def ptr(self):
return self._wrapped
@property
def obj(self):
return self._wrapped.contents
@property
def nq(self):
return self._wrapped.contents.nq
@nq.setter
def nq(self, value):
self._wrapped.contents.nq = value
@property
def nv(self):
return self._wrapped.contents.nv
@nv.setter
def nv(self, value):
self._wrapped.contents.nv = value
@property
def nu(self):
return self._wrapped.contents.nu
@nu.setter
def nu(self, value):
self._wrapped.contents.nu = value
@property
def na(self):
return self._wrapped.contents.na
@na.setter
def na(self, value):
self._wrapped.contents.na = value
@property
def nbody(self):
return self._wrapped.contents.nbody
@nbody.setter
def nbody(self, value):
self._wrapped.contents.nbody = value
@property
def njnt(self):
return self._wrapped.contents.njnt
@njnt.setter
def njnt(self, value):
self._wrapped.contents.njnt = value
@property
def ngeom(self):
return self._wrapped.contents.ngeom
@ngeom.setter
def ngeom(self, value):
self._wrapped.contents.ngeom = value
@property
def nsite(self):
return self._wrapped.contents.nsite
@nsite.setter
def nsite(self, value):
self._wrapped.contents.nsite = value
@property
def ncam(self):
return self._wrapped.contents.ncam
@ncam.setter
def ncam(self, value):
self._wrapped.contents.ncam = value
@property
def nlight(self):
return self._wrapped.contents.nlight
@nlight.setter
def nlight(self, value):
self._wrapped.contents.nlight = value
@property
def nmesh(self):
return self._wrapped.contents.nmesh
@nmesh.setter
def nmesh(self, value):
self._wrapped.contents.nmesh = value
@property
def nmeshvert(self):
return self._wrapped.contents.nmeshvert
@nmeshvert.setter
def nmeshvert(self, value):
self._wrapped.contents.nmeshvert = value
@property
def nmeshface(self):
return self._wrapped.contents.nmeshface
@nmeshface.setter
def nmeshface(self, value):
self._wrapped.contents.nmeshface = value
@property
def nmeshgraph(self):
return self._wrapped.contents.nmeshgraph
@nmeshgraph.setter
def nmeshgraph(self, value):
self._wrapped.contents.nmeshgraph = value
@property
def nhfield(self):
return self._wrapped.contents.nhfield
@nhfield.setter
def nhfield(self, value):
self._wrapped.contents.nhfield = value
@property
def nhfielddata(self):
return self._wrapped.contents.nhfielddata
@nhfielddata.setter
def nhfielddata(self, value):
self._wrapped.contents.nhfielddata = value
@property
def ntex(self):
return self._wrapped.contents.ntex
@ntex.setter
def ntex(self, value):
self._wrapped.contents.ntex = value
@property
def ntexdata(self):
return self._wrapped.contents.ntexdata
@ntexdata.setter
def ntexdata(self, value):
self._wrapped.contents.ntexdata = value
@property
def nmat(self):
return self._wrapped.contents.nmat
@nmat.setter
def nmat(self, value):
self._wrapped.contents.nmat = value
@property
def npair(self):
return self._wrapped.contents.npair
@npair.setter
def npair(self, value):
self._wrapped.contents.npair = value
@property
def nexclude(self):
return self._wrapped.contents.nexclude
@nexclude.setter
def nexclude(self, value):
self._wrapped.contents.nexclude = value
@property
def neq(self):
return self._wrapped.contents.neq
@neq.setter
def neq(self, value):
self._wrapped.contents.neq = value
@property
def ntendon(self):
return self._wrapped.contents.ntendon
@ntendon.setter
def ntendon(self, value):
self._wrapped.contents.ntendon = value
@property
def nwrap(self):
return self._wrapped.contents.nwrap
@nwrap.setter
def nwrap(self, value):
self._wrapped.contents.nwrap = value
@property
def nsensor(self):
return self._wrapped.contents.nsensor
@nsensor.setter
def nsensor(self, value):
self._wrapped.contents.nsensor = value
@property
def nnumeric(self):
return self._wrapped.contents.nnumeric
@nnumeric.setter
def nnumeric(self, value):
self._wrapped.contents.nnumeric = value
@property
def nnumericdata(self):
return self._wrapped.contents.nnumericdata
@nnumericdata.setter
def nnumericdata(self, value):
self._wrapped.contents.nnumericdata = value
@property
def ntext(self):
return self._wrapped.contents.ntext
@ntext.setter
def ntext(self, value):
self._wrapped.contents.ntext = value
@property
def ntextdata(self):
return self._wrapped.contents.ntextdata
@ntextdata.setter
def ntextdata(self, value):
self._wrapped.contents.ntextdata = value
@property
def nkey(self):
return self._wrapped.contents.nkey
@nkey.setter
def nkey(self, value):
self._wrapped.contents.nkey = value
@property
def nuser_body(self):
return self._wrapped.contents.nuser_body
@nuser_body.setter
def nuser_body(self, value):
self._wrapped.contents.nuser_body = value
@property
def nuser_jnt(self):
return self._wrapped.contents.nuser_jnt
@nuser_jnt.setter
def nuser_jnt(self, value):
self._wrapped.contents.nuser_jnt = value
@property
def nuser_geom(self):
return self._wrapped.contents.nuser_geom
@nuser_geom.setter
def nuser_geom(self, value):
self._wrapped.contents.nuser_geom = value
@property
def nuser_site(self):
return self._wrapped.contents.nuser_site
@nuser_site.setter
def nuser_site(self, value):
self._wrapped.contents.nuser_site = value
@property
def nuser_tendon(self):
return self._wrapped.contents.nuser_tendon
@nuser_tendon.setter
def nuser_tendon(self, value):
self._wrapped.contents.nuser_tendon = value
@property
def nuser_actuator(self):
return self._wrapped.contents.nuser_actuator
@nuser_actuator.setter
def nuser_actuator(self, value):
self._wrapped.contents.nuser_actuator = value
@property
def nuser_sensor(self):
return self._wrapped.contents.nuser_sensor
@nuser_sensor.setter
def nuser_sensor(self, value):
self._wrapped.contents.nuser_sensor = value
@property
def nnames(self):
return self._wrapped.contents.nnames
@nnames.setter
def nnames(self, value):
self._wrapped.contents.nnames = value
@property
def nM(self):
return self._wrapped.contents.nM
@nM.setter
def nM(self, value):
self._wrapped.contents.nM = value
@property
def nemax(self):
return self._wrapped.contents.nemax
@nemax.setter
def nemax(self, value):
self._wrapped.contents.nemax = value
@property
def njmax(self):
return self._wrapped.contents.njmax
@njmax.setter
def njmax(self, value):
self._wrapped.contents.njmax = value
@property
def nconmax(self):
return self._wrapped.contents.nconmax
@nconmax.setter
def nconmax(self, value):
self._wrapped.contents.nconmax = value
@property
def nstack(self):
return self._wrapped.contents.nstack
@nstack.setter
def nstack(self, value):
self._wrapped.contents.nstack = value
@property
def nuserdata(self):
return self._wrapped.contents.nuserdata
@nuserdata.setter
def nuserdata(self, value):
self._wrapped.contents.nuserdata = value
@property
def nmocap(self):
return self._wrapped.contents.nmocap
@nmocap.setter
def nmocap(self, value):
self._wrapped.contents.nmocap = value
@property
def nsensordata(self):
return self._wrapped.contents.nsensordata
@nsensordata.setter
def nsensordata(self, value):
self._wrapped.contents.nsensordata = value
@property
def nbuffer(self):
return self._wrapped.contents.nbuffer
@nbuffer.setter
def nbuffer(self, value):
self._wrapped.contents.nbuffer = value
@property
def opt(self):
return self._wrapped.contents.opt
@opt.setter
def opt(self, value):
self._wrapped.contents.opt = value
@property
def vis(self):
return self._wrapped.contents.vis
@vis.setter
def vis(self, value):
self._wrapped.contents.vis = value
@property
def stat(self):
return self._wrapped.contents.stat
@stat.setter
def stat(self, value):
self._wrapped.contents.stat = value
@property
def buffer(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.buffer, dtype=np.uint8, count=(self.nbuffer)), (self.nbuffer, ))
arr.setflags(write=False)
return arr
@buffer.setter
def buffer(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_ubyte))
memmove(self._wrapped.contents.buffer, val_ptr, self.nbuffer * sizeof(c_ubyte))
@property
def qpos0(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.qpos0, dtype=np.double, count=(self.nq*1)), (self.nq, 1, ))
arr.setflags(write=False)
return arr
@qpos0.setter
def qpos0(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.qpos0, val_ptr, self.nq*1 * sizeof(c_double))
@property
def qpos_spring(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.qpos_spring, dtype=np.double, count=(self.nq*1)), (self.nq, 1, ))
arr.setflags(write=False)
return arr
@qpos_spring.setter
def qpos_spring(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.qpos_spring, val_ptr, self.nq*1 * sizeof(c_double))
@property
def body_parentid(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.body_parentid, dtype=np.int, count=(self.nbody*1)), (self.nbody, 1, ))
arr.setflags(write=False)
return arr
@body_parentid.setter
def body_parentid(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.body_parentid, val_ptr, self.nbody*1 * sizeof(c_int))
@property
def body_rootid(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.body_rootid, dtype=np.int, count=(self.nbody*1)), (self.nbody, 1, ))
arr.setflags(write=False)
return arr
@body_rootid.setter
def body_rootid(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.body_rootid, val_ptr, self.nbody*1 * sizeof(c_int))
@property
def body_weldid(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.body_weldid, dtype=np.int, count=(self.nbody*1)), (self.nbody, 1, ))
arr.setflags(write=False)
return arr
@body_weldid.setter
def body_weldid(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.body_weldid, val_ptr, self.nbody*1 * sizeof(c_int))
@property
def body_mocapid(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.body_mocapid, dtype=np.int, count=(self.nbody*1)), (self.nbody, 1, ))
arr.setflags(write=False)
return arr
@body_mocapid.setter
def body_mocapid(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.body_mocapid, val_ptr, self.nbody*1 * sizeof(c_int))
@property
def body_jntnum(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.body_jntnum, dtype=np.int, count=(self.nbody*1)), (self.nbody, 1, ))
arr.setflags(write=False)
return arr
@body_jntnum.setter
def body_jntnum(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.body_jntnum, val_ptr, self.nbody*1 * sizeof(c_int))
@property
def body_jntadr(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.body_jntadr, dtype=np.int, count=(self.nbody*1)), (self.nbody, 1, ))
arr.setflags(write=False)
return arr
@body_jntadr.setter
def body_jntadr(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.body_jntadr, val_ptr, self.nbody*1 * sizeof(c_int))
@property
def body_dofnum(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.body_dofnum, dtype=np.int, count=(self.nbody*1)), (self.nbody, 1, ))
arr.setflags(write=False)
return arr
@body_dofnum.setter
def body_dofnum(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.body_dofnum, val_ptr, self.nbody*1 * sizeof(c_int))
@property
def body_dofadr(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.body_dofadr, dtype=np.int, count=(self.nbody*1)), (self.nbody, 1, ))
arr.setflags(write=False)
return arr
@body_dofadr.setter
def body_dofadr(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.body_dofadr, val_ptr, self.nbody*1 * sizeof(c_int))
@property
def body_geomnum(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.body_geomnum, dtype=np.int, count=(self.nbody*1)), (self.nbody, 1, ))
arr.setflags(write=False)
return arr
@body_geomnum.setter
def body_geomnum(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.body_geomnum, val_ptr, self.nbody*1 * sizeof(c_int))
@property
def body_geomadr(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.body_geomadr, dtype=np.int, count=(self.nbody*1)), (self.nbody, 1, ))
arr.setflags(write=False)
return arr
@body_geomadr.setter
def body_geomadr(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.body_geomadr, val_ptr, self.nbody*1 * sizeof(c_int))
@property
def body_pos(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.body_pos, dtype=np.double, count=(self.nbody*3)), (self.nbody, 3, ))
arr.setflags(write=False)
return arr
@body_pos.setter
def body_pos(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.body_pos, val_ptr, self.nbody*3 * sizeof(c_double))
@property
def body_quat(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.body_quat, dtype=np.double, count=(self.nbody*4)), (self.nbody, 4, ))
arr.setflags(write=False)
return arr
@body_quat.setter
def body_quat(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.body_quat, val_ptr, self.nbody*4 * sizeof(c_double))
@property
def body_ipos(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.body_ipos, dtype=np.double, count=(self.nbody*3)), (self.nbody, 3, ))
arr.setflags(write=False)
return arr
@body_ipos.setter
def body_ipos(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.body_ipos, val_ptr, self.nbody*3 * sizeof(c_double))
@property
def body_iquat(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.body_iquat, dtype=np.double, count=(self.nbody*4)), (self.nbody, 4, ))
arr.setflags(write=False)
return arr
@body_iquat.setter
def body_iquat(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.body_iquat, val_ptr, self.nbody*4 * sizeof(c_double))
@property
def body_mass(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.body_mass, dtype=np.double, count=(self.nbody*1)), (self.nbody, 1, ))
arr.setflags(write=False)
return arr
@body_mass.setter
def body_mass(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.body_mass, val_ptr, self.nbody*1 * sizeof(c_double))
@property
def body_inertia(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.body_inertia, dtype=np.double, count=(self.nbody*3)), (self.nbody, 3, ))
arr.setflags(write=False)
return arr
@body_inertia.setter
def body_inertia(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.body_inertia, val_ptr, self.nbody*3 * sizeof(c_double))
@property
def body_invweight0(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.body_invweight0, dtype=np.double, count=(self.nbody*2)), (self.nbody, 2, ))
arr.setflags(write=False)
return arr
@body_invweight0.setter
def body_invweight0(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.body_invweight0, val_ptr, self.nbody*2 * sizeof(c_double))
@property
def body_user(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.body_user, dtype=np.double, count=(self.nbody*self.nuser_body)), (self.nbody, self.nuser_body, ))
arr.setflags(write=False)
return arr
@body_user.setter
def body_user(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.body_user, val_ptr, self.nbody*self.nuser_body * sizeof(c_double))
@property
def jnt_type(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.jnt_type, dtype=np.int, count=(self.njnt*1)), (self.njnt, 1, ))
arr.setflags(write=False)
return arr
@jnt_type.setter
def jnt_type(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.jnt_type, val_ptr, self.njnt*1 * sizeof(c_int))
@property
def jnt_qposadr(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.jnt_qposadr, dtype=np.int, count=(self.njnt*1)), (self.njnt, 1, ))
arr.setflags(write=False)
return arr
@jnt_qposadr.setter
def jnt_qposadr(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.jnt_qposadr, val_ptr, self.njnt*1 * sizeof(c_int))
@property
def jnt_dofadr(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.jnt_dofadr, dtype=np.int, count=(self.njnt*1)), (self.njnt, 1, ))
arr.setflags(write=False)
return arr
@jnt_dofadr.setter
def jnt_dofadr(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.jnt_dofadr, val_ptr, self.njnt*1 * sizeof(c_int))
@property
def jnt_bodyid(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.jnt_bodyid, dtype=np.int, count=(self.njnt*1)), (self.njnt, 1, ))
arr.setflags(write=False)
return arr
@jnt_bodyid.setter
def jnt_bodyid(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.jnt_bodyid, val_ptr, self.njnt*1 * sizeof(c_int))
@property
def jnt_limited(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.jnt_limited, dtype=np.uint8, count=(self.njnt*1)), (self.njnt, 1, ))
arr.setflags(write=False)
return arr
@jnt_limited.setter
def jnt_limited(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_ubyte))
memmove(self._wrapped.contents.jnt_limited, val_ptr, self.njnt*1 * sizeof(c_ubyte))
@property
def jnt_solref(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.jnt_solref, dtype=np.double, count=(self.njnt*2)), (self.njnt, 2, ))
arr.setflags(write=False)
return arr
@jnt_solref.setter
def jnt_solref(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.jnt_solref, val_ptr, self.njnt*2 * sizeof(c_double))
@property
def jnt_solimp(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.jnt_solimp, dtype=np.double, count=(self.njnt*3)), (self.njnt, 3, ))
arr.setflags(write=False)
return arr
@jnt_solimp.setter
def jnt_solimp(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.jnt_solimp, val_ptr, self.njnt*3 * sizeof(c_double))
@property
def jnt_pos(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.jnt_pos, dtype=np.double, count=(self.njnt*3)), (self.njnt, 3, ))
arr.setflags(write=False)
return arr
@jnt_pos.setter
def jnt_pos(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.jnt_pos, val_ptr, self.njnt*3 * sizeof(c_double))
@property
def jnt_axis(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.jnt_axis, dtype=np.double, count=(self.njnt*3)), (self.njnt, 3, ))
arr.setflags(write=False)
return arr
@jnt_axis.setter
def jnt_axis(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.jnt_axis, val_ptr, self.njnt*3 * sizeof(c_double))
@property
def jnt_stiffness(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.jnt_stiffness, dtype=np.double, count=(self.njnt*1)), (self.njnt, 1, ))
arr.setflags(write=False)
return arr
@jnt_stiffness.setter
def jnt_stiffness(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.jnt_stiffness, val_ptr, self.njnt*1 * sizeof(c_double))
@property
def jnt_range(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.jnt_range, dtype=np.double, count=(self.njnt*2)), (self.njnt, 2, ))
arr.setflags(write=False)
return arr
@jnt_range.setter
def jnt_range(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.jnt_range, val_ptr, self.njnt*2 * sizeof(c_double))
@property
def jnt_margin(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.jnt_margin, dtype=np.double, count=(self.njnt*1)), (self.njnt, 1, ))
arr.setflags(write=False)
return arr
@jnt_margin.setter
def jnt_margin(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.jnt_margin, val_ptr, self.njnt*1 * sizeof(c_double))
@property
def jnt_user(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.jnt_user, dtype=np.double, count=(self.njnt*self.nuser_jnt)), (self.njnt, self.nuser_jnt, ))
arr.setflags(write=False)
return arr
@jnt_user.setter
def jnt_user(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.jnt_user, val_ptr, self.njnt*self.nuser_jnt * sizeof(c_double))
@property
def dof_bodyid(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.dof_bodyid, dtype=np.int, count=(self.nv*1)), (self.nv, 1, ))
arr.setflags(write=False)
return arr
@dof_bodyid.setter
def dof_bodyid(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.dof_bodyid, val_ptr, self.nv*1 * sizeof(c_int))
@property
def dof_jntid(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.dof_jntid, dtype=np.int, count=(self.nv*1)), (self.nv, 1, ))
arr.setflags(write=False)
return arr
@dof_jntid.setter
def dof_jntid(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.dof_jntid, val_ptr, self.nv*1 * sizeof(c_int))
@property
def dof_parentid(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.dof_parentid, dtype=np.int, count=(self.nv*1)), (self.nv, 1, ))
arr.setflags(write=False)
return arr
@dof_parentid.setter
def dof_parentid(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.dof_parentid, val_ptr, self.nv*1 * sizeof(c_int))
@property
def dof_Madr(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.dof_Madr, dtype=np.int, count=(self.nv*1)), (self.nv, 1, ))
arr.setflags(write=False)
return arr
@dof_Madr.setter
def dof_Madr(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.dof_Madr, val_ptr, self.nv*1 * sizeof(c_int))
@property
def dof_frictional(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.dof_frictional, dtype=np.uint8, count=(self.nv*1)), (self.nv, 1, ))
arr.setflags(write=False)
return arr
@dof_frictional.setter
def dof_frictional(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_ubyte))
memmove(self._wrapped.contents.dof_frictional, val_ptr, self.nv*1 * sizeof(c_ubyte))
@property
def dof_solref(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.dof_solref, dtype=np.double, count=(self.nv*2)), (self.nv, 2, ))
arr.setflags(write=False)
return arr
@dof_solref.setter
def dof_solref(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.dof_solref, val_ptr, self.nv*2 * sizeof(c_double))
@property
def dof_solimp(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.dof_solimp, dtype=np.double, count=(self.nv*3)), (self.nv, 3, ))
arr.setflags(write=False)
return arr
@dof_solimp.setter
def dof_solimp(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.dof_solimp, val_ptr, self.nv*3 * sizeof(c_double))
@property
def dof_frictionloss(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.dof_frictionloss, dtype=np.double, count=(self.nv*1)), (self.nv, 1, ))
arr.setflags(write=False)
return arr
@dof_frictionloss.setter
def dof_frictionloss(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.dof_frictionloss, val_ptr, self.nv*1 * sizeof(c_double))
@property
def dof_armature(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.dof_armature, dtype=np.double, count=(self.nv*1)), (self.nv, 1, ))
arr.setflags(write=False)
return arr
@dof_armature.setter
def dof_armature(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.dof_armature, val_ptr, self.nv*1 * sizeof(c_double))
@property
def dof_damping(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.dof_damping, dtype=np.double, count=(self.nv*1)), (self.nv, 1, ))
arr.setflags(write=False)
return arr
@dof_damping.setter
def dof_damping(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.dof_damping, val_ptr, self.nv*1 * sizeof(c_double))
@property
def dof_invweight0(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.dof_invweight0, dtype=np.double, count=(self.nv*1)), (self.nv, 1, ))
arr.setflags(write=False)
return arr
@dof_invweight0.setter
def dof_invweight0(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.dof_invweight0, val_ptr, self.nv*1 * sizeof(c_double))
@property
def geom_type(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.geom_type, dtype=np.int, count=(self.ngeom*1)), (self.ngeom, 1, ))
arr.setflags(write=False)
return arr
@geom_type.setter
def geom_type(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.geom_type, val_ptr, self.ngeom*1 * sizeof(c_int))
@property
def geom_contype(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.geom_contype, dtype=np.int, count=(self.ngeom*1)), (self.ngeom, 1, ))
arr.setflags(write=False)
return arr
@geom_contype.setter
def geom_contype(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.geom_contype, val_ptr, self.ngeom*1 * sizeof(c_int))
@property
def geom_conaffinity(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.geom_conaffinity, dtype=np.int, count=(self.ngeom*1)), (self.ngeom, 1, ))
arr.setflags(write=False)
return arr
@geom_conaffinity.setter
def geom_conaffinity(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.geom_conaffinity, val_ptr, self.ngeom*1 * sizeof(c_int))
@property
def geom_condim(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.geom_condim, dtype=np.int, count=(self.ngeom*1)), (self.ngeom, 1, ))
arr.setflags(write=False)
return arr
@geom_condim.setter
def geom_condim(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.geom_condim, val_ptr, self.ngeom*1 * sizeof(c_int))
@property
def geom_bodyid(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.geom_bodyid, dtype=np.int, count=(self.ngeom*1)), (self.ngeom, 1, ))
arr.setflags(write=False)
return arr
@geom_bodyid.setter
def geom_bodyid(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.geom_bodyid, val_ptr, self.ngeom*1 * sizeof(c_int))
@property
def geom_dataid(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.geom_dataid, dtype=np.int, count=(self.ngeom*1)), (self.ngeom, 1, ))
arr.setflags(write=False)
return arr
@geom_dataid.setter
def geom_dataid(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.geom_dataid, val_ptr, self.ngeom*1 * sizeof(c_int))
@property
def geom_matid(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.geom_matid, dtype=np.int, count=(self.ngeom*1)), (self.ngeom, 1, ))
arr.setflags(write=False)
return arr
@geom_matid.setter
def geom_matid(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.geom_matid, val_ptr, self.ngeom*1 * sizeof(c_int))
@property
def geom_group(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.geom_group, dtype=np.int, count=(self.ngeom*1)), (self.ngeom, 1, ))
arr.setflags(write=False)
return arr
@geom_group.setter
def geom_group(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.geom_group, val_ptr, self.ngeom*1 * sizeof(c_int))
@property
def geom_solmix(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.geom_solmix, dtype=np.double, count=(self.ngeom*1)), (self.ngeom, 1, ))
arr.setflags(write=False)
return arr
@geom_solmix.setter
def geom_solmix(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.geom_solmix, val_ptr, self.ngeom*1 * sizeof(c_double))
@property
def geom_solref(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.geom_solref, dtype=np.double, count=(self.ngeom*2)), (self.ngeom, 2, ))
arr.setflags(write=False)
return arr
@geom_solref.setter
def geom_solref(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.geom_solref, val_ptr, self.ngeom*2 * sizeof(c_double))
@property
def geom_solimp(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.geom_solimp, dtype=np.double, count=(self.ngeom*3)), (self.ngeom, 3, ))
arr.setflags(write=False)
return arr
@geom_solimp.setter
def geom_solimp(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.geom_solimp, val_ptr, self.ngeom*3 * sizeof(c_double))
@property
def geom_size(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.geom_size, dtype=np.double, count=(self.ngeom*3)), (self.ngeom, 3, ))
arr.setflags(write=False)
return arr
@geom_size.setter
def geom_size(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.geom_size, val_ptr, self.ngeom*3 * sizeof(c_double))
@property
def geom_rbound(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.geom_rbound, dtype=np.double, count=(self.ngeom*1)), (self.ngeom, 1, ))
arr.setflags(write=False)
return arr
@geom_rbound.setter
def geom_rbound(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.geom_rbound, val_ptr, self.ngeom*1 * sizeof(c_double))
@property
def geom_pos(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.geom_pos, dtype=np.double, count=(self.ngeom*3)), (self.ngeom, 3, ))
arr.setflags(write=False)
return arr
@geom_pos.setter
def geom_pos(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.geom_pos, val_ptr, self.ngeom*3 * sizeof(c_double))
@property
def geom_quat(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.geom_quat, dtype=np.double, count=(self.ngeom*4)), (self.ngeom, 4, ))
arr.setflags(write=False)
return arr
@geom_quat.setter
def geom_quat(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.geom_quat, val_ptr, self.ngeom*4 * sizeof(c_double))
@property
def geom_friction(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.geom_friction, dtype=np.double, count=(self.ngeom*3)), (self.ngeom, 3, ))
arr.setflags(write=False)
return arr
@geom_friction.setter
def geom_friction(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.geom_friction, val_ptr, self.ngeom*3 * sizeof(c_double))
@property
def geom_margin(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.geom_margin, dtype=np.double, count=(self.ngeom*1)), (self.ngeom, 1, ))
arr.setflags(write=False)
return arr
@geom_margin.setter
def geom_margin(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.geom_margin, val_ptr, self.ngeom*1 * sizeof(c_double))
@property
def geom_gap(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.geom_gap, dtype=np.double, count=(self.ngeom*1)), (self.ngeom, 1, ))
arr.setflags(write=False)
return arr
@geom_gap.setter
def geom_gap(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.geom_gap, val_ptr, self.ngeom*1 * sizeof(c_double))
@property
def geom_user(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.geom_user, dtype=np.double, count=(self.ngeom*self.nuser_geom)), (self.ngeom, self.nuser_geom, ))
arr.setflags(write=False)
return arr
@geom_user.setter
def geom_user(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.geom_user, val_ptr, self.ngeom*self.nuser_geom * sizeof(c_double))
@property
def geom_rgba(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.geom_rgba, dtype=np.float, count=(self.ngeom*4)), (self.ngeom, 4, ))
arr.setflags(write=False)
return arr
@geom_rgba.setter
def geom_rgba(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_float))
memmove(self._wrapped.contents.geom_rgba, val_ptr, self.ngeom*4 * sizeof(c_float))
@property
def site_type(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.site_type, dtype=np.int, count=(self.nsite*1)), (self.nsite, 1, ))
arr.setflags(write=False)
return arr
@site_type.setter
def site_type(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.site_type, val_ptr, self.nsite*1 * sizeof(c_int))
@property
def site_bodyid(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.site_bodyid, dtype=np.int, count=(self.nsite*1)), (self.nsite, 1, ))
arr.setflags(write=False)
return arr
@site_bodyid.setter
def site_bodyid(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.site_bodyid, val_ptr, self.nsite*1 * sizeof(c_int))
@property
def site_matid(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.site_matid, dtype=np.int, count=(self.nsite*1)), (self.nsite, 1, ))
arr.setflags(write=False)
return arr
@site_matid.setter
def site_matid(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.site_matid, val_ptr, self.nsite*1 * sizeof(c_int))
@property
def site_group(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.site_group, dtype=np.int, count=(self.nsite*1)), (self.nsite, 1, ))
arr.setflags(write=False)
return arr
@site_group.setter
def site_group(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.site_group, val_ptr, self.nsite*1 * sizeof(c_int))
@property
def site_size(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.site_size, dtype=np.double, count=(self.nsite*3)), (self.nsite, 3, ))
arr.setflags(write=False)
return arr
@site_size.setter
def site_size(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.site_size, val_ptr, self.nsite*3 * sizeof(c_double))
@property
def site_pos(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.site_pos, dtype=np.double, count=(self.nsite*3)), (self.nsite, 3, ))
arr.setflags(write=False)
return arr
@site_pos.setter
def site_pos(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.site_pos, val_ptr, self.nsite*3 * sizeof(c_double))
@property
def site_quat(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.site_quat, dtype=np.double, count=(self.nsite*4)), (self.nsite, 4, ))
arr.setflags(write=False)
return arr
@site_quat.setter
def site_quat(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.site_quat, val_ptr, self.nsite*4 * sizeof(c_double))
@property
def site_user(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.site_user, dtype=np.double, count=(self.nsite*self.nuser_site)), (self.nsite, self.nuser_site, ))
arr.setflags(write=False)
return arr
@site_user.setter
def site_user(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.site_user, val_ptr, self.nsite*self.nuser_site * sizeof(c_double))
@property
def site_rgba(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.site_rgba, dtype=np.float, count=(self.nsite*4)), (self.nsite, 4, ))
arr.setflags(write=False)
return arr
@site_rgba.setter
def site_rgba(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_float))
memmove(self._wrapped.contents.site_rgba, val_ptr, self.nsite*4 * sizeof(c_float))
@property
def cam_mode(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.cam_mode, dtype=np.int, count=(self.ncam*1)), (self.ncam, 1, ))
arr.setflags(write=False)
return arr
@cam_mode.setter
def cam_mode(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.cam_mode, val_ptr, self.ncam*1 * sizeof(c_int))
@property
def cam_bodyid(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.cam_bodyid, dtype=np.int, count=(self.ncam*1)), (self.ncam, 1, ))
arr.setflags(write=False)
return arr
@cam_bodyid.setter
def cam_bodyid(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.cam_bodyid, val_ptr, self.ncam*1 * sizeof(c_int))
@property
def cam_targetbodyid(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.cam_targetbodyid, dtype=np.int, count=(self.ncam*1)), (self.ncam, 1, ))
arr.setflags(write=False)
return arr
@cam_targetbodyid.setter
def cam_targetbodyid(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.cam_targetbodyid, val_ptr, self.ncam*1 * sizeof(c_int))
@property
def cam_pos(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.cam_pos, dtype=np.double, count=(self.ncam*3)), (self.ncam, 3, ))
arr.setflags(write=False)
return arr
@cam_pos.setter
def cam_pos(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.cam_pos, val_ptr, self.ncam*3 * sizeof(c_double))
@property
def cam_quat(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.cam_quat, dtype=np.double, count=(self.ncam*4)), (self.ncam, 4, ))
arr.setflags(write=False)
return arr
@cam_quat.setter
def cam_quat(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.cam_quat, val_ptr, self.ncam*4 * sizeof(c_double))
@property
def cam_poscom0(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.cam_poscom0, dtype=np.double, count=(self.ncam*3)), (self.ncam, 3, ))
arr.setflags(write=False)
return arr
@cam_poscom0.setter
def cam_poscom0(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.cam_poscom0, val_ptr, self.ncam*3 * sizeof(c_double))
@property
def cam_pos0(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.cam_pos0, dtype=np.double, count=(self.ncam*3)), (self.ncam, 3, ))
arr.setflags(write=False)
return arr
@cam_pos0.setter
def cam_pos0(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.cam_pos0, val_ptr, self.ncam*3 * sizeof(c_double))
@property
def cam_mat0(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.cam_mat0, dtype=np.double, count=(self.ncam*9)), (self.ncam, 9, ))
arr.setflags(write=False)
return arr
@cam_mat0.setter
def cam_mat0(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.cam_mat0, val_ptr, self.ncam*9 * sizeof(c_double))
@property
def cam_fovy(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.cam_fovy, dtype=np.double, count=(self.ncam*1)), (self.ncam, 1, ))
arr.setflags(write=False)
return arr
@cam_fovy.setter
def cam_fovy(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.cam_fovy, val_ptr, self.ncam*1 * sizeof(c_double))
@property
def cam_ipd(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.cam_ipd, dtype=np.double, count=(self.ncam*1)), (self.ncam, 1, ))
arr.setflags(write=False)
return arr
@cam_ipd.setter
def cam_ipd(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.cam_ipd, val_ptr, self.ncam*1 * sizeof(c_double))
@property
def light_mode(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.light_mode, dtype=np.int32, count=(self.nlight*1)), (self.nlight, 1, ))
arr.setflags(write=False)
return arr
@light_mode.setter
def light_mode(self, value):
val_ptr =
|
np.array(value, dtype=np.float64)
|
numpy.array
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence, pad_sequence
from torch.utils.data import Dataset, DataLoader
from torch.utils.tensorboard import SummaryWriter
import collections
import glob
import numpy as np
import tqdm
import os
import time
import datetime
from pytz import timezone
from fnm_ccar_dset import FNMCCARDataset, paddingCollator, load_data
class LinearBlock(nn.Module):
def __init__(self, input_size, linear_conf):
super(LinearBlock, self).__init__()
dc = collections.OrderedDict()
prev_s = input_size
for i, (s, r) in enumerate(linear_conf):
dc['linear'+str(i+1)]=nn.Linear(prev_s, s)
dc['relu'+str(i+1)] = nn.ReLU(inplace=True)
dc['batchn'+str(i+1)] = nn.BatchNorm1d(s)
dc['drpout'+str(i+1)] = nn.Dropout(r)
prev_s = s
self.linblock = nn.Sequential(dc)
def forward(self, x):
return self.linblock(x)
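# A minimal sketch (illustrative values, not from the original) of the linear_conf format
# consumed by LinearBlock: each entry is a (hidden_size, dropout_rate) tuple expanded into
# Linear -> ReLU -> BatchNorm1d -> Dropout.
#   block = LinearBlock(input_size=256, linear_conf=[(128, 0.3), (64, 0.3)])
#   y = block(torch.randn(32, 256))   # -> shape (32, 64)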
class MacroMortEcoder(nn.Module):
def __init__(self, seq_n_features, lstm_conf, emb_acq_dims, emb_seq_dims, linear_conf):
super(MacroMortEcoder, self).__init__()
emb_acq_dict = collections.OrderedDict()
emb_acq_dim_sum = 0
for name, c, d in emb_acq_dims:
emb_acq_dict[name] = nn.Embedding(c, d)
emb_acq_dim_sum += d
self.emb_acq = nn.ModuleDict(emb_acq_dict)
emb_seq_dict = collections.OrderedDict()
emb_seq_dim_sum = 0
for name, c, d in emb_seq_dims:
emb_seq_dict[name] = nn.Embedding(c, d)
emb_seq_dim_sum += d
self.emb_seq = nn.ModuleDict(emb_seq_dict)
emb_dim_total = emb_acq_dim_sum + emb_seq_dim_sum
lstm_size = lstm_conf['lstm_size']
lstm_layers = lstm_conf['lstm_layers']
lstm_dropout = lstm_conf['lstm_dropout']
self.lstm = nn.LSTM(
input_size = seq_n_features + emb_dim_total,
hidden_size = lstm_size,
num_layers = lstm_layers,
dropout = lstm_dropout,
batch_first = True,
bidirectional = True
)
#lin_block_input = (1 + self.lstm.bidirectional) * \
# self.lstm.num_layers * self.lstm.hidden_size
self.linblock = LinearBlock(self.lstm.hidden_size, linear_conf)
def forward(self, seq, seq_len, ymd, acq):
ea = [x(acq[:,i]) for i, (a, x) in enumerate(self.emb_acq.items())]
ea = torch.cat(ea, 1)
ea = ea.reshape(seq.shape[0], -1, ea.shape[1])\
.expand(-1, seq.shape[1], -1)
ey = [x(ymd[:,:,i]) for i, (a, x) in enumerate(self.emb_seq.items())]
ey = torch.cat(ey, 2)
s = torch.cat([seq, ea, ey], 2)
self.lstm.flatten_parameters()
packed_input = pack_padded_sequence(s, seq_len, batch_first=True)
_, (ht, _) = self.lstm(packed_input)
# move batch from dim 1 to 0
#out = ht.permute(1, 0, 2).view(-1,
# 2 * self.lstm.num_layers * self.lstm.hidden_size)
out = ht.view(self.lstm.num_layers, 2, -1, self.lstm.hidden_size)
out = out[-1, 0, :, :] + out[-1, 1, :, :]
out = self.linblock(out)
# middle = int(out.shape[1]/2)
# m = out[:, :middle]
# s = out[:, middle:]
# return m,s
batch_idx = torch.arange(seq.shape[0], device=seq.device)
dlq = seq[batch_idx, seq_len.long()-1, -9:]
return out, dlq
class MacroMortDecoder(nn.Module):
def __init__(self, input_size, lstm_conf, pre_lin_conf, post_lin_conf):
super(MacroMortDecoder, self).__init__()
self.pre_linblock = LinearBlock(input_size, pre_lin_conf)
self.lstm = nn.LSTM(
input_size = lstm_conf['input_size'],
hidden_size = lstm_conf['lstm_size'],
num_layers = lstm_conf['lstm_layers'],
dropout = lstm_conf['lstm_dropout'],
batch_first = False,
bidirectional = False
)
#post_input_size = (1 + self.lstm.bidirectional) * \
# self.lstm.num_layers * self.lstm.hidden_size
self.post_linblock = LinearBlock(self.lstm.hidden_size+9, post_lin_conf)
def forward(self, zim, dlq, macro):
self.lstm.flatten_parameters()
zim = self.pre_linblock(zim)
out = []
for t in range(12):
decinp = torch.cat([zim, dlq, macro[:, t, :]], 1)
decinp = decinp.unsqueeze(0) # sequence length is 1
_, (hx, _) = self.lstm(decinp)
lstm_out = hx[-1, :, :]
lstm_out = torch.cat([dlq, lstm_out], 1)
dlq_dist = self.post_linblock(lstm_out)
out.append(dlq_dist)
# batch, 9
_, idx = F.softmax(dlq_dist, 1).max(1)
dlq = torch.zeros_like(dlq)
dlq[torch.arange(dlq.shape[0]), idx]=1
dlq_seq = torch.stack(out, 2) # (batch, 9[0,...,7,EOS], 12)
return dlq_seq
class CCARM4Model(nn.Module):
r"""
Encoder-decoder model: MacroMortEcoder summarises the input sequence (plus acquisition and
calendar embeddings) into a latent vector, and MacroMortDecoder unrolls it for 12 steps to
produce a DLQ distribution sequence.
"""
def __init__(self, macroMortEcoder, macroMortDecoder):
"""
Parameters:
seq_n_features: number of features inside sequence. note that first
9 (0-6 + 12) represent one hot encoding for DLQ
lstm_size: the size of hidden unit in LSTM
linear_size: the number of output units for the first linear layer
embed_dims: the list of tuples, where the first element of the tuple
is the number of categories and the second element of the
tuple is the embedding size
embed_drp: drop out percentage after embedding layer
"""
super(CCARM4Model, self).__init__()
self.encoder = macroMortEcoder
self.decoder = macroMortDecoder
def forward(self, seq, seq_len, ymd, acq, macro_pred):
out, dlq = self.encoder(seq, seq_len, ymd, acq)
dlq_seq = self.decoder(out, dlq, macro_pred)
return dlq_seq
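# A hedged wiring sketch (all sizes below are illustrative assumptions, not values from the original):
# emb_acq_dims / emb_seq_dims are lists of (name, num_categories, embedding_dim) tuples, and the
# decoder LSTM input size must equal the encoder output size + 9 (one-hot DLQ) + number of macro features.
#   encoder = MacroMortEcoder(seq_n_features=20,
#                             lstm_conf={'lstm_size': 64, 'lstm_layers': 2, 'lstm_dropout': 0.2},
#                             emb_acq_dims=[('state', 54, 8)], emb_seq_dims=[('month', 12, 4)],
#                             linear_conf=[(64, 0.3)])
#   decoder = MacroMortDecoder(input_size=64,
#                              lstm_conf={'input_size': 64 + 9 + 5, 'lstm_size': 64,
#                                         'lstm_layers': 1, 'lstm_dropout': 0.0},
#                              pre_lin_conf=[(64, 0.3)], post_lin_conf=[(9, 0.0)])
#   model = CCARM4Model(encoder, decoder)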
def moving_average(a, n=3):
ret = np.cumsum(a, dtype=float)
ret[n:] = ret[n:] - ret[:-n]
return ret[n - 1:] / n
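# Worked example of the moving average above: for a = [1., 2., 3., 4., 5.] and n = 3,
# the cumulative sum is [1, 3, 6, 10, 15], the windowed differences give [6, 9, 12],
# and the function returns [2., 3., 4.] -- the mean of each length-3 window.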
class Losses:
def __init__(self, patience=10):
self.train_losses = []
self.valid_losses = []
self.test_losses = []
def addValidLoss(self, item):
self.valid_losses.append(item)
class TrainingContext:
def __init__(self, model_path, model, loss_function, optimizer,
trainDL, validDL, SAVE_EVERY,
PRINT_EVERY):
self.model_path = model_path
self.model = model
self.loss_function = loss_function
self.optimizer = optimizer
self.trainDL = trainDL
self.validDL = validDL
self.train_losses = []
self.valid_losses = []
self.SAVE_EVERY = SAVE_EVERY
self.PRINT_EVERY = PRINT_EVERY
self.device = torch.device("cpu")
def trainStep(self, seq, seq_len, ymd, acq, macro_pred, target):
self.model.zero_grad()
target_hat = self.model(seq, seq_len, ymd, acq, macro_pred)
target = target.to(target_hat.device)
loss = self.loss_function(target_hat, target)
loss_item = loss.item()
loss.backward()
self.optimizer.step()
return loss_item
def trainLoop(self, epoch):
self.model.train()
tq = tqdm.tqdm(self.trainDL)
losses = []
for bidx, (seq, seq_len, ymd, acq, macro_pred, target) in enumerate(tq):
tq.set_description('Train: %i' % bidx)
loss_item = self.trainStep(seq, seq_len, ymd, acq, macro_pred, target)
losses.append(loss_item)
if bidx % self.PRINT_EVERY == 0:
mean_loss =
|
np.mean(losses)
|
numpy.mean
|
#!/usr/bin/env python
""" Remove nan from vertex coordinates and uv coordinates
"""
import argparse
import pymesh
import numpy as np
def parse_args():
parser = argparse.ArgumentParser(__doc__);
parser.add_argument("input_mesh");
parser.add_argument("output_mesh");
return parser.parse_args();
def main():
args = parse_args();
mesh = pymesh.load_mesh(args.input_mesh);
assert(mesh.has_attribute("corner_texture"));
mesh.enable_connectivity();
vertices = np.copy(mesh.vertices);
bad_vertex = np.logical_not(np.all(np.isfinite(mesh.vertices), axis=1));
bad_vertex_indices =
|
np.arange(mesh.num_vertices, dtype=int)
|
numpy.arange
|
#!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2014, <NAME>, Social Robotics Lab, University of Freiburg
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Allows to reconstruct odometry from /additional_odom_data topic, using
# new calibration factors, as well as plotting the overall path that was travelled.
# Requires a bag file to be played back using rosbag play / rqt_bag.
import rospy, math, numpy, tf
from collections import deque
from spencer_bagfile_tools.msg import AdditionalOdometryData
from dynamic_reconfigure.server import Server
from spencer_bagfile_tools.cfg import ReconstructOdometryConfig
from visualization_msgs.msg import MarkerArray, Marker
from geometry_msgs.msg import Point, Quaternion
from std_msgs.msg import ColorRGBA
from nav_msgs.msg import Odometry
class State(object):
def __init__(self):
self.x = self.y = self.theta = 0
self.totalDistance = 0
self.stamp = rospy.Time(0)
class OdometryController(object):
def __init__(self):
self.msgHistory = []
self.stateHistory = self.emptyStateHistory()
self.previousMsg = self.previousState = None
self.rebuildingEntirePath = False
self.zeroPosition()
self.WHEEL_BASE = 0.665
self.TICKS_PER_METER_LEFT = 56263.5
self.TICKS_PER_METER_RIGHT = 57099.7
self.previousTimestampMarkerCount = 0
def zeroPosition(self):
self.stateHistory.append(State())
self.previousState = self.stateHistory[0]
def run(self):
self.markerArrayPublisher = rospy.Publisher("/spencer_bagfile_tools/reconstructed_odom_path", MarkerArray, queue_size=1)
self.odomPublisher = rospy.Publisher("/spencer/sensors/odom", Odometry, queue_size=3)
reconfigureServer = Server(ReconstructOdometryConfig, self.reconfigure)
topicName = "/spencer/sensors/additional_odom_data"
self.subscriber = rospy.Subscriber(topicName, AdditionalOdometryData, self.additionalOdometryDataCallback)
rospy.loginfo("Reconstructing odometry from " + topicName + ", now listening for messages...")
rospy.spin()
def additionalOdometryDataCallback(self, msg):
if not self.rebuildingEntirePath:
self.updateState(msg)
self.msgHistory.append(msg)
self.publishOdom()
self.visualizePath()
def reconfigure(self, config, level):
self.extraCalibOverallMultiplier = config["extra_calib_overall_multiplier"]
self.extraCalibLeftMultiplier = config["extra_calib_left_multiplier"]
self.lineWidth = config["line_width"]
self.arrowLength = config["arrow_length"]
self.showWaypoints = config["show_waypoints"]
self.recalculatePath = config["recalculate_path"]
if level > 0 and self.recalculatePath:
self.rebuildEntirePath()
return config
def emptyStateHistory(self):
# Limit max. state history length to prevent bad performance after driving for a while
# NOTE: msgHistory might still grow unboundedly, but there's no way of avoiding that...
# However, that is mainly a memory issue as the whole history is only processed in rebuildEntirePath()
return deque(maxlen=5000)
def rebuildEntirePath(self):
rospy.loginfo("Odometry parameters have changed! Rebuilding entire path!")
if self.rebuildingEntirePath:
return
self.rebuildingEntirePath = True
self.stateHistory = self.emptyStateHistory()
self.zeroPosition()
self.previousMsg = None
for msg in self.msgHistory:
self.updateState(msg)
self.rebuildingEntirePath = False
self.publishOdom()
self.visualizePath()
def updateState(self, msg):
newState = State()
newState.stamp = msg.header.stamp
previousLeftTicks = self.previousMsg.ticksLeft if self.previousMsg else msg.ticksLeft
previousRightTicks = self.previousMsg.ticksRight if self.previousMsg else msg.ticksRight
leftDiff = msg.ticksLeft - previousLeftTicks
rightDiff = msg.ticksRight - previousRightTicks
# Calculate metric travelled distances of both wheels and the base
metersTravelledLeft = leftDiff * msg.calibOverallMultiplier * self.extraCalibOverallMultiplier * msg.calibLeftEncMultiplier * self.extraCalibLeftMultiplier / self.TICKS_PER_METER_LEFT
metersTravelledRight = rightDiff * msg.calibOverallMultiplier * self.extraCalibOverallMultiplier / self.TICKS_PER_METER_RIGHT
distance = (metersTravelledLeft + metersTravelledRight) / 2.0
# Update position and bearing
newState.theta = self.previousState.theta + (metersTravelledLeft - metersTravelledRight) / self.WHEEL_BASE
newState.theta -= (int((newState.theta/(2*math.pi) ))) * 2*math.pi # clip to 2pi
newState.totalDistance = self.previousState.totalDistance + math.fabs(distance)
newState.x = self.previousState.x + distance * math.sin(newState.theta)
newState.y = self.previousState.y + distance * math.cos(newState.theta)
positionTolerance = 0.1 # in meters
if math.hypot(newState.x - self.stateHistory[-1].x, newState.y - self.stateHistory[-1].y) > positionTolerance:
# Do not cache every single state if the change in position is minimal, otherwise we'll soon run
# out of memory (note we still store previousState, since it is needed by publishOdom() and updateState())
self.stateHistory.append(newState)
self.previousState = newState # FIXME
self.previousMsg = msg
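# Sanity check for the update above (not part of the original): if both wheels travel the same
# metric distance d, metersTravelledLeft == metersTravelledRight, so theta stays constant and the
# pose simply advances d metres along the current heading: x += d*sin(theta), y += d*cos(theta).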
def publishOdom(self):
odom = Odometry()
odom.header.stamp = self.previousMsg.header.stamp if self.previousMsg else rospy.Time.now()
odom.header.frame_id = "odom"
odom.pose.pose.position.x = self.previousState.x
odom.pose.pose.position.y = self.previousState.y
for row in xrange(0, 6):
for col in xrange(0, 6):
odom.pose.covariance[6*row+col] = 0 if row != col else 0.1
odom.twist.covariance[6*row+col] = 0 if row != col else 999999
q = tf.transformations.quaternion_from_euler(0, 0, -self.previousState.theta + math.pi/2)
odom.pose.pose.orientation = Quaternion(x=q[0], y=q[1], z=q[2], w=q[3])
if len(self.stateHistory) >= 2:
odom.twist.twist.linear.x = odom.pose.pose.position.x - self.stateHistory[-2].x
odom.twist.twist.linear.y = odom.pose.pose.position.y - self.stateHistory[-2].y
self.odomPublisher.publish(odom)
def visualizePath(self):
if self.markerArrayPublisher.get_num_connections() <= 0:
return
markerArray = MarkerArray()
pathMarker = Marker()
pathMarker.header.stamp = rospy.Time.now()
pathMarker.header.frame_id = "odom"
pathMarker.ns = "Path"
pathMarker.type = Marker.LINE_STRIP
pathMarker.id = 0
pathMarker.color = ColorRGBA(r=1, g=1, a=1)
pathMarker.scale.x = 0.05 * self.lineWidth
waypointMarker = Marker()
waypointMarker.header = pathMarker.header
waypointMarker.ns = "Waypoints"
waypointMarker.type = Marker.SPHERE_LIST
waypointMarker.id = 1
waypointMarker.color = ColorRGBA(r=1, g=1, a=1)
waypointMarker.scale.x = waypointMarker.scale.y = 0.1 * self.lineWidth
lastWaypointTime = float("-inf")
lastWaypointPos = (float("99999"), float("99999"))
# Generate path and waypoints
for state in self.stateHistory:
pathMarker.points.append(Point(x=state.x, y=state.y))
if state.stamp.to_sec() - lastWaypointTime > 5 and self.showWaypoints:
dx = state.x - lastWaypointPos[0]
dy = state.y - lastWaypointPos[1]
if math.sqrt(dx*dx + dy*dy) > 1:
lastWaypointTime = state.stamp.to_sec()
lastWaypointPos = (state.x, state.y)
waypointMarker.points.append(Point(x=state.x, y=state.y))
timestampMarker = Marker()
timestampMarker.header = waypointMarker.header
timestampMarker.ns = "Timestamps"
timestampMarker.type = Marker.TEXT_VIEW_FACING
timestampMarker.id = 3 + len(markerArray.markers)
timestampMarker.color = ColorRGBA(r=0.6, a=1)
timestampMarker.scale.z = 0.1 * self.lineWidth
timestampMarker.pose.position.x = state.x
timestampMarker.pose.position.y = state.y
timestampMarker.text = "%.1f" % state.stamp.to_sec()
markerArray.markers.append(timestampMarker)
# Delete old markers
currentTimestampMarkerCount = len(markerArray.markers)
for i in xrange(0, self.previousTimestampMarkerCount - currentTimestampMarkerCount):
timestampMarker = Marker()
timestampMarker.header = waypointMarker.header
timestampMarker.ns = "Timestamps"
timestampMarker.action = Marker.DELETE
timestampMarker.id = 3 + currentTimestampMarkerCount + i
markerArray.markers.append(timestampMarker)
self.previousTimestampMarkerCount = currentTimestampMarkerCount
# Velocity arrow
velocitySmoothingNoPoints = 5
if len(pathMarker.points) > velocitySmoothingNoPoints:
arrowHeadMarker = Marker()
arrowHeadMarker.header = pathMarker.header
arrowHeadMarker.ns = "Path-ArrowHead"
arrowHeadMarker.type = Marker.LINE_STRIP
arrowHeadMarker.id = 2
arrowHeadMarker.color = ColorRGBA(r=1, g=1, a=1)
arrowHeadMarker.scale.x = arrowHeadMarker.scale.y = 0.1 * self.lineWidth
pointTip = numpy.array([pathMarker.points[-1].x, pathMarker.points[-1].y])
lastVelocity = numpy.array([pathMarker.points[-1].x - pathMarker.points[-velocitySmoothingNoPoints].x,
pathMarker.points[-1].y - pathMarker.points[-velocitySmoothingNoPoints].y])
speed =
|
numpy.linalg.norm(lastVelocity)
|
numpy.linalg.norm
|
#!/usr/bin/env python
# coding: utf-8
# # Imports
import numpy as np
import tensorflow as tf
import tensorflow_model_optimization as tfmot
import matplotlib.pyplot as plt
import json
import tempfile
import itertools
#from google.colab import drive
from mat4py import loadmat
print(tf.__version__)
# # Data pre-processing
def downscale(data, resolution):
# Downsample by averaging every `resolution` consecutive samples: (data.shape[0], 3, 1440) -> (data.shape[0], 3, 1440/resolution),
# i.e. at 10 min resolution each (3, 1440) trajectory becomes a (3, 144) trajectory
# Use ~12 timesteps -> 2-5 timesteps (Use ~2 hours to predict 20-50 mins)
return np.mean(data.reshape(data.shape[0], data.shape[1], int(data.shape[2]/resolution), resolution), axis=3)
def process_data(aligned_data, time_horizon, ph):
# 10 min resolution.. breaks each (3,144) trajectory into (144-ph-time_horizon,3,time_horizon) samples
data = np.zeros((aligned_data.shape[0] * (aligned_data.shape[2]-ph-time_horizon), aligned_data.shape[1], time_horizon))
label = np.zeros((aligned_data.shape[0] * (aligned_data.shape[2]-ph-time_horizon), ph))
count = 0
for i in range(aligned_data.shape[0]): # for each sample
for j in range(aligned_data.shape[2]-ph-time_horizon): # TH length sliding window across trajectory
data[count] = aligned_data[i,:,j:j+time_horizon]
label[count] = aligned_data[i,0,j+time_horizon:j+time_horizon+ph]
count+=1
return data, label
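# Shape sketch assuming the defaults used further below (TIME_HORIZON=12, PH=6, RESOLUTION=10):
# downscale() turns (N, 3, 1440) into (N, 3, 144); process_data() then yields
# data of shape (N*(144-6-12), 3, 12) and label of shape (N*(144-6-12), 6).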
def load_mpc(time_horizon, ph, resolution, batch): # int, int, int, bool
# Load train data
g = np.loadtxt('CGM_prediction_data/glucose_readings_train.csv', delimiter=',')
c = np.loadtxt('CGM_prediction_data/meals_carbs_train.csv', delimiter=',')
it = np.loadtxt('CGM_prediction_data/insulin_therapy_train.csv', delimiter=',')
# Load test data
g_ = np.loadtxt('CGM_prediction_data/glucose_readings_test.csv', delimiter=',')
c_ = np.loadtxt('CGM_prediction_data/meals_carbs_test.csv', delimiter=',')
it_ = np.loadtxt('CGM_prediction_data/insulin_therapy_test.csv', delimiter=',')
# Time align train & test data
aligned_train_data = downscale(np.array([(g[i,:], c[i,:], it[i,:]) for i in range(g.shape[0])]), resolution)
aligned_test_data = downscale(np.array([(g_[i,:], c_[i,:], it_[i,:]) for i in range(g_.shape[0])]), resolution)
print(aligned_train_data.shape)
# Break time aligned data into train & test samples
if batch:
train_data, train_label = process_data(aligned_train_data, time_horizon, ph)
test_data, test_label = process_data(aligned_test_data, time_horizon, ph)
return np.swapaxes(train_data,1,2), train_label, np.swapaxes(test_data,1,2), test_label
else:
return aligned_train_data, aligned_test_data
def load_uva(time_horizon, ph, resolution, batch):
data = loadmat('uva-padova-data/sim_results.mat')
train_data = np.zeros((231,3,1440))
test_data = np.zeros((99,3,1440))
# Separate train and test sets.. last 3 records of each patient will be used for testing
count_train = 0
count_test = 0
for i in range(33):
for j in range(10):
if j>=7:
test_data[count_test,0,:] = np.asarray(data['data']['results']['sensor'][count_test+count_train]['signals']['values']).flatten()[:1440]
test_data[count_test,1,:] = np.asarray(data['data']['results']['CHO'][count_test+count_train]['signals']['values']).flatten()[:1440]
test_data[count_test,2,:] = np.asarray(data['data']['results']['BOLUS'][count_test+count_train]['signals']['values']).flatten()[:1440] + np.asarray(data['data']['results']['BASAL'][i]['signals']['values']).flatten()[:1440]
count_test+=1
else:
train_data[count_train,0,:] = np.asarray(data['data']['results']['sensor'][count_test+count_train]['signals']['values']).flatten()[:1440]
train_data[count_train,1,:] = np.asarray(data['data']['results']['CHO'][count_test+count_train]['signals']['values']).flatten()[:1440]
train_data[count_train,2,:] = np.asarray(data['data']['results']['BOLUS'][count_test+count_train]['signals']['values']).flatten()[:1440] + np.asarray(data['data']['results']['BASAL'][i]['signals']['values']).flatten()[:1440]
count_train+=1
train_data = downscale(train_data, resolution)
test_data = downscale(test_data, resolution)
if batch:
train_data, train_label = process_data(train_data, time_horizon, ph)
test_data, test_label = process_data(test_data, time_horizon, ph)
return np.swapaxes(train_data,1,2)*0.0555, train_label*0.0555, np.swapaxes(test_data,1,2)*0.0555, test_label*0.0555 # convert to mmol/L
else:
return train_data, test_data
# # Make bidirectional LSTM prunable & define custom metrics
class PruneBidirectional(tf.keras.layers.Bidirectional, tfmot.sparsity.keras.PrunableLayer):
def get_prunable_weights(self):
# print(self.forward_layer._trainable_weights)
# print(self.backward_layer._trainable_weights)
# print(len(self.get_trainable_weights()))
# print(self.get_weights()[0], self.get_weights()[0].shape)
# return self.get_weights()
return self.trainable_weights
def loss_metric1(y_true, y_pred):
loss = tf.keras.losses.MeanSquaredError()
return loss(y_true[:,0], y_pred[:,0])
def loss_metric2(y_true, y_pred):
loss = tf.keras.losses.MeanSquaredError()
return loss(y_true[:,1], y_pred[:,1])
def loss_metric3(y_true, y_pred):
loss = tf.keras.losses.MeanSquaredError()
return loss(y_true[:,2], y_pred[:,2])
def loss_metric4(y_true, y_pred):
loss = tf.keras.losses.MeanSquaredError()
return loss(y_true[:,3], y_pred[:,3])
def loss_metric5(y_true, y_pred):
loss = tf.keras.losses.MeanSquaredError()
return loss(y_true[:,4], y_pred[:,4])
def loss_metric6(y_true, y_pred):
loss = tf.keras.losses.MeanSquaredError()
return loss(y_true[:,5], y_pred[:,5])
def bilstm(ph, training):
inp = tf.keras.Input(shape=(train_data.shape[1], train_data.shape[2]))
model = PruneBidirectional(tf.keras.layers.LSTM(200, return_sequences=True))(inp)
model = tf.keras.layers.Dropout(rate=0.5)(model, training=training)
model = PruneBidirectional(tf.keras.layers.LSTM(200, return_sequences=True))(model)
model = tf.keras.layers.Dropout(rate=0.5)(model, training=training)
model = tf.keras.layers.Flatten()(model)
model = tf.keras.layers.Dense(ph, activation=None)(model)
x = tf.keras.Model(inputs=inp, outputs=model)
x.compile(optimizer='adam', loss='mean_squared_error', metrics=[tf.keras.metrics.RootMeanSquaredError(), loss_metric1, loss_metric2, loss_metric3, loss_metric4, loss_metric5, loss_metric6])
return x
def crnn(ph, training):
inp = tf.keras.Input(shape=(train_data.shape[1], train_data.shape[2]))
model = tf.keras.layers.Conv1D(256, 4, activation='relu', padding='same')(inp)
model = tf.keras.layers.MaxPool1D(pool_size=2, strides=1, padding='same')(model)
model = tf.keras.layers.Dropout(rate=0.5)(model, training=training)
model = tf.keras.layers.Conv1D(512, 4, activation='relu', padding='same')(model)
model = tf.keras.layers.MaxPool1D(pool_size=2, strides=1, padding='same')(model)
model = tf.keras.layers.Dropout(rate=0.5)(model, training=training)
model = tf.keras.layers.LSTM(200, return_sequences=True)(model)
model = tf.keras.layers.Dropout(rate=0.5)(model, training=training)
model = tf.keras.layers.Flatten()(model)
model = tf.keras.layers.Dense(ph, activation=None)(model)
x = tf.keras.Model(inputs=inp, outputs=model)
x.compile(optimizer='adam', loss='mean_squared_error', metrics=[tf.keras.metrics.RootMeanSquaredError(), loss_metric1, loss_metric2, loss_metric3, loss_metric4, loss_metric5, loss_metric6])
return x
# # Custom callback to save pruning results
# Custom sparsity callback
import re
import csv
class SparsityCallback(tf.keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs={}):
non_trainable = [i.name for i in self.model.non_trainable_weights]
masks = []
for i in range(len(non_trainable)):
if re.match('(.*)mask(.*)', non_trainable[i]):
masks.append(self.model.non_trainable_weights[i].numpy())
masks = [i.flatten() for i in masks]
masks = np.concatenate(masks).ravel()
print('\n', np.count_nonzero(masks), 1-(np.count_nonzero(masks)/float(masks.shape[0])))
# with open('saved_models/uva_bilstm_sparsity.csv','ab') as f: #uva_crnn_sparisty.csv, uva_lstm_sparsity.csv, uva_bilstm_sparsity.csv
# np.savetxt(f,np.asarray([1-(np.count_nonzero(masks)/float(masks.shape[0]))]))
# csv_writer = csv.writer(f)
# csv_writer.writerow(str(1-(np.count_nonzero(masks)/float(masks.shape[0]))))#,delimiter=',')
# f.close()
# print(np.concatenate(masks).ravel(), np.concatenate(masks).ravel().shape)
# # Prune MPC generated models
# ## CRNN
# pruning crnn
#get_ipython().run_line_magic('load_ext', 'tensorboard')
PH = 6
TIME_HORIZON = 12
RESOLUTION = 10
BATCH_SIZE = 32
EPOCHS = 50
BATCH = True # indicates whether to convert data into batches
training = True
train_data, train_label, test_data, test_label = load_mpc(TIME_HORIZON, PH, RESOLUTION, BATCH)
model = tf.keras.models.load_model('../saved/postgraduate_dissertation/saved_models/mpc_guided_crnn.h5',custom_objects={'loss_metric1':loss_metric1, 'loss_metric2':loss_metric2, 'loss_metric3':loss_metric3, 'loss_metric4':loss_metric4,'loss_metric5':loss_metric5,'loss_metric6':loss_metric6})
#model = bilstm(PH, training)
pruning_params = {
'pruning_schedule': tfmot.sparsity.keras.PolynomialDecay(initial_sparsity=0,
final_sparsity=0.98,
begin_step=2997,
end_step=2997*EPOCHS,
frequency=2977)
}
print(model.summary())
logdir = tempfile.mkdtemp()
print(logdir)
callbacks = [
tfmot.sparsity.keras.UpdatePruningStep(),
# tfmot.sparsity.keras.PruningSummaries(log_dir=logdir),
SparsityCallback()
]
model_for_pruning = tfmot.sparsity.keras.prune_low_magnitude(model, **pruning_params)
model_for_pruning.compile(optimizer='adam', loss='mean_squared_error', metrics=[tf.keras.metrics.RootMeanSquaredError(), loss_metric1, loss_metric2, loss_metric3, loss_metric4, loss_metric5, loss_metric6])
print(model_for_pruning.summary())
pruned_crnn = model_for_pruning.fit(x=train_data,
y=train_label,
batch_size=BATCH_SIZE,
epochs=EPOCHS,
validation_data=(test_data, test_label),
callbacks=callbacks)
model_for_export = tfmot.sparsity.keras.strip_pruning(model_for_pruning)
#tf.keras.models.save_model(model_for_export, '../saved/history/pruned_mpc_guided_crnn.h5', include_optimizer=False)
#get_ipython().system('ls saved_models')
#print(pruned_model.summary())
#%tensorboard --logdir={logdir}
#model.save('../saved/history/pruned_mpc_guided_bilstm.h5')
#json.dump(pruned_crnn.history, open('../saved/history/pruned_mpc_guided_crnn_history', 'w'))
#!ls saved_models
#%tensorboard --logdir={log_dir}
# ## LSTM
# In[ ]:
PH = 6
TIME_HORIZON = 12
RESOLUTION = 10
BATCH_SIZE = 32
EPOCHS = 50
BATCH = True # indicates whether to convert data into batches
training = True
train_data, train_label, test_data, test_label = load_mpc(TIME_HORIZON, PH, RESOLUTION, BATCH)
model = tf.keras.models.load_model('../saved/postgraduate_dissertation/saved_models/mpc_guided_lstm.h5',custom_objects={'loss_metric1':loss_metric1, 'loss_metric2':loss_metric2, 'loss_metric3':loss_metric3, 'loss_metric4':loss_metric4,'loss_metric5':loss_metric5,'loss_metric6':loss_metric6})
#model = bilstm(PH, training)
pruning_params = {
'pruning_schedule': tfmot.sparsity.keras.PolynomialDecay(initial_sparsity=0,
final_sparsity=0.98,
begin_step=2997,
end_step=2997*EPOCHS,
frequency=2977)
}
print(model.summary())
logdir = tempfile.mkdtemp()
print(logdir)
callbacks = [
tfmot.sparsity.keras.UpdatePruningStep(),
# tfmot.sparsity.keras.PruningSummaries(log_dir=logdir),
SparsityCallback()
]
model_for_pruning = tfmot.sparsity.keras.prune_low_magnitude(model, **pruning_params)
model_for_pruning.compile(optimizer='adam', loss='mean_squared_error', metrics=[tf.keras.metrics.RootMeanSquaredError(), loss_metric1, loss_metric2, loss_metric3, loss_metric4, loss_metric5, loss_metric6])
print(model_for_pruning.summary())
pruned_lstm = model_for_pruning.fit(x=train_data,
y=train_label,
batch_size=BATCH_SIZE,
epochs=EPOCHS,
validation_data=(test_data, test_label),
callbacks=callbacks)
model_for_export = tfmot.sparsity.keras.strip_pruning(model_for_pruning)
#tf.keras.models.save_model(model_for_export, '../saved/history/pruned_mpc_guided_lstm.h5', include_optimizer=False)
#json.dump(pruned_lstm.history, open('../saved/history/pruned_mpc_guided_lstm_history', 'w'))
#get_ipython().system('ls saved_models')
# ## Bidirectional LSTM
PH = 6
TIME_HORIZON = 12
RESOLUTION = 10
BATCH_SIZE = 32
EPOCHS = 150
BATCH = True # indicates whether to convert data into batches
training = True
train_data, train_label, test_data, test_label = load_mpc(TIME_HORIZON, PH, RESOLUTION, BATCH)
#model = tf.keras.models.load_model('saved_models/mpc_guided_lstm.h5',custom_objects={'loss_metric1':loss_metric1, 'loss_metric2':loss_metric2, 'loss_metric3':loss_metric3, 'loss_metric4':loss_metric4,'loss_metric5':loss_metric5,'loss_metric6':loss_metric6})
model = bilstm(PH, training)
pruning_params = {
'pruning_schedule': tfmot.sparsity.keras.PolynomialDecay(initial_sparsity=0,
final_sparsity=0.98,
begin_step=2997*100,
end_step=2997*EPOCHS,
frequency=2977)
}
print(model.summary())
logdir = tempfile.mkdtemp()
print(logdir)
callbacks = [
tfmot.sparsity.keras.UpdatePruningStep(),
# tfmot.sparsity.keras.PruningSummaries(log_dir=logdir),
SparsityCallback()
]
model_for_pruning = tfmot.sparsity.keras.prune_low_magnitude(model, **pruning_params)
model_for_pruning.compile(optimizer='adam', loss='mean_squared_error', metrics=[tf.keras.metrics.RootMeanSquaredError(), loss_metric1, loss_metric2, loss_metric3, loss_metric4, loss_metric5, loss_metric6])
print(model_for_pruning.summary())
pruned_lstm = model_for_pruning.fit(x=train_data,
y=train_label,
batch_size=BATCH_SIZE,
epochs=EPOCHS,
validation_data=(test_data, test_label),
callbacks=callbacks)
model_for_export = tfmot.sparsity.keras.strip_pruning(model_for_pruning)
#tf.keras.models.save_model(model_for_export, '../saved/history/pruned_mpc_guided_bilstm.h5', include_optimizer=False)
#json.dump(pruned_lstm.history, open('../saved/history/pruned_mpc_guided_bilstm_history', 'w'))
#get_ipython().system('ls saved_models')
# # MPC Pruning results
# In[ ]:
lstm_val_loss_10 = json.load(open('../saved/history/pruned_mpc_guided_lstm_history'))['val_loss_metric1']
lstm_val_loss_20 = json.load(open('../saved/history/pruned_mpc_guided_lstm_history'))['val_loss_metric2']
lstm_val_loss_30 = json.load(open('../saved/history/pruned_mpc_guided_lstm_history'))['val_loss_metric3']
lstm_val_loss_40 = json.load(open('../saved/history/pruned_mpc_guided_lstm_history'))['val_loss_metric4']
lstm_val_loss_50 = json.load(open('../saved/history/pruned_mpc_guided_lstm_history'))['val_loss_metric5']
lstm_val_loss_60 = json.load(open('../saved/history/pruned_mpc_guided_lstm_history'))['val_loss_metric6']
crnn_val_loss_10 = json.load(open('../saved/history/pruned_mpc_guided_crnn_history'))['val_loss_metric1']
crnn_val_loss_20 = json.load(open('../saved/history/pruned_mpc_guided_crnn_history'))['val_loss_metric2']
crnn_val_loss_30 = json.load(open('../saved/history/pruned_mpc_guided_crnn_history'))['val_loss_metric3']
crnn_val_loss_40 = json.load(open('../saved/history/pruned_mpc_guided_crnn_history'))['val_loss_metric4']
crnn_val_loss_50 = json.load(open('../saved/history/pruned_mpc_guided_crnn_history'))['val_loss_metric5']
crnn_val_loss_60 = json.load(open('../saved/history/pruned_mpc_guided_crnn_history'))['val_loss_metric6']
bilstm_val_loss_10 = json.load(open('../saved/history/pruned_mpc_guided_bilstm_history'))['val_loss_metric1'][100:]
bilstm_val_loss_20 = json.load(open('../saved/history/pruned_mpc_guided_bilstm_history'))['val_loss_metric2'][100:]
bilstm_val_loss_30 = json.load(open('../saved/history/pruned_mpc_guided_bilstm_history'))['val_loss_metric3'][100:]
bilstm_val_loss_40 = json.load(open('../saved/history/pruned_mpc_guided_bilstm_history'))['val_loss_metric4'][100:]
bilstm_val_loss_50 = json.load(open('../saved/history/pruned_mpc_guided_bilstm_history'))['val_loss_metric5'][100:]
bilstm_val_loss_60 = json.load(open('../saved/history/pruned_mpc_guided_bilstm_history'))['val_loss_metric6'][100:]
x_crnn = np.genfromtxt('../saved/history/crnn_sparsity.csv')
x_lstm = np.genfromtxt('../saved/history/lstm_sparsity.csv')
x_bilstm = np.genfromtxt('../saved/history/bilstm_sparsity.csv')[100:]
fig, axes = plt.subplots(2,3)
plt.rcParams["figure.figsize"] = (20,10)
axes[0,0].plot(x_lstm, np.sqrt(lstm_val_loss_10), label='LSTM')
axes[0,1].plot(x_lstm, np.sqrt(lstm_val_loss_20), label='LSTM')
axes[0,2].plot(x_lstm, np.sqrt(lstm_val_loss_30), label='LSTM')
axes[1,0].plot(x_lstm, np.sqrt(lstm_val_loss_40), label='LSTM')
axes[1,1].plot(x_lstm, np.sqrt(lstm_val_loss_50), label='LSTM')
axes[1,2].plot(x_lstm, np.sqrt(lstm_val_loss_60), label='LSTM')
axes[0,0].plot(x_crnn, np.sqrt(crnn_val_loss_10), label='CRNN')
axes[0,1].plot(x_crnn, np.sqrt(crnn_val_loss_20), label='CRNN')
axes[0,2].plot(x_crnn, np.sqrt(crnn_val_loss_30), label='CRNN')
axes[1,0].plot(x_crnn, np.sqrt(crnn_val_loss_40), label='CRNN')
axes[1,1].plot(x_crnn, np.sqrt(crnn_val_loss_50), label='CRNN')
axes[1,2].plot(x_crnn, np.sqrt(crnn_val_loss_60), label='CRNN')
axes[0,0].plot(x_bilstm, np.sqrt(bilstm_val_loss_10), label='Bidirectional LSTM')
axes[0,1].plot(x_bilstm, np.sqrt(bilstm_val_loss_20), label='Bidirectional LSTM')
axes[0,2].plot(x_bilstm, np.sqrt(bilstm_val_loss_30), label='Bidirectional LSTM')
axes[1,0].plot(x_bilstm, np.sqrt(bilstm_val_loss_40), label='Bidirectional LSTM')
axes[1,1].plot(x_bilstm, np.sqrt(bilstm_val_loss_50), label='Bidirectional LSTM')
axes[1,2].plot(x_bilstm, np.sqrt(bilstm_val_loss_60), label='Bidirectional LSTM')
axes[0,0].title.set_text('10 minute prediction validation loss')
axes[0,1].title.set_text('20 minute prediction validation loss')
axes[0,2].title.set_text('30 minute prediction validation loss')
axes[1,0].title.set_text('40 minute prediction validation loss')
axes[1,1].title.set_text('50 minute prediction validation loss')
axes[1,2].title.set_text('60 minute prediction validation loss')
axes[0,0].set_ylabel('RMSE (mmol/L)')
axes[1,0].set_ylabel('RMSE (mmol/L)')
axes[1,0].set_xlabel('Sparsity (%)')
axes[1,1].set_xlabel('Sparsity (%)')
axes[1,2].set_xlabel('Sparsity (%)')
axes[0,0].legend()
axes[0,1].legend()
axes[0,2].legend()
axes[1,0].legend()
axes[1,1].legend()
axes[1,2].legend()
#plt.rcParams["figure.figsize"] = (20,10)
custom_ylim = (0,2)
plt.setp(axes, ylim=custom_ylim)
plt.show()
# # Prune UVA Padova models
# ## CRNN
# In[ ]:
# pruning crnn
#get_ipython().run_line_magic('load_ext', 'tensorboard')
PH = 6
TIME_HORIZON = 12
RESOLUTION = 10
BATCH_SIZE = 32
EPOCHS = 50
BATCH = True # indicates whether to convert data into batches
training = True
train_data, train_label, test_data, test_label = load_uva(TIME_HORIZON, PH, RESOLUTION, BATCH)
model = tf.keras.models.load_model('../saved/postgraduate_dissertation/saved_models/uva_padova_crnn.h5',custom_objects={'loss_metric1':loss_metric1, 'loss_metric2':loss_metric2, 'loss_metric3':loss_metric3, 'loss_metric4':loss_metric4,'loss_metric5':loss_metric5,'loss_metric6':loss_metric6})
#model = bilstm(PH, training)
pruning_params = {
'pruning_schedule': tfmot.sparsity.keras.PolynomialDecay(initial_sparsity=0,
final_sparsity=0.98,
begin_step=910,
end_step=910*EPOCHS,
frequency=910)
}
print(model.summary())
logdir = tempfile.mkdtemp()
print(logdir)
callbacks = [
tfmot.sparsity.keras.UpdatePruningStep(),
# tfmot.sparsity.keras.PruningSummaries(log_dir=logdir),
SparsityCallback()
]
model_for_pruning = tfmot.sparsity.keras.prune_low_magnitude(model, **pruning_params)
model_for_pruning.compile(optimizer='adam', loss='mean_squared_error', metrics=[tf.keras.metrics.RootMeanSquaredError(), loss_metric1, loss_metric2, loss_metric3, loss_metric4, loss_metric5, loss_metric6])
print(model_for_pruning.summary())
pruned_crnn = model_for_pruning.fit(x=train_data,
y=train_label,
batch_size=BATCH_SIZE,
epochs=EPOCHS,
validation_data=(test_data, test_label),
callbacks=callbacks)
model_for_export = tfmot.sparsity.keras.strip_pruning(model_for_pruning)
#tf.keras.models.save_model(model_for_export, '../saved/history/pruned_uva_padova_crnn.h5', include_optimizer=False)
#
#get_ipython().system('ls saved_models')
#print(pruned_model.summary())
#%tensorboard --logdir={logdir}
#model.save('../saved/history/pruned_mpc_guided_bilstm.h5')
#json.dump(pruned_crnn.history, open('../saved/history/pruned_uva_padova_crnn_history', 'w'))
# ## LSTM
# In[ ]:
PH = 6
TIME_HORIZON = 12
RESOLUTION = 10
BATCH_SIZE = 32
EPOCHS = 50
BATCH = True # indicates whether to convert data into batches
training = True
train_data, train_label, test_data, test_label = load_uva(TIME_HORIZON, PH, RESOLUTION, BATCH)
model = tf.keras.models.load_model('../saved/postgraduate_dissertation/saved_models/uva_padova_lstm.h5',custom_objects={'loss_metric1':loss_metric1, 'loss_metric2':loss_metric2, 'loss_metric3':loss_metric3, 'loss_metric4':loss_metric4,'loss_metric5':loss_metric5,'loss_metric6':loss_metric6})
#model = bilstm(PH, training)
pruning_params = {
'pruning_schedule': tfmot.sparsity.keras.PolynomialDecay(initial_sparsity=0,
final_sparsity=0.98,
begin_step=910,
end_step=910*EPOCHS,
frequency=910)
}
print(model.summary())
logdir = tempfile.mkdtemp()
print(logdir)
callbacks = [
tfmot.sparsity.keras.UpdatePruningStep(),
# tfmot.sparsity.keras.PruningSummaries(log_dir=logdir),
SparsityCallback()
]
model_for_pruning = tfmot.sparsity.keras.prune_low_magnitude(model, **pruning_params)
model_for_pruning.compile(optimizer='adam', loss='mean_squared_error', metrics=[tf.keras.metrics.RootMeanSquaredError(), loss_metric1, loss_metric2, loss_metric3, loss_metric4, loss_metric5, loss_metric6])
print(model_for_pruning.summary())
pruned_lstm = model_for_pruning.fit(x=train_data,
y=train_label,
batch_size=BATCH_SIZE,
epochs=EPOCHS,
validation_data=(test_data, test_label),
callbacks=callbacks)
model_for_export = tfmot.sparsity.keras.strip_pruning(model_for_pruning)
#tf.keras.models.save_model(model_for_export, '../saved/history/pruned_uva_padova_lstm.h5', include_optimizer=False)
#json.dump(pruned_lstm.history, open('../saved/history/pruned_uva_padova_lstm_history', 'w'))
#get_ipython().system('ls saved_models')
# ## BiLSTM
# In[ ]:
PH = 6
TIME_HORIZON = 12
RESOLUTION = 10
BATCH_SIZE = 32
EPOCHS = 150
BATCH = True # indicates whether to convert data into batches
training = True
train_data, train_label, test_data, test_label = load_uva(TIME_HORIZON, PH, RESOLUTION, BATCH)
#model = tf.keras.models.load_model('saved_models/mpc_guided_lstm.h5',custom_objects={'loss_metric1':loss_metric1, 'loss_metric2':loss_metric2, 'loss_metric3':loss_metric3, 'loss_metric4':loss_metric4,'loss_metric5':loss_metric5,'loss_metric6':loss_metric6})
model = bilstm(PH, training)
pruning_params = {
'pruning_schedule': tfmot.sparsity.keras.PolynomialDecay(initial_sparsity=0,
final_sparsity=0.98,
begin_step=910*100,
end_step=910*EPOCHS,
frequency=910)
}
print(model.summary())
logdir = tempfile.mkdtemp()
print(logdir)
callbacks = [
tfmot.sparsity.keras.UpdatePruningStep(),
# tfmot.sparsity.keras.PruningSummaries(log_dir=logdir),
SparsityCallback()
]
model_for_pruning = tfmot.sparsity.keras.prune_low_magnitude(model, **pruning_params)
model_for_pruning.compile(optimizer='adam', loss='mean_squared_error', metrics=[tf.keras.metrics.RootMeanSquaredError(), loss_metric1, loss_metric2, loss_metric3, loss_metric4, loss_metric5, loss_metric6])
print(model_for_pruning.summary())
pruned_lstm = model_for_pruning.fit(x=train_data,
y=train_label,
batch_size=BATCH_SIZE,
epochs=EPOCHS,
validation_data=(test_data, test_label),
callbacks=callbacks)
model_for_export = tfmot.sparsity.keras.strip_pruning(model_for_pruning)
#tf.keras.models.save_model(model_for_export, '../saved/history/pruned_uva_padova_bilstm.h5', include_optimizer=False)
#json.dump(pruned_lstm.history, open('../saved/history/pruned_uva_padova_bilstm_history', 'w'))
#get_ipython().system('ls saved_models')
# # UVA Padova Pruning results
# In[ ]:
lstm_val_loss_10 = json.load(open('../saved/history/pruned_uva_padova_lstm_history'))['val_loss_metric1']
lstm_val_loss_20 = json.load(open('../saved/history/pruned_uva_padova_lstm_history'))['val_loss_metric2']
lstm_val_loss_30 = json.load(open('../saved/history/pruned_uva_padova_lstm_history'))['val_loss_metric3']
lstm_val_loss_40 = json.load(open('../saved/history/pruned_uva_padova_lstm_history'))['val_loss_metric4']
lstm_val_loss_50 = json.load(open('../saved/history/pruned_uva_padova_lstm_history'))['val_loss_metric5']
lstm_val_loss_60 = json.load(open('../saved/history/pruned_uva_padova_lstm_history'))['val_loss_metric6']
crnn_val_loss_10 = json.load(open('../saved/history/pruned_uva_padova_crnn_history'))['val_loss_metric1']
crnn_val_loss_20 = json.load(open('../saved/history/pruned_uva_padova_crnn_history'))['val_loss_metric2']
crnn_val_loss_30 = json.load(open('../saved/history/pruned_uva_padova_crnn_history'))['val_loss_metric3']
crnn_val_loss_40 = json.load(open('../saved/history/pruned_uva_padova_crnn_history'))['val_loss_metric4']
crnn_val_loss_50 = json.load(open('../saved/history/pruned_uva_padova_crnn_history'))['val_loss_metric5']
crnn_val_loss_60 = json.load(open('../saved/history/pruned_uva_padova_crnn_history'))['val_loss_metric6']
bilstm_val_loss_10 = json.load(open('../saved/history/pruned_uva_padova_bilstm_history'))['val_loss_metric1'][100:]
bilstm_val_loss_20 = json.load(open('../saved/history/pruned_uva_padova_bilstm_history'))['val_loss_metric2'][100:]
bilstm_val_loss_30 = json.load(open('../saved/history/pruned_uva_padova_bilstm_history'))['val_loss_metric3'][100:]
bilstm_val_loss_40 = json.load(open('../saved/history/pruned_uva_padova_bilstm_history'))['val_loss_metric4'][100:]
bilstm_val_loss_50 = json.load(open('../saved/history/pruned_uva_padova_bilstm_history'))['val_loss_metric5'][100:]
bilstm_val_loss_60 = json.load(open('../saved/history/pruned_uva_padova_bilstm_history'))['val_loss_metric6'][100:]
x_crnn = np.genfromtxt('../saved/history/uva_crnn_sparsity.csv')#[6:]
x_lstm = np.genfromtxt('../saved/history/uva_lstm_sparsity.csv')
x_bilstm = np.genfromtxt('../saved/history/uva_bilstm_sparsity.csv')[100:]
print(x_crnn.shape, x_lstm.shape, x_bilstm.shape)
fig, axes = plt.subplots(2,3)
plt.rcParams["figure.figsize"] = (20,10)
axes[0,0].plot(x_lstm,
|
np.sqrt(lstm_val_loss_10)
|
numpy.sqrt
|
import unittest
import matplotlib
import matplotlib.pyplot
matplotlib.use("Agg")
matplotlib.pyplot.switch_backend("Agg")
class Test(unittest.TestCase):
def test_cantilever_beam(self):
import numpy as np
import matplotlib.pyplot as plt
from smt.problems import CantileverBeam
ndim = 3
problem = CantileverBeam(ndim=ndim)
num = 100
x = np.ones((num, ndim))
x[:, 0] = np.linspace(0.01, 0.05, num)
x[:, 1] = 0.5
x[:, 2] = 0.5
y = problem(x)
yd = np.empty((num, ndim))
for i in range(ndim):
yd[:, i] = problem(x, kx=i).flatten()
print(y.shape)
print(yd.shape)
plt.plot(x[:, 0], y[:, 0])
plt.xlabel("x")
plt.ylabel("y")
plt.show()
def test_robot_arm(self):
import numpy as np
import matplotlib.pyplot as plt
from smt.problems import RobotArm
ndim = 2
problem = RobotArm(ndim=ndim)
num = 100
x = np.ones((num, ndim))
x[:, 0] = np.linspace(0.0, 1.0, num)
x[:, 1] = np.pi
y = problem(x)
yd = np.empty((num, ndim))
for i in range(ndim):
yd[:, i] = problem(x, kx=i).flatten()
print(y.shape)
print(yd.shape)
plt.plot(x[:, 0], y[:, 0])
plt.xlabel("x")
plt.ylabel("y")
plt.show()
def test_rosenbrock(self):
import numpy as np
import matplotlib.pyplot as plt
from smt.problems import Rosenbrock
ndim = 2
problem = Rosenbrock(ndim=ndim)
num = 100
x = np.ones((num, ndim))
x[:, 0] = np.linspace(-2, 2.0, num)
x[:, 1] = 0.0
y = problem(x)
yd = np.empty((num, ndim))
for i in range(ndim):
yd[:, i] = problem(x, kx=i).flatten()
print(y.shape)
print(yd.shape)
plt.plot(x[:, 0], y[:, 0])
plt.xlabel("x")
plt.ylabel("y")
plt.show()
def test_sphere(self):
import numpy as np
import matplotlib.pyplot as plt
from smt.problems import Sphere
ndim = 2
problem = Sphere(ndim=ndim)
num = 100
x = np.ones((num, ndim))
x[:, 0] = np.linspace(-10, 10.0, num)
x[:, 1] = 0.0
y = problem(x)
yd = np.empty((num, ndim))
for i in range(ndim):
yd[:, i] = problem(x, kx=i).flatten()
print(y.shape)
print(yd.shape)
plt.plot(x[:, 0], y[:, 0])
plt.xlabel("x")
plt.ylabel("y")
plt.show()
def test_branin(self):
import numpy as np
import matplotlib.pyplot as plt
from smt.problems import Branin
ndim = 2
problem = Branin(ndim=ndim)
num = 100
x =
|
np.ones((num, ndim))
|
numpy.ones
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 19 11:05:23 2017
@author: zhangji
"""
from matplotlib import pyplot as plt
# plt.rcParams['figure.figsize'] = (18.5, 10.5)
# fontsize = 40
import os
# import glob
import numpy as np
from datetime import datetime
# import matplotlib
import re
from scanf import scanf
from scipy import interpolate, integrate
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from mpl_toolkits.mplot3d.art3d import Line3DCollection
from matplotlib.patches import FancyArrowPatch
from mpl_toolkits.mplot3d.proj3d import proj_transform
from mpl_toolkits.mplot3d.axes3d import Axes3D
from matplotlib.colors import Normalize
from matplotlib.ticker import Locator
from matplotlib.collections import LineCollection
from matplotlib.offsetbox import AnchoredOffsetbox, TextArea, HPacker, VPacker
# from scipy.optimize import curve_fit
# font = {'size': 20}
# matplotlib.rc('font', **font)
# np.set_printoptions(linewidth=90, precision=5)
markerstyle_list = ['^', 'v', 'o', 's', 'p', 'd', 'H',
'1', '2', '3', '4', '8', 'P', '*',
'h', '+', 'x', 'X', 'D', '|', '_', ]
color_list = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b', '#e377c2',
'#7f7f7f', '#bcbd22', '#17becf']
def read_array(text_headle, FILE_DATA, array_length=6):
t_match = re.search(text_headle, FILE_DATA)
if t_match is not None:
t1 = t_match.end()
myformat = ('%f ' * array_length)[:-1]
temp1 = np.array(scanf(myformat, FILE_DATA[t1:]))
else:
temp1 = np.ones(array_length)
temp1[:] = np.nan
return temp1
class fullprint:
'context manager for printing full numpy arrays'
def __init__(self, **kwargs):
kwargs.setdefault('threshold', np.inf)
self.opt = kwargs
def __enter__(self):
self._opt = np.get_printoptions()
np.set_printoptions(**self.opt)
def __exit__(self, type, value, traceback):
np.set_printoptions(**self._opt)
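# Minimal usage sketch for the fullprint context manager above (example array assumed):
#   with fullprint(linewidth=120):
#       print(np.arange(10000))   # prints the full array instead of a truncated summary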
def func_line(x, a0, a1):
y = a0 + a1 * x
return y
def fit_line(ax, x, y, x0, x1, ifprint=1, linestyle='-.', linewidth=1, extendline=False,
color='k', alpha=0.7):
idx = np.array(x >= x0) & np.array(x <= x1) & np.isfinite(x) & np.isfinite(y)
tx = x[idx]
ty = y[idx]
fit_para = np.polyfit(tx, ty, 1)
pol_y = np.poly1d(fit_para)
if extendline:
fit_x = np.linspace(x.min(), x.max(), 100)
else:
fit_x = np.linspace(max(x.min(), x0), min(x.max(), x1), 100)
if ax is not None:
ax.plot(fit_x, pol_y(fit_x), linestyle, linewidth=linewidth,
color=color, alpha=alpha)
if ifprint:
print('y = %f + %f * x' % (fit_para[1], fit_para[0]), 'in range',
(x[idx].min(), x[idx].max()))
return fit_para
def fit_power_law(ax, x, y, x0, x1, ifprint=1, linestyle='-.', linewidth=1, extendline=False,
color='k', alpha=0.7):
idx = np.array(x >= x0) & np.array(x <= x1) & np.isfinite((np.log10(x))) & np.isfinite(
(np.log10(y)))
tx = np.log10(x[idx])
ty = np.log10(y[idx])
fit_para = np.polyfit(tx, ty, 1)
pol_y = np.poly1d(fit_para)
if extendline:
fit_x = np.log10(np.linspace(x.min(), x.max(), 30))
else:
fit_x = np.log10(np.linspace(max(x.min(), x0), min(x.max(), x1), 30))
if ax is not None:
ax.loglog(10 ** fit_x, 10 ** pol_y(fit_x), linestyle, linewidth=linewidth,
color=color, alpha=alpha)
if ifprint:
print('log(y) = %f + %f * log(x)' % (fit_para[1], fit_para[0]), 'in range',
(10 ** tx.min(), 10 ** tx.max()))
print('ln(y) = %f + %f * ln(x)' % (fit_para[1] * np.log(10), fit_para[0]), 'in range',
(10 ** tx.min(), 10 ** tx.max()))
return fit_para
def fit_semilogy(ax, x, y, x0, x1, ifprint=1, linestyle='-.', linewidth=1, extendline=False,
color='k', alpha=0.7):
idx = np.array(x >= x0) & np.array(x <= x1) & np.isfinite(x) & np.isfinite(np.log10(y))
tx = x[idx]
ty = np.log10(y[idx])
fit_para = np.polyfit(tx, ty, 1)
pol_y = np.poly1d(fit_para)
if extendline:
fit_x = np.linspace(x.min(), x.max(), 30)
else:
fit_x = np.linspace(max(x.min(), x0), min(x.max(), x1), 30)
if ax is not None:
ax.plot(fit_x, 10 ** pol_y(fit_x), linestyle, linewidth=linewidth,
color=color, alpha=alpha)
if ifprint:
print('log(y) = %f + %f * x' % (fit_para[1], fit_para[0]), 'in range', (tx.min(), tx.max()))
fit_para = fit_para * np.log(10)
print('ln(y) = %f + %f * x' % (fit_para[1], fit_para[0]), 'in range', (tx.min(), tx.max()))
return fit_para
def norm_self(v):
return v / np.linalg.norm(v)
def angle_2vectors(v1, v2, vct_direct=None):
v1 = norm_self(np.array(v1).ravel())
v2 = norm_self(np.array(v2).ravel())
err_msg = 'inputs are not 3 dimensional vectors. '
assert v1.size == 3, err_msg
assert v2.size == 3, err_msg
t1 = np.dot(v1, v2)
if vct_direct is None:
sign = 1
else:
vct_direct = norm_self(np.array(vct_direct).ravel())
assert vct_direct.size == 3, err_msg
sign = np.sign(np.dot(vct_direct, np.cross(v1, v2)))
theta = sign * np.arccos(t1)
return theta
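# Example for angle_2vectors above: angle_2vectors([1, 0, 0], [0, 1, 0], vct_direct=[0, 0, 1])
# returns +pi/2; passing vct_direct=[0, 0, -1] flips the sign to -pi/2, and omitting vct_direct
# always returns the unsigned angle.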
def get_rot_matrix(*args, **kwargs):
from src import support_class as spc
return spc.get_rot_matrix(*args, **kwargs)
def mycot(x):
return 1 / np.tan(x)
def mycsc(x):
return 1 / np.sin(x)
def mysec(x):
return 1 / np.cos(x)
def write_pbs_head(fpbs, job_name, nodes=1):
fpbs.write('#! /bin/bash\n')
fpbs.write('#PBS -M <EMAIL>\n')
fpbs.write('#PBS -l nodes=%d:ppn=24\n' % nodes)
fpbs.write('#PBS -l walltime=72:00:00\n')
fpbs.write('#PBS -q common\n')
fpbs.write('#PBS -N %s\n' % job_name)
fpbs.write('\n')
fpbs.write('cd $PBS_O_WORKDIR\n')
fpbs.write('\n')
return True
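# For reference, write_pbs_head(fpbs, 'myjob', nodes=2) writes a header of the form:
#   #! /bin/bash
#   #PBS -M <EMAIL>
#   #PBS -l nodes=2:ppn=24
#   #PBS -l walltime=72:00:00
#   #PBS -q common
#   #PBS -N myjob
#   cd $PBS_O_WORKDIR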
def write_pbs_head_dbg(fpbs, job_name, nodes=1):
assert np.isclose(nodes, 1)
fpbs.write('#! /bin/bash\n')
fpbs.write('#PBS -M <EMAIL>@csrc.ac.cn\n')
fpbs.write('#PBS -l nodes=%d:ppn=24\n' % nodes)
fpbs.write('#PBS -l walltime=24:00:00\n')
fpbs.write('#PBS -q debug\n')
fpbs.write('#PBS -N %s\n' % job_name)
fpbs.write('\n')
fpbs.write('cd $PBS_O_WORKDIR\n')
fpbs.write('\n')
return True
def write_pbs_head_serial(fpbs, job_name, nodes=1):
assert np.isclose(nodes, 1)
fpbs.write('#! /bin/bash\n')
fpbs.write('#PBS -M <EMAIL>\n')
fpbs.write('#PBS -l nodes=%d:ppn=1\n' % nodes)
fpbs.write('#PBS -l walltime=1000:00:00\n')
fpbs.write('#PBS -q serial\n')
fpbs.write('#PBS -N %s\n' % job_name)
fpbs.write('\n')
fpbs.write('cd $PBS_O_WORKDIR\n')
fpbs.write('\n')
return True
def write_pbs_head_q03(fpbs, job_name, nodes=1):
fpbs.write('#! /bin/bash\n')
fpbs.write('#PBS -M <EMAIL>\n')
fpbs.write('#PBS -l nodes=%d:ppn=24\n' % nodes)
fpbs.write('#PBS -l walltime=72:00:00\n')
fpbs.write('#PBS -q q03\n')
fpbs.write('#PBS -N %s\n' % job_name)
fpbs.write('\n')
fpbs.write('cd $PBS_O_WORKDIR\n')
fpbs.write('\n')
return True
def write_pbs_head_newturb(fpbs, job_name, nodes=1):
fpbs.write('#!/bin/sh\n')
fpbs.write('#PBS -M <EMAIL>\n')
fpbs.write('#PBS -l nodes=%d:ppn=24\n' % nodes)
fpbs.write('#PBS -l walltime=24:00:00\n')
fpbs.write('#PBS -N %s\n' % job_name)
fpbs.write('\n')
fpbs.write('cd $PBS_O_WORKDIR\n')
fpbs.write('source /storage/zhang/.bashrc\n')
fpbs.write('\n')
return True
def write_pbs_head_haiguang(fpbs, job_name, nodes=1):
fpbs.write('#!/bin/sh\n')
fpbs.write('# run the job in the main node directly. ')
fpbs.write('\n')
return True
def _write_main_run_top(frun, main_hostname='ln0'):
frun.write('t_dir=$PWD \n')
frun.write('bash_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" \n\n')
# check if the script run on the main node.
frun.write('if [ $(hostname) == \'%s\' ]; then\n' % main_hostname)
frun.write(' echo \'this node is %s. \' \n' % main_hostname)
frun.write('else \n')
frun.write(' echo \'please run in the node %s. \' \n' % main_hostname)
frun.write(' exit \n')
frun.write('fi \n\n')
return True
def write_main_run(write_pbs_head, job_dir, ncase):
tname = os.path.join(job_dir, 'main_run.pbs')
print('ncase =', ncase)
print('write parallel pbs file to %s' % tname)
with open(tname, 'w') as fpbs:
write_pbs_head(fpbs, job_dir, nodes=ncase)
fpbs.write('seq 0 %d | parallel -j 1 -u --sshloginfile $PBS_NODEFILE \\\n' % (ncase - 1))
fpbs.write('\"cd $PWD;echo $PWD;bash myscript.csh {}\"')
return True
def write_main_run_comm_list(comm_list, txt_list, use_node, njob_node, job_dir,
write_pbs_head000=write_pbs_head, n_job_pbs=None,
random_order=False, ):
def _parallel_pbs_ln0(n_use_comm, njob_node, csh_name):
t2 = 'seq 0 %d | parallel -j %d -u ' % (n_use_comm - 1, njob_node)
t2 = t2 + ' --sshloginfile $PBS_NODEFILE --sshdelay 0.1 '
t2 = t2 + ' "cd $PWD; echo $PWD; echo; bash %s {} true " \n\n ' % csh_name
return t2
def _parallel_pbs_newturb(n_use_comm, njob_node, csh_name):
t2 = 'seq 0 %d | parallel -j %d -u ' % (n_use_comm - 1, njob_node)
t2 = t2 + ' --sshdelay 0.1 '
t2 = t2 + ' "cd $PWD; echo $PWD; echo; bash %s {} true " \n\n ' % csh_name
return t2
PWD = os.getcwd()
comm_list = np.array(comm_list)
txt_list = np.array(txt_list)
t_path = os.path.join(PWD, job_dir)
if not os.path.exists(t_path):
os.makedirs(t_path)
print('make folder %s' % t_path)
else:
print('exist folder %s' % t_path)
n_case = len(comm_list)
if n_job_pbs is None:
n_job_pbs = use_node * njob_node
n_pbs = (n_case // n_job_pbs) + np.sign(n_case % n_job_pbs)
if random_order:
tidx = np.arange(n_case)
np.random.shuffle(tidx)
comm_list = comm_list[tidx]
txt_list = txt_list[tidx]
# generate comm_list.sh
t_name0 = os.path.join(t_path, 'comm_list.sh')
with open(t_name0, 'w') as fcomm:
for i0, ts, f in zip(range(n_case), comm_list, txt_list):
fcomm.write('%s > %s.txt 2> %s.err \n' % (ts, f, f))
fcomm.write('echo \'%d / %d, %s start.\' \n\n' % (i0 + 1, n_case, f))
assert callable(write_pbs_head000)
if write_pbs_head000 is write_pbs_head:
main_hostname = 'ln0'
_parallel_pbs_use = _parallel_pbs_ln0
run_fun = 'qsub %s\n\n'
# elif write_pbs_head000 is write_pbs_head_q03:
# main_hostname = 'ln0'
# _parallel_pbs_use = _parallel_pbs_ln0
# run_fun = 'qsub %s\n\n'
elif write_pbs_head000 is write_pbs_head_dbg:
main_hostname = 'ln0'
_parallel_pbs_use = _parallel_pbs_ln0
run_fun = 'qsub %s\n\n'
elif write_pbs_head000 is write_pbs_head_q03:
main_hostname = 'ln0'
_parallel_pbs_use = _parallel_pbs_ln0
run_fun = 'qsub %s\n\n'
elif write_pbs_head000 is write_pbs_head_serial:
main_hostname = 'ln0'
_parallel_pbs_use = _parallel_pbs_ln0
run_fun = 'qsub %s\n\n'
elif write_pbs_head000 is write_pbs_head_newturb:
main_hostname = 'newturb'
_parallel_pbs_use = _parallel_pbs_newturb
run_fun = 'qsub %s\n\n'
assert np.isclose(use_node, 1)
elif write_pbs_head000 is write_pbs_head_haiguang:
main_hostname = 'bogon'
_parallel_pbs_use = _parallel_pbs_newturb
run_fun = 'cd $bash_dir \nnohup bash %s &\ncd $t_dir\n\n'
assert np.isclose(use_node, 1)
else:
raise ValueError('wrong write_pbs_head000')
# generate .pbs file and .csh file
t_name0 = os.path.join(t_path, 'main_run.sh')
with open(t_name0, 'w') as frun:
_write_main_run_top(frun, main_hostname=main_hostname)
# noinspection PyTypeChecker
for t1 in np.arange(n_pbs, dtype='int'):
use_comm = comm_list[t1 * n_job_pbs: np.min(((t1 + 1) * n_job_pbs, n_case))]
use_txt = txt_list[t1 * n_job_pbs: np.min(((t1 + 1) * n_job_pbs, n_case))]
n_use_comm = len(use_comm)
tnode = np.min((use_node, np.ceil(n_use_comm / njob_node)))
pbs_name = 'run%03d.pbs' % t1
csh_name = 'run%03d.csh' % t1
# generate .pbs file
t_name = os.path.join(t_path, pbs_name)
with open(t_name, 'w') as fpbs:
# pbs_head = '%s_%s' % (job_dir, pbs_name)
pbs_head = '%s_%d' % (job_dir, t1)
write_pbs_head000(fpbs, pbs_head, nodes=tnode)
fpbs.write(_parallel_pbs_use(n_use_comm, njob_node, csh_name))
# generate .csh file for submit
t_name = os.path.join(t_path, csh_name)
with open(t_name, 'w') as fcsh:
fcsh.write('#!/bin/csh -fe \n\n')
t2 = 'comm_list=('
for t3 in use_comm:
t2 = t2 + '"%s" ' % t3
t2 = t2 + ') \n\n'
fcsh.write(t2)
t2 = 'txt_list=('
for t3 in use_txt:
t2 = t2 + '"%s" ' % t3
t2 = t2 + ') \n\n'
fcsh.write(t2)
fcsh.write('echo ${comm_list[$1]} \'>\' ${txt_list[$1]}.txt'
' \'2>\' ${txt_list[$1]}.err \n')
fcsh.write('echo $(expr $1 + 1) / %d, ${txt_list[$1]} start. \n' % n_case)
fcsh.write('echo \n')
fcsh.write('if [ ${2:-false} = true ]; then \n')
fcsh.write(' ${comm_list[$1]} > ${txt_list[$1]}.txt 2> ${txt_list[$1]}.err \n')
fcsh.write('fi \n\n')
frun.write(run_fun % pbs_name)
frun.write('\n')
print('input %d cases.' % n_case)
print('generate %d pbs files in total.' % n_pbs)
if random_order:
print(' --->>random order mode is ON. ')
print('Command of first case is:')
print(comm_list[0])
return True
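# Illustrative sketch (added for clarity, not part of the original module):
# one way write_main_run_comm_list might be called. The commands, log stems
# and job directory are made-up placeholders; write_pbs_head is the module's
# default PBS head writer.
def _demo_write_main_run_comm_list():
    comm_list = ['echo case %d' % i for i in range(4)]
    txt_list = ['case%02d' % i for i in range(4)]
    return write_main_run_comm_list(comm_list, txt_list, use_node=2, njob_node=2,
                                    job_dir='demo_jobs',
                                    write_pbs_head000=write_pbs_head)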
def write_main_run_local(comm_list, njob_node, job_dir, random_order=False, ):
PWD = os.getcwd()
comm_list = np.array(comm_list)
n_comm = comm_list.size
sh_name = 'main_run.sh'
pbs_name = 'pbs.main_run'
csh_name = 'csh.main_run'
t_path = os.path.join(PWD, job_dir)
if not os.path.exists(t_path):
os.makedirs(t_path)
print('make folder %s' % t_path)
else:
print('exist folder %s' % t_path)
if random_order:
tidx = np.arange(n_comm)
np.random.shuffle(tidx)
comm_list = comm_list[tidx]
# generate comm_list.sh
t_name0 = os.path.join(t_path, 'comm_list.sh')
with open(t_name0, 'w') as fcomm:
for i0, ts in enumerate(comm_list):
fcomm.write('%s \n' % ts)
fcomm.write('echo \'%d / %d start.\' \n\n' % (i0 + 1, n_comm))
# generate .pbs file
t_name = os.path.join(t_path, pbs_name)
with open(t_name, 'w') as fpbs:
fpbs.write('#!/bin/sh\n')
fpbs.write('# run the job locally. \n')
fpbs.write('echo start job at $(date) \n')
t2 = 'seq 0 %d | parallel -j %d -u ' % (n_comm - 1, njob_node)
t2 = t2 + ' --sshdelay 0.1 '
t2 = t2 + ' "cd $PWD; echo $PWD; echo; bash %s {} true " \n ' % csh_name
fpbs.write(t2)
fpbs.write('echo finish job at $(date) \n')
fpbs.write('\n')
# generate .csh file
t_name = os.path.join(t_path, csh_name)
with open(t_name, 'w') as fcsh:
fcsh.write('#!/bin/csh -fe \n\n')
t2 = 'comm_list=('
for t3 in comm_list:
t2 = t2 + '"%s" ' % t3
t2 = t2 + ') \n\n'
fcsh.write(t2)
fcsh.write('echo ${comm_list[$1]} \n')
fcsh.write('echo $(expr $1 + 1) / %d start. \n' % n_comm)
fcsh.write('echo \n')
fcsh.write('if [ ${2:-false} = true ]; then \n')
fcsh.write(' ${comm_list[$1]} \n')
fcsh.write('fi \n\n')
# generate .sh file
t_name = os.path.join(t_path, sh_name)
with open(t_name, 'w') as fsh:
fsh.write('t_dir=$PWD \n ')
fsh.write('bash_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" '
'>/dev/null 2>&1 && pwd )" \n ')
fsh.write('echo $bash_dir \n ')
fsh.write('cd $bash_dir \n ')
nohup_name = 'nohup_%s.out' % '$(date +"%Y%m%d_%H%M%S")'
fsh.write('nohup bash %s > %s 2>&1 & \n' % (pbs_name, nohup_name))
fsh.write('echo Try the command to see the output information. \n ')
fsh.write('echo tail -f %s \n ' % nohup_name)
fsh.write('cd $t_dir \n ')
fsh.write('\n ')
print('Input %d cases. ' % n_comm)
print('Random order mode is %s. ' % random_order)
print('Command of first case is:')
print(comm_list[0])
return True
def write_myscript(job_name_list, job_dir):
t1 = ' '.join(['\"%s\"' % job_name for job_name in job_name_list])
tname = os.path.join(job_dir, 'myscript.csh')
print('write myscript csh file to %s' % tname)
with open(tname, 'w') as fcsh:
fcsh.write('#!/bin/sh -fe\n')
fcsh.write('job_name_list=(%s)\n' % t1)
fcsh.write('\n')
fcsh.write('echo ${job_name_list[$1]}\n')
fcsh.write('cd ${job_name_list[$1]}\n')
fcsh.write('bash ${job_name_list[$1]}.sh\n')
return True
def set_axes_equal(ax, rad_fct=0.5):
figsize = ax.figure.get_size_inches()
l1, l2 = ax.get_position().bounds[2:] * figsize
lmax = np.max((l1, l2))
if ax.name == "3d":
'''Make axes of 3D plot have equal scale so that spheres appear as spheres,
cubes as cubes, etc. This is one possible solution to Matplotlib's
ax.set_aspect('equal') and ax.axis('equal') not working for 3D.
Input
ax: a matplotlib axis, e.g., as output from plt.gca().
'''
limits = np.array([
ax.get_xlim3d(),
ax.get_ylim3d(),
ax.get_zlim3d(),
])
origin = np.mean(limits, axis=1)
radius = rad_fct * np.max(np.abs(limits[:, 1] - limits[:, 0]))
radius_x = l1 / lmax * radius
radius_y = l1 / lmax * radius
radius_z = l2 / lmax * radius
ax.set_xlim3d([origin[0] - radius_x, origin[0] + radius_x])
ax.set_ylim3d([origin[1] - radius_y, origin[1] + radius_y])
ax.set_zlim3d([origin[2] - radius_z, origin[2] + radius_z])
else:
limits = np.array([
ax.get_xlim(),
ax.get_ylim(),
])
origin = np.mean(limits, axis=1)
radius = rad_fct * np.max(np.abs(limits[:, 1] - limits[:, 0]))
radius_x = l1 / lmax * radius
radius_y = l2 / lmax * radius
ax.set_xlim([origin[0] - radius_x, origin[0] + radius_x])
ax.set_ylim([origin[1] - radius_y, origin[1] + radius_y])
return ax
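# Hedged usage sketch (not in the original module, assumes the mplot3d toolkit
# is available): equalise the data aspect of a 3D axes after plotting, so a
# unit sphere is rendered as a sphere rather than an ellipsoid.
def _demo_set_axes_equal():
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    u = np.linspace(0, 2 * np.pi, 30)
    v = np.linspace(0, np.pi, 15)
    ax.plot_surface(np.outer(np.cos(u), np.sin(v)),
                    np.outer(np.sin(u), np.sin(v)),
                    np.outer(np.ones_like(u), np.cos(v)))
    return set_axes_equal(ax)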
# Topics: line, color, LineCollection, cmap, colorline, codex
'''
Defines a function colorline that draws a (multi-)colored 2D line with coordinates x and y.
The color is taken from the optional data in z, from which a LineCollection is created.
z can be:
- empty, in which case a default coloring based on the position along the input arrays is used
- a single number, for a uniform color [this can also be accomplished with the usual plt.plot]
- an array of at least the same length as x, to color according to this data
- an array of a smaller length, in which case the colors are repeated along the curve
The function colorline returns the LineCollection created, which can be modified afterwards.
See also: plt.streamplot
'''
# Data manipulation:
def make_segments(x, y):
'''
Create list of line segments from x and y coordinates, in the correct format for LineCollection:
an array of the form numlines x (points per line) x 2 (x and y) array
'''
points = np.array([x, y]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
return segments
# Interface to LineCollection:
def colorline(x, y, z=None, cmap=plt.get_cmap('copper'), ax=None, norm=plt.Normalize(0.0, 1.0),
label='', linewidth=3, alpha=1.0):
'''
Plot a colored line with coordinates x and y
Optionally specify colors in the array z
Optionally specify a colormap, a norm function and a line width
'''
# Default colors equally spaced on [0,1]:
if z is None:
z = np.linspace(0.0, 1.0, x.size)
# Special case if a single number:
if not hasattr(z, "__iter__"): # to check for numerical input -- this is a hack
z = np.array([z])
z = np.asarray(z)
if ax is None:
fig, ax = plt.subplots(nrows=1, ncols=1)
fig.patch.set_facecolor('white')
else:
plt.sca(ax)
# fig = plt.gcf()
segments = make_segments(x, y)
lc = LineCollection(segments, array=z, cmap=cmap, norm=norm, linewidth=linewidth, alpha=alpha)
ax.add_collection(lc)
return lc
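# Hedged usage sketch (not in the original module): draw a sine curve whose
# colour varies along the parameter; colorline creates the figure itself when
# no axes object is passed, but does not autoscale, so the limits are set here.
def _demo_colorline():
    x = np.linspace(0.0, 2.0 * np.pi, 200)
    y = np.sin(x)
    lc = colorline(x, y, cmap=plt.get_cmap('viridis'))
    plt.gca().set_xlim(x.min(), x.max())
    plt.gca().set_ylim(-1.1, 1.1)
    return lc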
def colorline3d(tnodes, tcl, quiver_length_fct=None, clb_title='', show_project=False, tu=None,
nu_show=50, return_fig=False, ax0=None, tcl_lim=None, tcl_fontsize=10,
cmap=plt.get_cmap('jet')):
if ax0 is None:
fig = plt.figure(figsize=(8, 8), dpi=100)
fig.patch.set_facecolor('white')
ax0 = fig.add_subplot(1, 1, 1, projection='3d')
else:
assert hasattr(ax0, 'get_zlim')
plt.sca(ax0)
fig = plt.gcf()
if tcl_lim is None:
tcl_lim = (tcl.min(), tcl.max())
ax0.plot(tnodes[:, 0], tnodes[:, 1], tnodes[:, 2]).pop(0).remove()
cax1 = inset_axes(ax0, width="80%", height="5%", bbox_to_anchor=(0.1, 0.1, 0.8, 1),
loc=9, bbox_transform=ax0.transAxes, borderpad=0, )
norm = plt.Normalize(*tcl_lim)
cmap = cmap
# Create the 3D-line collection object
points = tnodes.reshape(-1, 1, 3)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
lc = Line3DCollection(segments, cmap=cmap, norm=norm)
lc.set_array(tcl)
ax0.add_collection3d(lc, zs=points[:, :, 2].flatten(), zdir='z')
clb = fig.colorbar(lc, cax=cax1, orientation="horizontal")
clb.ax.tick_params(labelsize=tcl_fontsize)
clb.ax.set_title(clb_title)
clb_ticks = np.linspace(*tcl_lim, 5)
clb.set_ticks(clb_ticks)
clb.ax.set_yticklabels(clb_ticks)
set_axes_equal(ax0)
if show_project:
ax0.plot(np.ones_like(tnodes[:, 0]) * ax0.get_xlim()[0], tnodes[:, 1], tnodes[:, 2], '--k',
alpha=0.2)
ax0.plot(tnodes[:, 0], np.ones_like(tnodes[:, 1]) * ax0.get_ylim()[1], tnodes[:, 2], '--k',
alpha=0.2)
ax0.plot(tnodes[:, 0], tnodes[:, 1], np.ones_like(tnodes[:, 0]) * ax0.get_zlim()[0], '--k',
alpha=0.2)
if not tu is None:
assert not quiver_length_fct is None
t_stp = np.max((1, tu.shape[0] // nu_show))
color_len = tnodes[::t_stp, 0].size
quiver_length = np.max(tnodes.max(axis=0) - tnodes.min(axis=0)) * quiver_length_fct
# colors = [cmap(1.0 * i / color_len) for i in range(color_len)]
# ax0.quiver(tnodes[::t_stp, 0], tnodes[::t_stp, 1], tnodes[::t_stp, 2],
# tu[::t_stp, 0], tu[::t_stp, 1], tu[::t_stp, 2],
# length=quiver_length, arrow_length_ratio=0.2, pivot='tail', normalize=False,
# colors=colors)
ax0.quiver(tnodes[::t_stp, 0], tnodes[::t_stp, 1], tnodes[::t_stp, 2],
tu[::t_stp, 0], tu[::t_stp, 1], tu[::t_stp, 2],
length=quiver_length, arrow_length_ratio=0.2, pivot='tail', normalize=False,
colors='k')
plt.sca(ax0)
ax0.set_xlabel('$X_1$')
ax0.set_ylabel('$X_2$')
ax0.set_zlabel('$X_3$')
# for spine in ax0.spines.values():
# spine.set_visible(False)
# plt.tight_layout()
t1 = fig if return_fig else True
return t1
def add_inset(ax0, rect, *args, **kwargs):
box = ax0.get_position()
xlim = ax0.get_xlim()
ylim = ax0.get_ylim()
inptx = interpolate.interp1d(xlim, (0, box.x1 - box.x0))
inpty = interpolate.interp1d(ylim, (0, box.y1 - box.y0))
left = inptx(rect[0]) + box.x0
bottom = inpty(rect[1]) + box.y0
width = inptx(rect[2] + rect[0]) - inptx(rect[0])
height = inpty(rect[3] + rect[1]) - inpty(rect[1])
new_rect = np.hstack((left, bottom, width, height))
return ax0.figure.add_axes(new_rect, *args, **kwargs)
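# Hedged usage sketch (not in the original module): place an inset axes whose
# rectangle (x, y, width, height) is given in the data coordinates of the
# parent axes; the numbers below are arbitrary but lie inside the data range.
def _demo_add_inset():
    fig, ax = plt.subplots()
    ax.plot(np.arange(10), np.arange(10) ** 2)
    ax_inset = add_inset(ax, (1.0, 40.0, 4.0, 30.0))
    ax_inset.plot(np.arange(3), np.arange(3))
    return fig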
def multicolor_ylabel(ax, list_of_strings, list_of_colors, axis='x', anchorpad=0, **kw):
"""this function creates axes labels with multiple colors
ax specifies the axes object where the labels should be drawn
list_of_strings is a list of all of the text items
list_of_colors is a corresponding list of colors for the strings
axis='x', 'y', or 'both' and specifies which label(s) should be drawn"""
# x-axis label
if axis == 'x' or axis == 'both':
boxes = [TextArea(text, textprops=dict(color=color, ha='left', va='bottom', **kw))
for text, color in zip(list_of_strings, list_of_colors)]
xbox = HPacker(children=boxes, align="center", pad=0, sep=5)
anchored_xbox = AnchoredOffsetbox(loc='lower left', child=xbox, pad=anchorpad,
frameon=False, bbox_to_anchor=(0.2, -0.09),
bbox_transform=ax.transAxes, borderpad=0.)
ax.add_artist(anchored_xbox)
# y-axis label
if axis == 'y' or axis == 'both':
boxes = [TextArea(text, textprops=dict(color=color, ha='left', va='bottom',
rotation=90, **kw))
for text, color in zip(list_of_strings[::-1], list_of_colors)]
ybox = VPacker(children=boxes, align="center", pad=0, sep=5)
anchored_ybox = AnchoredOffsetbox(loc='lower left', child=ybox, pad=anchorpad,
frameon=False, bbox_to_anchor=(-0.105, 0.25),
bbox_transform=ax.transAxes, borderpad=0.)
ax.add_artist(anchored_ybox)
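# Hedged usage sketch (not in the original module, assumes the offsetbox
# helpers imported at the top of this file): a two-colour y-axis label that
# matches two curves plotted in the same colours.
def _demo_multicolor_ylabel():
    fig, ax = plt.subplots()
    ax.plot([0, 1], [0, 1], 'r-')
    ax.plot([0, 1], [1, 0], 'b-')
    multicolor_ylabel(ax, ('rising', 'falling'), ('r', 'b'), axis='y', size=14)
    return fig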
class MinorSymLogLocator(Locator):
"""
Dynamically find minor tick positions based on the positions of
major ticks for a symlog scaling.
"""
def __init__(self, linthresh):
"""
Ticks will be placed between the major ticks.
The placement is linear for x between -linthresh and linthresh,
otherwise it is logarithmic.
"""
self.linthresh = linthresh
def __call__(self):
'Return the locations of the ticks'
majorlocs = self.axis.get_majorticklocs()
view_interval = self.axis.get_view_interval()
if view_interval[-1] > majorlocs[-1]:
majorlocs = np.hstack((majorlocs, view_interval[-1]))
assert np.all(majorlocs >= 0)
if np.isclose(majorlocs[0], 0):
majorlocs = majorlocs[1:]
# # iterate through minor locs, handle the lowest part, old version
# minorlocs = []
# for i in range(1, len(majorlocs)):
# majorstep = majorlocs[i] - majorlocs[i - 1]
# if abs(majorlocs[i - 1] + majorstep / 2) < self.linthresh:
# ndivs = 10
# else:
# ndivs = 9
# minorstep = majorstep / ndivs
# locs = np.arange(majorlocs[i - 1], majorlocs[i], minorstep)[1:]
# minorlocs.extend(locs)
# iterate through minor locs, handle the lowest part, my version
minorlocs = []
for i in range(1, len(majorlocs)):
tloc = majorlocs[i - 1]
tgap = majorlocs[i] - majorlocs[i - 1]
tstp = majorlocs[i - 1] * self.linthresh * 10
while tloc < tgap and not np.isclose(tloc, tgap):
tloc = tloc + tstp
minorlocs.append(tloc)
return self.raise_if_exceeds(np.array(minorlocs))
def tick_values(self, vmin, vmax):
raise NotImplementedError('Cannot get tick locations for a '
'%s type.' % type(self))
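# Hedged usage sketch (not in the original module; the 'linthresh' keyword of
# set_yscale assumes matplotlib >= 3.3): attach the locator so minor ticks are
# generated between the major symlog ticks. Limits are fixed to keep all major
# ticks non-negative, as required by the assert in __call__ above.
def _demo_minor_symlog_locator():
    fig, ax = plt.subplots()
    ax.plot(np.arange(1, 100), np.arange(1, 100) ** 2)
    ax.set_yscale('symlog', linthresh=1.0)
    ax.set_ylim(1.0, 1.0e4)
    ax.yaxis.set_minor_locator(MinorSymLogLocator(1.0))
    return fig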
# user-defined color norm
class midPowerNorm(Normalize):
def __init__(self, gamma=10, midpoint=1, vmin=None, vmax=None, clip=False):
Normalize.__init__(self, vmin, vmax, clip)
assert gamma > 1
self.gamma = gamma
self.midpoint = midpoint
def __call__(self, value, clip=None):
if clip is None:
clip = self.clip
result, is_scalar = self.process_value(value)
self.autoscale_None(result)
gamma = self.gamma
midpoint = self.midpoint
logmid = np.log(midpoint) / np.log(gamma)
vmin, vmax = self.vmin, self.vmax
if vmin > vmax:
raise ValueError("minvalue must be less than or equal to maxvalue")
elif vmin == vmax:
result.fill(0)
else:
if clip:
mask =
|
np.ma.getmask(result)
|
numpy.ma.getmask
|
import numpy as np
import tensorflow as tf
# Convert detections from (center_x, center_y, width, height) form to corner form [x0, y0, x1, y1]
def detections_boxes(detections):
center_x, center_y, width, height, attrs = tf.split(detections,
[1, 1, 1, 1, -1],
axis=-1)
w2 = width / 2
h2 = height / 2
x0 = center_x - w2
y0 = center_y - h2
x1 = center_x + w2
y1 = center_y + h2
boxes = tf.concat([x0, y0, x1, y1], axis=-1)
detections = tf.concat([boxes, attrs], axis=-1)
return detections
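# Hedged check (not in the original script): a single box centred at (10, 20)
# with width 4 and height 6 maps to the corners [8, 17, 12, 23]; trailing
# attributes (e.g. objectness and class scores) are passed through unchanged.
def _demo_detections_boxes():
    det = tf.constant([[[10.0, 20.0, 4.0, 6.0, 0.9]]])
    return detections_boxes(det)  # -> [[[8., 17., 12., 23., 0.9]]]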
# Compute the intersection over union (IoU) of two boxes; box1 and box2 are given as top-left/bottom-right coordinates [x0, y0, x1, y1]
def _iou(box1, box2):
b1_x0, b1_y0, b1_x1, b1_y1 = box1
b2_x0, b2_y0, b2_x1, b2_y1 = box2
int_x0 = max(b1_x0, b2_x0)
int_y0 = max(b1_y0, b2_y0)
int_x1 = min(b1_x1, b2_x1)
int_y1 = min(b1_y1, b2_y1)
int_area = (int_x1 - int_x0) * (int_y1 - int_y0)
b1_area = (b1_x1 - b1_x0) * (b1_y1 - b1_y0)
b2_area = (b2_x1 - b2_x0) * (b2_y1 - b2_y0)
# add 1e-05 to the denominator to avoid division by zero
iou = int_area / (b1_area + b2_area - int_area + 1e-05)
return iou
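# Hedged check (not in the original script): two unit squares overlapping in a
# 0.5 x 1 strip give IoU = 0.5 / (1 + 1 - 0.5) = 1/3.
def _demo_iou():
    box_a = (0.0, 0.0, 1.0, 1.0)
    box_b = (0.5, 0.0, 1.5, 1.0)
    return _iou(box_a, box_b)  # ~0.3333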
# Apply non-maximum suppression (NMS) to remove duplicate detections
# (alternatively, tf.image.non_max_suppression could be used)
def non_max_suppression(predictions_with_boxes,
confidence_threshold,
iou_threshold=0.4):
conf_mask = np.expand_dims(
(predictions_with_boxes[:, :, 4] > confidence_threshold), -1)
predictions = predictions_with_boxes * conf_mask
result = {}
for i, image_pred in enumerate(predictions):
shape = image_pred.shape
print("shape1", shape)
non_zero_idxs = np.nonzero(image_pred)
image_pred = image_pred[non_zero_idxs[0]]
print("shape2", image_pred.shape)
image_pred = image_pred.reshape(-1, shape[-1])
bbox_attrs = image_pred[:, :5]
classes = image_pred[:, 5:]
classes = np.argmax(classes, axis=-1)
unique_classes = list(set(classes.reshape(-1)))
for cls in unique_classes:
cls_mask = classes == cls
cls_boxes = bbox_attrs[np.nonzero(cls_mask)]
cls_boxes = cls_boxes[cls_boxes[:, -1].argsort()[::-1]]
cls_scores = cls_boxes[:, -1]
cls_boxes = cls_boxes[:, :-1]
while len(cls_boxes) > 0:
box = cls_boxes[0]
score = cls_scores[0]
if not cls in result:
result[cls] = []
result[cls].append((box, score))
cls_boxes = cls_boxes[1:]
ious = np.array([_iou(box, x) for x in cls_boxes])
iou_mask = ious < iou_threshold
cls_boxes = cls_boxes[
|
np.nonzero(iou_mask)
|
numpy.nonzero
|
"""Serialization Unit Tests"""
import numpy as np
from pytest import raises
from proxystore.serialize import serialize, deserialize
from proxystore.serialize import SerializationError
def test_serialization() -> None:
"""Test serialization"""
x = b'test string'
b = serialize(x)
assert deserialize(b) == x
x = 'test string'
b = serialize(x)
assert deserialize(b) == x
x =
|
np.array([1, 2, 3])
|
numpy.array
|
import matplotlib.pyplot as plt
import numpy as np
import matplotlib as mpl
colors = ['navy', 'turquoise', 'darkorange']
def make_ellipses(gmm, ax):
for n, color in enumerate(colors):
if gmm.covariance_type == 'full':
covariances = gmm.covariances_[n][:2, :2]
elif gmm.covariance_type == 'tied':
covariances = gmm.covariances_[:2, :2]
elif gmm.covariance_type == 'diag':
covariances = np.diag(gmm.covariances_[n][:2])
elif gmm.covariance_type == 'spherical':
covariances = np.eye(gmm.means_.shape[1]) * gmm.covariances_[n]
v, w = np.linalg.eigh(covariances)
u = w[0] / np.linalg.norm(w[0])
angle = np.arctan2(u[1], u[0])
angle = 180 * angle / np.pi # convert to degrees
v = 2. *
|
np.sqrt(2.)
|
numpy.sqrt
|
import matplotlib.pyplot as plt
import modelmiezelb.arg_inel_mieze_model as arg
import os
###############################################################################
from numpy import linspace, tile, trapz, all, isclose, arange, ones, atleast_2d, where
from pprint import pprint
from time import time
###############################################################################
from modelmiezelb.correction import CorrectionFactor, DetectorEfficiencyCorrectionFactor, EnergyCutOffCorrectionFactor
from modelmiezelb.lineshape import LorentzianLine, F_cLine, F_ILine
from modelmiezelb.sqe_model import SqE, UPPER_INTEGRATION_LIMIT
###############################################################################
from modelmiezelb.utils.util import detector_efficiency, triangle_distribution, energy_from_lambda, energy_lambda_nrange
###############################################################################
from pprint import pprint
###############################################################################
# Path quarrels
testdir = os.path.dirname(os.path.abspath(__file__))
def test_CorrectionFactor_instantiation():
corrf1 = CorrectionFactor(None)
print(f"Instance required calculation parameters {corrf1._required_params}")
print(f"Class required calculation parameters {CorrectionFactor._required_params}")
#------------------------------------------------------------------------------
def test_DetectorEfficiencyCorrectionFactor():
# We need some lines
L1 = LorentzianLine("Lorentzian1", (-5.0, 5.0), x0=0.0, width=0.4, c=0.0, weight=2)
L2 = LorentzianLine(name="Lorentzian2", domain=(-5.0, 5.0), x0=-1.0, width=0.4, c=0.02, weight=1)
# Construct a SqE model
sqe = SqE(lines=(L1, L2), lam=6.0, dlam=0.12, lSD=3.43, T=20)
# Instantiate a detector efficiency corr factor
decf = DetectorEfficiencyCorrectionFactor(sqe)
ne = 10
nlam = 5
lam = 6.0 * linspace(1-0.12*1.01, 1+0.12*1.01, nlam)
lams = tile(lam, (ne, 1))
a = -0.99999 * energy_from_lambda(lam)
b = 15.0 + a
es = linspace(a, b, ne)
deteff = detector_efficiency(es, lams, 1)
tria = triangle_distribution(lams, 6.0, 0.12)
print("Triangular wavelenght distr.: ", tria)
print("Triangular wavelength distr. shape: ", tria.shape)
print("Det. eff. values: ", deteff)
print("Det. eff. values shape: :", deteff.shape)
sqevals = sqe(es)
print("Manual mult.: ", sqevals * deteff * tria)
print("Class result: ", decf(es, lams))
print("Are manual and deteffcorrfac identical?: ", all((sqevals * deteff * tria) == decf(es, lams)))
#------------------------------------------------------------------------------
def test_DetectorEfficiencyCorrectionFactor_compare_with_arg():
# We need some lines
L1 = LorentzianLine(name="Lorentzian1", domain=(-16.0, 16.0), x0=-1.0, width=0.4, c=0.0, weight=1)
# Construct a SqE model
sqe = SqE(lines=(L1,), lam=6.0, dlam=0.12, lSD=3.43, T=20)
# Instantiate a detector efficiency corr factor
decf = DetectorEfficiencyCorrectionFactor(sqe)
ne = 15000
nlam = 20
lam = 6.0 * linspace(1-0.12*1.01, 1+0.12*1.01, nlam)
lams = tile(lam, (ne, 1))
a = -0.99999 * energy_from_lambda(lam)
b = 15.0 + a
es = linspace(a, b, ne)
test_decf_val = arg.DetFac_Eint_lamInt(
arg.fqe_I,
-1.0,
0.4,
15000,
20,
6.0,
0.12,
1,
0.0,
1.0,
20,
0.5,
0.00001,
350.
)
decf_val_man_int = decf(es, lams)
decf_val = decf.correction(es, lams)
print("arg res: ", test_decf_val)
print("class calc res manualy integrated: ", decf_val_man_int)
print("class res: ", decf_val)
#------------------------------------------------------------------------------
def test_EnergyCutOffCorrectionFactor_vs_arg():
# We need some lines
L1 = LorentzianLine(name="Lorentzian1", domain=(-16.0, 16.0), x0=-1.0, width=0.4, c=0.0, weight=1)
# Construct a SqE model
sqe = SqE(lines=(L1,), lam=6.0, dlam=0.12, lSD=3.43, T=20)
# Instantiate a energy cutoff corr factor
eccf = EnergyCutOffCorrectionFactor(sqe)
ne = 15000
nlam = 20
lam = 6.0 * linspace(1-0.12*1.01, 1+0.12*1.01, nlam)
lams = tile(lam, (ne, 1))
a = -0.99999 * energy_from_lambda(lam)
b = 15.0 + a
es = linspace(a, b, ne)
test_eccf_vals = arg.CutFac_Eint(
arg.lorentzian,
-1.0,
0.4,
15000,
20,
6.0,
0.12,
1,
0.0,
1.0,
20,
0.5,
0.00001,
350.
)
eccf_vals = eccf.correction(es, lams)
# print(test_eccf_vals.shape)
print(test_eccf_vals)
# print(eccf_vals.shape)
print(eccf_vals)
# print(isclose(eccf_vals, test_eccf_vals, atol=0.01))
# argsqe = arg.SvqE(arg.lorentzian,
# es[::50,0],
# -1.0,
# 0.4,
# 0.0,
# 1.0,
# 6.0,
# 20,
# 0.000001,
# 0.1,
# 350.)
# print(argsqe)
# plt.plot(es[::50,0], sqe(es[::50,0]))
# plt.plot(es[::50,0], argsqe)
# plt.show()
#------------------------------------------------------------------------------
def test_correctionFactor_dimensionality():
# We need some lines
L1 = LorentzianLine(name="Lorentzian1", domain=(-16.0, 16.0), x0=-1.0, width=0.4, c=0.0, weight=1)
# Construct a SqE model
sqe = SqE(lines=(L1,), lam=6.0, dlam=0.12, lSD=3.43, T=20)
# Instantiate a detector efficiency corr factor
decf = DetectorEfficiencyCorrectionFactor(sqe)
# Instantiate a energy cutoff corr factor
eccf = EnergyCutOffCorrectionFactor(sqe)
ne = 15
nlam = 5
lam = 6.0 * linspace(1-0.12*1.01, 1+0.12*1.01, nlam)
lams = tile(lam, (ne, 1))
a = -0.99999 * energy_from_lambda(lam)
b = 15.0 + a
es = linspace(a, b, ne)
print(lams)
print(es)
print(decf.calc(es, lams))
print(eccf.calc(es, lams))
#------------------------------------------------------------------------------
def test_EnergyCutoffCorrectionFactor():
# We need some lines
L1 = LorentzianLine("Lorentzian1", (-5.0, 5.0), x0=0.0, width=0.4, c=0.0, weight=2)
L2 = LorentzianLine(name="Lorentzian2", domain=(-5.0, 5.0), x0=-1.0, width=0.4, c=0.02, weight=1)
# Construct a SqE model
sqe = SqE(lines=(L1, L2), lam=6.0, dlam=0.12, lSD=3.43, T=20)
new_domain = (-1 * energy_from_lambda(6.0), UPPER_INTEGRATION_LIMIT)
sqe.update_domain(new_domain)
# init energycutoff
eccf = EnergyCutOffCorrectionFactor(sqe, ne=10000, nlam=20)
ne = 10000
nlam = 5
lam = 6.0 * linspace(1-0.12*1.01, 1+0.12*1.01, nlam)
lams = tile(lam, (ne, 1))
a = -0.99999 * energy_from_lambda(lam)
b = 15.0 + a
es = linspace(a, b, ne)
### Calculate the trapz integral over the S(q,E)
# Only over domain (interval length: 15 meV)
I_over_dom_only = trapz(sqe(es[:, 2]), es[:, 2])
print("Trapz integration over the domain.")
print(f"Interval {a[2]:.4f} - {b[2]:.4f} -> length {b[2]-a[2]:.4f} meV.")
print(f"#Steps = {ne}")
print(f"Integral value: {I_over_dom_only:.4f}")
# plt.plot(es[:,2], sqe(es[:,2]), label="Over domain only")
# plt.show()
# Beyond domain same array length
es_same_length = linspace(-UPPER_INTEGRATION_LIMIT, UPPER_INTEGRATION_LIMIT, ne)
I_beyond_dom_same_length = trapz(sqe(es_same_length), es_same_length)
print("\nTrapz integration beyond the domain with varrying stepsize.")
print(f"Interval {-UPPER_INTEGRATION_LIMIT} - {UPPER_INTEGRATION_LIMIT} -> length {30.0} meV.")
print(f"#Steps = {ne}")
print(f"Integral value: {I_beyond_dom_same_length:.4f}")
# plt.plot(es_same_length, sqe(es_same_length), ls="--", label="Beyond domain ne=10000")
# plt.show()
# Beyond domain same step size
es_same_stepsize =
|
arange(-UPPER_INTEGRATION_LIMIT, UPPER_INTEGRATION_LIMIT+0.001, 15e-3)
|
numpy.arange
|
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
import os.path as op
from nose.tools import assert_true, assert_raises, assert_equal
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from mne import io, Epochs, read_events, pick_types
from mne.decoding.csp import CSP, _ajd_pham
from mne.utils import requires_sklearn, slow_test
data_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(data_dir, 'test_raw.fif')
event_name = op.join(data_dir, 'test-eve.fif')
tmin, tmax = -0.2, 0.5
event_id = dict(aud_l=1, vis_l=3)
# if stop is too small pca may fail in some cases, but we're okay on this file
start, stop = 0, 8
@slow_test
def test_csp():
"""Test Common Spatial Patterns algorithm on epochs
"""
raw = io.read_raw_fif(raw_fname, preload=False)
events = read_events(event_name)
picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
eog=False, exclude='bads')
picks = picks[2:12:3] # subselect channels -> disable proj!
raw.add_proj([], remove_existing=True)
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True, proj=False)
epochs_data = epochs.get_data()
n_channels = epochs_data.shape[1]
y = epochs.events[:, -1]
# Init
assert_raises(ValueError, CSP, n_components='foo')
for reg in ['foo', -0.1, 1.1]:
assert_raises(ValueError, CSP, reg=reg)
for reg in ['oas', 'ledoit_wolf', 0, 0.5, 1.]:
CSP(reg=reg)
for cov_est in ['foo', None]:
assert_raises(ValueError, CSP, cov_est=cov_est)
for cov_est in ['concat', 'epoch']:
CSP(cov_est=cov_est)
n_components = 3
csp = CSP(n_components=n_components)
# Fit
csp.fit(epochs_data, epochs.events[:, -1])
assert_equal(len(csp.mean_), n_components)
assert_equal(len(csp.std_), n_components)
# Transform
X = csp.fit_transform(epochs_data, y)
sources = csp.transform(epochs_data)
assert_true(sources.shape[1] == n_components)
assert_true(csp.filters_.shape == (n_channels, n_channels))
assert_true(csp.patterns_.shape == (n_channels, n_channels))
assert_array_almost_equal(sources, X)
# Test data exception
assert_raises(ValueError, csp.fit, epochs_data,
np.zeros_like(epochs.events))
assert_raises(ValueError, csp.fit, epochs, y)
assert_raises(ValueError, csp.transform, epochs)
# Test plots
epochs.pick_types(meg='mag')
cmap = ('RdBu', True)
components = np.arange(n_components)
for plot in (csp.plot_patterns, csp.plot_filters):
plot(epochs.info, components=components, res=12, show=False, cmap=cmap)
# Test covariance estimation methods (results should be roughly equal)
np.random.seed(0)
csp_epochs = CSP(cov_est="epoch")
csp_epochs.fit(epochs_data, y)
for attr in ('filters_', 'patterns_'):
corr = np.corrcoef(getattr(csp, attr).ravel(),
getattr(csp_epochs, attr).ravel())[0, 1]
assert_true(corr >= 0.94)
# Test with more than 2 classes
epochs = Epochs(raw, events, tmin=tmin, tmax=tmax, picks=picks,
event_id=dict(aud_l=1, aud_r=2, vis_l=3, vis_r=4),
baseline=(None, 0), proj=False, preload=True)
epochs_data = epochs.get_data()
n_channels = epochs_data.shape[1]
for cov_est in ['concat', 'epoch']:
csp = CSP(n_components=n_components, cov_est=cov_est)
csp.fit(epochs_data, epochs.events[:, 2]).transform(epochs_data)
assert_equal(len(csp._classes), 4)
assert_array_equal(csp.filters_.shape, [n_channels, n_channels])
assert_array_equal(csp.patterns_.shape, [n_channels, n_channels])
@requires_sklearn
def test_regularized_csp():
"""Test Common Spatial Patterns algorithm using regularized covariance
"""
raw = io.read_raw_fif(raw_fname, preload=False)
events = read_events(event_name)
picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
eog=False, exclude='bads')
picks = picks[1:13:3]
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True)
epochs_data = epochs.get_data()
n_channels = epochs_data.shape[1]
n_components = 3
reg_cov = [None, 0.05, 'ledoit_wolf', 'oas']
for reg in reg_cov:
csp = CSP(n_components=n_components, reg=reg)
csp.fit(epochs_data, epochs.events[:, -1])
y = epochs.events[:, -1]
X = csp.fit_transform(epochs_data, y)
assert_true(csp.filters_.shape == (n_channels, n_channels))
assert_true(csp.patterns_.shape == (n_channels, n_channels))
assert_array_almost_equal(csp.fit(epochs_data, y).
transform(epochs_data), X)
# test init exception
assert_raises(ValueError, csp.fit, epochs_data,
np.zeros_like(epochs.events))
assert_raises(ValueError, csp.fit, epochs, y)
assert_raises(ValueError, csp.transform, epochs)
csp.n_components = n_components
sources = csp.transform(epochs_data)
assert_true(sources.shape[1] == n_components)
@requires_sklearn
def test_csp_pipeline():
"""Test if CSP works in a pipeline
"""
from sklearn.svm import SVC
from sklearn.pipeline import Pipeline
csp = CSP(reg=1)
svc = SVC()
pipe = Pipeline([("CSP", csp), ("SVC", svc)])
pipe.set_params(CSP__reg=0.2)
assert_true(pipe.get_params()["CSP__reg"] == 0.2)
def test_ajd():
"""Test if Approximate joint diagonalization implementation obtains same
results as the Matlab implementation by <NAME>.
"""
# Generate a set of covariance matrices for test purposes
n_times, n_channels = 10, 3
seed = np.random.RandomState(0)
diags = 2.0 + 0.1 * seed.randn(n_times, n_channels)
A = 2 * seed.rand(n_channels, n_channels) - 1
A /= np.atleast_2d(np.sqrt(np.sum(A ** 2, 1))).T
covmats = np.empty((n_times, n_channels, n_channels))
for i in range(n_times):
covmats[i] = np.dot(np.dot(A, np.diag(diags[i])), A.T)
V, D = _ajd_pham(covmats)
# Results obtained with original matlab implementation
V_matlab = [[-3.507280775058041, -5.498189967306344, 7.720624541198574],
[0.694689013234610, 0.775690358505945, -1.162043086446043],
[-0.592603135588066, -0.598996925696260, 1.009550086271192]]
|
assert_array_almost_equal(V, V_matlab)
|
numpy.testing.assert_array_almost_equal
|
import os
import numpy as np
def load_networks(data_path):
'''Get a list of paths for all the files inside data_path'''
networks_dir = []
for file in os.listdir(data_path):
networks_dir += [os.path.join(data_path, file)]
return networks_dir
def get_degree_distribution(degrees):
degrees = np.fromiter(dict(degrees).values(), dtype=int)
nodes, counts = np.unique(degrees, return_counts=True)
prob_dens = counts / np.sum(counts)
return nodes, prob_dens
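# Hedged usage sketch (not in the original module): degree distribution of a
# toy degree sequence given as (node, degree) pairs, the format produced by
# networkx's G.degree().
def _demo_degree_distribution():
    toy_degrees = [(0, 1), (1, 2), (2, 2), (3, 3)]
    nodes, prob_dens = get_degree_distribution(toy_degrees)
    return nodes, prob_dens  # degrees [1, 2, 3] with probabilities [0.25, 0.5, 0.25]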
# Analytic solutions to the model
def barabasi_sol(m, t_i, t):
"""
Returns the degree evolution of the Barabasi-Albert Model.
Parameters:
m : int = Number of initial edges
t_i : int = Time at which the node attached to the network with m edges.
t : int = Time vector for the range of time expected.
Returns:
ndarray = Degree evolution of the node attached at time t_i
"""
return m *
|
np.sqrt(t / t_i)
|
numpy.sqrt
|
"""Unit tests for raw_ships_io.py."""
import copy
import unittest
import numpy
from ml4tc.io import raw_ships_io
TOLERANCE = 1e-6
DISTANCES_METRES = numpy.array([-200, -100, -50, 0, 100, 150, 200], dtype=float)
DISTANCES_KM = numpy.array([-0.2, -0.1, -0.05, 0, 0.1, 0.15, 0.2])
TEMPERATURES_DECICELSIUS = numpy.array(
[-200, -100, -50, 0, 100, 150, 200], dtype=float
)
TEMPERATURES_KELVINS = numpy.array([
253.15, 263.15, 268.15, 273.15, 283.15, 288.15, 293.15
])
PRESSURES_1000MB_DEPARTURES_DECAPASCALS = numpy.array(
[-200, -100, -50, 0, 100, 150, 200], dtype=float
)
PRESSURES_PASCALS = numpy.array(
[98000, 99000, 99500, 100000, 101000, 101500, 102000], dtype=float
)
FORECAST_HOUR_LINE_5DAY = (
' -12 -6 0 6 12 18 24 30 36 42 48 54 60 66 '
'72 78 84 90 96 102 108 114 120 TIME'
)
HOUR_INDEX_TO_CHAR_INDICES_5DAY = {
0: numpy.array([1, 5], dtype=int),
1: numpy.array([6, 10], dtype=int),
2: numpy.array([11, 15], dtype=int),
3: numpy.array([16, 20], dtype=int),
4: numpy.array([21, 25], dtype=int),
5: numpy.array([26, 30], dtype=int),
6: numpy.array([31, 35], dtype=int),
7: numpy.array([36, 40], dtype=int),
8: numpy.array([41, 45], dtype=int),
9: numpy.array([46, 50], dtype=int),
10: numpy.array([51, 55], dtype=int),
11: numpy.array([56, 60], dtype=int),
12: numpy.array([61, 65], dtype=int),
13: numpy.array([66, 70], dtype=int),
14: numpy.array([71, 75], dtype=int),
15: numpy.array([76, 80], dtype=int),
16: numpy.array([81, 85], dtype=int),
17: numpy.array([86, 90], dtype=int),
18: numpy.array([91, 95], dtype=int),
19:
|
numpy.array([96, 100], dtype=int)
|
numpy.array
|
"""Calculate."""
# --- import --------------------------------------------------------------------------------------
import numpy as np
from .. import units as wt_units
# --- define --------------------------------------------------------------------------------------
__all__ = ["mono_resolution", "nm_width", "symmetric_sqrt"]
# --- functions -----------------------------------------------------------------------------------
def mono_resolution(grooves_per_mm, slit_width, focal_length, output_color, output_units="wn"):
"""Calculate the resolution of a monochromator.
Parameters
----------
grooves_per_mm : number
Grooves per millimeter.
slit_width : number
Slit width in microns.
focal_length : number
Focal length in mm.
output_color : number
Output color in nm.
output_units : string (optional)
Output units. Default is wn.
Returns
-------
float
Resolution.
"""
d_lambda = 1e6 * slit_width / (grooves_per_mm * focal_length) # nm
upper = output_color + d_lambda / 2 # nm
lower = output_color - d_lambda / 2 # nm
return abs(
wt_units.converter(upper, "nm", output_units)
- wt_units.converter(lower, "nm", output_units)
)
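# Hedged usage sketch (not in the original module): resolution of a
# monochromator with a 1200 g/mm grating, 100 um slits and a 300 mm focal
# length at 600 nm output, expressed in wavenumbers; the numbers are
# illustrative placeholders only.
def _demo_mono_resolution():
    return mono_resolution(grooves_per_mm=1200, slit_width=100,
                           focal_length=300, output_color=600,
                           output_units="wn")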
def nm_width(center, width, units="wn"):
"""Given a center and width, in energy units, get back a width in nm.
Parameters
----------
center : number
Center (in energy units).
width : number
Width (in energy units).
units : string (optional)
Input units. Default is wn.
Returns
-------
number
Width in nm.
"""
red = wt_units.converter(center - width / 2., units, "nm")
blue = wt_units.converter(center + width / 2., units, "nm")
return red - blue
def symmetric_sqrt(x, out=None):
"""Compute the 'symmetric' square root: sign(x) * sqrt(abs(x)).
Parameters
----------
x : array_like or number
Input array.
out : ndarray, None, or tuple of ndarray and None (optional)
A location into which the result is stored. If provided, it must
have a shape that the inputs broadcast to. If not provided or None,
a freshly-allocated array is returned. A tuple (possible only as a
keyword argument) must have length equal to the number of outputs.
Returns
-------
np.ndarray
Symmetric square root of arr.
"""
factor =
|
np.sign(x)
|
numpy.sign
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import tensorflow as tf
import os
import sys
import keras
from keras.models import Sequential, Model
from keras.layers import Dense, Activation, Dropout, Embedding, LSTM, Bidirectional,Multiply
# Merge,
from keras.layers import BatchNormalization, merge, add
from keras.layers.core import Flatten, Reshape
from keras.layers.merge import Concatenate, concatenate, subtract, multiply
from keras.layers.convolutional import Conv1D
from keras.layers.pooling import MaxPooling1D, AveragePooling1D, GlobalAveragePooling1D, GlobalMaxPooling1D
from keras.optimizers import Adam, RMSprop
import keras.backend.tensorflow_backend as KTF
import numpy as np
from tqdm import tqdm
from keras.layers import Input, CuDNNGRU, GRU
from numpy import linalg as LA
import scipy
#from sklearn.model_selection import KFold, ShuffleSplit
from keras import backend as K
import re
#from multiHead import SparseSelfAttention
os.environ["CUDA_VISIBLE_DEVICES"] = "4"
config = tf.ConfigProto()
config.gpu_options.allow_growth=True  # do not grab all GPU memory up front; allocate on demand
sess = tf.Session(config=config)
KTF.set_session(sess)
# In[2]:
hidden_dim = 256 #256
from six.moves import cPickle as pickle #for performance
def save_dict(di_, filename_):
with open(filename_, 'wb') as f:
pickle.dump(di_, f)
def load_dict(filename_):
with open(filename_, 'rb') as f:
ret_di = pickle.load(f)
return ret_di
# In[ ]:
all_protein_seqs_emb = []
all_smiles_seqs_emb = []
EMB_NO = 12
for i in range(1,EMB_NO+1):
if i < 10:
embedding_no = '0'+str(i)
else:
embedding_no = i
protein_seqs_emb = load_dict('dataset/embedding256-12layers/atomwise_BindingDB-full_protein_maxlen1022_dim256-layer{}.pkl'.format(embedding_no))
smiles_seqs_emb = load_dict('dataset/DTADATA/embedding256-12layers/atomwise_BindingDB-full_smiles_maxlen100_dim256-layer{}.pkl'.format(embedding_no))
all_protein_seqs_emb.append(protein_seqs_emb)
all_smiles_seqs_emb.append(smiles_seqs_emb)
def dict_mean(all_emb):
sums = Counter()
counters = Counter()
for itemset in all_emb:
sums.update(itemset)
counters.update(itemset.keys())
ret = {x: sums[x]/counters[x] for x in sums.keys()}
return ret
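# Hedged check (not in the original script): dict_mean averages values
# key-wise across a list of dicts; a key missing from some dicts is averaged
# only over the dicts that contain it.
def _demo_dict_mean():
    d1 = {'a': 1.0, 'b': 2.0}
    d2 = {'a': 3.0}
    return dict_mean([d1, d2])  # {'a': 2.0, 'b': 2.0}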
from collections import Counter
protein_mean_emb = dict_mean(all_protein_seqs_emb)
smiles_mean_emb = dict_mean(all_smiles_seqs_emb)
# In[ ]:
def cindex_score(y_true, y_pred):
g = tf.subtract(tf.expand_dims(y_pred, -1), y_pred)
g = tf.cast(g == 0.0, tf.float32) * 0.5 + tf.cast(g > 0.0, tf.float32)
f = tf.subtract(tf.expand_dims(y_true, -1), y_true) > 0.0
f = tf.matrix_band_part(tf.cast(f, tf.float32), -1, 0)
g = tf.reduce_sum(tf.multiply(g, f))
f = tf.reduce_sum(f)
return tf.where(tf.equal(g, 0), 0.0, g/f) #select
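# Hedged plain-numpy sketch (not in the original script) of the standard
# concordance index that cindex_score above computes with TensorFlow ops;
# useful as a sanity check. Note the TF version also masks the pair matrix to
# its lower triangle, so the two can differ slightly on reordered inputs.
def _cindex_numpy(y_true, y_pred):
    num, den = 0.0, 0.0
    for i in range(len(y_true)):
        for j in range(len(y_true)):
            if y_true[i] > y_true[j]:
                den += 1.0
                if y_pred[i] > y_pred[j]:
                    num += 1.0
                elif y_pred[i] == y_pred[j]:
                    num += 0.5
    return num / den if den > 0 else 0.0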
# In[4]:
def load_emb_from_dict(emb_dict, key, max_len):
X = np.zeros(( max_len,hidden_dim ))
emb = emb_dict[key]
emb_shape = emb.shape[0]
if emb_shape > max_len:
X = emb[:max_len]
else:
X[:emb_shape,:] = emb
return X
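# Hedged check (not in the original script): load_emb_from_dict zero-pads
# sequences shorter than max_len and truncates longer ones; the toy embedding
# below reuses the module-level hidden_dim.
def _demo_load_emb_from_dict():
    toy_emb = {'seqA': np.ones((3, hidden_dim))}
    X = load_emb_from_dict(toy_emb, 'seqA', max_len=5)
    return X.shape  # (5, hidden_dim); rows 3 and 4 stay zero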
import keras
class DataGenerator(keras.utils.Sequence):
'Generates data for Keras'
def __init__(self, prots, drugs, Y, batch_size=256):
'Initialization'
self.batch_size = batch_size
self.prots = prots
self.drugs = drugs
self.Y = Y
self.on_epoch_end()
def __len__(self):
'Denotes the number of batches per epoch'
return int(np.floor(len(self.prots) / self.batch_size))
def __getitem__(self, index):
'Generate one batch of data'
# Generate indexes of the batch
indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
# Generate data
X, y = self.__data_generation(indexes)
return X, y
def on_epoch_end(self):
'Updates indexes after each epoch'
self.indexes = np.arange(len(self.prots))
np.random.shuffle(self.indexes)
def __data_generation(self, list_IDs_temp):
'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)
# Initialization
input_list = []
X_drug = np.zeros((self.batch_size, smilen,hidden_dim))
X_prot_seq = np.zeros((self.batch_size, seq_len,hidden_dim))
for i, ID in enumerate(list_IDs_temp):
X_drug[i] = load_emb_from_dict(smiles_mean_emb, self.drugs[ID], smilen)
X_prot_seq[i] = load_emb_from_dict(protein_mean_emb, self.prots[ID], seq_len)
input_list.append(X_drug)
input_list.append(X_prot_seq)
y = np.zeros((self.batch_size))
# Generate data
for i, ID in enumerate(list_IDs_temp):
y[i] = self.Y[ID]
return input_list , y
# In[ ]:
def Highway(value, n_layers, activation="tanh", gate_bias=0):
""" Highway layers:
a negative gate bias means the network is biased towards carry behavior in the initial stages"""
dim = K.int_shape(value)[-1]
bias = keras.initializers.Constant(gate_bias)
for i in range(n_layers):
T_gate = Dense(units=dim, bias_initializer=bias, activation="sigmoid")(value)
C_gate = Lambda(lambda x: 1.0 - x, output_shape=(dim,))(T_gate)
transform = Dense(units=dim, activation=activation)(value)
transform_gated = Multiply()([T_gate, transform])
carry_gated = Multiply()([C_gate, value])
value = Add()([transform_gated, carry_gated])
return value
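# Hedged usage sketch (not in the original script): stack two highway layers
# on top of a dense encoding inside a small functional-API model; layer sizes
# are arbitrary.
def _demo_highway():
    inp = Input(shape=(128,))
    h = Dense(64, activation='relu')(inp)
    h = Highway(h, n_layers=2, gate_bias=-2)
    out = Dense(1)(h)
    return Model(inputs=inp, outputs=out)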
# In[7]:
#from keras_radam import RAdam
#from keras_lookahead import Lookahead
from keras.layers import Lambda,Add, CuDNNGRU,TimeDistributed, Bidirectional,Softmax
from keras import regularizers
from keras.regularizers import l2
import tensorflow as tf
from keras import regularizers
from sklearn.model_selection import KFold, ShuffleSplit
smilen = 100
seq_len = 1000
# Squeeze and Excitation
def se_block(input, channels, r=8):
# Squeeze
x = GlobalAveragePooling1D()(input)
# Excitation
x = Dense(channels//r, activation="relu")(x)
x = Dense(channels, activation="sigmoid")(x)
return Multiply()([input, x])
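# Hedged usage sketch (not in the original script): apply the
# squeeze-and-excitation block to the output of a 1D convolution; the filter
# count must match the channels argument, and the input length is arbitrary.
def _demo_se_block():
    inp = Input(shape=(100, hidden_dim))
    x = Conv1D(filters=64, kernel_size=3, activation='relu')(inp)
    x = se_block(x, channels=64)
    return Model(inputs=inp, outputs=x)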
def coeff_fun_prot(x):
import tensorflow as tf
import keras
tmp_a_1=tf.keras.backend.mean(x[0],axis=-1,keepdims=True)
tmp_a_1=tf.nn.softmax(tmp_a_1)
tmp=tf.tile(tmp_a_1,(1,1,keras.backend.int_shape(x[1])[2]))
return tf.multiply(x[1],tmp)
def att_func(x):
import tensorflow as tf
import keras
tmp_a_2=tf.keras.backend.permute_dimensions(x[1],(0,2,1))
mean_all=tf.keras.backend.sigmoid(tf.keras.backend.batch_dot(tf.keras.backend.mean(x[0],axis=1,keepdims=True),tf.keras.backend.mean(tmp_a_2,axis=-1,keepdims=True)))
tmp_a=tf.keras.backend.sigmoid(tf.keras.backend.batch_dot(x[0],tmp_a_2))*mean_all
#tmp_a=tf.nn.softmax(tmp_a)
return tmp_a
def coeff_fun_lig(x):
import tensorflow as tf
import keras
tmp1=tf.keras.backend.permute_dimensions(x[0],(0,2,1))
tmp_a_1=tf.keras.backend.mean(tmp1,axis=-1,keepdims=True)
tmp_a_1=tf.nn.softmax(tmp_a_1)
tmp=tf.tile(tmp_a_1,(1,1,keras.backend.int_shape(x[1])[2]))
return tf.multiply(x[1],tmp)
def conv_block(inputs, seblock, NUM_FILTERS,FILTER_LENGTH1):
conv1_encode = Conv1D(filters=NUM_FILTERS, kernel_size=FILTER_LENGTH1, activation='relu', padding='valid', strides=1)(inputs)
if seblock:
conv1_encode = se_block(conv1_encode,NUM_FILTERS)
conv2_encode = Conv1D(filters=NUM_FILTERS*2, kernel_size=FILTER_LENGTH1, activation='relu', padding='valid', strides=1)(conv1_encode)
if seblock:
conv2_encode = se_block(conv2_encode,NUM_FILTERS*2)
return conv2_encode
def fc_net(encode_interaction):
n_layers = 4
gate = Highway(n_layers = n_layers, value=encode_interaction, gate_bias=-2)
FC1 = Dense(1024, activation='relu', kernel_regularizer=regularizers.l2(0.01))(gate)
FC2 = Dropout(0.4)(FC1)
FC2 = Dense(1024, activation='relu', kernel_regularizer=regularizers.l2(0.01))(FC2)
FC2 = Dropout(0.4)(FC2)
FC2 = Dense(512, activation='relu', kernel_regularizer=regularizers.l2(0.01))(FC2)
# FC2 = Dropout(0.3)(FC2)
# And add a logistic regression on top
predictions = Dense(1, kernel_initializer='normal')(FC2)
return predictions
def share_conv_block(protein_conv1_encode, protein_conv2_encode,comp_conv1_encode,comp_conv2_encode,prot_emb, comp_emb):
prot_emb = protein_conv1_encode(prot_emb)
prot_emb = protein_conv2_encode(prot_emb)
comp_emb = comp_conv1_encode(comp_emb)
comp_emb = comp_conv2_encode(comp_emb)
encode_protein = GlobalMaxPooling1D()(prot_emb)
encode_smiles = GlobalMaxPooling1D()(comp_emb)
encode_interaction = Concatenate()([encode_smiles, encode_protein])
predictions = fc_net(encode_interaction)
return predictions
def FFN(inputs):
encode = Dense(256, activation='relu')(inputs)
encode = Dense(256)(encode)
return encode
def build_model():
drugInput = Input(shape=(smilen,hidden_dim))
protInput = Input(shape=(seq_len,hidden_dim))
# share CNN
NUM_FILTERS = hidden_dim
FILTER_LENGTH1 = 3
n_layers = 4
seblock = True
# encode_prot = FFN(protInput)
# encode_smiles = FFN(drugInput)
# # att_tmp=TimeDistributed(Dense(hidden_dim,use_bias=False))(encode_prot)
# att=Lambda(att_func)([encode_prot,encode_smiles])
# encode_prot=Lambda(coeff_fun_prot)([att,encode_prot])
# encode_smiles=Lambda(coeff_fun_lig)([att,encode_smiles])
encode_smiles = conv_block(drugInput,seblock, NUM_FILTERS, 3)
encode_prot = conv_block(protInput,seblock, NUM_FILTERS, 3)
encode_smiles = GlobalMaxPooling1D()(encode_smiles)
encode_prot = GlobalMaxPooling1D()(encode_prot)
encode_interaction = Concatenate()([encode_smiles, encode_prot])
# gate = Highway(n_layers = n_layers, value=encode_interaction, gate_bias=-2)
predictions = fc_net(encode_interaction)
# And add a logistic regression on top
# predictions = Dense(1, kernel_initializer='normal')(gate) #OR no activation, right now it's between 0-1, do I want this??? activation='sigmoid'
interactionModel = Model(inputs= [drugInput,protInput ], outputs=[predictions])
# adam = keras.optimizers.Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.01, amsgrad=False)
# ranger = Lookahead(RAdam())
interactionModel.compile(optimizer= 'adam', loss='mse', metrics=[cindex_score]) #, metrics=['cindex_score']
return interactionModel
model = build_model()
print(model.summary())
# In[ ]:
from keras.callbacks import ModelCheckpoint, EarlyStopping,ReduceLROnPlateau
from keras.callbacks import TensorBoard
#from sklearn.metrics import mean_squared_error
from rlscore.measure import cindex
from sklearn.model_selection import KFold
from emtrics import *
all_loss = np.zeros((5,1))
all_ci = np.zeros((5,1))
all_ci2 = np.zeros((5,1))
all_mse2 = np.zeros((5,1))
all_r = np.zeros((5,1))
all_aupr = np.zeros((5,1))
all_rm2 = np.zeros((5,1))
data_file = 'dataset/BindingDB-uniq-data.csv'
all_drug = []
all_protein = []
all_Y = []
with open(data_file, 'r') as f:
all_lines = f.readlines()
for line in all_lines:
row = line.rstrip().split(',')
all_drug.append(row[0])
all_protein.append(row[1])
all_Y.append(float(row[2]))
print(len(all_Y), len(all_drug), len(all_protein))
batch_size = 256
# set random_state as
kf = KFold(n_splits=5, shuffle=True)
for split, ( train_index, test_index) in enumerate( kf.split(all_Y)):
print(train_index,test_index )
train_protein_cv = np.array(all_protein)[train_index]
train_drug_cv = np.array(all_drug)[train_index]
train_Y_cv = np.array(all_Y)[train_index]
test_protein_cv = np.array(all_protein)[test_index]
test_drug_cv = np.array(all_drug)[test_index]
test_Y_cv = np.array(all_Y)[test_index]
train_size = train_protein_cv.shape[0]
valid_size = 0 #int(len(all_Y)/5.0) # 7051 #?
training_generator = DataGenerator( train_protein_cv[:train_size-valid_size], train_drug_cv[:train_size-valid_size],
np.array(train_Y_cv[:train_size-valid_size]),batch_size=batch_size)
# validate_generator = DataGenerator( train_protein_cv[train_size-valid_size:],
# train_drug_cv[train_size-valid_size:],
# np.array(train_Y_cv[train_size-valid_size:]),batch_size=batch_size)
save_model_name = 'models/bdbki-embedding-avg'+str(split)
model = build_model()
save_checkpoint = ModelCheckpoint(save_model_name, verbose=1,save_best_only=True, monitor='loss', save_weights_only=True, mode='min')
earlyStopping = EarlyStopping(monitor='loss', patience=25, verbose=1,mode='min')
lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1),
cooldown=0,
patience=5,
min_lr=0.5e-6)
model.fit_generator(generator=training_generator,
epochs = 500 ,
verbose=1,callbacks=[earlyStopping, save_checkpoint])
# model.fit_generator(generator=training_generator,
# epochs = 500 ,
# verbose=1, validation_data=validate_generator,
# callbacks=[earlyStopping, save_checkpoint])
input_list = []
X_drug = np.zeros((len(test_drug_cv), smilen,hidden_dim))
X_prot_seq = np.zeros((len(test_protein_cv), seq_len,hidden_dim))
for i in range(len(test_protein_cv)):
X_drug[i] = load_emb_from_dict(smiles_mean_emb, test_drug_cv[i], smilen)
X_prot_seq[i] = load_emb_from_dict(protein_mean_emb, test_protein_cv[i], seq_len)
input_list.append(X_drug)
input_list.append(X_prot_seq)
model.load_weights(save_model_name)
y_pred = model.predict(input_list)
test_Y_cv = np.float64(np.array(test_Y_cv))
y_pred = np.float64(np.array(y_pred))
ci2 = cindex(test_Y_cv, y_pred)
rm2 = get_rm2(test_Y_cv, y_pred[:,0])
mse = get_mse(test_Y_cv, y_pred[:,0])
pearson = get_pearson(test_Y_cv, y_pred[:,0])
spearman = get_spearman(test_Y_cv, y_pred[:,0])
rmse = get_rmse(test_Y_cv, y_pred[:,0])
aupr = get_aupr(test_Y_cv, y_pred[:,0], threshold=12.1)
print('rm2:', rm2)
print('mse:', mse)
print('pearson', pearson)
print('ci:', ci2)
print('AUPR', aupr)
all_mse2[split] = mse
all_r[split] = pearson
all_aupr[split] = aupr
all_rm2[split] = rm2
all_ci2[split] = ci2
# In[10]:
print('cindex:',np.mean(all_ci), np.std(all_ci))
print('rm2:',
|
np.mean(all_rm2)
|
numpy.mean
|
"""
oksar3
Program to calculate forward models of interferograms, strain tensor, etc.
from Okada subroutine.
Heritage:
- originally fringes.c written by <NAME>
- updated to oksar tjw
- oksar_strain: added strain tensor calculation tjw
- oksar3: added new line of sight calculator tjw feb 2003
- Modified into Python by RowanCockett, 3point Science Aug 2014
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import properties
import vectormath as vmath
import utm
import matplotlib.pyplot as plt
class EarthquakeInterferogram(properties.HasProperties):
title = properties.String(
'name of the earthquake',
required=True
)
description = properties.String(
'description of the event',
required=False
)
location = properties.Vector2(
'interferogram location (bottom N, left E)',
required=True
)
location_UTM_zone = properties.Integer(
'UTM zone',
required=True
)
shape = properties.Array(
'number of pixels in the interferogram',
shape=(2,),
dtype=int,
required=True
)
pixel_size = properties.Array(
'Size of each pixel (northing, easting)',
shape=(2,),
dtype=float,
required=True
)
data = properties.Array(
'Processed interferogram data (unwrapped)',
dtype=float,
required=True
)
ref = properties.Vector2(
'interferogram reference',
required=True
)
ref_incidence = properties.Float(
'Incidence angle',
required=True
)
scaling = properties.Float(
'Scaling of the interferogram',
default=1.0
)
satellite_name = properties.String('Name of the satellite.')
satellite_fringe_interval = properties.Float(
'Fringe interval',
default=0.028333
)
satellite_azimuth = properties.Float(
'satellite_azimuth',
required=True
)
satellite_altitude = properties.Float(
'satellite_altitude',
required=True
)
local_rigidity = properties.Float(
'Local rigidity',
default=3e10
)
local_earth_radius = properties.Float(
'Earth radius',
default=6371000.
)
date1 = properties.DateTime(
'date1',
required=True
)
date2 = properties.DateTime(
'date2',
required=True
)
processed_by = properties.String(
'processed_by',
required=True
)
processed_date = properties.DateTime(
'processed_date',
required=True
)
copyright = properties.String(
'copyright',
required=True
)
data_source = properties.String(
'data_source',
required=True
)
event_date = properties.DateTime('Date of the earthquake')
event_gcmt_id = properties.String('GCMT ID')
event_name = properties.String('Earthquake name')
event_country = properties.String('Earthquake country')
def _get_plot_data(self):
vectorNx = (
np.r_[
0,
np.cumsum(
(self.pixel_size[0],) * self.shape[0]
)
] + self.location[0]
)
vectorNy = (
np.r_[
0,
np.cumsum(
(self.pixel_size[1],) * self.shape[1]
)
] + self.location[1]
) - self.pixel_size[1] * self.shape[1]
data = self.data.copy()
data = np.flipud(data.reshape(self.shape, order='F').T)
data[data == 0] = np.nan
data *= self.scaling
return vectorNx, vectorNy, data
def plot_interferogram(self, wrap=True, ax=None):
self.assert_valid
if ax is None:
plt.figure()
ax = plt.subplot(111)
vectorNx, vectorNy, data = self._get_plot_data()
if wrap:
cmap = plt.cm.hsv
data = data % self.satellite_fringe_interval
vmin, vmax = 0.0, self.satellite_fringe_interval
else:
cmap = plt.cm.jet
vmin = np.nanmin(data)
vmax = np.nanmax(data)
out = ax.pcolormesh(
vectorNx,
vectorNy,
np.ma.masked_where(np.isnan(data), data),
vmin=vmin,
vmax=vmax,
cmap=cmap
)
ax.set_title(self.title)
ax.axis('equal')
ax.set_xlabel('Easting, m (UTM Zone {})'.format(
self.location_UTM_zone
))
ax.set_ylabel('Northing, m')
cb = plt.colorbar(out, ax=ax)
cb.set_label('Displacement, m')
return out
def plot_mask(self, ax=None, opacity=0.2):
if ax is None:
plt.figure()
ax = plt.subplot(111)
vectorNx, vectorNy, data = self._get_plot_data()
from matplotlib import colors
cmap = colors.ListedColormap([(1, 1, 1, opacity)])
out = ax.pcolormesh(
vectorNx,
vectorNy,
np.ma.masked_where(~np.isnan(data), data),
cmap=cmap
)
ax.set_title(self.title)
ax.axis('equal')
ax.set_xlabel('Easting, m (UTM Zone {})'.format(
self.location_UTM_zone
))
ax.set_ylabel('Northing, m')
return out
def get_LOS_vector(self, locations):
"""
calculate beta - the angle at earth center between reference point
and satellite nadir
"""
if not isinstance(locations, list):
locations = [locations]
utmZone = self.location_UTM_zone
refPoint = vmath.Vector3(self.ref.x, self.ref.y, 0)
satAltitude = self.satellite_altitude
satAzimuth = self.satellite_azimuth
satIncidence = self.ref_incidence
earthRadius = self.local_earth_radius
DEG2RAD = np.pi / 180.
alpha = satIncidence * DEG2RAD
beta = (earthRadius / (satAltitude + earthRadius)) * np.sin(alpha)
beta = alpha - np.arcsin(beta)
beta = beta / DEG2RAD
# calculate angular separation of (x,y) from satellite track passing
# through (origx, origy) with azimuth satAzimuth
# Long lat **NOT** lat long
origy, origx = utm.to_latlon(
refPoint.x, refPoint.y, np.abs(utmZone), northern=utmZone > 0
)
xy = np.array([
utm.to_latlon(u[0], u[1], np.abs(utmZone), northern=utmZone > 0)
for u in locations
])
y = xy[:, 0]
x = xy[:, 1]
angdist = self._ang_to_gc(x, y, origx, origy, satAzimuth)
# calculate beta2, the angle at earth center between roaming point and
# satellite nadir track, assuming right-looking satellite
beta2 = beta - angdist
beta2 = beta2 * DEG2RAD
# calculate alpha2, the new incidence angle
alpha2 = np.sin(beta2) / (
np.cos(beta2) - (earthRadius / (earthRadius + satAltitude))
)
alpha2 = np.arctan(alpha2)
alpha2 = alpha2 / DEG2RAD
# calculate pointing vector
satIncidence = 90 - alpha2
satAzimuth = 360 - satAzimuth
los_x = -np.cos(satAzimuth * DEG2RAD) * np.cos(satIncidence * DEG2RAD)
los_y = -np.sin(satAzimuth * DEG2RAD) * np.cos(satIncidence * DEG2RAD)
los_z = np.sin(satIncidence * DEG2RAD)
return vmath.Vector3Array([los_x, los_y, los_z])
@staticmethod
def _ang_to_gc(x, y, origx, origy, satAzimuth):
"""
Calculate angular distance to great circle passing through
given point
"""
Ngc = np.zeros(3)
cartxy = np.zeros((len(x), 3))
satAzimuth = np.deg2rad(satAzimuth)
origx = np.deg2rad(origx)
origy = np.deg2rad(origy)
x = np.deg2rad(x)
y = np.deg2rad(y)
# 1. calc geocentric norm vec to great circle, Ngc = Rz*Ry*Rx*[0;1;0]
# where Rz = rotation of origx about geocentric z-axis
# where Ry = rotation of origy about geocentric y-axis
# where Rx = rotation of satAzimuth about geocentric x-axis
# and [0;1;0] is geocentric norm vec to N-S Great Circle through 0 0
Ngc[0] = (
(np.sin(satAzimuth) * np.sin(origy) * np.cos(origx)) -
(np.cos(satAzimuth) * np.sin(origx))
)
Ngc[1] = (
(np.sin(satAzimuth) * np.sin(origy) * np.sin(origx)) +
(np.cos(satAzimuth) * np.cos(origx))
)
Ngc[2] = -np.sin(satAzimuth) * np.cos(origy)
# 2. calculate unit vector geocentric coordinates for lon/lat
# position (x,y)
cartxy[:, 0] = np.cos(x) * np.cos(y)
cartxy[:, 1] = np.sin(x) * np.cos(y)
cartxy[:, 2] = np.sin(y)
# 3. Dot product between Ngc and cartxy gives angle 90 degrees
# bigger than what we want
angdist = (
Ngc[0]*cartxy[:, 0] +
Ngc[1]*cartxy[:, 1] +
Ngc[2]*cartxy[:, 2]
)
angdist = np.rad2deg(np.arccos(angdist)) - 90
return angdist
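# Hedged sanity check (not in the original module): for a satellite track with
# azimuth 0 through the origin, a point due north of the origin lies on the
# track's great circle, so its angular distance should be ~0 degrees.
def _demo_ang_to_gc():
    x = np.array([0.0])   # longitude, degrees
    y = np.array([10.0])  # latitude, degrees
    return EarthquakeInterferogram._ang_to_gc(x, y, origx=0.0, origy=0.0,
                                              satAzimuth=0.0)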
class Oksar(properties.HasProperties):
beta = properties.Float('beta', default=3E10)
mu = properties.Float('mu', default=3E10)
strike = properties.Float('Strike', min=0, max=360)
dip = properties.Float('Dip', default=45, min=0, max=90)
rake = properties.Float('Rake', default=90, min=-180, max=180)
slip = properties.Float('Slip', default=0.5, min=0)
length = properties.Float('Fault length', default=10000., min=0)
center = properties.Vector2('Center of the fault plane.')
depth_top = properties.Float('Top of fault', min=0)
depth_bottom = properties.Float('Bottom of fault', default=10000, min=0)
O = properties.Vector2(
'Origin of the simulation domain', required=True
)
U = properties.Vector2(
'U direction of the simulation domain', required=True
)
V = properties.Vector2(
'V direction of the simulation domain', required=True
)
shape = properties.Array(
'number of pixels in the simulation',
shape=(2,),
default=(300, 300),
dtype=int,
# required=True
)
@property
def simulation_grid(self):
self.assert_valid
vec, shape = vmath.ouv2vec(
vmath.Vector3(self.O[0], self.O[1], 0),
vmath.Vector3(self.U[0], self.U[1], 0),
vmath.Vector3(self.V[0], self.V[1], 0),
self.shape
)
return vec
@property
def displacement_vector(self):
self.assert_valid
vec = self.simulation_grid
x, y = vec.x, vec.y
DEG2RAD = 0.017453292519943
alpha = (self.beta + self.mu) / (self.beta + 2.0 * self.mu)
# Here we could loop over models
flt_x = self.center[0]
flt_y = self.center[1]
strike = self.strike
dip = self.dip
rake = self.rake
slip = self.slip
length = self.length
hmin = self.depth_top
hmax = self.depth_bottom
rrake = (rake+90.0)*DEG2RAD
sindip = np.sin(dip*DEG2RAD)
w = (hmax-hmin)/sindip
ud = slip*np.cos(rrake)
us = -slip*np.sin(rrake)
halflen = length/2.0
al2 = halflen
al1 = -al2
aw1 = hmin/sindip
aw2 = hmax/sindip
if(hmin < 0.0):
raise Exception('ERROR: Fault top above ground surface')
if(hmin == 0.0):
hmin = 0.00001
sstrike = (strike+90.0)*DEG2RAD
ct = np.cos(sstrike)
st = np.sin(sstrike)
X = ct * (-flt_x + x) - st * (-flt_y + y)
Y = ct * (-flt_y + y) + st * (-flt_x + x)
u = self._dc3d3(alpha, X, Y, -dip, al1, al2, aw1, aw2, us, ud)
UX = ct*u.x + st*u.y
UY = -st*u.x + ct*u.y
UZ = u.z
return vmath.Vector3(UX, UY, UZ)
def _dc3d3(self, alpha, X, Y, dip, al1, al2, aw1, aw2, disl1, disl2):
F0 = 0.0
F1 = 1.0
F2 = 2.0
PI2 = 6.283185307179586
EPS = 1.0E-6
u = vmath.Vector3(F0, F0, F0)
dub = vmath.Vector3(F0, F0, F0)
# %%dccon0 subroutine
# Calculates medium and fault dip constants
c0_alp3 = (F1 - alpha) / alpha
# PI2/360
pl8 = 0.017453292519943
c0_sd = np.sin(dip*pl8)
c0_cd = np.cos(dip*pl8)
if(np.abs(c0_cd) < EPS):
c0_cd = F0
if(c0_sd > F0):
c0_sd = F1
if(c0_sd < F0):
c0_sd = -F1
c0_cdcd = c0_cd * c0_cd
c0_sdcd = c0_sd * c0_cd
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
p = Y * c0_cd
q = Y * c0_sd
jxi = ((X - al1) * (X - al2)) <= F0 # BOOLEAN
jet = ((p - aw1) * (p - aw2)) <= F0 # BOOLEAN
for k in [1., 2.]:
et = 0.0
if(k == 1):
et = p-aw1
else:
et = p-aw2
for j in [1., 2.]:
xi = 0.0
if(j == 1):
xi = X-al1
else:
xi = X-al2
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%dccon2 subroutine
# % calculates station geometry constants for finite source
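# xi, et, q are the station coordinates in Okada's fault-aligned system:
# along strike, down dip, and normal to the fault plane respectively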
dc_max = np.max(np.abs(np.c_[xi, et, q]))
# dc_max = max(np.abs(xi),max(np.abs(et),np.abs(q)))
xi[(np.abs(xi/dc_max) < EPS) | (np.abs(xi) < EPS)] = F0
et[(np.abs(et/dc_max) < EPS) | (np.abs(et) < EPS)] = F0
q[(np.abs(q/dc_max) < EPS) | (np.abs(q) < EPS)] = F0
dc_xi = xi
dc_et = et
dc_q = q
c2_r = np.sqrt(dc_xi*dc_xi + dc_et*dc_et + dc_q*dc_q)
if np.any(c2_r == F0):
raise Exception('singularity error ???')
c2_y = dc_et * c0_cd + dc_q * c0_sd
c2_d = dc_et * c0_sd - dc_q * c0_cd
c2_tt = np.arctan(dc_xi * dc_et / (dc_q * c2_r))
c2_tt[dc_q == F0] = F0
rxi = c2_r + dc_xi
c2_x11 = F1/(c2_r*rxi)
c2_x11[(dc_xi < F0) & (dc_q == F0) & (dc_et == F0)] = F0
ret = c2_r + dc_et
if np.any(ret < 1e-14):
raise Exception('dccon2 b %s %s %s %s %s' % (
ret, c2_r, dc_et, dc_q, dc_xi
))
c2_ale = np.log(ret)
c2_y11 = F1/(c2_r*ret)
ind = (dc_et < F0) & (dc_q == F0) & (dc_xi == F0)
# if((c2_r-dc_et) < 1e-14):
# raise Exception('dccon2 a %f %f %f %f %f' % (
#         c2_r-dc_et, c2_r, dc_et, dc_q, dc_xi)
# )
c2_ale[ind] = -np.log(c2_r[ind]-dc_et[ind])
c2_y11[ind] = F0
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
if np.any(
(
(q == F0) &
(
((jxi) & (et == F0)) |
((jet) & (xi == F0))
)
) | (c2_r == F0)
):
raise Exception('singular problems: 2')
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# ub subroutine
# part B of displacement and strain at depth due to buried
# faults in semi-infinite medium
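# rd = R + d, with d the rotated depth-like coordinate c2_d; the check below
# guards against it vanishing in later denominators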
rd = c2_r + c2_d
if np.any(rd < 1e-14):
raise Exception('ub %f %f %f %f %f %f' % (
rd, c2_r, c2_d, xi, et, q
))
ai3 = 0.0
ai4 = 0.0
if(c0_cd != F0):
# xx replaces x in original subroutine
xx = np.sqrt(xi*xi + q*q)
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
import os
import pytest
import time
from openvino.inference_engine import ie_api as ie
from tests_compatibility.conftest import model_path
from ..test_utils.test_utils import generate_image # TODO: reformat into an absolute path
is_myriad = os.environ.get("TEST_DEVICE") == "MYRIAD"
test_net_xml, test_net_bin = model_path(is_myriad)
def test_infer(device):
ie_core = ie.IECore()
net = ie_core.read_network(model=test_net_xml, weights=test_net_bin)
exec_net = ie_core.load_network(net, device)
img = generate_image()
res = exec_net.infer({'data': img})
assert np.argmax(res['fc_out'][0]) == 9
del exec_net
del ie_core
def test_infer_net_from_buffer(device):
ie_core = ie.IECore()
with open(test_net_bin, 'rb') as f:
bin = f.read()
with open(test_net_xml, 'rb') as f:
xml = f.read()
net = ie_core.read_network(model=xml, weights=bin, init_from_buffer=True)
net2 = ie_core.read_network(model=test_net_xml, weights=test_net_bin)
exec_net = ie_core.load_network(net, device)
exec_net2 = ie_core.load_network(net2, device)
img = generate_image()
res = exec_net.infer({'data': img})
res2 = exec_net2.infer({'data': img})
del ie_core
del exec_net
del exec_net2
assert np.allclose(res['fc_out'], res2['fc_out'], atol=1E-4, rtol=1E-4)
def test_infer_wrong_input_name(device):
ie_core = ie.IECore()
net = ie_core.read_network(model=test_net_xml, weights=test_net_bin)
exec_net = ie_core.load_network(net, device)
img = generate_image()
with pytest.raises(AssertionError) as e:
exec_net.infer({'_data_': img})
assert "No input with name _data_ found in network" in str(e.value)
del exec_net
del ie_core
def test_input_info(device):
ie_core = ie.IECore()
net = ie_core.read_network(model=test_net_xml, weights=test_net_bin)
exec_net = ie_core.load_network(net, device, num_requests=5)
assert isinstance(exec_net.input_info['data'], ie.InputInfoCPtr)
assert exec_net.input_info['data'].name == "data"
assert exec_net.input_info['data'].precision == "FP32"
assert isinstance(exec_net.input_info['data'].input_data, ie.DataPtr)
del exec_net
del ie_core
def test_outputs(device):
ie_core = ie.IECore()
net = ie_core.read_network(model=test_net_xml, weights=test_net_bin)
exec_net = ie_core.load_network(net, device, num_requests=5)
assert len(exec_net.outputs) == 1
assert "fc_out" in exec_net.outputs
assert isinstance(exec_net.outputs['fc_out'], ie.CDataPtr)
del exec_net
del ie_core
def test_access_requests(device):
ie_core = ie.IECore()
net = ie_core.read_network(model=test_net_xml, weights=test_net_bin)
exec_net = ie_core.load_network(net, device, num_requests=5)
assert len(exec_net.requests) == 5
assert isinstance(exec_net.requests[0], ie.InferRequest)
del exec_net
del ie_core
def test_async_infer_one_req(device):
ie_core = ie.IECore()
net = ie_core.read_network(model=test_net_xml, weights=test_net_bin)
exec_net = ie_core.load_network(net, device, num_requests=1)
img = generate_image()
request_handler = exec_net.start_async(request_id=0, inputs={'data': img})
request_handler.wait()
res = request_handler.output_blobs['fc_out'].buffer
assert np.argmax(res) == 9
del exec_net
del ie_core
def test_async_infer_many_req(device):
ie_core = ie.IECore()
net = ie_core.read_network(model=test_net_xml, weights=test_net_bin)
exec_net = ie_core.load_network(net, device, num_requests=5)
img = generate_image()
for id in range(5):
request_handler = exec_net.start_async(request_id=id, inputs={'data': img})
request_handler.wait()
res = request_handler.output_blobs['fc_out'].buffer
assert np.argmax(res) == 9
del exec_net
del ie_core
def test_async_infer_many_req_get_idle(device):
ie_core = ie.IECore()
net = ie_core.read_network(model=test_net_xml, weights=test_net_bin)
num_requests = 5
exec_net = ie_core.load_network(net, device, num_requests=num_requests)
img = generate_image()
check_id = set()
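# submit 2*num_requests jobs over num_requests request slots: reuse whichever
# request is idle, and block in wait() when none is currently free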
for id in range(2*num_requests):
request_id = exec_net.get_idle_request_id()
if request_id == -1:
status = exec_net.wait(num_requests=1, timeout=ie.WaitMode.RESULT_READY)
assert(status == ie.StatusCode.OK)
request_id = exec_net.get_idle_request_id()
assert(request_id >= 0)
request_handler = exec_net.start_async(request_id=request_id, inputs={'data': img})
check_id.add(request_id)
status = exec_net.wait(timeout=ie.WaitMode.RESULT_READY)
assert status == ie.StatusCode.OK
for id in range(num_requests):
if id in check_id:
assert np.argmax(exec_net.requests[id].output_blobs['fc_out'].buffer) == 9
import pytest
import numpy as np
from scipy import sparse
from scipy.sparse import csgraph
from scipy.linalg import eigh
from sklearn.manifold import SpectralEmbedding
from sklearn.manifold._spectral_embedding import _graph_is_connected
from sklearn.manifold._spectral_embedding import _graph_connected_component
from sklearn.manifold import spectral_embedding
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics import normalized_mutual_info_score
from sklearn.neighbors import NearestNeighbors
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
from sklearn.utils.extmath import _deterministic_vector_sign_flip
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import assert_array_equal
# non-centered, sparse centers used to generate the test blobs below
centers = np.array(
[
[0.0, 5.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 4.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 5.0, 1.0],
]
)
n_samples = 1000
n_clusters, n_features = centers.shape
S, true_labels = make_blobs(
n_samples=n_samples, centers=centers, cluster_std=1.0, random_state=42
)
def _assert_equal_with_sign_flipping(A, B, tol=0.0):
"""Check array A and B are equal with possible sign flipping on
each columns"""
tol_squared = tol ** 2
for A_col, B_col in zip(A.T, B.T):
assert (
np.max((A_col - B_col) ** 2) <= tol_squared
or np.max((A_col + B_col) ** 2) <= tol_squared
)
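# Minimal usage sketch (not part of the original suite; the function name is
# made up): spectral embeddings are only defined up to a per-column sign, and
# that is exactly the invariance the helper above tolerates.
def _sign_flipping_helper_example():
    A = np.random.RandomState(0).randn(5, 2)
    B = A.copy()
    B[:, 1] *= -1  # flip the sign of one column
    _assert_equal_with_sign_flipping(A, B, tol=1e-12)  # passes
    # _assert_equal_with_sign_flipping(A, 2 * A, tol=1e-12) would raise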
def test_sparse_graph_connected_component():
rng = np.random.RandomState(42)
n_samples = 300
boundaries = [0, 42, 121, 200, n_samples]
p = rng.permutation(n_samples)
connections = []
for start, stop in zip(boundaries[:-1], boundaries[1:]):
group = p[start:stop]
# Connect all elements within the group at least once via an
# arbitrary path that spans the group.
for i in range(len(group) - 1):
connections.append((group[i], group[i + 1]))
# Add some more random connections within the group
min_idx, max_idx = 0, len(group) - 1
n_random_connections = 1000
source = rng.randint(min_idx, max_idx, size=n_random_connections)
target = rng.randint(min_idx, max_idx, size=n_random_connections)
connections.extend(zip(group[source], group[target]))
# Build a symmetric affinity matrix
row_idx, column_idx = tuple(np.array(connections).T)
data = rng.uniform(0.1, 42, size=len(connections))
affinity = sparse.coo_matrix((data, (row_idx, column_idx)))
affinity = 0.5 * (affinity + affinity.T)
for start, stop in zip(boundaries[:-1], boundaries[1:]):
component_1 = _graph_connected_component(affinity, p[start])
component_size = stop - start
assert component_1.sum() == component_size
# We should retrieve the same component mask when starting from either
# end of the group
component_2 = _graph_connected_component(affinity, p[stop - 1])
assert component_2.sum() == component_size
assert_array_equal(component_1, component_2)
def test_spectral_embedding_two_components(seed=36):
# Test spectral embedding with two components
random_state = np.random.RandomState(seed)
n_sample = 100
affinity = np.zeros(shape=[n_sample * 2, n_sample * 2])
# first component
affinity[0:n_sample, 0:n_sample] = (
np.abs(random_state.randn(n_sample, n_sample)) + 2
)
# second component
affinity[n_sample::, n_sample::] = (
np.abs(random_state.randn(n_sample, n_sample)) + 2
)
# Test of internal _graph_connected_component before connection
component = _graph_connected_component(affinity, 0)
assert component[:n_sample].all()
assert not component[n_sample:].any()
component = _graph_connected_component(affinity, -1)
assert not component[:n_sample].any()
assert component[n_sample:].all()
# connection
affinity[0, n_sample + 1] = 1
affinity[n_sample + 1, 0] = 1
affinity.flat[:: 2 * n_sample + 1] = 0
affinity = 0.5 * (affinity + affinity.T)
true_label = np.zeros(shape=2 * n_sample)
true_label[0:n_sample] = 1
se_precomp = SpectralEmbedding(
n_components=1, affinity="precomputed", random_state=np.random.RandomState(seed)
)
embedded_coordinate = se_precomp.fit_transform(affinity)
# Some numpy versions are touchy with types
embedded_coordinate = se_precomp.fit_transform(affinity.astype(np.float32))
# thresholding on the first components using 0.
label_ = np.array(embedded_coordinate.ravel() < 0, dtype="float")
assert normalized_mutual_info_score(true_label, label_) == pytest.approx(1.0)
@pytest.mark.parametrize("X", [S, sparse.csr_matrix(S)], ids=["dense", "sparse"])
def test_spectral_embedding_precomputed_affinity(X, seed=36):
# Test spectral embedding with precomputed kernel
gamma = 1.0
se_precomp = SpectralEmbedding(
n_components=2, affinity="precomputed", random_state=np.random.RandomState(seed)
)
se_rbf = SpectralEmbedding(
n_components=2,
affinity="rbf",
gamma=gamma,
random_state=np.random.RandomState(seed),
)
embed_precomp = se_precomp.fit_transform(rbf_kernel(X, gamma=gamma))
embed_rbf = se_rbf.fit_transform(X)
assert_array_almost_equal(se_precomp.affinity_matrix_, se_rbf.affinity_matrix_)
_assert_equal_with_sign_flipping(embed_precomp, embed_rbf, 0.05)
def test_precomputed_nearest_neighbors_filtering():
# Test precomputed graph filtering when the graph contains too many neighbors
n_neighbors = 2
results = []
for additional_neighbors in [0, 10]:
nn = NearestNeighbors(n_neighbors=n_neighbors + additional_neighbors).fit(S)
graph = nn.kneighbors_graph(S, mode="connectivity")
embedding = (
SpectralEmbedding(
random_state=0,
n_components=2,
affinity="precomputed_nearest_neighbors",
n_neighbors=n_neighbors,
)
.fit(graph)
.embedding_
)
results.append(embedding)
assert_array_equal(results[0], results[1])
@pytest.mark.parametrize("X", [S, sparse.csr_matrix(S)], ids=["dense", "sparse"])
def test_spectral_embedding_callable_affinity(X, seed=36):
# Test spectral embedding with callable affinity
gamma = 0.9
kern = rbf_kernel(S, gamma=gamma)
se_callable = SpectralEmbedding(
n_components=2,
affinity=(lambda x: rbf_kernel(x, gamma=gamma)),
gamma=gamma,
random_state=np.random.RandomState(seed),
)
se_rbf = SpectralEmbedding(
n_components=2,
affinity="rbf",
gamma=gamma,
random_state=np.random.RandomState(seed),
)
embed_rbf = se_rbf.fit_transform(X)
embed_callable = se_callable.fit_transform(X)
assert_array_almost_equal(se_callable.affinity_matrix_, se_rbf.affinity_matrix_)
assert_array_almost_equal(kern, se_rbf.affinity_matrix_)
_assert_equal_with_sign_flipping(embed_rbf, embed_callable, 0.05)
# TODO: Remove when pyamg replaces the sp.rand call with np.random.rand
# https://github.com/scikit-learn/scikit-learn/issues/15913
@pytest.mark.filterwarnings(
"ignore:scipy.rand is deprecated:DeprecationWarning:pyamg.*"
)
# TODO: Remove when pyamg removes the use of np.float
@pytest.mark.filterwarnings(
"ignore:`np.float` is a deprecated alias:DeprecationWarning:pyamg.*"
)
# TODO: Remove when pyamg removes the use of pinv2
@pytest.mark.filterwarnings(
"ignore:scipy.linalg.pinv2 is deprecated:DeprecationWarning:pyamg.*"
)
def test_spectral_embedding_amg_solver(seed=36):
# Test spectral embedding with amg solver
pytest.importorskip("pyamg")
se_amg = SpectralEmbedding(
n_components=2,
affinity="nearest_neighbors",
eigen_solver="amg",
n_neighbors=5,
random_state=np.random.RandomState(seed),
)
se_arpack = SpectralEmbedding(
n_components=2,
affinity="nearest_neighbors",
eigen_solver="arpack",
n_neighbors=5,
random_state=np.random.RandomState(seed),
)
embed_amg = se_amg.fit_transform(S)
embed_arpack = se_arpack.fit_transform(S)
_assert_equal_with_sign_flipping(embed_amg, embed_arpack, 1e-5)
# same with special case in which amg is not actually used
# regression test for #10715
# affinity between nodes
row = [0, 0, 1, 2, 3, 3, 4]
col = [1, 2, 2, 3, 4, 5, 5]
val = [100, 100, 100, 1, 100, 100, 100]
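# duplicate each (row, col) entry as (col, row) so the assembled affinity
# matrix is symmetric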
affinity = sparse.coo_matrix(
(val + val, (row + col, col + row)), shape=(6, 6)
).toarray()
se_amg.affinity = "precomputed"
se_arpack.affinity = "precomputed"
embed_amg = se_amg.fit_transform(affinity)
embed_arpack = se_arpack.fit_transform(affinity)
_assert_equal_with_sign_flipping(embed_amg, embed_arpack, 1e-5)
# TODO: Remove filterwarnings when pyamg replaces the sp.rand call with
# np.random.rand:
# https://github.com/scikit-learn/scikit-learn/issues/15913
@pytest.mark.filterwarnings(
"ignore:scipy.rand is deprecated:DeprecationWarning:pyamg.*"
)
# TODO: Remove when pyamg removes the use of np.float
@pytest.mark.filterwarnings(
"ignore:`np.float` is a deprecated alias:DeprecationWarning:pyamg.*"
)
# TODO: Remove when pyamg removes the use of pinv2
@pytest.mark.filterwarnings(
"ignore:scipy.linalg.pinv2 is deprecated:DeprecationWarning:pyamg.*"
)
def test_spectral_embedding_amg_solver_failure():
# Non-regression test for amg solver failure (issue #13393 on github)
pytest.importorskip("pyamg")
seed = 36
num_nodes = 100
X = sparse.rand(num_nodes, num_nodes, density=0.1, random_state=seed)
upper = sparse.triu(X) - sparse.diags(X.diagonal())
sym_matrix = upper + upper.T
embedding = spectral_embedding(
sym_matrix, n_components=10, eigen_solver="amg", random_state=0
)
# Check that the learned embedding is stable w.r.t. random solver init:
for i in range(3):
new_embedding = spectral_embedding(
sym_matrix, n_components=10, eigen_solver="amg", random_state=i + 1
)
_assert_equal_with_sign_flipping(embedding, new_embedding, tol=0.05)
@pytest.mark.filterwarnings("ignore:the behavior of nmi will " "change in version 0.22")
def test_pipeline_spectral_clustering(seed=36):
# Test using pipeline to do spectral clustering
random_state = np.random.RandomState(seed)
se_rbf = SpectralEmbedding(
n_components=n_clusters, affinity="rbf", random_state=random_state
)
se_knn = SpectralEmbedding(
n_components=n_clusters,
affinity="nearest_neighbors",
n_neighbors=5,
random_state=random_state,
)
for se in [se_rbf, se_knn]:
km = KMeans(n_clusters=n_clusters, random_state=random_state)
km.fit(se.fit_transform(S))
assert_array_almost_equal(
normalized_mutual_info_score(km.labels_, true_labels), 1.0, 2
)
def test_spectral_embedding_unknown_eigensolver(seed=36):
# Test that SpectralEmbedding fails with an unknown eigensolver
se = SpectralEmbedding(
n_components=1,
affinity="precomputed",
random_state=np.random.RandomState(seed),
eigen_solver="<unknown>",
)
with pytest.raises(ValueError):
se.fit(S)
def test_spectral_embedding_unknown_affinity(seed=36):
# Test that SpectralEmbedding fails with an unknown affinity type
se = SpectralEmbedding(
n_components=1, affinity="<unknown>", random_state=np.random.RandomState(seed)
)
with pytest.raises(ValueError):
se.fit(S)
def test_connectivity(seed=36):
# Test that graph connectivity test works as expected
graph = np.array(
[
[1, 0, 0, 0, 0],
[0, 1, 1, 0, 0],
[0, 1, 1, 1, 0],
[0, 0, 1, 1, 1],
[0, 0, 0, 1, 1],
]
)
assert not _graph_is_connected(graph)
assert not _graph_is_connected(sparse.csr_matrix(graph))
assert not _graph_is_connected(sparse.csc_matrix(graph))
graph = np.array(
[
[1, 1, 0, 0, 0],
[1, 1, 1, 0, 0],
[0, 1, 1, 1, 0],
[0, 0, 1, 1, 1],
[0, 0, 0, 1, 1],
]
)
assert _graph_is_connected(graph)
assert _graph_is_connected(sparse.csr_matrix(graph))
assert _graph_is_connected(sparse.csc_matrix(graph))
def test_spectral_embedding_deterministic():
# Test that Spectral Embedding is deterministic
random_state = np.random.RandomState(36)
data = random_state.randn(10, 30)
sims = rbf_kernel(data)
embedding_1 = spectral_embedding(sims)
embedding_2 = spectral_embedding(sims)
assert_array_almost_equal(embedding_1, embedding_2)
def test_spectral_embedding_unnormalized():
# Test that spectral_embedding also processes the unnormalized Laplacian
# correctly
random_state = np.random.RandomState(36)
data = random_state.randn(10, 30)
sims = rbf_kernel(data)
n_components = 8
embedding_1 = spectral_embedding(
sims, norm_laplacian=False, n_components=n_components, drop_first=False
)
# Verify using manual computation with dense eigh
laplacian, dd = csgraph.laplacian(sims, normed=False, return_diag=True)
_, diffusion_map = eigh(laplacian)
embedding_2 = diffusion_map.T[:n_components]
embedding_2 = _deterministic_vector_sign_flip(embedding_2).T
assert_array_almost_equal(embedding_1, embedding_2)
def test_spectral_embedding_first_eigen_vector():
# Test that the first eigenvector of spectral_embedding
# is constant and that the second is not (for a connected graph)
random_state = np.random.RandomState(36)
data = random_state.randn(10, 30)
sims = rbf_kernel(data)
n_components = 2
for seed in range(10):
embedding = spectral_embedding(
sims,
norm_laplacian=False,
n_components=n_components,
drop_first=False,
random_state=seed,
)
assert np.std(embedding[:, 0]) == pytest.approx(0)
assert np.std(embedding[:, 1]) > 1e-3