import time
import numpy as np
from sklearn.metrics import roc_auc_score, average_precision_score, ndcg_score
import networkx as nx
import csv
from rw2 import ParallelRW2 as RW2
import pickle
import itertools
def load_graph(path):
H = nx.read_edgelist(path)
temp = sorted(H.nodes())
mapping = {k: i for i, k in enumerate(temp)}
return nx.relabel_nodes(H, mapping), mapping
def get_nodes(train_pos_edges):
subset_nodes = set()
for v1, v2 in train_pos_edges:
subset_nodes.add(v1)
subset_nodes.add(v2)
return list(subset_nodes)
def read_graph_rw2(edgelist, node2attr_path):
words_mapping = None
encode = False
attributed = True
nodes = get_nodes(edgelist)
with open(node2attr_path, 'rb') as handle:
node_labels = pickle.load(handle)
g, attributes2nodes, feat_transitions, node2int, attrs2int = load_graph_rw2(edgelist, nodes = nodes, node2attributes = node_labels, encode2int = encode, directed = False, placeholder = None, fill_empty = False)
if encode:
words_mapping = attrs2int if attributed else node2int
return g, words_mapping, feat_transitions, attributes2nodes
def get_mask(matrix, indices, value):
for v1, v2 in indices:
matrix[v1, v2] = value
matrix[v2, v1] = value
def save(no_folds, avg_values, name):
with open(name, 'w', newline = '') as file:
writer = csv.writer(file)
writer.writerow(["AUROC", "AUPRC", "P_500", "NDCG", "Computing_time"])
for i in range(no_folds):
writer.writerow([avg_values[i]["auroc"], avg_values[i]["auprc"], avg_values[i]["pr_at_500"], avg_values[i]["ndcg"], avg_values[i]["comp_time"]])
def save_top(tops, path = 'top500.csv'):
with open(path, 'w', newline = '') as file:
writer = csv.writer(file)
writer.writerow(["source", "target", "score"])
for t, s, score in tops:
writer.writerow([t, s, score])
def precision_at_k(y_true, y_score, k=10):
"""Precision at rank k
Parameters
----------
y_true : array-like, shape = [n_samples]
Ground truth (true relevance labels).
y_score : array-like, shape = [n_samples]
Predicted scores.
k : int
Rank.
Returns
-------
precision @k : float
"""
unique_y = np.unique(y_true)
if len(unique_y) > 2:
raise ValueError("Only supported for two relevance levels.")
pos_label = unique_y[1]
n_pos = np.sum(y_true == pos_label)
order = np.argsort(y_score)[::-1]
y_true = np.take(y_true, order[:k])
n_relevant = np.sum(y_true == pos_label)
# normalising by min(n_pos, k) is one common convention: the best achievable
# score is then always 1.0
return float(n_relevant) / min(n_pos, k)
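# Quick doctest-style illustration of precision_at_k (toy values chosen for this
# sketch, not taken from the evaluation pipeline above):
# >>> y_true = np.array([0, 0, 1, 1, 0])
# >>> y_score = np.array([0.1, 0.4, 0.35, 0.8, 0.2])
# >>> precision_at_k(y_true, y_score, k=2)  # top-2 scores select labels [1, 0]
# 0.5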
# -*- coding: utf-8 -*-
"""
classes that implement the blocks for MDF version 4
Edit history
Author : yda
Date : 2020-11-12
Package name changed - asammdf to mdfstudio
Functions
---------
* Channel.metadata - Get rid of b" text when decoding byte type data
* Channel.__init__ - Set sampling rate from kwargs
* ChannelGroup.metadata - Get rid of b" text when decoding byte type data
* ChannelConversion.metadata - Get rid of b" text when decoding byte type data
* SourceInformation.metadata - Get rid of b" text when decoding byte type data
"""
from datetime import datetime, timezone
from hashlib import md5
import logging
from pathlib import Path
from struct import pack, unpack, unpack_from
from textwrap import wrap
import time
from traceback import format_exc
import xml.etree.ElementTree as ET
from zlib import compress, decompress
from numexpr import evaluate
import numpy as np
from . import v4_constants as v4c
from ..version import __version__
from .utils import (
block_fields,
extract_display_name,
FLOAT64_u,
get_text_v4,
is_file_like,
MdfException,
sanitize_xml,
UINT8_uf,
UINT64_u,
UINT64_uf,
)
SEEK_START = v4c.SEEK_START
SEEK_END = v4c.SEEK_END
COMMON_SIZE = v4c.COMMON_SIZE
COMMON_u = v4c.COMMON_u
COMMON_uf = v4c.COMMON_uf
CN_BLOCK_SIZE = v4c.CN_BLOCK_SIZE
SIMPLE_CHANNEL_PARAMS_uf = v4c.SIMPLE_CHANNEL_PARAMS_uf
logger = logging.getLogger("mdfstudio")
__all__ = [
"AttachmentBlock",
"Channel",
"ChannelArrayBlock",
"ChannelGroup",
"ChannelConversion",
"DataBlock",
"DataZippedBlock",
"EventBlock",
"FileIdentificationBlock",
"HeaderBlock",
"HeaderList",
"DataList",
"DataGroup",
"FileHistory",
"SourceInformation",
"TextBlock",
]
class AttachmentBlock:
"""When adding new attachments only embedded attachments are allowed, with
keyword argument *data* of type bytes
*AttachmentBlock* has the following attributes, that are also available as
dict like key-value pairs
ATBLOCK fields
* ``id`` - bytes : block ID; always b'##AT'
* ``reserved0`` - int : reserved bytes
* ``block_len`` - int : block bytes size
* ``links_nr`` - int : number of links
* ``next_at_addr`` - int : next ATBLOCK address
* ``file_name_addr`` - int : address of TXBLOCK that contains the attachment
file name
* ``mime_addr`` - int : address of TXBLOCK that contains the attachment
mime type description
* ``comment_addr`` - int : address of TXBLOCK/MDBLOCK that contains the
attachment comment
* ``flags`` - int : ATBLOCK flags
* ``creator_index`` - int : index of file history block
* ``reserved1`` - int : reserved bytes
* ``md5_sum`` - bytes : attachment file md5 sum
* ``original_size`` - int : original uncompressed file size in bytes
* ``embedded_size`` - int : embedded compressed file size in bytes
* ``embedded_data`` - bytes : embedded attachment bytes
Other attributes
* ``address`` - int : attachment address
* ``file_name`` - str : attachment file name
* ``mime`` - str : mime type
* ``comment`` - str : attachment comment
Parameters
----------
address : int
block address; to be used for objects created from file
stream : handle
file handle; to be used for objects created from file
for dynamically created objects :
see the key-value pairs
"""
__slots__ = (
"address",
"file_name",
"mime",
"comment",
"id",
"reserved0",
"block_len",
"links_nr",
"next_at_addr",
"file_name_addr",
"mime_addr",
"comment_addr",
"flags",
"creator_index",
"reserved1",
"md5_sum",
"original_size",
"embedded_size",
"embedded_data",
)
def __init__(self, **kwargs):
self.file_name = self.mime = self.comment = ""
try:
self.address = address = kwargs["address"]
stream = kwargs["stream"]
mapped = kwargs.get("mapped", False) or not is_file_like(stream)
if mapped:
(
self.id,
self.reserved0,
self.block_len,
self.links_nr,
self.next_at_addr,
self.file_name_addr,
self.mime_addr,
self.comment_addr,
self.flags,
self.creator_index,
self.reserved1,
self.md5_sum,
self.original_size,
self.embedded_size,
) = v4c.AT_COMMON_uf(stream, address)
address += v4c.AT_COMMON_SIZE
self.embedded_data = stream[address : address + self.embedded_size]
else:
stream.seek(address)
(
self.id,
self.reserved0,
self.block_len,
self.links_nr,
self.next_at_addr,
self.file_name_addr,
self.mime_addr,
self.comment_addr,
self.flags,
self.creator_index,
self.reserved1,
self.md5_sum,
self.original_size,
self.embedded_size,
) = v4c.AT_COMMON_u(stream.read(v4c.AT_COMMON_SIZE))
self.embedded_data = stream.read(self.embedded_size)
if self.id != b"##AT":
message = f'Expected "##AT" block @{hex(address)} but found "{self.id}"'
logger.exception(message)
raise MdfException(message)
self.file_name = get_text_v4(self.file_name_addr, stream, mapped=mapped)
self.mime = get_text_v4(self.mime_addr, stream, mapped=mapped)
self.comment = get_text_v4(self.comment_addr, stream, mapped=mapped)
except KeyError:
self.address = 0
file_name = Path(kwargs.get("file_name", None) or "bin.bin")
data = kwargs["data"]
original_size = embedded_size = len(data)
compression = kwargs.get("compression", False)
embedded = kwargs.get("embedded", False)
md5_sum = md5(data).digest()
flags = v4c.FLAG_AT_MD5_VALID
if embedded:
flags |= v4c.FLAG_AT_EMBEDDED
if compression:
flags |= v4c.FLAG_AT_COMPRESSED_EMBEDDED
data = compress(data)
embedded_size = len(data)
self.file_name = file_name.name
else:
self.file_name = str(file_name)
file_name.write_bytes(data)
embedded_size = 0
data = b""
self.id = b"##AT"
self.reserved0 = 0
self.block_len = v4c.AT_COMMON_SIZE + embedded_size
self.links_nr = 4
self.next_at_addr = 0
self.file_name_addr = 0
self.mime_addr = 0
self.comment_addr = 0
self.flags = flags
self.creator_index = 0
self.reserved1 = 0
self.md5_sum = md5_sum
self.original_size = original_size
self.embedded_size = embedded_size
self.embedded_data = data
def extract(self):
"""extract attachment data
Returns
-------
data : bytes
"""
if self.flags & v4c.FLAG_AT_EMBEDDED:
if self.flags & v4c.FLAG_AT_COMPRESSED_EMBEDDED:
data = decompress(self.embedded_data)
else:
data = self.embedded_data
if self.flags & v4c.FLAG_AT_MD5_VALID:
md5_worker = md5()
md5_worker.update(data)
md5_sum = md5_worker.digest()
if self.md5_sum == md5_sum:
return data
else:
message = f"ATBLOCK md5sum={self.md5_sum} and embedded data md5sum={md5_sum}"
logger.warning(message)
else:
return data
else:
logger.warning("external attachments not supported")
def to_blocks(self, address, blocks, defined_texts):
text = self.file_name
if text:
if text in defined_texts:
self.file_name_addr = defined_texts[text]
else:
tx_block = TextBlock(text=str(text))
self.file_name_addr = address
defined_texts[text] = address
tx_block.address = address
address += tx_block.block_len
blocks.append(tx_block)
else:
self.file_name_addr = 0
text = self.mime
if text:
if text in defined_texts:
self.mime_addr = defined_texts[text]
else:
tx_block = TextBlock(text=text)
self.mime_addr = address
defined_texts[text] = address
tx_block.address = address
address += tx_block.block_len
blocks.append(tx_block)
else:
self.mime_addr = 0
text = self.comment
if text:
if text in defined_texts:
self.comment_addr = defined_texts[text]
else:
meta = text.startswith("<ATcomment")
tx_block = TextBlock(text=text, meta=meta)
self.comment_addr = address
defined_texts[text] = address
tx_block.address = address
address += tx_block.block_len
blocks.append(tx_block)
else:
self.comment_addr = 0
blocks.append(self)
self.address = address
address += self.block_len
align = address % 8
if align:
blocks.append(b"\0" * (8 - align))
address += 8 - align
return address
def __getitem__(self, item):
return self.__getattribute__(item)
def __setitem__(self, item, value):
self.__setattr__(item, value)
def __bytes__(self):
fmt = f"{v4c.FMT_AT_COMMON}{self.embedded_size}s"
result = pack(fmt, *[self[key] for key in v4c.KEYS_AT_BLOCK])
return result
class Channel:
""" If the `load_metadata` keyword argument is not provided or is False,
then the conversion, source and display name information is not processed.
Furthermore, if `parse_xml_comment` is not provided or is False, then
the display name information from the channel comment is not processed (this
is done to avoid expensive XML operations)
*Channel* has the following attributes, which are also available as
dict-like key-value pairs
CNBLOCK fields
* ``id`` - bytes : block ID; always b'##CN'
* ``reserved0`` - int : reserved bytes
* ``block_len`` - int : block bytes size
* ``links_nr`` - int : number of links
* ``next_ch_addr`` - int : next CNBLOCK address
* ``component_addr`` - int : address of first channel in case of structure channel
composition, or ChannelArrayBlock in case of arrays
* ``name_addr`` - int : address of TXBLOCK that contains the channel name
* ``source_addr`` - int : address of channel source block
* ``conversion_addr`` - int : address of channel conversion block
* ``data_block_addr`` - int : address of signal data block for VLSD channels
* ``unit_addr`` - int : address of TXBLOCK that contains the channel unit
* ``comment_addr`` - int : address of TXBLOCK/MDBLOCK that contains the
channel comment
* ``attachment_<N>_addr`` - int : address of N-th ATBLOCK referenced by the
current channel; if no ATBLOCK is referenced there will be no such key-value
pair
* ``default_X_dg_addr`` - int : address of DGBLOCK where the default X axis
channel for the current channel is found; this key-value pair will not
exist for channels that don't have a default X axis
* ``default_X_cg_addr`` - int : address of CGBLOCK where the default X axis
channel for the current channel is found; this key-value pair will not
exist for channels that don't have a default X axis
* ``default_X_ch_addr`` - int : address of default X axis
channel for the current channel; this key-value pair will not
exist for channels that don't have a default X axis
* ``channel_type`` - int : integer code for the channel type
* ``sync_type`` - int : integer code for the channel's sync type
* ``data_type`` - int : integer code for the channel's data type
* ``bit_offset`` - int : bit offset
* ``byte_offset`` - int : byte offset within the data record
* ``bit_count`` - int : channel bit count
* ``flags`` - int : CNBLOCK flags
* ``pos_invalidation_bit`` - int : invalidation bit position for the current
channel if there are invalidation bytes in the data record
* ``precision`` - int : integer code for the precision
* ``reserved1`` - int : reserved bytes
* ``min_raw_value`` - int : min raw value of all samples
* ``max_raw_value`` - int : max raw value of all samples
* ``lower_limit`` - int : min physical value of all samples
* ``upper_limit`` - int : max physical value of all samples
* ``lower_ext_limit`` - int : min physical value of all samples
* ``upper_ext_limit`` - int : max physical value of all samples
Other attributes
* ``address`` - int : channel address
* ``attachments`` - list : list of referenced attachment block indexes;
each entry is an index into the file's attachment blocks
* ``comment`` - str : channel comment
* ``conversion`` - ChannelConversion : channel conversion; *None* if the
channel has no conversion
* ``display_name`` - str : channel display name; this is extracted from the
XML channel comment
* ``name`` - str : channel name
* ``source`` - SourceInformation : channel source information; *None* if
the channel has no source information
* ``unit`` - str : channel unit
Parameters
----------
address : int
block address; to be used for objects created from file
stream : handle
file handle; to be used for objects created from file
load_metadata : bool
option to load conversion, source and display_name; default *True*
parse_xml_comment : bool
option to parse XML channel comment to search for display name; default
*True*
for dynamically created objects :
see the key-value pairs
"""
__slots__ = (
"name",
"unit",
"comment",
"display_name",
"conversion",
"source",
"attachment",
"address",
"dtype_fmt",
"id",
"reserved0",
"block_len",
"links_nr",
"next_ch_addr",
"component_addr",
"name_addr",
"source_addr",
"conversion_addr",
"data_block_addr",
"unit_addr",
"comment_addr",
"channel_type",
"sync_type",
"data_type",
"bit_offset",
"byte_offset",
"bit_count",
"flags",
"pos_invalidation_bit",
"precision",
"reserved1",
"attachment_nr",
"min_raw_value",
"max_raw_value",
"lower_limit",
"upper_limit",
"lower_ext_limit",
"upper_ext_limit",
"default_X_dg_addr",
"default_X_cg_addr",
"default_X_ch_addr",
"attachment_addr",
"sampling_rate",
)
def __init__(self, **kwargs):
if "sampling_rate" in kwargs:
self.sampling_rate = kwargs["sampling_rate"]
if "stream" in kwargs:
self.address = address = kwargs["address"]
self.dtype_fmt = self.attachment = None
stream = kwargs["stream"]
mapped = kwargs["mapped"]
if mapped:
(self.id, self.reserved0, self.block_len, self.links_nr) = COMMON_uf(
stream, address
)
if self.id != b"##CN":
message = (
f'Expected "##CN" block @{hex(address)} but found "{self.id}"'
)
logger.exception(message)
raise MdfException(message)
if self.block_len == CN_BLOCK_SIZE:
(
self.next_ch_addr,
self.component_addr,
self.name_addr,
self.source_addr,
self.conversion_addr,
self.data_block_addr,
self.unit_addr,
self.comment_addr,
self.channel_type,
self.sync_type,
self.data_type,
self.bit_offset,
self.byte_offset,
self.bit_count,
self.flags,
self.pos_invalidation_bit,
self.precision,
self.reserved1,
self.attachment_nr,
self.min_raw_value,
self.max_raw_value,
self.lower_limit,
self.upper_limit,
self.lower_ext_limit,
self.upper_ext_limit,
) = SIMPLE_CHANNEL_PARAMS_uf(stream, address + COMMON_SIZE)
else:
stream.seek(address + COMMON_SIZE)
block = stream.read(self.block_len - COMMON_SIZE)
links_nr = self.links_nr
links = unpack_from(f"<{links_nr}Q", block)
params = unpack_from(v4c.FMT_CHANNEL_PARAMS, block, links_nr * 8)
(
self.next_ch_addr,
self.component_addr,
self.name_addr,
self.source_addr,
self.conversion_addr,
self.data_block_addr,
self.unit_addr,
self.comment_addr,
) = links[:8]
at_map = kwargs.get("at_map", {})
if params[10]:
params = list(params)
self.attachment_addr = links[8]
self.attachment = at_map.get(links[8], 0)
self.links_nr -= params[10] - 1
self.block_len -= (params[10] - 1) * 8
params = list(params)
params[10] = 1
if params[6] & v4c.FLAG_CN_DEFAULT_X:
(
self.default_X_dg_addr,
self.default_X_cg_addr,
self.default_X_ch_addr,
) = links[-3:]
# default X not supported yet
(
self.default_X_dg_addr,
self.default_X_cg_addr,
self.default_X_ch_addr,
) = (0, 0, 0)
(
self.channel_type,
self.sync_type,
self.data_type,
self.bit_offset,
self.byte_offset,
self.bit_count,
self.flags,
self.pos_invalidation_bit,
self.precision,
self.reserved1,
self.attachment_nr,
self.min_raw_value,
self.max_raw_value,
self.lower_limit,
self.upper_limit,
self.lower_ext_limit,
self.upper_ext_limit,
) = params
tx_map = kwargs["tx_map"]
parsed_strings = kwargs["parsed_strings"]
if parsed_strings is None:
self.name = get_text_v4(self.name_addr, stream, mapped=mapped)
self.comment = get_text_v4(self.comment_addr, stream, mapped=mapped)
if kwargs["use_display_names"]:
self.display_name = extract_display_name(self.comment)
else:
self.display_name = ""
else:
self.name, self.display_name, self.comment = parsed_strings
addr = self.unit_addr
if addr in tx_map:
self.unit = tx_map[addr]
else:
self.unit = get_text_v4(addr, stream, mapped=mapped)
tx_map[addr] = self.unit
address = self.conversion_addr
if address:
cc_map = kwargs["cc_map"]
try:
if address in cc_map:
conv = cc_map[address]
else:
(size,) = UINT64_uf(stream, address + 8)
raw_bytes = stream[address : address + size]
if raw_bytes in cc_map:
conv = cc_map[raw_bytes]
else:
conv = ChannelConversion(
raw_bytes=raw_bytes,
stream=stream,
address=address,
mapped=mapped,
tx_map=tx_map,
)
cc_map[raw_bytes] = cc_map[address] = conv
except:
logger.warning(
f"Channel conversion parsing error: {format_exc()}. The error is ignored and the channel conversion is None"
)
conv = None
self.conversion = conv
else:
self.conversion = None
address = self.source_addr
if address:
si_map = kwargs["si_map"]
try:
if address in si_map:
source = si_map[address]
else:
raw_bytes = stream[address : address + v4c.SI_BLOCK_SIZE]
if raw_bytes in si_map:
source = si_map[raw_bytes]
else:
source = SourceInformation(
raw_bytes=raw_bytes,
stream=stream,
address=address,
mapped=mapped,
tx_map=tx_map,
)
si_map[raw_bytes] = si_map[address] = source
except:
logger.warning(
f"Channel source parsing error: {format_exc()}. The error is ignored and the channel source is None"
)
source = None
self.source = source
else:
self.source = None
else:
stream.seek(address)
block = stream.read(CN_BLOCK_SIZE)
(self.id, self.reserved0, self.block_len, self.links_nr) = COMMON_uf(
block
)
if self.id != b"##CN":
message = (
f'Expected "##CN" block @{hex(address)} but found "{self.id}"'
)
logger.exception(message)
raise MdfException(message)
if self.block_len == CN_BLOCK_SIZE:
(
self.next_ch_addr,
self.component_addr,
self.name_addr,
self.source_addr,
self.conversion_addr,
self.data_block_addr,
self.unit_addr,
self.comment_addr,
self.channel_type,
self.sync_type,
self.data_type,
self.bit_offset,
self.byte_offset,
self.bit_count,
self.flags,
self.pos_invalidation_bit,
self.precision,
self.reserved1,
self.attachment_nr,
self.min_raw_value,
self.max_raw_value,
self.lower_limit,
self.upper_limit,
self.lower_ext_limit,
self.upper_ext_limit,
) = SIMPLE_CHANNEL_PARAMS_uf(block, COMMON_SIZE)
else:
block = block[24:] + stream.read(self.block_len - CN_BLOCK_SIZE)
links_nr = self.links_nr
links = unpack_from(f"<{links_nr}Q", block)
params = unpack_from(v4c.FMT_CHANNEL_PARAMS, block, links_nr * 8)
(
self.next_ch_addr,
self.component_addr,
self.name_addr,
self.source_addr,
self.conversion_addr,
self.data_block_addr,
self.unit_addr,
self.comment_addr,
) = links[:8]
at_map = kwargs.get("at_map", {})
if params[10]:
params = list(params)
self.attachment_addr = links[8]
self.attachment = at_map.get(links[8], 0)
self.links_nr -= params[10] - 1
self.block_len -= (params[10] - 1) * 8
params[10] = 1
if params[6] & v4c.FLAG_CN_DEFAULT_X:
(
self.default_X_dg_addr,
self.default_X_cg_addr,
self.default_X_ch_addr,
) = links[-3:]
# default X not supported yet
(
self.default_X_dg_addr,
self.default_X_cg_addr,
self.default_X_ch_addr,
) = (0, 0, 0)
(
self.channel_type,
self.sync_type,
self.data_type,
self.bit_offset,
self.byte_offset,
self.bit_count,
self.flags,
self.pos_invalidation_bit,
self.precision,
self.reserved1,
self.attachment_nr,
self.min_raw_value,
self.max_raw_value,
self.lower_limit,
self.upper_limit,
self.lower_ext_limit,
self.upper_ext_limit,
) = params
tx_map = kwargs["tx_map"]
parsed_strings = kwargs["parsed_strings"]
if parsed_strings is None:
self.name = get_text_v4(self.name_addr, stream)
self.comment = get_text_v4(self.comment_addr, stream)
if kwargs["use_display_names"]:
self.display_name = extract_display_name(self.comment)
else:
self.display_name = ""
else:
self.name, self.display_name, self.comment = parsed_strings
addr = self.unit_addr
if addr in tx_map:
self.unit = tx_map[addr]
else:
self.unit = get_text_v4(addr, stream)
tx_map[addr] = self.unit
si_map = kwargs["si_map"]
cc_map = kwargs["cc_map"]
address = self.conversion_addr
if address:
try:
if address in cc_map:
conv = cc_map[address]
else:
stream.seek(address + 8)
(size,) = UINT64_u(stream.read(8))
stream.seek(address)
raw_bytes = stream.read(size)
if raw_bytes in cc_map:
conv = cc_map[raw_bytes]
else:
conv = ChannelConversion(
raw_bytes=raw_bytes,
stream=stream,
address=address,
tx_map=tx_map,
mapped=mapped,
)
cc_map[raw_bytes] = cc_map[address] = conv
except:
logger.warning(
f"Channel conversion parsing error: {format_exc()}. The error is ignored and the channel conversion is None"
)
conv = None
self.conversion = conv
else:
self.conversion = None
address = self.source_addr
if address:
try:
if address in si_map:
source = si_map[address]
else:
stream.seek(address)
raw_bytes = stream.read(v4c.SI_BLOCK_SIZE)
if raw_bytes in si_map:
source = si_map[raw_bytes]
else:
source = SourceInformation(
raw_bytes=raw_bytes,
stream=stream,
address=address,
tx_map=tx_map,
mapped=mapped,
)
si_map[raw_bytes] = si_map[address] = source
except:
logger.warning(
f"Channel source parsing error: {format_exc()}. The error is ignored and the channel source is None"
)
source = None
self.source = source
else:
self.source = None
else:
self.address = 0
self.name = self.comment = self.display_name = self.unit = ""
self.conversion = self.source = self.attachment = self.dtype_fmt = None
self.id = b"##CN"
self.reserved0 = 0
self.block_len = v4c.CN_BLOCK_SIZE
self.links_nr = 8
self.next_ch_addr = 0
self.component_addr = 0
self.name_addr = 0
self.source_addr = 0
self.conversion_addr = 0
self.data_block_addr = 0
self.unit_addr = 0
self.comment_addr = 0
try:
self.attachment_addr = kwargs["attachment_addr"]
self.block_len += 8
self.links_nr += 1
attachments = 1
except KeyError:
attachments = 0
self.channel_type = kwargs["channel_type"]
self.sync_type = kwargs.get("sync_type", 0)
self.data_type = kwargs["data_type"]
self.bit_offset = kwargs["bit_offset"]
self.byte_offset = kwargs["byte_offset"]
self.bit_count = kwargs["bit_count"]
self.flags = kwargs.get("flags", 0)
self.pos_invalidation_bit = kwargs.get("pos_invalidation_bit", 0)
self.precision = kwargs.get("precision", 3)
self.reserved1 = 0
self.attachment_nr = attachments
self.min_raw_value = kwargs.get("min_raw_value", 0)
self.max_raw_value = kwargs.get("max_raw_value", 0)
self.lower_limit = kwargs.get("lower_limit", 0)
self.upper_limit = kwargs.get("upper_limit", 0)
self.lower_ext_limit = kwargs.get("lower_ext_limit", 0)
self.upper_ext_limit = kwargs.get("upper_ext_limit", 0)
# ignore MLSD signal data
if self.channel_type == v4c.CHANNEL_TYPE_MLSD:
self.data_block_addr = 0
self.channel_type = v4c.CHANNEL_TYPE_VALUE
if self.display_name == self.name:
self.display_name = ""
def __getitem__(self, item):
return self.__getattribute__(item)
def __setitem__(self, item, value):
self.__setattr__(item, value)
def to_blocks(self, address, blocks, defined_texts, cc_map, si_map):
text = self.name
if text:
if text in defined_texts:
self.name_addr = defined_texts[text]
else:
tx_block = TextBlock(text=text)
self.name_addr = address
defined_texts[text] = address
tx_block.address = address
address += tx_block.block_len
blocks.append(tx_block)
else:
self.name_addr = 0
text = self.unit
if text:
if text in defined_texts:
self.unit_addr = defined_texts[text]
else:
tx_block = TextBlock(text=text)
self.unit_addr = address
defined_texts[text] = address
tx_block.address = address
address += tx_block.block_len
blocks.append(tx_block)
else:
self.unit_addr = 0
comment = self.comment
display_name = self.display_name
if display_name and not comment:
text = v4c.CN_COMMENT_TEMPLATE.format("", display_name)
elif display_name and comment:
if not comment.startswith("<CNcomment"):
text = v4c.CN_COMMENT_TEMPLATE.format(comment, display_name)
else:
if display_name not in comment:
try:
CNcomment = ET.fromstring(comment)
display_name_element = CNcomment.find(".//names/display")
if display_name_element is not None:
display_name_element.text = display_name or ""
else:
display = ET.Element("display")
display.text = display_name
names = ET.Element("names")
names.append(display)
CNcomment.append(names)
text = ET.tostring(CNcomment).decode("utf-8")
except UnicodeEncodeError:
text = comment
else:
text = comment
else:
text = comment
if text:
if text in defined_texts:
self.comment_addr = defined_texts[text]
else:
meta = text.startswith("<CNcomment")
tx_block = TextBlock(text=text, meta=meta)
self.comment_addr = address
defined_texts[text] = address
tx_block.address = address
address += tx_block.block_len
blocks.append(tx_block)
else:
self.comment_addr = 0
conversion = self.conversion
if conversion:
address = conversion.to_blocks(address, blocks, defined_texts, cc_map)
self.conversion_addr = conversion.address
else:
self.conversion_addr = 0
source = self.source
if source:
address = source.to_blocks(address, blocks, defined_texts, si_map)
self.source_addr = source.address
else:
self.source_addr = 0
blocks.append(self)
self.address = address
address += self.block_len
return address
def __bytes__(self):
if self.block_len == v4c.CN_BLOCK_SIZE:
return v4c.SIMPLE_CHANNEL_PACK(
self.id,
self.reserved0,
self.block_len,
self.links_nr,
self.next_ch_addr,
self.component_addr,
self.name_addr,
self.source_addr,
self.conversion_addr,
self.data_block_addr,
self.unit_addr,
self.comment_addr,
self.channel_type,
self.sync_type,
self.data_type,
self.bit_offset,
self.byte_offset,
self.bit_count,
self.flags,
self.pos_invalidation_bit,
self.precision,
self.reserved1,
self.attachment_nr,
self.min_raw_value,
self.max_raw_value,
self.lower_limit,
self.upper_limit,
self.lower_ext_limit,
self.upper_ext_limit,
)
else:
fmt = v4c.FMT_CHANNEL.format(self.links_nr)
keys = (
"id",
"reserved0",
"block_len",
"links_nr",
"next_ch_addr",
"component_addr",
"name_addr",
"source_addr",
"conversion_addr",
"data_block_addr",
"unit_addr",
"comment_addr",
)
if self.attachment_nr:
keys += ("attachment_addr",)
if self.flags & v4c.FLAG_CN_DEFAULT_X:
keys += ("default_X_dg_addr", "default_X_cg_addr", "default_X_ch_addr")
keys += (
"channel_type",
"sync_type",
"data_type",
"bit_offset",
"byte_offset",
"bit_count",
"flags",
"pos_invalidation_bit",
"precision",
"reserved1",
"attachment_nr",
"min_raw_value",
"max_raw_value",
"lower_limit",
"upper_limit",
"lower_ext_limit",
"upper_ext_limit",
"sampling_rate"
)
return pack(fmt, *[getattr(self, key) for key in keys])
def __str__(self):
return f"""<Channel (name: {self.name}, unit: {self.unit}, comment: {self.comment}, address: {hex(self.address)},
conversion: {self.conversion},
source: {self.source},
fields: {', '.join(block_fields(self))})>"""
def metadata(self):
if self.block_len == v4c.CN_BLOCK_SIZE:
keys = v4c.KEYS_SIMPLE_CHANNEL
else:
keys = (
"id",
"reserved0",
"block_len",
"links_nr",
"next_ch_addr",
"component_addr",
"name_addr",
"source_addr",
"conversion_addr",
"data_block_addr",
"unit_addr",
"comment_addr",
)
if self.attachment_nr:
keys += ("attachment_addr",)
if self.flags & v4c.FLAG_CN_DEFAULT_X:
keys += ("default_X_dg_addr", "default_X_cg_addr", "default_X_ch_addr")
keys += (
"channel_type",
"sync_type",
"data_type",
"bit_offset",
"byte_offset",
"bit_count",
"flags",
"pos_invalidation_bit",
"precision",
"reserved1",
"attachment_nr",
"min_raw_value",
"max_raw_value",
"lower_limit",
"upper_limit",
"lower_ext_limit",
"upper_ext_limit",
"sampling_rate"
)
max_len = max(len(key) for key in keys)
template = f"{{: <{max_len}}}: {{}}"
metadata = []
lines = f"""
name: {self.name}
display name: {self.display_name}
address: {hex(self.address)}
comment: {self.comment}
unit: {self.unit}
""".split(
"\n"
)
for key in keys:
val = getattr(self, key)
if key.endswith("addr") or key.startswith("text_"):
lines.append(template.format(key, hex(val)))
elif isinstance(val, float):
lines.append(template.format(key, round(val, 6)))
else:
if isinstance(val, bytes):
try:
lines.append(template.format(key, val.decode()))
except:
lines.append(template.format(key, val.decode('latin-1').strip("\0")))
else:
lines.append(template.format(key, val))
if key == "data_type":
lines[-1] += f" = {v4c.DATA_TYPE_TO_STRING[self.data_type]}"
elif key == "channel_type":
lines[-1] += f" = {v4c.CHANNEL_TYPE_TO_STRING[self.channel_type]}"
elif key == "sync_type":
lines[-1] += f" = {v4c.SYNC_TYPE_TO_STRING[self.sync_type]}"
elif key == "flags":
flags = []
for fl, string in v4c.FLAG_CN_TO_STRING.items():
if self.flags & fl:
flags.append(string)
if flags:
lines[-1] += f" [0x{self.flags:X}= {', '.join(flags)}]"
for line in lines:
if not line:
metadata.append(line)
else:
for wrapped_line in wrap(line, width=120):
metadata.append(wrapped_line)
return "\n".join(metadata)
def __contains__(self, item):
return hasattr(self, item)
def __lt__(self, other):
self_byte_offset = self.byte_offset
other_byte_offset = other.byte_offset
if self_byte_offset < other_byte_offset:
result = 1
elif self_byte_offset == other_byte_offset:
self_range = self.bit_offset + self.bit_count
other_range = other.bit_offset + other.bit_count
if self_range > other_range:
result = 1
else:
result = 0
else:
result = 0
return result
class _ChannelArrayBlockBase:
__slots__ = (
"axis_channels",
"dynamic_size_channels",
"input_quantity_channels",
"output_quantity_channel",
"comparison_quantity_channel",
"axis_conversions",
"address",
"id",
"reserved0",
"block_len",
"links_nr",
"ca_type",
"storage",
"dims",
"flags",
"byte_offset_base",
"invalidation_bit_base",
)
class ChannelArrayBlock(_ChannelArrayBlockBase):
"""
Other attributes
* ``address`` - int : array block address
* ``axis_channels`` - list : list of (group index, channel index)
pairs referencing the axis of this array block
* ``axis_conversions`` - list : list of ChannelConversion or None
for each axis of this array block
* ``dynamic_size_channels`` - list : list of (group index, channel index)
pairs referencing the axis dynamic size of this array block
* ``input_quantity_channels`` - list : list of (group index, channel index)
pairs referencing the input quantity channels of this array block
* ``output_quantity_channel`` - tuple | None : (group index, channel index)
pair referencing the output quantity channel of this array block
* ``comparison_quantity_channel`` - tuple | None : (group index, channel index)
pair referencing the comparison quantity channel of this array block
"""
def __init__(self, **kwargs):
self.axis_channels = []
self.dynamic_size_channels = []
self.input_quantity_channels = []
self.output_quantity_channel = None
self.comparison_quantity_channel = None
self.axis_conversions = []
try:
self.address = address = kwargs["address"]
stream = kwargs["stream"]
mapped = kwargs.get("mapped", False) or not is_file_like(stream)
if mapped:
(self.id, self.reserved0, self.block_len, self.links_nr) = COMMON_uf(
stream, address
)
if self.id != b"##CA":
message = (
f'Expected "##CA" block @{hex(address)} but found "{self.id}"'
)
logger.exception(message)
raise MdfException(message)
nr = self.links_nr
address += COMMON_SIZE
links = unpack_from(f"<{nr}Q", stream, address)
self.composition_addr = links[0]
links = links[1:]
address += nr * 8
values = unpack_from("<2BHIiI", stream, address)
dims_nr = values[2]
(
self.ca_type,
self.storage,
self.dims,
self.flags,
self.byte_offset_base,
self.invalidation_bit_base,
) = values
address += 16
dim_sizes = unpack_from(f"<{dims_nr}Q", stream, address)
for i, size in enumerate(dim_sizes):
self[f"dim_size_{i}"] = size
stream.seek(address + dims_nr * 8)
if self.storage == v4c.CA_STORAGE_TYPE_DG_TEMPLATE:
data_links_nr = 1
for size in dim_sizes:
data_links_nr *= size
for i in range(data_links_nr):
self[f"data_link_{i}"] = links[i]
links = links[data_links_nr:]
if self.flags & v4c.FLAG_CA_DYNAMIC_AXIS:
for i in range(dims_nr):
self[f"dynamic_size_{i}_dg_addr"] = links[3 * i]
self[f"dynamic_size_{i}_cg_addr"] = links[3 * i + 1]
self[f"dynamic_size_{i}_ch_addr"] = links[3 * i + 2]
links = links[dims_nr * 3 :]
if self.flags & v4c.FLAG_CA_INPUT_QUANTITY:
for i in range(dims_nr):
self[f"input_quantity_{i}_dg_addr"] = links[3 * i]
self[f"input_quantity_{i}_cg_addr"] = links[3 * i + 1]
self[f"input_quantity_{i}_ch_addr"] = links[3 * i + 2]
links = links[dims_nr * 3 :]
if self.flags & v4c.FLAG_CA_OUTPUT_QUANTITY:
self[f"output_quantity_dg_addr"] = links[0]
self[f"output_quantity_cg_addr"] = links[1]
self[f"output_quantity_ch_addr"] = links[2]
links = links[3:]
if self.flags & v4c.FLAG_CA_COMPARISON_QUANTITY:
self[f"comparison_quantity_dg_addr"] = links[0]
self[f"comparison_quantity_cg_addr"] = links[1]
self[f"comparison_quantity_ch_addr"] = links[2]
links = links[3:]
if self.flags & v4c.FLAG_CA_AXIS:
for i in range(dims_nr):
self[f"axis_conversion_{i}"] = links[i]
links = links[dims_nr:]
if (self.flags & v4c.FLAG_CA_AXIS) and not (
self.flags & v4c.FLAG_CA_FIXED_AXIS
):
for i in range(dims_nr):
self[f"scale_axis_{i}_dg_addr"] = links[3 * i]
self[f"scale_axis_{i}_cg_addr"] = links[3 * i + 1]
self[f"scale_axis_{i}_ch_addr"] = links[3 * i + 2]
links = links[dims_nr * 3 :]
if self.flags & v4c.FLAG_CA_FIXED_AXIS:
for i in range(dims_nr):
for j in range(self[f"dim_size_{i}"]):
(value,) = FLOAT64_u(stream.read(8))
self[f"axis_{i}_value_{j}"] = value
else:
stream.seek(address)
(self.id, self.reserved0, self.block_len, self.links_nr) = unpack(
"<4sI2Q", stream.read(24)
)
if self.id != b"##CA":
message = (
f'Expected "##CA" block @{hex(address)} but found "{self.id}"'
)
logger.exception(message)
raise MdfException(message)
nr = self.links_nr
links = unpack(f"<{nr}Q", stream.read(8 * nr))
self.composition_addr = links[0]
links = links[1:]
values = unpack("<2BHIiI", stream.read(16))
dims_nr = values[2]
(
self.ca_type,
self.storage,
self.dims,
self.flags,
self.byte_offset_base,
self.invalidation_bit_base,
) = values
dim_sizes = unpack(f"<{dims_nr}Q", stream.read(8 * dims_nr))
for i, size in enumerate(dim_sizes):
self[f"dim_size_{i}"] = size
if self.storage == v4c.CA_STORAGE_TYPE_DG_TEMPLATE:
data_links_nr = 1
for size in dim_sizes:
data_links_nr *= size
for i in range(data_links_nr):
self[f"data_link_{i}"] = links[i]
links = links[data_links_nr:]
if self.flags & v4c.FLAG_CA_DYNAMIC_AXIS:
for i in range(dims_nr):
self[f"dynamic_size_{i}_dg_addr"] = links[3 * i]
self[f"dynamic_size_{i}_cg_addr"] = links[3 * i + 1]
self[f"dynamic_size_{i}_ch_addr"] = links[3 * i + 2]
links = links[dims_nr * 3 :]
if self.flags & v4c.FLAG_CA_INPUT_QUANTITY:
for i in range(dims_nr):
self[f"input_quantity_{i}_dg_addr"] = links[3 * i]
self[f"input_quantity_{i}_cg_addr"] = links[3 * i + 1]
self[f"input_quantity_{i}_ch_addr"] = links[3 * i + 2]
links = links[dims_nr * 3 :]
if self.flags & v4c.FLAG_CA_OUTPUT_QUANTITY:
self[f"output_quantity_dg_addr"] = links[0]
self[f"output_quantity_cg_addr"] = links[1]
self[f"output_quantity_ch_addr"] = links[2]
links = links[3:]
if self.flags & v4c.FLAG_CA_COMPARISON_QUANTITY:
self[f"comparison_quantity_dg_addr"] = links[0]
self[f"comparison_quantity_cg_addr"] = links[1]
self[f"comparison_quantity_ch_addr"] = links[2]
links = links[3:]
if self.flags & v4c.FLAG_CA_AXIS:
for i in range(dims_nr):
self[f"axis_conversion_{i}"] = links[i]
links = links[dims_nr:]
if (self.flags & v4c.FLAG_CA_AXIS) and not (
self.flags & v4c.FLAG_CA_FIXED_AXIS
):
for i in range(dims_nr):
self[f"scale_axis_{i}_dg_addr"] = links[3 * i]
self[f"scale_axis_{i}_cg_addr"] = links[3 * i + 1]
self[f"scale_axis_{i}_ch_addr"] = links[3 * i + 2]
links = links[dims_nr * 3 :]
if self.flags & v4c.FLAG_CA_FIXED_AXIS:
for i in range(dims_nr):
for j in range(self[f"dim_size_{i}"]):
(value,) = FLOAT64_u(stream.read(8))
self[f"axis_{i}_value_{j}"] = value
except KeyError:
self.id = b"##CA"
self.reserved0 = 0
self.address = 0
ca_type = kwargs["ca_type"]
if ca_type == v4c.CA_TYPE_ARRAY:
dims_nr = kwargs["dims"]
self.block_len = 48 + dims_nr * 8
self.links_nr = 1
self.composition_addr = 0
self.ca_type = v4c.CA_TYPE_ARRAY
self.storage = v4c.CA_STORAGE_TYPE_CN_TEMPLATE
self.dims = dims_nr
self.flags = 0
self.byte_offset_base = kwargs.get("byte_offset_base", 1)
self.invalidation_bit_base = kwargs.get("invalidation_bit_base", 0)
for i in range(dims_nr):
self[f"dim_size_{i}"] = kwargs[f"dim_size_{i}"]
elif ca_type == v4c.CA_TYPE_SCALE_AXIS:
self.block_len = 56
self.links_nr = 1
self.composition_addr = 0
self.ca_type = v4c.CA_TYPE_SCALE_AXIS
self.storage = v4c.CA_STORAGE_TYPE_CN_TEMPLATE
self.dims = 1
self.flags = 0
self.byte_offset_base = kwargs.get("byte_offset_base", 1)
self.invalidation_bit_base = kwargs.get("invalidation_bit_base", 0)
self.dim_size_0 = kwargs["dim_size_0"]
elif ca_type == v4c.CA_TYPE_LOOKUP:
flags = kwargs["flags"]
dims_nr = kwargs["dims"]
values = sum(kwargs[f"dim_size_{i}"] for i in range(dims_nr))
if flags & v4c.FLAG_CA_FIXED_AXIS:
self.block_len = 48 + dims_nr * 16 + values * 8
self.links_nr = 1 + dims_nr
self.composition_addr = 0
for i in range(dims_nr):
self[f"axis_conversion_{i}"] = 0
self.ca_type = v4c.CA_TYPE_LOOKUP
self.storage = v4c.CA_STORAGE_TYPE_CN_TEMPLATE
self.dims = dims_nr
self.flags = v4c.FLAG_CA_FIXED_AXIS | v4c.FLAG_CA_AXIS
self.byte_offset_base = kwargs.get("byte_offset_base", 1)
self.invalidation_bit_base = kwargs.get("invalidation_bit_base", 0)
for i in range(dims_nr):
self[f"dim_size_{i}"] = kwargs[f"dim_size_{i}"]
for i in range(dims_nr):
for j in range(self[f"dim_size_{i}"]):
self[f"axis_{i}_value_{j}"] = kwargs.get(
f"axis_{i}_value_{j}", j
)
else:
self.block_len = 48 + dims_nr * 5 * 8
self.links_nr = 1 + dims_nr * 4
self.composition_addr = 0
for i in range(dims_nr):
self[f"axis_conversion_{i}"] = 0
for i in range(dims_nr):
self[f"scale_axis_{i}_dg_addr"] = 0
self[f"scale_axis_{i}_cg_addr"] = 0
self[f"scale_axis_{i}_ch_addr"] = 0
self.ca_type = v4c.CA_TYPE_LOOKUP
self.storage = v4c.CA_STORAGE_TYPE_CN_TEMPLATE
self.dims = dims_nr
self.flags = v4c.FLAG_CA_AXIS
self.byte_offset_base = kwargs.get("byte_offset_base", 1)
self.invalidation_bit_base = kwargs.get("invalidation_bit_base", 0)
for i in range(dims_nr):
self[f"dim_size_{i}"] = kwargs[f"dim_size_{i}"]
def __getitem__(self, item):
return self.__getattribute__(item)
def __setitem__(self, item, value):
self.__setattr__(item, value)
def __str__(self):
return "<ChannelArrayBlock (referenced channels: {}, address: {}, fields: {})>".format(
self.axis_channels, hex(self.address), dict(self)
)
def __bytes__(self):
flags = self.flags
dims_nr = self.dims
keys = (
"id",
"reserved0",
"block_len",
"links_nr",
"composition_addr",
)
if self.storage:
dim_sizes = [self[f"dim_size_{i}"] for i in range(dims_nr)]
data_links_nr = 1
for size in dim_sizes:
data_links_nr *= size
else:
dim_sizes = []
data_links_nr = 0
if self.storage == v4c.CA_STORAGE_TYPE_DG_TEMPLATE:
keys += tuple(f"data_link_{i}" for i in range(data_links_nr))
if flags & v4c.FLAG_CA_DYNAMIC_AXIS:
for i in range(dims_nr):
keys += (
f"dynamic_size_{i}_dg_addr",
f"dynamic_size_{i}_cg_addr",
f"dynamic_size_{i}_ch_addr",
)
if flags & v4c.FLAG_CA_INPUT_QUANTITY:
for i in range(dims_nr):
keys += (
f"input_quantity_{i}_dg_addr",
f"input_quantity_{i}_cg_addr",
f"input_quantity_{i}_ch_addr",
)
if flags & v4c.FLAG_CA_OUTPUT_QUANTITY:
keys += (
f"output_quantity_dg_addr",
f"output_quantity_cg_addr",
f"output_quantity_ch_addr",
)
if flags & v4c.FLAG_CA_COMPARISON_QUANTITY:
keys += (
f"comparison_quantity_dg_addr",
f"comparison_quantity_cg_addr",
f"comparison_quantity_ch_addr",
)
if flags & v4c.FLAG_CA_AXIS:
keys += tuple(f"axis_conversion_{i}" for i in range(dims_nr))
if (flags & v4c.FLAG_CA_AXIS) and not (flags & v4c.FLAG_CA_FIXED_AXIS):
for i in range(dims_nr):
keys += (
f"scale_axis_{i}_dg_addr",
f"scale_axis_{i}_cg_addr",
f"scale_axis_{i}_ch_addr",
)
keys += (
"ca_type",
"storage",
"dims",
"flags",
"byte_offset_base",
"invalidation_bit_base",
)
keys += tuple(f"dim_size_{i}" for i in range(dims_nr))
if flags & v4c.FLAG_CA_FIXED_AXIS:
keys += tuple(
f"axis_{i}_value_{j}"
for i in range(dims_nr)
for j in range(self[f"dim_size_{i}"])
)
if self.storage:
keys += tuple(f"cycle_count_{i}" for i in range(data_links_nr))
fmt = "<4sI{}Q2BHIiI{}Q{}d{}Q".format(
self.links_nr + 2, dims_nr, sum(dim_sizes), data_links_nr
)
result = pack(fmt, *[getattr(self, key) for key in keys])
return result
class ChannelGroup:
"""*ChannelGroup* has the following attributes, that are also available as
dict like key-value pairs
CGBLOCK fields
* ``id`` - bytes : block ID; always b'##CG'
* ``reserved0`` - int : reserved bytes
* ``block_len`` - int : block bytes size
* ``links_nr`` - int : number of links
* ``next_cg_addr`` - int : next channel group address
* ``first_ch_addr`` - int : address of first channel of this channel group
* ``acq_name_addr`` - int : address of TXBLOCK that contains the channel
group acquisition name
* ``acq_source_addr`` - int : address of SourceInformation that contains the
channel group source
* ``first_sample_reduction_addr`` - int : address of first SRBLOCK; this is
considered 0 since sample reduction is not yet supported
* ``comment_addr`` - int : address of TXBLOCK/MDBLOCK that contains the
channel group comment
* ``record_id`` - int : record ID for the channel group
* ``cycles_nr`` - int : number of cycles for this channel group
* ``flags`` - int : channel group flags
* ``path_separator`` - int : ordinal for character used as path separator
* ``reserved1`` - int : reserved bytes
* ``samples_byte_nr`` - int : number of bytes used for channels samples in
the record for this channel group; this does not contain the invalidation
bytes
* ``invalidation_bytes_nr`` - int : number of bytes used for invalidation
bits by this channel group
Other attributes
* ``acq_name`` - str : acquisition name
* ``acq_source`` - SourceInformation : acquisition source information
* ``address`` - int : channel group address
* ``comment`` - str : channel group comment
"""
__slots__ = (
"address",
"acq_name",
"acq_source",
"comment",
"id",
"reserved0",
"block_len",
"links_nr",
"next_cg_addr",
"first_ch_addr",
"acq_name_addr",
"acq_source_addr",
"first_sample_reduction_addr",
"comment_addr",
"cg_master_addr",
"record_id",
"cycles_nr",
"flags",
"path_separator",
"reserved1",
"samples_byte_nr",
"invalidation_bytes_nr",
"cg_master_index",
"sampling_rate",
"unit",
)
def __init__(self, **kwargs):
self.acq_name = self.comment = ""
self.acq_source = None
self.cg_master_index = None
try:
self.address = address = kwargs["address"]
stream = kwargs["stream"]
mapped = kwargs.get("mapped", False) or not is_file_like(stream)
if mapped:
(self.id, self.reserved0, self.block_len, self.links_nr) = COMMON_uf(
stream, address
)
if self.block_len == v4c.CG_BLOCK_SIZE:
(
self.next_cg_addr,
self.first_ch_addr,
self.acq_name_addr,
self.acq_source_addr,
self.first_sample_reduction_addr,
self.comment_addr,
self.record_id,
self.cycles_nr,
self.flags,
self.path_separator,
self.reserved1,
self.samples_byte_nr,
self.invalidation_bytes_nr,
) = v4c.CHANNEL_GROUP_SHORT_uf(stream, address + COMMON_SIZE)
else:
(
self.next_cg_addr,
self.first_ch_addr,
self.acq_name_addr,
self.acq_source_addr,
self.first_sample_reduction_addr,
self.comment_addr,
self.cg_master_addr,
self.record_id,
self.cycles_nr,
self.flags,
self.path_separator,
self.reserved1,
self.samples_byte_nr,
self.invalidation_bytes_nr,
) = v4c.CHANNEL_GROUP_RM_SHORT_uf(stream, address + COMMON_SIZE)
else:
stream.seek(address)
(
self.id,
self.reserved0,
self.block_len,
self.links_nr,
) = v4c.COMMON_u(stream.read(COMMON_SIZE))
if self.block_len == v4c.CG_BLOCK_SIZE:
(
self.next_cg_addr,
self.first_ch_addr,
self.acq_name_addr,
self.acq_source_addr,
self.first_sample_reduction_addr,
self.comment_addr,
self.record_id,
self.cycles_nr,
self.flags,
self.path_separator,
self.reserved1,
self.samples_byte_nr,
self.invalidation_bytes_nr,
) = v4c.CHANNEL_GROUP_SHORT_u(
stream.read(v4c.CG_BLOCK_SIZE - COMMON_SIZE)
)
else:
(
self.next_cg_addr,
self.first_ch_addr,
self.acq_name_addr,
self.acq_source_addr,
self.first_sample_reduction_addr,
self.comment_addr,
self.cg_master_addr,
self.record_id,
self.cycles_nr,
self.flags,
self.path_separator,
self.reserved1,
self.samples_byte_nr,
self.invalidation_bytes_nr,
) = v4c.CHANNEL_GROUP_RM_SHORT_u(
stream.read(v4c.CG_RM_BLOCK_SIZE - COMMON_SIZE)
)
if self.id != b"##CG":
message = f'Expected "##CG" block @{hex(address)} but found "{self.id}"'
logger.exception(message)
raise MdfException(message)
self.acq_name = get_text_v4(self.acq_name_addr, stream, mapped=mapped)
self.comment = get_text_v4(self.comment_addr, stream, mapped=mapped)
si_map = kwargs["si_map"]
address = self.acq_source_addr
if address:
if mapped:
raw_bytes = stream[address : address + v4c.SI_BLOCK_SIZE]
else:
stream.seek(address)
raw_bytes = stream.read(v4c.SI_BLOCK_SIZE)
if raw_bytes in si_map:
source = si_map[raw_bytes]
else:
source = SourceInformation(
raw_bytes=raw_bytes,
stream=stream,
address=address,
mapped=mapped,
tx_map=kwargs["tx_map"],
)
si_map[raw_bytes] = source
self.acq_source = source
else:
self.acq_source = None
except KeyError:
self.address = 0
self.id = b"##CG"
self.reserved0 = kwargs.get("reserved0", 0)
self.next_cg_addr = kwargs.get("next_cg_addr", 0)
self.first_ch_addr = kwargs.get("first_ch_addr", 0)
self.acq_name_addr = kwargs.get("acq_name_addr", 0)
self.acq_source_addr = kwargs.get("acq_source_addr", 0)
self.first_sample_reduction_addr = kwargs.get(
"first_sample_reduction_addr", 0
)
self.comment_addr = kwargs.get("comment_addr", 0)
self.record_id = kwargs.get("record_id", 1)
self.cycles_nr = kwargs.get("cycles_nr", 0)
self.flags = kwargs.get("flags", 0)
self.path_separator = kwargs.get("path_separator", 0)
self.reserved1 = kwargs.get("reserved1", 0)
self.samples_byte_nr = kwargs.get("samples_byte_nr", 0)
self.invalidation_bytes_nr = kwargs.get("invalidation_bytes_nr", 0)
if self.flags & v4c.FLAG_CG_REMOTE_MASTER:
self.cg_master_addr = kwargs.get("cg_master_addr", 0)
self.block_len = v4c.CG_RM_BLOCK_SIZE
self.links_nr = 7
else:
self.block_len = v4c.CG_BLOCK_SIZE
self.links_nr = 6
def __getitem__(self, item):
return self.__getattribute__(item)
def __setitem__(self, item, value):
self.__setattr__(item, value)
def to_blocks(self, address, blocks, defined_texts, si_map):
text = self.acq_name
if text:
if text in defined_texts:
self.acq_name_addr = defined_texts[text]
else:
tx_block = TextBlock(text=text)
self.acq_name_addr = address
defined_texts[text] = address
tx_block.address = address
address += tx_block.block_len
blocks.append(tx_block)
else:
self.acq_name_addr = 0
text = self.comment
if text:
if text in defined_texts:
self.comment_addr = defined_texts[text]
else:
meta = text.startswith("<CGcomment")
tx_block = TextBlock(text=text, meta=meta)
self.comment_addr = address
defined_texts[text] = address
tx_block.address = address
address += tx_block.block_len
blocks.append(tx_block)
else:
self.comment_addr = 0
source = self.acq_source
if source:
address = source.to_blocks(address, blocks, defined_texts, si_map)
self.acq_source_addr = source.address
else:
self.acq_source_addr = 0
blocks.append(self)
self.address = address
address += self.block_len
return address
def __bytes__(self):
if self.flags & v4c.FLAG_CG_REMOTE_MASTER:
result = v4c.CHANNEL_GROUP_RM_p(
self.id,
self.reserved0,
self.block_len,
self.links_nr,
self.next_cg_addr,
self.first_ch_addr,
self.acq_name_addr,
self.acq_source_addr,
self.first_sample_reduction_addr,
self.comment_addr,
self.cg_master_addr,
self.record_id,
self.cycles_nr,
self.flags,
self.path_separator,
self.reserved1,
self.samples_byte_nr,
self.invalidation_bytes_nr,
)
else:
result = v4c.CHANNEL_GROUP_p(
self.id,
self.reserved0,
self.block_len,
self.links_nr,
self.next_cg_addr,
self.first_ch_addr,
self.acq_name_addr,
self.acq_source_addr,
self.first_sample_reduction_addr,
self.comment_addr,
self.record_id,
self.cycles_nr,
self.flags,
self.path_separator,
self.reserved1,
self.samples_byte_nr,
self.invalidation_bytes_nr,
)
return result
def metadata(self):
keys = (
"id",
"reserved0",
"block_len",
"links_nr",
"next_cg_addr",
"first_ch_addr",
"acq_name_addr",
"acq_source_addr",
"first_sample_reduction_addr",
"comment_addr",
)
if self.block_len == v4c.CG_RM_BLOCK_SIZE:
keys += ("cg_master_addr",)
keys += (
"record_id",
"cycles_nr",
"flags",
"path_separator",
"reserved1",
"samples_byte_nr",
"invalidation_bytes_nr",
)
max_len = max(len(key) for key in keys)
template = f"{{: <{max_len}}}: {{}}"
metadata = []
lines = f"""
name: {self.acq_name}
address: {hex(self.address)}
comment: {self.comment}
""".split(
"\n"
)
for key in keys:
val = getattr(self, key)
if key.endswith("addr") or key.startswith("text_"):
lines.append(template.format(key, hex(val)))
elif isinstance(val, float):
lines.append(template.format(key, round(val, 6)))
else:
if isinstance(val, bytes):
try:
lines.append(template.format(key, val.decode()))
except:
lines.append(template.format(key, val.decode('latin-1').strip("\0")))
else:
lines.append(template.format(key, val))
if key == "flags":
flags = []
for fl, string in v4c.FLAG_CG_TO_STRING.items():
if self.flags & fl:
flags.append(string)
if flags:
lines[-1] += f" [0x{self.flags:X}= {', '.join(flags)}]"
elif key == "path_separator":
if self.path_separator:
sep = pack("<H", self.path_separator).decode("utf-16")
lines[-1] += f" (= '{sep}')"
else:
lines[-1] += f" (= <undefined>)"
for line in lines:
if not line:
metadata.append(line)
else:
for wrapped_line in wrap(line, width=120):
metadata.append(wrapped_line)
return "\n".join(metadata)
class _ChannelConversionBase:
__slots__ = (
"name",
"unit",
"comment",
"formula",
"referenced_blocks",
"address",
"id",
"reserved0",
"block_len",
"links_nr",
"name_addr",
"unit_addr",
"comment_addr",
"inv_conv_addr",
"conversion_type",
"precision",
"flags",
"ref_param_nr",
"val_param_nr",
"min_phy_value",
"max_phy_value",
"a",
"b",
"P1",
"P2",
"P3",
"P4",
"P5",
"P6",
)
class ChannelConversion(_ChannelConversionBase):
"""*ChannelConversion* has the following attributes, that are also available as
dict like key-value pairs
CCBLOCK common fields
* ``id`` - bytes : block ID; always b'##CC'
* ``reserved0`` - int : reserved bytes
* ``block_len`` - int : block bytes size
* ``links_nr`` - int : number of links
* ``name_addr`` - int : address of TXBLOCK that contains the
conversion name
* ``unit_addr`` - int : address of TXBLOCK that contains the
conversion unit
* ``comment_addr`` - int : address of TXBLOCK/MDBLOCK that contains the
conversion comment
* ``inv_conv_addr`` - int : address of inverse conversion
* ``conversion_type`` - int : integer code for conversion type
* ``precision`` - int : integer code for precision
* ``flags`` - int : conversion block flags
* ``ref_param_nr`` - int : number of referenced parameters (linked
parameters)
* ``val_param_nr`` - int : number of value parameters
* ``min_phy_value`` - float : minimum physical channel value
* ``max_phy_value`` - float : maximum physical channel value
CCBLOCK specific fields
* linear conversion
* ``a`` - float : factor
* ``b`` - float : offset
* rational conversion
* ``P1`` to ``P6`` - float : parameters
* algebraic conversion
* ``formula_addr`` - int : address of TXBLOCK that contains the
algebraic conversion formula
* tabular conversion with or without interpolation
* ``raw_<N>`` - float : N-th raw value
* ``phys_<N>`` - float : N-th physical value
* tabular range conversion
* ``lower_<N>`` - float : N-th lower value
* ``upper_<N>`` - float : N-th upper value
* ``phys_<N>`` - float : N-th physical value
* tabular value to text conversion
* ``val_<N>`` - float : N-th raw value
* ``text_<N>`` - int : address of N-th TXBLOCK that
contains the physical value
* ``default`` - int : address of TXBLOCK that contains
the default physical value
* tabular range to text conversion
* ``lower_<N>`` - float : N-th lower value
* ``upper_<N>`` - float : N-th upper value
* ``text_<N>`` - int : address of N-th TXBLOCK that
contains the physical value
* ``default`` - int : address of TXBLOCK that contains
the default physical value
* text to value conversion
* ``val_<N>`` - float : N-th physical value
* ``text_<N>`` - int : address of N-th TXBLOCK that
contains the raw value
* ``val_default`` - float : default physical value
* text transformation (translation) conversion
* ``input_<N>_addr`` - int : address of N-th TXBLOCK that
contains the raw value
* ``output_<N>_addr`` - int : address of N-th TXBLOCK that
contains the physical value
* ``default_addr`` - int : address of TXBLOCK that contains
the default physical value
Other attributes
* ``address`` - int : channel conversion address
* ``comment`` - str : channel conversion comment
* ``formula`` - str : algebraic conversion formula; default ''
* ``referenced_blocks`` - dict : dict of referenced blocks; can be TextBlock
objects for value to text, and text to text conversions; for partial
conversions the referenced blocks can be ChannelConversion objects as well
* ``name`` - str : channel conversion name
* ``unit`` - str : channel conversion unit
"""
def __init__(self, **kwargs):
if "stream" in kwargs:
stream = kwargs["stream"]
mapped = kwargs["mapped"]
try:
self.address = address = kwargs["address"]
block = kwargs["raw_bytes"]
(self.id, self.reserved0, self.block_len, self.links_nr) = COMMON_uf(
block
)
if self.id != b"##CC":
message = (
f'Expected "##CC" block @{hex(address)} but found "{self.id}"'
)
logger.exception(message)
raise MdfException(message)
block = block[COMMON_SIZE:]
except KeyError:
self.address = address = kwargs["address"]
stream.seek(address)
(self.id, self.reserved0, self.block_len, self.links_nr) = COMMON_u(
stream.read(COMMON_SIZE)
)
if self.id != b"##CC":
message = (
f'Expected "##CC" block @{hex(address)} but found "{self.id}"'
)
logger.exception(message)
raise MdfException(message)
block = stream.read(self.block_len - COMMON_SIZE)
(conv,) = UINT8_uf(block, self.links_nr * 8)
if conv == v4c.CONVERSION_TYPE_NON:
(
self.name_addr,
self.unit_addr,
self.comment_addr,
self.inv_conv_addr,
self.conversion_type,
self.precision,
self.flags,
self.ref_param_nr,
self.val_param_nr,
self.min_phy_value,
self.max_phy_value,
) = v4c.CONVERSION_NONE_INIT_u(block)
elif conv == v4c.CONVERSION_TYPE_LIN:
(
self.name_addr,
self.unit_addr,
self.comment_addr,
self.inv_conv_addr,
self.conversion_type,
self.precision,
self.flags,
self.ref_param_nr,
self.val_param_nr,
self.min_phy_value,
self.max_phy_value,
self.b,
self.a,
) = v4c.CONVERSION_LINEAR_INIT_u(block)
elif conv == v4c.CONVERSION_TYPE_RAT:
(
self.name_addr,
self.unit_addr,
self.comment_addr,
self.inv_conv_addr,
self.conversion_type,
self.precision,
self.flags,
self.ref_param_nr,
self.val_param_nr,
self.min_phy_value,
self.max_phy_value,
self.P1,
self.P2,
self.P3,
self.P4,
self.P5,
self.P6,
) = unpack(v4c.FMT_CONVERSION_RAT_INIT, block)
elif conv == v4c.CONVERSION_TYPE_ALG:
(
self.name_addr,
self.unit_addr,
self.comment_addr,
self.inv_conv_addr,
self.formula_addr,
self.conversion_type,
self.precision,
self.flags,
self.ref_param_nr,
self.val_param_nr,
self.min_phy_value,
self.max_phy_value,
) = unpack(v4c.FMT_CONVERSION_ALGEBRAIC_INIT, block)
elif conv in (v4c.CONVERSION_TYPE_TABI, v4c.CONVERSION_TYPE_TAB):
(
self.name_addr,
self.unit_addr,
self.comment_addr,
self.inv_conv_addr,
self.conversion_type,
self.precision,
self.flags,
self.ref_param_nr,
self.val_param_nr,
self.min_phy_value,
self.max_phy_value,
) = unpack_from(v4c.FMT_CONVERSION_NONE_INIT, block)
nr = self.val_param_nr
values = unpack_from(f"<{nr}d", block, 56)
for i in range(nr // 2):
self[f"raw_{i}"], self[f"phys_{i}"] = (
values[i * 2],
values[2 * i + 1],
)
elif conv == v4c.CONVERSION_TYPE_RTAB:
(
self.name_addr,
self.unit_addr,
self.comment_addr,
self.inv_conv_addr,
self.conversion_type,
self.precision,
self.flags,
self.ref_param_nr,
self.val_param_nr,
self.min_phy_value,
self.max_phy_value,
) = unpack_from(v4c.FMT_CONVERSION_NONE_INIT, block)
nr = self.val_param_nr
values = unpack_from(f"<{nr}d", block, 56)
for i in range((nr - 1) // 3):
(self[f"lower_{i}"], self[f"upper_{i}"], self[f"phys_{i}"]) = (
values[i * 3],
values[3 * i + 1],
values[3 * i + 2],
)
(self.default,) = FLOAT64_u(block[-8:])
elif conv == v4c.CONVERSION_TYPE_TABX:
(
self.name_addr,
self.unit_addr,
self.comment_addr,
self.inv_conv_addr,
) = unpack_from("<4Q", block)
links_nr = self.links_nr - 4
links = unpack_from(f"<{links_nr}Q", block, 32)
for i, link in enumerate(links[:-1]):
self[f"text_{i}"] = link
self.default_addr = links[-1]
(
self.conversion_type,
self.precision,
self.flags,
self.ref_param_nr,
self.val_param_nr,
self.min_phy_value,
self.max_phy_value,
) = unpack_from("<2B3H2d", block, 32 + links_nr * 8)
values = unpack_from(f"<{links_nr - 1}d", block, 32 + links_nr * 8 + 24)
for i, val in enumerate(values):
self[f"val_{i}"] = val
elif conv == v4c.CONVERSION_TYPE_RTABX:
(
self.name_addr,
self.unit_addr,
self.comment_addr,
self.inv_conv_addr,
) = unpack_from("<4Q", block)
links_nr = self.links_nr - 4
links = unpack_from(f"<{links_nr}Q", block, 32)
for i, link in enumerate(links[:-1]):
self[f"text_{i}"] = link
self.default_addr = links[-1]
(
self.conversion_type,
self.precision,
self.flags,
self.ref_param_nr,
self.val_param_nr,
self.min_phy_value,
self.max_phy_value,
) = unpack_from("<2B3H2d", block, 32 + links_nr * 8)
values = unpack_from(
f"<{self.val_param_nr}d", block, 32 + links_nr * 8 + 24
)
self.default_lower = self.default_upper = 0
for i in range(self.val_param_nr // 2):
j = 2 * i
self[f"lower_{i}"] = values[j]
self[f"upper_{i}"] = values[j + 1]
elif conv == v4c.CONVERSION_TYPE_TTAB:
(
self.name_addr,
self.unit_addr,
self.comment_addr,
self.inv_conv_addr,
) = unpack_from("<4Q", block)
links_nr = self.links_nr - 4
links = unpack_from(f"<{links_nr}Q", block, 32)
for i, link in enumerate(links):
self[f"text_{i}"] = link
(
self.conversion_type,
self.precision,
self.flags,
self.ref_param_nr,
self.val_param_nr,
self.min_phy_value,
self.max_phy_value,
) = unpack_from("<2B3H2d", block, 32 + links_nr * 8)
values = unpack_from(
f"<{self.val_param_nr}d", block, 32 + links_nr * 8 + 24
)
for i, val in enumerate(values[:-1]):
self[f"val_{i}"] = val
self.val_default = values[-1]
elif conv == v4c.CONVERSION_TYPE_TRANS:
(
self.name_addr,
self.unit_addr,
self.comment_addr,
self.inv_conv_addr,
) = unpack_from("<4Q", block)
links_nr = self.links_nr - 4
links = unpack_from(f"<{links_nr}Q", block, 32)
for i in range((links_nr - 1) // 2):
j = 2 * i
self[f"input_{i}_addr"] = links[j]
self[f"output_{i}_addr"] = links[j + 1]
self.default_addr = links[-1]
(
self.conversion_type,
self.precision,
self.flags,
self.ref_param_nr,
self.val_param_nr,
self.min_phy_value,
self.max_phy_value,
) = unpack_from("<2B3H2d", block, 32 + links_nr * 8)
elif conv == v4c.CONVERSION_TYPE_BITFIELD:
(
self.name_addr,
self.unit_addr,
self.comment_addr,
self.inv_conv_addr,
) = unpack_from("<4Q", block)
links_nr = self.links_nr - 4
links = unpack_from(f"<{links_nr}Q", block, 32)
for i, link in enumerate(links):
self[f"text_{i}"] = link
(
self.conversion_type,
self.precision,
self.flags,
self.ref_param_nr,
self.val_param_nr,
self.min_phy_value,
self.max_phy_value,
) = unpack_from("<2B3H2d", block, 32 + links_nr * 8)
values = unpack_from(
f"<{self.val_param_nr}Q", block, 32 + links_nr * 8 + 24
)
for i, val in enumerate(values):
self[f"mask_{i}"] = val
self.referenced_blocks = None
tx_map = kwargs["tx_map"]
addr = self.name_addr
if addr in tx_map:
self.name = tx_map[addr]
else:
self.name = get_text_v4(addr, stream, mapped=mapped)
tx_map[addr] = self.name
addr = self.unit_addr
if addr in tx_map:
self.unit = tx_map[addr]
else:
self.unit = get_text_v4(addr, stream, mapped=mapped)
tx_map[addr] = self.unit
addr = self.comment_addr
if addr in tx_map:
self.comment = tx_map[addr]
else:
self.comment = get_text_v4(addr, stream, mapped=mapped)
tx_map[addr] = self.comment
conv_type = conv
if conv_type == v4c.CONVERSION_TYPE_ALG:
self.formula = get_text_v4(
self.formula_addr, stream, mapped=mapped
).replace("x", "X")
else:
self.formula = ""
if conv_type in v4c.TABULAR_CONVERSIONS:
refs = self.referenced_blocks = {}
if conv_type in (
v4c.CONVERSION_TYPE_TTAB,
v4c.CONVERSION_TYPE_BITFIELD,
):
tabs = self.links_nr - 4
else:
tabs = self.links_nr - 4 - 1
for i in range(tabs):
address = self[f"text_{i}"]
if address:
if address in tx_map:
txt = tx_map[address]
if not isinstance(txt, bytes):
txt = txt.encode("utf-8")
refs[f"text_{i}"] = txt
else:
stream.seek(address)
_id = stream.read(4)
if _id == b"##TX":
block = get_text_v4(
address=address,
stream=stream,
mapped=mapped,
decode=False,
)
tx_map[address] = block
refs[f"text_{i}"] = block
elif _id == b"##CC":
block = ChannelConversion(
address=address,
stream=stream,
mapped=mapped,
tx_map=tx_map,
)
refs[f"text_{i}"] = block
else:
message = f'Expected "##TX" or "##CC" block @{hex(address)} but found "{_id}"'
logger.exception(message)
raise MdfException(message)
else:
refs[f"text_{i}"] = b""
if conv_type not in (
v4c.CONVERSION_TYPE_TTAB,
v4c.CONVERSION_TYPE_BITFIELD,
):
address = self.default_addr
if address:
if address in tx_map:
txt = tx_map[address] or b""
if not isinstance(txt, bytes):
txt = txt.encode("utf-8")
refs["default_addr"] = txt
else:
stream.seek(address)
_id = stream.read(4)
if _id == b"##TX":
block = get_text_v4(
address=address,
stream=stream,
mapped=mapped,
decode=False,
)
tx_map[address] = block
refs["default_addr"] = block
elif _id == b"##CC":
block = ChannelConversion(
address=address,
stream=stream,
mapped=mapped,
tx_map=tx_map,
)
refs["default_addr"] = block
else:
message = f'Expected "##TX" or "##CC" block @{hex(address)} but found "{_id}"'
logger.exception(message)
raise MdfException(message)
else:
refs["default_addr"] = b""
elif conv_type == v4c.CONVERSION_TYPE_TRANS:
refs = self.referenced_blocks = {}
# link_nr - common links (4) - default text link (1)
for i in range((self.links_nr - 4 - 1) // 2):
for key in (f"input_{i}_addr", f"output_{i}_addr"):
address = self[key]
if address:
if address in tx_map:
txt = tx_map[address] or b""
if not isinstance(txt, bytes):
txt = txt.encode("utf-8")
refs[key] = txt
else:
block = get_text_v4(
address=address,
stream=stream,
mapped=mapped,
decode=False,
)
tx_map[address] = block
refs[key] = block
else:
refs[key] = b""
address = self.default_addr
if address:
if address in tx_map:
txt = tx_map[address] or b""
if not isinstance(txt, bytes):
txt = txt.encode("utf-8")
refs["default_addr"] = txt
else:
block = get_text_v4(
address=address,
stream=stream,
mapped=mapped,
decode=False,
)
refs["default_addr"] = block
tx_map[address] = block
else:
refs["default_addr"] = b""
else:
self.name = kwargs.get("name", "")
self.unit = kwargs.get("unit", "")
self.comment = kwargs.get("comment", "")
self.formula = kwargs.get("formula", "")
self.referenced_blocks = None
self.address = 0
self.id = b"##CC"
self.reserved0 = 0
if kwargs["conversion_type"] == v4c.CONVERSION_TYPE_NON:
self.block_len = v4c.CC_NONE_BLOCK_SIZE
self.links_nr = 4
self.name_addr = kwargs.get("name_addr", 0)
self.unit_addr = kwargs.get("unit_addr", 0)
self.comment_addr = kwargs.get("comment_addr", 0)
self.inv_conv_addr = 0
self.conversion_type = v4c.CONVERSION_TYPE_NON
self.precision = 1
self.flags = 0
self.ref_param_nr = 0
self.val_param_nr = 0
self.min_phy_value = kwargs.get("min_phy_value", 0)
self.max_phy_value = kwargs.get("max_phy_value", 0)
elif kwargs["conversion_type"] == v4c.CONVERSION_TYPE_LIN:
self.block_len = v4c.CC_LIN_BLOCK_SIZE
self.links_nr = 4
self.name_addr = kwargs.get("name_addr", 0)
self.unit_addr = kwargs.get("unit_addr", 0)
self.comment_addr = kwargs.get("comment_addr", 0)
self.inv_conv_addr = kwargs.get("inv_conv_addr", 0)
self.conversion_type = v4c.CONVERSION_TYPE_LIN
self.precision = kwargs.get("precision", 1)
self.flags = kwargs.get("flags", 0)
self.ref_param_nr = 0
self.val_param_nr = 2
self.min_phy_value = kwargs.get("min_phy_value", 0)
self.max_phy_value = kwargs.get("max_phy_value", 0)
self.b = kwargs["b"]
self.a = kwargs["a"]
elif kwargs["conversion_type"] == v4c.CONVERSION_TYPE_ALG:
self.block_len = v4c.CC_ALG_BLOCK_SIZE
self.links_nr = 5
self.name_addr = kwargs.get("name_addr", 0)
self.unit_addr = kwargs.get("unit_addr", 0)
self.comment_addr = kwargs.get("comment_addr", 0)
self.inv_conv_addr = kwargs.get("inv_conv_addr", 0)
self.formula_addr = kwargs.get("formula_addr", 0)
self.conversion_type = v4c.CONVERSION_TYPE_ALG
self.precision = kwargs.get("precision", 1)
self.flags = kwargs.get("flags", 0)
self.ref_param_nr = 1
self.val_param_nr = 0
self.min_phy_value = kwargs.get("min_phy_value", 0)
self.max_phy_value = kwargs.get("max_phy_value", 0)
elif kwargs["conversion_type"] in (
v4c.CONVERSION_TYPE_TAB,
v4c.CONVERSION_TYPE_TABI,
):
nr = kwargs["val_param_nr"]
self.block_len = 80 + 8 * nr
self.links_nr = 4
self.name_addr = kwargs.get("name_addr", 0)
self.unit_addr = kwargs.get("unit_addr", 0)
self.comment_addr = kwargs.get("comment_addr", 0)
self.inv_conv_addr = kwargs.get("inv_conv_addr", 0)
self.conversion_type = kwargs["conversion_type"]
self.precision = kwargs.get("precision", 1)
self.flags = kwargs.get("flags", 0)
self.ref_param_nr = 0
self.val_param_nr = nr
self.min_phy_value = kwargs.get("min_phy_value", 0)
self.max_phy_value = kwargs.get("max_phy_value", 0)
for i in range(nr // 2):
self[f"raw_{i}"] = kwargs[f"raw_{i}"]
self[f"phys_{i}"] = kwargs[f"phys_{i}"]
elif kwargs["conversion_type"] == v4c.CONVERSION_TYPE_RTAB:
self.block_len = kwargs["val_param_nr"] * 8 + 80
self.links_nr = 4
self.name_addr = kwargs.get("name_addr", 0)
self.unit_addr = kwargs.get("unit_addr", 0)
self.comment_addr = kwargs.get("comment_addr", 0)
self.inv_conv_addr = kwargs.get("inv_conv_addr", 0)
self.conversion_type = v4c.CONVERSION_TYPE_RTAB
self.precision = kwargs.get("precision", 0)
self.flags = kwargs.get("flags", 0)
self.ref_param_nr = 0
self.val_param_nr = kwargs["val_param_nr"]
self.min_phy_value = kwargs.get("min_phy_value", 0)
self.max_phy_value = kwargs.get("max_phy_value", 0)
for i in range((kwargs["val_param_nr"] - 1) // 3):
self[f"lower_{i}"] = kwargs[f"lower_{i}"]
self[f"upper_{i}"] = kwargs[f"upper_{i}"]
self[f"phys_{i}"] = kwargs[f"phys_{i}"]
self.default = kwargs["default"]
elif kwargs["conversion_type"] == v4c.CONVERSION_TYPE_RAT:
self.block_len = 80 + 6 * 8
self.links_nr = 4
self.name_addr = kwargs.get("name_addr", 0)
self.unit_addr = kwargs.get("unit_addr", 0)
self.comment_addr = kwargs.get("comment_addr", 0)
self.inv_conv_addr = kwargs.get("inv_conv_addr", 0)
self.conversion_type = kwargs["conversion_type"]
self.precision = kwargs.get("precision", 1)
self.flags = kwargs.get("flags", 0)
self.ref_param_nr = 0
self.val_param_nr = kwargs.get("val_param_nr", 6)
self.min_phy_value = kwargs.get("min_phy_value", 0)
self.max_phy_value = kwargs.get("max_phy_value", 0)
for i in range(1, 7):
self[f"P{i}"] = kwargs[f"P{i}"]
elif kwargs["conversion_type"] == v4c.CONVERSION_TYPE_TABX:
self.referenced_blocks = {}
nr = kwargs["ref_param_nr"] - 1
self.block_len = (nr * 8 * 2) + 88
self.links_nr = nr + 5
self.name_addr = kwargs.get("name_addr", 0)
self.unit_addr = kwargs.get("unit_addr", 0)
self.comment_addr = kwargs.get("comment_addr", 0)
self.inv_conv_addr = kwargs.get("inv_conv_addr", 0)
for i in range(nr):
key = f"text_{i}"
self[key] = 0
self.referenced_blocks[key] = kwargs[key]
self.default_addr = 0
key = "default_addr"
if "default_addr" in kwargs:
default = kwargs["default_addr"]
else:
default = kwargs.get("default", b"")
self.referenced_blocks[key] = default
self.conversion_type = v4c.CONVERSION_TYPE_TABX
self.precision = kwargs.get("precision", 0)
self.flags = kwargs.get("flags", 0)
self.ref_param_nr = nr + 1
self.val_param_nr = nr
self.min_phy_value = kwargs.get("min_phy_value", 0)
self.max_phy_value = kwargs.get("max_phy_value", 0)
for i in range(nr):
self[f"val_{i}"] = kwargs[f"val_{i}"]
elif kwargs["conversion_type"] == v4c.CONVERSION_TYPE_RTABX:
self.referenced_blocks = {}
nr = kwargs["ref_param_nr"] - 1
self.block_len = (nr * 8 * 3) + 88
self.links_nr = nr + 5
self.name_addr = kwargs.get("name_addr", 0)
self.unit_addr = kwargs.get("unit_addr", 0)
self.comment_addr = kwargs.get("comment_addr", 0)
self.inv_conv_addr = kwargs.get("inv_conv_addr", 0)
for i in range(nr):
key = f"text_{i}"
self[key] = 0
self.referenced_blocks[key] = kwargs[key]
self.default_addr = 0
self.default_lower = self.default_upper = 0
if "default_addr" in kwargs:
default = kwargs["default_addr"]
else:
default = kwargs.get("default", b"")
if isinstance(default, bytes) and b"{X}" in default:
default = (
default.decode("latin-1").replace("{X}", "X").split('"')[1]
)
default = ChannelConversion(
conversion_type=v4c.CONVERSION_TYPE_ALG, formula=default
)
self.referenced_blocks["default_addr"] = default
else:
self.referenced_blocks["default_addr"] = default
self.conversion_type = v4c.CONVERSION_TYPE_RTABX
self.precision = kwargs.get("precision", 0)
self.flags = kwargs.get("flags", 0)
self.ref_param_nr = nr + 1
self.val_param_nr = nr * 2
self.min_phy_value = kwargs.get("min_phy_value", 0)
self.max_phy_value = kwargs.get("max_phy_value", 0)
for i in range(nr):
self[f"lower_{i}"] = kwargs[f"lower_{i}"]
self[f"upper_{i}"] = kwargs[f"upper_{i}"]
elif kwargs["conversion_type"] == v4c.CONVERSION_TYPE_TTAB:
self.block_len = ((kwargs["links_nr"] - 4) * 8 * 2) + 88
self.links_nr = kwargs["links_nr"]
self.name_addr = kwargs.get("name_addr", 0)
self.unit_addr = kwargs.get("unit_addr", 0)
self.comment_addr = kwargs.get("comment_addr", 0)
self.inv_conv_addr = kwargs.get("inv_conv_addr", 0)
for i in range(kwargs["links_nr"] - 4):
self[f"text_{i}"] = kwargs.get(f"text_{i}", 0)
self.conversion_type = v4c.CONVERSION_TYPE_TTAB
self.precision = kwargs.get("precision", 0)
self.flags = kwargs.get("flags", 0)
self.ref_param_nr = kwargs["links_nr"] - 4
self.val_param_nr = kwargs["links_nr"] - 4 + 1
self.min_phy_value = kwargs.get("min_phy_value", 0)
self.max_phy_value = kwargs.get("max_phy_value", 0)
for i in range(kwargs["links_nr"] - 4):
self[f"val_{i}"] = kwargs[f"val_{i}"]
self.val_default = kwargs["val_default"]
elif kwargs["conversion_type"] == v4c.CONVERSION_TYPE_BITFIELD:
self.referenced_blocks = {}
nr = kwargs["val_param_nr"]
self.block_len = (nr * 8 * 2) + 80
self.links_nr = nr + 4
self.name_addr = kwargs.get("name_addr", 0)
self.unit_addr = kwargs.get("unit_addr", 0)
self.comment_addr = kwargs.get("comment_addr", 0)
self.inv_conv_addr = kwargs.get("inv_conv_addr", 0)
for i in range(nr):
key = f"text_{i}"
self[key] = 0
self.referenced_blocks[key] = kwargs[key]
self.conversion_type = v4c.CONVERSION_TYPE_BITFIELD
self.precision = kwargs.get("precision", 0)
self.flags = kwargs.get("flags", 0)
self.ref_param_nr = nr
self.val_param_nr = nr
self.min_phy_value = kwargs.get("min_phy_value", 0)
self.max_phy_value = kwargs.get("max_phy_value", 0)
for i in range(nr):
self[f"mask_{i}"] = kwargs[f"mask_{i}"]
else:
message = "Conversion {} dynamic creation not implementated"
message = message.format(kwargs["conversion_type"])
logger.exception(message)
raise MdfException(message)
def to_blocks(self, address, blocks, defined_texts, cc_map):
text = self.name
if text:
if text in defined_texts:
self.name_addr = defined_texts[text]
else:
tx_block = TextBlock(text=text)
self.name_addr = address
defined_texts[text] = address
tx_block.address = address
address += tx_block.block_len
blocks.append(tx_block)
else:
self.name_addr = 0
text = self.unit
if text:
if text in defined_texts:
self.unit_addr = defined_texts[text]
else:
tx_block = TextBlock(text=text)
self.unit_addr = address
defined_texts[text] = address
tx_block.address = address
address += tx_block.block_len
blocks.append(tx_block)
else:
self.unit_addr = 0
if self.conversion_type == v4c.CONVERSION_TYPE_ALG:
text = self.formula
if text:
if text in defined_texts:
self.formula_addr = defined_texts[text]
else:
tx_block = TextBlock(text=text)
self.formula_addr = address
defined_texts[text] = address
tx_block.address = address
address += tx_block.block_len
blocks.append(tx_block)
else:
self.formula_addr = 0
text = self.comment
if text:
if text in defined_texts:
self.comment_addr = defined_texts[text]
else:
meta = text.startswith("<CCcomment")
tx_block = TextBlock(text=text, meta=meta)
self.comment_addr = address
defined_texts[text] = address
tx_block.address = address
address += tx_block.block_len
blocks.append(tx_block)
else:
self.comment_addr = 0
if self.referenced_blocks:
for key, block in self.referenced_blocks.items():
if block:
if isinstance(block, ChannelConversion):
address = block.to_blocks(
address, blocks, defined_texts, cc_map
)
self[key] = block.address
else:
text = block
if text in defined_texts:
self[key] = defined_texts[text]
else:
block = TextBlock(text=text)
defined_texts[text] = address
blocks.append(block)
self[key] = address
address += block["block_len"]
else:
self[key] = 0
bts = bytes(self)
if bts in cc_map:
self.address = cc_map[bts]
else:
blocks.append(bts)
self.address = address
cc_map[bts] = address
address += self.block_len
return address
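# Usage sketch (caller names are illustrative): when saving a file the writer typically loops
#   address = conv.to_blocks(address, blocks, defined_texts, cc_map)
# and later writes every collected block at its assigned address; defined_texts and cc_map
# ensure that identical texts and conversions are stored only once.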
def convert(self, values):
conversion_type = self.conversion_type
if conversion_type == v4c.CONVERSION_TYPE_NON:
pass
elif conversion_type == v4c.CONVERSION_TYPE_LIN:
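# Linear rule: phys = a * raw + b; skipped entirely when (a, b) == (1, 0).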
a = self.a
b = self.b
if (a, b) != (1, 0):
if values.dtype.names:
names = values.dtype.names
name = names[0]
vals = values[name]
vals = vals * a
if b:
vals += b
values = np.core.records.fromarrays(
[vals] + [values[name] for name in names[1:]],
dtype=[(name, vals.dtype, vals.shape[1:])]
+ [
(name, values[name].dtype, values[name].shape[1:])
for name in names[1:]
],
)
else:
values = values * a
if b:
values += b
elif conversion_type == v4c.CONVERSION_TYPE_RAT:
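# Rational rule: phys = (P1*X**2 + P2*X + P3) / (P4*X**2 + P5*X + P6); the two parameter patterns below collapse to purely linear forms.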
P1 = self.P1
P2 = self.P2
P3 = self.P3
P4 = self.P4
P5 = self.P5
P6 = self.P6
names = values.dtype.names
if names:
name = names[0]
vals = values[name]
if (P1, P4, P5, P6) == (0, 0, 0, 1):
if (P2, P3) != (1, 0):
vals = values[name] * P2
if P3:
vals = vals + P3
elif (P3, P4, P5, P6) == (0, 0, 1, 0):
if (P1, P2) != (1, 0):
vals = values[name] * P1
if P2:
vals = vals + P2
else:
X = vals
try:
vals = evaluate(v4c.CONV_RAT_TEXT)
except TypeError:
vals = (P1 * X ** 2 + P2 * X + P3) / (P4 * X ** 2 + P5 * X + P6)
values = np.core.records.fromarrays(
[vals] + [values[name] for name in names[1:]],
dtype=[(name, vals.dtype, vals.shape[1:])]
+ [
(name, values[name].dtype, values[name].shape[1:])
for name in names[1:]
],
)
else:
X = values
if (P1, P4, P5, P6) == (0, 0, 0, 1):
if (P2, P3) != (1, 0):
values = values * P2
if P3:
values = values + P3
elif (P3, P4, P5, P6) == (0, 0, 1, 0):
if (P1, P2) != (1, 0):
values = values * P1
if P2:
values += P2
else:
try:
values = evaluate(v4c.CONV_RAT_TEXT)
except TypeError:
values = (P1 * X ** 2 + P2 * X + P3) / (
P4 * X ** 2 + P5 * X + P6
)
elif conversion_type == v4c.CONVERSION_TYPE_ALG:
X = values
values = evaluate(self.formula)
elif conversion_type in (v4c.CONVERSION_TYPE_TABI, v4c.CONVERSION_TYPE_TAB):
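# Value tables: TABI interpolates linearly between the (raw, phys) pairs, TAB snaps to the physical value of the nearest raw entry.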
nr = self.val_param_nr // 2
raw_vals = np.array([self[f"raw_{i}"] for i in range(nr)])
phys = np.array([self[f"phys_{i}"] for i in range(nr)])
if conversion_type == v4c.CONVERSION_TYPE_TABI:
values = np.interp(values, raw_vals, phys)
else:
dim = raw_vals.shape[0]
inds = np.searchsorted(raw_vals, values)
inds[inds >= dim] = dim - 1
inds2 = inds - 1
inds2[inds2 < 0] = 0
cond = np.abs(values - raw_vals[inds]) >= np.abs(
values - raw_vals[inds2]
)
values = np.where(cond, phys[inds2], phys[inds])
elif conversion_type == v4c.CONVERSION_TYPE_RTAB:
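# Range table: samples that fall inside one of the lower/upper ranges take that range's phys value; everything else gets the scalar default.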
nr = (self.val_param_nr - 1) // 3
lower = np.array([self[f"lower_{i}"] for i in range(nr)])
upper = np.array([self[f"upper_{i}"] for i in range(nr)])
phys = np.array([self[f"phys_{i}"] for i in range(nr)])
default = self.default
if values.dtype.kind == "f":
idx1 = np.searchsorted(lower, values, side="right") - 1
idx2 = np.searchsorted(upper, values, side="right")
else:
idx1 = np.searchsorted(lower, values, side="right") - 1
idx2 = np.searchsorted(upper, values, side="right") - 1
idx_ne = np.nonzero(idx1 != idx2)[0]
idx_eq = np.nonzero(idx1 == idx2)[0]
new_values = np.zeros(len(values), dtype=phys.dtype)
if len(idx_ne):
new_values[idx_ne] = default
if len(idx_eq):
new_values[idx_eq] = phys[idx1[idx_eq]]
values = new_values
elif conversion_type == v4c.CONVERSION_TYPE_TABX:
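# Value-to-text table: each listed raw value maps to a referenced text block (or nested conversion); unmatched samples fall back to the default_addr reference.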
nr = self.val_param_nr
raw_vals = [self[f"val_{i}"] for i in range(nr)]
phys = [self.referenced_blocks[f"text_{i}"] for i in range(nr)]
default = self.referenced_blocks["default_addr"]
x = sorted(zip(raw_vals, phys))
raw_vals = np.array([e[0] for e in x], dtype="<i8")
phys = [e[1] for e in x]
names = values.dtype.names
if names:
name = names[0]
vals = values[name]
shape = vals.shape
vals = vals.flatten()
ret = np.array([None] * len(vals), dtype="O")
idx1 = np.searchsorted(raw_vals, vals, side="right") - 1
idx2 = np.searchsorted(raw_vals, vals, side="left")
idx = np.argwhere(idx1 != idx2)
# -*- coding: utf-8 -*-
"""
Functions related to my thesis of forward and inverse modelling of terrestrial
cosmogenic nuclides to detect past glaciations.
The calculations are based on Vermeesch 2007.
Forward function calculates nuclide concentrations with depth.
Find_times function randomly chooses times that are tested in the Inverse function.
<NAME> 5.5.2020
"""
import numpy as np
def forward(isotope, time_ice, time_degla ,block_erosion, const_erosion):
'''
Function to calculate nuclide concentration with depth.
Parameters:
isotope -- 1 Be-10, 2 Al-26, 3 C-14
time_ice -- array for ice coverage [ka]
time_degla -- array for no ice coverage [ka]
block_erosion -- array the amount of erosion instantly after glaciation [m]
const_erosion -- float, constant erosion rate during interglacial [cm/a]
Output:
z -- depth [m]
N_final -- final number of nuclides [kg of quartz]
'''
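# Usage sketch (hypothetical input arrays): with glaciation / deglaciation start times given in ka,
#   z, N_final = forward(1, time_ice, time_degla, block_erosion, const_erosion=0.0)
# would return the modelled Be-10 depth profile for that exposure history.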
# Constants
rho = 2650 # kg/m3
depth_m = 10 # model depth, m
Ln = 160 # g/cm2 Vertical attenuation length, neutrons, Gosse 2001, Vermeesch 2007
Lsm1 = 738 # g/cm2 Vertical attenuation length, slow muons, Vermeesch 2007
Lsm2 = 2688 # g/cm2 Vertical attenuation length, slow muons, Vermeesch 2007
Lfm = 4360 # g/cm2 Vertical attenuation length, fast muons, Vermeesch 2007
# Rename variables
erosion = block_erosion
ec = const_erosion # constant erosion cm/a
# Isotope related constants
if (isotope == 1):
# Be-10
P_0_g = 3.95 # Production rate, atoms/g, Stroeven et al. 2015
t_half = 1.387e6 # half-life, a, Korschinek et al. 2010
name = 'Be'
# Relative production
F0 = 0.9724 # Neutrons
F1 = 0.0186 # Slow muons
F2 = 0.004 # Slow muons
F3 = 0.005 # Fast muons
elif (isotope == 2):
# Al-26
P_0_g = 26.71 # Production rate, atoms/g, Stroeven et al. 2016
t_half = 7.05e5 # half-life, a, Norris 1983
name = 'Al'
# Relative production
F0 = 0.9655 # Neutrons
F1 = 0.0233 # Slow muons
F2 = 0.005 # Slow muons
F3 = 0.0062 # Fast muons
elif (isotope == 3):
# C-14
P_0_g = 15.5 # Production rate, atoms/g, Miller 2006
t_half = 5730 # half-life, a, Dunai 2010
name = 'C'
# Relative production
F0 = 0.83 # Neutrons
F1 = 0.0691 # Slow muons
F2 = 0.0809 # Slow muons
F3 = 0.02 # Fast muons
# Time arrays from ka to years
ti = time_ice*1e3 # a
td = time_degla*1e3 # a
# If the first timestep is glaciation, no nuclides are formed yet, so remove the first step
if (len(ti)>len(td)):
ti = np.delete(ti,0)
# Unit conversions to SI
P_0 = P_0_g * 1000 # atoms/kg/a
L0 = Ln*10 # kg/m2
L1 = Lsm1*10
L2 = Lsm2*10
L3 = Lfm*10
# Decay constant
lambda1 = np.log(2)/t_half
# Arrays
spacing = 0.001 # Spacing for arrays
z = np.arange(-0.1,depth_m,spacing) # Depth (m)
N = np.zeros(len(z)) # Number of nuclides
N_decay = np.zeros(len(z)) # Decay during glaciation
N_final = np.zeros(len(z)) # After every step
N_erosion = np.zeros(len(z)) # After erosion and glaciation
N_ex = np.zeros(len(z)) # After exposure
neu = np.zeros(len(z)) # Neutrons
slow_muon1 = np.zeros(len(z)) # Slow muons
slow_muon2 = np.zeros(len(z)) # Slow muons
fast_muon = np.zeros(len(z)) # Fast muons
# Loop for glacial cycle: exposure, decay, erosion
for i in range(len(ti)-1):
# Exposure
t_ex = td[i] - ti[i]
# Glaciation
t_gla = ti[i] - ti[i+1]
# Production paths
neu = F0/(lambda1 + ec*rho/L0) * np.exp(-z*rho/L0) * \
(1 - np.exp(-(lambda1 + ec*rho/L0)*t_ex))
slow_muon1 = F1/(lambda1 + ec*rho/L1) * np.exp(-z*rho/L1) * \
(1 - np.exp(-(lambda1 + ec*rho/L1)*t_ex))
slow_muon2 = F2/(lambda1 + ec*rho/L2) * np.exp(-z*rho/L2) * \
(1 - np.exp(-(lambda1 + ec*rho/L2)*t_ex))
fast_muon = F3/(lambda1 + ec*rho/L3) * np.exp(-z*rho/L3) * \
(1 - np.exp(-(lambda1 + ec*rho/L3)*t_ex))
# Total concentration after exposure
N_ex = P_0 * (neu + slow_muon1 + slow_muon2 + fast_muon) - \
(N-N*np.exp(-lambda1*t_ex))
for j in range(len(z)):
# Number of nuclides after glaciation
N_decay[j] = N_ex[j]*np.exp(-lambda1*t_gla)
# Index of last value
N_idx = j
#Index of erosion
idx = 0
#Erosion
# Do not calculate if there is no erosion
if erosion[i] != 0:
# Find the index of the erosion depth. Depth rounded to 4 decimals
a = np.where(np.around(z,4)==erosion[i])
idx = a[0][0]
for j in range(len(z)):
if ((j+idx) <= N_idx):
#Inherited nuclides are transferred
new_idx = j+idx
N_erosion[j] = N_decay[new_idx]
else:
#If no inheritance, set to 0
N_erosion[j] = 0
else:
N_erosion = N_decay
# Rename for the next loop
N = N_erosion
# Final exposure
t_ex = td[-1]
# Production pathways
neu = F0/(lambda1 + ec*rho/L0) * np.exp(-z*rho/L0) * \
(1 - np.exp(-(lambda1 + ec*rho/L0)*t_ex))
slow_muon1 = F1/(lambda1 + ec*rho/L1) * np.exp(-z*rho/L1) * \
(1 - np.exp(-(lambda1 + ec*rho/L1)*t_ex))
slow_muon2 = F2/(lambda1 + ec*rho/L2) * np.exp(-z*rho/L2) * \
(1 - np.exp(-(lambda1 + ec*rho/L2)*t_ex))
fast_muon = F3/(lambda1 + ec*rho/L3) * np.exp(-z*rho/L3) * \
(1 - np.exp(-(lambda1 + ec*rho/L3)*t_ex))
# Final concentration
N_final = P_0 * (neu + slow_muon1 + slow_muon2 + fast_muon) +\
N*np.exp(-lambda1*t_ex)
return z, N_final
"""
ckwg +31
Copyright 2016-2017 by Kitware, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither name of Kitware, Inc. nor the names of any contributors may be used
to endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
==============================================================================
Interface to vital::homography
"""
import six
import ctypes
import collections
import numpy
from vital.exceptions.math import PointMapsToInfinityException
from vital.types import (
EigenArray
)
from vital.util import VitalObject, TYPE_NAME_MAP
class Homography (VitalObject):
@classmethod
def from_matrix(cls, m, datatype=ctypes.c_double):
"""
Create a homography from an existing 3x3 matrix.
If the data type of the matrix given is not the same as ``datatype``,
it will be automatically converted.
:param m: Matrix to base the new homography on. This should be a 3x3
matrix.
:type m: collections.Iterable[collections.Iterable[float]] | vital.types.EigenArray
:param datatype: Type to store data in the homography.
:type datatype: ctypes._SimpleCData
:return: New homography instance whose transform is equal to the given
matrix.
:rtype: Homography
"""
# noinspection PyProtectedMember
tchar = datatype._type_
m = EigenArray.from_iterable(m, datatype, (3, 3))
cptr = cls._call_cfunc(
'vital_homography_%s_new_from_matrix' % tchar,
[EigenArray.c_ptr_type(3, 3, datatype)], [m],
Homography.c_ptr_type()
)
return Homography(from_cptr=cptr)
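# Usage sketch: an identity homography could be created with
#   h = Homography.from_matrix([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
# any nested 3x3 iterable (or EigenArray) is converted to the requested datatype first.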
@classmethod
def from_translation(cls, dx, dy, datatype=ctypes.c_double):
"""
Return homography that represents a translation.
:param dx: Homography will displace input points by this amount along
the x-axis.
:type dx: float | double | int
:param dy: Homography will displace input points by this value along
the y-axis.
:type dy: float | double | int
:return: New homography instance.
:rtype: Homography
"""
m = numpy.matrix([[1, 0, dx], [0, 1, dy], [0, 0, 1]])
return cls.from_matrix(m, datatype)
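# Usage sketch: Homography.from_translation(5.0, -2.0) would map a point (x, y) to (x + 5, y - 2).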
import numpy as np
import scipy
from scipy import fftpack, signal, integrate
# This file contains all of the functions necessary to perform iterative time-domain deconvolution
# as outlined in Ligorria & Ammon 1999.
# --------------------------------------------------------------------------------------------------
# Last updated 10/22/2019 by <EMAIL>
# --------------------------------------------------------------------------------------------------
def gauss_filter(dt, nft, f0):
"""
Construct a gaussian filter of a prescribed width in the frequency-domain.
:param dt: sampling interval (seconds) -> typically the sampling interval of the seismic data
:param nft: length of gaussian filter (samples)
:param f0: gaussian width factor (the constant in the denominator of the exponential function)
:return: gauss: Array containing the desired gaussian filter in the frequency-domain
"""
df = 1.0/(nft * dt)
nft21 = int(0.5*nft + 1)
# Construct x-axis (frequency) vectors
f = df*np.arange(0, nft21, 1)
w = 2*np.pi*f
# Construct gaussian filter
gauss = np.zeros(nft)
gauss[0:nft21] = np.exp(-0.25*(w/f0)**2)/dt
gauss[nft21:] = np.flip(gauss[1:nft21-1])
return gauss
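# Example sketch (hypothetical parameters): for data sampled at 0.025 s, a 2048-sample filter with
# width factor 2.5 would be built as gf = gauss_filter(0.025, 2048, 2.5) and applied to a trace with
# gauss_convolve(trace, gf, 2048, 0.025), mirroring what iterdecon() does internally.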
def gauss_convolve(a, b, nf, ndt):
"""
Convolve time-series data with a gaussian filter by Fourier transforming into the frequency-domain, multiplying,
and inverse-Fourier transforming back into the time-domain.
:param a: Array of time series data
:param b: Gaussian filter (output of gauss_filter() function)
:param nf: Number of points (samples) in Fourier transform
:param ndt: Sampling interval of data (seconds)
:return: ab: Array containing the convolution of the data and the gaussian filter
"""
afr = fftpack.fft(a, nf)
bfr = (afr * b) * ndt
ab = np.real(np.fft.ifft(bfr, nf))
return ab
def correlate(R, W, nfft):
"""
Calculate the cross-correlation between two time-series by Fourier transforming and multiplying by the complex
conjugate of one of the time-series.
:param R: Array of time series data (radial component for Z -> R receiver functions)
:param W: Array of time series data (vertical component for Z -> R receiver functions)
:param nfft: Number of points (samples) in Fourier transform
:return: x: Array containing the resulting cross-correlation
"""
x = fftpack.ifft(fftpack.fft(R, nfft) * np.conj(fftpack.fft(W, nfft)), nfft).real
return x
def phase_shift(x, nfft, dt, tshift):
"""
Shift a vector of time-series data by a given number of seconds.
:param x: Input vector of time-series data
:param nfft: Number of points (samples) in Fourier transform
:param dt: Sampling interval of time-series data (seconds)
:param tshift: Desired time shift (seconds)
:return: x: Time shifted version of input vector
"""
# Go into the frequency domain
xf = fftpack.fft(x, nfft)
# Phase shift in radians
shift_i = round(tshift / dt)
p = 2 * np.pi * np.arange(1, nfft + 1) * shift_i / (nfft)
# Apply shift
xf = xf * (np.cos(p) - 1j * np.sin(p))
# Back into time
x = fftpack.ifft(xf, nfft).real / np.cos(2 * np.pi * shift_i / nfft)
return x
def next_power_2(x):
"""
Determine the next integer that is 2 raised to some power.
:param x: Number which you would like to find the next power of 2 for
:return: x: Number which is 2 raised to some power
"""
# Function which finds the nearest number that is 2 raised to some power
return 1 if x == 0 else 2**(x-1).bit_length()
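# e.g. next_power_2(1000) -> 1024 and next_power_2(1024) -> 1024.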
def iterdecon(num, den, dt, nt, tshift, f0, itmax, errtol):
"""
Calculate a receiver function using the iterative time-domain deconvolution algorithm outlined by
Ligorria & Ammon 1999.
:param num: Numerator in deconvolution (radial component data for Z -> R receiver functions)
:param den: Denominator in deconvolution (vertical component data for Z -> R receiver functions)
:param dt: Sampling interval of data (seconds)
:param nt: Length of input data vectors (samples)
:param tshift: Desired time shift of resulting receiver function (seconds)
:param f0: Gaussian width factor determining width of gaussian filter used in deconvolution
:param itmax: Maximum allowed number of iterations before giving up and outputting receiver function
:param errtol: Minimum change in error between iterations before giving up and outputting receiver function
:return: RFI, RMS: An array containing the resulting receiver function (time-domain) and an array containing the
RMS error from each iteration.
"""
# Initiate iterative deconvolution
rms = np.zeros(itmax+1)
nfft = next_power_2(nt)
p0 = np.zeros(nfft)
# Resize and rename numerator and denominator
u0 = np.zeros(nfft)
w0 = u0.copy()
u0[0:nt] = num
w0[0:nt] = den
# Construct Gaussian Filter
gF = gauss_filter(dt, nfft, f0)
# Apply Gaussian Filter to signals
u = gauss_convolve(u0, gF, nfft, dt)
w = gauss_convolve(w0, gF, nfft, dt)
# Get Vertical Component in Frequency Domain
wf = fftpack.fft(w0, nfft)
r = u.copy()
# Get power in numerator for error scaling
powerU = np.sum(u**2)
# Loop through the iterations
it = -1
sumsq_i = 1
d_error = 100*powerU + errtol
maxlag = int(0.5*nfft)
while abs(d_error) > errtol and it < itmax:
# Advance iteration
it = it + 1
# Cross - correlate signals
rw = correlate(r, w, nfft)
rw = rw/np.sum(w**2)
# Get index of maximum of cross correlation
i1 = np.argmax(abs(rw[0:maxlag]))
amp = rw[i1]/dt
# Compute predicted deconvolution
p0[i1] += amp
p = gauss_convolve(p0, gF, nfft, dt)
p = gauss_convolve(p, wf, nfft, dt)
# Compute residual with filtered numerator
r = u - p.copy()
sumsq = np.sum(r**2)/powerU
rms[it] = sumsq
d_error = 100*(sumsq_i - sumsq)
sumsq_i = sumsq
# Compute final receiver function
p = gauss_convolve(p0, gF, nfft, dt)
# Apply optional time shift to receiver function
p = phase_shift(p, nfft, dt, tshift)
# Output first nt samples of final receiver function
rfi = p[0:nt]
# Output RMS values
rms = rms[0:it]
return rfi, rms
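# Usage sketch (hypothetical values): with radial trace r and vertical trace z sampled at 0.025 s,
#   rf, rms = iterdecon(r, z, 0.025, len(r), tshift=10, f0=2.5, itmax=200, errtol=0.001)
# yields a Z -> R receiver function whose direct P arrival is shifted to 10 s.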
def rf_quality(rfdata, delta, f0, tshift=0):
"""
Calculate receiver function quality metric using the algorithm outlined in Burky et al. 2019
:param rfdata: Array containing receiver function data
:param delta: Sampling interval of receiver function data
:param f0: Gaussian width factor used in iterative time domain deconvolution
:param tshift: Time shift of receiver function in seconds (DEFAULT = 0)
:return: quality: Float in [0, 1] representing the quality of receiver function data
"""
# OLD METHOD
# Take derivative of data
# drf = np.diff(rfdata/delta)
# pks = scipy.signal.find_peaks(rfdata[0:eidx], height=0)
# Use start of receiver function (index 0) instead of finding idx1
# NEW METHOD
# Find largest peak in window from start time to time-shift plus 3 standard deviations
if tshift != 0:
bidx = int(round((tshift-(3/f0))/delta))
eidx = int(round((tshift+(3/f0))/delta))
p_int = scipy.integrate.trapz(np.abs(rfdata[bidx:eidx]), dx=delta)
else:
eidx = int(round((tshift+(3/f0))/delta))
# Integral of initial gaussian pulse (P arrival)
p_int = scipy.integrate.trapz(np.abs(rfdata[0:eidx]), dx=delta)
# Integral of entire receiver function
rf_int = scipy.integrate.trapz(np.abs(rfdata), dx=delta)
# Assumed metric: the ratio of the P-pulse area to the total area, which lies in [0, 1]
quality = p_int / rf_int
return quality
# This module has been generated automatically from space group information
# obtained from the Computational Crystallography Toolbox
#
"""
Space groups
This module contains a list of all the 230 space groups that can occur in
a crystal. The variable space_groups contains a dictionary that maps
space group numbers and space group names to the corresponding space
group objects.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The Mosaic Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
import numpy as N
class SpaceGroup(object):
"""
Space group
All possible space group objects are created in this module. Other
modules should access these objects through the dictionary
space_groups rather than create their own space group objects.
"""
def __init__(self, number, symbol, transformations):
"""
:param number: the number assigned to the space group by
international convention
:type number: int
:param symbol: the Hermann-Mauguin space-group symbol as used
in PDB and mmCIF files
:type symbol: str
:param transformations: a list of space group transformations,
each consisting of a tuple of three
integer arrays (rot, tn, td), where
rot is the rotation matrix and tn/td
are the numerator and denominator of the
translation vector. The transformations
are defined in fractional coordinates.
:type transformations: list
"""
self.number = number
self.symbol = symbol
self.transformations = transformations
self.transposed_rotations = N.array([N.transpose(t[0])
for t in transformations])
self.phase_factors = N.exp(N.array([(-2j*N.pi*t[1])/t[2]
for t in transformations]))
def __repr__(self):
return "SpaceGroup(%d, %s)" % (self.number, repr(self.symbol))
def __len__(self):
"""
:return: the number of space group transformations
:rtype: int
"""
return len(self.transformations)
def symmetryEquivalentMillerIndices(self, hkl):
"""
:param hkl: a set of Miller indices
:type hkl: Scientific.N.array_type
:return: a tuple (miller_indices, phase_factor) of two arrays
of length equal to the number of space group
transformations. miller_indices contains the Miller
indices of each reflection equivalent by symmetry to the
reflection hkl (including hkl itself as the first element).
phase_factor contains the phase factors that must be applied
to the structure factor of reflection hkl to obtain the
structure factor of the symmetry equivalent reflection.
:rtype: tuple
"""
hkls = N.dot(self.transposed_rotations, hkl)
p = N.multiply.reduce(self.phase_factors**hkl, -1)
return hkls, p
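# Usage sketch: sg = space_groups['P 1 21 1']; hkls, phases = sg.symmetryEquivalentMillerIndices(N.array([1, 2, 3]))
# returns one Miller-index triple per symmetry operation together with the matching phase factors.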
space_groups = {}
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(1, 'P 1', transformations)
space_groups[1] = sg
space_groups['P 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(2, 'P -1', transformations)
space_groups[2] = sg
space_groups['P -1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(3, 'P 1 2 1', transformations)
space_groups[3] = sg
space_groups['P 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(4, 'P 1 21 1', transformations)
space_groups[4] = sg
space_groups['P 1 21 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(5, 'C 1 2 1', transformations)
space_groups[5] = sg
space_groups['C 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(6, 'P 1 m 1', transformations)
space_groups[6] = sg
space_groups['P 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(7, 'P 1 c 1', transformations)
space_groups[7] = sg
space_groups['P 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(8, 'C 1 m 1', transformations)
space_groups[8] = sg
space_groups['C 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(9, 'C 1 c 1', transformations)
space_groups[9] = sg
space_groups['C 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(10, 'P 1 2/m 1', transformations)
space_groups[10] = sg
space_groups['P 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(11, 'P 1 21/m 1', transformations)
space_groups[11] = sg
space_groups['P 1 21/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(12, 'C 1 2/m 1', transformations)
space_groups[12] = sg
space_groups['C 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(13, 'P 1 2/c 1', transformations)
space_groups[13] = sg
space_groups['P 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(14, 'P 1 21/c 1', transformations)
space_groups[14] = sg
space_groups['P 1 21/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(15, 'C 1 2/c 1', transformations)
space_groups[15] = sg
space_groups['C 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(16, 'P 2 2 2', transformations)
space_groups[16] = sg
space_groups['P 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(17, 'P 2 2 21', transformations)
space_groups[17] = sg
space_groups['P 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(18, 'P 21 21 2', transformations)
space_groups[18] = sg
space_groups['P 21 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(19, 'P 21 21 21', transformations)
space_groups[19] = sg
space_groups['P 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(20, 'C 2 2 21', transformations)
space_groups[20] = sg
space_groups['C 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(21, 'C 2 2 2', transformations)
space_groups[21] = sg
space_groups['C 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(22, 'F 2 2 2', transformations)
space_groups[22] = sg
space_groups['F 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(23, 'I 2 2 2', transformations)
space_groups[23] = sg
space_groups['I 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(24, 'I 21 21 21', transformations)
space_groups[24] = sg
space_groups['I 21 21 21'] = sg
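# The blocks below (space groups 25-46) cover the orthorhombic mm2 class:
# each starts from the identity, a two-fold rotation along c
# ([-1,0,0, 0,-1,0, 0,0,1]) and two mirror or glide planes, extended by the
# appropriate centering translations in the C-, A-, F- and I-centred
# settings.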
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(25, 'P m m 2', transformations)
space_groups[25] = sg
space_groups['P m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(26, 'P m c 21', transformations)
space_groups[26] = sg
space_groups['P m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(27, 'P c c 2', transformations)
space_groups[27] = sg
space_groups['P c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(28, 'P m a 2', transformations)
space_groups[28] = sg
space_groups['P m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(29, 'P c a 21', transformations)
space_groups[29] = sg
space_groups['P c a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(30, 'P n c 2', transformations)
space_groups[30] = sg
space_groups['P n c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(31, 'P m n 21', transformations)
space_groups[31] = sg
space_groups['P m n 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(32, 'P b a 2', transformations)
space_groups[32] = sg
space_groups['P b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(33, 'P n a 21', transformations)
space_groups[33] = sg
space_groups['P n a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(34, 'P n n 2', transformations)
space_groups[34] = sg
space_groups['P n n 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(35, 'C m m 2', transformations)
space_groups[35] = sg
space_groups['C m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(36, 'C m c 21', transformations)
space_groups[36] = sg
space_groups['C m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(37, 'C c c 2', transformations)
space_groups[37] = sg
space_groups['C c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(38, 'A m m 2', transformations)
space_groups[38] = sg
space_groups['A m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(39, 'A b m 2', transformations)
space_groups[39] = sg
space_groups['A b m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(40, 'A m a 2', transformations)
space_groups[40] = sg
space_groups['A m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(41, 'A b a 2', transformations)
space_groups[41] = sg
space_groups['A b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(42, 'F m m 2', transformations)
space_groups[42] = sg
space_groups['F m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(43, 'F d d 2', transformations)
space_groups[43] = sg
space_groups['F d d 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(44, 'I m m 2', transformations)
space_groups[44] = sg
space_groups['I m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(45, 'I b a 2', transformations)
space_groups[45] = sg
space_groups['I b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(46, 'I m a 2', transformations)
space_groups[46] = sg
space_groups['I m a 2'] = sg
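# From space group 47 (P m m m) onward the entries are centrosymmetric
# (orthorhombic mmm class): every operation list contains the pure
# inversion (rot = -identity).  Symbols carrying a ':2' suffix, such as
# 'P n n n :2', appear to refer to origin choice 2 of the International
# Tables; this is an annotation only and does not affect the data.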
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(47, 'P m m m', transformations)
space_groups[47] = sg
space_groups['P m m m'] = sg
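# Lookup sketch (comment only): each block registers the same SpaceGroup
# object under both its number and its Hermann-Mauguin symbol, so
#     space_groups[47]
#     space_groups['P m m m']
# return the identical object.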
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(48, 'P n n n :2', transformations)
space_groups[48] = sg
space_groups['P n n n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(49, 'P c c m', transformations)
space_groups[49] = sg
space_groups['P c c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(50, 'P b a n :2', transformations)
space_groups[50] = sg
space_groups['P b a n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(51, 'P m m a', transformations)
space_groups[51] = sg
space_groups['P m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(52, 'P n n a', transformations)
space_groups[52] = sg
space_groups['P n n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(53, 'P m n a', transformations)
space_groups[53] = sg
space_groups['P m n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(54, 'P c c a', transformations)
space_groups[54] = sg
space_groups['P c c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(55, 'P b a m', transformations)
space_groups[55] = sg
space_groups['P b a m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(56, 'P c c n', transformations)
space_groups[56] = sg
space_groups['P c c n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(57, 'P b c m', transformations)
space_groups[57] = sg
space_groups['P b c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(58, 'P n n m', transformations)
space_groups[58] = sg
space_groups['P n n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(59, 'P m m n :2', transformations)
space_groups[59] = sg
space_groups['P m m n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(60, 'P b c n', transformations)
space_groups[60] = sg
space_groups['P b c n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(61, 'P b c a', transformations)
space_groups[61] = sg
space_groups['P b c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(62, 'P n m a', transformations)
space_groups[62] = sg
space_groups['P n m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(63, 'C m c m', transformations)
space_groups[63] = sg
space_groups['C m c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(64, 'C m c a', transformations)
space_groups[64] = sg
space_groups['C m c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(65, 'C m m m', transformations)
space_groups[65] = sg
space_groups['C m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(66, 'C c c m', transformations)
space_groups[66] = sg
space_groups['C c c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(67, 'C m m a', transformations)
space_groups[67] = sg
space_groups['C m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(68, 'C c c a :2', transformations)
space_groups[68] = sg
space_groups['C c c a :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = | N.array([1,2,2]) | numpy.array |
import numpy as np
def eval_relation_recall(sg_entry,
roidb_entry,
result_dict,
mode,
iou_thresh):
# gt
gt_inds = np.where(roidb_entry['max_overlaps'] == 1)[0]
gt_boxes = roidb_entry['boxes'][gt_inds].copy().astype(float)
num_gt_boxes = gt_boxes.shape[0]
gt_relations = roidb_entry['gt_relations'].copy()
gt_classes = roidb_entry['gt_classes'].copy()
num_gt_relations = gt_relations.shape[0]
if num_gt_relations == 0:
return (None, None)
gt_class_scores = np.ones(num_gt_boxes)
gt_predicate_scores = | np.ones(num_gt_relations) | numpy.ones |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 24 13:24:43 2020
@author: ssli
Module to calculate the m bias
mcFitFunc:
Shear bias function.
WgQuantile1DFunc:
Calculate the weighted quantile by given probabilities
designed for 1D numpy array.
WgBin2DFunc:
Calculate the weighted quantile by given bin numbers
designed for 2D numpy array
mCalFunc:
Calculating the residual shear bias (m-value) in 2-d bins
"""
import numpy as np
from scipy import optimize
import pandas as pd
from astropy.io import fits
## All possible g1,g2 combinations
g1Range = np.array([-0.04,0.00,0.04,0.00,-0.0283,+0.0283,+0.0283,-0.0283])
g2Range = np.array([0.00,0.04,0.00,-0.04,+0.0283,+0.0283,-0.0283,-0.0283])
def mcFitFunc(x, m, c):
"""
Shear bias function.
"""
return (1.+m)*x+c
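# Illustrative check of the linear bias model above (not part of the original
# script): recover (m, c) from noiseless synthetic shears; the numbers are
# arbitrary.
def _example_mc_fit():
    g_in = np.array([-0.04, 0.0, 0.04])
    g_out = mcFitFunc(g_in, 0.01, 1e-4)  # true m = 0.01, c = 1e-4
    (m, c), _ = optimize.curve_fit(mcFitFunc, xdata=g_in, ydata=g_out)
    return m, c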
def WgQuantile1DFunc(values, weights, pq):
"""
    Calculate weighted quantiles at the given probabilities,
    designed for 1D numpy arrays.
"""
# Sort the data
ind_sorted = np.argsort(values)
v_sorted = values[ind_sorted]
wg_sorted = weights[ind_sorted]
# Compute the auxiliary arrays
Sn = np.cumsum(wg_sorted)
Pn = (Sn-0.5*wg_sorted)/np.sum(wg_sorted)
# Get the quantiles
res = np.interp(pq, Pn, v_sorted)
return res
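# Toy check (not in the original script): with most of the weight on the last
# value, the weighted median is pulled towards it. The arrays are made up.
def _example_weighted_median():
    values = np.array([1.0, 2.0, 3.0, 4.0])
    weights = np.array([1.0, 1.0, 1.0, 5.0])
    return WgQuantile1DFunc(values, weights, np.array([0.5]))  # -> 3.5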
def WgBin2DFunc(v1, v2, wgs, Nbin1, Nbin2):
"""
    Calculate weighted quantile bin edges for the given bin numbers,
    designed for 2D binning of two 1D numpy arrays.
"""
# Define the probabilities for the quantiles based on the number of bins
pq1 = np.linspace(0,1.0,Nbin1+1)
pq2 = np.linspace(0,1.0,Nbin2+1)
# Calculate quantiles for v1
q1 = WgQuantile1DFunc(v1, wgs, pq1)
#Compute quantiles for v2 in each v1 bin
q2s=[]
for i in range(len(q1)-1):
mask = (v1>=q1[i])&(v1<q1[i+1])
q2 = WgQuantile1DFunc(v2[mask], wgs[mask], pq2)
q2s.append(q2)
return q1, np.array(q2s)
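# Toy illustration (not in the original script): split 1000 equally weighted
# points into a 2 x 3 grid of equal-count bins; the random draws are for
# demonstration only.
def _example_2d_bins():
    np.random.seed(0)
    v1 = np.random.normal(size=1000)
    v2 = np.random.normal(size=1000)
    wgs = np.ones(1000)
    q1, q2s = WgBin2DFunc(v1, v2, wgs, 2, 3)
    return q1, q2s  # q1 has 3 edges; q2s holds 4 edges for each of the 2 v1 bins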
def mCalFunc(id_bin, dataSim, dataReal,
Nbin1, Nbin2, pq):
"""
    Calculate the residual shear bias (m-value) in 2-d bins of S/N and resolution
"""
# helper quantities
# Simulation
snrSim = dataSim['snr_model'].values
#
g1_inSim = dataSim['g1'].values
g2_inSim = dataSim['g2'].values
#
e1Sim = dataSim['e1'].values
e2Sim = dataSim['e2'].values
eSim = np.sqrt(e1Sim**2 + e2Sim**2)
#
size_out_altSim = dataSim['size_out'].values*np.sqrt((1.-eSim)/(1.+eSim))
#
RSim = dataSim['psf_size_in'].values/(size_out_altSim**2+dataSim['psf_size_in'].values)
#
wgSim= dataSim['LFweight'].values
#
# Data
snrReal = dataReal['model_SNratio'].values
#Define PSF size
size_psfReal = np.sqrt(dataReal['PSF_Q11'].values*dataReal['PSF_Q22'].values - dataReal['PSF_Q12'].values**2)
#Define |e| for the 3 blindings
eReal = np.sqrt(dataReal['bias_corrected_e1'].values**2 + dataReal['bias_corrected_e2'].values**2)
#Define circularised galaxy size
size_abReal = dataReal['bias_corrected_scalelength_pixels'].values*np.sqrt((1.-eReal)/(1.+eReal))
#Define galaxy 'resolution'
RReal = size_psfReal/(size_abReal**2+size_psfReal)
# weight
wgReal = dataReal['recal_weight'].values
# 2D binning
#Calculate the bins such that each bin contains the same number of points.
bin1_bounds, bin2_bounds = WgBin2DFunc(snrSim, RSim, wgSim, Nbin1, Nbin2)
wgRealSums = []
wgReal2Sums = []
m1s = []
m2s = []
m1_errs = []
m2_errs = []
m1_err_BSs = []
m2_err_BSs = []
m_err_BSs = []
for i in range(Nbin1):
lower1 = bin1_bounds[i]
upper1 = bin1_bounds[i+1]
#
mask1Sim = (snrSim>=lower1)&(snrSim<upper1)
mask1Real = (snrReal>=lower1)&(snrReal<upper1)
for j in range(Nbin2):
lower2 = bin2_bounds[i][j]
upper2 = bin2_bounds[i][j+1]
#
mask2Sim = (RSim>=lower2)&(RSim<upper2)
mask2Real = (RReal>=lower2)&(RReal<upper2)
#
maskSim = mask1Sim & mask2Sim
maskReal = mask1Real & mask2Real
# mask input parameters
# Simulation
wgSim_mask = wgSim[maskSim]
#
e1Sim_mask = e1Sim[maskSim]
e2Sim_mask = e2Sim[maskSim]
#
g1_inSim_mask = g1_inSim[maskSim]
g2_inSim_mask = g2_inSim[maskSim]
# data
wgReal_mask = wgReal[maskReal]
wgRealSums.append(np.sum(wgReal_mask))
# prepare shear parameters for mc fitting
g1_out=[]
g2_out=[]
g_out_w=[]
g1_in_used=[]
g2_in_used=[]
for kk in range(len(g1Range)):
maskShear=(g1_inSim_mask==g1Range[kk])&(g2_inSim_mask==g2Range[kk])
numMasked=len(e1Sim_mask[maskShear])
if (numMasked >0):
#Calculating bin average for calibration quantities
g1_out.append(np.average(e1Sim_mask[maskShear], weights=wgSim_mask[maskShear]))
g2_out.append(np.average(e2Sim_mask[maskShear], weights=wgSim_mask[maskShear]))
g_out_w.append(1./(np.sum(wgSim_mask[maskShear]))**0.5)
#
g1_in_used.append(g1Range[kk])
g2_in_used.append(g2Range[kk])
# Start mc fitting
numShear=len(g1_out)
if(numShear<3):
print('Cannot do the regression in bin ', \
bin1_bounds[i], bin1_bounds[i+1], bin2_bounds[i][j], bin2_bounds[i][j+1], \
' less than 3 shear values! (', numShear, ')')
exit()
else:
g1_in_used = np.array(g1_in_used)
g2_in_used = np.array(g2_in_used)
g1_out = np.array(g1_out)
g2_out = np.array(g2_out)
g_out_w = np.array(g_out_w)
m1c1, err1 = optimize.curve_fit(mcFitFunc, xdata=g1_in_used, ydata=g1_out, sigma=g_out_w)
m2c2, err2 = optimize.curve_fit(mcFitFunc, xdata=g2_in_used, ydata=g2_out, sigma=g_out_w)
m1 = m1c1[0]
m1_err = (err1[0,0])**0.5
# # #
# c1 = m1c1[1]
# c1_err = (err1[1,1])**0.5
#
m2 = m2c2[0]
m2_err = (err2[0,0])**0.5
# # #
# c2 = m2c2[1]
# c2_err =(err2[1,1])**0.5
#
#
# m = (m1 + m2)/2.
# Performing Bootstrap
nboot = 50
m1_sample = np.zeros(nboot)
m2_sample = np.zeros(nboot)
m_sample = np.zeros(nboot)
# c1_sample = np.zeros(nboot)
# c2_sample = np.zeros(nboot)
for BS_index in range(nboot):
# Retrieving random shears
index = np.random.randint(0,numShear,numShear)
BS_g1_in = g1_in_used[index]
BS_g2_in = g2_in_used[index]
BS_g1_out = g1_out[index]
BS_g2_out = g2_out[index]
BS_g_out_w = g_out_w[index]
m1c1, err1 = optimize.curve_fit(mcFitFunc, xdata=BS_g1_in, ydata=BS_g1_out, sigma=BS_g_out_w)
m2c2, err2 = optimize.curve_fit(mcFitFunc, xdata=BS_g2_in, ydata=BS_g2_out, sigma=BS_g_out_w)
m1_sample[BS_index] = m1c1[0]
m2_sample[BS_index] = m2c2[0]
m_sample[BS_index] = (m1c1[0]+m2c2[0])/2.
# c1_sample[BS_index] = m1c1[1]
# c2_sample[BS_index] = m2c2[1]
m1_err_BS = np.std(m1_sample)
m2_err_BS = np.std(m2_sample)
m_err_BS = np.std(m_sample)
# c1_err_BS = np.std(c1_sample)
# c2_err_BS = np.std(c2_sample)
#
m1s.append(m1)
m2s.append(m2)
m1_errs.append(m1_err)
m2_errs.append(m2_err)
m1_err_BSs.append(m1_err_BS)
m2_err_BSs.append(m2_err_BS)
m_err_BSs.append(m_err_BS)
wgRealSums = np.array(wgRealSums)
m1s = np.array(m1s)
m2s = np.array(m2s)
m1_errs = np.array(m1_errs)
m2_errs = np.array(m2_errs)
m1_err_BSs = np.array(m1_err_BSs)
m2_err_BSs = np.array(m2_err_BSs)
m_err_BSs = | np.array(m_err_BSs) | numpy.array |
# Allen Institute Software License - This software license is the 2-clause BSD
# license plus a third clause that prohibits redistribution for commercial
# purposes without further permission.
#
# Copyright 2015-2016. Allen Institute. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Redistributions for commercial purposes are not permitted without the
# Allen Institute's written permission.
# For purposes of this license, commercial purposes is the incorporation of the
# Allen Institute's software into anything for which you will charge fees or
# other compensation. Contact <EMAIL> for commercial licensing
# opportunities.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import sys
import math
import numpy as np
import scipy.signal as signal
import logging
# Design notes:
# to generate an average feature file, all sweeps must have all features
# to generate a fitness score of a sweep against a feature file, the sweep
# must have all features in the file. If one is absent, a penalty
# of TODO ??? will be assessed
# set of features
class EphysFeatures( object ):
def __init__(self, name):
# feature mean and standard deviations
self.mean = {}
self.stdev = {}
# human-readable names for features
self.glossary = {}
# table indicating how to score feature
# 'hit' feature exists:
# 'ignore' do nothing
# 'stdev' score is # stdevs from target mean
# 'miss' feature absent:
# 'constant' score = scoring['constant']
# 'mean_mult' score = mean * scoring['mean_mult']
#
self.scoring = {}
self.name = name
################################################################
# ignore scores
ignore_score = { "hit": "ignore" }
self.glossary["n_spikes"] = "Number of spikes"
self.scoring["n_spikes"] = ignore_score
################################################################
# ignore misses
ignore_miss = { "hit":"stdev", "miss":"const", "const":0 }
self.glossary["adapt"] = "Adaptation index"
self.scoring["adapt"] = ignore_miss
self.glossary["latency"] = "Time to first spike (ms)"
self.scoring["latency"] = ignore_miss
################################################################
# base miss off mean
mean_score = { "hit":"stdev", "miss":"mean_mult", "mean_mult":2 }
self.glossary["ISICV"] = "ISI-CV"
self.scoring["ISICV"] = mean_score
################################################################
# normal scoring
normal_score = { "hit":"stdev", "miss":"const", "const":20 }
self.glossary["isi_avg"] = "Average ISI (ms)"
self.scoring["isi_avg"] = ignore_score
self.glossary["doublet"] = "Doublet ISI (ms)"
self.scoring["doublet"] = normal_score
self.glossary["f_fast_ahp"] = "Fast AHP (mV)"
self.scoring["f_fast_ahp"] = normal_score
self.glossary["f_slow_ahp"] = "Slow AHP (mV)"
self.scoring["f_slow_ahp"] = normal_score
self.glossary["f_slow_ahp_time"] = "Slow AHP time"
self.scoring["f_slow_ahp_time"] = normal_score
self.glossary["base_v"] = "Baseline voltage (mV)"
self.scoring["base_v"] = normal_score
#self.glossary["base_v2"] = "Baseline voltage 2 (mV)"
#self.scoring["base_v2"] = normal_score
#self.glossary["base_v3"] = "Baseline voltage 3 (mV)"
#self.scoring["base_v3"] = normal_score
################################################################
# per spike scoring
perspike_score = { "hit":"perspike", "miss":"const", "const":20, "skip_last_n":0 }
self.glossary["f_peak"] = "Spike height (mV)"
self.scoring["f_peak"] = perspike_score.copy()
self.glossary["f_trough"] = "Spike depth (mV)"
self.scoring["f_trough"] = perspike_score.copy()
self.scoring["f_trough"]["skip_last_n"] = 1
# self.glossary["f_w"] = "Spike width at -30 mV (ms)"
# self.scoring["f_w"] = perspike_score.copy()
self.glossary["upstroke"] = "Peak upstroke (mV/ms)"
self.scoring["upstroke"] = perspike_score.copy()
self.glossary["upstroke_v"] = "Vm of peak upstroke (mV)"
self.scoring["upstroke_v"] = perspike_score.copy()
self.glossary["downstroke"] = "Peak downstroke (mV/ms)"
self.scoring["downstroke"] = perspike_score.copy()
self.glossary["downstroke_v"] = "Vm of peak downstroke (mV)"
self.scoring["downstroke_v"] = perspike_score.copy()
self.glossary["threshold"] = "Threshold voltage (mV)"
self.scoring["threshold"] = perspike_score.copy()
self.glossary["width"] = "Spike width at half-max (ms)"
self.scoring["width"] = perspike_score.copy()
self.scoring["width"]["skip_last_n"] = 1
self.glossary["thresh_ramp"] = "Change in dv/dt over first 5 mV past threshold (mV/ms)"
self.scoring["thresh_ramp"] = perspike_score.copy()
################################################################
# heavily penalize when there are no spikes
spike_score = { "hit":"stdev", "miss":"const", "const":250 }
self.glossary["rate"] = "Firing rate (Hz)"
self.scoring["rate"] = spike_score
def print_out(self):
print(("Features from " + self.name))
for k in list(self.mean.keys()):
if k in self.glossary:
st = "%30s = " % self.glossary[k]
if self.mean[k] is not None:
st += "%g" % self.mean[k]
else:
st += "--------"
if k in self.stdev and self.stdev[k] is not None:
st += " +/- %g" % self.stdev[k]
print(st)
# initialize summary feature set from file
def clone(self, param_dict):
for k in list(param_dict.keys()):
self.mean[k] = param_dict[k]["mean"]
self.stdev[k] = param_dict[k]["stdev"]
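# Hypothetical usage sketch (not part of the original module): build a feature
# set by hand and print the summary table; the values below are invented.
def _example_feature_table():
    ephys = EphysFeatures("demo sweep")
    ephys.mean["rate"] = 12.0       # firing rate in Hz
    ephys.stdev["rate"] = 1.5
    ephys.mean["latency"] = 34.2    # time to first spike in ms
    ephys.print_out()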
class EphysFeatureExtractor( object ):
def __init__(self):
# list of feature set instances
self.feature_list = []
# names of each element in feature list
self.feature_source = []
# feature set object representing combination of all instances
self.summary = None
# adds new feature set instance to feature_list
def process_instance(self, name, v, curr, t, onset, dur, stim_name):
feature = EphysFeatures(name)
################################################################
# set stop time -- run until end of stimulus or end of sweep
# comment-out the one of the two approaches
# detect spikes only during stimulus
start = onset
stop = onset + dur
# detect spikes for all of sweep
#start = 0
#stop = t[-1]
################################################################
# pull out spike times
# calculate the derivative only within target window
# otherwise get spurious detection at ends of stimuli
# filter with 10kHz cutoff if constant 200kHz sample rate (ie experimental trace)
start_idx = np.where(t >= start)[0][0]
stop_idx = np.where(t >= stop)[0][0]
v_target = v[start_idx:stop_idx]
if np.abs(t[1] - t[0] - 5e-6) < 1e-7 and np.var(np.diff(t)) < 1e-6:
b, a = signal.bessel(4, 0.1, "low")
smooth_v = signal.filtfilt(b, a, v_target, axis=0)
dv = np.diff(smooth_v)
else:
dv = | np.diff(v_target) | numpy.diff |
from __future__ import division, unicode_literals, absolute_import
import numpy as np
try:
import lalsimulation as lalsim
except Exception:
pass
class lal_wrapper(object):
def __init__(self, approx, domain):
self.approx = lalsim.__dict__[approx]
self.domain = domain
def __call__(self, freqs, params):
if self.domain == 'time' :
return generate_timedomain_waveform(self.approx, params)
elif self.domain == 'freq' :
fr, hp, hc = generate_freqdomain_waveform(self.approx, params)
indxs = np.where((fr>=params['f_min'])&(fr<=params['f_max']))
return hp[indxs], hc[indxs]
else:
raise ValueError("Unable to generate LAL waveform, invalid domain.")
def generate_timedomain_waveform(approx, params):
"""
SimInspiralChooseTDWaveform:
REAL8TimeSeries **hplus, /**< +-polarization waveform */
REAL8TimeSeries **hcross, /**< x-polarization waveform */
const REAL8 m1, /**< mass of companion 1 (kg) */
const REAL8 m2, /**< mass of companion 2 (kg) */
const REAL8 S1x, /**< x-component of the dimensionless spin of object 1 */
const REAL8 S1y, /**< y-component of the dimensionless spin of object 1 */
const REAL8 S1z, /**< z-component of the dimensionless spin of object 1 */
const REAL8 S2x, /**< x-component of the dimensionless spin of object 2 */
const REAL8 S2y, /**< y-component of the dimensionless spin of object 2 */
const REAL8 S2z, /**< z-component of the dimensionless spin of object 2 */
const REAL8 distance, /**< distance of source (m) */
const REAL8 inclination, /**< inclination of source (rad) */
const REAL8 phiRef, /**< reference orbital phase (rad) */
const REAL8 longAscNodes, /**< longitude of ascending nodes, degenerate with the polarization angle, Omega in documentation */
    const REAL8 eccentricity,               /**< eccentricity at reference epoch */
const REAL8 UNUSED meanPerAno, /**< mean anomaly of periastron */
const REAL8 deltaT, /**< sampling interval (s) */
const REAL8 f_min, /**< starting GW frequency (Hz) */
REAL8 f_ref, /**< reference GW frequency (Hz) */
LALDict *LALparams, /**< LAL dictionary containing accessory parameters */
const Approximant approximant /**< post-Newtonian approximant to use for waveform production */
"""
LALDict = lalsim.lal.CreateDict()
if params['lambda1'] != 0. :
lalsim.SimInspiralWaveformParamsInsertTidalLambda1(LALDict, params['lambda1'])
if params['lambda2'] != 0. :
lalsim.SimInspiralWaveformParamsInsertTidalLambda2(LALDict, params['lambda2'])
hp,hc = lalsim.SimInspiralChooseTDWaveform(lalsim.lal.MSUN_SI*params['mtot']*params['q']/(1.+params['q']),
lalsim.lal.MSUN_SI*params['mtot']/(1.+params['q']),
params['s1x'],params['s1y'],params['s1z'],
params['s2x'],params['s2y'],params['s2z'],
params['distance']*1e6*lalsim.lal.PC_SI,
params['iota'],
params['phi_ref'],
0.0, params['eccentricity'], 0.0,
1./params['srate'],
params['f_min'],
params['f_min'],
LALDict,
approx)
hp = hp.data.data
hc = hc.data.data
return | np.array(hp) | numpy.array |
__all__ = [
'OutlineContinents',
'GlobeSource',
]
import numpy as np
import pyvista as pv
import vtk
from .. import interface
from ..base import AlgorithmBase
class OutlineContinents(AlgorithmBase):
"""A simple data source to produce a ``vtkEarthSource`` outlining the
Earth's continents. This works well with our ``GlobeSource``.
"""
__displayname__ = 'Outline Continents'
__category__ = 'source'
def __init__(self, radius=6371.0e6):
AlgorithmBase.__init__(
self, nInputPorts=0, nOutputPorts=1, outputType='vtkPolyData'
)
self.__radius = radius
def RequestData(self, request, inInfo, outInfo):
"""Used by pipeline to generate the output"""
pdo = self.GetOutputData(outInfo, 0)
earth = vtk.vtkEarthSource()
earth.SetRadius(self.__radius)
earth.OutlineOn()
earth.Update()
foo = pv.wrap(earth.GetOutput())
# Copy the geometries only
continents = pv.PolyData()
continents.points = foo.points.copy()
continents.lines = foo.lines.copy()
del foo
pdo.ShallowCopy(continents)
return 1
def set_radius(self, radius):
"""Set the radius of the globe. Default is 6.371.0e9 meters"""
if self.__radius != radius:
self.__radius = radius
self.Modified()
###############################################################################
class GlobeSource(AlgorithmBase):
"""Creates a globe/sphere the size of the Earth with texture coordinates
already mapped. The globe's center is assumed to be (0,0,0).
Args:
radius (float): the radius to use
npar (int): the number of parallels (latitude)
nmer (int): the number of meridians (longitude)
"""
__displayname__ = 'Globe Source'
__category__ = 'source'
def __init__(self, radius=6371.0e6, npar=15, nmer=36, **kwargs):
AlgorithmBase.__init__(
self, nInputPorts=0, nOutputPorts=1, outputType='vtkPolyData'
)
self.__radius = radius
self.__npar = npar
self.__nmer = nmer
# TODO: use **kwargs
def spherical_to_cartesian(self, meridian, parallel):
"""Converts longitude/latitude to catesian coordinates. Assumes the
arguments are given in degrees.
"""
lon_r = np.radians(meridian)
lat_r = np.radians(parallel)
x = self.__radius * np.cos(lat_r) * np.cos(lon_r)
        y = self.__radius * np.cos(lat_r) * np.sin(lon_r)
        z = self.__radius * np.sin(lat_r)
        return x, y, z
"""
Functions for preprocessing products from the MAP Framework Team
"""
import os
import numpy as np
import xarray as xr
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from gisutils.raster import get_values_at_points, write_raster
from mfsetup import load_modelgrid
from mfsetup.discretization import voxels_to_layers, fill_cells_vertically
from mfsetup.testing import point_is_on_nhg
from mfsetup.units import convert_length_units
def get_layer(botm_array, i, j, elev):
"""Return the botm_array for elevations at i, j locations.
Parameters
----------
botm_array : 3D numpy array
layer bottom elevations
    i : scalar or sequence
        row index (zero-based)
    j : scalar or sequence
        column index
    elev : scalar or sequence
elevation (in same units as model)
Returns
-------
k : np.ndarray (1-D) or scalar
zero-based layer index
"""
def to_array(arg):
if not isinstance(arg, np.ndarray):
return np.array([arg])
else:
return arg
i = to_array(i)
j = to_array(j)
nlay = botm_array.shape[0]
elev = to_array(elev)
botms = botm_array[:, i, j] # .tolist()
# identify layer botms that are above and below the elevations
differences = np.round((botms - elev), 2)
isabove = differences >= 0
# layer is the number of botm_array that are above
layers = np.sum(isabove, axis=0)
# force elevations below model bottom into bottom layer
layers[layers > nlay - 1] = nlay - 1
layers = np.atleast_1d(np.squeeze(layers))
if len(layers) == 1:
layers = layers[0]
return layers
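# Minimal illustrative call (not part of the original module): three layer
# bottoms at a single (i, j) cell and an elevation that falls in the middle
# layer; the numbers are made up.
def _example_get_layer():
    botm = np.array([10.0, 5.0, 0.0]).reshape(3, 1, 1)
    # 7.0 is below the bottom of layer 0 (10.0) but above that of layer 1 (5.0)
    return get_layer(botm, 0, 0, 7.0)  # -> 1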
def plot_slice(layer_elevations, property_data=None,
row=0, column=slice(None),
voxel_start_layer=0, voxel_zones=None, cmap='copper',
voxel_cmap='viridis', unit_labels=None, add_surfaces=None):
"""Plot a single cross section slice
Parameters
----------
layer_elevations : 3D numpy array
Array of layer elevations, starting with the model top.
(Length equal to the number of botm_array + 1)
property_data : 3D numpy array
Array of zone numbers generated by setup_model_layers.
row : int or slice instance
If a cross section along a row is desired, row should be a integer,
and column should be a slice instance indicating the range of columns to include.
by default, 0.
column : int or slice instance
If a cross section along a column is desired, column should be a integer,
and row should be a slice instance indicating the range of rows to include.
by default, slice(None), which includes all columns.
voxel_start_layer : int, optional
First layer with voxel data, by default 0
voxel_zones : sequence, optional
Zone numbers within property_data that are voxel-based,
by default None
cmap : str, optional
Matplotlib colormap for non-voxel zone numbers, by default 'copper',
to contrast with colormap for voxel-based zone numbers.
voxel_cmap : str, optional
Matplotlib colormap for voxel-based zone numbers, by default 'viridis'.
unit_labels : dict, optional
Dictionary mapping non-voxel zone numbers to hydrogeologic units,
by default None
Returns
-------
ax : matplotlib AxesSubplot instance for figure
"""
# cross section code
nlay, nrow, ncol = layer_elevations.shape
# create meshgrid for rows or columns
# along a row
if isinstance(column, slice):
# x, z = np.meshgrid(range(ncol), np.array(z_edges))
# x = grid.xcellcenters[row, column]
ncells = ncol
title = 'Row {}'.format(row)
xlabel = 'Column in model'
# along a column
else:
# x, z = np.meshgrid(range(nrow), np.array(z_edges))
# x = grid.ycellcenters[row, column]
ncells = nrow
title = 'Column {}'.format(column)
xlabel = 'Row in model'
x = np.arange(ncells)
z = layer_elevations[:, row, column].copy()
# since z is used to define cell edges in the pcolormesh (below)
# z cannot be masked or have nan values
# set missing data values (outside of the model footprint) in z
# to -9999
# pcolormesh will still skip these cells, as they are defined
# as no data by the mask for the property array
z_nodata = -9999
z[np.isnan(z)] = z_nodata
    # nodata fill values would otherwise produce pcolormesh edges that dip
    # down to the fill value on the edge of nodata areas
# fill these with previous value in either direction
# first drop any indices along the edges
for side in -1, 1:
k, j = np.where(z == z_nodata)
interior_zeros = (j > 0) & (j < z.shape[1] - 1)
j = j[interior_zeros]
k = k[interior_zeros]
        # then reassign the nodata elevations
z[k, j] = z[k, j+side]
#z = np.ma.masked_where(np.isnan(z), z)
thicknesses = np.diff(z, axis=0) * -1
thicknesses[thicknesses <= 0] = 0.
fig, ax = plt.subplots(figsize=(11, 8.5))
# optionally plot a property such as resistivity facies
if property_data is not None:
# drop na values
# (areas with no voxel data at any depth)
#loc = ~np.all(z.mask, axis=0)
data = property_data[:, row, column].copy()
vmin, vmax = property_data.min(), property_data.max()
#x = np.squeeze(x[loc]) # + [x[-1] + 1]
#x = np.ma.masked_array(x, mask=~loc)
zstart = voxel_start_layer
zend = voxel_start_layer + property_data.shape[0] + 1
z = np.squeeze(z[zstart:zend, :])
#if not np.any(z):
# return
#data = np.squeeze(data[:, loc])
#thicknesses = np.squeeze(thicknesses[:, loc])
if np.any(z) and voxel_zones is not None:
# get the min max values for the existing framework
# and voxel-based property data
is_voxel_3D = np.isin(property_data, voxel_zones)
vmin = property_data[~is_voxel_3D].min()
vmax = property_data[~is_voxel_3D].max()
voxel_vmin = np.min(voxel_zones)
voxel_vmax = np.max(voxel_zones)
is_voxel = np.isin(data, voxel_zones)
voxel_mask = (thicknesses <= 0) | ~is_voxel
data_mask = (thicknesses <= 0) | is_voxel
voxel_data = np.ma.masked_array(data, mask=voxel_mask)
data = | np.ma.masked_array(data, mask=data_mask) | numpy.ma.masked_array |
import numpy as np
import os
import cv2
import keras
import sklearn
import pandas
from time import time
from sklearn.model_selection import train_test_split
from keras.models import Sequential
from keras.utils import to_categorical
from keras.models import load_model
from keras.layers import *
from keras import layers
from keras import Model
from keras.callbacks import TensorBoard
from keras import optimizers
import matplotlib.pyplot as plt
from keras.applications import *
from sklearn.metrics import classification_report
import time
input = | np.load("resnet_features.npy") | numpy.load |
import weakref
from enum import Enum, unique
from threading import Lock
from typing import Callable, Dict, Iterable, List, Tuple
import numpy as np
from scipy.special import gamma
INFINITESIMAL = 1e-100
FRACTION_PARAM_NAME = "f"
NAME_KEY = "Name"
BOUNDS_KEY = "Bounds"
DEFAULT_VALUE_KEY = "Default"
LOCATION_KEY = "Location"
COMPONENT_INDEX_KEY = "ComponentIndex"
PARAM_INDEX_KEY = "ParamIndex"
@unique
class DistributionType(Enum):
Normal = 0
Weibull = 1
GeneralWeibull = 2
def check_component_number(component_number: int):
# Check the validity of `component_number`
if type(component_number) != int:
raise TypeError(component_number)
elif component_number < 1:
raise ValueError(component_number)
def get_param_count(distribution_type: DistributionType) -> int:
if distribution_type == DistributionType.Normal:
return 2
elif distribution_type == DistributionType.Weibull:
return 2
elif distribution_type == DistributionType.GeneralWeibull:
return 3
else:
raise NotImplementedError(distribution_type)
def get_param_names(distribution_type: DistributionType) -> Tuple[str]:
if distribution_type == DistributionType.Normal:
return ("mu", "sigma")
elif distribution_type == DistributionType.Weibull:
return ("beta", "eta")
elif distribution_type == DistributionType.GeneralWeibull:
return ("mu", "beta", "eta")
else:
raise NotImplementedError(distribution_type)
def get_base_func_name(distribution_type: DistributionType) -> str:
if distribution_type == DistributionType.Normal:
return "normal"
elif distribution_type == DistributionType.Weibull:
return "weibull"
elif distribution_type == DistributionType.GeneralWeibull:
return "gen_weibull"
else:
raise NotImplementedError(distribution_type)
def get_param_bounds(distribution_type: DistributionType) -> Tuple[Tuple[float, float]]:
if distribution_type == DistributionType.Normal:
return ((INFINITESIMAL, None), (INFINITESIMAL, None))
elif distribution_type == DistributionType.Weibull:
return ((INFINITESIMAL, None), (INFINITESIMAL, None))
elif distribution_type == DistributionType.GeneralWeibull:
return ((INFINITESIMAL, None), (INFINITESIMAL, None), (INFINITESIMAL, None))
else:
raise NotImplementedError(distribution_type)
# in order to obtain better performance,
# the params of components should be different
def get_param_defaults(distribution_type: DistributionType, component_number: int) -> Tuple[Tuple]:
check_component_number(component_number)
if distribution_type == DistributionType.Normal:
return tuple(((i*10, 2+i) for i in range(1, component_number+1)))
elif distribution_type == DistributionType.Weibull:
return tuple(((10+i, (i+1)*15) for i in range(1, component_number+1)))
elif distribution_type == DistributionType.GeneralWeibull:
return tuple(((0, 2+i, i*10) for i in range(1, component_number+1)))
else:
raise NotImplementedError(distribution_type)
def get_params(distribution_type: DistributionType, component_number: int) -> List[Dict]:
check_component_number(component_number)
params = []
param_count = get_param_count(distribution_type)
param_names = get_param_names(distribution_type)
param_bounds = get_param_bounds(distribution_type)
param_defaults = get_param_defaults(distribution_type, component_number)
# generate params for all components
for component_index, component_defaults in enumerate(param_defaults):
for param_index, name, bounds, defalut in zip(range(param_count), param_names, param_bounds, component_defaults):
params.append({NAME_KEY: name+str(component_index+1), BOUNDS_KEY: bounds,
DEFAULT_VALUE_KEY: defalut, COMPONENT_INDEX_KEY: component_index,
PARAM_INDEX_KEY: param_index, LOCATION_KEY: component_index*param_count+param_index})
# generate fractions for front n-1 components
for component_index in range(component_number-1):
# the fraction of each distribution
params.append({NAME_KEY: FRACTION_PARAM_NAME+str(component_index+1), BOUNDS_KEY: (0, 1),
DEFAULT_VALUE_KEY: 1/component_number, COMPONENT_INDEX_KEY: component_index,
LOCATION_KEY: component_number*param_count + component_index})
sort_params_by_location_in_place(params)
    return params
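# Illustration (not part of the original module): the parameter layout
# generated for a two-component Weibull mixture; the fraction of the last
# component is implicit.
def _example_param_layout():
    params = get_params(DistributionType.Weibull, 2)
    return [param[NAME_KEY] for param in params]
    # -> ['beta1', 'eta1', 'beta2', 'eta2', 'f1']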
def sort_params_by_location_in_place(params: List[Dict]):
params.sort(key=lambda element: element[LOCATION_KEY])
def get_bounds(params: List[Dict]) -> Tuple[Tuple]:
bounds = []
for param in params:
bounds.append(param[BOUNDS_KEY])
return tuple(bounds)
def get_constrains(component_number: int) -> Tuple[Dict]:
if component_number == 1:
return ()
elif component_number > 1:
        return ({'type': 'ineq', 'fun': lambda args: 1 - np.sum(args[1-component_number:]) + INFINITESIMAL},)
else:
raise ValueError(component_number)
def get_defaults(params: List[Dict]) -> Tuple[float]:
defaults = []
for param in params:
defaults.append(param[DEFAULT_VALUE_KEY])
return tuple(defaults)
def get_lambda_str(distribution_type: DistributionType, component_number:int) -> str:
base_func_name = get_base_func_name(distribution_type)
param_count = get_param_count(distribution_type)
param_names = get_param_names(distribution_type)
if component_number == 1:
return "lambda x, {0}: {1}(x, {0})".format(", ".join(param_names), base_func_name)
elif component_number > 1:
parameter_list = ", ".join(["x"] + [name+str(i+1) for i in range(component_number) for name in param_names] + [FRACTION_PARAM_NAME+str(i+1) for i in range(component_number-1)])
# " + " to connect each sub-function
# the previous sub-function str list means the m-1 sub-functions with n params `fj * base_func(x, param_1_j, ..., param_i_j, ..., param_n_j)`
# the last sub-function str which represents `(1-f_1-...-f_j-...-f_m-1) * base_func(x, param_1_j, ..., param_i_j, ..., param_n_j)`
previous_format_str = "{0}{1}*{2}(x, " + ", ".join(["{"+str(i+3)+"}{1}" for i in range(param_count)]) + ")"
previous_sub_func_strs = [previous_format_str.format(FRACTION_PARAM_NAME, i+1, base_func_name, *param_names) for i in range(component_number-1)]
last_format_str = "({0})*{1}(x, " + ", ".join(["{"+str(i+3)+"}{2}" for i in range(param_count)]) + ")"
last_sub_func_str = last_format_str.format("-".join(["1"]+["f{0}".format(i+1) for i in range(component_number-1)]), base_func_name, component_number, *param_names)
expression = " + ".join(previous_sub_func_strs + [last_sub_func_str])
lambda_string = "lambda {0}: {1}".format(parameter_list, expression)
return lambda_string
else:
raise ValueError(component_number)
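# Illustration (not part of the original module) of the source string built
# above for a two-component Weibull mixture.
def _example_lambda_str():
    # returns "lambda x, beta1, eta1, beta2, eta2, f1: "
    #         "f1*weibull(x, beta1, eta1) + (1-f1)*weibull(x, beta2, eta2)"
    return get_lambda_str(DistributionType.Weibull, 2)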
# process the raw params list to make it easy to use
def process_params(distribution_type: DistributionType, component_number: int, fitted_params: Iterable) -> Tuple[Tuple[Tuple, float]]:
param_count = get_param_count(distribution_type)
if component_number == 1:
assert len(fitted_params) == param_count
return ((tuple(fitted_params), 1.0),)
elif component_number > 1:
assert len(fitted_params) == (param_count+1) * component_number - 1
expanded = list(fitted_params) + [1.0-sum(fitted_params[component_number*param_count:])]
return tuple(((tuple(expanded[i*param_count:(i+1)*param_count]), expanded[component_number*param_count+i]) for i in range(component_number)))
else:
raise ValueError(component_number)
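# Sketch (not part of the original module) of how a flat fitted-parameter
# vector is regrouped for a two-component Weibull mixture.
def _example_process_params():
    flat = (2.0, 15.0, 3.0, 30.0, 0.4)  # beta1, eta1, beta2, eta2, f1
    return process_params(DistributionType.Weibull, 2, flat)
    # -> (((2.0, 15.0), 0.4), ((3.0, 30.0), 1 - 0.4))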
# the pdf function of Normal distribution
def normal(x, mu, sigma):
if sigma <= 0.0:
return np.zeros_like(x, dtype=np.float64)
else:
return 1/(sigma*np.sqrt(2*np.pi))*np.exp(-np.square(x-mu)/(2*np.square(sigma)))
def double_normal(x, mu1, sigma1, mu2, sigma2, f1):
return f1 * normal(x, mu1, sigma1) + (1-f1) * normal(x, mu2, sigma2)
def triple_normal(x, mu1, sigma1, mu2, sigma2, mu3, sigma3, f1, f2):
return f1 * normal(x, mu1, sigma1) + f2 * normal(x, mu2, sigma2) + (1-f1-f2) * normal(x, mu3, sigma3)
def quadruple_normal(x, mu1, sigma1, mu2, sigma2, mu3, sigma3, mu4, sigma4, f1, f2, f3):
return f1 * normal(x, mu1, sigma1) + f2 * normal(x, mu2, sigma2) + f3 * normal(x, mu3, sigma3) + (1-f1-f2-f3) * normal(x, mu4, sigma4)
def normal_mean(mu, sigma):
if sigma <= 0.0:
return np.nan
else:
return mu
def normal_median(mu, sigma):
if sigma <= 0.0:
return np.nan
else:
return mu
def normal_mode(mu, sigma):
if sigma <= 0.0:
return np.nan
else:
return mu
def normal_standard_deviation(mu, sigma):
if sigma <= 0.0:
return np.nan
else:
return sigma
def normal_variance(mu, sigma):
if sigma <= 0.0:
return np.nan
else:
return sigma**2
def normal_skewness(mu, sigma):
if sigma <= 0.0:
return np.nan
else:
return 0.0
def normal_kurtosis(mu, sigma):
if sigma <= 0.0:
return np.nan
else:
return 0.0
# The pdf function of Weibull distribution
def weibull(x, beta, eta):
results = np.zeros_like(x, dtype=np.float64)
if beta <= 0.0 or eta <= 0.0:
return results
else:
non_zero = np.greater(x, 0.0)
results[non_zero] = (beta/eta) * (x[non_zero]/eta)**(beta-1) * np.exp(-(x[non_zero]/eta)**beta)
return results
# return (beta/eta) * (x/eta)**(beta-1) * np.exp(-(x/eta)**beta)
def double_weibull(x, beta1, eta1, beta2, eta2, f):
return f * weibull(x, beta1, eta1) + (1-f) * weibull(x, beta2, eta2)
def triple_weibull(x, beta1, eta1, beta2, eta2, beta3, eta3, f1, f2):
return f1 * weibull(x, beta1, eta1) + f2 * weibull(x, beta2, eta2) + (1-f1-f2) * weibull(x, beta3, eta3)
def quadruple_weibull(x, beta1, eta1, beta2, eta2, beta3, eta3, beta4, eta4, f1, f2, f3):
return f1 * weibull(x, beta1, eta1) + f2 * weibull(x, beta2, eta2) + f3 * weibull(x, beta3, eta3) + (1-f1-f2-f3) * weibull(x, beta4, eta4)
def weibull_mean(beta, eta):
if beta <= 0.0 or eta <= 0.0:
return np.nan
else:
return eta*gamma(1/beta+1)
def weibull_median(beta, eta):
if beta <= 0.0 or eta <= 0.0:
return np.nan
else:
return eta*(np.log(2)**(1/beta))
def weibull_mode(beta, eta):
if beta <= 0.0 or eta <= 0.0:
return np.nan
elif beta <= 1:
return 0.0
else:
return eta*(1-1/beta)**(1/beta)
def weibull_standard_deviation(beta, eta):
if beta <= 0.0 or eta <= 0.0:
return np.nan
else:
return eta*np.sqrt(gamma(2/beta+1) - gamma(1/beta+1)**2)
def weibull_variance(beta, eta):
if beta <= 0.0 or eta <= 0.0:
return np.nan
else:
return (eta**2)*(gamma(2/beta+1)-gamma(1/beta+1)**2)
def weibull_skewness(beta, eta):
if beta <= 0.0 or eta <= 0.0:
return np.nan
else:
return (2*gamma(1/beta+1)**3 - 3*gamma(2/beta+1)*gamma(1/beta+1) + gamma(3/beta+1)) / (gamma(2/beta+1)-gamma(1/beta+1)**2)**(3/2)
def weibull_kurtosis(beta, eta):
if beta <= 0.0 or eta <= 0.0:
return np.nan
else:
return (-3*gamma(1/beta+1)**4 + 6*gamma(2/beta+1)*gamma(1/beta+1)**2 - 4*gamma(3/beta+1)*gamma(1/beta+1) + gamma(4/beta+1)) / (gamma(2/beta+1)-gamma(1/beta+1)**2)**2
def gen_weibull(x, mu, beta, eta):
return weibull(x-mu, beta, eta)
def double_gen_weibull(x, mu1, beta1, eta1, mu2, beta2, eta2, f):
return f * gen_weibull(x, mu1, beta1, eta1) + (1-f) * gen_weibull(x, mu2, beta2, eta2)
def triple_gen_weibull(x, mu1, beta1, eta1, mu2, beta2, eta2, mu3, beta3, eta3, f1, f2):
return f1 * gen_weibull(x, mu1, beta1, eta1) + f2 * gen_weibull(x, mu2, beta2, eta2) + (1-f1-f2)*gen_weibull(x, mu3, beta3, eta3)
def quadruple_gen_weibull(x, mu1, beta1, eta1, mu2, beta2, eta2, mu3, beta3, eta3, mu4, beta4, eta4, f1, f2, f3):
return f1 * gen_weibull(x, mu1, beta1, eta1) + f2 * gen_weibull(x, mu2, beta2, eta2) + f3 * gen_weibull(x, mu3, beta3, eta3) + (1-f1-f2-f3) * gen_weibull(x, mu4, beta4, eta4)
def gen_weibull_mean(mu, beta, eta):
return weibull_mean(beta, eta) + mu
def gen_weibull_median(mu, beta, eta):
return weibull_median(beta, eta) + mu
def gen_weibull_mode(mu, beta, eta):
return weibull_mode(beta, eta) + mu
def gen_weibull_standard_deviation(mu, beta, eta):
return weibull_standard_deviation(beta, eta)
def gen_weibull_variance(mu, beta, eta):
return weibull_variance(beta, eta)
def gen_weibull_skewness(mu, beta, eta):
return weibull_skewness(beta, eta)
def gen_weibull_kurtosis(mu, beta, eta):
return weibull_kurtosis(beta, eta)
def get_single_func(distribution_type: DistributionType) -> Callable:
if distribution_type == DistributionType.Normal:
return normal
elif distribution_type == DistributionType.Weibull:
return weibull
elif distribution_type == DistributionType.GeneralWeibull:
return gen_weibull
else:
raise NotImplementedError(distribution_type)
def get_param_by_mean(distribution_type: DistributionType, component_number: int, mean_values: Iterable):
assert len(mean_values) == component_number
param_count = get_param_count(distribution_type)
func_params = get_params(distribution_type, component_number)
param_values = list(get_defaults(func_params))
if distribution_type == DistributionType.Normal:
for i in range(component_number):
# for normal distribution
# only change the loaction param (first param of each component)
param_values[i*param_count] = mean_values[i]
elif distribution_type == DistributionType.Weibull:
for i in range(component_number):
beta = param_values[i*param_count]
param_values[i*param_count+1] = mean_values[i] / gamma(1/beta+1)
elif distribution_type == DistributionType.GeneralWeibull:
for i in range(component_number):
mu = param_values[i*param_count]
beta = param_values[i*param_count+1]
param_values[i*param_count+2] = (mean_values[i]-mu) / gamma(1/beta+1)
else:
raise NotImplementedError(distribution_type)
return tuple(param_values)
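# Illustrative call (not part of the original module): place two Weibull
# components at mean grain sizes of 20 and 60 (same units as the x axis);
# eta of each component is back-computed from its default beta.
def _example_params_by_mean():
    return get_param_by_mean(DistributionType.Weibull, 2, [20.0, 60.0])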
class AlgorithmData:
__cache = weakref.WeakValueDictionary()
__cache_lock = Lock()
def __init__(self, distribution_type: DistributionType, component_number: int):
check_component_number(component_number)
self.__distribution_type = distribution_type
self.__component_number = component_number
self.__param_count = get_param_count(self.distribution_type)
self.__param_names = get_param_names(self.distribution_type)
self.__single_func = get_single_func(distribution_type)
self.__lambda_str = get_lambda_str(distribution_type, component_number)
self.__mixed_func = self.__get_func_by_lambda_str(self.__lambda_str)
self.__func_params = get_params(distribution_type, component_number)
self.__bounds = get_bounds(self.__func_params)
self.__defaults = get_defaults(self.__func_params)
self.__constrains = get_constrains(component_number)
self.__get_statistic_func()
def __get_func_by_lambda_str(self, lambda_str: str) -> Callable:
local_params = {"__tempMixedFunc": None}
exec("__tempMixedFunc=" + lambda_str, None, local_params)
mixed_func = local_params["__tempMixedFunc"]
return mixed_func
def __get_statistic_func(self):
if self.distribution_type == DistributionType.Normal:
self.__mean = normal_mean
self.__median = normal_median
self.__mode = normal_mode
self.__standard_deviation = normal_standard_deviation
self.__variance = normal_variance
self.__skewness = normal_skewness
self.__kurtosis = normal_kurtosis
elif self.distribution_type == DistributionType.Weibull:
self.__mean = weibull_mean
self.__median = weibull_median
self.__mode = weibull_mode
self.__standard_deviation = weibull_standard_deviation
self.__variance = weibull_variance
self.__skewness = weibull_skewness
self.__kurtosis = weibull_kurtosis
elif self.distribution_type == DistributionType.GeneralWeibull:
self.__mean = gen_weibull_mean
self.__median = gen_weibull_median
self.__mode = gen_weibull_mode
self.__standard_deviation = gen_weibull_standard_deviation
self.__variance = gen_weibull_variance
self.__skewness = gen_weibull_skewness
self.__kurtosis = gen_weibull_kurtosis
else:
raise NotImplementedError(self.distribution_type)
@property
def distribution_type(self) -> DistributionType:
return self.__distribution_type
@property
def component_number(self) -> int:
return self.__component_number
@property
def param_count(self) -> int:
return self.__param_count
@property
def param_names(self) -> Tuple[str]:
return self.__param_names
@property
def single_func(self) -> Callable:
return self.__single_func
@property
def mixed_func(self) -> Callable:
return self.__mixed_func
@property
def bounds(self) -> Tuple[Tuple]:
return self.__bounds
@property
def defaults(self) -> Tuple[float]:
return self.__defaults
@property
def constrains(self) -> Tuple[Dict]:
return self.__constrains
@property
def mean(self) -> Callable:
return self.__mean
@property
def median(self) -> Callable:
return self.__median
@property
def mode(self) -> Callable:
return self.__mode
@property
def variance(self) -> Callable:
return self.__variance
@property
def standard_deviation(self) -> Callable:
return self.__standard_deviation
@property
def skewness(self) -> Callable:
return self.__skewness
@property
def kurtosis(self) -> Callable:
return self.__kurtosis
@classmethod
def get_algorithm_data(cls, distribution_type: DistributionType,
component_number: int):
cls.__cache_lock.acquire()
key = (distribution_type, component_number)
if key in cls.__cache:
data = cls.__cache[key]
else:
data = AlgorithmData(distribution_type, component_number)
cls.__cache[key] = data
cls.__cache_lock.release()
return data
def process_params(self, fitted_params: Iterable, x_offset: float) -> Tuple[Tuple[Tuple, float]]:
params_copy = | np.array(fitted_params) | numpy.array |
import numpy as np
def random_policy(env):
counter = 0
total_rewards = 0
reward = None
rewardTracker = []
while reward != 1:
env.render()
state, reward, done, info = env.step(env.action_space.sample())
total_rewards += reward
if done:
rewardTracker.append(total_rewards)
env.reset()
counter += 1
print("Solved in {} Steps with a average return of {}".format(counter, sum(rewardTracker) / len(rewardTracker)))
def epsilon_greedy(env, epsilon, Q, state, episode):
n_actions = env.action_space.n
if np.random.rand() > epsilon:
# adding a noise to the best action from Q
action = np.argmax(Q[state, :] + | np.random.randn(1, n_actions) | numpy.random.randn |
import warnings
import astropy.units as u
import numpy as np
import pytest
from numpy.testing import assert_allclose
from einsteinpy.metric import Schwarzschild, Kerr, KerrNewman
from einsteinpy.coordinates import CartesianConversion
from einsteinpy.coordinates.utils import four_position, stacked_vec
from einsteinpy.geodesic import Geodesic
from einsteinpy import constant
_c = constant.c.value
_G = constant.G.value
_Cc = constant.coulombs_const.value
def test_str_repr():
"""
Tests, if the ``__str__`` and ``__repr__`` messages match
"""
t = 0.
M = 1e25
x_vec = np.array([306., np.pi / 2, np.pi / 2])
v_vec = np.array([0., 0.01, 10.])
ms_cov = Schwarzschild(M=M)
x_4vec = four_position(t, x_vec)
ms_cov_mat = ms_cov.metric_covariant(x_4vec)
init_vec = stacked_vec(ms_cov_mat, t, x_vec, v_vec, time_like=True)
end_lambda = 1.
step_size = 0.4e-6
geod = Geodesic(
metric=ms_cov,
init_vec=init_vec,
end_lambda=end_lambda,
step_size=step_size
)
assert str(geod) == repr(geod)
@pytest.fixture()
def dummy_data():
M = 6e24
t = 0.
x_vec = np.array([130.0, np.pi / 2, -np.pi / 8])
v_vec = np.array([0.0, 0.0, 1900.0])
metric = Schwarzschild(M=M)
x_4vec = four_position(t, x_vec)
metric_mat = metric.metric_covariant(x_4vec)
init_vec = stacked_vec(metric_mat, t, x_vec, v_vec, time_like=True)
end_lambda = 0.002
step_size = 5e-8
return metric, init_vec, end_lambda, step_size
def test_Geodesics_has_trajectory(dummy_data):
metric, init_vec, end_lambda, step_size = dummy_data
geo = Geodesic(
metric=metric,
init_vec=init_vec,
end_lambda=end_lambda,
step_size=step_size
)
assert isinstance(geo.trajectory, np.ndarray)
@pytest.mark.parametrize(
"x_vec, v_vec, t, M, end_lambda, step_size",
[
(
np.array([306., np.pi / 2, np.pi / 2]),
np.array([0., 0., 951.]),
0.,
4e24,
0.002,
0.5e-6,
),
(
np.array([1e3, 0.15, np.pi / 2]),
np.array([0.1 * _c, 0.5e-5 * _c, 0.5e-4 * _c]),
0.,
5.972e24,
0.0001,
0.5e-6,
),
(
np.array([50e3, np.pi / 2, np.pi / 2]),
np.array([0.1 * _c, 2e-7 * _c, 1e-5]),
0.,
5.972e24,
0.001,
5e-6,
),
],
)
def test_calculate_trajectory_schwarzschild(
x_vec, v_vec, t, M, end_lambda, step_size
):
ms_cov = Schwarzschild(M=M)
x_4vec = four_position(t, x_vec)
ms_cov_mat = ms_cov.metric_covariant(x_4vec)
init_vec = stacked_vec(ms_cov_mat, t, x_vec, v_vec, time_like=True)
geod = Geodesic(
metric=ms_cov,
init_vec=init_vec,
end_lambda=end_lambda,
step_size=step_size,
return_cartesian=False
)
ans = geod.trajectory
testarray = list()
for i in ans:
x = i[:4]
g = ms_cov.metric_covariant(x)
testarray.append(
g[0][0] * (i[4] ** 2) +
g[1][1] * (i[5] ** 2) +
g[2][2] * (i[6] ** 2) +
g[3][3] * (i[7] ** 2)
)
testarray = np.array(testarray, dtype=float)
assert_allclose(testarray, 1., 1e-4)
def test_calculate_trajectory2_schwarzschild():
# based on the revolution of earth around sun
# data from https://en.wikipedia.org/wiki/Earth%27s_orbit
t = 0.
M = 1.989e30
distance_at_perihelion = 147.10e9
speed_at_perihelion = 30290
angular_vel = (speed_at_perihelion / distance_at_perihelion)
x_vec = np.array([distance_at_perihelion, np.pi / 2, 0])
v_vec = np.array([0.0, 0.0, angular_vel])
ms_cov = Schwarzschild(M=M)
x_4vec = four_position(t, x_vec)
ms_cov_mat = ms_cov.metric_covariant(x_4vec)
init_vec = stacked_vec(ms_cov_mat, t, x_vec, v_vec, time_like=True)
end_lambda = 3.154e7
geod = Geodesic(
metric=ms_cov,
init_vec=init_vec,
end_lambda=end_lambda,
step_size=end_lambda / 2e3,
return_cartesian=False
)
ans = geod.trajectory
# velocity should be 29.29 km/s at aphelion(where r is max)
i = np.argmax(ans[:, 1]) # index where radial distance is max
v_aphelion = (((ans[i][1] * ans[i][7]) * (u.m / u.s)).to(u.km / u.s)).value
assert_allclose(v_aphelion, 29.29, rtol=0.01)
def test_calculate_trajectory3_schwarzschild():
# same test as with test_calculate_trajectory2_schwarzschild(),
# but initialized with cartesian coordinates
# and function returning cartesian coordinates
t = 0.
M = 1.989e30
distance_at_perihelion = 147.10e9
speed_at_perihelion = 30290
x_sph = CartesianConversion(
distance_at_perihelion / | np.sqrt(2) | numpy.sqrt |
import numpy as np
class TestFunction:
def __init__(self):
self.dim = 2
self.rand_distortion = | np.random.rand(self.dim) | numpy.random.rand |
from meta_mb.samplers.base import BaseSampler
from meta_mb.samplers.ars_sampler.ars_env_executor import ModelARSEnvExecutor, IterativeARSEnvExecutor
from meta_mb.samplers.vectorized_env_executor import IterativeEnvExecutor, ParallelEnvExecutor
from meta_mb.logger import logger
from meta_mb.utils import utils
from collections import OrderedDict
from pyprind import ProgBar
import numpy as np
import time
import itertools
class ARSSampler(BaseSampler):
"""
Sampler for Meta-RL
Args:
env (meta_mb.meta_envs.base.MetaEnv) : environment object
policy (meta_mb.policies.base.Policy) : policy object
batch_size (int) : number of trajectories per task
meta_batch_size (int) : number of meta tasks
max_path_length (int) : max number of steps per trajectory
envs_per_task (int) : number of meta_envs to run vectorized for each task (influences the memory usage)
"""
def __init__(
self,
env,
policy,
rollouts_per_policy,
num_deltas,
max_path_length,
dynamics_model=None,
n_parallel=1,
vae=None,
):
super(ARSSampler, self).__init__(env, policy, rollouts_per_policy, max_path_length)
self.rollouts_per_policy = rollouts_per_policy
self.num_deltas = num_deltas
self.total_samples = num_deltas * rollouts_per_policy * max_path_length * 2
self.total_timesteps_sampled = 0
self.dynamics_model = dynamics_model
self.vae = vae
# setup vectorized environment
# TODO: Create another vectorized env executor
if dynamics_model is not None:
self.vec_env = ModelARSEnvExecutor(env, dynamics_model, num_deltas, rollouts_per_policy,
max_path_length)
else:
if n_parallel > 1:
self.vec_env = ParallelEnvExecutor(env, n_parallel, 2 * rollouts_per_policy * num_deltas, max_path_length)
else:
self.vec_env = IterativeARSEnvExecutor(env, num_deltas, rollouts_per_policy, max_path_length)
def obtain_samples(self, log=False, log_prefix='', buffer=None):
"""
Collect batch_size trajectories from each task
Args:
log (boolean): whether to log sampling times
log_prefix (str) : prefix for logger
Returns:
(dict) : A dict of paths of size [meta_batch_size] x (batch_size) x [5] x (max_path_length)
"""
# initial setup / preparation
pbar = ProgBar(self.max_path_length)
policy_time, env_time = 0, 0
policy = self.policy
policy.reset(dones=[True] * self.vec_env.num_envs)
# initial reset of meta_envs
obses = self.vec_env.reset(buffer)
time_step = 0
list_observations = []
list_actions = []
list_rewards = []
list_dones = []
mask = np.ones((self.vec_env.num_envs,))
while time_step < self.max_path_length:
# Execute policy
t = time.time()
if self.vae is not None:
obses = np.array(obses)
obses = self.vae.encode(obses)
obses = np.split(obses, self.vec_env.num_envs, axis=0)
if self.dynamics_model is not None:
actions, agent_infos = policy.get_actions_batch(obses, update_filter=False)
else:
obses = np.array(obses)
actions, agent_infos = policy.get_actions_batch(obses, update_filter=True)
policy_time += time.time() - t
# Step environments
t = time.time()
next_obses, rewards, dones, _ = self.vec_env.step(actions)
next_obses, rewards, dones = np.array(next_obses), np.array(rewards), np.array(dones)
rewards *= mask
dones = dones + (1 - mask)
mask *= (1 - dones)
env_time += time.time() - t
list_observations.append(obses)
list_actions.append(actions)
list_rewards.append(rewards)
list_dones.append(dones)
time_step += 1
obses = next_obses
pbar.update(1)
pbar.stop()
self.total_timesteps_sampled += np.sum(1 - np.array(list_dones))
if log:
logger.logkv(log_prefix + "PolicyExecTime", policy_time)
logger.logkv(log_prefix + "EnvExecTime", env_time)
samples_data = dict(observations=np.array(list_observations),
actions=np.array(list_actions),
rewards=np.array(list_rewards),
returns= | np.sum(list_rewards, axis=0) | numpy.sum |
#!/usr/bin/env python
from __future__ import print_function
import os
import argparse
import imp
import numpy as np
from PIL import Image
import tensorflow as tf
tf.enable_eager_execution()
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
from object_detection.utils import label_map_util
import cv2
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--inFile", required=True,
help="path to record file")
ap.add_argument("-l", "--labelFile", default=None,
help="path to labels file 'classes.pbtxt'")
ap.add_argument("-s", dest="scaleFac", type=float, default=3,
help="shrink native resolution factor [3].")
args = vars(ap.parse_args())
def main():
parsedDataset = parse_record(args["inFile"])
categoryIdx = None
if args["labelFile"]:
labelMap = label_map_util.load_labelmap(args["labelFile"])
numClasses = len(labelMap.item)
categories = label_map_util.convert_label_map_to_categories(
labelMap, max_num_classes=numClasses, use_display_name=True)
categoryIdx = label_map_util.create_category_index(categories)
cv2.namedWindow('Frame', cv2.WINDOW_NORMAL)
print("\n\nPress any key to advance to next image or <q> to quit.\n\n")
for example in parsedDataset:
imageDecoded = tf.image.decode_image(example['image/encoded']).numpy()
height = example['image/height'].numpy()
width = example['image/width'].numpy()
filename = example['image/filename'].numpy()
imgFormat = example['image/format'].numpy()
x1norm = tf.sparse_tensor_to_dense(
example['image/object/bbox/xmin'], default_value=0).numpy()
x2norm = tf.sparse_tensor_to_dense(
example['image/object/bbox/xmax'], default_value=0).numpy()
y1norm = tf.sparse_tensor_to_dense(
example['image/object/bbox/ymin'], default_value=0).numpy()
y2norm = tf.sparse_tensor_to_dense(
example['image/object/bbox/ymax'], default_value=0).numpy()
labels = tf.sparse_tensor_to_dense(
example['image/object/class/label'], default_value=0).numpy()
numBoxes = len(labels)
widthScreen = int(width / args["scaleFac"])
heightScreen = int(height / args["scaleFac"])
cv2.resizeWindow('Frame', widthScreen, heightScreen)
image = np.array(imageDecoded, np.uint8)
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
if numBoxes > 0:
x1 = np.int64(x1norm * width)
x2 = | np.int64(x2norm * width) | numpy.int64 |
import numpy as np
import torch
from PIL import Image, ImageDraw
def draw_bbox_batch(images, bbox_sets):
device = images.device
results = []
images = images.cpu().numpy()
images = np.ascontiguousarray(np.transpose(images, (0, 2, 3, 1)), dtype=np.float32)
for image, bbox_set in zip(images, bbox_sets):
for bbox in bbox_set:
if all(bbox == 0):
continue
else:
image = draw_bbox(image, bbox)
results.append(image)
images = np.stack(results, axis=0)
images = | np.transpose(images, (0, 3, 1, 2)) | numpy.transpose |
import multiprocessing as mp
import numpy as np
import os
import queue
from .trac_ik_solver import TracIKSolver
class TracIKProc(mp.Process):
"""
Used for finding ik in parallel.
"""
def __init__(
self,
output_queue,
urdf_file,
base_link,
tip_link,
timeout=0.005,
epsilon=1e-5,
solve_type="Speed",
):
super().__init__()
self.output_queue = output_queue
self.input_queue = mp.Queue()
self.ik_solver = TracIKSolver(
urdf_file,
base_link,
tip_link,
timeout,
epsilon,
solve_type,
)
def _ik(self, ee_pose, qinit, bx, by, bz, brx, bry, brz):
return self.ik_solver.ik(ee_pose, qinit, bx, by, bz, brx, bry, brz)
def _fk(self, q):
return self.ik_solver.fk(q)
def run(self):
while True:
try:
request = self.input_queue.get(timeout=1)
except queue.Empty:
continue
ret = getattr(self, "_" + request[0])(*request[1:-1])
self.output_queue.put((request[-1], ret))
def ik(self, grasp, qinit, bx, by, bz, brx, bry, brz, ind=None):
self.input_queue.put(
("ik", grasp, qinit, bx, by, bz, brx, bry, brz, ind)
)
def fk(self, q, ind=None):
self.input_queue.put(("fk", q, ind))
class MultiTracIKSolver:
def __init__(
self,
urdf_file,
base_link,
tip_link,
timeout=0.005,
epsilon=1e-5,
solve_type="Speed",
num_workers=os.cpu_count(),
):
self.output_queue = mp.Queue()
self.num_workers = num_workers
self.ik_procs = []
if (
not isinstance(self.num_workers, int)
or self.num_workers <= 0
or self.num_workers > os.cpu_count()
):
raise ValueError(
"num_workers must be an integer between "
f"1 and {os.cpu_count()}!"
)
for _ in range(num_workers):
self.ik_procs.append(
TracIKProc(
self.output_queue,
urdf_file,
base_link,
tip_link,
timeout,
epsilon,
solve_type,
)
)
self.ik_procs[-1].daemon = True
self.ik_procs[-1].start()
@property
def joint_limits(self):
"""
Return lower bound limits and upper bound limits for all the joints
in the order of the joint names.
"""
return self.ik_procs[0].ik_solver.joint_limits
@joint_limits.setter
def joint_limits(self, bounds):
try:
lower_bounds, upper_bounds = bounds
except ValueError:
raise ValueError("bounds must be an iterable with two lists")
if len(lower_bounds) != self.number_of_joints:
raise ValueError(
"lower_bounds array size mismatch, input size "
f"{len(lower_bounds):d}, should be {self.number_of_joints:d}"
)
if len(upper_bounds) != self.number_of_joints:
raise ValueError(
"upper_bounds array size mismatch, input size "
f"{len(upper_bounds):d}, should be {self.number_of_joints:d}"
)
for ikp in self.ik_procs:
ikp.ik_solver.joint_limits = bounds
@property
def number_of_joints(self):
return self.ik_procs[0].ik_solver.number_of_joints
@property
def joint_names(self):
return self.ik_procs[0].ik_solver.joint_names
@property
def link_names(self):
return self.ik_procs[0].ik_solver.link_names
# Calculates FK for a vector of cfgs
# (NOTE: this should be vectorized on C++ side)
def fk(self, q):
if not isinstance(q, np.ndarray):
q = | np.asarray(q, dtype=np.float64) | numpy.asarray |
from __future__ import absolute_import, print_function
import numpy as np
import healpy as hp
from copy import deepcopy
from .skymodel import SkyModel
from .instrumentmodel import InstrumentModel
from scipy import stats, linalg
import logging
from collections.abc import Iterable
class MapLike(object) :
"""Map-based likelihood
"""
def __init__(self, config_dict, sky_model,instrument_model) :
"""
Initializes likelihood
Parameters
----------
sky_model: SkyModel
SkyModel object describing all sky components, contains the
SkyModel.fnu method, which is used to calculate the SED.
instrument_model: InstrumentModel
InstrumentModel object describing the instrument's response to the
sky.
config_dict: dictionary
Dictionary containing all the setup information for the likelihood.
This contains frequencies of the data, the data mean, the data
variance, and which spectral parameters to sample.
Fields this dictionary must have are:
- data: data 2 or 3-D array [N_pol,N_pix,N_freq]
- noisevar: noise variance of the data [N_pol,N_pix,N_freq]
- var_pars: which parameters to vary (list(str)).
- fixed_pars: which parameters are fixed (dictionary with fixed values)
- var_prior_mean: array with the mean value of the prior for each
parameter. This value will also be used to initialize any
sampler/minimizer.
- var_prior_width: array with the width of the prior for each parameter.
- var_prior_type: array with the prior type for each parameter. Allowed
values are 'gauss', 'tophat' or 'none'.
"""
self.sky = sky_model
self.inst = instrument_model
self.__dict__.update(config_dict)
self.check_parameters()
if ((self.inst.n_channels!=self.data.shape[-1]) or
(self.inst.n_channels!=self.noisevar.shape[-1])) :
raise ValueError("Data does not conform to instrument parameters")
if self.data.ndim==3 :
shp=self.data.shape
self.n_pol=shp[0]
#Flatten first two dimensions (pol and pix)
self.data=self.data.reshape([shp[0]*shp[1],shp[2]])
self.noisevar=self.noisevar.reshape([shp[0]*shp[1],shp[2]])
else :
self.n_pol=1
self.noiseivar=1./self.noisevar #Inverse variance
self.dataivar=self.data*self.noiseivar #Inverse variance-weighted data
self.npix=len(self.data)
# Set the priors. These can be either gaussian or tophat.
self.var_prior_mean= | np.array(self.var_prior_mean) | numpy.array |
#===============================================================================
# Copyright (c) 2012-2015, GPy authors (see AUTHORS.txt).
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of GPy nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#===============================================================================
import numpy as np
from . import plotting_library as pl
#from .. import gpy_plot
from .plot_util import get_x_y_var, get_free_dims, get_which_data_ycols,\
get_which_data_rows, update_not_existing_kwargs, helper_predict_with_model
def plot_data(self, which_data_rows='all',
which_data_ycols='all', visible_dims=None,
projection='2d', label=None, **plot_kwargs):
"""
Plot the training data
For more than two input dimensions, use fixed_inputs to plot the data points with some of the inputs fixed.
Can plot only part of the data
using which_data_rows and which_data_ycols.
:param which_data_rows: which of the training data to plot (default all)
:type which_data_rows: 'all' or a slice object to slice self.X, self.Y
:param which_data_ycols: when the data has several columns (independent outputs), only plot these
:type which_data_ycols: 'all' or a list of integers
:param visible_dims: an array specifying the input dimensions to plot (maximum two)
:type visible_dims: a numpy array
:param {'2d','3d'} projection: whether to plot in 2d or 3d. This only applies when plotting two dimensional inputs!
:param str label: the label for the plot
:param kwargs plot_kwargs: kwargs for the data plot for the plotting library you are using
:returns list: of plots created.
"""
canvas, plot_kwargs = pl().new_canvas(projection=projection, **plot_kwargs)
plots = _plot_data(self, canvas, which_data_rows, which_data_ycols, visible_dims, projection, label, **plot_kwargs)
return pl().add_to_canvas(canvas, plots)
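# Illustrative sketch (not part of the original module): plot_data is normally
# attached to a GPy model, but since it is a plain module-level function it can
# also be called directly on a fitted model `m` (hypothetical variable), e.g.
# restricting the plot to the first input dimension.
def _example_plot_training_data(m):
    return plot_data(m, visible_dims=[0], label='training data')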
def _plot_data(self, canvas, which_data_rows='all',
which_data_ycols='all', visible_dims=None,
projection='2d', label=None, **plot_kwargs):
ycols = get_which_data_ycols(self, which_data_ycols)
rows = get_which_data_rows(self, which_data_rows)
X, _, Y = get_x_y_var(self)
free_dims = get_free_dims(self, visible_dims, None)
plots = {}
plots['dataplot'] = []
#one dimensional plotting
if len(free_dims) == 1:
for d in ycols:
update_not_existing_kwargs(plot_kwargs, pl().defaults.data_1d) # @UndefinedVariable
plots['dataplot'].append(pl().scatter(canvas, X[rows, free_dims], Y[rows, d], label=label, **plot_kwargs))
#2D plotting
elif len(free_dims) == 2:
if projection=='2d':
for d in ycols:
update_not_existing_kwargs(plot_kwargs, pl().defaults.data_2d) # @UndefinedVariable
plots['dataplot'].append(pl().scatter(canvas, X[rows, free_dims[0]], X[rows, free_dims[1]],
color=Y[rows, d], label=label, **plot_kwargs))
else:
for d in ycols:
update_not_existing_kwargs(plot_kwargs, pl().defaults.data_2d) # @UndefinedVariable
plots['dataplot'].append(pl().scatter(canvas, X[rows, free_dims[0]], X[rows, free_dims[1]],
Z=Y[rows, d], color=Y[rows, d], label=label, **plot_kwargs))
elif len(free_dims) == 0:
pass #Nothing to plot!
else:
raise NotImplementedError("Cannot plot in more then two dimensions")
return plots
def plot_data_error(self, which_data_rows='all',
which_data_ycols='all', visible_dims=None,
projection='2d', label=None, **error_kwargs):
"""
Plot the training data input error.
For more than two input dimensions, use fixed_inputs to plot the data points with some of the inputs fixed.
Can plot only part of the data
using which_data_rows and which_data_ycols.
:param which_data_rows: which of the training data to plot (default all)
:type which_data_rows: 'all' or a slice object to slice self.X, self.Y
:param which_data_ycols: when the data has several columns (independent outputs), only plot these
:type which_data_ycols: 'all' or a list of integers
:param visible_dims: an array specifying the input dimensions to plot (maximum two)
:type visible_dims: a numpy array
:param {'2d','3d'} projection: whether to plot in 2d or 3d. This only applies when plotting two dimensional inputs!
:param dict error_kwargs: kwargs for the error plot for the plotting library you are using
:param str label: the label for the plot
:param kwargs plot_kwargs: kwargs for the data plot for the plotting library you are using
:returns list: of plots created.
"""
canvas, error_kwargs = pl().new_canvas(projection=projection, **error_kwargs)
plots = _plot_data_error(self, canvas, which_data_rows, which_data_ycols, visible_dims, projection, label, **error_kwargs)
return pl().add_to_canvas(canvas, plots)
def _plot_data_error(self, canvas, which_data_rows='all',
which_data_ycols='all', visible_dims=None,
projection='2d', label=None, **error_kwargs):
ycols = get_which_data_ycols(self, which_data_ycols)
rows = get_which_data_rows(self, which_data_rows)
X, X_variance, Y = get_x_y_var(self)
free_dims = get_free_dims(self, visible_dims, None)
plots = {}
if X_variance is not None:
plots['input_error'] = []
#one dimensional plotting
if len(free_dims) == 1:
for d in ycols:
update_not_existing_kwargs(error_kwargs, pl().defaults.xerrorbar)
plots['input_error'].append(pl().xerrorbar(canvas, X[rows, free_dims].flatten(), Y[rows, d].flatten(),
2 * np.sqrt(X_variance[rows, free_dims].flatten()), label=label,
**error_kwargs))
#2D plotting
elif len(free_dims) == 2:
update_not_existing_kwargs(error_kwargs, pl().defaults.xerrorbar) # @UndefinedVariable
plots['input_error'].append(pl().xerrorbar(canvas, X[rows, free_dims[0]].flatten(), X[rows, free_dims[1]].flatten(),
2 * np.sqrt(X_variance[rows, free_dims[0]].flatten()), label=label,
**error_kwargs))
plots['input_error'].append(pl().yerrorbar(canvas, X[rows, free_dims[0]].flatten(), X[rows, free_dims[1]].flatten(),
2 * np.sqrt(X_variance[rows, free_dims[1]].flatten()), label=label,
**error_kwargs))
elif len(free_dims) == 0:
pass #Nothing to plot!
else:
raise NotImplementedError("Cannot plot in more then two dimensions")
return plots
def plot_inducing(self, visible_dims=None, projection='2d', label='inducing', legend=True, **plot_kwargs):
"""
Plot the inducing inputs of a sparse gp model
:param array-like visible_dims: an array specifying the input dimensions to plot (maximum two)
:param kwargs plot_kwargs: keyword arguments for the plotting library
"""
canvas, kwargs = pl().new_canvas(projection=projection, **plot_kwargs)
plots = _plot_inducing(self, canvas, visible_dims, projection, label, **kwargs)
return pl().add_to_canvas(canvas, plots, legend=legend)
def _plot_inducing(self, canvas, visible_dims, projection, label, **plot_kwargs):
if visible_dims is None:
sig_dims = self.get_most_significant_input_dimensions()
visible_dims = [i for i in sig_dims if i is not None]
free_dims = get_free_dims(self, visible_dims, None)
Z = self.Z.values
plots = {}
#one dimensional plotting
if len(free_dims) == 1:
update_not_existing_kwargs(plot_kwargs, pl().defaults.inducing_1d) # @UndefinedVariable
plots['inducing'] = pl().plot_axis_lines(canvas, Z[:, free_dims], label=label, **plot_kwargs)
#2D plotting
elif len(free_dims) == 2 and projection == '3d':
update_not_existing_kwargs(plot_kwargs, pl().defaults.inducing_3d) # @UndefinedVariable
plots['inducing'] = pl().plot_axis_lines(canvas, Z[:, free_dims], label=label, **plot_kwargs)
elif len(free_dims) == 2:
update_not_existing_kwargs(plot_kwargs, pl().defaults.inducing_2d) # @UndefinedVariable
plots['inducing'] = pl().scatter(canvas, Z[:, free_dims[0]], Z[:, free_dims[1]],
label=label, **plot_kwargs)
elif len(free_dims) == 0:
pass #Nothing to plot!
else:
raise NotImplementedError("Cannot plot in more then two dimensions")
return plots
def plot_errorbars_trainset(self, which_data_rows='all',
which_data_ycols='all', fixed_inputs=None,
plot_raw=False, apply_link=False, label=None, projection='2d',
predict_kw=None, **plot_kwargs):
"""
Plot the errorbars of the GP likelihood on the training data.
These are the errorbars after the appropriate
approximations according to the likelihood are done.
This also works for heteroscedastic likelihoods.
Give the Y_metadata in the predict_kw if you need it.
:param which_data_rows: which of the training data to plot (default all)
:type which_data_rows: 'all' or a slice object to slice self.X, self.Y
:param which_data_ycols: when the data has several columns (independent outputs), only plot these
:param fixed_inputs: a list of tuple [(i,v), (i,v)...], specifying that input dimension i should be set to value v.
:type fixed_inputs: a list of tuples
:param dict predict_kwargs: kwargs for the prediction used to predict the right quantiles.
:param kwargs plot_kwargs: kwargs for the data plot for the plotting library you are using
"""
canvas, kwargs = pl().new_canvas(projection=projection, **plot_kwargs)
plots = _plot_errorbars_trainset(self, canvas, which_data_rows, which_data_ycols,
fixed_inputs, plot_raw, apply_link, label, projection, predict_kw, **kwargs)
return pl().add_to_canvas(canvas, plots)
def _plot_errorbars_trainset(self, canvas,
which_data_rows='all', which_data_ycols='all',
fixed_inputs=None,
plot_raw=False, apply_link=False,
label=None, projection='2d', predict_kw=None, **plot_kwargs):
ycols = get_which_data_ycols(self, which_data_ycols)
rows = get_which_data_rows(self, which_data_rows)
X, _, Y = get_x_y_var(self)
if fixed_inputs is None:
fixed_inputs = []
free_dims = get_free_dims(self, None, fixed_inputs)
Xgrid = X.copy()
for i, v in fixed_inputs:
Xgrid[:, i] = v
plots = []
if len(free_dims)<=2 and projection=='2d':
update_not_existing_kwargs(plot_kwargs, pl().defaults.yerrorbar)
if predict_kw is None:
predict_kw = {}
if 'Y_metadata' not in predict_kw:
predict_kw['Y_metadata'] = self.Y_metadata or {}
mu, percs, _ = helper_predict_with_model(self, Xgrid, plot_raw,
apply_link, (2.5, 97.5),
ycols, predict_kw)
if len(free_dims)==1:
for d in ycols:
plots.append(pl().yerrorbar(canvas, X[rows,free_dims[0]], mu[rows,d],
| np.vstack([mu[rows, d] - percs[0][rows, d], percs[1][rows, d] - mu[rows,d]]) | numpy.vstack |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
import numpy as np
import os
import h5py
import subprocess
import shlex
import json
import glob
from .. ops import transform_functions, se3
from sklearn.neighbors import NearestNeighbors
from scipy.spatial.distance import minkowski
def download_modelnet40():
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_DIR = os.path.join(BASE_DIR, os.pardir, 'data')
if not os.path.exists(DATA_DIR):
os.mkdir(DATA_DIR)
if not os.path.exists(os.path.join(DATA_DIR, 'modelnet40_ply_hdf5_2048')):
www = 'https://shapenet.cs.stanford.edu/media/modelnet40_ply_hdf5_2048.zip'
zipfile = os.path.basename(www)
os.system('wget %s; unzip %s' % (www, zipfile))
os.system('mv %s %s' % (zipfile[:-4], DATA_DIR))
os.system('rm %s' % (zipfile))
def load_data(train, use_normals):
if train: partition = 'train'
else: partition = 'test'
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_DIR = os.path.join(BASE_DIR, os.pardir, 'data')
all_data = []
all_label = []
for h5_name in glob.glob(os.path.join(DATA_DIR, 'modelnet40_ply_hdf5_2048', 'ply_data_%s*.h5' % partition)):
f = h5py.File(h5_name, 'r')
if use_normals: data = np.concatenate([f['data'][:], f['normal'][:]], axis=-1).astype('float32')
else: data = f['data'][:].astype('float32')
label = f['label'][:].astype('int64')
f.close()
all_data.append(data)
all_label.append(label)
all_data = np.concatenate(all_data, axis=0)
all_label = np.concatenate(all_label, axis=0)
return all_data, all_label
def deg_to_rad(deg):
return np.pi / 180 * deg
def create_random_transform(dtype, max_rotation_deg, max_translation):
max_rotation = deg_to_rad(max_rotation_deg)
rot = np.random.uniform(-max_rotation, max_rotation, [1, 3])
trans = np.random.uniform(-max_translation, max_translation, [1, 3])
quat = transform_functions.euler_to_quaternion(rot, "xyz")
vec = np.concatenate([quat, trans], axis=1)
vec = torch.tensor(vec, dtype=dtype)
return vec
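# Illustrative sketch (not part of the original module): drawing one random
# rigid transform as a [1, 7] tensor (4 quaternion components followed by a
# 3-vector translation, with the quaternion ordering coming from
# transform_functions.euler_to_quaternion above).
def _example_random_pose():
    pose = create_random_transform(torch.float32, max_rotation_deg=45.0, max_translation=1.0)
    return pose  # shape [1, 7]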
def jitter_pointcloud(pointcloud, sigma=0.04, clip=0.05):
# N, C = pointcloud.shape
sigma = 0.04*np.random.random_sample()
pointcloud += torch.empty(pointcloud.shape).normal_(mean=0, std=sigma).clamp(-clip, clip)
return pointcloud
def farthest_subsample_points(pointcloud1, num_subsampled_points=768):
pointcloud1 = pointcloud1
num_points = pointcloud1.shape[0]
nbrs1 = NearestNeighbors(n_neighbors=num_subsampled_points, algorithm='auto',
metric=lambda x, y: minkowski(x, y)).fit(pointcloud1[:, :3])
random_p1 = np.random.random(size=(1, 3)) + np.array([[500, 500, 500]]) * np.random.choice([1, -1, 1, -1])
idx1 = nbrs1.kneighbors(random_p1, return_distance=False).reshape((num_subsampled_points,))
gt_mask = torch.zeros(num_points).scatter_(0, torch.tensor(idx1), 1)
return pointcloud1[idx1, :], gt_mask
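# Illustrative sketch (not part of the original module): keeping 768 points
# clustered around a random far-away seed, plus a mask marking which of the
# original points survived. `cloud` is a hypothetical (N, 3) point cloud with
# N >= 768.
def _example_partial_cloud(cloud):
    partial, gt_mask = farthest_subsample_points(cloud, num_subsampled_points=768)
    return partial, gt_mask  # partial: (768, 3); gt_mask: (N,) with ones at kept indices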
class UnknownDataTypeError(Exception):
def __init__(self, *args):
if args: self.message = args[0]
else: self.message = 'Datatype not understood for dataset.'
def __str__(self):
return self.message
class ModelNet40Data(Dataset):
def __init__(
self,
train=True,
num_points=1024,
download=True,
randomize_data=False,
use_normals=False
):
super(ModelNet40Data, self).__init__()
if download: download_modelnet40()
self.data, self.labels = load_data(train, use_normals)
if not train: self.shapes = self.read_classes_ModelNet40()
self.num_points = num_points
self.randomize_data = randomize_data
def __getitem__(self, idx):
if self.randomize_data: current_points = self.randomize(idx)
else: current_points = self.data[idx].copy()
current_points = torch.from_numpy(current_points[:self.num_points, :]).float()
label = torch.from_numpy(self.labels[idx]).type(torch.LongTensor)
return current_points, label
def __len__(self):
return self.data.shape[0]
def randomize(self, idx):
pt_idxs = | np.arange(0, self.num_points) | numpy.arange |
# Copyright (c) 2020 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# imports
import argparse
from pathlib import Path
import numpy as np
import matplotlib.pyplot as plt
import osm_helpers
import file_helpers
# parameters
PLT_COLORMAP = 'hot' # matplotlib color map (from https://matplotlib.org/examples/color/colormaps_reference.html)
def box_filter(image, w_box) -> np.array:
"""
Return the image filtered with a box filter
:param image: Image to filter
:param w_box: Width of the square box kernel, in pixels
:return: The box-filtered image (computed via FFT convolution)
"""
box = np.ones((w_box, w_box)) / (w_box ** 2)
image_fft = np.fft.rfft2(image)
box_fft = np.fft.rfft2(box, s=image.shape)
image = np.fft.irfft2(image_fft * box_fft)
return image
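# Illustrative sketch (not part of the original script): smoothing a grayscale
# array with a 5x5 box kernel; for this input size the filtered result has the
# same shape as the input.
def _example_box_filter():
    demo = np.random.rand(256, 256)
    smoothed = box_filter(demo, w_box=5)
    return smoothed.shape  # (256, 256)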
def create_supertile(x_tile_min, x_tile_max, y_tile_min, y_tile_max, zoom) -> np.array:
"""
Method that stitches together all the tiles needed for the heatmap and generates one "supertile" from it.
Tiles are downloaded, if they are not in the tiles folder.
:param x_tile_min: Minimum x tile value
:param x_tile_max: Maximum x tile value
:param y_tile_min: Minimum y tile value
:param y_tile_max: Maximum y tile value
:param zoom: The zoom level at which to download the tiles. This strongly impacts the resolution and the number of
necessary tiles
:return: An array with the image data of the supertile
"""
supertile_size = ((y_tile_max - y_tile_min + 1) * osm_helpers.OSM_TILE_SIZE,
(x_tile_max - x_tile_min + 1) * osm_helpers.OSM_TILE_SIZE, 3)
supertile = np.zeros(supertile_size)
for x in range(x_tile_min, x_tile_max + 1):
for y in range(y_tile_min, y_tile_max + 1):
tile_filename = 'tiles/tile_' + str(zoom) + '_' + str(x) + '_' + str(y) + '.png'
tile = plt.imread(tile_filename) # float ([0,1])
i = y - y_tile_min
j = x - x_tile_min
# fill supertile with tile image
supertile[i * osm_helpers.OSM_TILE_SIZE:i * osm_helpers.OSM_TILE_SIZE + osm_helpers.OSM_TILE_SIZE,
j * osm_helpers.OSM_TILE_SIZE:j * osm_helpers.OSM_TILE_SIZE + osm_helpers.OSM_TILE_SIZE,
:] = tile[:, :, :3]
# convert super_tile to grayscale and invert colors
# convert to 1 channel grayscale image
supertile = 0.2126 * supertile[:, :, 0] + 0.7152 * supertile[:, :, 1] + 0.0722 * supertile[:, :, 2]
supertile = 1 - supertile # invert colors
# convert back to 3 channels image
supertile = np.dstack((supertile, supertile, supertile))
return supertile
def create_heatmap(lat_lon_data, nr_activities, lat_bound_min=-90, lat_bound_max=90,
lon_bound_min=-180, lon_bound_max=180, heatmap_zoom=10, sigma_pixels=1, equal=False,
url='https://maps.wikimedia.org/osm-intl') -> np.array:
"""
Creates a heatmap using the data in the `lat_lon_data` array. The underlying tiles are downloaded from OSM and
converted to a dark theme in order to make the map look cool.
In order to get a map that is always centered on the same point and has the same extent regardless of the data, set
`equal` to True. This will then use the minimum and maximum boundary values for the tile downloading and loading.
This way, it is possible to get heatmaps with identical extent for each month (or year, activity, etc.)
:param lat_lon_data: Array containing latitude and longitude values
:param nr_activities: The number of activities that sourced the latitude and longitude values
:param lat_bound_min: Minimum latitude value to include in the heatmap. Default is -90
:param lat_bound_max: Maximum latitude value to include in the heatmap. Default is 90
:param lon_bound_min: Minimum longitude value to include in the heatmap. Default is -180
:param lon_bound_max: Maximum longitude value to include in the heatmap. Default is 180.
:param heatmap_zoom: The OSM zoom level to use. Affects number of tiles and resolution. Default is 10.
:param sigma_pixels: Sigma value used for the binning of points. Default is 1.
:param equal: Set this true to use the tile boundaries as heatmap extent. Default is False
:param url: The tile base url. Default is https://maps.wikimedia.org/osm-intl
:return: An array with the image data of the heatmap on the supertile
"""
# crop data to bounding box
lat_lon_data = lat_lon_data[np.logical_and(lat_lon_data[:, 0] > lat_bound_min,
lat_lon_data[:, 0] < lat_bound_max), :]
lat_lon_data = lat_lon_data[np.logical_and(lat_lon_data[:, 1] > lon_bound_min,
lat_lon_data[:, 1] < lon_bound_max), :]
if equal:
x_tile_min, y_tile_max = osm_helpers.deg2tile_coord(lat_bound_min, lon_bound_min, heatmap_zoom)
x_tile_max, y_tile_min = osm_helpers.deg2tile_coord(lat_bound_max, lon_bound_max, heatmap_zoom)
else:
# find min, max tile x,y coordinates
lat_min = lat_lon_data[:, 0].min()
lat_max = lat_lon_data[:, 0].max()
lon_min = lat_lon_data[:, 1].min()
lon_max = lat_lon_data[:, 1].max()
x_tile_min, y_tile_max = osm_helpers.deg2tile_coord(lat_min, lon_min, heatmap_zoom)
x_tile_max, y_tile_min = osm_helpers.deg2tile_coord(lat_max, lon_max, heatmap_zoom)
osm_helpers.download_tiles_for_area(x_tile_min, x_tile_max, y_tile_min, y_tile_max, heatmap_zoom, url=url)
print('creating heatmap...')
# create supertile
supertile = create_supertile(x_tile_min, x_tile_max, y_tile_min, y_tile_max, heatmap_zoom)
# supertile_size = supertile.size()
# fill trackpoints data
data = np.zeros(supertile.shape[:2])
# add w_pixels (= Gaussian kernel sigma) pixels of padding around the trackpoints for better visualization
w_pixels = int(sigma_pixels)
for k in range(len(lat_lon_data)):
x, y = osm_helpers.deg2xy(lat_lon_data[k, 0], lat_lon_data[k, 1], heatmap_zoom)
i = int( | np.round((y - y_tile_min) * osm_helpers.OSM_TILE_SIZE) | numpy.round |
#!/usr/bin/python3
# Copyright (C) 2020 Intel Corporation
from html import escape
from urllib.parse import parse_qs
from flup.server.fcgi import WSGIServer
import threading
import json
import base64
import os
import time
import sys
import wave
import datetime
import numpy as np
import ctypes
import inferservice_python as rt_api
from fcgi_codec import CTCCodec
import cv2
from shapely.geometry import Polygon
import pyclipper
import math
import copy
import logging
import logging.handlers
import socket
syslog = logging.handlers.SysLogHandler(address='/dev/log')
msgfmt = '%(asctime)s {0} %(name)s[%(process)d]: %(message)s'.format(socket.gethostname())
formatter = logging.Formatter(msgfmt, datefmt='%b %d %H:%M:%S')
syslog.setFormatter(formatter)
logger = logging.getLogger(os.path.basename(sys.argv[0]))
logger.addHandler(syslog)
logger.setLevel(logging.DEBUG)
class formula():
def __init__(self, vocab_file):
assert vocab_file.endswith(".json"), "vocab file must be json file"
with open(vocab_file, "r") as file:
dict_vocab = json.load(file)
dict_vocab['id2sign'] = {int(i): j for i, j in dict_vocab['id2sign'].items()}
self.index = dict_vocab["id2sign"]
def get_formula(self, targets):#get latex formula from index
phrase_formula = []
for target in targets:
if target == 2:
break
phrase_formula.append(
self.index.get(target, "?"))
return " ".join(phrase_formula)
def latex_preprocess_image( image_raw, tgt_shape):
img_h, img_w = image_raw.shape[0:2]
target_height, target_width = tgt_shape
new_h = min(target_height, img_h)
new_w = min(target_width, img_w)
image_raw=image_raw[:new_h, :new_w, :]
image = cv2.copyMakeBorder(image_raw, 0, target_height - img_h,
0, target_width - img_w, cv2.BORDER_CONSTANT,
None, (255,255,255))
return image
def latex_recognizer(latex_crop_list, latex_encode_xml, latex_decode_xml, urlinfo,vocab, formula_result_list):
for img in latex_crop_list:
img_decode = formula.latex_preprocess_image(img, (160, 1400))
img_encode = cv2.imencode('.jpg', img_decode)[1]
pic = list(img_encode)
pics = [pic]
other_pin = rt_api.vectorVecFloat()
#prepare out
dec_states_h = rt_api.vectorFloat()
dec_states_c = rt_api.vectorFloat()
output = rt_api.vectorFloat()
row_enc_out = rt_api.vectorFloat()
out = rt_api.vectorVecFloat()
out.append(dec_states_h)
out.append(dec_states_c)
out.append(output)
out.append(row_enc_out)
res = rt_api.infer_image(pics, 3, other_pin, latex_encode_xml, out, urlinfo)
logits = []
if res == 0:
max_formula_len = 128
dec_states_h = out[0]
dec_states_c = out[1]
output = out[2]
row_enc_out = out[3]
tgt = [[[0]]]
for _ in range(max_formula_len):
decode_model = latex_decode_xml
other_pin = rt_api.vectorVecFloat()
other_pin.append(rt_api.vectorFloat(dec_states_c))
other_pin.append(rt_api.vectorFloat(output))
other_pin.append(rt_api.vectorFloat(row_enc_out))
other_pin.append(rt_api.vectorFloat([tgt[0][0][0]]))
decode_out= rt_api.vectorVecFloat()
decode_out1 = rt_api.vectorFloat()
decode_out2 = rt_api.vectorFloat()
decode_out3 = rt_api.vectorFloat()
decode_out4 = rt_api.vectorFloat()
decode_out.append(decode_out1)
decode_out.append(decode_out2)
decode_out.append(decode_out3)
decode_out.append(decode_out4)
input_data = rt_api.vectorVecFloat()
x_pin1 = rt_api.vectorFloat(dec_states_h)
input_data.append(x_pin1)
input_vecs = rt_api.tripleVecFloat()
input_vecs.append(input_data)
res = rt_api.infer_common(input_vecs, other_pin, decode_model, "OPENVINO", decode_out, urlinfo)
dec_states_h = decode_out[0]
dec_states_c = decode_out[1]
output = decode_out[3]
logit = np.array(decode_out[2]).reshape(1,101)
logits.append(logit)
tgt = np.array([[np.argmax(logit, axis=1)]])
tgt = tgt.tolist()
if tgt[0][0][0] == 2:
break
logits = np.array(logits)
logits = logits.squeeze(axis=1)
targets = np.argmax(logits, axis=1)
formula_result = vocab.get_formula(targets)
formula_result_list.append(formula_result)
class chinese_handwritten():
def get_characters(charlist):
'''Load the character list file (one character per line) and return the characters joined as a single string.'''
with open(charlist, 'r', encoding='utf-8') as f:
return ''.join(line.strip('\n') for line in f)
def handwritten_image_preprocess(image, height, width):
image_ratio = float(image.shape[1]) / float(image.shape[0])
rw = int(height * image_ratio)
if rw <= 2000:
resized_image = cv2.resize(image, (rw, height), interpolation=cv2.INTER_AREA).astype(np.float32)
resized_img = resized_image[None, :, :]
_, rh, rw = resized_img.shape
pad_resized_img = np.pad(resized_img, ((0, 0), (0, height - rh), (0, width - rw)), mode='edge')
else:
image_ratio = width / image.shape[1]
rh = int(image.shape[0] * image_ratio)
resized_img = cv2.resize(image, (width, rh) , interpolation=cv2.INTER_AREA).astype(np.float32)
resized_img = resized_img[None, :, :]
_, rh, rw = resized_img.shape
pad_resized_img = np.pad(resized_img, ((0, 0), (0, height - rh), (0, width - rw)), mode='edge')
return pad_resized_img
def handwritten_recognizer(handwritten_crop_list, model_xml, model_label, urlinfo, handwritten_result_list):
for img in handwritten_crop_list:
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img = chinese_handwritten.handwritten_image_preprocess(img, 96, 2000)
img = img[0]
image_data = cv2.imencode('.jpg',img)[1]
pic = list(image_data)
pics = [pic]
other_pin = rt_api.vectorVecFloat()
out1 = rt_api.vectorFloat()
out = rt_api.vectorVecFloat() ##with opaque and stl_binding
out.append(out1)
res = rt_api.infer_image(pics, 3, other_pin, model_xml, out, urlinfo)
if res == 0:
char_label = chinese_handwritten.get_characters(model_label)
code_ocr = CTCCodec(char_label, 20)
predict = np.array(out[0])
predict = predict.reshape(186,1,4059)
result = code_ocr.decode(predict)
handwritten_result_list.append(result[0])
class ppocr():
def small_rectangle(contour_img):
rectangle = cv2.minAreaRect(contour_img)
left_top, right_top, right_down, left_down = 0, 1, 2, 3
box_points = sorted(list(cv2.boxPoints(rectangle)), key=lambda x: x[0])
if box_points[3][1] > box_points[2][1]:
right_top = 2
right_down = 3
else:
right_top = 3
right_down = 2
if box_points[1][1] > box_points[0][1]:
left_top = 0
left_down = 1
else:
left_top = 1
left_down = 0
rectangle_points = [box_points[left_top], box_points[right_top], box_points[right_down], box_points[left_down]]
return rectangle_points, min(rectangle[1])
def rectangle_score(bit_img, _rectangle):
rectangle = _rectangle.copy()
h, w = bit_img.shape[:2]
w_min = np.clip(np.floor(rectangle[:, 0].min()).astype(int), 0, w - 1)
h_min = np.clip(np.floor(rectangle[:, 1].min()).astype(int), 0, h - 1)
w_max = np.clip(np.ceil(rectangle[:, 0].max()).astype(int), 0, w - 1)
h_max = np.clip(np.ceil(rectangle[:, 1].max()).astype(int), 0, h - 1)
rectangle[:, 0] = rectangle[:, 0] - w_min
rectangle[:, 1] = rectangle[:, 1] - h_min
mask_img = np.zeros((h_max - h_min + 1, w_max - w_min + 1), dtype=np.uint8)
cv2.fillPoly(mask_img, rectangle.reshape(1, -1, 2).astype(np.int32), 1)
return cv2.mean(bit_img[h_min:h_max + 1, w_min:w_max + 1], mask_img)[0]
def large_rectangle(rectangle):
enlarge_ratio = 1
pco = pyclipper.PyclipperOffset()
poly = Polygon(rectangle)
length = poly.length
area = poly.area
ratio = area * enlarge_ratio / length
pco.AddPath(rectangle, pyclipper.JT_ROUND, pyclipper.ET_CLOSEDPOLYGON)
expanded_box = np.array(pco.Execute(ratio))
return expanded_box
def bit_img_boxes(predict, _bit_img, ori_width, ori_height):
max_candidates = 1000
bit_img = _bit_img
height, width = bit_img.shape
contours = cv2.findContours((bit_img * 255).astype(np.uint8), cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
contours_num = len(contours)
if contours_num == 2:
new_contours, _ = contours[0], contours[1]
elif contours_num == 3:
img, new_contours, _ = contours[0], contours[1], contours[2]
contours_num = min(contours_num, max_candidates)
boxes =[]
box_scores =[]
for contour in new_contours:
box, s_height = ppocr.small_rectangle(contour)
min_size = 3
if s_height < min_size:
continue
box = np.array(box)
box_score = ppocr.rectangle_score(predict, box.reshape(-1, 2))
if box_score < 0.5:
continue
large_box = ppocr.large_rectangle(box).reshape(-1, 1, 2)
large_box, s_height = ppocr.small_rectangle(large_box)
if s_height < min_size+2:
continue
large_box = np.array(large_box)
w = np.round(large_box[:, 0] / width * ori_width)
h = np.round(large_box[:, 1] / height * ori_height)
large_box[:, 0] = np.clip(w, 0, ori_width)
large_box[:, 1] = np.clip(h, 0, ori_height)
boxes.append(large_box.astype(np.int16))
box_scores.append(box_score)
boxes = | np.array(boxes, dtype=np.int16) | numpy.array |
# https://github.com/sunset1995/py360convert
import numpy as np
from scipy.ndimage import map_coordinates
def xyzcube(face_w):
'''
Return the xyz coordinates of the unit cube in [F R B L U D] format.
'''
out = np.zeros((face_w, face_w * 6, 3), np.float32)
rng = np.linspace(-0.5, 0.5, num=face_w, dtype=np.float32)
grid = np.stack(np.meshgrid(rng, -rng), -1)
# Front face (z = 0.5)
out[:, 0*face_w:1*face_w, [0, 1]] = grid
out[:, 0*face_w:1*face_w, 2] = 0.5
# Right face (x = 0.5)
out[:, 1*face_w:2*face_w, [2, 1]] = grid
out[:, 1*face_w:2*face_w, 0] = 0.5
# Back face (z = -0.5)
out[:, 2*face_w:3*face_w, [0, 1]] = grid
out[:, 2*face_w:3*face_w, 2] = -0.5
# Left face (x = -0.5)
out[:, 3*face_w:4*face_w, [2, 1]] = grid
out[:, 3*face_w:4*face_w, 0] = -0.5
# Up face (y = 0.5)
out[:, 4*face_w:5*face_w, [0, 2]] = grid
out[:, 4*face_w:5*face_w, 1] = 0.5
# Down face (y = -0.5)
out[:, 5*face_w:6*face_w, [0, 2]] = grid
out[:, 5*face_w:6*face_w, 1] = -0.5
return out
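# Illustrative sketch (not part of the original module): the cube map produced
# by xyzcube is a single (face_w, face_w * 6, 3) array with the six faces laid
# out side by side in [Front, Right, Back, Left, Up, Down] order.
def _example_xyzcube_layout(face_w=4):
    xyz = xyzcube(face_w)
    front = xyz[:, 0 * face_w:1 * face_w, :]
    assert xyz.shape == (face_w, face_w * 6, 3)
    assert np.allclose(front[..., 2], 0.5)  # z is fixed at 0.5 on the front face
    return xyz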
def equirect_uvgrid(h, w):
u = np.linspace(-np.pi, np.pi, num=w, dtype=np.float32)
v = np.linspace(np.pi, -np.pi, num=h, dtype=np.float32) / 2
return np.stack(np.meshgrid(u, v), axis=-1)
def equirect_facetype(h, w):
'''
0F 1R 2B 3L 4U 5D
'''
tp = np.roll(np.arange(4).repeat(w // 4)[None, :].repeat(h, 0), 3 * w // 8, 1)
# Prepare ceil mask
mask = | np.zeros((h, w // 4), np.bool) | numpy.zeros |
# Functions for the individual state level model with random censoring, common
# variance and negative binomial counts for the number of states for each
# peptide.
import bz2
import copy
import cPickle
import gzip
import itertools
import h5py
import numpy as np
from scipy import special
from scipy import optimize
from scipy import stats
import fast_agg
#==============================================================================
# Useful constants
#==============================================================================
EPS = np.spacing(1)
#==============================================================================
# Exceptions
#==============================================================================
class Error(Exception):
'''
Base class for errors in lib.
'''
pass
class BisectionError(Error):
'''
Exception class for errors particular to bisection algorithms.
'''
#==============================================================================
# Densities and probabilities
#==============================================================================
def dnorm(x, mu=0, sigmasq=1, log=False):
'''
Gaussian density parameterized by mean and variance.
Syntax mirrors R.
'''
ld = -0.5 * (np.log(2. * np.pi) + np.log(sigmasq)) - (x - mu) ** 2 / \
2. / sigmasq
if log:
return ld
else:
return np.exp(ld)
def dlnorm(x, mu=0, sigmasq=1, log=False):
'''
Density function for log-normal, parameterized by mean and variance of
log(x). Syntax mirrors R.
'''
ld = dnorm(np.log(x), mu, sigmasq, log=True) - np.log(x)
if log:
return ld
else:
return np.exp(ld)
def p_censored(x, eta_0, eta_1, log=False, glm_link_name="Logit"):
'''
Compute probability of intensity-based censoring.
'''
if glm_link_name == "Probit":
lp = stats.norm.logsf(eta_0 + eta_1 * x)
elif glm_link_name == "Cloglog":
lp = -np.exp(eta_0 + eta_1 * x)
else:
lp = -np.log(1. + np.exp(eta_0 + eta_1 * x))
if log:
return lp
else:
return np.exp(lp)
def p_obs(x, eta_0, eta_1, log=False, glm_link_name="Logit"):
'''
Compute 1 - probability of intensity-based censoring.
'''
if glm_link_name == "Probit":
lp = stats.norm.logcdf(eta_0 + eta_1 * x)
elif glm_link_name == "Cloglog":
lp = np.log(1. - np.exp(-np.exp(eta_0 + eta_1 * x)))
else:
lp = -np.log(1. + np.exp(-eta_0 - eta_1 * x))
if log:
return lp
else:
return np.exp(lp)
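# Illustrative sketch (not part of the original module): for each of the three
# link functions, p_censored and p_obs are complementary probabilities; with
# the default logistic link, for example, they sum to one up to floating-point
# error.
def _example_censoring_probabilities(x=1.0, eta_0=-0.5, eta_1=0.3):
    return p_censored(x, eta_0, eta_1) + p_obs(x, eta_0, eta_1)  # ~1.0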
def dcensored(x, mu, sigmasq, eta_0, eta_1, log=False, glm_link_name="Logit"):
'''
Unnormalized density function for censored log-intensities.
Integrates to p_censored.
'''
ld = dnorm(x, mu, sigmasq, log=True) + \
p_censored(x, eta_0, eta_1, log=True, glm_link_name=glm_link_name)
if log:
return ld
else:
return np.exp(ld)
def dobs(x, mu, sigmasq, eta_0, eta_1, log=False, glm_link_name="Logit"):
'''
Unnormalized density function for observed log-intensities.
Integrates to p_obs.
'''
ld = dnorm(x, mu, sigmasq, log=True) + \
p_obs(x, eta_0, eta_1, log=True, glm_link_name=glm_link_name)
if log:
return ld
else:
return np.exp(ld)
def dt(x, mu=0., scale=1., df=1., log=False):
'''
Normalized t density function with location parameter mu and scale parameter
scale.
'''
ld = -(df + 1.) / 2. * np.log(1. + (x - mu) ** 2 / scale ** 2 / df)
ld -= 0.5 * np.log(np.pi * df) + np.log(scale)
ld += special.gammaln((df + 1.) / 2.) - special.gammaln(df / 2.)
if log:
return ld
return np.exp(ld)
def densityratio(x, eta_0, eta_1, mu, sigmasq, approx_sd, y_hat, propDf,
normalizing_cnst, log=False, glm_link_name="Logit"):
'''
Target-proposal ratio for censored intensity rejection sampler.
'''
ld = dcensored(x, mu, sigmasq, eta_0, eta_1, log=True,
glm_link_name=glm_link_name)
ld -= dt(x, mu=y_hat, scale=approx_sd, df=propDf, log=True)
ld += np.log(normalizing_cnst)
if log:
return ld
return np.exp(ld)
def dgamma(x, shape=1., rate=1., log=False):
'''
Normalized gamma density function, parameterized by shape and rate.
'''
ld = np.log(x) * (shape - 1.) - rate * x
ld += shape * np.log(rate) - special.gammaln(shape)
if log:
return ld
return np.exp(ld)
def lp_profile_gamma(shape, x, log=False, prior_shape=1., prior_rate=0.,
prior_mean_log=0., prior_prec_log=0.):
'''
Compute profile log-posterior of shape parameter for gamma likelihood.
Assuming conditionally-conjugate gamma prior on observation distribution's
rate parameter with given parameters.
Also using log-normal prior on shape parameter itself with given log-mean
and precision.
If log, compute log-posterior for log(shape) and log(rate)
Returns a float with the profile log-posterior.
'''
n = np.size(x)
# Compute conditional posterior mode of rate parameter
rate_hat = (
shape + (prior_shape - 1. + log) / n) / (np.mean(x) + prior_rate / n)
# Evaluate log-posterior at conditional mode
lp = np.sum(dgamma(x, shape=shape, rate=rate_hat, log=True))
# Add prior for rate parameter
lp += dgamma(rate_hat, shape=prior_shape, rate=prior_rate, log=True)
# Add prior for shape parameter
lp += dlnorm(shape, mu=prior_mean_log,
sigmasq=1. / np.float64(prior_prec_log), log=True)
if log:
# Add Jacobians
lp += 1. / shape + 1. / rate_hat
return lp
def dnbinom(x, r, p, log=False):
'''
Normalized PMF for negative binomial distribution. Parameterized s.t.
x >= 0, expectation is p*r/(1-p); variance is p*r/(1-p)**2.
Syntax mirrors R.
'''
ld = (np.log(p) * x + np.log(1 - p) * r + special.gammaln(x + r) -
special.gammaln(x + 1) - special.gammaln(r))
if log:
return ld
return np.exp(ld)
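# Illustrative sketch (not part of the original module): quick moment check for
# the negative binomial parameterization used above (support x >= 0, mean
# p*r/(1-p), variance p*r/(1-p)**2), computed by brute-force summation of the
# PMF.
def _example_nbinom_moments(r=5.0, p=0.3):
    x = np.arange(0, 1000)
    pmf = dnbinom(x, r, p)
    mean = np.sum(x * pmf)
    var = np.sum((x - mean) ** 2 * pmf)
    return mean, var  # ~ (p*r/(1-p), p*r/(1-p)**2)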
def dbeta(x, a, b, log=False):
'''
Normalized PDF for beta distribution. Syntax mirrors R.
'''
ld = np.log(
x) * (a - 1.) + np.log(1. - x) * (b - 1.) - special.betaln(a, b)
if log:
return ld
return np.exp(ld)
#==============================================================================
# Useful derivatives; primarily used in mode-finding routines
#==============================================================================
def deriv_logdt(x, mu=0, scale=1, df=1.):
deriv = -(df + 1.) / (1. + (x - mu) ** 2 / scale ** 2 / df)
deriv *= (x - mu) / scale ** 2 / df
return deriv
def deriv_logdcensored(x, mu, sigmasq, eta_0, eta_1, glm_link_name="Logit"):
linpred = eta_0 + eta_1 * x
if glm_link_name == "Probit":
sf_adj = np.maximum(stats.norm.sf(linpred), EPS)
pdf = stats.norm.pdf(linpred)
deriv = -pdf / sf_adj * eta_1
elif glm_link_name == "Cloglog":
deriv = -eta_1 * np.exp(linpred)
else:
deriv = (-1. + 1. / (1. + np.exp(linpred))) * eta_1
deriv += - (x - mu) / sigmasq
return deriv
def deriv2_logdcensored(x, mu, sigmasq, eta_0, eta_1, glm_link_name="Logit"):
linpred = eta_0 + eta_1 * x
if glm_link_name == "Probit":
sf_adj = np.maximum(stats.norm.sf(linpred), EPS)
pdf = stats.norm.pdf(linpred)
deriv2 = eta_1**2 * pdf / sf_adj * (linpred - pdf / sf_adj)
elif glm_link_name == "Cloglog":
deriv2 = -eta_1**2 * np.exp(linpred)
else:
deriv2 = - (eta_1 ** 2 * np.exp(linpred)) / (1. + np.exp(linpred))**2
deriv2 += -1. / sigmasq
return deriv2
def deriv3_logdcensored(x, mu, sigmasq, eta_0, eta_1, glm_link_name="Logit"):
linpred = eta_0 + eta_1 * x
if glm_link_name == "Probit":
#deriv1 = deriv_logdcensored(x, mu, sigmasq, eta_0, eta_1, glm_link_name)
#deriv2 = deriv2_logdcensored(x, mu, sigmasq, eta_0, eta_1,
# glm_link_name)
#deriv3 = -deriv2 * (eta_1 * linpred + 2 * deriv1) - eta_1**2 * deriv1
deriv3 = None
elif glm_link_name == "Cloglog":
deriv3 = -eta_1**3 * np.exp(linpred)
else:
deriv3 = ((2. * eta_1 ** 3 * np.exp(2. * linpred)) /
(1. + np.exp(linpred)) ** 3
- (eta_1 ** 3 * np.exp(linpred)) /
(1. + np.exp(linpred)) ** 2)
return deriv3
def deriv_logdensityratio(x, eta_0, eta_1, mu, sigmasq, approx_sd, y_hat,
propDf, glm_link_name="Logit"):
'''
First derivative of the log target-proposal ratio for censored intensity
rejection sampler.
'''
deriv = deriv_logdcensored(x, mu, sigmasq, eta_0, eta_1,
glm_link_name=glm_link_name)
deriv -= deriv_logdt(x, mu=y_hat, scale=approx_sd, df=propDf)
return deriv
def score_profile_posterior_gamma(shape, x, T=None, log=False,
prior_shape=1., prior_rate=0.,
prior_mean_log=0., prior_prec_log=0.,
prior_adj=1.):
'''
Profile posterior score for shape parameter of gamma distribution.
If log, compute score for log(shape) instead.
Assumes a conjugate gamma prior on the rate parameter and an independent
log-normal prior on the shape parameter, each with the given parameters.
Returns a float with the desired score.
'''
# Extract sufficient statistics if needed
if T is None:
# Sufficient statistics are (sum x, sum log x, and n)
T = np.array([np.sum(x), np.sum(np.log(x)), np.size(x)])
n = T[2]
# Compute conditional posterior mode of rate parameter
rate_hat = ((shape + (prior_shape - 1. + log) / n / prior_adj) /
(T[0]/n + prior_rate / n / prior_adj))
# Compute score for untransformed shape parameter
score = (T[1] - n * special.polygamma(0, shape) + n * np.log(rate_hat) -
prior_prec_log * (np.log(shape) - prior_mean_log) / shape /
prior_adj - 1. / shape / prior_adj)
# Handle log transformation of parameters via simple chain rule
if log:
# Add Jacobian term
score += 1. / shape / prior_adj
# Compute derivative of untransformed parameters wrt transformed ones
deriv = shape
# Update information using chain rule
score *= deriv
return score
def info_posterior_gamma(shape, rate, x, T=None, log=False,
prior_shape=1., prior_rate=0.,
prior_mean_log=0., prior_prec_log=0.,
prior_adj=1.):
'''
Compute posterior information for shape and rate parameters of gamma
distribution.
If log, compute information for log(shape) and log(rate) instead.
This is typically more useful, as the normal approximation holds much better
on the log scale.
Assumes a conjugate gamma prior on the rate parameter and an independent
log-normal prior on the shape parameter, each with the given parameters.
Returns a 2x2 np.ndarray for which the first {row,column} corresponds to the
shape parameter and the second corresponds to the rate parameter.
'''
# Extract sufficient statistics if needed
if T is None:
# Sufficient statistics are (sum x, sum log x, and n)
T = np.array([np.sum(x), np.sum(np.log(x)), np.size(x)])
n = T[2]
# Compute observed information for untransformed parameters
info = np.zeros((2, 2))
# shape, shape
info[0, 0] = (n * special.polygamma(1, shape) -
1 / shape ** 2 * (1 + prior_prec_log *
(np.log(shape) - prior_mean_log - 1.)) /
prior_adj)
# rate, rate
info[1, 1] = (n * shape + (prior_shape - 1.) / prior_adj) / rate ** 2
# shape, rate and rate, shape
info[0, 1] = info[1, 0] = -n / rate
# Handle log transformation of parameters via simple chain rule
if log:
# Add Jacobian terms
info[0, 0] += 1. / shape ** 2 / prior_adj
info[1, 1] += 1. / rate ** 2 / prior_adj
# Compute gradient for log-likelihood wrt untransformed parameters
grad = np.array([-n * np.log(rate) + n * special.polygamma(0, shape) -
T[1] + prior_prec_log / prior_adj *
(np.log(shape) - prior_mean_log) / shape + 1. / shape -
log * 1. / shape,
-(n * shape + (prior_shape - 1.) / prior_adj) / rate +
T[0] + prior_rate / prior_adj - log * 1. / rate])
# Compute derivatives of untransformed parameters wrt transformed ones
deriv = np.array([shape, rate])
deriv2 = deriv
# Update information using chain rule
info = info * deriv
info = (info.T * deriv).T
np.fill_diagonal(info, info.diagonal() + deriv2 * grad)
return info
def info_profile_posterior_gamma(shape, x, log=False,
prior_shape=1., prior_rate=0.,
prior_mean_log=0., prior_prec_log=0.):
'''
Compute profile posterior information for shape parameter of gamma
distribution.
If log, compute information for log(shape) instead.
This is typically more useful, as the normal approximation holds much better
on the log scale.
Assumes a conjugate gamma prior on the rate parameter and an independent
log-normal prior on the shape parameter, each with the given parameters.
Returns a float with the desired information.
'''
n = np.size(x)
# Compute information for untransformed shape parameter
info = (n * special.polygamma(1, shape) - n / (shape + (prior_shape - 1.) / n) -
1 / shape ** 2 * (1 + prior_prec_log * (np.log(shape) - prior_mean_log - 1)))
# Handle log transformation of parameters via simple chain rule
if log:
# Compute conditional posterior mode of rate parameter
rate_hat = ((shape + (prior_shape - 1. + log) / n) /
(np.mean(x) + prior_rate / n))
# Compute gradient for log-likelihood wrt untransformed parameters
grad = (-np.sum(np.log(x)) + n * special.polygamma(0, shape) -
n * np.log(rate_hat) +
prior_prec_log * (np.log(shape) - prior_mean_log) / shape + 1. / shape -
log * 1. / shape)
# Compute derivatives of untransformed parameters wrt transformed ones
deriv = shape
deriv2 = deriv
# Update information using chain rule
info = info * deriv ** 2
info += deriv2 * grad
return info
def score_profile_posterior_nbinom(r, x, transform=False,
prior_a=1., prior_b=1.,
prior_mean_log=0., prior_prec_log=0.,
prior_adj=1.):
'''
Profile posterior score for r (convolution) parameter of negative-binomial
distribution.
If transform, compute profile score for log(r) and logit(p) instead.
Assumes a conditionally conjugate beta prior on p and an independent
log-normal prior on r, each with the given parameters.
The entire log-prior is divided by prior_adj. This is useful for
constructing distributed approximations.
Returns a float with the desired score.
'''
# Compute conditional posterior mode of p
n = np.size(x)
A = np.mean(x) + (prior_a - 1. + transform) / n / prior_adj
B = r + (prior_b - 1. + transform) / n / prior_adj
p_hat = A / (A + B)
# Compute score for r
# Likelihood
score = (n * np.log(1. - p_hat) + np.sum(special.polygamma(0, x + r))
- n * special.polygamma(0, r))
# Prior
score += (
-prior_prec_log * (np.log(r) - prior_mean_log) / r - 1. / r) / prior_adj
# Handle log transformation of parameters via simple chain rule
if transform:
# Add Jacobian term
score += 1. / r / prior_adj
# Compute derivative of untransformed parameters wrt transformed ones
deriv = r
# Update information using chain rule
score *= deriv
return score
def score_posterior_nbinom_vec(theta, x, prior_a=1., prior_b=1.,
prior_mean_log=0., prior_prec_log=0.,
prior_adj=1.):
'''
Posterior score for theta = (log r, logit p) parameter of
negative-binomial distribution.
Assumes a conditionally conjugate beta prior on p and an independent
log-normal prior on r, each with the given parameters.
The entire log-prior is divided by prior_adj. This is useful for
constructing distributed approximations.
Returns a 2 x m ndarray with the requested score.
'''
n = np.size(x)
if len(np.shape(theta)) < 2:
theta = theta[:, np.newaxis]
r = np.exp(theta[0])
p = 1. / (1. + np.exp(-theta[1]))
# Compute scores
xpr = np.ones((theta.shape[1], n)) * x
xpr = (xpr.T + r).T
score = np.zeros_like(theta)
score[0] = (np.sum(special.polygamma(0, xpr), 1) - \
n * special.polygamma(0, r) + n * np.log(1. - p)) * r + \
-prior_prec_log * (np.log(r) - prior_mean_log) / prior_adj
score[1] = (-n * r / (1. - p) + np.sum(x) / p +
(prior_a * np.log(p) + prior_b * np.log(1. - p)) /
prior_adj) * p * (1. - p)
return score
def info_posterior_nbinom(r, p, x, transform=False, prior_a=1., prior_b=1.,
prior_mean_log=0., prior_prec_log=0., prior_adj=1.):
'''
Compute posterior information for r (convolution) and p parameters of
negative-binomial distribution.
If transform, compute information for log(r) and logit(p) instead.
This is typically more useful, as the normal approximation holds much better
on the transformed scale.
Assumes a conditionally conjugate beta prior on p and an independent
log-normal prior on r, each with the given parameters.
The entire log-prior is divided by prior_adj. This is useful for
constructing distributed approximations.
Returns a 2x2 np.ndarray for which the first {row,column} corresponds to r
and the second corresponds to p.
'''
# Compute observed information for untransformed parameters
n = np.size(x)
info = np.zeros((2, 2))
# r, r
info[0, 0] = (n * special.polygamma(1, r) - np.sum(special.polygamma(1, x + r))
- 1 / r ** 2 * (1 + prior_prec_log * (np.log(r) - prior_mean_log - 1)) /
prior_adj)
# p, p
info[1, 1] = ((n * r + (prior_b - 1.) / prior_adj) / (1. - p) ** 2 +
(np.sum(x) + (prior_a - 1.) / prior_adj) / p ** 2)
# r, p and p, r
info[0, 1] = info[1, 0] = n / (1. - p)
# Handle log transformation of parameters via simple chain rule
if transform:
# Add Jacobian terms
info[0, 0] += 1. / r ** 2 / prior_adj
info[1, 1] += (1. - 2. * p) / p ** 2 / (1. - p) ** 2 / prior_adj
# Compute gradient for log-likelihood wrt untransformed parameters
grad = np.array([-n * np.log(1. - p) - np.sum(special.polygamma(0, x + r))
+ n * special.polygamma(0, r)
+ (prior_prec_log * (np.log(r) - prior_mean_log) / r + 1. / r -
transform * 1. / r) / prior_adj,
-(np.sum(x) + (prior_a - 1.) / prior_adj) / p +
(n * r + (prior_b - 1.) / prior_adj) / (1. - p) -
transform * 1. / p / (1. - p)])
# Compute derivatives of untransformed parameters wrt transformed ones
deriv = np.array([r, p * (1. - p)])
deriv2 = np.array([r, p * (1. - p) * (2. * p - 1.)])
# Update information using chain rule
info = info * deriv
info = (info.T * deriv).T
np.fill_diagonal(info, info.diagonal() + deriv2 * grad)
return info
def info_profile_posterior_nbinom(r, x, transform=False,
prior_a=1., prior_b=1.,
prior_mean_log=0., prior_prec_log=0.):
'''
Compute profile posterior information for r (convolution) parameter of
negative-binomial distribution.
If transform, compute profile information for log(r) and logit(p) instead.
This is typically more useful, as the normal approximation holds much better
on the transformed scale.
Assumes a conditionally conjugate beta prior on p and an independent
log-normal prior on r, each with the given parameters.
Returns a float with the desired information.
'''
# Compute information for untransformed r
n = np.size(x)
A = np.mean(x) + (prior_a - 1. + transform) / n
B = r + (prior_b - 1. + transform) / n
p_hat = A / (A + B)
info = (n * special.polygamma(1, r) - np.sum(special.polygamma(1, x + r))
- n * p_hat / B
- 1 / r ** 2 * (1 + prior_prec_log * (np.log(r) - prior_mean_log - 1)))
# Handle log transformation of parameters via simple chain rule
if transform:
# Add Jacobian terms
info += 1. / r ** 2
# Compute gradient for log-likelihood wrt untransformed parameters
grad = (-n * np.log(1. - p_hat) - np.sum(special.polygamma(0, x + r))
+ n * special.polygamma(0, r))
grad += prior_prec_log * (np.log(r) - prior_mean_log) / r + 2. / r
# Compute derivatives of untransformed parameters wrt transformed ones
deriv = r
deriv2 = r
# Update information using chain rule
info = info * deriv ** 2
info += deriv2 * grad
return info
#==============================================================================
# RNGs
#==============================================================================
def rmvnorm(n, mu, L):
'''
Draw d x n matrix of multivariate normal RVs with mean vector mu (length d)
and covariance matrix L * L.T.
'''
d = L.shape[0]
z = np.random.randn(d, n)
y = mu + np.dot(L, z)
return y
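# --- Illustrative usage sketch (not part of the original module) ---
# Shows the expected call pattern for rmvnorm above. mu is passed as a
# (d, 1) column vector so it broadcasts against the (d, n) standard normal
# draw; L is a Cholesky factor of the target covariance. All names below
# are made up for the demo.
def _rmvnorm_example():
    cov = np.array([[2.0, 0.3],
                    [0.3, 1.0]])
    L = np.linalg.cholesky(cov)        # cov = L @ L.T
    mu = np.array([[1.0], [-1.0]])     # (d, 1) column of means
    draws = rmvnorm(10000, mu, L)      # shape (2, 10000)
    return np.cov(draws)               # empirical covariance, close to cov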
def rncen(n_obs, p_rnd_cen, p_int_cen, lmbda, r):
'''
Draw ncen | y, censoring probabilities, lambda, r.
Must have n_obs, p_rnd_cen, and p_int_cen as Numpy vectors of same length.
r and lmbda must be scalars.
'''
m = np.size(n_obs)
# Setup vectors for result and stopping indicators
active = np.ones(m, dtype=bool)
n_cen = np.zeros(m, dtype=int)
# Compute probability for geometric component of density
pgeom = 1. - (1. - lmbda) * (p_rnd_cen + (1. - p_rnd_cen) * p_int_cen)
# Compute necessary bound for envelope condition
bound = np.ones(m)
if r < 1:
bound[n_obs > 0] *= (n_obs[n_obs > 0] + r - 1) / n_obs[n_obs > 0]
# Run rejection sampling iterations
nIter = 0
while np.sum(active) > 0:
# Propose from negative binomial distribution
# This is almost correct, modulo the 0 vs. 1 minimum non-conjugacy
prop = np.random.negative_binomial(n_obs[active] + r, pgeom[active],
size=np.sum(active))
# Compute acceptance probability; bog standard
u = np.random.uniform(size=np.sum(active))
pAccept = (
n_obs[active] + prop) / (n_obs[active] + prop + r - 1) * bound[active]
# Always accept for n_obs == 0; in that case, our draw is exact
pAccept[n_obs[active] == 0] = 1.0
# Execute acceptance step and update done indicators
n_cen[active[u < pAccept]] = prop[u < pAccept]
active[active] = u > pAccept
nIter += 1
# Add one to draws for n_obs == 0; needed to meet constraint that all
# peptides exist in at least one state.
n_cen = n_cen + (n_obs == 0)
return n_cen
#==============================================================================
# Optimization and root-finding routines
#==============================================================================
def vectorized_bisection(f, lower, upper, f_args=tuple(), f_kwargs={},
tol=1e-10, maxIter=100, full_output=False):
'''
Find vector of roots of vectorized function using bisection.
f should be a vectorized function that takes arguments x and f_args of
compatible dimensions.
In the iterations, f is called as:
f(x, *f_args, **f_kwargs)
'''
# Initialization
mid = lower / 2. + upper / 2.
error = upper / 2. - lower / 2.
f_lower = f(lower, *f_args, **f_kwargs)
f_upper = f(upper, *f_args, **f_kwargs)
# Check if the starting points are valid
if np.any(np.sign(f_lower) * np.sign(f_upper) > 0):
raise BisectionError(('Not all upper and lower bounds produce function'
' values of different signs.'))
# Iterate until convergence to tolerance
t = 0
while t <= maxIter and error.max() > tol:
# Update function values
f_mid = f(mid, *f_args, **f_kwargs)
# Select direction to move
below = np.sign(f_mid) * np.sign(f_lower) >= 0
above = np.sign(f_mid) * np.sign(f_lower) <= 0
# Update bounds and stored function values
lower[below] = mid[below]
f_lower[below] = f_mid[below]
upper[above] = mid[above]
f_upper[above] = f_mid[above]
# Update midpoint and error
mid = lower / 2. + upper / 2.
error = upper / 2. - lower / 2.
# Update iteration counter
t += 1
if full_output:
return (mid, t)
return mid
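# --- Illustrative usage sketch (not part of the original module) ---
# Finds the cube roots of several targets at once with the vectorized
# bisection routine above. lower and upper must be float arrays of the same
# shape as the targets, bracketing each root with opposite function signs.
def _vectorized_bisection_example():
    targets = np.array([1.0, 8.0, 27.0])
    f = lambda x, c: x ** 3 - c
    roots = vectorized_bisection(f, lower=np.zeros(3), upper=np.full(3, 4.0),
                                 f_args=(targets,), tol=1e-12)
    return roots   # approximately [1., 2., 3.]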
def halley(f, fprime, f2prime, x0, f_args=tuple(), f_kwargs={},
tol=1e-8, maxIter=200, full_output=False):
'''
Implements (vectorized) Halley's method for root finding.
Requires function and its first two derivatives, all with first argument to
search over (x) and the same later arguments (provided via f_args and
f_kwargs). In the iterations, f, fprime, and f2prime are called as:
f(x, *f_args, **f_kwargs)
'''
# Initialization
t = 0
x = copy.deepcopy(x0)
while t < maxIter:
# Evaluate the function and its derivatives
fx = f(x, *f_args, **f_kwargs)
fprimex = fprime(x, *f_args, **f_kwargs)
f2primex = f2prime(x, *f_args, **f_kwargs)
# Update value of x
if f2primex is not None:
x = x - (2. * fx * fprimex) / (2. * fprimex ** 2 - fx * f2primex)
else:
# Fall back to Newton update
x = x - fx / fprimex
# Update iteration counter
t += 1
# Convergence based upon absolute function value
if(max(abs(fx)) < tol):
break
if full_output:
return (x, t)
return x
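# --- Illustrative usage sketch (not part of the original module) ---
# Computes the cube root of 2 with the Halley iteration above. x0 is passed
# as a 1-d array because the convergence test applies max(abs(.)) to the
# function values; the module-level imports the routine relies on (e.g. copy)
# are assumed available, as in the original file.
def _halley_example():
    f = lambda x, c: x ** 3 - c
    fprime = lambda x, c: 3. * x ** 2
    f2prime = lambda x, c: 6. * x
    root = halley(f, fprime, f2prime, x0=np.array([1.5]), f_args=(2.,))
    return root   # approximately [1.2599]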
#==============================================================================
# Numerical integration functions
#==============================================================================
def laplace_approx(f, xhat, info, f_args=tuple(), f_kwargs={}):
'''
Computes Laplace approximation to integral of f over real line.
Takes mode xhat and observed information info as inputs.
Fully compatible with Numpy vector arguments so long as f is.
'''
integral = np.sqrt(2. * np.pi / info) * f(xhat, *f_args, **f_kwargs)
return integral
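# --- Illustrative sketch (not part of the original module) ---
# For an unnormalized Gaussian the Laplace approximation is exact, which
# gives a quick sanity check of the helper above: the integral of
# exp(-x**2 / (2 * s**2)) over the real line is sqrt(2 * pi) * s.
def _laplace_approx_example(s=2.0):
    f = lambda x: np.exp(-x ** 2 / (2. * s ** 2))
    approx = laplace_approx(f, xhat=0.0, info=1. / s ** 2)
    exact = np.sqrt(2. * np.pi) * s
    return approx, exact   # the two values coincide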
#==============================================================================
# Functions for commonly-used MAP estimates
#==============================================================================
def map_estimator_gamma(x, T=None, log=False, prior_shape=1., prior_rate=0.,
prior_mean_log=0., prior_prec_log=0., prior_adj=1.,
brent_scale=6., fallback_upper=10000.):
'''
Maximum a posteriori estimator for shape and rate parameters of gamma
distribution. If log, compute posterior mode for log(shape) and
log(rate) instead.
Assumes a conjugate gamma prior on the rate parameter and an independent
log-normal prior on the shape parameter, each with the given parameters.
Returns a 2-tuple with the MAP estimators for shape and rate.
'''
# Extract sufficient statistics if needed
if T is None:
# Sufficient statistics are (sum 1/variances, sum log 1/variances, and
# n)
T = np.array([np.sum(x), np.sum(np.log(x)), np.size(x)])
# Set upper bound first
if prior_prec_log > 0:
upper = np.exp(prior_mean_log + brent_scale / np.sqrt(prior_prec_log))
else:
upper = fallback_upper
# Verify that score is negative at upper bound
args = (None, T, log, prior_shape, prior_rate, prior_mean_log,
prior_prec_log, prior_adj)
while score_profile_posterior_gamma(upper, *args) > 0:
upper *= 2.
# Use Brent method to find root of score function
shape_hat = optimize.brentq(f=score_profile_posterior_gamma,
a=np.sqrt(EPS), b=upper,
args=args)
# Compute posterior mode of rate
rate_hat = ((shape_hat + (prior_shape - 1. + log) / prior_adj / T[2]) /
(T[0] / T[2] + prior_rate / prior_adj / T[2]))
return (shape_hat, rate_hat)
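# --- Illustrative usage sketch (not part of the original module) ---
# Recovers gamma shape and rate from simulated data with the MAP routine
# above. numpy's gamma sampler is parameterized by scale = 1 / rate. The
# call assumes the module-level names the routine relies on (optimize, EPS)
# are available, as in the original file.
def _map_estimator_gamma_example():
    true_shape, true_rate = 3.0, 2.0
    x = np.random.gamma(true_shape, scale=1. / true_rate, size=5000)
    shape_hat, rate_hat = map_estimator_gamma(x)
    return shape_hat, rate_hat   # both close to (3.0, 2.0)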
def map_estimator_nbinom(x, prior_a=1., prior_b=1., transform=False,
prior_mean_log=0., prior_prec_log=0., prior_adj=1.,
brent_scale=6., fallback_upper=10000.):
'''
Maximum a posteriori estimator for r (convolution) parameter and p parameter
of negative binomial distribution. If transform, compute posterior mode for
log(r) and logit(p) instead.
Assumes a conditionally conjugate beta prior on p and an independent
log-normal prior on r, each with the given parameters.
Returns a 2-tuple with the MAP estimators for r and p.
'''
# Compute posterior mode for r and p using profile log-posterior
n = np.size(x)
# Set upper bound first
if prior_prec_log > 0:
upper = np.exp(prior_mean_log + brent_scale / np.sqrt(prior_prec_log))
else:
upper = fallback_upper
# Verify that score is negative at upper bound
args = (x, transform, prior_a, prior_b, prior_mean_log, prior_prec_log,
prior_adj)
while score_profile_posterior_nbinom(upper, *args) > 0:
upper *= 2.
# Use Brent method to find root of score function
r_hat = optimize.brentq(f=score_profile_posterior_nbinom,
a=np.sqrt(EPS), b=upper,
args=args)
# Compute posterior mode of p
A = np.mean(x) + (prior_a - 1. + transform) / n / prior_adj
B = r_hat + (prior_b - 1. + transform) / n / prior_adj
p_hat = A / (A + B)
return (r_hat, p_hat)
#==============================================================================
# Specialized functions for marginalized missing data draws
#==============================================================================
def characterize_censored_intensity_dist(eta_0, eta_1, mu, sigmasq,
tol=1e-5, maxIter=200, bisectIter=10,
bisectScale=6., glm_link_name="Logit"):
'''
Constructs Gaussian approximation to conditional posterior of censored
intensity likelihood. Approximates marginal p(censored | params) via Laplace
approximation.
Returns dictionary with three entries:
1) y_hat, the approximate mode of the given conditional distribution
2) p_int_cen, the approximate probabilities of intensity-based
censoring
3) approx_sd, the approximate SDs of the conditional intensity
distributions
'''
# Construct kwargs for calls to densities and their derivatives
dargs = {'eta_0': eta_0,
'eta_1': eta_1,
'mu': mu,
'sigmasq': sigmasq,
'glm_link_name': glm_link_name}
# 1) Find mode of censored intensity density
# First, start with a bit of bisection to get in basin of attraction for
# Halley's method
lower = mu - bisectScale * np.sqrt(sigmasq)
upper = mu + bisectScale * np.sqrt(sigmasq)
# Make sure the starting points are of opposite signs
invalid = (np.sign(deriv_logdcensored(lower, **dargs)) *
np.sign(deriv_logdcensored(upper, **dargs)) > 0)
while np.any(invalid):
lower -= bisectScale * np.sqrt(sigmasq)
upper += bisectScale * np.sqrt(sigmasq)
invalid = (np.sign(deriv_logdcensored(lower, **dargs)) *
np.sign(deriv_logdcensored(upper, **dargs)) > 0)
# Run bisection
y_hat = vectorized_bisection(f=deriv_logdcensored, f_kwargs=dargs,
lower=lower, upper=upper,
tol=np.sqrt(tol), maxIter=bisectIter)
# Second, run Halley's method to find the censored intensity distribution's
# mode to much higher precision.
y_hat = halley(f=deriv_logdcensored, fprime=deriv2_logdcensored,
f2prime=deriv3_logdcensored, f_kwargs=dargs,
x0=y_hat, tol=tol, maxIter=maxIter)
# 2) Compute approximate SD of censored intensity distribution
info = -deriv2_logdcensored(y_hat, **dargs)
approx_sd = np.sqrt(1. / info)
# 3) Use Laplace approximation to approximate p(int. censoring); this is the
# normalizing constant of the given conditional distribution
p_int_cen = laplace_approx(f=dcensored, xhat=y_hat, info=info,
f_kwargs=dargs)
# Return dictionary containing combined result
result = {'y_hat': y_hat,
'p_int_cen': p_int_cen,
'approx_sd': approx_sd}
return result
def bound_density_ratio(eta_0, eta_1, mu, sigmasq, y_hat, approx_sd, propDf,
normalizing_cnst, tol=1e-10, maxIter=100,
bisectScale=1., glm_link_name="Logit"):
'''
Bound ratio of t proposal density to actual censored intensity density.
This is used to construct an efficient, robust rejection sampler to exactly
draw from the conditional posterior of censored intensities.
This computation is fully vectorized with respect to mu, sigmasq, y_hat,
approx_sd, and normalizing_cnst.
Based on the properties of these two densities, their ratio will have three
critical points. These consist of a local minimum, flanked by two local
maxima.
It returns the smallest constant M such that the t proposal density times M
is uniformly >= the actual censored intensity density.
'''
# Construct kwargs for calls to densities and their derivatives
dargs = {'eta_0': eta_0,
'eta_1': eta_1,
'mu': mu,
'sigmasq': sigmasq,
'glm_link_name': glm_link_name,
'approx_sd': approx_sd,
'y_hat': y_hat,
'propDf': propDf}
# Initialize vectors for all four of the bounds
left_lower = np.zeros_like(y_hat)
left_upper = np.zeros_like(y_hat)
right_lower = np.zeros_like(y_hat)
right_upper = np.zeros_like(y_hat)
# Make sure the starting points are the correct sign
left_lower = y_hat - bisectScale * approx_sd
left_upper = y_hat - 10 * tol
right_lower = y_hat + 10 * tol
right_upper = y_hat + bisectScale * approx_sd
# Left lower bounds
invalid = (deriv_logdensityratio(left_lower, **dargs) < 0)
while np.any(invalid):
left_lower[invalid] -= approx_sd[invalid]
invalid = (deriv_logdensityratio(left_lower, **dargs) < 0)
# Left upper bounds
invalid = (deriv_logdensityratio(left_upper, **dargs) > 0)
while np.any(invalid):
left_upper[invalid] -= 10 * tol
invalid = (deriv_logdensityratio(left_upper, **dargs) > 0)
# Right lower bounds
invalid = (deriv_logdensityratio(right_lower, **dargs) < 0)
while np.any(invalid):
right_lower[invalid] += 10 * tol
invalid = (deriv_logdensityratio(right_lower, **dargs) < 0)
# Right upper bounds
invalid = (deriv_logdensityratio(right_upper, **dargs) > 0)
while np.any(invalid):
right_upper[invalid] += approx_sd[invalid]
invalid = (deriv_logdensityratio(right_upper, **dargs) > 0)
# Find zeros that are less than y_hat using bisection.
left_roots = vectorized_bisection(f=deriv_logdensityratio, f_kwargs=dargs,
lower=left_lower, upper=left_upper,
tol=tol, maxIter=maxIter)
# Find zeros that are greater than y_hat using bisection.
right_roots = vectorized_bisection(f=deriv_logdensityratio, f_kwargs=dargs,
lower=right_lower, upper=right_upper,
tol=tol, maxIter=maxIter)
# Compute bounding factor M
f_left_roots = densityratio(left_roots, normalizing_cnst=normalizing_cnst,
**dargs)
f_right_roots = densityratio(
right_roots, normalizing_cnst=normalizing_cnst,
**dargs)
# Store maximum of each root
M = np.maximum(f_left_roots, f_right_roots)
# Return results
return M
def rintensities_cen(n_cen, mu, sigmasq, y_hat, approx_sd,
p_int_cen, p_rnd_cen,
eta_0, eta_1, propDf,
tol=1e-10, maxIter=100, glm_link_name="Logit"):
'''
Draw censored intensities and random censoring indicators given n_cen and
quantities computed from Laplace approximation.
Returns
-------
- intensities : ndarray
A 1d ndarray of sampled censored intensities
- mapping : ndarray
A 1d integer ndarray of peptide indices, one per censored state
- W : ndarray
A 1d integer ndarray of indicators for random censoring
'''
# Setup data structures for draws
n_states = np.sum(n_cen)
# Intensities
intensities = np.zeros(n_states, dtype=np.float64)
# And, the vital indexing vector of length sum(n). This can be used for
# direct referencing to all input vectors to handle the state to peptide
# mapping
mapping = np.zeros(n_states, dtype=int)
# Populate index vector
filled = 0
for i in range(n_cen.size):
if n_cen[i] > 0:
# Get slice to insert new data
pep = slice(filled, filled + n_cen[i])
# Populate index vector
mapping[pep] = i
# Update filled counter
filled += n_cen[i]
# Draw the random censoring indicators. Note that W=1 if randomly censored.
post_p_rnd_cen = p_rnd_cen / (p_rnd_cen + (1. - p_rnd_cen) * p_int_cen)
W = (np.random.uniform(
size=n_states) < post_p_rnd_cen[mapping]).astype(int)
# Drawing censored intensities
# First, get the maximum of the target / proposal ratio for each set of
# unique parameter values (not per state)
M = bound_density_ratio(eta_0=eta_0, eta_1=eta_1, mu=mu, sigmasq=sigmasq,
y_hat=y_hat, approx_sd=approx_sd,
normalizing_cnst=1. / p_int_cen, propDf=propDf,
tol=tol, maxIter=maxIter,
glm_link_name=glm_link_name)
# Next, draw randomly-censored intensities
intensities[W == 1] = np.random.normal(loc=mu[mapping[W == 1]],
scale=np.sqrt(
sigmasq[mapping[W == 1]]),
size=np.sum(W))
# Draw remaining intensity-censored intensities using rejection sampler
active = (W == 0)
if type(eta_0) != np.ndarray or len(eta_0) < 1:
eta_0 = eta_0 * np.ones_like(y_hat)
if type(eta_1) != np.ndarray or len(eta_1) < 1:
eta_1 = eta_1 * np.ones_like(y_hat)
while(np.sum(active) > 0):
from .ConfidenceIntervalsOnlySamples import ConfidenceIntervalsOnlySamples
import numpy as np
class ConfidenceIntervalsOnlySamplesClassification(ConfidenceIntervalsOnlySamples):
def _stats_and_plot(self, baseName, batch_samples_list, real_valu_list, extra_batch_dict):
all_samples = np.concatenate(batch_samples_list, axis=0)
y = np.concatenate(real_valu_list, axis=0)
nb, no, ns = all_samples.shape
cumulative_preds = np.sum(all_samples, axis=2)
predictions_forced = np.argmax(cumulative_preds, axis=1)
accuracy_forced = np.mean(np.equal(predictions_forced, y)) * 100
total = len(predictions_forced)
#refuse prediction if uncertainties is too high (Bayesian defense)
fracs = [0.5, 0.7, 0.9]
accuracy_over_fracs = []
for frac in fracs:
accuracy_over_fracs.append(self._accuracy_over_threshold(cumulative_preds, frac, ns, y))
with open(self._create_name("stats", baseName) + '.txt', 'w') as f:
f.write("forced -> accuracy: {:} total: {:}\n".format(accuracy_forced, total))
for frac, (acc_over, tot_over) in zip(fracs, accuracy_over_fracs):
f.write("over {:} -> accuracy: {:} total: {:}\n".format(frac, acc_over, tot_over))
def _accuracy_over_threshold(self, cumulative_preds, frac, ns, y):
threshold = ns * frac
more_than = np.array(cumulative_preds > threshold, dtype=np.int)
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Some math formula for various calculations
"""
import sys
import numpy as np
from math import log, exp, sqrt
from jcvi.utils.cbook import human_size
def mean_confidence_interval(data, confidence=0.95):
# Compute the confidence interval around the mean
import scipy
a = 1.0 * np.array(data)
n = len(a)
m, se = np.mean(a), scipy.stats.sem(a)
h = se * scipy.stats.t._ppf((1 + confidence) / 2.0, n - 1)
return m, m - h, m + h
def confidence_interval(data, confidence=0.95):
# Compute the confidence interval of the data
# Note the difference from mean_confidence_interval()
a = 1.0 * np.array(data)
import pytest
import numpy as np
import astropy.units as u
from astropy.tests.helper import remote_data
from .. import *
try:
import scipy
HAS_SCIPY = True
except ImportError:
HAS_SCIPY = False
class TestSun:
def test___repr__(self):
with default_sun.set('E490_2014LR'):
assert repr(Sun.from_default()
) == '<Sun: E490-00a (2014) low resolution reference solar spectrum (Table 4)>'
sun = Sun.from_array([1, 2] * u.um, [1, 2] * u.Jy)
assert repr(sun) == '<Sun>'
def test_from_builtin(self):
sun = Sun.from_builtin('E490_2014LR')
assert sun.description == sources.E490_2014LR['description']
def test_from_builtin_unknown(self):
with pytest.raises(ValueError):
Sun.from_builtin('not a solar spectrum')
def test_from_default(self):
with default_sun.set('E490_2014LR'):
sun = Sun.from_default()
assert sun.description == sources.E490_2014LR['description']
def test_call_single_wavelength(self):
with default_sun.set('E490_2014'):
sun = default_sun.get()
f = sun(0.5555 * u.um)
assert np.isclose(f.value, 1897)
def test_call_single_frequency(self):
with default_sun.set('E490_2014'):
sun = default_sun.get()
f = sun(3e14 * u.Hz)
assert np.isclose(f.value, 2.49484251e+14)
@pytest.mark.skipif('not HAS_SCIPY')
def test_sun_wavelength_array(self):
from scipy.integrate import trapz
# compare Sun's rebinning with an integration over the spectrum
sun = Sun.from_builtin('E490_2014')
wave0 = sun.wave.to('um').value
fluxd0 = sun.fluxd.to('W/(m2 um)').value
wave = np.linspace(0.35, 0.55, 6)
d = np.diff(wave)[0] / 2
left_bins = wave - d
right_bins = wave + d
fluxd1 = np.zeros(len(wave))
for i in range(len(wave)):
j = (wave0 >= left_bins[i]) * (wave0 <= right_bins[i])
fluxd1[i] = trapz(fluxd0[j] * wave0[j], wave0[j]) / trapz(
wave0[j], wave0[j])
fluxd2 = sun(wave * u.um).value
assert np.allclose(fluxd1, fluxd2, 0.005)
@remote_data
def test_filt_units(self):
"""Colina et al. V=-26.75 mag, for zero-point flux density
36.7e-10 ergs/s/cm2/Å.
"""
sun = Sun.from_builtin('E490_2014')
wave, fluxd = sun.filt('johnson_v', unit='erg/(s cm2 AA)')
assert np.isclose(wave.value, 5502, rtol=0.001)
assert np.isclose(fluxd.value, 183.94, rtol=0.0003)
import os
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from mat4py import loadmat
plt.rc('font', family='serif')
plt.rc('font', serif='Times New Roman')
plt.rcParams["mathtext.fontset"] = "stix"
data_mat = './example.mat'
data = loadmat(data_mat)['res']
base_path = os.path.dirname(".")
prop_cycle = plt.rcParams['axes.prop_cycle']
colors = prop_cycle.by_key()['color']
fix_data = dict()
decay_data = dict()
for key, value in data.items():
if 'fix' in key:
fix_data[key[3:]] = value
else:
decay_data[key[5:]] = value
COLORS = {"1": colors[0],
"5": colors[1],
"10": colors[2],
"50": colors[3],
"100": colors[4]}
LABELS = {'1': r'$E=1$',
'5': r'$E=5$',
'10': r'$E=10$',
'50': r'$E=50$',
'100': r'$E=100$'}
data_dict = {"fix": fix_data,
"decay": decay_data}
for name, value in data_dict.items():
plt.figure(figsize=(4, 3))
optimal_value = np.inf
for key, stat in value.items():
loss = np.array(stat['loss'])
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
import dask_image.ndinterp as da_ndinterp
import numpy as np
import dask.array as da
from scipy import ndimage
def validate_rotate(n=2,
axes=(0,1),
reshape=False,
input_output_shape_per_dim=(16,16),
interp_order=1,
interp_mode='constant',
input_output_chunksize_per_dim=(6,6),
random_seed=0,
use_cupy=False,
):
"""
Compare the outputs of `ndimage.rotate`
and `dask_image.ndinterp.rotate`.
Notes
-----
Currently, prefilter is disabled and therefore the output
of `dask_image.ndinterp.rotate` is compared
to `ndimage.rotate` with `prefilter=False`.
"""
# define test image
a = input_output_shape_per_dim[0]
np.random.seed(random_seed)
image = np.random.random([a] * n)
import random
from scipy.spatial.distance import squareform, pdist
import numpy as np
from sklearn import linear_model
import gibbs
from sklearn.neighbors import NearestNeighbors
from vae_ld.learning_dynamics import logger
class TwoNN:
""" Implementation of the ID estimator TwoNN from [1]
[1] Estimating the intrinsic dimension of datasets by a minimal neighborhood information
<NAME>, <NAME>, <NAME>, and <NAME>, 2017
"""
def __init__(self):
self._to_keep = 0.9
self._knn = NearestNeighbors(n_neighbors=3)
@property
def to_keep(self):
return self._to_keep
@to_keep.setter
def to_keep(self, to_keep):
""" Set the fraction of data points to keep during the ID estimate
"""
if to_keep <= 0 or to_keep > 1:
raise ValueError("The fraction to keep must be between 0 (excluded) and 1.")
self._to_keep = to_keep
def fit_transform(self, X):
""" Compute the intrinsic dimension estimation, based on the implementation of [1] and [2].
The steps described in [3] (p.3) are outlined in the code comments.
[1] https://github.com/efacco/TWO-NN (C++ implementation by the authors of [3])
[2] https://github.com/ansuini/IntrinsicDimDeep (Python implementation by the authors of [4])
[3] Estimating the intrinsic dimension of datasets by a minimal neighborhood information
<NAME>, <NAME>, <NAME>, and <NAME>, 2017
[4] Intrinsic dimension of data representations in deep neural networks
<NAME>, <NAME>, <NAME>, and <NAME>, 2019
"""
self._knn.fit(X)
# 1. Compute the pairwise distances for each point in the dataset
logger.info("Computing the pairwise distance between each point of the dataset")
# x_dist = np.sort(squareform(pdist(X)), axis=1, kind="heapsort")
x_dist = self._knn.kneighbors(X)[0]
# 2. Get two shortest distances
logger.info("Getting the two shortest distances")
r1 = x_dist[:, 1]
r2 = x_dist[:, 2]
# This step was added in Ansuini et al. implementation
# logger.info("Removing zero values and degeneracies")
# zeros = np.where(r1 == 0)[0]
# degeneracies = np.where(r1 == r2)[0]
# good = np.setdiff1d(np.arange(x_dist.shape[0]), np.array(zeros))
# good = np.setdiff1d(good, np.array(degeneracies))
# logger.info(good.shape)
# r1 = r1[good]
# r2 = r2[good]
# 3. For each point i compute mu_i
logger.info("Computing mu_i for each point i")
mu = np.sort(r2/r1, kind="heapsort")
# 4. Compute the empirical cumulate Femp(mu)
logger.info("Computing the empirical cumulate")
n = r1.shape[0]
Femp = np.arange(0, n, dtype=np.float64) / n
# 5. Fit the points of the plane given by coordinates {(log(mu_i), -log(1 - Femp(mu_i)))|i=1, …, n} with a
# straight line passing through the origin, using the analytical solution of the linear regression.
# Note that we discard 10% of the points by default, as recommended in the TwoNN paper
logger.info("Fitting the {}% first points with a linear regression".format(self._to_keep * 100))
n_to_keep = int(n * self._to_keep)
x = np.log(mu)[:n_to_keep]
y = -np.log(1 - Femp)[:n_to_keep]
d = np.dot(x, y) / np.dot(x, x)
return d
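# --- Illustrative usage sketch (not part of the original module) ---
# Estimates the intrinsic dimension of points lying on a 5-dimensional
# linear subspace embedded in 50 ambient dimensions. The synthetic data is
# only a stand-in to show the call pattern of the TwoNN class above.
def _twonn_usage_example():
    rng = np.random.RandomState(0)
    X = rng.randn(5000, 5) @ rng.randn(5, 50)
    estimator = TwoNN()
    d_hat = estimator.fit_transform(X)
    return d_hat   # expected to be close to 5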
class MLE:
def __init__(self, k, seed, runs=5, anchor=0.9):
self._anchor = anchor
self._k = k
self._seed = seed
self._n_runs = runs
self._knn = NearestNeighbors(n_neighbors=k+1)
@property
def anchor(self):
return self._anchor
@anchor.setter
def anchor(self, anchor):
""" Set the fraction of data points to keep during the ID estimate
"""
if anchor <= 0 or anchor > 1:
raise ValueError("The anchor fraction must be between 0 (excluded) and 1.")
self._anchor = anchor
@property
def k(self):
return self._k
@k.setter
def k(self, k):
""" Set the number of neighbours used for the ID estimate
"""
if k <= 0:
raise ValueError("The number of neighbours must be greater than 0.")
self._k = k
def fit_transform(self, X):
anchor_samples = int(self.anchor * X.shape[0])
res = np.zeros((self._n_runs,))
data_idxs = np.arange(X.shape[0])
self._knn.fit(X)
for i in range(self._n_runs):
logger.info("Computing iteration {} of MLE with k={}".format(i, self._k))
np.random.shuffle(data_idxs)
anchor_idxs = data_idxs[:anchor_samples]
res[i] = self._compute_mle(X[anchor_idxs])
return res.mean()
def _compute_mle(self, X):
dist = self._knn.kneighbors(X)[0][:, 1:]
if not np.all(dist > 0.):
logger.info(np.argwhere(dist <= 0.))
logger.info(dist[np.argwhere(dist <= 0.)])
assert np.all(dist > 0.)
d = np.log(dist[:, self._k - 1: self._k] / dist[:, 0:self._k - 1])
d = d.sum(axis=1) / (self.k - 2)
return 1. / d.mean()
class Hidalgo:
""" Compute Hidalgo, an algorithm initially proposed in [1].
The implementation is from https://github.com/micheleallegra/Hidalgo/tree/master/python,
the code released with [1].
[1] Data segmentation based on the local intrinsic dimension, Allegra et al., 2020
"""
def __init__(self, metric='euclidean', k=2, zeta=0.8, q=3, iters=10000, replicas=10, burn_in=0.9):
"""
:param metric: The metric to use for KNN, if predefined, then a distance matrix will be given when calling fit
:param k: The number of manifolds
:param zeta: The probability to sample the neighbour of a point from the same manifold (in the paper's formula,
this is xsi)
:param q: number of closest neighbours from each points to keep
:param iters: number of iterations of the Gibbs sampling
:param replicas: number of times the sampling should be replicated
:param burn_in: percentage of points to exclude of the estimation
"""
self.metric = metric
self.k = k
self.zeta = zeta
self.q = q
self.iters = iters
self.burn_in = burn_in
self.replicas = replicas
# Setting prior parameters of d to 1
self.a = np.ones(k)
self.b = np.ones(k)
# Setting prior parameter of p to 1
self.c = np.ones(k)
# Setting prior parameter of zeta to 1
self.f = np.ones(k)
# Setting the save samples every 10 sampling and compute the total number of samples
self.sampling_rate = 10
self.n_samples = np.floor((self.iters - np.ceil(self.burn_in * self.iters)) / self.sampling_rate).astype(int)
# z will not be fixed
self.fixed_z = 0
# Local interaction between z are used
self.use_local_z_interaction = 1
# z will not be updated during the training
self.update_z = 0
def _fit(self, X):
assert isinstance(X, np.ndarray), "X should be a numpy array"
assert len(np.shape(X)) == 2, "X should be a two-dimensional numpy array"
n, d = np.shape(X)
nns_mat = np.zeros((n, n))
logger.info("Getting the {} nearest neighbours from each point".format(self.q))
if self.metric == "predefined":
distances = np.sort(X)[:, :self.q + 1]
indices_in = np.argsort(X)[:, :self.q + 1]
else:
nns = NearestNeighbors(n_neighbors=self.q + 1, algorithm="ball_tree", metric=self.metric).fit(X)
distances, indices_in = nns.kneighbors(X)
for i in range(self.q):
nns_mat[indices_in[:, 0], indices_in[:, i + 1]] = 1
nns_count = np.sum(nns_mat, axis=0)
from __future__ import print_function
import numpy as np
from skimage.morphology import cube, dilation
def create_boundary(lab, regions, width):
"""Create boundary of each region.
For each non-zero region, create a
boundary by dilation. Each boundary
area is then assigned a new label equal
to the region label plus the number of
non-background regions.
Omit the new boundary voxel if it overlaps
with any non-zero region.
For example, the input labeling has non-background regions
[1, 2, 3], then the corresponding boundary regions
are [4, 5, 6].
Arguments:
lab: numpy array
The 3d labeling matrix
regions: list or array of int
The non-background region list
width: int
The boundary width
"""
kernel = cube(2 * width + 1)
lab_dilated = lab.copy()
n_regions = len(regions)
idx_protected = np.in1d(lab.ravel(),
regions).reshape(lab.shape)
for region in regions:
lab_binary = np.zeros(lab.shape, dtype=lab.dtype)
lab_binary[np.where(lab == region)] = 1
lab_boundary = dilation(lab_binary, kernel) - lab_binary
# assign a label to this boundary
idx_boundary = (lab_boundary == 1)
lab_dilated[idx_boundary & ~idx_protected] = region + n_regions
return lab_dilated
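# --- Illustrative usage sketch (not part of the original module) ---
# Two labeled cubes in a small volume; with regions [1, 2] their dilated
# boundaries receive the new labels 1 + 2 = 3 and 2 + 2 = 4, while voxels
# already belonging to a region are left untouched.
def _create_boundary_example():
    lab = np.zeros((20, 20, 20), dtype=np.int32)
    lab[2:8, 2:8, 2:8] = 1
    lab[12:18, 12:18, 12:18] = 2
    lab_with_boundaries = create_boundary(lab, regions=[1, 2], width=1)
    return np.unique(lab_with_boundaries)   # array([0, 1, 2, 3, 4])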
class PickVoxel():
"""
Template of picking voxels.
"""
def __init__(self, labels, voxels, ignored_labels=None):
assert np.prod(labels.shape) == voxels.shape[0]
self.labels = labels
self.regions = np.unique(labels)
self.voxels = voxels
if ignored_labels:
self.regions = list(set(self.regions) - set(ignored_labels))
idx = np.in1d(self.labels.ravel(),
self.regions).reshape(self.labels.shape)
reg_x, reg_y, reg_z = np.where(idx)
self.voxels = np.array(list(zip(reg_x, reg_y, reg_z)))
def pick_voxels(self, n_voxels):
raise NotImplementedError
def pick_all(self):
raise NotImplementedError
class PickVoxelRandom(PickVoxel):
def __init__(self, labels, voxels, ignored_labels=None):
super(PickVoxelRandom, self).__init__(labels,
voxels,
ignored_labels)
def pick_voxels(self, n_voxels):
"""Randomly pick up voxels regardless of label."""
rp = np.random.permutation(range(self.voxels.shape[0]))
idx_voxels = rp[:n_voxels]
return self.voxels[idx_voxels]
def pick_all(self):
return self.voxels
class PickVoxelBalanced(PickVoxel):
def __init__(self, labels, voxels, ignored_labels=None):
super(PickVoxelBalanced, self).__init__(labels,
voxels,
ignored_labels)
def pick_voxels(self, n_voxels, expand_boundary=False):
"""Pick up voxels evently from each region.
In principle, each region will have n_voxels/n_region voxels.
If any of the region does not have sufficient voxels,
the small regions will have duplicated voxels
to fullfil the number of voxels needed for its region.
if expand_boundary is set to True,
Sample background voxels first on the boundary of
non-background voxels, and random sampling to get
the rest of required background voxels.
Note that if the number of voxels is less
than the number of regions, a random sampling
regardless of the regions is used.
"""
n_regions = len(self.regions)
if n_voxels < n_regions:
# TODO: get voxels in self.regions
rp = np.random.permutation(range(self.voxels.shape[0]))
idx_voxels = rp[:n_voxels]
return self.voxels[idx_voxels]
# Distribute the needed voxels to all regions
n_exp_vx, n_remain = divmod(n_voxels, n_regions)
# Number of voxels to be extracted from each region; small regions may end
# up with duplicated voxels to reach their share
n_needed_voxels = n_exp_vx * np.ones((n_regions,), dtype=int)
# randomly choose some non-background regions for remain voxels
rp = np.random.permutation(len(self.regions))
rp = rp[rp != 0]
for reg_id in rp[:n_remain]:
n_needed_voxels[reg_id] += 1
boundary_regions = []
# create boundary of each region
if expand_boundary:
nonzero_regions = self.regions.nonzero()[0]
# TODO: make boundary_width an argument
boundary_width = 10
self.labels = create_boundary(self.labels,
nonzero_regions,
boundary_width)
boundary_regions = list(set(np.unique(self.labels)) -
set(self.regions))
# Pick up the voxels
region_voxels = []
for i, reg_id in enumerate(self.regions):
n_needed = n_needed_voxels[i]
reg_indices = np.where(self.labels == reg_id)
vxs = np.asarray(reg_indices).T
n_vxs = vxs.shape[0]
# print("region {} has {}, needs {}".format(i, n_vxs, n_needed))
# randomly pick as many as it should/could
rp = np.random.permutation(range(n_vxs))
sampled_vxs = vxs[rp[:n_needed]]
region_voxels.extend(sampled_vxs)
# sample duplicate voxels if region is too small
if n_needed > n_vxs:
print("Extract duplicated voxels in region {}".format(i))
idx_dup_vxs = np.random.randint(n_vxs, size=n_needed - n_vxs)
# MIT License
#
# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2018
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import unittest
import keras.backend as k
import numpy as np
from art.attacks.evasion.hop_skip_jump import HopSkipJump
from art.estimators.estimator import BaseEstimator
from art.estimators.classification import ClassifierMixin
from art.estimators.classification.keras import KerasClassifier
from art.utils import random_targets
from tests.utils import TestBase
from tests.utils import get_image_classifier_tf, get_image_classifier_kr, get_image_classifier_pt
from tests.utils import get_tabular_classifier_tf, get_tabular_classifier_kr
from tests.utils import get_tabular_classifier_pt, master_seed
from tests.attacks.utils import backend_test_classifier_type_check_fail
logger = logging.getLogger(__name__)
class TestHopSkipJump(TestBase):
"""
A unittest class for testing the HopSkipJump attack.
"""
@classmethod
def setUpClass(cls):
master_seed(seed=1234, set_tensorflow=True, set_torch=True)
super().setUpClass()
cls.n_train = 100
cls.n_test = 10
cls.x_train_mnist = cls.x_train_mnist[0 : cls.n_train]
cls.y_train_mnist = cls.y_train_mnist[0 : cls.n_train]
cls.x_test_mnist = cls.x_test_mnist[0 : cls.n_test]
cls.y_test_mnist = cls.y_test_mnist[0 : cls.n_test]
def setUp(self):
master_seed(seed=1234, set_tensorflow=True, set_torch=True)
super().setUp()
def test_3_tensorflow_mnist(self):
"""
First test with the TensorFlowClassifier.
:return:
"""
x_test_original = self.x_test_mnist.copy()
# Build TensorFlowClassifier
tfc, sess = get_image_classifier_tf()
# First targeted attack and norm=2
hsj = HopSkipJump(classifier=tfc, targeted=True, max_iter=20, max_eval=100, init_eval=10, verbose=False)
params = {"y": random_targets(self.y_test_mnist, tfc.nb_classes)}
x_test_adv = hsj.generate(self.x_test_mnist, **params)
self.assertFalse((self.x_test_mnist == x_test_adv).all())
self.assertTrue((x_test_adv <= 1.0001).all())
self.assertTrue((x_test_adv >= -0.0001).all())
target = np.argmax(params["y"], axis=1)
y_pred_adv = np.argmax(tfc.predict(x_test_adv), axis=1)
self.assertTrue((target == y_pred_adv).any())
# Test the masking 1
mask = np.random.binomial(n=1, p=0.5, size=np.prod(self.x_test_mnist.shape))
mask = mask.reshape(self.x_test_mnist.shape)
params.update(mask=mask)
x_test_adv = hsj.generate(self.x_test_mnist, **params)
mask_diff = (1 - mask) * (x_test_adv - self.x_test_mnist)
self.assertAlmostEqual(float(np.max(np.abs(mask_diff))), 0.0, delta=0.00001)
unmask_diff = mask * (x_test_adv - self.x_test_mnist)
self.assertGreater(float(np.sum(np.abs(unmask_diff))), 0.0)
# Test the masking 2
mask = np.random.binomial(n=1, p=0.5, size=np.prod(self.x_test_mnist.shape[1:]))
mask = mask.reshape(self.x_test_mnist.shape[1:])
params.update(mask=mask)
x_test_adv = hsj.generate(self.x_test_mnist, **params)
mask_diff = (1 - mask) * (x_test_adv - self.x_test_mnist)
self.assertAlmostEqual(float(np.max(np.abs(mask_diff))), 0.0, delta=0.00001)
unmask_diff = mask * (x_test_adv - self.x_test_mnist)
self.assertGreater(float(np.sum(np.abs(unmask_diff))), 0.0)
# First targeted attack and norm=np.inf
hsj = HopSkipJump(
classifier=tfc, targeted=True, max_iter=20, max_eval=100, init_eval=10, norm=np.Inf, verbose=False
)
params = {"y": random_targets(self.y_test_mnist, tfc.nb_classes)}
x_test_adv = hsj.generate(self.x_test_mnist, **params)
self.assertFalse((self.x_test_mnist == x_test_adv).all())
self.assertTrue((x_test_adv <= 1.0001).all())
self.assertTrue((x_test_adv >= -0.0001).all())
target = np.argmax(params["y"], axis=1)
y_pred_adv = np.argmax(tfc.predict(x_test_adv), axis=1)
self.assertTrue((target == y_pred_adv).any())
# Test the masking 1
mask = np.random.binomial(n=1, p=0.5, size=np.prod(self.x_test_mnist.shape))
mask = mask.reshape(self.x_test_mnist.shape)
params.update(mask=mask)
x_test_adv = hsj.generate(self.x_test_mnist, **params)
mask_diff = (1 - mask) * (x_test_adv - self.x_test_mnist)
self.assertAlmostEqual(float(np.max(np.abs(mask_diff))), 0.0, delta=0.00001)
unmask_diff = mask * (x_test_adv - self.x_test_mnist)
self.assertGreater(float(np.sum(np.abs(unmask_diff))), 0.0)
# Test the masking 2
mask = np.random.binomial(n=1, p=0.5, size=np.prod(self.x_test_mnist.shape[1:]))
mask = mask.reshape(self.x_test_mnist.shape[1:])
params.update(mask=mask)
x_test_adv = hsj.generate(self.x_test_mnist, **params)
mask_diff = (1 - mask) * (x_test_adv - self.x_test_mnist)
self.assertAlmostEqual(float(np.max(np.abs(mask_diff))), 0.0, delta=0.00001)
unmask_diff = mask * (x_test_adv - self.x_test_mnist)
self.assertGreater(float(np.sum(np.abs(unmask_diff))), 0.0)
# Second untargeted attack and norm=2
hsj = HopSkipJump(classifier=tfc, targeted=False, max_iter=20, max_eval=100, init_eval=10, verbose=False)
x_test_adv = hsj.generate(self.x_test_mnist)
self.assertFalse((self.x_test_mnist == x_test_adv).all())
self.assertTrue((x_test_adv <= 1.0001).all())
self.assertTrue((x_test_adv >= -0.0001).all())
y_pred = np.argmax(tfc.predict(self.x_test_mnist), axis=1)
y_pred_adv = np.argmax(tfc.predict(x_test_adv), axis=1)
self.assertTrue((y_pred != y_pred_adv).any())
# Test the masking 1
mask = np.random.binomial(n=1, p=0.5, size=np.prod(self.x_test_mnist.shape))
mask = mask.reshape(self.x_test_mnist.shape)
x_test_adv = hsj.generate(self.x_test_mnist, mask=mask)
mask_diff = (1 - mask) * (x_test_adv - self.x_test_mnist)
self.assertAlmostEqual(float(np.max(np.abs(mask_diff))), 0.0, delta=0.00001)
unmask_diff = mask * (x_test_adv - self.x_test_mnist)
self.assertGreater(float(np.sum(np.abs(unmask_diff))), 0.0)
# Test the masking 2
mask = np.random.binomial(n=1, p=0.5, size=np.prod(self.x_test_mnist.shape[1:]))
mask = mask.reshape(self.x_test_mnist.shape[1:])
x_test_adv = hsj.generate(self.x_test_mnist, mask=mask)
mask_diff = (1 - mask) * (x_test_adv - self.x_test_mnist)
self.assertAlmostEqual(float(np.max(np.abs(mask_diff))), 0.0, delta=0.00001)
unmask_diff = mask * (x_test_adv - self.x_test_mnist)
self.assertGreater(float(np.sum(np.abs(unmask_diff))), 0.0)
# Second untargeted attack and norm=np.inf
hsj = HopSkipJump(
classifier=tfc, targeted=False, max_iter=20, max_eval=100, init_eval=10, norm=np.Inf, verbose=False
)
x_test_adv = hsj.generate(self.x_test_mnist)
self.assertFalse((self.x_test_mnist == x_test_adv).all())
self.assertTrue((x_test_adv <= 1.0001).all())
self.assertTrue((x_test_adv >= -0.0001).all())
y_pred = np.argmax(tfc.predict(self.x_test_mnist), axis=1)
y_pred_adv = np.argmax(tfc.predict(x_test_adv), axis=1)
self.assertTrue((y_pred != y_pred_adv).any())
# Test the masking 1
mask = np.random.binomial(n=1, p=0.5, size=np.prod(self.x_test_mnist.shape))
mask = mask.reshape(self.x_test_mnist.shape)
x_test_adv = hsj.generate(self.x_test_mnist, mask=mask)
mask_diff = (1 - mask) * (x_test_adv - self.x_test_mnist)
self.assertAlmostEqual(float(np.max(np.abs(mask_diff))), 0.0, delta=0.00001)
unmask_diff = mask * (x_test_adv - self.x_test_mnist)
self.assertGreater(float(np.sum(np.abs(unmask_diff))), 0.0)
# Test the masking 2
mask = np.random.binomial(n=1, p=0.5, size=np.prod(self.x_test_mnist.shape[1:]))
mask = mask.reshape(self.x_test_mnist.shape[1:])
x_test_adv = hsj.generate(self.x_test_mnist, mask=mask)
mask_diff = (1 - mask) * (x_test_adv - self.x_test_mnist)
self.assertAlmostEqual(float(np.max(np.abs(mask_diff))), 0.0, delta=0.00001)
unmask_diff = mask * (x_test_adv - self.x_test_mnist)
self.assertGreater(float(np.sum(np.abs(unmask_diff))), 0.0)
# Check that x_test has not been modified by attack and classifier
self.assertAlmostEqual(float(np.max(np.abs(x_test_original - self.x_test_mnist))), 0.0, delta=0.00001)
# Clean-up session
if sess is not None:
sess.close()
def test_8_keras_mnist(self):
"""
Second test with the KerasClassifier.
:return:
"""
x_test_original = self.x_test_mnist.copy()
# Build KerasClassifier
krc = get_image_classifier_kr()
# First targeted attack and norm=2
hsj = HopSkipJump(classifier=krc, targeted=True, max_iter=20, max_eval=100, init_eval=10, verbose=False)
params = {"y": random_targets(self.y_test_mnist, krc.nb_classes)}
x_test_adv = hsj.generate(self.x_test_mnist, **params)
self.assertFalse((self.x_test_mnist == x_test_adv).all())
self.assertTrue((x_test_adv <= 1.0001).all())
self.assertTrue((x_test_adv >= -0.0001).all())
target = np.argmax(params["y"], axis=1)
y_pred_adv = np.argmax(krc.predict(x_test_adv), axis=1)
self.assertTrue((target == y_pred_adv).any())
# Test the masking 1
mask = np.random.binomial(n=1, p=0.5, size=np.prod(self.x_test_mnist.shape))
mask = mask.reshape(self.x_test_mnist.shape)
params.update(mask=mask)
x_test_adv = hsj.generate(self.x_test_mnist, **params)
mask_diff = (1 - mask) * (x_test_adv - self.x_test_mnist)
self.assertAlmostEqual(float(np.max(np.abs(mask_diff))), 0.0, delta=0.00001)
unmask_diff = mask * (x_test_adv - self.x_test_mnist)
self.assertGreater(float(np.sum(np.abs(unmask_diff))), 0.0)
# Test the masking 2
mask = np.random.binomial(n=1, p=0.5, size=np.prod(self.x_test_mnist.shape[1:]))
# coding=utf-8
# !/usr/bin/python3.6
"""
__synopsis__ : Visualize vectors in 2D using tsne.
__description__ :
__project__ : XCGCN
__author__ : <NAME> <<EMAIL>>
__version__ : ": "
__date__ : "26/06/19"
__copyright__ : "Copyright (c) 2019"
__license__ : This source code is licensed under the MIT-style license found in the LICENSE file in the root
directory of this source tree.
__classes__ : class_name
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
from logger import logger
from text_process import Text_Process
from text_process.text_encoder import Text_Encoder
from config import configuration as config
class Vector_Visualizations:
""" Visualize vectors in 2D. """
def __init__(self) -> None:
super(Vector_Visualizations,self).__init__()
self.text_process = Text_Process()
self.text_encoder = Text_Encoder()
def create_vectors(self,cats: dict):
""" Creates vector from cats.
:param cats:
"""
self.cats_processed = self.text_process.process_cats(cats)
model = self.text_encoder.load_word2vec()
self.cats_processed_vecs,_ = self.text_process.gen_lbl2vec(self.cats_processed,model)
return self.cats_processed_vecs
def show_vectors(self,cats_processed_vecs=None):
if cats_processed_vecs is None: cats_processed_vecs = self.cats_processed_vecs
cats_processed_2d = self.use_tsne(cats_processed_vecs,list(self.cats.values()))
return cats_processed_2d
def view_closestwords_tsnescatterplot(self,model,word,word_dim=config["prep_vecs"]["input_size"],
sim_words=config["prep_vecs"]["sim_words"]):
""" Method to plot the top sim_words in 2D using TSNE.
:param model:
:param word:
:param word_dim:
:param sim_words:
:param plot_title:
"""
arr = np.empty((0,word_dim),dtype='f')
word_labels = [word]
## get close words
close_words = model.similar_by_word(word,topn=sim_words)
## add the vector for each of the closest words to the array
arr = np.append(arr,np.array([model[word]]),axis=0)
for wrd_score in close_words:
wrd_vector = model[wrd_score[0]]
word_labels.append(wrd_score[0])
arr = np.append(arr, np.array([wrd_vector]), axis=0)
"""
Displaying eigenmodes of vibration using ``warp_by_vector``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This example applies the ``warp_by_vector`` filter to a cube whose eigenmodes
have been computed using the Ritz method, as outlined in Visscher, <NAME>.,
<NAME>, <NAME>, et <NAME>. "On the normal modes
of free vibration of inhomogeneous and anisotropic elastic objects". The
Journal of the Acoustical Society of America 90, n.4 (october 1991): 2154-62.
https://asa.scitation.org/doi/10.1121/1.401643
"""
###############################################################################
# First, let's solve the eigenvalue problem for a vibrating cube. We use
# a crude approximation (by choosing a low max polynomial order) to get a fast
# computation.
import numpy as np
from scipy.linalg import eigh
import pyvista as pv
def analytical_integral_rppd(p, q, r, a, b, c):
"""Returns the analytical value of the RPPD integral, i.e. the integral
of x**p * y**q * z**r for (x, -a, a), (y, -b, b), (z, -c, c)."""
if p < 0:
return 0.0
elif q < 0:
return 0.0
elif r < 0.0:
return 0.0
else:
return (
a ** (p + 1)
* b ** (q + 1)
* c ** (r + 1)
* ((-1) ** p + 1)
* ((-1) ** q + 1)
* ((-1) ** r + 1)
/ ((p + 1) * (q + 1) * (r + 1))
)
def make_cijkl_E_nu(E=200, nu=0.3):
"""Makes cijkl from E and nu.
Default values for steel are: E=200 GPa, nu=0.3."""
lambd = E * nu / (1 + nu) / (1 - 2 * nu)
mu = E / 2 / (1 + nu)
cij = np.zeros((6, 6))
cij[(0, 1, 2), (0, 1, 2)] = lambd + 2 * mu
cij[(0, 0, 1, 1, 2, 2), (1, 2, 0, 2, 0, 1)] = lambd
cij[(3, 4, 5), (3, 4, 5)] = mu
# check symmetry
assert np.allclose(cij, cij.T)
# convert to order 4 tensor
coord_mapping = {
(1, 1): 1,
(2, 2): 2,
(3, 3): 3,
(2, 3): 4,
(1, 3): 5,
(1, 2): 6,
(2, 1): 6,
(3, 1): 5,
(3, 2): 4,
}
cijkl = np.zeros((3, 3, 3, 3))
for i in range(3):
for j in range(3):
for k in range(3):
for l in range(3):
u = coord_mapping[(i + 1, j + 1)]
v = coord_mapping[(k + 1, l + 1)]
cijkl[i, j, k, l] = cij[u - 1, v - 1]
return cijkl, cij
def get_first_N_above_thresh(N, freqs, thresh, decimals=3):
"""Returns first N unique frequencies with amplitude above threshold based
on first decimals."""
unique_freqs, unique_indices = np.unique(np.round(freqs, decimals=decimals), return_index=True)
nonzero = unique_freqs > thresh
unique_freqs, unique_indices = unique_freqs[nonzero], unique_indices[nonzero]
return unique_freqs[:N], unique_indices[:N]
def assemble_mass_and_stiffness(N, F, geom_params, cijkl):
"""This routine assembles the mass and stiffness matrix.
It first builds an index of basis functions as a quadruplet of
component and polynomial order for (x^p, y^q, z^r) of maximum order N.
This routine only builds the symmetric part of the matrix to speed
things up.
"""
# building coordinates
triplets = []
for p in range(N + 1):
for q in range(N - p + 1):
for r in range(N - p - q + 1):
triplets.append((p, q, r))
assert len(triplets) == (N + 1) * (N + 2) * (N + 3) // 6
quadruplets = []
for i in range(3):
for triplet in triplets:
quadruplets.append((i, *triplet))
assert len(quadruplets) == 3 * (N + 1) * (N + 2) * (N + 3) // 6
# assembling the mass and stiffness matrix in a single loop
R = len(triplets)
E = np.zeros((3 * R, 3 * R)) # the mass matrix
G = np.zeros((3 * R, 3 * R)) # the stiffness matrix
for index1, quad1 in enumerate(quadruplets):
I, p1, q1, r1 = quad1
for index2, quad2 in enumerate(quadruplets[index1:]):
index2 = index2 + index1
J, p2, q2, r2 = quad2
G[index1, index2] = (
cijkl[I, 1 - 1, J, 1 - 1]
* p1
* p2
* F(p1 + p2 - 2, q1 + q2, r1 + r2, **geom_params)
+ cijkl[I, 1 - 1, J, 2 - 1]
* p1
* q2
* F(p1 + p2 - 1, q1 + q2 - 1, r1 + r2, **geom_params)
+ cijkl[I, 1 - 1, J, 3 - 1]
* p1
* r2
* F(p1 + p2 - 1, q1 + q2, r1 + r2 - 1, **geom_params)
+ cijkl[I, 2 - 1, J, 1 - 1]
* q1
* p2
* F(p1 + p2 - 1, q1 + q2 - 1, r1 + r2, **geom_params)
+ cijkl[I, 2 - 1, J, 2 - 1]
* q1
* q2
* F(p1 + p2, q1 + q2 - 2, r1 + r2, **geom_params)
+ cijkl[I, 2 - 1, J, 3 - 1]
* q1
* r2
* F(p1 + p2, q1 + q2 - 1, r1 + r2 - 1, **geom_params)
+ cijkl[I, 3 - 1, J, 1 - 1]
* r1
* p2
* F(p1 + p2 - 1, q1 + q2, r1 + r2 - 1, **geom_params)
+ cijkl[I, 3 - 1, J, 2 - 1]
* r1
* q2
* F(p1 + p2, q1 + q2 - 1, r1 + r2 - 1, **geom_params)
+ cijkl[I, 3 - 1, J, 3 - 1]
* r1
* r2
* F(p1 + p2, q1 + q2, r1 + r2 - 2, **geom_params)
)
G[index2, index1] = G[index1, index2] # since stiffness matrix is symmetric
if I == J:
E[index1, index2] = F(p1 + p2, q1 + q2, r1 + r2, **geom_params)
E[index2, index1] = E[index1, index2] # since mass matrix is symmetric
return E, G, quadruplets
N = 8 # maximum order of x^p y^q z^r polynomials
rho = 8.0 # g/cm^3
l1, l2, l3 = 0.2, 0.2, 0.2 # all in cm
geometry_parameters = {'a': l1 / 2.0, 'b': l2 / 2.0, 'c': l3 / 2.0}
cijkl, cij = make_cijkl_E_nu(200, 0.3) # Gpa, without unit
E, G, quadruplets = assemble_mass_and_stiffness(
N, analytical_integral_rppd, geometry_parameters, cijkl
)
# solving the eigenvalue problem using symmetric solver
w, vr = eigh(a=G, b=E)
omegas = np.sqrt(np.abs(w) / rho) * 1e5 # convert back to Hz
freqs = omegas / (2 * np.pi)
# expected values from (Bernard 2014, p.14),
# error depends on polynomial order ``N``
expected_freqs_kHz = np.array([704.8, 949.0, 965.2, 1096.3, 1128.4, 1182.8, 1338.9, 1360.9])
computed_freqs_kHz, mode_indices = get_first_N_above_thresh(8, freqs / 1e3, thresh=1, decimals=1)
print('found the following first unique eigenfrequencies:')
for ind, (freq1, freq2) in enumerate(zip(computed_freqs_kHz, expected_freqs_kHz)):
error = np.abs(freq2 - freq1) / freq1 * 100.0
print(f"freq. {ind + 1:1}: {freq1:8.1f} kHz, expected: {freq2:8.1f} kHz, error: {error:.2f} %")
###############################################################################
# Now, let's display a mode on a mesh of the cube.
# Create the 3D NumPy array of spatially referenced data
# (nx by ny by nz)
nx, ny, nz = 30, 31, 32
x = | np.linspace(-l1 / 2.0, l1 / 2.0, nx) | numpy.linspace |
"""
Tests for the measure module
"""
import star
import numpy as np
import math
import pytest
def test_calculate_distance():
r1 = np.array([0, 0, 0])
r2 = np.array([0, 1, 0])
expected_distance = 1
calculated_distance = star.calculate_distance(r1, r2)
assert expected_distance == calculated_distance
##assert 1 == 2
def test_calculate_angle():
r1 = np.array([0, 0, -1])
r2 = np.array([0, 0, 0])
r3 = np.array([1, 0, 0])
expected_angle = math.pi/2
calculated_angle = star.calculate_angle(r1, r2, r3)
assert pytest.approx(expected_angle) == calculated_angle
##assert 1 == 2
@pytest.mark.parametrize("p1, p2, p3, expected_angle",[
(np.array([np.sqrt(2)/2,np.sqrt(2)/2,0]),np.array([0,0,0]),np.array([1,0,0]),math.pi/4),
(np.array([0,0,-1]),np.array([0,1,0]), | np.array([1,0,0]) | numpy.array |
import numpy as np
def concordance_index(event_times, predicted_scores, event_observed=None):
"""
Code adapted from https://github.com/CamDavidsonPilon/lifelines/blob/master/lifelines/utils/concordance.py
to account for missing values in the context of the HECKTOR Challenge
Missing values are encoded by NaNs and are computed as non-concordant.
"""
event_times, predicted_scores, event_observed = _preprocess_scoring_data(
event_times, predicted_scores, event_observed)
# num_correct, num_tied, num_pairs = _concordance_summary_statistics(
# event_times, predicted_scores, event_observed)
num_correct, num_tied, num_pairs = _naive_concordance_summary_statistics(
event_times, predicted_scores, event_observed)
return _concordance_ratio(num_correct, num_tied, num_pairs)
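# A minimal usage sketch: the survival times, risk scores and event flags below are hypothetical
# values, chosen only to illustrate that a NaN prediction is scored as a non-concordant pair.
def _example_concordance_usage():
    event_times = np.array([5.0, 10.0, 12.0, 3.0])
    predicted_scores = np.array([0.2, 0.8, np.nan, 0.1])
    event_observed = np.array([1, 1, 0, 1])
    return concordance_index(event_times, predicted_scores, event_observed)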
def _concordance_ratio(num_correct, num_tied, num_pairs):
"""
Code adapted from https://github.com/CamDavidsonPilon/lifelines/blob/master/lifelines/utils/concordance.py
to account for missing values in the context of the HECKTOR Challenge
"""
if num_pairs == 0:
raise ZeroDivisionError("No admissable pairs in the dataset.")
return (num_correct + num_tied / 2) / num_pairs
def _naive_concordance_summary_statistics(event_times, predicted_event_times,
event_observed):
"""
Code adapted from https://github.com/CamDavidsonPilon/lifelines/blob/master/lifelines/utils/concordance.py
to account for missing values in the context of the HECKTOR Challenge
"""
def _valid_comparison(time_a, time_b, event_a, event_b):
"""True if times can be compared."""
if time_a == time_b:
# Ties are only informative if exactly one event happened
return event_a != event_b
if event_a and event_b:
return True
if event_a and time_a < time_b:
return True
if event_b and time_b < time_a:
return True
return False
def _concordance_value(time_a, time_b, pred_a, pred_b, event_a, event_b):
if np.isnan(pred_a) or | np.isnan(pred_b) | numpy.isnan |
"""Module to provide functionality to import structures."""
import os
import tempfile
import datetime
from collections import OrderedDict
from traitlets import Bool
import ipywidgets as ipw
from aiida.orm import CalcFunctionNode, CalcJobNode, Node, QueryBuilder, WorkChainNode, StructureData
from .utils import get_ase_from_file
class StructureManagerWidget(ipw.VBox): # pylint: disable=too-many-instance-attributes
'''Upload a structure and store it in AiiDA database.
Useful class members:
:ivar has_structure: whether the widget contains a structure
:vartype has_structure: bool
    :ivar frozen: whether the widget is frozen (can't be modified) or not
:vartype frozen: bool
:ivar structure_node: link to AiiDA structure object
:vartype structure_node: StructureData or CifData'''
has_structure = Bool(False)
frozen = Bool(False)
DATA_FORMATS = ('StructureData', 'CifData')
def __init__(self, importers, storable=True, node_class=None, **kwargs):
"""
:param storable: Whether to provide Store button (together with Store format)
:type storable: bool
:param node_class: AiiDA node class for storing the structure.
Possible values: 'StructureData', 'CifData' or None (let the user decide).
Note: If your workflows require a specific node class, better fix it here.
:param examples: list of tuples each containing a name and a path to an example structure
:type examples: list
        :param importers: list of tuples each containing a name and an object for data importing. Each object
        should contain an empty `on_structure_selection()` method that has two parameters: structure_ase, name
        :type importers: list"""
from .viewers import StructureDataViewer
if not importers: # we make sure the list is not empty
raise ValueError("The parameter importers should contain a list (or tuple) of tuples "
"(\"importer name\", importer), got a falsy object.")
self.structure_ase = None
self._structure_node = None
self.viewer = StructureDataViewer(downloadable=False)
self.btn_store = ipw.Button(description='Store in AiiDA', disabled=True)
self.btn_store.on_click(self._on_click_store)
        # Description that will be stored along with the new structure.
self.structure_description = ipw.Text(placeholder="Description (optional)")
# Select format to store in the AiiDA database.
self.data_format = ipw.RadioButtons(options=self.DATA_FORMATS, description='Data type:')
self.data_format.observe(self.reset_structure, names=['value'])
if len(importers) == 1:
# If there is only one importer - no need to make tabs.
self._structure_sources_tab = importers[0][1]
# Assigning a function which will be called when importer provides a structure.
importers[0][1].on_structure_selection = self.select_structure
else:
self._structure_sources_tab = ipw.Tab() # Tabs.
self._structure_sources_tab.children = [i[1] for i in importers] # One importer per tab.
for i, (label, importer) in enumerate(importers):
# Labeling tabs.
self._structure_sources_tab.set_title(i, label)
# Assigning a function which will be called when importer provides a structure.
importer.on_structure_selection = self.select_structure
if storable:
if node_class is None:
store = [self.btn_store, self.data_format, self.structure_description]
elif node_class not in self.DATA_FORMATS:
raise ValueError("Unknown data format '{}'. Options: {}".format(node_class, self.DATA_FORMATS))
else:
self.data_format.value = node_class
store = [self.btn_store, self.structure_description]
else:
store = [self.structure_description]
store = ipw.HBox(store)
super().__init__(children=[self._structure_sources_tab, self.viewer, store], **kwargs)
def reset_structure(self, change=None): # pylint: disable=unused-argument
if self.frozen:
return
self._structure_node = None
self.viewer.structure = None
def select_structure(self, structure_ase, name):
"""Select structure
:param structure_ase: ASE object containing structure
:type structure_ase: ASE Atoms
:param name: File name with extension but without path
:type name: str"""
if self.frozen:
return
self._structure_node = None
if not structure_ase:
self.btn_store.disabled = True
self.has_structure = False
self.structure_ase = None
self.structure_description.value = ''
self.reset_structure()
return
self.btn_store.disabled = False
self.has_structure = True
self.structure_description.value = "{} ({})".format(structure_ase.get_chemical_formula(), name)
self.structure_ase = structure_ase
self.viewer.structure = structure_ase
def _on_click_store(self, change): # pylint: disable=unused-argument
self.store_structure()
def store_structure(self, label=None, description=None):
"""Stores the structure in AiiDA database."""
if self.frozen:
return
if self.structure_node is None:
return
if self.structure_node.is_stored:
print("Already stored in AiiDA: " + repr(self.structure_node) + " skipping..")
return
if label:
self.structure_node.label = label
if description:
self.structure_node.description = description
self.structure_node.store()
print("Stored in AiiDA: " + repr(self.structure_node))
def freeze(self):
"""Do not allow any further modifications"""
self._structure_sources_tab.layout.visibility = 'hidden'
self.frozen = True
self.btn_store.disabled = True
self.structure_description.disabled = True
self.data_format.disabled = True
@property
def node_class(self):
return self.data_format.value
@node_class.setter
def node_class(self, value):
if self.frozen:
return
self.data_format.value = value
@property
def structure_node(self):
"""Returns AiiDA StructureData node."""
if self._structure_node is None:
if self.structure_ase is None:
return None
# perform conversion
if self.data_format.value == 'CifData':
from aiida.orm.nodes.data.cif import CifData
self._structure_node = CifData()
self._structure_node.set_ase(self.structure_ase)
else: # Target format is StructureData
self._structure_node = StructureData(ase=self.structure_ase)
self._structure_node.description = self.structure_description.value
self._structure_node.label = self.structure_ase.get_chemical_formula()
return self._structure_node
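# A minimal usage sketch (assumed to run in a Jupyter cell so the widget renders): the importer
# labels are arbitrary and the choice of node_class is only an example.
def _example_structure_manager():
    importers = [("Upload", StructureUploadWidget()),
                 ("AiiDA database", StructureBrowserWidget())]
    return StructureManagerWidget(importers, storable=True, node_class='StructureData')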
class StructureUploadWidget(ipw.VBox):
"""Class that allows to upload structures from user's computer."""
def __init__(self, text="Upload Structure"):
from fileupload import FileUploadWidget
self.on_structure_selection = lambda structure_ase, name: None
self.file_path = None
self.file_upload = FileUploadWidget(text)
supported_formats = ipw.HTML(
"""<a href="https://wiki.fysik.dtu.dk/ase/_modules/ase/io/formats.html" target="_blank">
Supported structure formats
</a>""")
self.file_upload.observe(self._on_file_upload, names='data')
super().__init__(children=[self.file_upload, supported_formats])
def _on_file_upload(self, change): # pylint: disable=unused-argument
"""When file upload button is pressed."""
self.file_path = os.path.join(tempfile.mkdtemp(), self.file_upload.filename)
with open(self.file_path, 'w') as fobj:
fobj.write(self.file_upload.data.decode("utf-8"))
structure_ase = get_ase_from_file(self.file_path)
self.on_structure_selection(structure_ase=structure_ase, name=self.file_upload.filename)
class StructureExamplesWidget(ipw.VBox):
"""Class to provide example structures for selection."""
def __init__(self, examples, **kwargs):
self.on_structure_selection = lambda structure_ase, name: None
self._select_structure = ipw.Dropdown(options=self.get_example_structures(examples))
self._select_structure.observe(self._on_select_structure, names=['value'])
super().__init__(children=[self._select_structure], **kwargs)
@staticmethod
def get_example_structures(examples):
"""Get the list of example structures."""
if not isinstance(examples, list):
raise ValueError("parameter examples should be of type list, {} given".format(type(examples)))
return [("Select structure", False)] + examples
def _on_select_structure(self, change): # pylint: disable=unused-argument
"""When structure is selected."""
if not self._select_structure.value:
return
structure_ase = get_ase_from_file(self._select_structure.value)
self.on_structure_selection(structure_ase=structure_ase, name=self._select_structure.label)
class StructureBrowserWidget(ipw.VBox):
"""Class to query for structures stored in the AiiDA database."""
def __init__(self):
# Find all process labels
qbuilder = QueryBuilder()
qbuilder.append(WorkChainNode, project="label")
qbuilder.order_by({WorkChainNode: {'ctime': 'desc'}})
process_labels = {i[0] for i in qbuilder.all() if i[0]}
layout = ipw.Layout(width="900px")
self.mode = ipw.RadioButtons(options=['all', 'uploaded', 'edited', 'calculated'],
layout=ipw.Layout(width="25%"))
# Date range
self.dt_now = datetime.datetime.now()
self.dt_end = self.dt_now - datetime.timedelta(days=10)
self.date_start = ipw.Text(value='', description='From: ', style={'description_width': '120px'})
self.date_end = ipw.Text(value='', description='To: ')
self.date_text = ipw.HTML(value='<p>Select the date range:</p>')
self.btn_date = ipw.Button(description='Search', layout={'margin': '1em 0 0 0'})
self.age_selection = ipw.VBox(
[self.date_text, ipw.HBox([self.date_start, self.date_end]), self.btn_date],
layout={
'border': '1px solid #fafafa',
'padding': '1em'
})
# Labels
self.drop_label = ipw.Dropdown(options=({'All'}.union(process_labels)),
value='All',
description='Process Label',
style={'description_width': '120px'},
layout={'width': '50%'})
self.btn_date.on_click(self.search)
self.mode.observe(self.search, names='value')
self.drop_label.observe(self.search, names='value')
h_line = ipw.HTML('<hr>')
box = ipw.VBox([self.age_selection, h_line, ipw.HBox([self.mode, self.drop_label])])
self.results = ipw.Dropdown(layout=layout)
self.results.observe(self._on_select_structure)
self.search()
super(StructureBrowserWidget, self).__init__([box, h_line, self.results])
@staticmethod
def preprocess():
"""Search structures in AiiDA database."""
queryb = QueryBuilder()
queryb.append(StructureData, filters={'extras': {'!has_key': 'formula'}})
for itm in queryb.all(): # iterall() would interfere with set_extra()
formula = itm[0].get_formula()
itm[0].set_extra("formula", formula)
def search(self, change=None): # pylint: disable=unused-argument
"""Launch the search of structures in AiiDA database."""
self.preprocess()
qbuild = QueryBuilder()
try: # If the date range is valid, use it for the search
self.start_date = datetime.datetime.strptime(self.date_start.value, '%Y-%m-%d')
self.end_date = datetime.datetime.strptime(self.date_end.value, '%Y-%m-%d') + datetime.timedelta(hours=24)
except ValueError: # Otherwise revert to the standard (i.e. last 7 days)
self.start_date = self.dt_end
self.end_date = self.dt_now + datetime.timedelta(hours=24)
self.date_start.value = self.start_date.strftime('%Y-%m-%d')
self.date_end.value = self.end_date.strftime('%Y-%m-%d')
filters = {}
filters['ctime'] = {'and': [{'<=': self.end_date}, {'>': self.start_date}]}
if self.drop_label.value != 'All':
qbuild.append(WorkChainNode, filters={'label': self.drop_label.value})
# print(qbuild.all())
# qbuild.append(CalcJobNode, with_incoming=WorkChainNode)
qbuild.append(StructureData, with_incoming=WorkChainNode, filters=filters)
else:
if self.mode.value == "uploaded":
qbuild2 = QueryBuilder()
qbuild2.append(StructureData, project=["id"])
qbuild2.append(Node, with_outgoing=StructureData)
processed_nodes = [n[0] for n in qbuild2.all()]
if processed_nodes:
filters['id'] = {"!in": processed_nodes}
qbuild.append(StructureData, filters=filters)
elif self.mode.value == "calculated":
qbuild.append(CalcJobNode)
qbuild.append(StructureData, with_incoming=CalcJobNode, filters=filters)
elif self.mode.value == "edited":
qbuild.append(CalcFunctionNode)
qbuild.append(StructureData, with_incoming=CalcFunctionNode, filters=filters)
elif self.mode.value == "all":
qbuild.append(StructureData, filters=filters)
qbuild.order_by({StructureData: {'ctime': 'desc'}})
matches = {n[0] for n in qbuild.iterall()}
matches = sorted(matches, reverse=True, key=lambda n: n.ctime)
options = OrderedDict()
options["Select a Structure ({} found)".format(len(matches))] = False
for mch in matches:
label = "PK: %d" % mch.pk
label += " | " + mch.ctime.strftime("%Y-%m-%d %H:%M")
label += " | " + mch.get_extra("formula")
label += " | " + mch.description
options[label] = mch
self.results.options = options
def _on_select_structure(self, change): # pylint: disable=unused-argument
"""When a structure was selected."""
if not self.results.value:
return
structure_ase = self.results.value.get_ase()
formula = structure_ase.get_chemical_formula()
if self.on_structure_selection is not None:
self.on_structure_selection(structure_ase=structure_ase, name=formula)
def on_structure_selection(self, structure_ase, name):
pass
class SmilesWidget(ipw.VBox):
    """Convert SMILES into a 3D structure."""
SPINNER = """<i class="fa fa-spinner fa-pulse" style="color:red;" ></i>"""
def __init__(self):
try:
import openbabel # pylint: disable=unused-import
except ImportError:
super().__init__(
[ipw.HTML("The SmilesWidget requires the OpenBabel library, "
"but the library was not found.")])
return
self.smiles = ipw.Text()
self.create_structure_btn = ipw.Button(description="Generate molecule", button_style='info')
self.create_structure_btn.on_click(self._on_button_pressed)
self.output = ipw.HTML("")
super().__init__([self.smiles, self.create_structure_btn, self.output])
@staticmethod
def pymol_2_ase(pymol):
"""Convert pymol object into ASE Atoms."""
import numpy as np
from ase import Atoms, Atom
from ase.data import chemical_symbols
asemol = Atoms()
for atm in pymol.atoms:
asemol.append(Atom(chemical_symbols[atm.atomicnum], atm.coords))
asemol.cell = np.amax(asemol.positions, axis=0) - np.amin(asemol.positions, axis=0) + [10] * 3
asemol.pbc = True
asemol.center()
return asemol
def _optimize_mol(self, mol):
"""Optimize a molecule using force field (needed for complex SMILES)."""
# Note, the pybel module imported below comes together with openbabel package. Do not confuse it with
# pybel package available on PyPi: https://pypi.org/project/pybel/
import pybel # pylint:disable=import-error
self.output.value = "Screening possible conformers {}".format(self.SPINNER) #font-size:20em;
f_f = pybel._forcefields["mmff94"] # pylint: disable=protected-access
if not f_f.Setup(mol.OBMol):
f_f = pybel._forcefields["uff"] # pylint: disable=protected-access
if not f_f.Setup(mol.OBMol):
self.output.value = "Cannot set up forcefield"
return
# initial cleanup before the weighted search
f_f.SteepestDescent(5500, 1.0e-9)
f_f.WeightedRotorSearch(15000, 500)
f_f.ConjugateGradients(6500, 1.0e-10)
f_f.GetCoordinates(mol.OBMol)
self.output.value = ""
def _on_button_pressed(self, change): # pylint: disable=unused-argument
"""Convert SMILES to ase structure when button is pressed."""
self.output.value = ""
# Note, the pybel module imported below comes together with openbabel package. Do not confuse it with
# pybel package available on PyPi: https://pypi.org/project/pybel/
import pybel # pylint:disable=import-error
if not self.smiles.value:
return
mol = pybel.readstring("smi", self.smiles.value)
self.output.value = """SMILES to 3D conversion {}""".format(self.SPINNER)
mol.make3D()
pybel._builder.Build(mol.OBMol) # pylint: disable=protected-access
mol.addh()
self._optimize_mol(mol)
structure_ase = self.pymol_2_ase(mol)
formula = structure_ase.get_chemical_formula()
if self.on_structure_selection is not None:
self.on_structure_selection(structure_ase=structure_ase, name=formula)
def on_structure_selection(self, structure_ase, name):
pass
import numpy as np
from scipy.stats import mode
from numpy.linalg import norm
from pysmiles import read_smiles,write_smiles
from rdkit.Chem.rdmolfiles import MolFromSmiles,MolToMolFile
import networkx as nx
import math
from ase import Atoms
from ase.visualize import view
from IPython.display import display, clear_output
import ipywidgets as ipw
import nglview
from ase.data import covalent_radii
from ase.neighborlist import NeighborList
import ase.neighborlist
class SmilesWidget(ipw.VBox):
    """Convert SMILES into a 3D structure."""
SPINNER = """<i class="fa fa-spinner fa-pulse" style="color:red;" ></i>"""
def __init__(self):
try:
import openbabel # pylint: disable=unused-import
except ImportError:
super().__init__(
[ipw.HTML("The SmilesWidget requires the OpenBabel library, "
"but the library was not found.")])
return
self.selection = set()
self.cell_ready = False
self.smiles = ipw.Text()
self.create_structure_btn = ipw.Button(description="Convert SMILES", button_style='info')
self.create_structure_btn.on_click(self._on_button_pressed)
self.create_cell_btn = ipw.Button(description="create GNR", button_style='info')
self.create_cell_btn.on_click(self._on_button2_pressed)
self.viewer = nglview.NGLWidget()
self.viewer.observe(self._on_picked, names='picked')
self.output = ipw.HTML("")
self.picked_out = ipw.Output()
self.button2_out = ipw.Output()
        super().__init__([self.smiles, self.create_structure_btn, self.viewer, self.picked_out, self.output, self.button2_out])
########
@staticmethod
def guess_scaling_factor(atoms):
import numpy as np
# set bounding box as cell
cx = 1.5 * (np.amax(atoms.positions[:,0]) - np.amin(atoms.positions[:,0]))
cy = 1.5 * (np.amax(atoms.positions[:,1]) - np.amin(atoms.positions[:,1]))
cz = 15.0
atoms.cell = (cx, cy, cz)
atoms.pbc = (True,True,True)
# calculate all atom-atom distances
c_atoms = [a for a in atoms if a.symbol[0]=="C"]
n = len(c_atoms)
dists = np.zeros([n,n])
for i, a in enumerate(c_atoms):
for j, b in enumerate(c_atoms):
dists[i,j] = norm(a.position - b.position)
# find bond distances to closest neighbor
dists += np.diag([np.inf]*n) # don't consider diagonal
bonds = np.amin(dists, axis=1)
# average bond distance
avg_bond = float(mode(bonds)[0])
# scale box to match equilibrium carbon-carbon bond distance
cc_eq = 1.4313333333
s = cc_eq / avg_bond
return s
@staticmethod
def scale(atoms, s):
cx, cy, cz = atoms.cell
atoms.set_cell((s*cx, s*cy, cz), scale_atoms=True)
atoms.center()
return atoms
@staticmethod
def smiles2D(smiles):
mol = MolFromSmiles(smiles)
from rdkit.Chem import AllChem
# generate the 2D coordinates
AllChem.Compute2DCoords(mol)
# get the 2D coordinates
for c in mol.GetConformers():
coords=c.GetPositions()
# get the atom labels
ll=[]
for i in mol.GetAtoms():
#ll.append(i.GetSymbol())
ll.append(i.GetAtomicNum())
ll=np.asarray(ll)
# create an ASE frame
c=Atoms('{:d}N'.format(len(coords)))
c.set_positions(coords)
c.set_atomic_numbers(ll)
return c
@staticmethod
def construct_cell(atoms, id1, id2):
p1 = [atoms[id1].x, atoms[id1].y]
p0 = [atoms[id2].x, atoms[id2].y]
p2 = [atoms[id2].x, atoms[id1].y]
v0 = np.array(p0) - np.array(p1)
v1 = np.array(p2) - np.array(p1)
angle = np.math.atan2(np.linalg.det([v0,v1]),np.dot(v0,v1))
#angle=np.degrees(angle)
cx = norm(v0)
#print np.degrees(angle),v0,v1,p0,p1
if np.abs(angle) > 0.01:
# s.euler_rotate(phi=angle,theta=0,psi=0,center(x[id1],y[id1],z[id1]))
atoms.rotate_euler(center=atoms[id1].position, phi=-angle,theta=0.0,psi=0.0)
yrange = | np.amax(atoms.positions[:,1]) | numpy.amax |
# -*- coding: utf-8 -*-
"""
.. invisible:
_ _ _____ _ _____ _____
| | | | ___| | | ___/ ___|
| | | | |__ | | | |__ \ `--.
| | | | __|| | | __| `--. \
\ \_/ / |___| |___| |___/\__/ /
\___/\____/\_____|____/\____/
Created on May 22, 2015
███████████████████████████████████████████████████████████████████████████████
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
███████████████████████████████████████████████████████████████████████████████
"""
import base64
import json
import logging
from random import randint
import numpy
from six import BytesIO
from twisted.internet import reactor
from twisted.web.client import Agent, FileBodyProducer
import unittest
from twisted.web.http_headers import Headers
from zope.interface import implementer
from veles.dummy import DummyWorkflow
from veles.loader import Loader, ILoader
from veles.loader.restful import RestfulLoader
from veles.logger import Logger
from veles.memory import Array
from veles.pickle2 import pickle
from veles.plumbing import Repeater
from veles.restful_api import RESTfulAPI, NumpyJSONEncoder
from veles.tests import timeout
@implementer(ILoader)
class DummyLoader(Loader):
def load_data(self):
pass
def create_minibatch_data(self):
pass
def fill_minibatch(self):
pass
class RESTAPITest(unittest.TestCase):
@timeout()
def test_workflow(self):
workflow = DummyWorkflow()
workflow.run_is_blocking = False
repeater = Repeater(workflow)
repeater.link_from(workflow.start_point)
port = 6565 + randint(-1000, 1000)
api = RESTfulAPI(workflow, port=port, path="/api")
api.link_from(repeater)
base_loader = DummyLoader(workflow)
base_loader.minibatch_data.reset(numpy.zeros((10, 10, 10)))
base_loader.normalizer.analyze(base_loader.minibatch_data.mem)
loader = RestfulLoader(workflow, minibatch_size=1)
loader.derive_from(base_loader)
loader.link_from(api)
workflow.del_ref(base_loader)
api.link_attrs(loader, "feed", "requests", "minibatch_size")
api.results = [numpy.ones((3, 3))]
repeater.link_from(loader)
workflow.end_point.link_from(api).unlink_from(workflow.start_point)
workflow.end_point.gate_block <<= True
loader.gate_block = ~workflow.end_point.gate_block
workflow.initialize()
run = api.run
def finish_run():
workflow.end_point.gate_block <<= not api.run_was_called
run()
api.run = finish_run
reactor.callWhenRunning(workflow.run)
agent = Agent(reactor)
headers = Headers({b'User-Agent': [b'twisted'],
b'Content-Type': [b'application/json']})
body = FileBodyProducer(BytesIO(json.dumps(
{"input": numpy.ones((10, 10)).tolist(),
"codec": "list"}).encode('charmap')))
d = agent.request(
b'POST', ("http://localhost:%d/api" % port).encode('charmap'),
headers=headers, bodyProducer=body)
response = [None]
def finished(result):
print("Received the result %d" % result.code)
response[0] = result
reactor.stop()
def failed(error):
error.printTraceback()
if reactor.running:
reactor.stop()
self.fail()
d.addCallback(finished)
d.addErrback(failed)
def stop():
reactor.callFromThread(reactor.stop)
workflow.thread_pool.register_on_shutdown(stop)
reactor.run()
self.assertIsNotNone(response[0])
self.assertEqual(response[0].code, 200)
# We should use deliverBody here, but the response is small enough
self.assertEqual(
response[0]._bodyBuffer[0],
b'{"result": [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]}')
def test_pickling(self):
workflow = DummyWorkflow()
port = 6565 + randint(-1000, 1000)
api = RESTfulAPI(workflow, port=port)
base_loader = DummyLoader(workflow)
base_loader.minibatch_data = numpy.zeros((10, 10, 10))
loader = RestfulLoader(workflow, minibatch_size=1)
loader.derive_from(base_loader)
workflow.del_ref(base_loader)
api.link_attrs(loader, "feed", "requests", "minibatch_size")
api.results = [ | numpy.ones((3, 3)) | numpy.ones |
import numpy as np
from functions.filterForTarget import filterForTarget
import unittest
class filterForTarget_spec(unittest.TestCase):
"""Test filterForTarget"""
def test_single_digit_list(self):
result = filterForTarget(np.array([1,2,3,4,5]), np.array([5]))
self.assertEqual(result, [5])
def test_double_digit_list(self):
result = filterForTarget(np.array([11,12,13,14,15]), np.array([5]))
self.assertEqual(result, [15])
def test_odd_digit_list(self):
result = filterForTarget(np.array([21,152,13,154,125]), | np.array([10]) | numpy.array |
import os
import pickle
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib
from matplotlib import font_manager as fm, rcParams
import numpy as np, scipy.stats as st
import glob
import os
import csv
import shutil
import random
# Say, "the default sans-serif font is COMIC SANS"
matplotlib.rcParams['font.sans-serif'] = "Consolas"
# Then, "ALWAYS use sans-serif fonts"
matplotlib.rcParams['font.family'] = "sans-serif"
#fpath = os.path.join("/usr/share/fonts/truetype", "Consolas-Bold_11600.ttf")
#prop = fm.FontProperties(fname=fpath)
def smart_dir(dir_name, base_list = None):
dir_name = dir_name + '/'
if base_list is None:
if not os.path.exists(dir_name):
os.makedirs(dir_name)
return dir_name
else:
dir_names = []
if not os.path.exists(dir_name):
os.makedirs(dir_name)
for d in range(len(base_list)):
dir_names.append(dir_name + base_list[d] + '/')
if not os.path.exists(dir_names[d]):
os.makedirs(dir_names[d])
return dir_names
colors = ['b','r','g','y','m','b','r','g','y','m','b','r','g','y','m', 'b','r','g','y','m','b','r','g','y','m','b','r','g','y','m']
marks = ['D','D','D','D','D','x','x','x','x','x','s','s','s','s','s', 'D','D','D','D','D','x','x','x','x','x','s','s','s','s','s']
styles = ['dashed','dashed','dashed','dashed','dashed','solid', 'solid', 'solid', 'solid', 'solid','dotted','dotted','dotted','dotted','dotted', 'dashed','dashed','dashed','dashed','dashed','solid', 'solid', 'solid', 'solid', 'solid','dotted','dotted','dotted','dotted','dotted']
def reshape_data(data, experiment):
shape = list(data[experiment].shape)
combined = shape[0] * shape[1]
del shape[0]
shape[0] = combined
data[experiment] = data[experiment].reshape(tuple(shape))
return data[experiment]
def process_data(dataset, model, experiment, task_ind, method_ind):
loaded = pickle.load( open( "results/{}/{}/results.pkl".format(model, dataset.lower()), "rb" ))
loaded = reshape_data(loaded, experiment)
data = loaded
if experiment == 'image_retreival_acc':
mean = np.mean(data[:, :, task_ind], axis=0)
std = np.std(data[:, :, task_ind], axis=0)
elif experiment == 'classification_accuracy_pc':
mean = np.mean(data[:, :, task_ind, method_ind, :], axis=0)
std = np.std(data[:, :, task_ind, method_ind, :], axis=0)
ret_info = []
for i in range(mean.shape[1]):
mean[:,i][mean[:,i] == -1] = np.nan
ret_info.append((str(i), mean[:,i], std[:,i]))
alll = ('all', np.mean(mean, axis=1), np.mean(std, axis=1))
ret_info.append(alll)
return ret_info
elif experiment == 'class_informative':
mean = np.mean(data[:, :, task_ind, :], axis=0) * 100
std = np.std(data[:, :, task_ind, :], axis=0) * 10
ret_info = []
for i in range(mean.shape[1]):
ret_info.append(('Layer {}'.format(i+1), mean[:,i], std[:,i]))
return ret_info
else:
mean = np.mean(data[:, :, task_ind, method_ind], axis=0)
std = np.std(data[:, :, task_ind, method_ind], axis=0)
return (model, mean, std)
def comparison_line(models, title, x_label, y_label, dataset, fig_name, cluster_flag=False):
num_phases, dataset, data_size, l_examples, classes_per_phase = dataset
fig_name, schedule = fig_name
title = "{}, {}, {} labeled examples p.c.\n{} phases, {} new classes per phase".format(dataset, schedule, l_examples, num_phases, classes_per_phase)
if cluster_flag:
title = "{}, {}\n{} phases, {} new classes per phase".format(dataset, schedule, num_phases, classes_per_phase)
plt.figure(figsize=(6,3))
x = | np.arange(data_size, (num_phases+1)*data_size, data_size) | numpy.arange |
import logging
try:
import qutip as qtp
except ImportError as e:
logging.warning('Could not import qutip, tomo code will not work')
import numpy as np
import time
import scipy
import os
import lmfit
import matplotlib.pyplot as plt
from pycqed.analysis import measurement_analysis as ma
class TomoAnalysis_JointRO():
"""
Performs state tomography based on an overcomplete set of measurements
and calibration measurements. Uses qutip to calculate resulting basis
states from applied rotations.
Works for Joint RO (like REM experiment).
Uses binary counting as general guideline in ordering states. Calculates
rotations by using the qutip library
BEFORE YOU USE THIS SET THE CORRECT ORDER BY CHANGING
'rotation_matrixes'
'measurement_basis' + 'measurement_basis_labels'
to values corresponding to your experiment
and maybe 'readout_basis'
"""
# The set of single qubit rotation matrixes used in the tomography
# measurement (will be assumed to be used on all qubits)
rotation_matrixes = [qtp.identity(2), qtp.sigmax(),
qtp.rotation(qtp.sigmax(), np.pi / 2),
qtp.rotation(qtp.sigmay(), np.pi / 2),
qtp.rotation(qtp.sigmax(), -np.pi / 2),
qtp.rotation(qtp.sigmay(), -np.pi / 2)]
measurement_operator_labels = ['I', 'X', 'x', 'y', '-x','-y']
#MAKE SURE THE LABELS CORRESPOND TO THE ROTATION MATRIXES DEFINED ABOVE
# The set of single qubit basis operators and labels
measurement_basis = [
qtp.identity(2), qtp.sigmaz(), qtp.sigmax(), qtp.sigmay()]
measurement_basis_labels = ['I', 'Z', 'X', 'Y']
# The operators used in the readout basis on each qubit
readout_basis = [qtp.identity(2), qtp.sigmaz()]
def __init__(self, measurements_cal, measurements_tomo,
n_qubits=2, n_quadratures=1, check_labels=True):
"""
keyword arguments:
measurements_cal --- Should be an array of length 2 ** n_qubits
measurements_tomo --- Should be an array of length
length(rotation_matrixes) ** n_qubits
        n_qubits --- default(2) the amount of qubits present in the experiment
n_quadratures --- default(1(either I or Q)) The amount of complete
measurement data sets. For example a combined IQ measurement has
2 measurement sets.
"""
self.measurements_cal = measurements_cal
self.measurements_tomo = measurements_tomo
self.n_qubits = n_qubits
self.n_states = 2 ** n_qubits
self.n_quadratures = n_quadratures
# Generate the vectors of matrixes that correspond to all measurements,
# readout bases and rotations
self.basis_vector = self._calculate_matrix_set(
self.measurement_basis, n_qubits)
self.readout_vector = self._calculate_matrix_set(
self.readout_basis, n_qubits)
self.rotation_vector = self._calculate_matrix_set(
self.rotation_matrixes, n_qubits)
if check_labels is True:
print('Measurement op. labels: {}'.format(self.get_meas_operator_labels(n_qubits)))
print('Basis labels: {}'.format(self.get_basis_labels(n_qubits)))
def execute_pseudo_inverse_tomo(self):
"""
Performs a linear tomography by simple inversion of the system of
equations due to calibration points
"""
# calculate beta positions in coefficient matrix
coefficient_matrix = self._calculate_coefficient_matrix()
basis_decomposition = np.zeros(4 ** self.n_qubits)
# first skip beta0
basis_decomposition[1:] = np.dot(
np.linalg.pinv(coefficient_matrix[:, 1:]), self.measurements_tomo)
# re-add beta0
basis_decomposition[0] = 1
# now recreate the rho
rho = sum([basis_decomposition[i] * self.basis_vector[i] /
(2 ** self.n_qubits)
for i in range(len(basis_decomposition))])
return (basis_decomposition, rho)
def execute_least_squares_physical_tomo(self, use_weights=True, show_time=False,
ftol=0.01, xtol=0.001, full_output=0,
max_iter=1000):
"""
Performs a max likelihood optimization using fmin_powell in order to
get the closest physically realisable state.
This is done by constructing a lower triangular matrix T consisting of
4 ** n qubits params
Keyword arguments:
use_weights : default(true) Weighs the quadrature data by the std in
betas obtained
--- arguments for scipy fmin_powel method below, see
the powel documentation
"""
# first we calculate the measurement matrices
tstart = time.time()
measurement_vector = []
n_rot = len(self.rotation_matrixes) ** self.n_qubits
# initiate with equal weights
self.weights = np.ones(self.n_quadratures * n_rot)
for quadrature in range(self.n_quadratures):
betas = self._calibrate_betas(
self.measurements_cal[quadrature * self.n_states:
(1 + quadrature) * self.n_states])
            # determine the weights based on the betas' absolute difference and
# accuracy
if (use_weights):
self.weights[
quadrature * n_rot:(1+quadrature) * n_rot] = (
max(betas) - min(betas)) / np.var(betas)
for rotation_index, rotation in enumerate(self.rotation_vector):
measurement_vector.append(
betas[0] * rotation.dag()
* self.readout_vector[0] * rotation)
for i in range(1, len(betas)):
measurement_vector[n_rot * quadrature + rotation_index] += betas[
i] * rotation.dag() * self.readout_vector[i] * rotation
# save it in the object for use in optimization
self.measurement_vector = measurement_vector
self.measurement_vector_numpy = [
vec.full() for vec in measurement_vector]
tlinear = time.time()
# find out the starting rho by the linear tomo
discard, rho0 = self.execute_pseudo_inverse_tomo()
# now fetch the starting t_params from the cholesky decomp of rho
tcholesky = time.time()
T0 = np.linalg.cholesky(scipy.linalg.sqrtm((rho0.dag() * rho0).full()))
t0 = np.zeros(4 ** self.n_qubits, dtype='complex')
di = | np.diag_indices(2 ** self.n_qubits) | numpy.diag_indices |
import numpy as np
import matplotlib.pyplot as plt
from qibo.models import Circuit
from qibo import gates
import aux_functions as aux
def rw_circuit(qubits, parameters, X=True):
"""Circuit that implements the amplitude distributor part of the option pricing algorithm.
Args:
qubits (int): number of qubits used for the unary basis.
        parameters (list): values to be introduced into the fSim gates for amplitude distribution.
        X (bool): whether or not the first X gate is executed.
    Returns:
        generator that yields the gates needed for the amplitude distributor circuit
"""
if qubits%2==0:
mid1 = int(qubits/2)
mid0 = int(mid1-1)
if X:
yield gates.X(mid1)
yield gates.fSim(mid1, mid0, parameters[mid0]/2, 0)
for i in range(mid0):
yield gates.fSim(mid0-i, mid0-i-1, parameters[mid0-i-1]/2, 0)
yield gates.fSim(mid1+i, mid1+i+1, parameters[mid1+i]/2, 0)
else:
mid = int((qubits-1)/2)
if X:
yield gates.X(mid)
for i in range(mid):
yield gates.fSim(mid-i, mid-i-1, parameters[mid-i-1]/2, 0)
yield gates.fSim(mid+i, mid+i+1, parameters[mid+i]/2, 0)
def rw_circuit_inv(qubits, parameters, X=True):
"""Circuit that implements the amplitude distributor part of the option pricing algorithm in reverse.
Used in the amplitude estimation part of the algorithm.
Args:
qubits (int): number of qubits used for the unary basis.
        parameters (list): values to be introduced into the fSim gates for amplitude distribution.
        X (bool): whether or not the first X gate is executed.
    Returns:
        generator that yields the gates needed for the amplitude distributor circuit in reverse order.
"""
if qubits%2==0:
mid1 = int(qubits/2)
mid0 = int(mid1-1)
for i in range(mid0 - 1, -1, -1):
yield gates.fSim(mid0 - i, mid0 - i - 1, -parameters[mid0 - i - 1]/2, 0)
yield gates.fSim(mid1 + i, mid1 + i + 1, -parameters[mid1 + i]/2, 0)
yield gates.fSim(mid1, mid0, -parameters[mid0]/2, 0)
if X:
yield gates.X(mid1)
else:
mid = int((qubits-1)/2)
for i in range(mid - 1, -1, -1):
yield gates.fSim(mid + i, mid + i + 1, -parameters[mid + i] / 2, 0)
yield gates.fSim(mid - i, mid - i - 1, -parameters[mid - i - 1] / 2, 0)
if X:
yield gates.X(mid)
def create_qc(qubits):
"""Creation of the quantum circuit and registers where the circuit will be implemented.
Args:
qubits (int): number of qubits used for the unary basis.
Returns:
q (list): quantum register encoding the asset's price in the unary bases.
ancilla (int): qubit that encodes the payoff of the options.
circuit (Circuit): quantum circuit with enough allocated space for the algorithm to run.
"""
q = [i for i in range(qubits)]
ancilla = qubits
circuit = Circuit(qubits+1)
return q, ancilla, circuit
def rw_parameters(qubits, pdf):
"""Parameters that encode a target probability distribution into the unary basis
Args:
qubits (int): number of qubits used for the unary basis.
pdf (list): known probability distribution function that wants to be reproduced.
Returns:
        parameters (list): values to be introduced into the fSim gates for amplitude distribution.
"""
if qubits%2==0:
mid = qubits // 2
else:
mid = (qubits-1)//2 #Important to keep track of the centre
last = 1
parameters = []
for i in range(mid-1):
angle = 2 * np.arctan(np.sqrt(pdf[i]/(pdf[i+1] * last)))
parameters.append(angle)
last = (np.cos(angle/2))**2 #The last solution is needed to solve the next one
angle = 2 * np.arcsin(np.sqrt(pdf[mid-1]/last))
parameters.append(angle)
last = (np.cos(angle/2))**2
for i in range(mid, qubits-1):
angle = 2 * np.arccos(np.sqrt(pdf[i]/last))
parameters.append(angle)
last *= (np.sin(angle/2))**2
return parameters
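# A minimal sketch: derives the fSim angles for a small, made-up distribution; the 4-point pdf
# below is hypothetical and only illustrates the expected input (a normalised array).
def _example_rw_parameters():
    pdf = np.array([0.1, 0.4, 0.4, 0.1])
    pdf = pdf / np.sum(pdf)        # rw_parameters assumes a normalised distribution
    return rw_parameters(4, pdf)   # one angle per neighbouring pair of unary states (3 here)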
def measure_probability(q):
"""Measuring gates on the unary basis qubits to check the validity of the amplitude distributor.
Args:
q (list): quantum register encoding the asset's price in the unary bases.
Returns:
        generator that yields the measuring gates to check the probability distribution.
"""
yield gates.M(*q, register_name='prob') #No measure on the ancilla qubit is necessary
def extract_probability(qubits, counts, samples):
    """Extract the normalized probability of each unary-basis state from the measured counts.
Args:
qubits (int): number of qubits used for the unary basis.
counts (dict): times each output has been measured.
samples (int): number of samples for normalization.
Returns:
prob (list): normalized probabilities for the measured outcomes.
"""
form = '{0:0%sb}' % str(qubits) # qubits?
prob = []
for i in reversed(range(qubits)):
prob.append(counts.get(form.format(2**i), 0)/samples)
return prob
def get_pdf(qubits, S0, sig, r, T):
"""Get a pdf to input into the quantum register from a target probability distribution.
Args:
qubits (int): number of qubits used for the unary basis.
S0 (real): initial asset price.
sig (real): market volatility.
r (real): market rate.
T (real): maturity time.
Returns:
values (np.array): price values associated to the unary basis.
pdf (np.array): probability distribution for the asset's price evolution.
"""
mu = (r - 0.5 * sig ** 2) * T + np.log(S0)
mean = np.exp(mu + 0.5 * T * sig ** 2)
variance = (np.exp(T * sig ** 2) - 1) * np.exp(2 * mu + T * sig ** 2)
values = np.linspace(max(mean - 3 * np.sqrt(variance), 0), mean + 3 * np.sqrt(variance), qubits)
pdf = aux.log_normal(values, mu, sig * np.sqrt(T))
return values, pdf
def load_quantum_sim(qu, S0, sig, r, T):
    """Build the quantum circuit that loads the target probability distribution in the unary basis.
Args:
qubits (int): number of qubits used for the unary basis.
S0 (real): initial asset price.
sig (real): market volatility.
r (real): market rate.
T (real): maturity time.
Returns:
circuit (Circuit): quantum circuit with the target probability encoded in the unary basis
values (np.array): price values associated to the unary basis.
pdf (np.array): probability distribution for the asset's price evolution.
"""
(values, pdf) = get_pdf(qu, S0, sig, r, T)
q, ancilla, circuit = create_qc(qu)
lognormal_parameters = rw_parameters(qu, pdf) # Solve for the parameters needed to create the target lognormal distribution
    circuit.add(rw_circuit(qu, lognormal_parameters)) # Build the probability loading circuit with the adjusted parameters
circuit.add(measure_probability(q)) #Circuit to test the precision of the probability loading algorithm
return circuit, (values, pdf)
def run_quantum_sim(qubits, circuit, shots):
"""Execute the quantum circuit and extract the probability of measuring each state of the unary basis
Args:
qubits (int): number of qubits used for the unary basis.
circuit (Circuit): quantum circuit with the target probability encoded in the unary basis.
shots (int): number of samples to extract from the circuit.
Returns:
prob_sim (list): normalized probability of each possible output in the unary basis.
"""
result = circuit(nshots=shots)
frequencies = result.frequencies(binary=True, registers=False)
prob_sim = extract_probability(qubits, frequencies, shots)
return prob_sim
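# A minimal end-to-end sketch: the market parameters below (S0, sig, r, T) are hypothetical and
# only show how the loading circuit is built and sampled with the default qibo backend.
def _example_probability_loading(qubits=8, shots=10000):
    circuit, (values, pdf) = load_quantum_sim(qubits, S0=2.0, sig=0.4, r=0.05, T=0.1)
    prob_sim = run_quantum_sim(qubits, circuit, shots)
    return values, pdf, prob_sim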
def payoff_circuit(qubits, ancilla, K, S):
    """Quantum circuit that encodes the expected payoff into the probability of measuring an ancilla qubit.
Args:
qubits (int): number of qubits used for the unary basis.
ancilla (int): qubit that encodes the payoff of the options.
K (real): strike price.
S (np.array): equivalent asset price for each element of the unary basis.
Returns:
generator that yields the gates required to encode the payoff into an ancillary qubit.
"""
for i in range(qubits): #Determine the first qubit's price that
qK = i #surpasses the strike price
if K<S[i]:
break
for i in range(qK, qubits): #Control-RY rotations controled by states
angle = 2 * np.arcsin(np.sqrt((S[i]-K)/(S[qubits-1]-K))) #with higher value than the strike
yield gates.RY(ancilla, angle).controlled_by(i) #targeting the ancilla qubit
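# A minimal sketch of the angle computation used above, on a hypothetical price grid: only prices
# above the strike contribute, and each payoff (S[i] - K) is normalised by the largest payoff.
def _example_payoff_angles(S=(90.0, 100.0, 110.0, 120.0), K=105.0):
    return [2 * np.arcsin(np.sqrt((s - K) / (S[-1] - K))) for s in S if s > K]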
def payoff_circuit_inv(qubits, ancilla, K, S):
    """Quantum circuit that encodes the expected payoff into the probability of measuring an ancilla qubit in reverse.
Circuit used in the amplitude estimation part of the algorithm.
Args:
qubits (int): number of qubits used for the unary basis.
ancilla (int): qubit that encodes the payoff of the options.
K (real): strike price.
S (np.array): equivalent asset price for each element of the unary basis.
Returns:
generator that yields the gates required for the inverse of the circuit used to encode
the payoff into an ancillary qubit.
"""
for i in range(qubits): #Determine the first qubit's price that
qK = i #surpasses the strike price
if K<S[i]:
break
for i in range(qK, qubits): #Control-RY rotations controled by states
angle = 2 * np.arcsin(np.sqrt((S[i]-K)/(S[qubits-1]-K))) #with higher value than the strike
yield gates.RY(ancilla, -angle).controlled_by(i) #targeting the ancilla qubit
def measure_payoff(q, ancilla):
"""Measurement gates needed to measure the expected payoff and perform post-selection
Args:
q (list): quantum register encoding the asset's price in the unary bases.
ancilla (int): qubit that encodes the payoff of the options.
Returns:
generator that yields the measurement gates to recover the expected payoff.
"""
yield gates.M(*(q+[ancilla]), register_name='payoff')
def load_payoff_quantum_sim(qubits, S0, sig, r, T, K):
    """Build the full quantum circuit: amplitude distributor, payoff estimator and measurement gates.
Args:
qubits (int): number of qubits used for the unary basis.
S0 (real): initial asset price.
sig (real): market volatility.
r (real): market rate.
T (real): maturity time.
K (real): strike price.
Returns:
circuit (Circuit): full quantum circuit with the amplitude distributor and payoff estimator.
S (np.array): equivalent asset price for each element of the unary basis.
"""
mu = (r - 0.5 * sig ** 2) * T + np.log(S0)
mean = np.exp(mu + 0.5 * T * sig ** 2)
variance = (np.exp(T * sig ** 2) - 1) * np.exp(2 * mu + T * sig ** 2)
S = np.linspace(max(mean - 3 * np.sqrt(variance), 0), mean + 3 * np.sqrt(variance), qubits)
ln = aux.log_normal(S, mu, sig * np.sqrt(T))
q, ancilla, circuit = create_qc(qubits)
lognormal_parameters = rw_parameters(qubits, ln)
circuit.add(rw_circuit(qubits, lognormal_parameters))
circuit.add(payoff_circuit(qubits, ancilla, K, S))
circuit.add(measure_payoff(q, ancilla))
return circuit, S
def run_payoff_quantum_sim(qubits, circuit, shots, S, K):
    """Execute the circuit that estimates the payoff of the option in the unary representation. Includes
post-selection scheme.
Args:
qubits (int): number of qubits used for the unary basis.
circuit (Circuit): full quantum circuit with the amplitude distributor and payoff estimator.
shots (int): number of shots to be performed
S (np.array): equivalent asset price for each element of the unary basis.
K (real): strike price.
Returns:
qu_payoff_sim (real): estimated payoff from the probability of the ancillary qubit.
"""
job_payoff_sim = circuit(nshots=shots)
counts_payoff_sim = job_payoff_sim.frequencies(binary=True, registers=False)
ones=0
zeroes=0
for key in counts_payoff_sim.keys(): # Post-selection
unary = 0
for i in range(0,qubits):
unary+=int(key[i])
if unary==1:
if int(key[qubits])==0:
zeroes+=counts_payoff_sim.get(key)
else:
ones+=counts_payoff_sim.get(key)
qu_payoff_sim = ones * (S[qubits - 1]-K) / (ones + zeroes)
return qu_payoff_sim
def diff_qu_cl(qu_payoff_sim, cl_payoff):
    """Calculation of the error from the simulated results and the classical expected value.
Args:
qu_payoff_sim (real): estimated payoff from the probability of the ancillary qubit.
cl_payoff (real): exact value computed classically.
Returns:
error (real): relative error between the simulated and exact result, in percentage.
"""
error = (100 * np.abs(qu_payoff_sim - cl_payoff) / cl_payoff)
return error
def diffusion_operator(qubits):
"""Quantum circuit that performs the diffusion operator, part of the amplitude estimation algorithm.
Args:
qubits (int): number of qubits used for the unary basis.
Returns:
        generator that yields the necessary gates to perform the diffusion operator.
"""
if qubits%2==0:
mid = int(qubits/2)
else:
mid = int((qubits-1)/2) #The random walk starts from the middle qubit
yield gates.X(qubits)
yield gates.H(qubits)
yield gates.CNOT(mid, qubits)
yield gates.H(qubits)
yield gates.X(qubits)
def oracle_operator(qubits):
"""Quantum circuit that performs the oracle operator, part of the amplitude estimation algorithm.
Args:
qubits (int): number of qubits used for the unary basis.
Returns:
        generator that yields the necessary gates to perform the oracle operator.
"""
yield gates.Z(qubits)
def Q(qubits, ancilla, K, S, lognormal_parameters):
"""Quantum circuit that performs the main operator for the amplitude estimation algorithm.
Args:
qubits (int): number of qubits used for the unary basis.
ancilla (int): qubit that encodes the payoff of the options.
K (real): strike price.
S (np.array): equivalent asset price for each element of the unary basis.
        lognormal_parameters (list): values to be introduced into the fSim gates for amplitude distribution.
    Returns:
        generator that yields the necessary gates to perform the main operator for AE.
"""
yield oracle_operator(qubits)
yield payoff_circuit_inv(qubits, ancilla, K, S)
yield rw_circuit_inv(qubits, lognormal_parameters, X=False)
yield diffusion_operator(qubits)
yield rw_circuit(qubits, lognormal_parameters, X=False)
yield payoff_circuit(qubits, ancilla, K, S)
def load_Q_operator(qubits, iterations, S0, sig, r, T, K):
"""Quantum circuit that performs the main operator for the amplitude estimation algorithm.
Args:
qubits (int): number of qubits used for the unary basis.
iterations (int): number of consecutive implementations of operator Q.
S0 (real): initial asset price.
sig (real): market volatility.
r (real): market rate.
T (real): maturity time.
K (real): strike price.
Returns:
circuit (Circuit): quantum circuit that performs the m=iterations step of the iterative
amplitude estimation algorithm.
"""
iterations = int(iterations)
mu = (r - 0.5 * sig ** 2) * T + np.log(S0)
mean = np.exp(mu + 0.5 * T * sig ** 2)
variance = (np.exp(T * sig ** 2) - 1) * np.exp(2 * mu + T * sig ** 2)
S = np.linspace(max(mean - 3 * np.sqrt(variance), 0), mean + 3 * np.sqrt(variance), qubits)
ln = aux.log_normal(S, mu, sig * np.sqrt(T))
lognormal_parameters = rw_parameters(qubits,ln)
q, ancilla, circuit = create_qc(qubits)
circuit.add(rw_circuit(qubits, lognormal_parameters))
circuit.add(payoff_circuit(qubits, ancilla, K, S))
for i in range(iterations):
circuit.add(Q(qubits, ancilla, K, S, lognormal_parameters))
circuit.add(measure_payoff(q, ancilla))
return circuit
def run_Q_operator(qubits, circuit, shots):
    """Execute the quantum circuit for one step of the iterative amplitude estimation algorithm.
Args:
qubits (int): number of qubits used for the unary basis.
circuit (Circuit): quantum circuit that performs the m=iterations step of the iterative
amplitude estimation algorithm.
shots (int): number of shots to be taken in intermediate steps of the AE algorithm.
Returns:
ones (int): number of measured ones after post-selection.
zeroes (int): number of measured zeroes after post-selection.
"""
job_payoff_sim = circuit(nshots=shots)
counts_payoff_sim = job_payoff_sim.frequencies(binary=True, registers=False)
ones = 0
zeroes = 0
for key in counts_payoff_sim.keys():
unary = 0
for i in range(0, qubits):
unary += int(key[i])
if unary == 1:
if int(key[qubits]) == 0:
zeroes += counts_payoff_sim.get(key)
else:
ones += counts_payoff_sim.get(key)
return ones, zeroes
def paint_prob_distribution(bins, prob_sim, S0, sig, r, T):
    """Function that returns a histogram of the measured outcome probabilities and compares it
    with the target probability distribution.
Args:
bins (int): number of bins of precision.
prob_sim (list): probabilities from measuring the quantum circuit.
S0 (real): initial asset price.
sig (real): market volatility.
r (real): market rate.
T (real): maturity time.
Returns:
image of the probability histogram in a .png file.
"""
from scipy.integrate import trapz
mu = (r - 0.5 * sig ** 2) * T + np.log(S0)
mean = np.exp(mu + 0.5 * T * sig ** 2)
variance = (np.exp(T * sig ** 2) - 1) * np.exp(2 * mu + T * sig ** 2)
S = np.linspace(max(mean - 3 * np.sqrt(variance), 0), mean + 3 * np.sqrt(variance),bins)
width = (S[1] - S[0]) / 1.2
fig, ax = plt.subplots()
ax.bar(S, prob_sim, width, label='Quantum', alpha=0.8)
x = np.linspace(max(mean - 3 * np.sqrt(variance), 0), mean + 3 * np.sqrt(variance), bins)
import skimage.io as io
import skimage.transform as skt
import numpy as np
from PIL import Image
from src.models.class_patcher import patcher
from src.utils.imgproc import *
from skimage.color import rgb2hsv, hsv2rgb, rgb2gray
from skimage.filters import gaussian
class patcher(patcher):
def __init__(self, body='./body/body_yuko.png', **options):
super().__init__('幽狐', body=body, pantie_position=[-2, 1130], **options)
self.mask = io.imread('./mask/mask_yuko.png')
self.ribbon_position = [1712, 1601]
self.bra_position = [298, 1301]
try:
self.use_ribbon_mesh = self.options['use_ribbon_mesh']
except:
self.use_ribbon_mesh = self.ask(question='Use Yuko ribbon mesh?', default=False)
if self.use_ribbon_mesh:
self.ribbon_base = io.imread('./mask/ribbon_yuko.png')[:, :, :3] / 255
self.ribbon_shade = io.imread('./material/ribbon_yuko.png')[:, :, 3] / 255
self.bra_base = io.imread('./mask/bra_yuko.png')[1300:, 300:-400] / 255
self.bra_mask = self.bra_base[:, :, 0] > 0
self.bra_center = io.imread('./mask/bra_yuko_center.png')[1300:, 300:-400, 0] > 0
self.bra_shade = io.imread('./material/bra_yuko_shade.png')[1300:, 300:-400, 3] / 255
self.frill = io.imread('./material/bra_yuko_frill.png')[1300:, 300:-400] / 255
self.lace = io.imread('./material/bra_yuko_lace.png')[1300:, 300:-400] / 255
self.ribbon_mask = io.imread('./mask/ribbon.png')
def gen_ribbon(self, image):
image = np.array(image)
ribbon = image[19:58, 5:35, :3]
base_color = np.mean(np.mean(ribbon[5:12, 16:20], axis=0), axis=0) / 255
shade_color = np.mean(np.mean(ribbon[8:14, 7:15], axis=0), axis=0) / 255
ribbon_base = io.imread('./mask/ribbon_yuko.png')[:, :, :3] / 255
ribbon_shade = io.imread('./material/ribbon_yuko.png')[:, :, 3] / 255
ribbon_base = (self.ribbon_base > 0) * base_color
ribbon_shade = self.ribbon_shade[:, :, None] * (1 - shade_color)
ribbon = ribbon_base - ribbon_shade
ribbon = np.dstack((ribbon, ribbon[:, :, 0] > 0))
ribbon = np.clip(ribbon, 0, 1)
return Image.fromarray(np.uint8(ribbon * 255))
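# Note added for clarity (not in the original source): gen_ribbon samples a base
# colour and a shade colour from fixed regions of the incoming pantie texture,
# recolours the ribbon mask with the base colour and subtracts the shade layer,
# so the generated ribbon follows the palette of the supplied pantie image.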
def gen_bra(self, image):
# image = Image.open('./dream/0101.png')
pantie = np.array(image)
if self.use_ribbon_mesh:
pantie = ribbon_inpaint(pantie)
else:
ribbon = pantie.copy()
ribbon[:, :, 3] = self.ribbon_mask[:, :, 1]
ribbon = ribbon[19:58, 8:30] / 255.0
front = pantie[20:100, 30:80, :3] / 255
front_shade = pantie[100:150, 0:40, :3] / 255
center = pantie[20:170, -200:-15, :3] / 255
base_color = np.mean(np.mean(center, axis=0), axis=0)
front_color = np.mean(np.mean(front, axis=0), axis=0)
shade_color = np.mean(np.mean(front_shade, axis=0), axis=0)
# make seamless design
design = rgb2gray(center[:, :, :3])[::-1, ::-1]
design = (design - np.min(design)) / (np.max(design) - np.min(design))
edge = 3
design_seamless = gaussian(design, sigma=3)
design_seamless[edge:-edge, edge:-edge] = design[edge:-edge, edge:-edge]
[hr, hc, hd] = center.shape
y = np.arange(-hr / 2, hr / 2, dtype=np.int16)
x = np.arange(-hc / 2, hc / 2, dtype=np.int16)
design_seamless = (design_seamless[y, :])[:, x] # rearrange pixels
design_seamless = resize(design_seamless, [1.65, 1.8])
design_seamless = np.tile(design_seamless, (3, 4))
posy = int((self.bra_center.shape[0] - design_seamless.shape[0]) / 2)
posx = int((self.bra_center.shape[1] - design_seamless.shape[1]) / 2)
sx = 0
sy = 0
design_seamless = (np.pad(design_seamless, [(posy + sy + 1, posy - sy), (posx + sx, posx - sx)], mode='constant'))
# Base shading
bra_base = self.bra_base[:, :, :3] * front_color
bra_base = bra_base - design_seamless[:, :, None] / 10
shade = rgb2hsv(np.tile((self.bra_shade)[:, :, None], [1, 1, 3]) * base_color)
shade[:, :, 0] -= 1
shade[:, :, 1] *= 0.5 + np.mean(base_color) / 3
shade[:, :, 2] /= 1 + 1 * np.mean(base_color)
bra_shade = hsv2rgb(shade)
# bra_shade = bra_shade[:, :, None] * shade_color
# Center painting
sx = -270
sy = -50
center = resize(center, [4, 4])
posy = int((self.bra_center.shape[0] - center.shape[0]) / 2)
posx = int((self.bra_center.shape[1] - center.shape[1]) / 2)
center = (np.pad(center, [(posy + sy, posy - sy), (posx + sx, posx - sx), (0, 0)], mode='constant'))
center = center * self.bra_center[:, :, None]
# Decoration painting
deco_shade = np.median(pantie[5, :, :3], axis=0) / 255
frill = np.dstack((self.frill[:, :, :3] * deco_shade, self.frill[:, :, 3]))
lace = np.dstack((self.lace[:, :, :3] * shade_color, self.lace[:, :, 3]))
from __future__ import generators, print_function
import numpy as np
from random import shuffle
from scipy.io import loadmat
import functools
import Queue
#from multiprocessing import Process, Queue, Manager, Pool
import threading
import time
from collections import defaultdict
def async_prefetch_wrapper(iterable, buffer=100):
"""
wraps an iterater such that it produces items in the background
uses a bounded queue to limit memory consumption
"""
done = 'DONE'# object()
def worker(q, it):
for item in it:
q.put(item)
q.put(done)
# launch a thread to fetch the items in the background
queue = Queue.Queue(buffer)
#pool = Pool()
#m = Manager()
#queue = m.Queue()
it = iter(iterable)
#workers = pool.apply_async(worker, (queue, it))
thread = threading.Thread(target=worker, args=(queue, it))
#thread = Process(target=worker, args=(queue, it))
thread.daemon = True
thread.start()
# pull the items of the queue as requested
while True:
item = queue.get()
if item == 'DONE':#done:
return
else:
yield item
#pool.close()
#pool.join()
def async_prefetch(func):
"""
decorator to make generator functions fetch items in the background
"""
@functools.wraps(func)
def wrapper(*args, **kwds):
return async_prefetch_wrapper(func(*args, **kwds))
return wrapper
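# --- Illustrative sketch (not in the original module) ------------------------
# Example of how the decorator above is meant to be used: the wrapped generator
# runs in a background thread and its items are buffered in a bounded queue, so
# production and consumption overlap. The generator and consumer names below are
# made up for illustration only.
#
# @async_prefetch
# def slow_batches():
#     for i in range(10):
#         time.sleep(0.1)   # simulate expensive I/O or preprocessing
#         yield i
#
# for batch in slow_batches():   # items are prefetched in the background
#     consume(batch)
# ------------------------------------------------------------------------------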
class DataSet(object):
def __init__(self, cfg):
"""Construct a DataSet.
"""
self.cfg = cfg
self.all_walks = np.fliplr(np.loadtxt(cfg.walks_dir, dtype=np.int)) # reverse the sequence
self.node_seq = self.all_walks[:, -1] # index by ending node
self.all_labels = self.get_labels(cfg.label_dir)
self.all_features= self.get_fetaures(cfg.features_dir)
#Increment the positions by 1 and mark the 0th one as False
self.train_nodes = np.concatenate(([False], np.load(cfg.label_fold_dir + 'train_ids.npy')))
self.val_nodes = np.concatenate(([False], np.load(cfg.label_fold_dir + 'val_ids.npy')))
self.test_nodes = np.concatenate(([False], np.load(cfg.label_fold_dir + 'test_ids.npy')))
# [!!!IMP!!]Assert no overlap between test/val/train nodes
self.change = 0
self.path_pred_variance = {}
self.label_cache, self.update_cache = {0:self.all_labels[0]}, {}
self.wce = self.get_wce()
def get_fetaures(self, path):
# Serves 2 purpose:
# a) add feature for dummy node 0 a.k.a <EOS> and <unlabeled>
# b) increments index of all features by 1, thus aligning it with indices in walks
all_features = np.load(path)
all_features = all_features.astype(np.float32, copy=False) # Required conversion for Python3
all_features = np.concatenate(([np.zeros(all_features.shape[1])], all_features), 0)
return all_features
def get_labels(self, path):
# Labels start with node '0'; Walks_data with node '1'
# To get corresponding mapping, increment the label node number by 1
# add label for dummy node 0 a.k.a <EOS> and <unlabeled>
all_labels = np.load(path)
all_labels = np.concatenate(([np.zeros(all_labels.shape[1])], all_labels), 0)
return all_labels
def get_wce(self):
if self.cfg.solver.wce:
valid = self.train_nodes + self.val_nodes
tot = np.dot(valid, self.all_labels)
wce = 1/(len(tot) * (tot*1.0/np.sum(tot)))
else:
wce = [1]*self.all_labels.shape[1]
print("Cross-Entropy weights: ",wce)
return wce
def accumulate_label_cache(self, labels, nodes):
#Aggregates all the labels for the corresponding nodes
#and tracks the count of updates made
default = (self.all_labels[0], 0) #Initial estimate -> all_zeros
#WTF!labels = labels[0]
if self.cfg.data_sets.binary_label_updates:
#Convert to binary and keep only the maximum value as 1
amax = np.argmax(labels, axis = 1)
labels = np.zeros(labels.shape)
for idx, pos in enumerate(amax):
labels[idx,pos] = 1
for idx, node in enumerate(nodes):
prv_label, prv_count = self.update_cache.get(node, default)
new_label = prv_label + labels[idx]
new_count = prv_count + 1
self.update_cache[node] = (new_label, new_count)
def update_label_cache(self):
#Average all the predictions made for the corresponding nodes and reset cache
alpha = self.cfg.solver.label_update_rate
update_no = len(self.path_pred_variance.items())
self.path_pred_variance[update_no] = {}
if len(self.label_cache.items()) <= 1: alpha =1
for k, v in self.update_cache.items():
old = self.label_cache.get(k, self.label_cache[0])
cur = v[0]/v[1]
new = (1-alpha)*old + alpha*cur
self.change += np.mean((new - old) **2)
self.path_pred_variance[update_no][k] = cur
self.label_cache[k] = new
print("\nChange in label: :", np.sqrt(self.change/self.cfg.data_sets._len_vocab)*100)
self.change = 0
self.update_cache = {}
def get_nodes(self, dataset):
nodes = []
if dataset == 'train':
nodes = self.train_nodes
elif dataset == 'val':
nodes = self.val_nodes
elif dataset == 'test':
nodes = self.test_nodes
elif dataset == 'all':
# Get all the nodes except the 0th node
nodes = [True]*len(self.train_nodes)
nodes[0] = False
else:
raise ValueError
return nodes
@async_prefetch
def next_batch(self, dataset, batch_size, shuffle=True):
nodes = self.get_nodes(dataset)
label_len = np.shape(self.all_labels)[1]
# Get position of all walks ending with desired set of nodes
pos = []
for node in np.where(nodes)[0]:
pos.extend(np.where(self.node_seq == node)[0])
pos = np.array(pos)
if shuffle:
indices = np.random.permutation(len(pos))
pos = pos[indices]
if batch_size == -1:
batch_size = len(pos)
tot = len(pos)//batch_size
for i in range(0, len(pos), batch_size):
x = self.all_walks[pos[i: i + batch_size]]
x = np.swapaxes(x, 0, 1) # convert from (batch x step) to (step x batch)
# get labels for valid data points, for others: select the 0th label
x2 = [[self.label_cache.get(item, self.all_labels[0]) for item in row] for row in x]
y = [self.all_labels[item] for item in x[-1]]
# get features for all data points
x = [[self.all_features[item] for item in row] for row in x]
seq = self.node_seq[pos[i: i + batch_size]]
yield (x, x2, seq, y, tot)
@async_prefetch
def next_batch_same(self, dataset, node_count=1, shuffle=False):
nodes = self.get_nodes(dataset)
nodes = np.where(nodes)
from datetime import datetime
import numpy as np
import os
import glob
from pathlib import Path
from vtk.util.numpy_support import numpy_to_vtk, vtk_to_numpy, numpy_to_vtkIdTypeArray
import vtk
import vedo
import math
#import trimesh
ROOT_FOLDER = Path(__file__).parent.parent
ASSETS_FOLDER = ROOT_FOLDER.joinpath('./iblviewer_assets')
FONTS_FOLDER = ASSETS_FOLDER.joinpath('./fonts')
EXAMPLES_FOLDER = ROOT_FOLDER.joinpath('./iblviewer_examples')
EXAMPLES_DATA_FOLDER = ROOT_FOLDER.joinpath('./iblviewer_examples/data')
def get_type(element):
"""
Get the type of object as a string
:return: String
"""
return str(element.__class__.__name__).lower()
def get_unique_name(collection, name, spacer='_'):
"""
Create a unique key for a collection by appending numbers when entries exist
:param collection: A list, collection, array, ...
:param name: Name (for instance 'Points')
:param spacer: Spacer char
:return: New name, for instance 'Points_4'
"""
similar_ones = []
max_value = 0
if name not in collection:
return name
for key in collection:
if name in key:
similar_ones.append(key)
if spacer in key:
value = key.split(spacer)[1]
max_value = max(int(value), max_value)
value = max(len(similar_ones), max_value)
return f'{name}{spacer}{value}'
def numpy2vtk(arr, dtype=None, deep=True, name=""):
"""
Convert a numpy array into a vtkDataArray
:param arr: Array
:param dtype: Data type. Allows to set a specific data type to the VTK array
:param deep: Whether a deep copy is made. Defaults to True
:param name: Name of the array
"""
if arr is None:
return None
arr = np.ascontiguousarray(arr)
if dtype is not None and dtype!='id':
arr = arr.astype(dtype)
if dtype and dtype=='id':
varr = numpy_to_vtkIdTypeArray(arr.astype(np.int64), deep=deep)
else:
varr = numpy_to_vtk(arr, deep=deep)
if name:
varr.SetName(name)
return varr
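# --- Illustrative sketch (not in the original module) ------------------------
# numpy2vtk is typically used to attach per-point or per-cell data to a VTK or
# vedo object. The mesh variable and array name below are illustrative only.
#
# scalars = np.random.rand(len(mesh.points())).astype(np.float32)
# varr = numpy2vtk(scalars, dtype=np.float32, name='MyScalars')
# mesh.polydata().GetPointData().AddArray(varr)
# ------------------------------------------------------------------------------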
def spherical_degree_angles_to_xyz(radius, theta, phi):
"""
Convert spherical degree angles to XYZ coordinates
:param radius: Radius
:param theta: Theta angle value in degrees
:param phi: Phi angle value in degrees
:return: List of 3 coordinates
"""
return vedo.spher2cart(radius, theta / 180 * math.pi, phi / 180 * math.pi)
def pick_object(plot, event_name=None, priority=None, cid=None):
"""
Pick an object
"""
x, y = plot.interactor.GetEventPosition()
plot.renderer = plot.interactor.FindPokedRenderer(x, y)
if not plot.picker:
plot.picker = vtk.vtkPropPicker()
plot.picker.PickProp(x, y, plot.renderer)
plot.picked2d = (x,y)
xp, yp = plot.interactor.GetLastEventPosition()
actor = plot.picker.GetProp3D()
delta3d = np.array([0,0,0])
picked3d = None
picker = plot.picker
if actor is None:
# Ok, this is tricky. I found out that vtkPropPicker, even
# if it is optimized, can fail at detecting a simple mesh,
# so we use vtkPicker as a fallback plan
picker = vtk.vtkPicker()
picker.Pick(x, y, 0.0, plot.renderer)
actor = picker.GetProp3D()
if actor is not None:
picked3d = np.array(picker.GetPickPosition())
if isinstance(actor, vedo.Mesh):
# There is a bug with transparent objects or objects that do not have ForceOpaqueOn()
# which prevents picked3d from being valid so we have to use another picking method
cell_picker = vtk.vtkCellPicker()
cell_picker.Pick(x, y, 0.0, plot.renderer)
if cell_picker.GetProp3D() == actor:
picked3d = np.array(cell_picker.GetPickPosition())
try:
if actor.picked3d is not None:
delta3d = picked3d - actor.picked3d
actor.picked3d = picked3d
except AttributeError:
return
else:
actor = plot.picker.GetActor2D()
dx, dy = x-xp, y-yp
event_dict = vedo.utils.dotdict({
"name": event_name,
"id": cid,
"priority": priority,
"at": plot.renderers.index(plot.renderer),
"actor": actor,
"picked3d": picked3d,
"keyPressed": plot.interactor.GetKeySym(),
"picked2d": (x,y),
"delta2d": (dx, dy),
"angle2d": np.arctan2(dy,dx),
"speed2d": np.sqrt(dx*dx+dy*dy),
"delta3d": delta3d,
"speed3d": np.sqrt(np.dot(delta3d,delta3d)),
"isPoints": isinstance(actor, vedo.Points),
"isMesh": isinstance(actor, vedo.Mesh),
"isAssembly": isinstance(actor, vedo.Assembly),
"isVolume": isinstance(actor, vedo.Volume),
"isPicture": isinstance(actor, vedo.Picture),
"isActor2D": isinstance(actor, vtk.vtkActor2D)
})
return event_dict
def add_callback(plot, event_name, func, priority=0.0):
"""
Modified function from vedo. Vedo (and pyvista, for that matter) is structured in a way
that helps when using vtk, but its helpers sometimes make assumptions we don't want,
which gets in the way; hence this modified version.
Add a function to be executed while show() is active.
Information about the event can be acquired with method ``getEvent()``.
Return a unique id for the callback.
The callback function (see example below) exposes a dictionary
Frequently used events are:
- KeyPress, KeyRelease: listen to keyboard events
- LeftButtonPress, LeftButtonRelease: listen to mouse clicks
- MiddleButtonPress, MiddleButtonRelease
- RightButtonPress, RightButtonRelease
- MouseMove: listen to mouse pointer changing position
- MouseWheelForward, MouseWheelBackward
- Enter, Leave: listen to mouse entering or leaving the window
- Pick, StartPick, EndPick: listen to object picking
- ResetCamera, ResetCameraClippingRange
- Error, Warning
- Char
- Timer
Check the complete list of events here:
https://vtk.org/doc/nightly/html/classvtkCommand.html
"""
if not plot.interactor:
return None
# Processing names is removed from vedo function
# Also the whole thing is refactored with improved picking
def wrapper(iren=None, event_name=None):
event_dict = pick_object(plot, event_name, priority, cid)
func(event_dict)
cid = plot.interactor.AddObserver(event_name, wrapper, priority)
return cid
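# --- Illustrative sketch (not in the original module) ------------------------
# Typical use of add_callback: register a handler on a vedo Plotter and read the
# event dictionary built by pick_object above. Since name processing is removed,
# the full VTK event name is passed. The plotter variable and handler body are
# illustrative only.
#
# def on_left_click(event):
#     if event is None or event.actor is None:
#         return
#     print('picked', event.actor, 'at', event.picked3d)
#
# plot = vedo.Plotter()
# add_callback(plot, 'LeftButtonPressEvent', on_left_click, priority=10.0)
# ------------------------------------------------------------------------------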
def get_file_name(file_name, extension):
"""
Get full file name
:param file_name: File name without extension
:param extension: File extension
:return: File name with extension
"""
if str(file_name).endswith(extension):
full_file_name = str(file_name)
else:
full_file_name = str(file_name) + '.' + str(extension)
return full_file_name
def get_local_data_file_path(file_name, extension, sub_folder=''):
"""
Get data path
:param file_name: File name without extension
:param extension: File extension
:return: File path
"""
return ASSETS_FOLDER.joinpath(sub_folder, get_file_name(file_name, extension))
def get_surface_mesh_path(file_name, meshes_path=None, extension='ply', default_meshes_path=None):
"""
Get a surface mesh file path
:param file_name: File name without extension
:param meshes_path: Folder path. If None given, this method will look into the data folder of iblviewer
:param extension: File extension
:param default_meshes_path: Fallback local or remote path
:return: Full mesh file path
"""
if meshes_path is None:
region_mesh_path = str(get_local_data_file_path(file_name, extension, 'surfaces'))
if not os.path.exists(region_mesh_path):
if default_meshes_path is not None:
region_mesh_path = default_meshes_path
else:
region_mesh_path = 'https://raw.github.com/int-brain-lab/iblviewer/main/iblviewer_assets/surfaces/'
region_mesh_path += get_file_name(file_name, extension)
else:
region_mesh_path = str(os.path.join(meshes_path, get_file_name(file_name, extension)))
return region_mesh_path
def load_surface_mesh(file_name, meshes_path=None, extension='ply'):
"""
Load a surface mesh with vedo.
:param file_name: File name without extension
:param meshes_path: Folder path. If None given, this method will look into the data folder of iblviewer
:param extension: File extension
:return: Mesh or None if path is invalid
"""
file_path = get_surface_mesh_path(file_name, meshes_path, extension)
if file_path.startswith('https') or os.path.exists(file_path):
return vedo.load(file_path)
def change_file_name(file_path, prefix=None, name=None, suffix=None):
"""
Change the file name from the given file path
:param file_path: Input file path
:param prefix: Prefix to the file name
:param name: Whether a new name is set instead of the current name.
If None, the current file name is used.
:param suffix: Suffix to the file name
:return: New file path
"""
path, file_name, extension = split_path(file_path)
if prefix is None:
prefix = ''
if suffix is None:
suffix = ''
if name is None or name == '' or not isinstance(name, str):
name = file_name
return os.path.join(path, prefix + name + suffix + extension)
def split_path(path):
"""
Split any given file path to folder path, file name and extension
:return: Folder path, file name and extension
"""
base_name = os.path.basename(path)
file_name, extension = os.path.splitext(base_name)
return path[:-len(base_name)], file_name, extension
def time_diff(t):
"""
Get a time difference in seconds
:param t: Time
:return: Number of seconds
"""
now = datetime.now()
duration = now - t
return duration.total_seconds()
def recompute_normals(target):
pdnorm = vtk.vtkPolyDataNormals()
pdnorm.SetInputData(target)
pdnorm.ComputePointNormalsOn()
pdnorm.ComputeCellNormalsOn()
pdnorm.FlipNormalsOff()
pdnorm.ConsistencyOn()
pdnorm.Update()
return pdnorm.GetOutput() #self._data
def get_actor_center(actor):
"""
Get the absolute center position of an actor
:param actor: VTK actor
:return: 3d array
"""
try:
if isinstance(actor, vedo.Volume):
return actor.center() + actor.pos()
else:
return actor.centerOfMass() + actor.pos() # TODO: check that this is necessary (adding pos)
except Exception as e:
raise e
def get_actor_bounds(actor):
"""
Get the bounds of an actor as xmin, xmax, ymin, ymax, zmin, zmax
:param actor: VTK actor
:return: Array with 6 values
"""
if actor is None:
return
try:
if isinstance(actor, vedo.Volume):
d = actor.dimensions() * actor.spacing()
c = get_actor_center(actor)
return c[0] - d[0], c[0] + d[0], c[1] - d[1], c[1] + d[1], c[2] - d[2], c[2] + d[2]
else:
return actor.bounds()
except Exception as e:
raise e
def get_actor_dimensions(actor):
"""
Get the dimensions of an actor
:param actor: VTK actor
:return: 3d array
"""
if actor is None:
return
try:
if isinstance(actor, vedo.Volume):
return actor.dimensions() * actor.spacing()# equivalent to self.model.resolution
else:
xmin, xmax, ymin, ymax, zmin, zmax = actor.bounds()
return np.array([xmax - xmin, ymax - ymin, zmax - zmin])
except Exception as e:
raise e
def get_bounding_planes(actor):
"""
Get bounding planes for an actor
:param actor: VTK actor
:return: vtkPlanes
"""
planes = vtk.vtkPlanes()
planes.SetBounds(actor.GetBounds())
return planes
def get_planes_bounds(planes):
"""
Get the bounding box coordinates of a series of planes.
[WARNING] Only works for six planes (box mode) at the moment
:param planes: vtkPlaneCollection
:return: 6 values
"""
origins = list()
for p_id in range(planes.GetNumberOfItems()):
plane = planes.GetItem(p_id)
origin = np.array(plane.GetOrigin())
origins.append(origin)
# We don't want zeros to be accounted for so we select planes of interest
# First x planes, then y planes, then z ones. To be improved/generalized.
origins = np.array(origins)
mi_x = np.min(origins[:2], axis=0).tolist()
ma_x = np.max(origins[:2], axis=0).tolist()
mi_y = np.min(origins[2:4], axis=0).tolist()
ma_y = np.max(origins[2:4], axis=0).tolist()
mi_z = np.min(origins[4:6], axis=0).tolist()
import numpy as np
import scipy.stats as stats
import scipy.linalg as la
import scipy.optimize as optimize
import scipy.integrate as integrate
import sklearn.linear_model
import kernels
import ep_fast
#import EP_cython
np.set_printoptions(precision=4, linewidth=200)
class GradientFields():
def __init__(self, K_nodiag, s0, t_i, prev):
normPDF = stats.norm(0,1)
try: t_i[0]
except: t_i = np.zeros(K_nodiag.shape[0]) + t_i
#general computations (always the same if the fixed effects are 0!!!!!)
self.Ki = normPDF.sf(t_i)
self.Ps = s0 + (1-s0)*self.Ki
self.Pi = self.Ki / self.Ps
self.stdY = np.sqrt(self.Pi * (1-self.Pi))
#compute Atag0 and B0
self.phi_ti = normPDF.pdf(t_i)
self.phitphit = np.outer(self.phi_ti, self.phi_ti)
self.stdY_mat = np.outer(self.stdY, self.stdY)
mat1_temp = self.phi_ti / self.stdY
self.mat1 = np.outer(mat1_temp, mat1_temp)
sumProbs_temp = np.tile(self.Pi, (K_nodiag.shape[0], 1))
sumProbs = sumProbs_temp + sumProbs_temp.T
Atag0_B0_inner_vec = self.Pi*(1-s0)
self.mat2 = np.outer(Atag0_B0_inner_vec, Atag0_B0_inner_vec) + 1-sumProbs*(1-s0)
self.Atag0 = self.mat1*self.mat2
self.B0 = np.outer(self.Ps, self.Ps)
#Compute the elements of the function value (the squared distance between the observed and expected pairwise phenotypic covariance)
self.K_nodiag_AB0 = K_nodiag * self.Atag0/self.B0
self.K_nodiag_sqr_AB0 = K_nodiag * self.K_nodiag_AB0
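# --- Illustrative note (not in the original module) ---------------------------
# Both classes in this file rely on the liability-threshold model: an individual
# is a case when its latent liability exceeds a threshold t chosen so that
# P(liability > t) equals the population prevalence, i.e.
#
#   t = stats.norm(0, np.sqrt(sig2g + sig2e)).isf(prevalence)
#
# For example, with unit total variance and a prevalence of 1%,
# stats.norm(0, 1).isf(0.01) gives roughly 2.326.
# -------------------------------------------------------------------------------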
class PrevTest():
def __init__(self, n, m, prev, useFixed, h2Scale=1.0, prng=None, num_generate=None):
self.prng = prng
if (prng is None): self.prng = np.random.RandomState(args.seed)
self.n = n
self.useFixed = useFixed
self.h2Scale = h2Scale
if num_generate is None:
if prev == 0.5:
numGeno = n
else:
numGeno = np.maximum(int(float(self.n)/float(2*prev)), 25000)
else:
numGeno = num_generate
#generate SNPs
mafs = self.prng.rand(m) * 0.45 + 0.05
self.X = prng.binomial(2, mafs, size=(numGeno, m)).astype(np.float)
mafs_estimated = mafs.copy()
self.X_estimated = self.X.copy()
self.X -= 2*mafs
self.X_estimated -= 2*mafs_estimated
self.X /= np.sqrt(2*mafs*(1-mafs))
self.X_estimated /= np.sqrt(2*mafs_estimated*(1-mafs_estimated))
self.m = m
self.n = n
X_mean_diag = np.mean(np.einsum('ij,ij->i', self.X, self.X)) / self.X.shape[1]
X_estimated_mean_diag = np.mean(np.einsum('ij,ij->i', self.X_estimated, self.X_estimated)) / self.X.shape[1]
self.diag_ratio = X_estimated_mean_diag / X_mean_diag
self.prev = prev
#approx coeffs lam_i and c_i for logistic likelihood
self.logistic_c = np.array([1.146480988574439e+02, -1.508871030070582e+03, 2.676085036831241e+03, -1.356294962039222e+03, 7.543285642111850e+01])
self.logistic_lam = np.sqrt(2)*np.array([0.44 ,0.41, 0.40, 0.39, 0.36])
self.logistic_lam2 = self.logistic_lam**2
self.logistic_clam = self.logistic_c * self.logistic_lam
def genData(self, h2, eDist, numFixed, ascertain=True, scaleG=False, extraSNPs=0, fixedVar=0, frac_cases=0.5, kernel='linear', rbf_scale=1.0):
args.seed += 1
self.true_h2 = h2
self.ascertain = ascertain
self.eDist = eDist
if (numFixed==0): fixedVar=0
if (numFixed > 0): assert fixedVar>0
self.fixedVar = fixedVar
self.covars = self.prng.randn(self.X.shape[0], numFixed)
if (eDist == 'normal' and not scaleG): sig2g = h2/(1-h2)
elif (eDist == 'normal' and scaleG): sig2g = h2
elif (eDist == 'logistic' and not scaleG): sig2g = (np.pi**2)/3.0 * h2 / (1 - h2)
elif (eDist == 'logistic' and scaleG): sig2g = h2
else: raise ValueError('unknown e_dist. Valid value are normal, logistic')
if kernel == 'linear':
self.beta = self.prng.randn(self.m) * np.sqrt(sig2g/self.m) #generate effect sizes
self.g = self.X.dot(self.beta) #generate genetic effects
self.g_estimated = self.X_estimated.dot(self.beta)
elif args.kernel == 'rbf':
assert scaleG
kernel_obj = kernels.ScaledKernel(kernels.RBFKernel(self.X))
K = kernel_obj.getTrainKernel(np.array([np.log(rbf_scale), np.log(sig2g) / 2.0]))
L = la.cholesky(K, lower=True, overwrite_a=True)
self.g = L.dot(np.random.randn(K.shape[0]))
if np.allclose(self.X, self.X_estimated):
self.g_estimated = self.g.copy()
else:
kernel_obj_estimated = kernels.ScaledKernel(kernels.RBFKernel(self.X_estimated))
K_estimated = kernel_obj_estimated.getTrainKernel(np.array([np.log(rbf_scale), np.log(sig2g) / 2.0]))
L_estimated = la.cholesky(K_estimated, lower=True, overwrite_a=True)
self.g_estimated = L_estimated.dot(np.random.randn(K_estimated.shape[0]))
else:
raise ValueError('unknown kernel')
#create identical twins if needed
if self.prev == 0.5:
numGeno = self.n
else:
numGeno = np.maximum(int(float(self.n)/float(2*self.prev)), 25000)
self.fixedEffects = np.ones(numFixed) * (0 if (numFixed==0) else np.sqrt(fixedVar / numFixed))
self.covars = self.prng.randn(self.g.shape[0], numFixed)
m = self.covars.dot(self.fixedEffects)
self.g += m
self.g_estimated += m
if (eDist == 'logistic' and numFixed>0): raise ValueError('logistic distribution with fixed effects not supported')
#generate environmental effect
if (eDist == 'normal' and not scaleG): e = self.prng.randn(self.g.shape[0])
elif (eDist == 'normal' and scaleG): e = self.prng.randn(self.g.shape[0]) * np.sqrt(1 - sig2g)# - (fixedVar if (numFixed>0) else 0))
elif (eDist == 'logistic' and not scaleG): e = stats.logistic(0,1).rvs(self.g.shape[0])
elif (eDist == 'logistic' and scaleG): e = stats.logistic(0,1).rvs(self.g.shape[0]) * np.sqrt(1-sig2g) / np.sqrt((np.pi**2)/3.0)
else: raise ValueError('unknown e distribution: ' + self.eDist)
self.yAll = self.g + e
self.yAll_estimated = self.g_estimated + e
self.affCutoff = np.percentile(self.yAll, 100*(1-self.prev))
cases = (self.yAll >= self.affCutoff) #determine cases
cases_estimated = (self.yAll_estimated >= self.affCutoff) #determine cases
controls = ~cases
controls_estimated = ~cases_estimated
self.y = np.ones(self.yAll.shape[0])
self.y[controls] = -1
self.y_estimated = np.ones(self.yAll.shape[0])
self.y_estimated = np.ones(self.yAll.shape[0])
self.y_estimated[controls_estimated] = -1
#select cases and controls
caseInds = np.where(cases)[0]
controlInds = np.where(controls)[0]
if ascertain:
numCases = np.sum(cases)
if (numCases > self.n/2+2):
selectedCases = self.prng.permutation(numCases)[:self.n//2]
caseInds = caseInds[selectedCases]
numCases = len(caseInds)
numControls = int(numCases * (1-frac_cases)/frac_cases)
selectedControls = self.prng.permutation(controls.sum())[:numControls]
selectedInds = np.concatenate((caseInds, controlInds[selectedControls]))
else:
while True:
selectedInds = self.prng.permutation(cases.shape[0])[:self.n]
if (np.sum(cases[selectedInds]) > 0): break
#scramble inds to avoid numerical issues
self.prng.shuffle(selectedInds)
self.y = self.y[selectedInds]
###print('%%cases: %0.2f'%(np.mean(self.y>0)))
self.g = self.g[selectedInds]
self.g_estimated = self.g_estimated[selectedInds]
self.y_cont = self.yAll[selectedInds]
self.covars = self.covars[selectedInds, :]
self.X_selected = self.X_estimated[selectedInds, :]
if (extraSNPs > 0):
###print('Adding', extraSNPs, 'non-causal SNPs...')
mafs = self.prng.rand(extraSNPs) * 0.45 + 0.05
X2 = self.prng.binomial(2, mafs, size=(self.X_selected.shape[0], extraSNPs)).astype(np.float)
X2 -= 2*mafs
X2 /= np.sqrt(2*mafs*(1-mafs))
self.X_selected = np.concatenate((self.X_selected, X2), axis=1)
#create the kernel matrix
if kernel=='linear':
kernel_obj = kernels.linearKernel(self.X_selected)
K = kernel_obj.getTrainKernel(np.array([]))
elif kernel=='rbf':
kernel_obj = kernels.RBFKernel(self.X_selected)
K = kernel_obj.getTrainKernel(np.array([np.log(rbf_scale)]))
else:
raise ValueError('unknown kernel')
self.kernel = kernels.ScaledKernel(kernel_obj)
def computeT(self, K, sig2e=np.pi**2/3.0):
if (self.prev==0.5): return 0.0
controls = (self.y < 0)
cases = ~controls
diagK = np.diag(K)
sig2g = (1-self.prev)*np.mean(diagK[controls]) + self.prev*np.mean(diagK[cases])
if (self.eDist == 'normal'): t = stats.norm(0, np.sqrt(sig2g+1)).isf(self.prev)
elif (self.eDist == 'logistic'):
s = np.sqrt(3*sig2e/np.pi**2)
normCache = np.log(np.sqrt(2*np.pi*sig2g))
llF = lambda f,t: -(f-t)**2/(2*sig2g) - normCache
pFAndY = lambda f,t: np.exp(llF(f,t)) * (1.0/(1+np.exp(-f/s)) if f>-35 else 0.0)
pY = lambda t: integrate.quad(lambda f:pFAndY(f,t), -np.inf, np.inf)
t = -optimize.minimize_scalar(lambda t:(pY(t)[0]-self.prev)**2, method='bounded', bounds=(-8, 8)).x
else: raise Exception('unknown e distribution: ' + self.eDist)
return t
def likErf_EP(self, y, mu, s2, hyp=None, compDerivs=False):
sqrtVarDenom = 1.0 / np.sqrt(1+s2)
z = mu * sqrtVarDenom * y
normPDF = stats.norm(0,1)
lZ = normPDF.logcdf(z)
if (not compDerivs): return lZ
n_p = np.exp(normPDF.logpdf(z) - lZ)
dlZ = n_p * sqrtVarDenom * y #1st derivative wrt mean
d2lZ = -n_p * (z+n_p) / (1+s2) #2nd derivative wrt mean
return lZ, dlZ, d2lZ
#compute EP for a single individual, and compute derivatives with respect to the mean (mu)
def likLogistic_EP_single_new(self, y, mu, s2, hyp):
t = hyp[4]; mu = mu-t
hyp[4] = 0
lZc, dlZc, d2lZc = self.likProbit_EP_single(y, mu*self.logistic_lam, s2*self.logistic_lam2, hyp)
lZ = self.log_expA_x_single(lZc, self.logistic_c) #A=lZc, B=dlZc, d=c.*lam', lZ=log(exp(A)*c)
dlZ = self.expABz_expAx_single(lZc, self.logistic_c, dlZc, self.logistic_clam) #((exp(A).*B)*d)./(exp(A)*c)
#d2lZ = ((exp(A).*Z)*e)./(exp(A)*c) - dlZ.^2 where e = c.*(lam.^2)'
d2lZ = self.expABz_expAx_single(lZc, self.logistic_c, dlZc**2+d2lZc, self.logistic_c * self.logistic_lam2) - dlZ**2
#A note (from the GPML package documentation):
#The scale mixture approximation does not capture the correct asymptotic
#behavior; we have linear decay instead of quadratic decay as suggested
#by the scale mixture approximation. By observing that for large values
#of -f*y ln(p(y|f)) for likLogistic is linear in f with slope y, we are
#able to analytically integrate the tail region.
val = np.abs(mu) - 196/200*s2-4 #empirically determined bound at val==0
lam = 1.0 / (1.0+np.exp(-10*val)) #interpolation weights
lZtail = np.minimum(s2/2.0-np.abs(mu), -0.1) #apply the same to p(y|f) = 1 - p(-y|f)
if (mu*y > 0):
lZtail = np.log(1-np.exp(lZtail)) #label and mean agree
dlZtail = 0
else:
dlZtail = -np.sign(mu)
lZ = (1-lam)* lZ + lam* lZtail #interpolate between scale ..
dlZ = (1-lam)* dlZ + lam* dlZtail #.. mixture and ..
d2lZ = (1-lam)*d2lZ #.. tail approximation
hyp[4] = t
return lZ, dlZ, d2lZ
def likLogistic_EP_multi_new(self, y, mu, s2, hyp=None):
t = hyp[4]; mu = mu-t
hyp[4] = 0
lZc = self.likProbit_EP_multi(np.outer(y, np.ones(5)), np.outer(mu, self.logistic_lam), np.outer(s2, self.logistic_lam2), hyp)
lZ = self.log_expA_x_multi(lZc, self.logistic_c) #A=lZc, B=dlZc, d=c.*lam', lZ=log(exp(A)*c)
val = np.abs(mu) - 196/200*s2-4 #empirically determined bound at val==0
lam = 1.0 / (1.0+np.exp(-10*val)) #interpolation weights
lZtail = np.minimum(s2/2.0-np.abs(mu), -0.1) #apply the same to p(y|f) = 1 - p(-y|f)
muy = mu*y
id = muy>0; lZtail[id] = np.log(1-np.exp(lZtail[id])) #label and mean agree
lZ = (1-lam)*lZ + lam*lZtail #interpolate between scale mixture and tail approximation
hyp[4] = t
return lZ
def likProbit_EP_multi(self, y, mu, s2, hyp):
sig2e, t = hyp[0], hyp[4]
lZ = stats.norm(0,1).logcdf(y * (mu-t) / np.sqrt(s2+sig2e))
return lZ
def likProbit_EP_single(self, y, mu, s2, hyp):
sig2e, t = hyp[0], hyp[4]
a = y / np.sqrt(s2+sig2e)
z = a * (mu-t)
normPDF = stats.norm(0,1)
lZ = normPDF.logcdf(z)
n_p = np.exp(normPDF.logpdf(z) - lZ)
dlZ = a * n_p
d2lZ = -a**2 * n_p * (z+n_p)
return lZ, dlZ, d2lZ
def likFunc_EP_asc_multi(self, y, mu, s2, hyp):
logS0, logSDiff, sDiff = hyp[1], hyp[2], hyp[3]
likFunc_numer, likFunc_denom = hyp[5], hyp[6]
lZ = likFunc_numer(1, mu, s2, hyp)
logZstar = np.logaddexp(logS0, logSDiff+lZ)
return logZstar
def likFunc_EP_asc_single(self, y, mu, s2, hyp):
logS0, logSDiff, sDiff = hyp[1], hyp[2], hyp[3]
likFunc_numer, likFunc_denom = hyp[5], hyp[6]
lZ, dlZ, d2lZ = likFunc_numer(1, mu, s2, hyp)
logZstar = np.logaddexp(logS0, logSDiff+lZ)
expDiff = np.exp(lZ-logZstar)
temp = sDiff * expDiff
dZstar = temp * dlZ
d2Zstar = temp * (d2lZ + dlZ**2 * (1-temp))
return logZstar, dZstar, d2Zstar
def likFunc_EP_both_single(self, y, mu, s2, hyp):
logS0, logSDiff, sDiff = hyp[1], hyp[2], hyp[3]
likFunc_numer, likFunc_denom = hyp[5], hyp[6]
lZ_numer, dlZ_numer, d2lZ_numer = likFunc_numer(y, mu, s2, hyp)
lZ_numer += (logS0 if y<0 else 0)
lZ_denom, dlZ_denom, d2lZ_denom = likFunc_denom(y, mu, s2, hyp)
return lZ_numer-lZ_denom, dlZ_numer-dlZ_denom, d2lZ_numer-d2lZ_denom
def likFunc_EP_both_multi(self, y, mu, s2, hyp):
logS0, logSDiff, sDiff = hyp[1], hyp[2], hyp[3]
likFunc_numer, likFunc_denom = hyp[5], hyp[6]
lZ_numer = likFunc_numer(y, mu, s2, hyp)
lZ_numer[y<0] += logS0 #note: we assume that logS1=0
lZ_denom = likFunc_denom(y, mu, s2, hyp)
return lZ_numer-lZ_denom
def evalLL_EP(self, hyp):
try: hyp[0]
except: hyp=np.array([hyp])
tol = 1e-4; max_sweep = 20; min_sweep = 2 #tolerance to stop EP iterations
p = np.mean(self.y>0)
s1 = 1.0
s0 = s1 * self.prev / (1-self.prev) * (1-p) / p
logS0 = np.log(s0); sDiff = s1-s0; logSDiff = np.log(sDiff)
K = self.kernel.getTrainKernel(hyp)
m = np.zeros(self.y.shape[0])
controls = (self.y < 0)
cases = ~controls
diagK = np.diag(K)
sig2g = (1-self.prev)*np.mean(diagK[controls]) + self.prev*np.mean(diagK[cases])
if (sig2g > self.h2Scale): raise ValueError('sig2g larger than h2Scale found')
if (self.covars.shape[1] > 0):
C = self.covars
logreg = sklearn.linear_model.LogisticRegression(penalty='l2', C=1000, fit_intercept=True)
s0 = self.prev / (1-self.prev) * (1-np.mean(self.y>0)) / np.mean(self.y>0)
logreg.fit(C, self.y)
Pi = logreg.predict_proba(C)[:,1]
Ki = Pi * s0 / (1 - Pi*(1-s0))
if (self.eDist == 'logistic'):
old_prev = self.prev
t = np.empty(self.y.shape[0])
for i in range(self.y.shape[0]):
self.prev = Ki[i]
t[i] = self.computeT(K, self.h2Scale-sig2g)
self.prev = old_prev
else: t = stats.norm(0,1).isf(Ki)
if (self.eDist == 'normal'):
likFunc_numer_multi = self.likProbit_EP_multi
likFunc_numer_single = self.likProbit_EP_single
sig2e = self.h2Scale - sig2g
if (self.covars.shape[1] == 0): t = np.zeros(self.y.shape[0]) + stats.norm(0, np.sqrt(sig2g+sig2e)).isf(self.prev)
#t = stats.norm(0, np.sqrt(sig2g+sig2e)).isf(self.prev)
elif (self.eDist == 'logistic'):
likFunc_numer_multi = self.likLogistic_EP_multi_new
likFunc_numer_single = self.likLogistic_EP_single_new
sig2e = (self.h2Scale - sig2g) / (np.pi**2 / 3.0)
#if (self.covars.shape[1] == 0): t = np.zeros(self.y.shape[0]) + self.computeT(K, self.h2Scale-sig2g)
t = self.computeT(K, self.h2Scale-sig2g)
else: raise ValueError('unknown eDist')
likHyp_multi = [sig2e, logS0, logSDiff, sDiff, t, likFunc_numer_multi, self.likFunc_EP_asc_multi]
likHyp_single = [sig2e, logS0, logSDiff, sDiff, t, likFunc_numer_single, self.likFunc_EP_asc_single]
likFuncMulti = likFunc_numer_multi
likFuncSingle = likFunc_numer_single
Sigma = K.copy()
mu = m.copy() #- t
nlZ0 = -np.sum(likFuncMulti(self.y, mu, np.diag(K), likHyp_multi))
ttau, tnu = np.zeros(self.y.shape[0]), np.zeros(self.y.shape[0])
nlZ_old, sweep = np.inf, 0
nlZ = nlZ0
while ((np.abs(nlZ-nlZ_old) > tol and sweep < max_sweep) or sweep<min_sweep):
nlZ_old = nlZ
sweep+=1
if (self.eDist == 'logistic'): ttau, tnu = self.EP_innerloop2(Sigma, self.y, mu, ttau, tnu, likFuncSingle, likHyp_single)
else: ttau, tnu = ep_fast.EP_innerloop_probit(Sigma, self.y, mu, ttau, tnu, sig2e, t)
(Sigma, mu, L, alpha, nlZ) = self.epComputeParams2(K, self.y, ttau, tnu, m, likFuncMulti, likHyp_multi)
if (sweep == max_sweep and np.abs(nlZ-nlZ_old) > tol):
nlZ = np.inf
if (nlZ < 0): nlZ = np.inf
self.mu = mu
return nlZ
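# Note added for clarity (not in the original source): evalLL_AEP below handles
# case-control ascertainment by re-weighting the likelihood. With p the in-sample
# case fraction and prev the population prevalence, controls receive weight
# s0 = prev/(1-prev) * (1-p)/p while cases keep weight s1 = 1, and the "both"
# likelihood functions subtract the log of the ascertainment normaliser computed
# by likFunc_EP_asc_*.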
def evalLL_AEP(self, hyp, grad=False, update_freq=1):
try: hyp[0]
except: hyp=np.array([hyp])
tol = 1e-4; max_sweep = 20; min_sweep = 2 #tolerance to stop EP iterations
p = np.mean(self.y>0)
s1 = 1.0
s0 = s1 * self.prev / (1-self.prev) * (1-p) / p
y = self.y.copy()
useCython = True
logS0 = np.log(s0)
sDiff = s1-s0
logSDiff = np.log(sDiff)
#Generate problem settings
hyp_scaled = hyp.copy()
if self.h2Scale != 1.0:
hyp_scaled[-1] = np.log(np.exp(2*hyp[-1]) * self.h2Scale) / 2.0
K = self.kernel.getTrainKernel(hyp_scaled)
C = self.covars.copy()
m = np.zeros(y.shape[0])
controls = (y < 0)
cases = ~controls
diagK = np.diag(K)
sig2g = np.exp(2*hyp[-1])
if (self.eDist == 'normal'): sig2e = self.h2Scale - sig2g
elif (self.eDist == 'logistic'): sig2e = (self.h2Scale - sig2g) / (np.pi**2 / 3.0)
else: raise ValueError('unknown eDist')
if (sig2g > self.h2Scale):
raise ValueError('sig2g larger than h2Scale found')
if C.shape[1] > 0 and self.useFixed:
logreg = sklearn.linear_model.LogisticRegression(penalty='l2', C=1000, fit_intercept=True)
s0 = self.prev / (1-self.prev) * (1-np.mean(y>0)) / np.mean(y>0)
logreg.fit(C, y)
Pi = logreg.predict_proba(C)[:,1]
Ki = Pi * s0 / (1 - Pi*(1-s0))
if (self.eDist == 'logistic'):
old_prev = self.prev
t = np.empty(y.shape[0])
for i in range(y.shape[0]):
self.prev = Ki[i]
t[i] = self.computeT(K, self.h2Scale-sig2g)
self.prev = old_prev
else: t = stats.norm(0, np.sqrt(sig2g+sig2e)).isf(Ki)
if (self.eDist == 'normal'):
likFunc_numer_single = self.likProbit_EP_single
likFunc_numer_multi = self.likProbit_EP_multi
if (C.shape[1] == 0 or not self.useFixed): t = stats.norm(0, np.sqrt(sig2g+sig2e)).isf(self.prev)
elif (self.eDist == 'logistic'):
likFunc_numer_single = self.likLogistic_EP_single_new
likFunc_numer_multi = self.likLogistic_EP_multi_new
if (C.shape[1] == 0 or not self.useFixed): t = self.computeT(K, self.h2Scale-sig2g)
else: raise ValueError('unknown eDist')
likHyp_multi = [sig2e, logS0, logSDiff, sDiff, t, likFunc_numer_multi, self.likFunc_EP_asc_multi]
likHyp_single = [sig2e, logS0, logSDiff, sDiff, t, likFunc_numer_single, self.likFunc_EP_asc_single]
likFuncMulti = self.likFunc_EP_both_multi
likFuncSingle = self.likFunc_EP_both_single
#initialize Sigma and mu, the parameters of the Gaussian posterior approximation
Sigma = K.copy()
mu = m.copy()
#marginal likelihood for ttau = tnu = zeros(n,1); equals n*log(2) for likCum*
nlZ0 = -np.sum(likFuncMulti(y, mu, np.diag(K), likHyp_multi))
ttau, tnu = np.zeros(y.shape[0]), np.zeros(y.shape[0])
nlZ_old, sweep = np.inf, 0
nlZ = nlZ0
while ((np.abs(nlZ-nlZ_old) > tol and sweep < max_sweep) or sweep<min_sweep):
nlZ_old = nlZ
sweep+=1
if (self.eDist == 'logistic' or not useCython): ttau, tnu = self.EP_innerloop2(Sigma, y, mu, ttau, tnu, likFuncSingle, likHyp_single)
else:
ttau, tnu = ep_fast.EP_innerloop_probit_both_parallel(Sigma, y, mu, s0, sDiff, ttau, tnu, sig2e, np.zeros(y.shape[0])+t, update_freq=update_freq)
try:
(Sigma, mu, L, alpha, nlZ) = self.epComputeParams2(K, y, ttau, tnu, m, likFuncMulti, likHyp_multi)
except:
nlZ=np.inf
print('\t', 'Cholesky failed!')
raise
break
if (sweep == max_sweep and np.abs(nlZ-nlZ_old) > tol):
nlZ = np.inf
nlZ_asc = nlZ
if (len(self.prev_nlZ) >= 2):
prev_diff = np.maximum(np.abs(self.prev_nlZ[-1]-self.prev_nlZ[-2]), 2)
bad_inds = ((np.abs(ttau)>100) | (np.abs(tnu)>100))
if (np.abs(nlZ - self.prev_nlZ[-1]) > 2*np.abs(prev_diff) and np.any(bad_inds)):
nlZ = np.inf
nlZ_asc = nlZ
if (nlZ == np.inf):
self.old_ttau
tol=1e-2
ttau, tnu = self.old_ttau, self.old_tnu
Sigma = self.old_Sigma
mu = self.old_mu
nlZ_old, sweep = np.inf, 0
nlZ = np.inf
nlZ_arr = []
max_sweep=40
while (sweep<min_sweep or (np.abs(nlZ-nlZ_old) > tol and sweep < max_sweep)):
nlZ_old = nlZ
sweep+=1
if (self.eDist == 'logistic' or not useCython): ttau, tnu = self.EP_innerloop2(Sigma, y, mu, ttau, tnu, likFuncSingle, likHyp_single)
else:
ttau, tnu = ep_fast.EP_innerloop_probit_both_parallel(Sigma, y, mu, s0, sDiff, ttau, tnu, sig2e, np.zeros(y.shape[0])+t, update_freq=update_freq)
try:
(Sigma, mu, L, alpha, nlZ) = self.epComputeParams2(K, y, ttau, tnu, m, likFuncMulti, likHyp_multi)
except:
nlZ = np.inf
break
nlZ_arr.append(nlZ)
nlZ_arr = np.array(nlZ_arr)
if (sweep == max_sweep and np.abs(nlZ-nlZ_old) > tol):
if (np.abs(nlZ-nlZ_old) < 3):
if (np.all(nlZ_arr[5:] < self.old_nlZ)): nlZ = np.max(nlZ_arr[5:])
elif (np.all(nlZ_arr[5:] > self.old_nlZ)): nlZ = np.min(nlZ_arr[5:])
else:
nlZ = np.inf
prev_diff = np.maximum(np.abs(self.prev_nlZ[-1]-self.prev_nlZ[-2]), 2)
bad_inds = ((np.abs(ttau)>100) | (np.abs(tnu)>100))
try:
if (nlZ < np.inf and np.max(np.abs(nlZ_arr[5:] - self.prev_nlZ[-1])) > 2*np.abs(prev_diff) and np.any(bad_inds)):
nlZ = np.inf
except:
pass
nlZ_asc = nlZ
if (nlZ < np.inf):
self.old_ttau, self.old_tnu, self.old_Sigma, self.old_mu, self.old_nlZ = ttau, tnu, Sigma, mu, nlZ
self.prev_nlZ.append(nlZ)
self.mu = mu
nlZ = nlZ_asc
if (nlZ < 0): nlZ = np.inf
return nlZ
def likLogistic_EP_multi(self, y, mu, s2, hyp=None):
lZc = self.likErf_EP(np.outer(y, np.ones(5)), np.outer(mu, self.logistic_lam), np.outer(s2, self.logistic_lam2), compDerivs=False)
lZ = self.log_expA_x_multi(lZc, self.logistic_c) #A=lZc, B=dlZc, d=c.*lam', lZ=log(exp(A)*c)
val = np.abs(mu) - 196/200*s2-4 #empirically determined bound at val==0
lam = 1.0 / (1.0+np.exp(-10*val)) #interpolation weights
lZtail = np.minimum(s2/2.0-np.abs(mu), -0.1) #apply the same to p(y|f) = 1 - p(-y|f)
muy = mu*y
id = muy>0; lZtail[id] = np.log(1-np.exp(lZtail[id])) #label and mean agree
lZ = (1-lam)*lZ + lam*lZtail #interpolate between scale mixture and tail approximation
return lZ
#computes y = log( exp(A)*x ) in a numerically safe way by subtracting the
# maximal value in each row to avoid cancelation after taking the exp
def log_expA_x_multi(self, A, x):
maxA = np.max(A, axis=1) #number of columns, max over columns
y = np.log(np.exp(A - maxA[:, np.newaxis]).dot(x)) + maxA #exp(A) = exp(A-max(A))*exp(max(A))
return y
#computes y = log( exp(A)*x ) in a numerically safe way by subtracting the
# maximal value in each row to avoid cancelation after taking the exp
def log_expA_x_single(self, A, x):
maxA = np.max(A) #number of columns, max over columns
y = np.log(np.exp(A-maxA).dot(x)) + maxA #exp(A) = exp(A-max(A))*exp(max(A))
return y
# computes y = ( (exp(A).*B)*z ) ./ ( exp(A)*x ) in a numerically safe way.
#The function is not general in the sense that it yields correct values for
#all types of inputs. We assume that the values are close together.
def expABz_expAx_single(self, A,x,B,z):
maxA = np.max(A) #number of columns, max over columns
expA = np.exp(A-maxA)
y = np.dot(expA*B, z) / np.dot(expA, x)
return y
def evalLL(self, hyp, method):
if (method == 'aep'): return self.evalLL_AEP(hyp)
elif (method == 'aep_parallel'): return self.evalLL_AEP(hyp, update_freq=10000000000)
elif (method == 'ep'): return self.evalLL_EP(hyp)
else: raise ValueError('unrecognized method: %s. Valid methods are reml, pcgc, apl, aep, aep_parallel or ep'%(method))
def reml(self, is_binary):
K = self.kernel.getTrainKernel(np.array([0]))
logdetXX = 0
#eigendecompose
s,U = la.eigh(K)
s[s<0]=0
ind = np.argsort(s)[::-1]
U = U[:, ind]
s = s[ind]
#Prepare required matrices
if is_binary: y = (self.y>0).astype(np.int)
else: y = self.y_cont
Uy = U.T.dot(y).flatten()
covars = np.ones((y.shape[0], 1))
UX = U.T.dot(covars)
if (U.shape[1] < U.shape[0]):
UUX = covars - U.dot(UX)
UUy = y - U.dot(Uy)
UUXUUX = UUX.T.dot(UUX)
UUXUUy = UUX.T.dot(UUy)
UUyUUy = UUy.T.dot(UUy)
else: UUXUUX, UUXUUy, UUyUUy = None, None, None
n = U.shape[0]
ldeltaopt_glob = optimize.minimize_scalar(self.negLLevalLong, bounds=(-5, 5), method='Bounded', args=(s, Uy, UX, logdetXX, UUXUUX, UUXUUy, UUyUUy, n)).x
ll, sig2g, beta, r2 = self.negLLevalLong(ldeltaopt_glob, s, Uy, UX, logdetXX, UUXUUX, UUXUUy, UUyUUy, n, returnAllParams=True)
sig2e = np.exp(ldeltaopt_glob) * sig2g
return sig2g/(sig2g+sig2e)
def negLLevalLong(self, logdelta, s, Uy, UX, logdetXX, UUXUUX, UUXUUy, UUyUUy, numIndividuals, returnAllParams=False):
Sd = s + np.exp(logdelta)
UyS = Uy / Sd
yKy = UyS.T.dot(Uy)
logdetK = np.log(Sd).sum()
null_ll, sigma2, beta, r2 = self.lleval(Uy, UX, Sd, yKy, logdetK, logdetXX, logdelta, UUXUUX, UUXUUy, UUyUUy, numIndividuals)
if returnAllParams: return null_ll, sigma2, beta, r2
else: return -null_ll
def lleval(self, Uy, UX, Sd, yKy, logdetK, logdetXX, logdelta, UUXUUX, UUXUUy, UUyUUy, numIndividuals):
N = numIndividuals
D = UX.shape[1]
UXS = UX / np.lib.stride_tricks.as_strided(Sd, (Sd.size, D), (Sd.itemsize,0))
XKy = UXS.T.dot(Uy)
XKX = UXS.T.dot(UX)
if (Sd.shape[0] < numIndividuals):
delta = np.exp(logdelta)
denom = delta
XKX += UUXUUX / denom
XKy += UUXUUy / denom
yKy += UUyUUy / denom
logdetK += (numIndividuals-Sd.shape[0]) * logdelta
[SxKx,UxKx]= la.eigh(XKX)
i_pos = SxKx>1E-10
beta = np.dot(UxKx[:,i_pos], (np.dot(UxKx[:,i_pos].T, XKy) / SxKx[i_pos]))
r2 = yKy-XKy.dot(beta)
reml = True
if reml:
logdetXKX = np.log(SxKx).sum()
sigma2 = (r2 / (N - D))
ll = -0.5 * (logdetK + (N-D)*np.log(2.0*np.pi*sigma2) + (N-D) + logdetXKX - logdetXX)
else:
sigma2 = r2 / N
ll = -0.5 * (logdetK + N*np.log(2.0*np.pi*sigma2) + N)
return ll, sigma2, beta, r2
def solveChol(self, L, B, overwrite_b=True):
cholSolve1 = la.solve_triangular(L, B, trans=1, check_finite=False, overwrite_b=overwrite_b)
cholSolve2 = la.solve_triangular(L, cholSolve1, check_finite=False, overwrite_b=True)
return cholSolve2
def evalLL_EP(self, hyp):
tol = 1e-4; max_sweep = 20; min_sweep = 2 #tolerance to stop EP iterations
s0 = self.prev / (1-self.prev)
s1 = 1.0
useCython = False
try: hyp[0]
except: hyp=np.array([hyp])
if (self.prev < 0.5):
logS0 = np.log(s0)
logSdiff = np.log(s1-s0)
else:
logS0 = -np.inf
logSdiff = 0.0
#Generate problem settings
K = self.kernel.getTrainKernel(hyp)
m = np.zeros(self.y.shape[0])
if self.useFixed: m += self.covars.dot(self.fixedEffects)
controls = (self.y < 0)
cases = ~controls
diagK = np.diag(K)
sig2g = (1-self.prev)*np.mean(diagK[controls]) + self.prev*np.mean(diagK[cases])
if (sig2g > 1.0): raise ValueError('sig2g larger than 1.0 found')
sig2e = 1.0 - sig2g
t = stats.norm(0, np.sqrt(sig2g+sig2e)).isf(self.prev)
m -= t
if useCython:
EP_func = EP_cython.EPInnerLoop_cython
else:
EP_func = self.EPInnerLoop
llFunc = self.llFuncStandard
#A note on naming (taken directly from the GPML documentation):
#variables are given short but descriptive names in
#accordance with Rasmussen and Williams "GPs for Machine Learning" (2006): mu
#and s2 are mean and variance, nu and tau are natural parameters. A leading t
#means tilde, a subscript _ni means "not i" (for cavity parameters), or _n
#for a vector of cavity parameters. N(f|mu,Sigma) is the posterior.
#initialize Sigma and mu, the parameters of the Gaussian posterior approximation
Sigma = K.copy()
mu = m.copy()
#marginal likelihood for ttau = tnu = zeros(n,1); equals n*log(2) for likCum*
nlZ0 = -np.sum(llFunc(self.y, mu, np.diag(K), sig2e))
ttau, tnu = np.zeros(self.y.shape[0]), np.zeros(self.y.shape[0])
nlZ_old, sweep = np.inf, 0
nlZ = nlZ0
while ((np.abs(nlZ-nlZ_old) > tol and sweep < max_sweep) or sweep<min_sweep):
nlZ_old = nlZ
sweep+=1
Sigma, mu, ttau, tnu = EP_func(Sigma, self.y, mu, ttau, tnu, sig2e)
#recompute since repeated rank-one updates can destroy numerical precision
(Sigma, mu, L, alpha, nlZ) = self.epComputeParams(K, self.y, ttau, tnu, sig2e, m, llFunc)
self.mu = mu
return nlZ
def llFuncStandard(self, y, mu, s2, sig2e):
z = mu / np.sqrt(sig2e+s2) * y
nlZ = stats.norm(0,1).logcdf(z)
return nlZ
def EP_innerloop2(self, Sigma, y, mu, ttau, tnu, likFuncSingle, likHyp):
randpermN = np.random.permutation(range(y.shape[0]))
normPDF = stats.norm(0,1)
for i in randpermN: #iterate EP updates (in random order) over examples
#first find the cavity distribution params tau_ni and nu_ni
if (ttau[i] > 1.0/Sigma[i,i]):
raise ValueError('infeasible ttau[i] found!!!')
tau_ni = 1.0/Sigma[i,i] - ttau[i] #Equation 3.56 rhs (and 3.66) from GP book
nu_ni = (mu[i]/Sigma[i,i] - tnu[i]) #Equation 3.56 lhs (and 3.66) from GP book
mu_ni = nu_ni / tau_ni
#compute the desired derivatives of the individual log partition function
try:
t = likHyp[4]
likHyp[4] = t[i]
lZ, dlZ, d2lZ = likFuncSingle(y[i], mu_ni, 1.0/tau_ni, likHyp)
likHyp[4] = t
except:
lZ, dlZ, d2lZ = likFuncSingle(y[i], mu_ni, 1.0/tau_ni, likHyp)
ttau_old, tnu_old = ttau[i], tnu[i] #find the new tilde params, keep old
ttau[i] = -d2lZ / (1+d2lZ/tau_ni)
ttau[i] = np.maximum(ttau[i], 0) #enforce positivity i.e. lower bound ttau by zero
tnu[i] = (dlZ - mu_ni*d2lZ ) / (1+d2lZ/tau_ni)
if (ttau[i] == 0): tnu[i]=0
dtt = ttau[i] - ttau_old
dtn = tnu[i] - tnu_old #rank-1 update Sigma
si = Sigma[:,i]
ci = dtt / (1+dtt*si[i])
mu -= (ci* (mu[i]+si[i]*dtn) - dtn) * si #Equation 3.53 from GP book
Sigma -= np.outer(ci*si, si) #Equation 3.70 from GP book (#takes 70% of total time)
return ttau, tnu
def EPInnerLoop(self, Sigma, y, mu, ttau, tnu, sig2e):
randpermN = np.random.permutation(range(y.shape[0]))
normPDF = stats.norm(0,1)
for i in randpermN: #iterate EP updates (in random order) over examples
#first find the cavity distribution params tau_ni and mu_ni
tau_ni = 1.0/Sigma[i,i] - ttau[i] #Equation 3.56 rhs (and 3.66) from GP book
mu_ni = (mu[i]/Sigma[i,i] - tnu[i]) / tau_ni #Equation 3.56 lhs (and 3.66) from GP book
#compute the desired derivatives of the individual log partition function
s2 = 1.0/tau_ni
sqrtS2 = np.sqrt(s2 + sig2e)
z = mu_ni * y[i] / sqrtS2 #Equation 3.82 from GP book
ttau_old, tnu_old = ttau[i], tnu[i] #find the new tilde params, keep old
Z = normPDF.logcdf(z)
n_p = np.exp(normPDF.logpdf(z) - Z) #Equation 3.82 from GP book
#matlab computation...
dlZ = y[i] * n_p / sqrtS2 #1st derivative of log(Z) wrt mean
d2lZ = -n_p*(z+n_p)/(sig2e+s2) #2nd derivative of log(Z) wrt mean
ttau_matlab = -d2lZ / (1+d2lZ/tau_ni)
tnu_matlab = (dlZ - mu_ni*d2lZ ) / (1+d2lZ/tau_ni)
#my new computation...
meanQx = mu_ni + s2*n_p * y[i] / sqrtS2 #This is mu_hat from Equations 3.57-3.59 (specifically this is Equation 3.85)
meanQx2 = dlZ/tau_ni + mu_ni
assert np.isclose(meanQx, meanQx2)
varQx = s2 - s2**2 * n_p / (sig2e+s2) * (z + n_p) #This is sigma^2_hat from Equations 3.57-3.59 (specifically this is equation 3.87)
#varQx2 = d2lZ/tau_ni**2 + 2*mu_ni*meanQx - mu_ni**2 + 1.0/tau_ni + dlZ**2/tau_ni**2 - meanQx2**2
varQx2 = (d2lZ+dlZ**2)/tau_ni**2 + 2*mu_ni*meanQx - mu_ni**2 + 1.0/tau_ni - meanQx2**2
assert np.isclose(varQx, varQx2)
ttau[i] = 1.0/varQx - tau_ni #Equation 3.59 (and 3.66)
tnu[i] = meanQx/varQx - mu_ni*tau_ni #Equation 3.59 (and 3.66)
ttau[i] = np.maximum(ttau[i], 0) #enforce positivity i.e. lower bound ttau by zero
dtt = ttau[i] - ttau_old
dtn = tnu[i] - tnu_old #rank-1 update Sigma
si = Sigma[:,i]
ci = dtt / (1+dtt*si[i])
mu -= (ci* (mu[i]+si[i]*dtn) - dtn) * si #Equation 3.53 from GP book
Sigma -= np.outer(ci*si, si) #Equation 3.70 from GP book (#takes 70% of total time)
return Sigma, mu, ttau, tnu
def epComputeParams2(self, K, y, ttau, tnu, m, likFuncMulti, likHyp):
n = y.shape[0]
sW = np.sqrt(ttau) #compute Sigma and mu
L = la.cholesky(np.eye(n) + np.outer(sW, sW) * K, overwrite_a=True, check_finite=False)
#L.T*L=B=eye(n)+sW*K*sW
V = la.solve_triangular(L, K*np.tile(sW, (n, 1)).T, trans=1, check_finite=False, overwrite_b=True)
Sigma = K - V.T.dot(V)
alpha = tnu-sW * self.solveChol(L, sW*(K.dot(tnu)+m))
mu = K.dot(alpha) + m
v = np.diag(Sigma)
tau_n = 1.0/np.diag(Sigma) - ttau #compute the log marginal likelihood
nu_n = mu/np.diag(Sigma) - tnu #vectors of cavity parameters
lZ = likFuncMulti(y, nu_n/tau_n, 1.0/tau_n, likHyp)
p = tnu - m*ttau #auxiliary vectors
q = nu_n - m*tau_n #auxiliary vectors
nlZ = (np.sum(np.log(np.diag(L))) - lZ.sum() - (p.T.dot(Sigma)).dot(p/2.0) + (v.T.dot(p**2))/2.0
- q.T.dot((ttau/tau_n*q - 2*p) * v)/2.0 - np.sum(np.log(1+ttau/tau_n))/2.0)
return (Sigma, mu, L, alpha, nlZ)
def epComputeParams(self, K, y, ttau, tnu, sig2e, m, llFunc):
n = y.shape[0]
sW = np.sqrt(ttau) #compute Sigma and mu
L = la.cholesky(np.eye(n) + np.outer(sW, sW) * K, overwrite_a=True, check_finite=False)
#L.T*L=B=eye(n)+sW*K*sW
V = la.solve_triangular(L, K*np.tile(sW, (n, 1)).T, trans=1, check_finite=False, overwrite_b=True)
Sigma = K - V.T.dot(V)
alpha = tnu-sW * self.solveChol(L, sW*(K.dot(tnu)+m))
mu = K.dot(alpha) + m
v = np.diag(Sigma)
tau_n = 1.0/np.diag(Sigma) - ttau #compute the log marginal likelihood
nu_n = mu/np.diag(Sigma) - tnu #vectors of cavity parameters
mu_temp = nu_n/tau_n
s2 = 1.0/tau_n
lZ = llFunc(y, mu_temp, s2, sig2e)
p = tnu - m*ttau #auxiliary vectors
q = nu_n - m*tau_n #auxiliary vectors
nlZ = (np.sum(np.log(np.diag(L))) - np.sum(lZ) - (p.T.dot(Sigma)).dot(p/2.0) + (v.T.dot(p**2))/2.0
- q.T.dot((ttau/tau_n*q - 2*p) * v)/2.0 - np.sum(np.log(1+ttau/tau_n))/2.0)
return (Sigma, mu, L, alpha, nlZ)
def solveChol(self, L, B, overwrite_b=True):
cholSolve1 = la.solve_triangular(L, B, trans=1, check_finite=False, overwrite_b=overwrite_b)
cholSolve2 = la.solve_triangular(L, cholSolve1, check_finite=False, overwrite_b=True)
return cholSolve2
def pairwise_ml(self):
K = self.kernel.getTrainKernel(np.array([0]))
yBinary = (self.y>0).astype(np.int)
t = stats.norm(0,1).isf(self.prev)
#estimate initial fixed effects
C = self.covars
if C.shape[1] > 0 and self.useFixed:
logreg = sklearn.linear_model.LogisticRegression(penalty='l2', C=1000, fit_intercept=True)
s0 = self.prev / (1-self.prev) * (1-np.mean(yBinary>0)) / np.mean(yBinary>0)
logreg.fit(C, yBinary)
Pi = logreg.predict_proba(C)[:,1]
Ki = Pi * s0 / (1 - Pi*(1-s0))
t = stats.norm(0,1).isf(Ki)
phit = stats.norm(0,1).pdf(t)
ysum_temp = np.tile(yBinary, (yBinary.shape[0], 1))
sumY = ysum_temp + ysum_temp.T
#sumY_flat = sumY[np.triu_indices(K.shape[0], 1)]
Y0 = (sumY==0)
Y1 = (sumY==1)
Y2 = (sumY==2)
P = np.mean(yBinary)
denom = (self.prev**2 * (1-self.prev)**2)
coef0 = phit**2 * P * (1-P)**2 * (2*self.prev-P) / denom
coef1 = -(phit**2 * 2 * P * (1-P) * (P**2 + self.prev - 2*self.prev*P)) / denom
coef2 = phit**2 * (1-P) * P**2 * (1-2*self.prev+P) / denom
intercept = Y0*(1-P)**2 + Y1*2*P*(1-P) + Y2*P**2
coef = Y0*coef0 + Y1*coef1 + Y2*coef2
coefG = coef*K
np.fill_diagonal(coefG, 0) #to ensure log(intercept + coefG*h2)=0 in diagonal
np.fill_diagonal(intercept, 1) #to ensure log(intercept + coefG*h2)=0 in diagonal
def pw_nll(h2):
ll = np.sum(np.log(intercept + coefG*h2))
if np.isnan(ll): ll=-np.inf
return -ll
optObj = optimize.minimize_scalar(pw_nll, bounds=(0, 1), method='bounded')
best_h2 = optObj.x
return best_h2, optObj.fun
def pcgc(self, rbf_hyp=None):
t = stats.norm(0,1).isf(self.prev)
if rbf_hyp is None:
K = self.kernel.getTrainKernel(np.array([0]))
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import tvm
import topi
from topi.util import get_const_tuple
def with_tvm(lam, *args):
""" Take numpy arrays as args, convert them to TVM tensors and call `lam`.
Result of lambda is converted back to numpy array and returned.
"""
ctx = tvm.cpu(0)
pls = [] # placeholders
vals_nd = [] # initial values
for i,arg in enumerate(args):
pls.append(tvm.placeholder(arg.shape, name='pl'+str(i)))
vals_nd.append(tvm.nd.array(arg, ctx))
out = lam(*pls)
out_nd = tvm.nd.array(np.zeros(get_const_tuple(out.shape), dtype=out.dtype), ctx)
s = tvm.create_schedule([out.op])
m = tvm.build(s, pls + [out], "llvm")
m(*(vals_nd+[out_nd]))
return out_nd.asnumpy()
def verify_matmul(sa, sb, transp_a, transp_b):
a = np.random.uniform(low=-1.0, high=1.0, size=sa).astype(np.float32)
b = np.random.uniform(low=-1.0, high=1.0, size=sb).astype(np.float32)
c1 = np.matmul(np.transpose(a) if transp_a else a,
np.transpose(b) if transp_b else b)
c2 = with_tvm(lambda A,B: topi.matmul(A,B,transp_a,transp_b), a,b)
tvm.testing.assert_allclose(c1, c2, rtol=1e-5, atol=1e-5)
def test_matmul():
verify_matmul((1,1),(1,1),False,False)
verify_matmul((1,1),(1,1),True,True)
verify_matmul((2,2),(2,2),False,False)
verify_matmul((2,2),(2,2),True,True)
verify_matmul((2,3),(3,5),False,False)
verify_matmul((5,3),(3,2),False,False)
verify_matmul((3,5),(3,2),True,False)
verify_matmul((3,5),(2,3),True,True)
def verify_tensordot(sa, sb, axes):
a = np.random.uniform(low=-1.0, high=1.0, size=sa).astype(np.float32)
b = np.random.uniform(low=-1.0, high=1.0, size=sb).astype(np.float32)
c1 = np.tensordot(a, b, axes)
import numpy as np
import pytest
from scipy.interpolate import RegularGridInterpolator
from RAiDER.interpolate import interpolate, interpolate_along_axis
from RAiDER.interpolator import RegularGridInterpolator as Interpolator, interpolateDEM
from RAiDER.interpolator import fillna3D, interp_along_axis, interpVector
@pytest.fixture
def nanArr():
array = np.random.randn(2, 2, 3)
array[0, 0, 0] = np.nan
array[0, 0, 1] = np.nan
array[0, 0, 2] = np.nan
array[1, 0, 0] = np.nan
array[0, 1, 1] = np.nan
array[1, 1, 2] = np.nan
true_array = array.copy()
true_array[0, 0, 0] = np.nan
true_array[0, 0, 1] = np.nan
true_array[0, 0, 2] = np.nan
true_array[1, 0, 0] = true_array[1, 0, 1]
true_array[0, 1, 1] = (true_array[0, 1, 0] + true_array[0, 1, 2]) / 2
true_array[1, 1, 2] = true_array[1, 1, 1]
return array, true_array
def test_interpVector():
assert np.allclose(
interpVector(
np.array([
0, 1, 2, 3, 4, 5,
0, 0.84147098, 0.90929743, 0.14112001, -0.7568025, -0.95892427,
0.5, 1.5, 2.5, 3.5, 4.5
]),
6
),
np.array([0.42073549, 0.87538421, 0.52520872, -0.30784124, -0.85786338])
)
def test_fillna3D(nanArr):
arr, tarr = nanArr
assert np.allclose(fillna3D(arr), tarr, equal_nan=True)
def test_interp_along_axis():
z2 = np.tile(np.arange(100)[..., np.newaxis], (5, 1, 5)).swapaxes(1, 2)
zvals = 0.3 * z2 - 12.75
newz = np.tile(
np.array([1.5, 9.9, 15, 23.278, 39.99, 50.1])[..., np.newaxis],
(5, 1, 5)
).swapaxes(1, 2)
corz = 0.3 * newz - 12.75
assert np.allclose(interp_along_axis(z2, newz, zvals, axis=2), corz)
def shuffle_along_axis(a, axis):
idx = np.random.rand(*a.shape).argsort(axis=axis)
return np.take_along_axis(a, idx, axis=axis)
def test_interpolate_along_axis():
# Rejects scalar values
with pytest.raises(TypeError):
interpolate_along_axis(np.array(0), np.array(0), np.array(0))
# Rejects mismatched number of dimensions
with pytest.raises(TypeError):
interpolate_along_axis(np.zeros(1), np.zeros(1), np.zeros((1, 1)))
with pytest.raises(TypeError):
interpolate_along_axis(np.zeros(1), np.zeros((1, 1)), np.zeros(1))
with pytest.raises(TypeError):
interpolate_along_axis(np.zeros((1, 1)), np.zeros(1), np.zeros(1))
with pytest.raises(TypeError):
interpolate_along_axis(np.zeros(1), np.zeros((1, 1)), np.zeros((1, 1)))
with pytest.raises(TypeError):
interpolate_along_axis(np.zeros((1, 1)), np.zeros((1, 1)), np.zeros(1))
with pytest.raises(TypeError):
interpolate_along_axis(np.zeros((1, 1)), np.zeros(1), np.zeros((1, 1)))
# Rejects mismatched shape for points and values
with pytest.raises(TypeError):
interpolate_along_axis(np.zeros(1), np.zeros(2), np.zeros(1))
with pytest.raises(TypeError):
interpolate_along_axis(np.zeros((9, 2)), np.zeros((9, 3)), np.zeros(1))
# Rejects bad axis
with pytest.raises(TypeError):
interpolate_along_axis(np.zeros(1), np.zeros(1), np.zeros(1), axis=1)
with pytest.raises(TypeError):
interpolate_along_axis(np.zeros(1), np.zeros(1), np.zeros(1), axis=-2)
# Rejects bad interp_points shape
with pytest.raises(TypeError):
interpolate_along_axis(
np.zeros((2, 2)), np.zeros((2, 2)), np.zeros((3, 2))
)
with pytest.raises(TypeError):
interpolate_along_axis(
np.zeros((2, 2)), np.zeros((2, 2)), np.zeros((2, 3)),
axis=0, max_threads=1
)
def test_interp_along_axis_1d():
def f(x):
return 2 * x
xs = np.array([1, 2, 3, 4])
ys = f(xs)
points = np.array([1.5, 3.1])
assert np.allclose(
interp_along_axis(xs, points, ys, axis=0),
2 * points
)
assert np.allclose(
interpolate_along_axis(xs, ys, points, axis=0, max_threads=1),
2 * points
)
def test_interp_along_axis_1d_out_of_bounds():
def f(x):
return 2 * x
xs = np.array([1, 2, 3, 4])
ys = f(xs)
points = np.array([0, 5])
assert np.allclose(
interp_along_axis(xs, points, ys, axis=0),
np.array([np.nan, np.nan]),
equal_nan=True
)
assert np.allclose(
interpolate_along_axis(xs, ys, points, axis=0,
max_threads=1, fill_value=np.nan),
np.array([np.nan, np.nan]),
equal_nan=True
)
def test_interp_along_axis_2d():
def f(x):
return 2 * x
xs = np.array([
[1, 2, 3, 4],
[3, 4, 5, 6]
])
ys = f(xs)
points = np.array([
[1.5, 3.1, 3.6],
[3.5, 5.1, 5.2]
])
assert np.allclose(
interp_along_axis(xs, points, ys, axis=1),
2 * points
)
assert np.allclose(
interpolate_along_axis(xs, ys, points, axis=1),
2 * points
)
def test_interp_along_axis_2d_threads_edge_case():
def f(x):
return 2 * x
# Max of 4 threads but 5 rows to interpolate over. Each thread will get 2
# rows which means only 3 threads will be used
max_threads = 4
xs = np.array([
[1, 2, 3, 4],
[3, 4, 5, 6],
[7, 8, 9, 10],
[11, 12, 13, 14],
[15, 16, 17, 18]
])
ys = f(xs)
points = np.array([
[1.5, 3.1, 3.6],
[3.5, 5.1, 5.2],
[7.5, 9.1, 9.9],
[11.1, 12.2, 13.3],
[15.1, 16.2, 17.3]
])
assert np.allclose(
interp_along_axis(xs, points, ys, axis=1),
2 * points
)
assert np.allclose(
interpolate_along_axis(xs, ys, points, axis=1, max_threads=max_threads),
2 * points
)
def test_interp_along_axis_3d():
def f(x):
return 2 * x
xs = np.array([
[[1, 2, 3, 4],
[3, 4, 5, 6]],
[[10, 11, 12, 13],
[21, 22, 23, 24]]
])
ys = f(xs)
points = np.array([
[[1.5, 3.1],
[3.5, 5.1]],
[[10.3, 12.9],
[22.6, 22.1]]
])
assert np.allclose(
interp_along_axis(xs, points, ys, axis=2),
2 * points
)
assert np.allclose(
interpolate_along_axis(xs, ys, points, axis=2),
2 * points
)
def test_interp_along_axis_3d_axis1():
def f(x):
return 2 * x
xs = np.array([
[[1, 2],
[3, 4]],
[[10, 11],
[21, 22]]
])
ys = f(xs)
points = np.array([
[[1.5, 3.1],
[2.5, 2.1]],
[[10.3, 12.9],
[15, 17]]
])
assert np.allclose(
interp_along_axis(xs, points, ys, axis=1),
2 * points
)
assert np.allclose(
interpolate_along_axis(xs, ys, points, axis=1),
2 * points
)
@pytest.mark.parametrize("num_points", (7, 200, 500))
def test_interp_along_axis_3d_large(num_points):
def f(x):
return 2 * x
# To scale values along axis 0 of a 3 dimensional array
scale = np.arange(1, 101).reshape((100, 1, 1))
axis1 = np.arange(100)
axis2 = np.repeat(np.array([axis1]), 100, axis=0)
xs = np.repeat(np.array([axis2]), 100, axis=0) * scale
ys = f(xs)
points = np.array([np.linspace(0, 99, num=num_points)]).repeat(100, axis=0)
points = np.repeat(np.array([points]), 100, axis=0) * scale
ans = 2 * points
assert np.allclose(interp_along_axis(xs, points, ys, axis=2), ans)
assert np.allclose(interpolate_along_axis(xs, ys, points, axis=2), ans)
assert np.allclose(
interpolate_along_axis(xs, ys, points, axis=2, assume_sorted=True), ans
)
def test_interp_along_axis_3d_large_unsorted():
def f(x):
return 2 * x
# To scale values along axis 0 of a 3 dimensional array
scale = np.arange(1, 101).reshape((100, 1, 1))
axis1 = np.arange(100)
axis2 = np.repeat(np.array([axis1]), 100, axis=0)
xs = np.repeat(np.array([axis2]), 100, axis=0) * scale
ys = f(xs)
points = np.array([np.linspace(0, 99, num=300)]).repeat(100, axis=0)
points = np.repeat(np.array([points]), 100, axis=0) * scale
points = shuffle_along_axis(points, 2)
ans = 2 * points
assert np.allclose(interp_along_axis(xs, points, ys, axis=2), ans)
assert np.allclose(interpolate_along_axis(xs, ys, points, axis=2), ans)
def test_grid_dim_mismatch():
with pytest.raises(TypeError):
interpolate(
points=(np.zeros((10,)), np.zeros((5,))),
values=np.zeros((1,)),
interp_points=np.zeros((1,))
)
def test_basic():
ans = interpolate(
points=(np.array([0, 1]),),
values=np.array([0, 1]),
interp_points=np.array([[0.5]]),
max_threads=1,
assume_sorted=True
)
assert ans == np.array([0.5])
def test_1d_out_of_bounds():
ans = interpolate(
points=(np.array([0, 1]),),
values=np.array([0, 1]),
interp_points=np.array([[100]]),
max_threads=1,
assume_sorted=True
)
# Output is extrapolated
assert ans == np.array([100])
def test_1d_fill_value():
ans = interpolate(
points=(np.array([0, 1]),),
values=np.array([0, 1]),
interp_points=np.array([[100]]),
max_threads=1,
fill_value=np.nan,
assume_sorted=True
)
assert np.all(np.isnan(ans))
def test_small():
ans = interpolate(
points=(np.array([1, 2, 3, 4, 5, 6]),),
values=np.array([10, 9, 30, 10, 6, 1]),
interp_points=np.array([1.25, 2.9, 3.01, 5.7]).reshape(-1, 1)
)
assert ans.shape == (4,)
assert np.allclose(ans, np.array([9.75, 27.9, 29.8, 2.5]), atol=1e-15)
def test_small_not_sorted():
ans = interpolate(
points=(np.array([1, 2, 3, 4, 5, 6]),),
values=np.array([10, 9, 30, 10, 6, 1]),
interp_points=np.array([2.9, 1.25, 5.7, 3.01]).reshape(-1, 1),
)
assert ans.shape == (4,)
assert np.allclose(ans, np.array([27.9, 9.75, 2.5, 29.8]), atol=1e-15)
def test_exact_points():
ans = interpolate(
points=(np.array([1, 2, 3, 4, 5, 6]),),
values=np.array([10, 9, 30, 10, 6, 1]),
interp_points=np.array([1, 2, 3, 4, 5, 6]).reshape(-1, 1)
)
assert ans.shape == (6,)
assert np.allclose(ans, np.array([10, 9, 30, 10, 6, 1]), atol=1e-15)
def test_2d_basic():
xs = np.array([0, 1])
ys = np.array([0, 1])
values = (lambda x, y: x + y)(
*np.meshgrid(xs, ys, indexing="ij", sparse=True)
)
ans = interpolate(
points=(xs, ys),
values=values,
interp_points=np.array([[0.5, 0.5]])
)
assert ans == np.array([1])
def test_2d_out_of_bounds():
xs = np.array([0, 1])
ys = np.array([0, 1])
values = (lambda x, y: x + y)(
*np.meshgrid(xs, ys, indexing="ij", sparse=True)
)
ans = interpolate(
points=(xs, ys),
values=values,
interp_points=np.array([[100, 100]])
)
# Output is extrapolated
assert ans == np.array([200])
def test_2d_fill_value():
xs = np.array([0, 1])
ys = np.array([0, 1])
values = (lambda x, y: x + y)(
*np.meshgrid(xs, ys, indexing="ij", sparse=True)
)
ans = interpolate(
points=(xs, ys),
values=values,
interp_points=np.array([[100, 100]]),
fill_value=np.nan
)
assert np.all(np.isnan(ans))
def test_2d_square_small():
def f(x, y):
return x ** 2 + 3 * y
xs = np.linspace(0, 1000, 100)
ys = np.linspace(0, 1000, 100)
values = f(*np.meshgrid(xs, ys, indexing="ij", sparse=True))
points = np.stack((
np.linspace(10, 990, 5),
np.linspace(10, 890, 5)
), axis=-1)
ans = interpolate(
points=(xs, ys),
values=values,
interp_points=points,
assume_sorted=True
)
rgi = RegularGridInterpolator((xs, ys), values)
ans_scipy = rgi(points)
assert np.allclose(ans, ans_scipy, atol=1e-15)
def test_2d_rectangle_small():
def f(x, y):
return x ** 2 + 3 * y
xs = np.linspace(0, 2000, 200)
ys = np.linspace(0, 1000, 100)
values = f(*np.meshgrid(xs, ys, indexing="ij", sparse=True))
points = np.stack((
np.linspace(10, 990, 5),
np.linspace(10, 890, 5)
), axis=-1)
ans = interpolate(
points=(xs, ys),
values=values,
interp_points=points,
assume_sorted=True
)
rgi = RegularGridInterpolator((xs, ys), values)
ans_scipy = rgi(points)
assert np.allclose(ans, ans_scipy, atol=1e-15)
def test_2d_rectangle_small_2():
def f(x, y):
return x ** 2 + 3 * y
xs = np.linspace(0, 1000, 100)
ys = np.linspace(0, 2000, 200)
values = f(*np.meshgrid(xs, ys, indexing="ij", sparse=True))
points = np.stack((
np.linspace(10, 990, 5),
np.linspace(10, 890, 5)
), axis=-1)
ans = interpolate(
points=(xs, ys),
values=values,
interp_points=points,
assume_sorted=True
)
rgi = RegularGridInterpolator((xs, ys), values)
ans_scipy = rgi(points)
assert np.allclose(ans, ans_scipy, atol=1e-15)
def test_2d_square_large():
def f(x, y):
return x ** 2 + 3 * y
xs = np.linspace(-10_000, 10_000, num=1_000)
ys = np.linspace(0, 20_000, num=1_000)
values = f(*np.meshgrid(xs, ys, indexing="ij", sparse=True))
num_points = 2_000_000
points = np.stack((
np.linspace(10, 990, num_points),
np.linspace(10, 890, num_points)
), axis=-1)
ans = interpolate(
points=(xs, ys),
values=values,
interp_points=points,
assume_sorted=True
)
rgi = RegularGridInterpolator((xs, ys), values)
ans_scipy = rgi(points)
assert np.allclose(ans, ans_scipy, atol=1e-15)
def test_3d_basic():
xs = np.array([0, 1])
ys = np.array([0, 1])
zs = np.array([0, 1])
values = (lambda x, y, z: x + y + z)(
*np.meshgrid(xs, ys, zs, indexing="ij", sparse=True)
)
ans = interpolate(
points=(xs, ys, zs),
values=values,
interp_points=np.array([[0.5, 0.5, 0.5]]),
assume_sorted=True
)
assert ans == np.array([1.5])
def test_3d_out_of_bounds():
xs = np.array([0, 1])
ys = np.array([0, 1])
zs = np.array([0, 1])
values = (lambda x, y, z: x + y + z)(
*np.meshgrid(xs, ys, zs, indexing="ij", sparse=True)
)
ans = interpolate(
points=(xs, ys, zs),
values=values,
interp_points=np.array([[100, 100, 100]]),
assume_sorted=True
)
# Output is extrapolated
assert ans == np.array([300])
def test_3d_fill_value():
xs = np.array([0, 1])
ys = np.array([0, 1])
zs = np.array([0, 1])
values = (lambda x, y, z: x + y + z)(
*np.meshgrid(xs, ys, zs, indexing="ij", sparse=True)
)
ans = interpolate(
points=(xs, ys, zs),
values=values,
interp_points=np.array([[100, 100, 100]]),
fill_value=np.nan,
assume_sorted=True
)
assert np.all(np.isnan(ans))
def test_3d_cube_small():
def f(x, y, z):
return x ** 2 + 3 * y - z
xs = np.linspace(0, 1000, 100)
ys = np.linspace(0, 1000, 100)
zs = np.linspace(0, 1000, 100)
values = f(*np.meshgrid(xs, ys, zs, indexing="ij", sparse=True))
points = np.stack((
np.linspace(10, 990, 5),
np.linspace(10, 890, 5),
np.linspace(10, 780, 5)
), axis=-1)
ans = interpolate(
points=(xs, ys, zs),
values=values,
interp_points=points,
assume_sorted=True
)
rgi = RegularGridInterpolator((xs, ys, zs), values)
ans_scipy = rgi(points)
assert np.allclose(ans, ans_scipy, 1e-15)
def test_3d_cube_small_not_sorted():
def f(x, y, z):
return x ** 2 + 3 * y - z
xs = np.linspace(0, 1000, 100)
ys = np.linspace(0, 1000, 100)
zs = np.linspace(0, 1000, 100)
values = f(*np.meshgrid(xs, ys, zs, indexing="ij", sparse=True))
points = np.stack((
np.random.uniform(10, 990, 10),
np.random.uniform(10, 890, 10),
np.random.uniform(10, 780, 10)
), axis=-1)
ans = interpolate(
points=(xs, ys, zs),
values=values,
interp_points=points,
)
rgi = RegularGridInterpolator((xs, ys, zs), values)
ans_scipy = rgi(points)
assert np.allclose(ans, ans_scipy, 1e-15)
def test_3d_prism_small():
def f(x, y, z):
return x ** 2 + 3 * y - z
xs = np.linspace(0, 2000, 200)
ys = np.linspace(0, 1000, 100)
zs = np.linspace(0, 1000, 50)
values = f(*np.meshgrid(xs, ys, zs, indexing="ij", sparse=True))
points = np.stack((
np.linspace(10, 990, 5),
np.linspace(10, 890, 5),
np.linspace(10, 780, 5)
), axis=-1)
ans = interpolate(
points=(xs, ys, zs),
values=values,
interp_points=points,
assume_sorted=True
)
rgi = RegularGridInterpolator((xs, ys, zs), values)
ans_scipy = rgi(points)
assert np.allclose(ans, ans_scipy, 1e-15)
def test_3d_prism_small_2():
def f(x, y, z):
return x ** 2 + 3 * y - z
xs = np.linspace(0, 2000, 100)
ys = np.linspace(0, 1000, 200)
zs = np.linspace(0, 1000, 50)
values = f(*np.meshgrid(xs, ys, zs, indexing="ij", sparse=True))
points = np.stack((
np.linspace(10, 990, 5),
np.linspace(10, 890, 5),
np.linspace(10, 780, 5)
), axis=-1)
ans = interpolate(
points=(xs, ys, zs),
values=values,
interp_points=points,
assume_sorted=True
)
rgi = RegularGridInterpolator((xs, ys, zs), values)
ans_scipy = rgi(points)
assert np.allclose(ans, ans_scipy, 1e-15)
def test_3d_prism_small_3():
def f(x, y, z):
return x ** 2 + 3 * y - z
xs = np.linspace(0, 2000, 50)
ys = np.linspace(0, 1000, 200)
zs = np.linspace(0, 1000, 100)
values = f(*np.meshgrid(xs, ys, zs, indexing="ij", sparse=True))
points = np.stack((
np.linspace(10, 990, 5),
np.linspace(10, 890, 5),
np.linspace(10, 780, 5)
), axis=-1)
ans = interpolate(
points=(xs, ys, zs),
values=values,
interp_points=points,
assume_sorted=True
)
rgi = RegularGridInterpolator((xs, ys, zs), values)
ans_scipy = rgi(points)
assert np.allclose(ans, ans_scipy, 1e-15)
def test_3d_cube_large():
def f(x, y, z):
return x ** 2 + 3 * y - z
xs = np.linspace(0, 1000, 100)
ys = np.linspace(0, 1000, 100)
zs = np.linspace(0, 1000, 100)
values = f(*np.meshgrid(xs, ys, zs, indexing="ij", sparse=True))
num_points = 2_000_000
points = np.stack((
np.linspace(10, 990, num_points),
np.linspace(10, 890, num_points),
np.linspace(10, 780, num_points)
), axis=-1)
ans = interpolate(
points=(xs, ys, zs),
values=values,
interp_points=points,
assume_sorted=True
)
rgi = RegularGridInterpolator((xs, ys, zs), values)
ans_scipy = rgi(points)
assert np.allclose(ans, ans_scipy, 1e-15)
def test_4d_basic():
xs = np.array([0, 1])
ys = np.array([0, 1])
zs = np.array([0, 1])
ws = np.array([0, 1])
values = (lambda x, y, z, w: x + y + z + w)(
*np.meshgrid(xs, ys, zs, ws, indexing="ij", sparse=True)
)
ans = interpolate(
points=(xs, ys, zs, ws),
values=values,
interp_points=np.array([[0.5, 0.5, 0.5, 0.5]])
)
assert ans == np.array([2])
def test_4d_out_of_bounds():
xs = np.array([0, 1])
ys = np.array([0, 1])
zs = np.array([0, 1])
ws = np.array([0, 1])
values = (lambda x, y, z, w: x + y + z + w)(
*np.meshgrid(xs, ys, zs, ws, indexing="ij", sparse=True)
)
ans = interpolate(
points=(xs, ys, zs, ws),
values=values,
interp_points=np.array([[100, 100, 100, 100]])
)
# Output is extrapolated
assert ans == np.array([400])
def test_4d_fill_value():
xs = np.array([0, 1])
ys = np.array([0, 1])
zs = np.array([0, 1])
ws = np.array([0, 1])
values = (lambda x, y, z, w: x + y + z + w)(
*np.meshgrid(xs, ys, zs, ws, indexing="ij", sparse=True)
)
ans = interpolate(
points=(xs, ys, zs, ws),
values=values,
interp_points=np.array([[100, 100, 100, 100]]),
fill_value=np.nan
)
assert np.all(np.isnan(ans))
def test_4d_cube_small():
def f(x, y, z, w):
return x ** 2 + 3 * y - z * w
xs = np.linspace(0, 1000, 100)
ys = np.linspace(0, 1000, 100)
zs = np.linspace(0, 1000, 100)
ws = np.linspace(0, 1000, 100)
values = f(*np.meshgrid(xs, ys, zs, ws, indexing="ij", sparse=True))
points = np.stack((
np.linspace(10, 990, 5),
np.linspace(10, 890, 5),
np.linspace(10, 780, 5)
"""This is a Python package for dual-Doppler uncertainty assessment.
It includes several class which are used to perform the assessment.
"""
from math import cos, sin, pi, radians
import numpy as np
import xarray as xr
def wind_vector_to_los(u,v,w, azimuth, elevation, ignore_elevation = True):
"""Projects wind vector to the beam line-of-sight (LOS).
Parameters
----------
u : ndarray
nD array of `float` or `int` corresponding to u component of wind.
In cf convention u is eastward_wind.
Units m/s.
v : ndarray
nD array `float` or `int` corresponding to v component of wind.
In cf convention v is northward_wind.
Units m/s.
w : ndarray
nD array `float` or `int` corresponding to w component of wind.
In cf convention w is upward_air_velocity.
Units m/s.
azimuth : ndarray
nD array `float` or `int` corresponding to LOS direction in azimuth.
Units degree.
elevation : ndarray
nD array `float` or `int` corresponding to LOS direction in elevation.
Units degree.
Returns
-------
los : ndarray
nD array `float` or `int` corresponding to LOS component of wind.
In cf convention los is radial_velocity_of_scatterers_toward_instrument.
Units m/s.
Notes
-----
LOS or radial wind speed, :math:`{V_{radial}}`, is calculated using the
following mathematical expression:
.. math::
V_{radial} = u \sin({\\theta})\cos({\\varphi}) +
v \cos({\\theta})\cos({\\varphi}) +
w\sin({\\varphi})
where :math:`{\\theta}` and :math:`{\\varphi}` are the azimuth and
elevation angle of the beam, :math:`{u}` is the wind component toward East,
:math:`{v}` is the wind component toward North, and :math:`{w}` is the
upward air velocity. The sign of :math:`{V_{radial}}` is assumed to be
positive if wind approaches the instrument, otherwise it is negative.
"""
# handles both single values as well as arrays
azimuth = np.radians(azimuth)
elevation = np.radians(elevation)
if ignore_elevation:
los = u * np.sin(azimuth) + v * np.cos(azimuth)
else:
los = u * np.sin(azimuth) * np.cos(elevation) + \
v * np.cos(azimuth) * np.cos(elevation) + \
w * np.sin(elevation)
return los
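# Usage sketch (hypothetical values): an eastward wind component of 10 m/s
# (u=10, v=0, w=0) projected onto a beam with azimuth 90 deg and elevation
# 0 deg yields a LOS speed of 10 m/s according to the expression above.
#
#   los = wind_vector_to_los(np.array([10.0]), np.array([0.0]), np.array([0.0]),
#                            np.array([90.0]), np.array([0.0]),
#                            ignore_elevation=False)
#   # los -> array([10.])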
def generate_mesh(center, map_extent, mesh_res):
"""
Generate a horizontal mesh containing equally spaced (measurement) points.
Parameters
----------
center : ndarray
1D array containing data with `float` or `int` type corresponding to
Northing, Easting, Height asl and Height agl coordinates of the mesh center.
1D array data are expressed in meters.
map_extent : int
map extent in Northing (y) and Easting (x) in meters.
mesh_res : int
mesh resolution for Northing (y) and Easting (x) in meters.
Returns
-------
mesh : ndarray
nD array containing a list of mesh points.
x : ndarray
nD shaped array for Easting (x) coordinate of mesh points.
y : ndarray
nD shaped array for Northing (y) coordinate of mesh points.
Notes
-----
The generated mesh is square, i.e. it has the same extent in both dimensions.
"""
map_corners = np.array([center[:2] - int(map_extent),
center[:2] + int(map_extent)])
x, y = np.meshgrid(
np.arange(map_corners[0][0], map_corners[1][0]+ int(mesh_res), int(mesh_res)),
np.arange(map_corners[0][1], map_corners[1][1]+ int(mesh_res), int(mesh_res))
)
H_asl = np.full(x.shape, center[2])
H_agl = np.full(x.shape, center[3])
mesh = np.array([x, y, H_asl, H_agl]).T.reshape(-1, 4)
return x, y, mesh
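# Usage sketch (hypothetical values): a mesh of 41 x 41 points spaced 100 m
# apart, extending 2000 m around the centre. The centre vector follows the
# column order used by the code: [x, y, height asl, height agl].
#
#   center = np.array([500000.0, 4500000.0, 150.0, 100.0])
#   x, y, mesh = generate_mesh(center, map_extent=2000, mesh_res=100)
#   # mesh.shape -> (1681, 4)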
def generate_beam_coords(lidar_pos, meas_pt_pos):
"""
Generates beam steering coordinates in spherical coordinate system.
Parameters
----------
lidar_pos : ndarray
1D array containing data with `float` or `int` type corresponding to
Northing, Easting and Height coordinates of a lidar.
Coordinates unit is meter.
meas_pt_pos : ndarray
nD array containing data with `float` or `int` type corresponding to
Northing, Easting and Height coordinates of a measurement point(s).
Coordinates unit is meter.
Returns
-------
beam_coords : ndarray
nD array containing beam steering coordinates for given measurement points.
Coordinates have following structure [azimuth, elevation, range].
Azimuth and elevation angles are given in degree.
Range unit is meter.
"""
# testing if meas_pt has single or multiple measurement points
if len(meas_pt_pos.shape) == 2:
x_array = meas_pt_pos[:, 0]
y_array = meas_pt_pos[:, 1]
z_array = meas_pt_pos[:, 2]
else:
x_array = np.array([meas_pt_pos[0]])
y_array = np.array([meas_pt_pos[1]])
z_array = np.array([meas_pt_pos[2]])
# calculating difference between lidar_pos and meas_pt_pos coordinates
dif_xyz = np.array([lidar_pos[0] - x_array, lidar_pos[1] - y_array, lidar_pos[2] - z_array])
# distance between lidar and measurement point in space
distance_3D = np.sum(dif_xyz**2,axis=0)**(1./2)
# distance between lidar and measurement point in a horizontal plane
distance_2D = np.sum(np.abs([dif_xyz[0],dif_xyz[1]])**2,axis=0)**(1./2)
# in radians
azimuth = np.arctan2(x_array-lidar_pos[0], y_array-lidar_pos[1])
# conversion to meteorological convention
azimuth = (360 + azimuth * (180 / pi)) % 360
# in radians
elevation = np.arccos(distance_2D / distance_3D)
# conversion to meteorological convention
elevation = np.sign(z_array - lidar_pos[2]) * (elevation * (180 / pi))
beam_coord = np.transpose(np.array([azimuth, elevation, distance_3D]))
return beam_coord
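# Usage sketch (hypothetical values): steering coordinates from a lidar at the
# origin to a point 1000 m North of it and 100 m above it.
#
#   beam = generate_beam_coords(np.array([0.0, 0.0, 0.0]),
#                               np.array([0.0, 1000.0, 100.0]))
#   # beam -> [[azimuth, elevation, range]], roughly [[0.0, 5.7, 1005.0]]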
class Atmosphere:
"""
A class containing methods and attributes related to atmosphere.
Methods
-------
add_atmosphere(atmosphere_id, model, model_parameters)
Adds description of the atmosphere to the atmosphere dictionary.
"""
def __init__(self):
self.atmosphere = {}
self.wind_field = None
self.verbos = True
def add_atmosphere(self, atmosphere_id, model, model_parameters):
"""
Adds description of the atmosphere to the atmosphere dictionary.
This description is used to calculate the lidar uncertainty.
Parameters
----------
atmosphere_id : str, required
String which identifies atmosphere instance in the dictionary.
model : str, required
This is a string describing which atmospheric model is used.
model_parameters : dict, required
This is a dictionary which contains parameters which detail
the selected atmospheric model.
Raises
------
UnsupportedModel
If the selected model is not supported by the package.
Notes
-----
Currently the method 'add_atmosphere' only supports the power law model of the
atmosphere. The power law model requires the following inputs in the form of a
Python dictionary: horizontal speed, wind direction, shear exponent and
reference height (height above ground level) for the horizontal speed.
TODO
----
- Support other atmospheric models (e.g., log wind profile)
"""
if (model != 'power_law'):
raise ValueError("UnsupportedModel")
if ('wind_speed' in model_parameters
and model_parameters['wind_speed'] is not None
and model_parameters['wind_speed'] != 0
and 'wind_from_direction' in model_parameters
and model_parameters['wind_from_direction'] is not None
and 'shear_exponent' in model_parameters
and model_parameters['shear_exponent'] is not None
and model_parameters['shear_exponent'] != 0
and 'reference_height' in model_parameters
and model_parameters['reference_height'] is not None
and model_parameters['reference_height'] >= 0):
wind_speed = model_parameters["wind_speed"]
wind_from_direction = model_parameters["wind_from_direction"]
u = - wind_speed * sin(radians(wind_from_direction))
v = - wind_speed * cos(radians(wind_from_direction))
w = model_parameters['upward_air_velocity'] if 'upward_air_velocity' in model_parameters else 0
model_parameters.update({
'eastward_wind' : u,
'northward_wind' : v,
'upward_air_velocity' : w
})
dict_input = {atmosphere_id: {
"model" : model,
"model_parameters": model_parameters}}
self.atmosphere.update(dict_input)
if self.verbos:
print('Atmosphere \'' + atmosphere_id
+ '\' added to the atmosphere dictionary,'
+ ' which now contains '
+ str(len(self.atmosphere))
+ ' atmosphere instance(s).')
else:
print('Incorrect parameters for power law model!')
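# Usage sketch (hypothetical identifiers and values) for the power law model:
#
#   atmosphere = Atmosphere()
#   atmosphere.add_atmosphere('pl_1', 'power_law',
#                             {'wind_speed': 10,
#                              'wind_from_direction': 270,
#                              'shear_exponent': 0.2,
#                              'reference_height': 100})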
class Measurements(Atmosphere):
"""
A class containing methods and attributes related to measurements.
Methods
-------
add_atmosphere(atmosphere_id, model, model_parameters)
Adds description of the atmosphere to the atmosphere dictionary.
"""
def __init__(self):
self.measurements = {}
Atmosphere.__init__(self)
@staticmethod
def check_measurement_positions(measurement_positions):
"""
Validates the measurement position
Parameters
----------
measurement_positions : ndarray
nD array containing data with `float` or `int` type corresponding
to Northing, Easting and Height coordinates of the instrument.
nD array data are expressed in meters.
Returns
-------
True / False
See also
--------
add_measurements() : adds measurements to the measurement dictionary
"""
if(type(measurement_positions).__module__ == np.__name__):
if (len(measurement_positions.shape) == 2
and measurement_positions.shape[1] == 4): # it is 4 since due to height asl and agl
return True
else:
# print('Wrong dimensions! Must be == (n,3) where ')
# print('n == number of measurement points!')
# print('Measurement positions were not added!')
return False
else:
# print('Input is not numpy array!')
# print('Measurement positions were not added!')
return False
def add_measurements(self, measurements_id, category='points',
utm_zone = '', **kwargs):
"""
Adds desired measurement positions to the measurements dictionary.
The measurement points are used for the uncertainty calculation.
Parameters
----------
measurements_id : str, required
String which identifies measurements instance in the dictionary.
category : str, required
Indicates category of measurements that are added to the dictionary.
This parameter can be equal to 'points' or 'horizontal_mesh'.
Default value is set to 'points'.
utm_zone : str, optional
Indicates UTM zone in which points are located.
Default value is set to an empty string.
Other Parameters
-----------------
positions : ndarray
nD array containing data with `float` or `int` type corresponding
to Northing, Easting, Height above sea level, and Height above
ground level coordinates of the measurement pts.
nD array data are expressed in meters.
This kwarg is required if category=='points'
mesh_center : ndarray
nD array containing data with `float` or `int` type
corresponding to Northing, Easting, Height above sea level,
and Height above ground level of the mesh center.
nD array data are expressed in meters.
This kwarg is required if category=='horizontal_mesh'.
extent : int
mesh extent in Northing and Easting in meters.
This kwarg is required if category=='horizontal_mesh'.
resolution : int
mesh resolution in meters.
This kwarg is required if category=='horizontal_mesh'.
Raises
------
UnsupportedCategory
If the category of measurement points is not supported.
PositionsMissing
If category=='points' but the position of points is not provided.
InappropriatePositions
If the provided points positions are not properly provided.
MissingKwargs
If one or more kwargs are missing.
TODO
----
- Accept other categories such as LOS, PPI, RHI, VAD and DBS
"""
if category not in {'points', 'horizontal_mesh'}:
raise ValueError("UnsupportedCategory")
if category == 'points' and 'positions' not in kwargs:
raise ValueError("PositionsMissing")
if (category == 'points' and
'positions' in kwargs and
not(self.check_measurement_positions(kwargs['positions']))):
raise ValueError("InappropriatePositions")
if category == 'horizontal_mesh' and set(kwargs) != {'resolution','mesh_center', 'extent'}:
raise ValueError("MissingKwargs")
if category == 'points':
measurements_dict = {measurements_id :
{'category': category,
'positions' : kwargs['positions']
}
}
self.measurements.update(measurements_dict)
elif category == 'horizontal_mesh':
x, y, mesh_points = generate_mesh(kwargs['mesh_center'],
kwargs['extent'],
kwargs['resolution'])
nrows, ncols = x.shape
measurements_dict = {measurements_id :
{'category': category,
'nrows' : nrows,
'ncols' : ncols,
'positions' : mesh_points
}
}
self.measurements.update(measurements_dict)
if self.verbos:
print('Measurements \'' + measurements_id
+ '\' added to the measurement dictionary,'
+ ' which now contains '
+ str(len(self.measurements))
+ ' measurement instance(s).')
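# Usage sketch (hypothetical identifiers and values); the positions array
# follows the column order used downstream: Easting, Northing, height asl,
# height agl.
#
#   measurements = Measurements()
#   measurements.add_measurements('pts_1', category='points',
#                                 positions=np.array([[500000.0, 4500000.0, 150.0, 100.0]]))
#   measurements.add_measurements('mesh_1', category='horizontal_mesh',
#                                 mesh_center=np.array([500000.0, 4500000.0, 150.0, 100.0]),
#                                 extent=2000, resolution=100)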
def __create_wind_ds(self, atmosphere, measurements,
u, v, w, wind_speed, wind_from_direction):
"""
Creates wind field xarray object.
Parameters
----------
atmosphere : dict
Dictionary containing information on atmosphere.
measurements : dict
Dictionary containing information on measurements.
u : ndarray
nD array of `float` or `int` corresponding to u component of wind.
In cf convention u is eastward_wind.
Units m/s.
v : ndarray
nD array `float` or `int` corresponding to v component of wind.
In cf convention v is northward_wind.
Units m/s.
w : ndarray
nD array `float` or `int` corresponding to w component of wind.
In cf convention w is upward_air_velocity.
Units m/s.
wind_speed : ndarray
nD array `float` or `int` corresponding to the wind speed.
Units m/s.
wind_from_direction : ndarray
nD array `float` or `int` corresponding to the wind direction.
Units degree.
Notes
----
Currently this method only supports points and horizontal mesh data structures.
The method is in line with the cf convention for variable naming.
"""
positions = measurements['positions']
category = measurements['category']
# make special data structure for PPI scans
if category == 'points':
self.wind_field = xr.Dataset({'eastward_wind':(['point'], u),
'northward_wind':(['point'], v),
'upward_air_velocity':(['point'], w),
'wind_speed':(['point'], wind_speed),
'wind_from_direction':(['point'], wind_from_direction)},
coords={'Easting':(['point'], positions[:,0]),
'Northing':(['point'], positions[:,1]),
'Height_asl': (['point'], positions[:,2]),
'Height_agl': (['point'], positions[:,3])}
)
if category == 'horizontal_mesh':
nrows = measurements['nrows']
ncols = measurements['ncols']
self.wind_field = xr.Dataset({'eastward_wind':(['Northing', 'Easting'], u.reshape(nrows, ncols).T),
'northward_wind':(['Northing', 'Easting'], v.reshape(nrows, ncols).T),
'upward_air_velocity':(['Northing', 'Easting'], w.reshape(nrows, ncols).T),
'wind_speed':(['Northing', 'Easting'], wind_speed.reshape(nrows, ncols).T),
'wind_from_direction':(['Northing', 'Easting'], wind_from_direction.reshape(nrows, ncols).T)},
coords={'Easting': np.unique(positions[:,0]),
'Northing': np.unique(positions[:,1]),
'Height_asl': positions[1,2],
'Height_agl': positions[1,3]}
)
self.wind_field.attrs['title'] = 'Wind characteristics at measurement points of interest'
self.wind_field.attrs['convention'] = 'cf'
self.wind_field.attrs['atmospheric_model'] = atmosphere['model']
self.wind_field.attrs['atmospheric_model_parameters'] = atmosphere['model_parameters']
self.wind_field.eastward_wind.attrs['units'] = 'm s-1'
self.wind_field.northward_wind.attrs['units'] = 'm s-1'
self.wind_field.upward_air_velocity.attrs['units'] = 'm s-1'
self.wind_field.wind_speed.attrs['units'] = 'm s-1'
self.wind_field.wind_from_direction.attrs['units'] = 'degree'
self.wind_field.Easting.attrs['units'] = 'm'
self.wind_field.Northing.attrs['units'] = 'm'
self.wind_field.Height_asl.attrs['units'] = 'm'
self.wind_field.Height_agl.attrs['units'] = 'm'
def calculate_wind(self, measurements_id, atmosphere_id):
"""
Calculates wind characteristics at the selected measurement points.
Parameters
----------
measurements_id : str, required
String which identifies measurements instance in the dictionary.
atmosphere_id : str, required
String which identifies atmosphere instance in the dictionary which
is used to calculate wind vector at measurement points
"""
atmosphere = self.atmosphere[atmosphere_id]
measurements = self.measurements[measurements_id]
shear_exponent = atmosphere['model_parameters']['shear_exponent']
reference_height = atmosphere['model_parameters']['reference_height']
gain = (measurements['positions'][:,3] / reference_height)**shear_exponent
u = atmosphere['model_parameters']['eastward_wind'] * gain
v = atmosphere['model_parameters']['northward_wind'] * gain
w = np.full(gain.shape, atmosphere['model_parameters']['upward_air_velocity'])
wind_speed = atmosphere['model_parameters']['wind_speed'] * gain
wind_from_direction = np.full(gain.shape, atmosphere['model_parameters']['wind_from_direction'])
self.__create_wind_ds(atmosphere, measurements,
u, v, w, wind_speed, wind_from_direction)
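# Worked sketch of the power law scaling applied above (hypothetical values):
# with wind_speed = 10 m/s at reference_height = 100 m and shear_exponent = 0.2,
# a measurement point at 50 m agl gets
#
#   gain = (50 / 100) ** 0.2          # ~0.871
#   wind_speed_at_point = 10 * gain   # ~8.71 m/s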
class Instruments:
"""
A class containing basic methods to operate on instruments dictionary.
"""
__KWARGS = {'uncertainty_model',
'u_estimation',
'u_range',
'u_azimuth',
'u_elevation',
'u_radial',
'range_gain',
'azimuth_gain',
'elevation_gain',
'atmosphere_id',
'measurements_id',
'probing_coordinates',
'radial_velocity',
'coordinate_system',
'coordinates',
'category',
'linked_instruments' }
def __init__(self):
self.instruments = {}
self.verbos = True
@staticmethod
def check_instrument_position(instrument_position):
"""
Validates the position of instrument
Parameters
----------
instrument_position : ndarray
nD array containing data with `float` or `int` type
corresponding to x, y and z coordinates of a lidar.
nD array data are expressed in meters.
Returns
-------
True / False
"""
if(type(instrument_position).__module__ == np.__name__):
if (len(instrument_position.shape) == 1
and instrument_position.shape[0] == 3):
return True
else:
# print('Wrong dimensions! Must be == 3 !')
return False
else:
# print('Input is not numpy array!')
return False
def update_instrument(self, instrument_id, **kwargs):
"""
Updates an instrument instance in the dictionary with information in kwargs.
Parameters
----------
instrument_id : str, required
String which identifies instrument in the instrument dictionary.
Other Parameters
-----------------
u_estimation : float, optional
Uncertainty in estimating radial velocity from Doppler spectra.
Unless provided, (default) value is set to 0.1 m/s.
u_range : float, optional
Uncertainty in detecting range at which atmosphere is probed.
Unless provided, (default) value is set to 1 meter.
u_azimuth : float, optional
Uncertainty in the beam steering for the azimuth angle.
Unless provided, (default) value is set to 0.1 degree.
u_elevation : float, optional
Uncertainty in the beam steering for the elevation angle.
Unless provided, (default) value is set to 0.1 degree.
Raises
------
WrongId
If for the provided instrument_id there is no key in the dictionary.
WrongKwargs
If one or more kwargs are incorrect.
Notes
-----
If the end-user manually updates keys essential for the uncertainty calculation,
the uncertainty values are not automatically recalculated!
Therefore, to update the uncertainty values the end-user must re-execute
the calculate_uncertainty method.
TODO
----
- If certain keys are changed/updated, trigger the uncertainty re-calculation.
"""
if instrument_id not in self.instruments:
raise ValueError("WrongId")
if (len(kwargs) > 0 and not(set(kwargs).issubset(self.__KWARGS))):
raise ValueError("WrongKwargs")
if (len(kwargs) > 0 and set(kwargs).issubset(self.__KWARGS)):
for key in kwargs:
if key in {'u_estimation', 'u_range', 'u_azimuth', 'u_elevation'}:
self.instruments[instrument_id]['intrinsic_uncertainty'][key] = kwargs[key]
class Lidars(Instruments):
"""
A class containing methods and attributes related to wind lidars.
Methods
-------
add_lidar(instrument_id, position, category, **kwargs):
Adds a lidar instance to the instrument dictionary.
"""
def __init__(self):
super().__init__()
def add_lidar(self, instrument_id, position, **kwargs):
"""
Adds a lidar instance to the instrument dictionary.
Parameters
----------
instrument_id : str, required
String which identifies instrument in the instrument dictionary.
position : ndarray, required
nD array containing data with `float` or `int` type corresponding
to Northing, Easting and Height coordinates of the instrument.
nD array data are expressed in meters.
Other Parameters
-----------------
u_estimation : float, optional
Uncertainty in estimating radial velocity from Doppler spectra.
Unless provided, (default) value is set to 0.1 m/s.
u_range : float, optional
Uncertainty in detecting range at which atmosphere is probed.
Unless provided, (default) value is set to 1 m.
u_azimuth : float, optional
Uncertainty in the beam steering for the azimuth angle.
Unless provided, (default) value is set to 0.1 deg.
u_elevation : float, optional
Uncertainty in the beam steering for the elevation angle.
Unless provided, (default) value is set to 0.1 deg.
Raises
------
InappropriatePosition
If the provided position of instrument is not properly provided.
Notes
--------
Instruments can be added one at a time.
Currently only the instrument position in UTM coordinate system is supported.
TODO
----
- Support the instrument position in coordinate systems other than UTM
- Integrate e-WindLidar attributes and vocabulary for lidar type
"""
if not(self.check_instrument_position(position)):
raise ValueError("InappropriatePosition")
category="wind_lidar"
instrument_dict = {instrument_id:{
'category': category,
'position': position,
'intrinsic_uncertainty':{
'u_estimation' : 0.1, # default
'u_range' : 1, # default
'u_azimuth': 0.1, # default
'u_elevation': 0.1 # default
}
}
}
self.instruments.update(instrument_dict)
self.update_instrument(instrument_id, **kwargs)
if self.verbos:
print('Instrument \'' + instrument_id + '\' of category \'' +
category +'\' added to the instrument dictionary, ' +
'which now contains ' + str(len(self.instruments)) +
' instrument(s).')
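# Usage sketch (hypothetical identifiers and values):
#
#   lidars = Lidars()
#   lidars.add_lidar('lidar_1', np.array([500000.0, 4500000.0, 50.0]),
#                    u_estimation=0.1, u_range=1, u_azimuth=0.1, u_elevation=0.1)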
class Uncertainty(Measurements, Lidars):
"""
A class containing methods to calculate single- and dual- Doppler uncertainty.
Methods
-------
add_atmosphere(atmosphere_id, model, model_parameters)
Adds description of the atmosphere to the atmosphere dictionary.
add_instrument(instrument_id, position, category, **kwargs):
Adds an instrument to the instrument dictionary.
add_measurements(measurements_id, category, **kwargs)
Adds desired measurement positions to the measurements dictionary.
calculate_uncertainty(instrument_ids, measurements_id, atmosphere_id, uncertainty_model)
Calculates a measurement uncertainty for a given instrument(s).
"""
def __init__(self):
self.uncertainty = None
Instruments.__init__(self)
Measurements.__init__(self)
self.probing_dict = {}
def _generate_prob_dict(self, instrument_id, measurements_id):
instrument_pos = self.instruments[instrument_id]['position']
measurement_pos = self.measurements[measurements_id]['positions']
coords = generate_beam_coords(instrument_pos,measurement_pos)
self.probing_dict.update({instrument_id:coords})
def __create_rad_ds(self, instrument_id, measurements):
"""
Creates radial wind speed uncertainty xarray object.
Parameters
----------
instrument_id : str
String indicating the instrument in the dictionary to be considered.
measurements : dict
Dictionary containing information on measurements.
Returns
-------
ds : xarray
xarray dataset containing radial wind speed uncertainty.
Notes
----
Currently this method only supports points and horizontal mesh data structures.
The method can be called only when the radial uncertainty has been calculated.
"""
positions = measurements['positions']
category = measurements['category']
intrinsic_uncertainty = self.instruments[instrument_id]['intrinsic_uncertainty']
if category == 'points':
prob_cord = self.__probing_dict[instrument_id]
rad_speed = self.__radial_vel_dict[instrument_id]
azimuth_gain = self.__radial_uncertainty[instrument_id]['azimuth_gain']
elevation_gain = self.__radial_uncertainty[instrument_id]['elevation_gain']
range_gain = self.__radial_uncertainty[instrument_id]['range_gain']
u_radial = self.__radial_uncertainty[instrument_id]['u_radial']
ds = xr.Dataset({'azimuth':(['instrument_id','point'], np.array([prob_cord[:,0]])),
'elevation':(['instrument_id','point'], np.array([prob_cord[:,1]])),
'range':(['instrument_id','point'], np.array([prob_cord[:,2]])),
'radial_speed':(['instrument_id','point'], np.array([rad_speed])),
'azimuth_contribution':(['instrument_id','point'], np.array([azimuth_gain])),
'elevation_contribution':(['instrument_id','point'], np.array([elevation_gain])),
'range_contribution':(['instrument_id','point'], np.array([range_gain.T])),
'radial_speed_uncertainty':(['instrument_id','point'], np.array([u_radial])),
# 'instrument_uncertainty':(['instrument_id'], np.array([intrinsic_uncertainty]))
},
coords={'Easting':(['point'], positions[:,0]),
'Northing':(['point'], positions[:,1]),
'Height': (['point'], positions[:,2]),
'instrument_id': np.array([instrument_id])}
)
return ds
if category == 'horizontal_mesh':
nrows = measurements['nrows']
ncols = measurements['ncols']
prob_cord = self.__probing_dict[instrument_id].reshape(nrows, ncols,3)
rad_speed = self.__radial_vel_dict[instrument_id].reshape(nrows, ncols)
azimuth_gain = self.__radial_uncertainty[instrument_id]['azimuth_gain'].reshape(nrows, ncols)
elevation_gain = self.__radial_uncertainty[instrument_id]['elevation_gain'].reshape(nrows, ncols)
range_gain = self.__radial_uncertainty[instrument_id]['range_gain'].reshape(nrows, ncols)
u_radial = self.__radial_uncertainty[instrument_id]['u_radial'].reshape(nrows, ncols)
ds = xr.Dataset({'azimuth':(['instrument_id', 'Northing', 'Easting'],
np.array([prob_cord[:,:, 0].T])),
'elevation':(['instrument_id', 'Northing', 'Easting'],
np.array([prob_cord[:,:, 1].T])),
'range':(['instrument_id', 'Northing', 'Easting'],
np.array([prob_cord[:,:, 2].T])),
'radial_speed':(['instrument_id', 'Northing', 'Easting'],
np.array([rad_speed.T])),
'azimuth_contribution':(['instrument_id', 'Northing', 'Easting'],
np.array([azimuth_gain.T])),
'elevation_contribution':(['instrument_id', 'Northing', 'Easting'],
np.array([elevation_gain.T])),
'range_contribution':(['instrument_id', 'Northing', 'Easting'],
np.array([range_gain.T])),
'radial_speed_uncertainty':(['instrument_id', 'Northing', 'Easting'],
np.array([u_radial.T])),
'intrinsic_uncertainty':(['instrument_id'], np.array([intrinsic_uncertainty]))
},
coords={'Easting': np.unique(positions[:,0]),
'Northing': np.unique(positions[:,1]),
'instrument_id': np.array([instrument_id]),
'Height': positions[0,2]}
)
return ds
def __create_dd_ds(self, measurements):
"""
Creates dual-Doppler uncertainty xarray object.
Parameters
----------
measurements : dict
Dictionary containing information on measurements.
Returns
-------
ds : xarray
xarray dataset containing dual-Doppler uncertainty.
Notes
----
Currently this method only supports points and horizontal mesh data structures.
The method can be called only when the dual-Doppler uncertainty has been calculated.
"""
positions = measurements['positions']
category = measurements['category']
if category == 'points':
ds = xr.Dataset({'wind_speed_uncertainty':(['point'],
self.__wind_speed_uncertainty),
'wind_from_direction_uncertainty':(['point'],
self.__wind_from_direction_uncertainty),
'between_beam_angle':(['point'], self.__between_beam_angle),
'numerator_of_wind_speed_uncertainty':(['point'], self.__numerator_Vh),
'numerator_of_wind_from_direction_uncertainty':(['point'], self.__numerator_dir),
'denominator_of_wind_speed_uncertainty':(['point'], self.__denominator_Vh),
'denominator_of_wind_from_direction_uncertainty':(['point'], self.__denominator_dir),
},
coords={'Easting':(['point'], positions[:,0]),
'Northing':(['point'], positions[:,1]),
'Height': (['point'], positions[:,2])}
)
if category == 'horizontal_mesh':
ds = xr.Dataset({'wind_speed_uncertainty':(['Northing', 'Easting'], self.__wind_speed_uncertainty),
'wind_from_direction_uncertainty':(['Northing', 'Easting'], self.__wind_from_direction_uncertainty),
'between_beam_angle':(['Northing', 'Easting'], self.__between_beam_angle),
'numerator_of_wind_speed_uncertainty':(['Northing', 'Easting'], self.__numerator_Vh),
'numerator_of_wind_from_direction_uncertainty':(['Northing', 'Easting'], self.__numerator_dir),
'denominator_of_wind_speed_uncertainty':(['Northing', 'Easting'], self.__denominator_Vh),
'denominator_of_wind_from_direction_uncertainty':(['Northing', 'Easting'], self.__denominator_dir),
},
coords={'Easting': np.unique(positions[:,0]),
'Northing': np.unique(positions[:,1]),
'Height': positions[0,2]})
return ds
@staticmethod
def __update_metadata(ds, uncertainty_model):
"""
Updates xarray dataset with metadata.
Parameters
----------
ds : xarray
xarray dataset containing radial and/or dual-Doppler uncertainty.
uncertainty_model : str
String indicating which uncertainty model was used for the uncertainty calculation.
Returns
-------
ds : xarray
xarray dataset updated with metadata.
"""
# Update of metadata here
ds.attrs['title'] = 'Radial speed uncertainty'
ds.attrs['convention'] = 'cf'
ds.attrs['uncertainty_model'] = 'Vasiljevic-Courtney_' + uncertainty_model
ds.azimuth.attrs['units'] = 'degree'
ds.elevation.attrs['units'] = 'degree'
ds.range.attrs['units'] = 'm'
ds.radial_speed.attrs['units'] = 'm s-1'
ds.radial_speed.attrs['standard_name'] = 'radial_velocity_of_scatterers_toward_instrument'
ds.radial_speed_uncertainty.attrs['units'] = 'm s-1'
ds.azimuth_contribution.attrs['units'] = 'm s-1'
ds.elevation_contribution.attrs['units'] = 'm s-1'
ds.range_contribution.attrs['units'] = 'm s-1'
ds.Easting.attrs['units'] = 'm'
ds.Northing.attrs['units'] = 'm'
ds.Height.attrs['units'] = 'm'
if uncertainty_model == 'dual-Doppler':
ds.attrs['title'] = 'Dual-Doppler uncertainty'
ds.attrs['uncertainty_model'] = 'Vasiljevic-Courtney_' + uncertainty_model
ds.wind_from_direction_uncertainty.attrs['units'] = 'degree'
ds.wind_speed_uncertainty.attrs['units'] = 'm s-1'
return ds
def __calculate_elevation_contribution(self, instrument_id):
"""
Calculates the elevation angle uncertainty contribution to the radial uncertainty.
Parameters
----------
instrument_id : str
String indicating the instrument in the dictionary to be considered.
Returns
-------
elevation_contribution : ndarray
nD array of elevation angle uncertainty contribution for each measurement point.
"""
# Necessary parameters extraction:
#
u_elevation = self.instruments[instrument_id]['intrinsic_uncertainty']['u_elevation']
coords = self.__probing_dict[instrument_id]
wind_from_direction = self.wind_field.wind_from_direction.values.reshape(-1)
shear_exponent = self.wind_field.attrs['atmospheric_model_parameters']['shear_exponent']
wind_speed = self.wind_field.attrs['atmospheric_model_parameters']['wind_speed']
reference_height = self.wind_field.attrs['atmospheric_model_parameters']['reference_height']
measurement_height = self.wind_field.Height_agl.values
elevation_contribution = (- shear_exponent * coords[:,2] *
np.cos(np.radians(coords[:,0] - wind_from_direction)) *
np.cos(np.radians(coords[:,1]))**2 *
(measurement_height / reference_height)**(shear_exponent-1) *
(wind_speed / reference_height) +
np.cos(np.radians(coords[:,0] - wind_from_direction)) *
np.sin(np.radians(coords[:,1])) *
wind_speed*(measurement_height / reference_height)**(shear_exponent)
) * u_elevation * (pi/180)
return elevation_contribution
def __calculate_azimuth_contribution(self, instrument_id):
"""
Calculates the azimuth angle uncertainty contribution to the radial uncertainty.
Parameters
----------
instrument_id : str
String indicating the instrument in the dictionary to be considered.
Returns
-------
azimuth_contribution : ndarray
nD array of azimuth angle uncertainty contribution for each measurement point.
"""
# Necessary parameters extraction:
#
u_azimuth = self.instruments[instrument_id]['intrinsic_uncertainty']['u_azimuth']
coords = self.__probing_dict[instrument_id]
wind_from_direction = self.wind_field.wind_from_direction.values.reshape(-1)
shear_exponent = self.wind_field.attrs['atmospheric_model_parameters']['shear_exponent']
wind_speed = self.wind_field.attrs['atmospheric_model_parameters']['wind_speed']
reference_height = self.wind_field.attrs['atmospheric_model_parameters']['reference_height']
measurement_height = self.wind_field.Height_agl.values
azimuth_contribution = - (np.sin(np.radians(coords[:,0] - wind_from_direction)) *
np.cos(np.radians(coords[:,1])) *
wind_speed*(measurement_height / reference_height)**(shear_exponent)) * u_azimuth * (pi/180)
return azimuth_contribution
def __calculate_range_contribution(self, instrument_id):
"""
Calculates the range uncertainty contribution to the radial uncertainty.
Parameters
----------
instrument_id : str
String indicating the instrument in the dictionary to be considered.
Returns
-------
range_contribution : ndarray
nD array of range uncertainty contribution for each measurement point.
"""
# Necessary parameters extraction:
#
u_range = self.instruments[instrument_id]['intrinsic_uncertainty']['u_range']
coords = self.__probing_dict[instrument_id]
wind_from_direction = self.wind_field.wind_from_direction.values.reshape(-1)
shear_exponent = self.wind_field.attrs['atmospheric_model_parameters']['shear_exponent']
wind_speed = self.wind_field.attrs['atmospheric_model_parameters']['wind_speed']
reference_height = self.wind_field.attrs['atmospheric_model_parameters']['reference_height']
measurement_height = self.wind_field.Height_agl.values
range_contribution = (
(shear_exponent/reference_height) *
np.cos(np.radians(coords[:,0] - wind_from_direction)) *
np.cos(np.radians(coords[:,1])) *
np.sin(np.radians(coords[:,1])) *
wind_speed*(measurement_height / reference_height)**(-1 + shear_exponent)
)*u_range
return range_contribution
def __calculate_radial_uncertainty(self, instrument_id):
"""
Calculates the radial wind speed uncertainty.
Parameters
----------
instrument_id : str
String indicating the instrument in the dictionary to be considered.
Returns
-------
dict_out : dict
Dictionary containing selected uncertainty model, calculated radial
uncertainty and gains for each individual uncertainty component.
Notes
--------
The radial wind speed uncertainty, :math:`{u_{V_{radial}}}`, is calculated
using the following mathematical expression:
.. math::
u_{V_{radial}}^2 = u_{estimation}^2 +
(\text{elevation contribution})^2 +
(\text{azimuth contribution})^2 +
(\text{range contribution})^2
"""
u_estimation = self.instruments[instrument_id]['intrinsic_uncertainty']['u_estimation']
azimuth_contrib = self.__calculate_azimuth_contribution(instrument_id)
elevation_contrib = self.__calculate_elevation_contribution(instrument_id)
range_contrib = self.__calculate_range_contribution(instrument_id)
abs_uncertainty = np.sqrt(
(u_estimation)**2 +
(azimuth_contrib)**2 +
(elevation_contrib)**2 +
(range_contrib)**2
)
dict_out = {'azimuth_gain' : azimuth_contrib,
'elevation_gain' : elevation_contrib,
'range_gain': range_contrib,
'u_radial' : abs_uncertainty,
'uncertainty_model' : 'radial_velocity'
}
return dict_out
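# Numerical sketch of the quadrature sum above (hypothetical contribution
# values): with u_estimation = 0.1 m/s and azimuth, elevation and range
# contributions of 0.05, 0.02 and 0.01 m/s at a given point,
#
#   u_radial = np.sqrt(0.1**2 + 0.05**2 + 0.02**2 + 0.01**2)   # ~0.114 m/s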
def __calculate_DD_speed_uncertainty(self, instrument_ids):
"""
Calculates the dual-Doppler wind speed uncertainty.
Parameters
----------
instrument_ids : list of str
List of two instrument ids in the dictionary whose radial measurements are combined.
Notes
--------
The dual-Doppler wind speed uncertainty, :math:`{u_{V_{h}}}`, is calculated
using the following mathematical expression:
.. math::
u_{V_{h}}=\\frac{1}{V_{h} \sin({\\theta}_{1}-{\\theta}_{2})^2} *
\\biggl((V_{radial_{1}}-V_{radial_{2}}\cos({\\theta}_{1}-{\\theta}_{2}))^{2}u_{V_{radial_{1}}}^{2} +
(V_{radial_{2}}-V_{radial_{1}}\cos({\\theta}_{1}-{\\theta}_{2}))^{2}u_{V_{radial_{2}}}^{2}\\biggl)^{\\frac{1}{2}}
where :math:`u_{V_{radial_{1}}}` and :math:`u_{V_{radial_{2}}}` are radial
uncertainties for measurements of radial velocities :math:`{V_{radial_{1}}}`
and :math:`{V_{radial_{2}}}` by a dual-Doppler system (e.g., two lidars),
:math:`{\\theta_{1}}` and :math:`{\\theta_{2}}` are the azimuth angles
of the two intersecting beams at a point of interest, while :math:`{V_{h}}`
is the horizontal wind speed at that point.
"""
azimuth_1 = self.uncertainty.azimuth.sel(instrument_id =instrument_ids[0]).values
azimuth_2 = self.uncertainty.azimuth.sel(instrument_id =instrument_ids[1]).values
angle_dif = np.radians(azimuth_1 - azimuth_2) # in radians
los_1 = self.uncertainty.radial_speed.sel(instrument_id=instrument_ids[0]).values
U_rad1 = self.uncertainty.radial_speed_uncertainty.sel(instrument_id =instrument_ids[0]).values
los_2 = self.uncertainty.radial_speed.sel(instrument_id =instrument_ids[1]).values
U_rad2 = self.uncertainty.radial_speed_uncertainty.sel(instrument_id =instrument_ids[1]).values
wind_speed = self.wind_field.wind_speed.values
# uncertainty =((wind_speed * (np.sin(angle_dif))**2)**-1 *
# np.sqrt((los_1 - los_2 * np.cos(angle_dif))**2 * U_rad1**2 +
# (los_2 - los_1 * np.cos(angle_dif))**2 * U_rad2**2))
numerator = np.sqrt(((los_1 - los_2*np.cos(angle_dif))**2)*U_rad1**2+
((los_2 - los_1*np.cos(angle_dif))**2)*U_rad2**2)
import os
import sys
import numpy as np
import h5py
from tqdm import tqdm
from astropy.io import fits
from matplotlib import pyplot
from comancpipeline.MapMaking import MapTypes
from comancpipeline.Analysis import Statistics, BaseClasses
class RepointEdges(BaseClasses.DataStructure):
"""
Scan Edge Split - Each time the telescope stops to repoint this is defined as the edge of a scan
"""
def __init__(self, **kwargs):
self.max_el_current_fraction = 0.7
self.min_sample_distance = 10
self.min_scan_length = 5000 # samples
self.offset_length = 50
for item, value in kwargs.items():
self.__setattr__(item,value)
def __call__(self, data):
"""
Expects a level 2 data structure
"""
return self.getScanPositions(data)
def getScanPositions(self, d):
"""
Finds beginning and ending of scans, creates mask that removes data when the telescope is not moving,
provides indices for the positions of scans in masked array
Notes:
- We may need to check for vane position too
- Iteratively finding the best current fraction may also be needed
"""
features = d['level1/spectrometer/features'][:]
uf, counts = np.unique(features,return_counts=True) # select most common feature
ifeature = np.floor(np.log10(uf[np.argmax(counts)]))
import collections
import numpy as np
from copy import copy
from robosuite.utils import RandomizationError
from robosuite.utils.transform_utils import quat_multiply
from robosuite.models.objects import MujocoObject
class ObjectPositionSampler:
"""
Base class of object placement sampler.
Args:
name (str): Name of this sampler.
mujoco_objects (None or MujocoObject or list of MujocoObject): single model or list of MJCF object models
ensure_object_boundary_in_range (bool): If True, will ensure that the object is enclosed within a given boundary
(should be implemented by subclass)
ensure_valid_placement (bool): If True, will check for correct (valid) object placements
reference_pos (3-array): global (x,y,z) position relative to which sampling will occur
z_offset (float): Add a small z-offset to placements. This is useful for fixed objects
that do not move (i.e. no free joint) to place them above the table.
"""
def __init__(
self,
name,
mujoco_objects=None,
ensure_object_boundary_in_range=True,
ensure_valid_placement=True,
reference_pos=(0, 0, 0),
z_offset=0.,
):
# Setup attributes
self.name = name
if mujoco_objects is None:
self.mujoco_objects = []
else:
# Shallow copy the list so we don't modify the inputted list but still keep the object references
self.mujoco_objects = [mujoco_objects] if isinstance(mujoco_objects, MujocoObject) else copy(mujoco_objects)
self.ensure_object_boundary_in_range = ensure_object_boundary_in_range
self.ensure_valid_placement = ensure_valid_placement
self.reference_pos = reference_pos
self.z_offset = z_offset
def add_objects(self, mujoco_objects):
"""
Add additional objects to this sampler. Checks to make sure there's no identical objects already stored.
Args:
mujoco_objects (MujocoObject or list of MujocoObject): single model or list of MJCF object models
"""
mujoco_objects = [mujoco_objects] if isinstance(mujoco_objects, MujocoObject) else mujoco_objects
for obj in mujoco_objects:
assert obj not in self.mujoco_objects, "Object '{}' already in sampler!".format(obj.name)
self.mujoco_objects.append(obj)
def reset(self):
"""
Resets this sampler. Removes all mujoco objects from this sampler.
"""
self.mujoco_objects = []
def sample(self, fixtures=None, reference=None, on_top=True):
"""
Uniformly sample on a surface (not necessarily table surface).
Args:
fixtures (dict): dictionary of current object placements in the scene as well as any other relevant
obstacles that should not be in contact with newly sampled objects. Used to make sure newly
generated placements are valid. Should be object names mapped to (pos, quat, MujocoObject)
reference (str or 3-tuple or None): if provided, sample relative placement. Can either be a string, which
corresponds to an existing object found in @fixtures, or a direct (x,y,z) value. If None, will sample
relative to this sampler's `'reference_pos'` value.
on_top (bool): if True, sample placement on top of the reference object.
Return:
dict: dictionary of all object placements, mapping object_names to (pos, quat, obj), including the
placements specified in @fixtures. Note quat is in (w,x,y,z) form
"""
raise NotImplementedError
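# Illustrative usage sketch for the `sample` contract documented above, using
# the concrete UniformRandomSampler defined below. The variable `cube` and the
# numeric ranges are assumptions chosen only for illustration.
#
#   sampler = UniformRandomSampler(
#       name="ObjectSampler",
#       mujoco_objects=cube,              # a MujocoObject instance
#       x_range=(-0.1, 0.1),
#       y_range=(-0.1, 0.1),
#       rotation=None,                    # uniform random rotation about z
#       reference_pos=(0, 0, 0.8),        # e.g. a table surface height
#   )
#   placements = sampler.sample()         # {obj.name: (pos, quat, obj), ...}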
class UniformRandomSampler(ObjectPositionSampler):
"""
Places all objects within the table uniformly random.
Args:
name (str): Name of this sampler.
mujoco_objects (None or MujocoObject or list of MujocoObject): single model or list of MJCF object models
x_range (2-array of float): Specify the (min, max) relative x_range used to uniformly place objects
y_range (2-array of float): Specify the (min, max) relative y_range used to uniformly place objects
rotation (None or float or Iterable):
:`None`: Add uniform random rotation
:`Iterable (a,b)`: Uniformly randomize rotation angle between a and b (in radians)
:`value`: Add fixed angle rotation
rotation_axis (str): Can be 'x', 'y', or 'z'. Axis about which to apply the requested rotation
ensure_object_boundary_in_range (bool):
:`True`: The center of object is at position:
[uniform(min x_range + radius, max x_range - radius)], [uniform(min y_range + radius, max y_range - radius)]
:`False`:
[uniform(min x_range, max x_range)], [uniform(min y_range, max y_range)]
ensure_valid_placement (bool): If True, will check for correct (valid) object placements
reference_pos (3-array): global (x,y,z) position relative to which sampling will occur
z_offset (float): Add a small z-offset to placements. This is useful for fixed objects
that do not move (i.e. no free joint) to place them above the table.
"""
def __init__(
self,
name,
mujoco_objects=None,
x_range=(0, 0),
y_range=(0, 0),
rotation=None,
rotation_axis='z',
ensure_object_boundary_in_range=True,
ensure_valid_placement=True,
reference_pos=(0, 0, 0),
z_offset=0.,
):
self.x_range = x_range
self.y_range = y_range
self.rotation = rotation
self.rotation_axis = rotation_axis
super().__init__(
name=name,
mujoco_objects=mujoco_objects,
ensure_object_boundary_in_range=ensure_object_boundary_in_range,
ensure_valid_placement=ensure_valid_placement,
reference_pos=reference_pos,
z_offset=z_offset,
)
def _sample_x(self, object_horizontal_radius):
"""
Samples the x location for a given object
Args:
object_horizontal_radius (float): Radius of the object currently being sampled for
Returns:
float: sampled x position
"""
minimum, maximum = self.x_range
if self.ensure_object_boundary_in_range:
minimum += object_horizontal_radius
maximum -= object_horizontal_radius
return np.random.uniform(high=maximum, low=minimum)
def _sample_y(self, object_horizontal_radius):
"""
Samples the y location for a given object
Args:
object_horizontal_radius (float): Radius of the object currently being sampled for
Returns:
float: sampled y position
"""
minimum, maximum = self.y_range
if self.ensure_object_boundary_in_range:
minimum += object_horizontal_radius
maximum -= object_horizontal_radius
return np.random.uniform(high=maximum, low=minimum)
def _sample_quat(self):
"""
Samples the orientation for a given object
Returns:
np.array: sampled (r,p,y) euler angle orientation
Raises:
ValueError: [Invalid rotation axis]
"""
if self.rotation is None:
rot_angle = np.random.uniform(high=2 * np.pi, low=0)
elif isinstance(self.rotation, collections.abc.Iterable):
rot_angle = np.random.uniform(
high=max(self.rotation), low=min(self.rotation)
)
else:
rot_angle = self.rotation
# Return angle based on axis requested
if self.rotation_axis == 'x':
return np.array([np.cos(rot_angle / 2), np.sin(rot_angle / 2), 0, 0])
elif self.rotation_axis == 'y':
return np.array([ | np.cos(rot_angle / 2) | numpy.cos |
from functools import partial
import numpy as np
from matplotlib import pyplot as plt
from os.path import expanduser
# from https://scikit-learn.org/stable/auto_examples/gaussian_process/plot_gpr_noisy_targets.html
def plot_prediction_uncertainty_and_data(x, f, X, y, y_pred, sigma=None, ylim=None, xlim=None, title='', filename='regression_results.png'):
plt.clf()
plt.figure()
plt.plot(x, f(x), 'r:', label=r'$f(x) = objective$')
plt.plot(X, y, 'r.', markersize=10, label='Observations')
if isinstance(y_pred, (tuple, list, np.ndarray)) and isinstance(y_pred[0], (tuple, list, np.ndarray)) and len(y_pred[0]) > 1:
for row_index, y_pred_row in enumerate(y_pred):
plt.plot(x, y_pred_row, 'b-', label='Prediction' if row_index == 0 else None)
else:
plt.plot(x, y_pred, 'b-', label='Prediction')
if sigma is not None:
plt.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.5, fc='b', ec='None', label='95% confidence interval')
plt.xlabel('$x$')
plt.ylabel('$f(x)$')
if ylim is not None:
plt.ylim(ylim[0], ylim[1])
if xlim is not None:
plt.xlim(xlim[0], xlim[1])
plt.legend(loc='upper left')
plt.title(title)
plt.savefig(expanduser('~/Downloads/' + filename), dpi=300)
plt.show()
def gaussian(X1, X2, widths=None):
sqdist = np.sum(X1 ** 2, 1).reshape(-1, 1) + np.sum(X2 ** 2, 1) - 2 * np.dot(X1, X2.T)
return np.exp(-0.5 / np.array(widths) ** 2 * sqdist)
def dot_product(X1, X2):
return np.outer(X1, X2)
def kernel(X1, X2=None, widths=None, noise_parameter=0.0, mean=0.0, add_constant=False, _normalize=True, multiplier=1):
"""
Isotropic squared exponential kernel.
Args:
X1: Array of m points (m x d).
X2: Array of n points (n x d).
Returns:
(m x n) matrix.
"""
if X2 is None:
self_kernel = True
X2 = X1
else:
self_kernel = False
X2 = X2
core_kernel = gaussian(X1, X2, widths=widths)
if self_kernel:
white_noise = np.eye(len(core_kernel)) * noise_parameter
constant = (np.ones(core_kernel.shape)) * mean
else:
white_noise = np.zeros(core_kernel.shape)
constant = np.ones(core_kernel.shape) * mean
unnormalized_kernel = core_kernel + white_noise ** 2 + constant
if _normalize:
normalized_kernel = (unnormalized_kernel.T / np.sqrt(np.diag(
kernel(X1, widths=widths, noise_parameter=0, mean=0, add_constant=add_constant, _normalize=False)))).T
normalized_kernel = normalized_kernel / np.sqrt(
np.diag(kernel(X2, widths=widths, noise_parameter=0, mean=0, add_constant=add_constant, _normalize=False)))
if add_constant:
return multiplier * np.hstack([np.ones((len(normalized_kernel), 1)), normalized_kernel])
else:
return multiplier * normalized_kernel
else:
if add_constant:
return multiplier * np.hstack([np.ones((len(unnormalized_kernel), 1)), unnormalized_kernel])
else:
return multiplier * unnormalized_kernel
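# Illustrative sanity check of the shape contract described in the kernel
# docstring above; the toy inputs are assumptions chosen only for illustration.
# An (m x d) and an (n x d) input should give an (m x n) kernel matrix:
#
#   K = kernel(np.linspace(0, 1, 5).reshape(-1, 1),
#              np.linspace(0, 1, 3).reshape(-1, 1),
#              widths=[0.5])
#   assert K.shape == (5, 3)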
######
# Settings
######
compare_specialized_modules = False # this requires access to our libraries which aren't yet public
basis_domain = (-5, 5)
plot_domain = (-1, 2)
training_domain = (0, 1)
ylim = (-2, 2)
widths = [0.5]
wavelength = 0.5
phase = 0
vertical_shift = 0
objective_scale = 5
noise_parameter_scale = 0.1
number_of_basis_functions = 100
number_of_evaluation_points = 500
number_of_training_points = 20
fit_intercept = True
constant_kernel_weight = 1e5
plot_range = (-4 * objective_scale + vertical_shift, 4 * objective_scale + vertical_shift)
render_plots = True
#####
def objective(X):
return objective_scale * np.sin((X / wavelength + phase) * 2 * np.pi) + vertical_shift
def get_average_subsequent_differences_in_list(ini_list):
diff_list = []
for x, y in zip(ini_list[0::], ini_list[1::]):
diff_list.append(y - x)
return np.average(diff_list)
number_of_dirac_basis_functions = max(1000, number_of_basis_functions)
basis_centers = np.linspace(basis_domain[0], basis_domain[1], number_of_basis_functions).reshape(-1, 1)
dirac_basis_centers = np.linspace(basis_domain[0], basis_domain[1], number_of_dirac_basis_functions).reshape(-1, 1)
dirac_basis_increment = get_average_subsequent_differences_in_list(dirac_basis_centers)
X = np.linspace(plot_domain[0], plot_domain[1], number_of_evaluation_points).reshape(-1, 1)
X_train = np.linspace(training_domain[0], training_domain[1], number_of_training_points).reshape(-1, 1)
Y_train = objective(X_train).reshape(-1, 1)
noise_parameter = max(1e-5, noise_parameter_scale) * objective_scale
prior_noise_parameter = objective_scale
GP_RBF_kernel = partial(kernel, widths=widths, noise_parameter=1e-3, mean=constant_kernel_weight)
GP_kernel = partial(kernel, widths=widths, noise_parameter=noise_parameter / objective_scale,
mean=constant_kernel_weight, multiplier=objective_scale ** 2)
incorrect_kernel = partial(kernel, widths=[x / 5 for x in widths], noise_parameter=noise_parameter)
BLR_basis = partial(kernel, X2=basis_centers,
widths=[x / np.sqrt(2) for x in widths],
mean=0.0,
add_constant=fit_intercept) # note that noise_parameter only applies if X2 is None
dirac_basis = partial(kernel, X2=dirac_basis_centers,
widths=[dirac_basis_increment for x in widths],
mean=0.0) # note that noise_parameter only applies if X2 is None
def normalize_basis(basis, apply_sqrt=True, ignore_first_column=False):
if ignore_first_column:
basis_for_norm = basis[:, 1:]
output_basis = basis.copy()
output_basis[:, 1:] = (basis[:, 1:].T / np.sqrt(np.diag(basis_for_norm @ basis_for_norm.T))).T
return output_basis
else:
return (basis.T / np.sqrt(np.diag(basis @ basis.T))).T
# regular (manual) basis
X_train_basis = BLR_basis(X_train)
X_basis = BLR_basis(X)
normalization_constant = np.average(np.diag(X_train_basis @ X_train_basis.T)) ** -0.5
X_train_basis = normalize_basis(X_train_basis, ignore_first_column=fit_intercept)
X_basis = normalize_basis(X_basis, ignore_first_column=fit_intercept)
# apply eigenbasis
K = GP_RBF_kernel(dirac_basis_centers)
eigenvalues, eigenvectors = np.linalg.eigh(K)
eigenbasis = eigenvectors.T
X_train_dirac_basis = dirac_basis(X_train)
X_dirac_basis = dirac_basis(X)
X_train_dirac_basis = np.square(normalize_basis(X_train_dirac_basis))
X_dirac_basis = np.square(normalize_basis(X_dirac_basis))
X_train_eigenbasis = X_train_dirac_basis @ eigenbasis.T @ np.diag(np.sqrt(eigenvalues))
X_eigenbasis = X_dirac_basis @ eigenbasis.T @ np.diag(np.sqrt(eigenvalues))
eigenvalues, eigenvectors = np.linalg.eigh(noise_parameter ** -2 * X_train_basis.T @ X_train_basis)
number_of_effective_parameters = sum(x / (prior_noise_parameter ** -2 + x) for x in | np.real(eigenvalues) | numpy.real |
import numpy as np
from itertools import permutations
from numpy.linalg import pinv
from six.moves import xrange
from numpy.testing import assert_equal
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import assert_raises_regexp
from tensor_lda.utils.tensor_utils import (_check_1d_vector,
rank_1_tensor_3d,
khatri_rao_prod,
tensor_3d_permute,
tensor_3d_from_matrix_vector,
tensor_3d_from_vector_matrix,
tensor_3d_prod)
def test_check_1d_vectors():
# test check_1d_vectors function
rng = np.random.RandomState(0)
dim = rng.randint(50, 100)
# check (dim, )
a = _check_1d_vector(np.ones((dim,)))
assert_equal(1, len(a.shape))
assert_equal(dim, a.shape[0])
# check (dim, 1)
b = _check_1d_vector(np.ones((dim, 1)))
assert_equal(1, len(b.shape))
assert_equal(dim, b.shape[0])
# check (dim, 2)
c = np.ones((dim, 2))
assert_raises_regexp(ValueError, r"^Vector is not 1-d array:",
_check_1d_vector, c)
def test_create_3d_rank_1_tensor_simple():
# test create_3d_rank_1_tensor
a = np.array([1, 2, 3])
b = np.array([4, 5, 6])
c = np.array([7, 8, 9])
result = np.array(
[[28, 35, 42, 32, 40, 48, 36, 45, 54],
[56, 70, 84, 64, 80, 96, 72, 90, 108],
[84, 105, 126, 96, 120, 144, 108, 135, 162]])
tensor = rank_1_tensor_3d(a, b, c)
assert_array_equal(result, tensor)
def test_create_3d_rank_1_tensor_symmetric():
rng = np.random.RandomState(0)
dim = rng.randint(20, 25)
v = rng.rand(dim)
tensor = rank_1_tensor_3d(v, v, v)
for i in xrange(dim):
for j in xrange(i, dim):
for k in xrange(j, dim):
true_val = v[i] * v[j] * v[k]
# check all permutation have same values
for perm in permutations([i, j, k]):
tensor_val = tensor[perm[0], (dim * perm[2]) + perm[1]]
assert_almost_equal(true_val, tensor_val)
def test_create_3d_rank_1_tensor_random():
# test create_3d_rank_1_tensor with random values
rng = np.random.RandomState(0)
dim = rng.randint(20, 25)
a = rng.rand(dim)
b = rng.rand(dim)
c = rng.rand(dim)
tensor = rank_1_tensor_3d(a, b, c)
assert_equal(2, len(tensor.shape))
assert_equal(dim, tensor.shape[0])
assert_equal(dim * dim, tensor.shape[1])
for i in xrange(dim):
for j in xrange(dim):
for k in xrange(dim):
true_val = a[i] * b[j] * c[k]
tensor_val = tensor[i, (dim * k) + j]
assert_almost_equal(true_val, tensor_val)
def test_khatri_rao_prod():
# test khatri_rao_prod
rng = np.random.RandomState(0)
dim_row_a = rng.randint(10, 20)
dim_row_b = rng.randint(10, 20)
dim_row_prod = dim_row_a * dim_row_b
dim_col = rng.randint(10, 20)
a = rng.rand(dim_row_a, dim_col)
b = rng.rand(dim_row_b, dim_col)
prod = khatri_rao_prod(a, b)
assert_equal(2, len(prod.shape))
assert_equal(dim_row_prod, prod.shape[0])
assert_equal(dim_col, prod.shape[1])
for i in xrange(dim_row_prod):
for j in xrange(dim_col):
true_val = a[i // dim_row_b, j] * b[i % dim_row_b, j]
assert_almost_equal(true_val, prod[i, j])
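# For orientation: the Khatri-Rao product tested above is the column-wise
# Kronecker product, so (assuming matching column counts) an equivalent
# plain-numpy construction would be
#
#   np.stack([np.kron(a[:, j], b[:, j]) for j in range(a.shape[1])], axis=1)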
def test_khatri_rao_properties():
# test properties of Kron & Khatri-Rao product
# check eq(2) in reference[?]
rng = np.random.RandomState(0)
dim_row_a = rng.randint(20, 50)
dim_row_b = rng.randint(20, 50)
dim_row_c = rng.randint(20, 50)
dim_col = rng.randint(20, 40)
a = rng.rand(dim_row_a, dim_col)
b = rng.rand(dim_row_b, dim_col)
# eq(2) line 1
c = rng.rand(dim_col, dim_row_a)
d = rng.rand(dim_col, dim_row_b)
kr_prod_1 = np.dot(np.kron(a, b), np.kron(d, c))
kr_prod_2 = np.kron(np.dot(a, d), np.dot(b, c))
assert_array_almost_equal(kr_prod_1, kr_prod_2)
# eq(2) line 2
kr_inv = pinv(np.kron(a, b))
kr_inv_reconstruct = np.kron(pinv(a), pinv(b))
assert_array_almost_equal(kr_inv, kr_inv_reconstruct)
# eq(2) line 3
c = rng.rand(dim_row_c, dim_col)
prod_1 = khatri_rao_prod(khatri_rao_prod(a, b), c)
prod_2 = khatri_rao_prod(a, khatri_rao_prod(b, c))
assert_array_almost_equal(prod_1, prod_2)
# eq(2) line 4
prod = khatri_rao_prod(a, b)
result = np.dot(a.T, a) * np.dot(b.T, b)
assert_array_almost_equal(np.dot(prod.T, prod), result)
# eq(2) line 5
prod_inv = pinv(prod)
result_inv = pinv(result)
reconstruct_inv = np.dot(result_inv, prod.T)
assert_array_almost_equal(prod_inv, reconstruct_inv)
def test_tensor_3d_permute():
rng = np.random.RandomState(0)
dim1 = rng.randint(10, 20)
dim2 = rng.randint(10, 20)
dim3 = rng.randint(10, 20)
tensor = rng.rand(dim1, (dim2 * dim3))
#dim1 = 2
#dim2 = 3
#dim3 = 4
#mtx = np.arange(6).reshape(2, 3)
#vector = np.array([7, 8, 9, 10])
#tensor = tensor_3d_from_matrix_vector(mtx, vector)
# test (2, 3, 1) mode
permute_2_3_1 = tensor_3d_permute(tensor, (dim1, dim2, dim3), a=2, b=3, c=1)
assert_equal(dim2, permute_2_3_1.shape[0])
assert_equal(dim3 * dim1, permute_2_3_1.shape[1])
#print tensor
#print permute_2_3_1
for i1 in xrange(dim2):
for i2 in xrange(dim3):
for i3 in xrange(dim1):
val_permute = permute_2_3_1[i1, (dim3 * i3) + i2]
val_origin = tensor[i3, (dim2 * i2) + i1]
assert_equal(val_permute, val_origin)
# TODO: test other mode
def test_tensor_3d_from_matrix_vector():
rng = np.random.RandomState(0)
dim1 = rng.randint(10, 20)
dim2 = rng.randint(10, 20)
dim3 = rng.randint(10, 20)
mtx = rng.rand(dim1, dim2)
vector = rng.rand(dim3)
tensor = tensor_3d_from_matrix_vector(mtx, vector)
for i in xrange(dim1):
for j in xrange(dim2):
for k in xrange(dim3):
val_true = mtx[i, j] * vector[k]
val = tensor[i, (dim2 * k) + j]
assert_equal(val_true, val)
def test_tensor_3d_from_vector_matrix():
rng = np.random.RandomState(3)
dim1 = rng.randint(10, 20)
dim2 = rng.randint(10, 20)
dim3 = rng.randint(10, 20)
vector = rng.rand(dim1)
mtx = rng.rand(dim2, dim3)
tensor = tensor_3d_from_vector_matrix(vector, mtx)
for i in xrange(dim1):
for j in xrange(dim2):
for k in xrange(dim3):
val_true = vector[i] * mtx[j, k]
val = tensor[i, (dim2 * k) + j]
| assert_equal(val_true, val) | numpy.testing.assert_equal |
"""
The MIT License (MIT)
Copyright (c) 2017 <NAME>
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import collections
from collections import OrderedDict
import json
import logging
import sys
import random
import types
import torch
import torchvision
import numpy as np
import scipy as scp
import scipy.ndimage
import scipy.misc
import skimage
from skimage import transform as tf
# import skimage
# import skimage.transform
import numbers
# import matplotlib.pyplot as plt
from PIL import Image
from torch.utils import data
try:
from fast_equi import extractEquirectangular_quick
from algebra import Algebra
except ImportError:
pass
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
level=logging.INFO,
stream=sys.stdout)
default_conf = {
'dataset': 'sincity_mini',
'train_file': None,
'val_file': None,
'label_encoding': 'dense',
'ignore_label': 0,
'idx_offset': 1,
'num_classes': None,
'down_label': False,
'transform': {
"equi_crop": {
"do_equi": False,
"equi_chance": 1,
"HFoV_range": [0.8, 2.5],
"VFoV_range": [0.8, 2.5],
"wrap": True,
"plane_f": 0.05
},
'presize': 0.5,
'color_augmentation_level': 1,
'fix_shape': True,
'reseize_image': False,
'patch_size': [480, 480],
'random_roll': False,
'random_crop': True,
'max_crop': 8,
'crop_chance': 0.6,
'random_resize': True,
'lower_fac': 0.5,
'upper_fac': 2,
'resize_sig': 0.4,
'random_flip': True,
'random_rotation': False,
'equirectangular': False,
'normalize': False,
'mean': [0.485, 0.456, 0.406],
'std': [0.229, 0.224, 0.225]
},
'num_worker': 4
}
DEBUG = False
def get_data_loader(conf=default_conf, split='train',
lst_file=None, batch_size=4,
pin_memory=True, shuffle=True):
dataset = LocalSegmentationLoader(
conf=conf, split=split, lst_file=lst_file)
data_loader = data.DataLoader(dataset, batch_size=batch_size,
shuffle=shuffle,
num_workers=conf['num_worker'],
pin_memory=pin_memory)
return data_loader
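# Illustrative usage sketch: default_conf ships with train_file=None, so a real
# list file has to be supplied first; the paths below are assumptions chosen
# only for illustration.
#
#   os.environ.setdefault('TV_DIR_DATA', '/data')
#   conf = dict(default_conf, train_file='lists/train.lst')
#   loader = get_data_loader(conf=conf, split='train', batch_size=4)
#   for batch in loader:
#       pass  # each batch is whatever LocalSegmentationLoader.__getitem__ yields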
class LocalSegmentationLoader(data.Dataset):
"""Face Landmarks dataset."""
def __init__(self, conf=default_conf, split="train", lst_file=None):
"""
Args:
conf (dict): Dict containing configuration parameters
split (string): Name of the data split to load ('train' or 'val').
"""
self.conf = conf
self.split = split
self.select_dataset(conf)
if lst_file is None:
if split == "train":
self.lst_file = conf['train_file']
elif split == "val":
self.lst_file = conf['val_file']
else:
raise NotImplementedError
else:
self.lst_file = lst_file
if self.conf['mask_file'] is not None:
data_base_path = os.path.dirname(__file__)
data_file = os.path.join(data_base_path,
self.conf['mask_file'])
self.mask_table = json.load(open(data_file))
else:
self.mask_table = None
self.root_dir = os.environ['TV_DIR_DATA']
self.img_list = self._read_lst_file()
self.num_classes = conf['num_classes']
assert self.conf['label_encoding'] in ['dense', 'spatial_2d']
if self.conf['label_encoding'] == 'spatial_2d':
assert self.conf['grid_dims'] in [2, 3]
if self.conf['grid_dims'] == 2:
self.root_classes = int(np.ceil( | np.sqrt(self.num_classes) | numpy.sqrt |
import logging
import numpy as np
from matplotlib.patches import Ellipse, FancyArrow
from mot.common.state import Gaussian
logging.getLogger("matplotlib").setLevel(logging.WARNING)
class BasicPlotter:
@staticmethod
def plot_point(
ax,
x,
y,
label=None,
marker="o",
color="b",
marker_size=50,
):
scatter = ax.scatter(x, y, marker=marker, color=color, label=label, s=marker_size, edgecolors="k")
return scatter
@staticmethod
def plot_covariance_ellipse(ax, mean, covariance, color="b"):
assert mean.shape == (2,), f"mean has {mean.shape} shape"
covariance = covariance[:2, :2]
assert covariance.shape == (2, 2), f"covariance has {covariance.shape} shape"
lambda_, v = np.linalg.eig(covariance)
lambda_ = | np.sqrt(lambda_) | numpy.sqrt |
import numpy as np
import pandas as pd
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
class NoiseRemover:
def __init__(self, threshold: float = 0.6):
self.threshold = threshold
def set_threshold(self, threshold_value: float) -> None:
self.threshold = threshold_value
def detect_noise(self,
X: pd.DataFrame,
y: pd.Series,
method: str = 'rf') -> np.ndarray:
if method == 'rf':
rf = RandomForestClassifier(oob_score=True).fit(X, y)
noise_prob = 1 - rf.oob_decision_function_[range(len(y)), y]
return np.argwhere(noise_prob > self.threshold).reshape(-1)
elif method == 'knn':
knn = KNeighborsClassifier().fit(X, y)
noise_prob = 1 - knn.predict_proba(X)[range(len(X)), y]
return np.argwhere(noise_prob > self.threshold).reshape(-1)
else:
raise AttributeError('Method not recognized. Choose one of {rf, knn}')
def labelfilter_noisy_indexes(self,
noisy_indexes: np.ndarray,
data: pd.DataFrame,
label: str) -> np.ndarray:
noisy_data = data.loc[noisy_indexes]
return noisy_data.loc[noisy_data.label == label, :].index
# --- Main Function --- #
def remove_noise(self: None,
X: pd.DataFrame,
y: pd.Series) -> pd.DataFrame:
unique_labels = y.unique()
n_labels = len(unique_labels)
data = X.copy()
data['label'] = y
if n_labels == 2:
key_label = unique_labels[0]
new_y = y.apply(lambda x: 1 if x == key_label else 0)
noisy_indexes = self.detect_noise(
data.drop('label', axis=1), new_y)
clean_noisy_indexes = self.labelfilter_noisy_indexes(
noisy_indexes, data, key_label)
return data.loc[data.index.difference(clean_noisy_indexes)]
else:
all_noisy_indexes = | np.array([]) | numpy.array |
import numpy as np
import pyhdf.SD as h4
import h5py as h5
def rdh5(h5_filename):
x = np.array([])
y = np.array([])
z = np.array([])
f = np.array([])
h5file = h5.File(h5_filename, 'r')
f = h5file['Data']
dims = f.shape
ndims = np.ndim(f)
#Get the scales if they exist:
for i in range(0,ndims):
if i == 0:
if (len(h5file['Data'].dims[0].keys())!=0):
x = h5file['Data'].dims[0][0]
elif i == 1:
if (len(h5file['Data'].dims[1].keys())!=0):
y = h5file['Data'].dims[1][0]
elif i == 2:
if (len(h5file['Data'].dims[2].keys())!=0):
z = h5file['Data'].dims[2][0]
x = np.array(x)
y = np.array(y)
z = np.array(z)
f = np.array(f)
h5file.close()
return (x,y,z,f)
def rdhdf(hdf_filename):
if (hdf_filename.endswith('h5')):
x,y,z,f = rdh5(hdf_filename)
return (x,y,z,f)
x = np.array([])
y = np.array([])
z = np.array([])
f = np.array([])
# Open the HDF file
sd_id = h4.SD(hdf_filename)
#Read dataset. In all PSI hdf4 files, the
#data is stored in "Data-Set-2":
sds_id = sd_id.select('Data-Set-2')
f = sds_id.get()
#Get number of dimensions:
ndims = np.ndim(f)
# Get the scales. Check if they exist by looking at the 3rd
# element of dim.info(). 0 = none, 5 = float32, 6 = float64.
# see http://pysclint.sourceforge.net/pyhdf/pyhdf.SD.html#SD
# and http://pysclint.sourceforge.net/pyhdf/pyhdf.SD.html#SDC
for i in range(0,ndims):
dim = sds_id.dim(i)
if dim.info()[2] != 0:
if i == 0:
x = dim.getscale()
elif i == 1:
y = dim.getscale()
elif i == 2:
z = dim.getscale()
sd_id.end()
x = np.array(x)
y = np.array(y)
z = np.array(z)
f = np.array(f)
return (x,y,z,f)
def rdhdf_1d(hdf_filename):
x,y,z,f = rdhdf(hdf_filename)
return (x,f)
def rdhdf_2d(hdf_filename):
x,y,z,f = rdhdf(hdf_filename)
if (hdf_filename.endswith('h5')):
return(x,y,f)
return (y,x,f)
def rdhdf_3d(hdf_filename):
x,y,z,f = rdhdf(hdf_filename)
if (hdf_filename.endswith('h5')):
return(x,y,z,f)
return (z,y,x,f)
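# Illustrative usage sketch of the axis-ordering convention implemented above;
# the filenames are placeholders. Legacy HDF4 (.hdf) input returns the scales
# reversed relative to .h5 input:
#
#   x, f = rdhdf_1d('profile.h5')
#   x, y, f = rdhdf_2d('slice.h5')       # (x, y, f) for .h5, (y, x, f) for .hdf
#   x, y, z, f = rdhdf_3d('cube.hdf')    # (z, y, x, f) for .hdf, (x, y, z, f) for .h5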
def wrh5(h5_filename, x, y, z, f):
h5file = h5.File(h5_filename, 'w')
# Create the dataset (Data is the name used by the psi data)).
h5file.create_dataset("Data", data=f)
# Make sure scales were actually provided by checking the type of x, which can
# be None, or None converted by np.asarray (the two cases are trapped separately)
if x is None:
x = np.array([], dtype=f.dtype)
y = np.array([], dtype=f.dtype)
z = np.array([], dtype=f.dtype)
if x.any() == None:
x = np.array([], dtype=f.dtype)
y = np.array([], dtype=f.dtype)
z = np.array([], dtype=f.dtype)
# Make sure scales are the same precision as data.
x=x.astype(f.dtype)
y=y.astype(f.dtype)
z=z.astype(f.dtype)
#Get number of dimensions:
ndims = np.ndim(f)
#Set the scales:
for i in range(0,ndims):
if i == 0 and len(x) != 0:
dim = h5file.create_dataset("dim1", data=x)
h5file['Data'].dims.create_scale(dim,'dim1')
h5file['Data'].dims[0].attach_scale(dim)
h5file['Data'].dims[0].label = 'dim1'
if i == 1 and len(y) != 0:
dim = h5file.create_dataset("dim2", data=y)
h5file['Data'].dims.create_scale(dim,'dim2')
h5file['Data'].dims[1].attach_scale(dim)
h5file['Data'].dims[1].label = 'dim2'
elif i == 2 and len(z) != 0:
dim = h5file.create_dataset("dim3", data=z)
h5file['Data'].dims.create_scale(dim,'dim3')
h5file['Data'].dims[2].attach_scale(dim)
h5file['Data'].dims[2].label = 'dim3'
# Close the file:
h5file.close()
def wrhdf(hdf_filename, x, y, z, f):
if (hdf_filename.endswith('h5')):
wrh5(hdf_filename, x, y, z, f)
return
# Create an HDF file
sd_id = h4.SD(hdf_filename, h4.SDC.WRITE | h4.SDC.CREATE | h4.SDC.TRUNC)
if f.dtype == np.float32:
ftype = h4.SDC.FLOAT32
elif f.dtype == np.float64:
ftype = h4.SDC.FLOAT64
# Create the dataset (Data-Set-2 is the name used by the psi data)).
sds_id = sd_id.create("Data-Set-2", ftype, f.shape)
#Get number of dimensions:
ndims = np.ndim(f)
# Make sure scales were actually provided by checking the type of x, which can
# be None, or None converted by np.asarray (the two cases are trapped separately)
if x is None:
x = np.array([], dtype=f.dtype)
y = np.array([], dtype=f.dtype)
z = np.array([], dtype=f.dtype)
if x.any() == None:
x = np.array([], dtype=f.dtype)
y = np.array([], dtype=f.dtype)
z = np.array([], dtype=f.dtype)
#Set the scales (or don't if x is none or length zero)
for i in range(0,ndims):
dim = sds_id.dim(i)
if i == 0 and len(x) != 0:
if x.dtype == np.float32:
stype = h4.SDC.FLOAT32
elif x.dtype == np.float64:
stype = h4.SDC.FLOAT64
dim.setscale(stype,x)
elif i == 1 and len(y) != 0:
if y.dtype == np.float32:
stype = h4.SDC.FLOAT32
elif y.dtype == np.float64:
stype = h4.SDC.FLOAT64
dim.setscale(stype,y)
elif i == 2 and len(z) != 0:
if z.dtype == np.float32:
stype = h4.SDC.FLOAT32
elif z.dtype == np.float64:
stype = h4.SDC.FLOAT64
dim.setscale(stype,z)
# Write the data:
sds_id.set(f)
# Close the dataset:
sds_id.endaccess()
# Flush and close the HDF file:
sd_id.end()
def wrhdf_1d(hdf_filename,x,f):
x = np.asarray(x)
y = np.array([])
z = np.array([])
f = np.asarray(f)
wrhdf(hdf_filename,x,y,z,f)
def wrhdf_2d(hdf_filename,x,y,f):
x = np.asarray(x)
y = np.asarray(y)
z = np.array([])
f = np.asarray(f)
if (hdf_filename.endswith('h5')):
wrhdf(hdf_filename,x,y,z,f)
return
wrhdf(hdf_filename,y,x,z,f)
def wrhdf_3d(hdf_filename,x,y,z,f):
x = np.asarray(x)
y = np.asarray(y)
z = | np.asarray(z) | numpy.asarray |
"""Provides certain utilities for the streamlit demo."""
from abc import ABC, abstractmethod
from itertools import product, repeat
from typing import List, Iterable, Tuple, Optional
import matplotlib.pyplot as plt
import numpy as np
import streamlit as st
from streamlit.delta_generator import DeltaGenerator
from tnmf.TransformInvariantNMF import TransformInvariantNMF, MiniBatchAlgorithm
from tnmf.utils.signals import generate_pulse_train, generate_block_image
HELP_CHANNEL = \
'''The **number of channels** each input signal comprises. In contrast to the remaining signal dimensions, which are
treated shift-invariant (meaning that atom placement is flexible), channels represent the inflexible part of the
factorization in the sense that each atom always covers all channels.'''
def explanation(text: str, verbose: bool):
if verbose:
st.sidebar.caption(text)
def st_define_nmf_params(default_params: dict, have_ground_truth: bool = True, verbose: bool = True) -> dict:
"""
Defines all necessary NMF parameters via streamlit widgets.
Parameters
----------
default_params : dict
Contains the default parameters that are used if the created streamlit checkbox is True.
have_ground_truth : bool
If True, the parameters in default_params are considered being ground truth values and
the respective explanatory texts are modified accordingly.
verbose : bool
If True, show detailed information.
Returns
-------
nmf_params : dict
A dictionary containing the selected NMF parameters.
"""
st.sidebar.markdown('# TNMF settings')
help_n_atoms = 'The **number of atoms**.'
if have_ground_truth:
# decide if ground truth atom number shall be used
help_n_atoms += ' To use the ground truth dictionary size, tick the checkbox above.'
help_use_n_atoms = \
'''If selected, the **number of atoms** used by the model is set to the actual number of atoms used
for signal generation.'''
use_n_atoms = st.sidebar.checkbox('Use ground truth number of atoms', True, help=help_use_n_atoms)
explanation(help_use_n_atoms, verbose)
else:
use_n_atoms = False
n_atoms_default = default_params['n_atoms']
n_atoms = st.sidebar.number_input('# Atoms', value=n_atoms_default, min_value=1,
help=help_n_atoms) if not use_n_atoms else n_atoms_default
explanation(help_n_atoms, verbose and not use_n_atoms)
help_atom_shape = 'The **size of each atom** dimension.'
if have_ground_truth:
# decide if ground truth atom shape shall be used
help_atom_shape += ' To use the ground truth atom size, tick the checkbox above.'
help_use_atom_shape = \
'''If selected, the **size of the atoms** used by the model is set to the actual size of the atoms used
for signal generation.'''
use_atom_shape = st.sidebar.checkbox('Use ground truth atom size', True, help=help_use_atom_shape)
explanation(help_use_atom_shape, verbose)
else:
use_atom_shape = False
default_atom_shape = default_params['atom_shape']
atom_shape = tuple([st.sidebar.number_input('Atom size',
value=default_atom_shape[0], min_value=1,
help=help_atom_shape)] * len(default_params['atom_shape'])
) if not use_atom_shape else default_atom_shape
explanation(help_atom_shape, verbose and not use_atom_shape)
help_sparsity_H = 'The strength of the **L1 activation sparsity regularization** imposed on the optimization problem.'
sparsity_H = st.sidebar.number_input('Activation sparsity', min_value=0.0, value=0.0, step=0.01,
help=help_sparsity_H)
explanation(help_sparsity_H, verbose)
help_inhibition_strength = \
'''The strength of the **same-atom lateral activation sparsity regularization** imposed on the optimization problem.
The parameter controls how strong the activation of an atom at a particular shift location suppresses the activation
of *the same atom* at neighboring locations.'''
inhibition_strength = st.sidebar.number_input('Lateral activation inhibition (same atom)',
min_value=0.0, value=0.1, step=0.01,
help=help_inhibition_strength)
explanation(help_inhibition_strength, verbose)
help_cross_atom_inhibition_strength = \
'''The strength of the **cross-atom lateral activation sparsity regularization** imposed on the optimization problem.
The parameter controls how strong the activation of an atom at a particular shift location suppresses the activation
of *all other atoms* at the same and neighboring locations.'''
cross_atom_inhibition_strength = st.sidebar.number_input('Lateral activation inhibition (cross-atom)',
min_value=0.0, value=0.1, step=0.01,
help=help_cross_atom_inhibition_strength)
explanation(help_cross_atom_inhibition_strength, verbose)
help_minibatch = \
'''Process the samples in **minibatches** instead of the full data set at once.'''
minibatch_updates = st.sidebar.checkbox('Minibatch updates', value=True, help=help_minibatch)
explanation(help_minibatch, verbose)
if not minibatch_updates:
help_n_iterations = '''The **number of multiplicative updates** to the atom dictionary and activation tensors.'''
n_iterations = st.sidebar.number_input('# Iterations', value=100, min_value=1, help=help_n_iterations)
explanation(help_n_iterations, verbose)
else:
help_algorithm = '''The **minibatch update algorithm** to be used.'''
algorithm = st.sidebar.radio('Minibatch algorithm', [
'4 - Cyclic MiniBatch for MU rules',
'5 - Asymmetric SG MiniBatch MU rules (ASG-MU)',
'6 - Greedy SG MiniBatch MU rules (GSG-MU)',
'7 - Asymmetric SAG MiniBatch MU rules (ASAG-MU)',
'8 - Greedy SAG MiniBatch MU rules (GSAG-MU)'],
1, help=help_algorithm)
algorithm = MiniBatchAlgorithm(int(algorithm[0]))
explanation(help_algorithm, verbose)
help_epoch = '''The number of **passes through the whole data set**.'''
n_epochs = st.sidebar.number_input('# Epochs', value=100, min_value=1, help=help_epoch)
explanation(help_epoch, verbose)
help_batch_size = '''The number of **samples per batch**.'''
batch_size = st.sidebar.number_input('# Batch size', value=3, min_value=1, help=help_batch_size)
explanation(help_batch_size, verbose)
sag_lambda = None
if algorithm in (MiniBatchAlgorithm.ASAG_MU, MiniBatchAlgorithm.GSAG_MU):
help_sag_lambda = \
'''The **exponential forgetting factor** for the stochastic **average** gradient updates. A value of 1.0
means that only the latest minibatch is used for the update. The smaller the value, the more weight is put on
older minibatches.'''
sag_lambda = st.sidebar.number_input('Lambda', min_value=0.0, max_value=1., value=0.2, step=0.01,
help=help_sag_lambda)
explanation(help_sag_lambda, verbose)
help_backend = \
'''The **optimization backend** for computing the multiplicative gradients.
**Note:** All backends yield the same results. Within the scope of this demo, switching between backends is thus
for speed comparisons only.'''
backend = st.sidebar.selectbox('Backend', ['numpy', 'numpy_fft', 'numpy_caching_fft', 'pytorch', 'pytorch_fft'], 4,
help=help_backend)
explanation(help_backend, verbose)
help_reconstruction_mode = \
'''Defines the **convolution mode** for the signal reconstruction.\
**valid:** The activation tensor
is smaller than the input by the atom size along the shift dimensions, so that the convolution of atoms and
activations matches the size of the input.\
**full:** The activation tensor is larger than the input by the atom size along the shift dimensions. Compared to the
'valid' reconstruction mode, this also creates shifted versions of each atom that only partially overlap with the input
array. The convolution result is trimmed to the appropriate size.\
**circular:** The activation tensor is of the same
size as the input. Other than in 'full' mode, parts of the convolution result that are outside the range of the
input array are inserted circularly on the respective other side of the array.'''
reconstruction_mode = st.sidebar.selectbox('Reconstruction', ['valid', 'full', 'circular'], 2,
help=help_reconstruction_mode)
explanation(help_reconstruction_mode, verbose)
nmf_params = dict(
n_atoms=n_atoms,
atom_shape=atom_shape,
backend=backend,
reconstruction_mode=reconstruction_mode,
)
if not minibatch_updates:
fit_params = dict(
n_iterations=n_iterations,
sparsity_H=sparsity_H,
inhibition_strength=inhibition_strength,
cross_atom_inhibition_strength=cross_atom_inhibition_strength,
)
else:
fit_params = dict(
algorithm=algorithm,
n_epochs=n_epochs,
batch_size=batch_size,
sag_lambda=sag_lambda,
sparsity_H=sparsity_H,
inhibition_strength=inhibition_strength,
cross_atom_inhibition_strength=cross_atom_inhibition_strength,
)
return nmf_params, fit_params
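# Illustrative usage sketch; the default_params contents are assumptions chosen
# only for illustration (in the demo they come from the ground-truth signal
# settings):
#
#   nmf_params, fit_params = st_define_nmf_params(
#       {'n_atoms': 5, 'atom_shape': (7, 7)}, have_ground_truth=True)
#   # nmf_params -> n_atoms, atom_shape, backend, reconstruction_mode
#   # fit_params -> sparsity/inhibition settings plus either n_iterations or the
#   #               minibatch options, depending on the selected update mode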
class SignalTool(ABC):
"""An abstract base class that serves as a factory for creating specialized objects that facilitate the handling of
different signal types."""
def __new__(cls, n_dims: int):
"""
Parameters
----------
n_dims : int
The dimensionality of the signals to be managed.
* n_dims=1 for time series.
* n_dims=2 for image data.
"""
if n_dims == 1:
return super(SignalTool, cls).__new__(SignalTool1D)
if n_dims == 2:
return super(SignalTool, cls).__new__(SignalTool2D)
raise ValueError("'n_dims' must be in {1, 2}")
@classmethod
def st_generate_input(cls, verbose: bool = True) -> Tuple[np.ndarray, dict]:
"""
Defines all signal parameters via streamlit widgets and returns a generated input matrix V for the NMF together
with a dictionary containing details of the used NMF atoms.
Parameters
----------
verbose : bool
If True, show detailed information.
Returns
-------
V : np.ndarray
The generated input for the NMF.
nmf_params : dict
Ground truth NMF atom parameters that were used for the signal generation.
"""
st.sidebar.markdown('# Signal settings')
# define the number input signals
help_n_signals = \
'''The **number of generated signals** passed as input to the algorithm.
All signals have the same shape and number of channels.'''
n_signals = st.sidebar.number_input('# Signals', min_value=1, value=10, help=help_n_signals)
explanation(help_n_signals, verbose)
signal_params = cls.st_define_signal_params(verbose=verbose)
# create the input
V = []
for _ in range(n_signals):
signal, W = cls.generate_signal(signal_params)
V.append(signal)
V = | np.stack(V) | numpy.stack |
import numpy as np
def cube_gradient(cube, idx_min, steps):
grad = np.array( | np.gradient(cube, *steps) | numpy.gradient |
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 28 12:10:11 2019
@author: Omer
"""
## File handler
## This file was initially intended purely to generate the matrices for the near earth code found in: https://public.ccsds.org/Pubs/131x1o2e2s.pdf
## The values from the above pdf were copied manually to a txt file, and it is the purpose of this file to parse it.
## The emphasis here is on correctness; I currently do not see a reason to generalise this file, since matrices will be saved in either JSON or some matrix-friendly format.
import numpy as np
from scipy.linalg import circulant
#import matplotlib.pyplot as plt
import scipy.io
import common
import hashlib
import os
projectDir = os.environ.get('LDPC')
if projectDir == None:
import pathlib
projectDir = pathlib.Path(__file__).parent.absolute()
## <NAME>: added on 01/12/2020, need to make sure this doesn't break anything.
import sys
sys.path.insert(1, projectDir)
FILE_HANDLER_INT_DATA_TYPE = np.int32
GENERAL_CODE_MATRIX_DATA_TYPE = np.int32
NIBBLE_CONVERTER = np.array([8, 4, 2, 1], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
def nibbleToHex(inputArray):
n = NIBBLE_CONVERTER.dot(inputArray)
if n == 10:
h = 'A'
elif n== 11:
h = 'B'
elif n== 12:
h = 'C'
elif n== 13:
h = 'D'
elif n== 14:
h = 'E'
elif n== 15:
h = 'F'
else:
h = str(n)
return h
def binaryArraytoHex(inputArray):
d1 = len(inputArray)
assert (d1 % 4 == 0)
outputArray = np.zeros(d1//4, dtype = str)
outputString = ''
for j in range(d1//4):
nibble = inputArray[4 * j : 4 * j + 4]
h = nibbleToHex(nibble)
outputArray[j] = h
outputString = outputString + h
return outputArray, outputString
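# Illustrative sanity check (inputs made up for illustration): the helpers above
# and below are inverses of each other on nibble-aligned arrays, e.g.
#
#   _, s = binaryArraytoHex(np.array([1, 0, 1, 0, 1, 1, 1, 1]))   # s == 'AF'
#   hexStringToBinaryArray('AF')   # -> array([1, 0, 1, 0, 1, 1, 1, 1])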
def hexStringToBinaryArray(hexString):
outputBinary = np.array([], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
for i in hexString:
if i == '0':
nibble = np.array([0,0,0,0], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
elif i == '1':
nibble = np.array([0,0,0,1], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
elif i == '2':
nibble = np.array([0,0,1,0], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
elif i == '3':
nibble = np.array([0,0,1,1], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
elif i == '4':
nibble = np.array([0,1,0,0], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
elif i == '5':
nibble = np.array([0,1,0,1], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
elif i == '6':
nibble = np.array([0,1,1,0], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
elif i == '7':
nibble = np.array([0,1,1,1], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
elif i == '8':
nibble = np.array([1,0,0,0], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
elif i == '9':
nibble = np.array([1,0,0,1], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
elif i == 'A':
nibble = np.array([1,0,1,0], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
elif i == 'B':
nibble = np.array([1,0,1,1], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
elif i == 'C':
nibble = np.array([1,1,0,0], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
elif i == 'D':
nibble = np.array([1,1,0,1], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
elif i == 'E':
nibble = np.array([1,1,1,0], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
elif i == 'F':
nibble = | np.array([1,1,1,1], dtype = GENERAL_CODE_MATRIX_DATA_TYPE) | numpy.array |
# -*- coding: utf-8 -*-
###############################################################################
# Copyright (c), Forschungszentrum Jülich GmbH, IAS-1/PGI-1, Germany. #
# All rights reserved. #
# This file is part of the Masci-tools package. #
# (Material science tools) #
# #
# The code is hosted on GitHub at https://github.com/judftteam/masci-tools. #
# For further information on the license, see the LICENSE.txt file. #
# For further information please visit http://judft.de/. #
# #
###############################################################################
"""
Here are general and special bokeh plots to use
"""
from .bokeh_plotter import BokehPlotter
from .parameters import ensure_plotter_consistency, NestedPlotParameters
from .data import process_data_arguments
import pandas as pd
import numpy as np
import warnings
from pprint import pprint
################## Helpers ################
plot_params = BokehPlotter()
def set_bokeh_plot_defaults(**kwargs):
"""
Set defaults for bokeh backend
according to the given keyword arguments
Available defaults can be seen in :py:class:`~masci_tools.vis.bokeh_plotter.BokehPlotter`
"""
plot_params.set_defaults(**kwargs)
def reset_bokeh_plot_defaults():
"""
Reset the defaults for bokeh backend
to the hardcoded defaults
Available defaults can be seen in :py:class:`~masci_tools.vis.bokeh_plotter.BokehPlotter`
"""
plot_params.reset_defaults()
def show_bokeh_plot_defaults():
"""
Show the currently set defaults for bokeh backend
Available defaults can be seen in :py:class:`~masci_tools.vis.bokeh_plotter.BokehPlotter`
"""
pprint(plot_params.get_dict())
def get_bokeh_help(key):
"""
Print the decription of the given key in the bokeh backend
Available defaults can be seen in :py:class:`~masci_tools.vis.bokeh_plotter.BokehPlotter`
"""
plot_params.get_description(key)
def load_bokeh_defaults(filename='plot_bokeh_defaults.json'):
"""
Load defaults for the bokeh backend from a json file.
:param filename: filename,from where the defaults should be taken
"""
plot_params.load_defaults(filename)
def save_bokeh_defaults(filename='plot_bokeh_defaults.json', save_complete=False):
"""
Save the current defaults for the bokeh backend to a json file.
:param filename: filename, where the defaults should be stored
:param save_complete: bool, if True not only the overwritten user defaults
but also the unmodified hardcoded defaults are stored
"""
plot_params.save_defaults(filename, save_complete=save_complete)
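# Illustrative usage sketch for the default-handling helpers above; the keyword
# shown is an assumption for illustration (any key accepted by BokehPlotter can
# be used):
#
#   set_bokeh_plot_defaults(figure_kwargs={'width': 800})
#   show_bokeh_plot_defaults()
#   reset_bokeh_plot_defaults()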
##################################### general plots ##########################
@ensure_plotter_consistency(plot_params)
def bokeh_scatter(x,
y=None,
*,
xlabel='x',
ylabel='y',
title='',
figure=None,
data=None,
saveas='scatter',
copy_data=False,
**kwargs):
"""
Create an interactive scatter plot with bokeh
:param x: arraylike or key for data for the x-axis
:param y: arraylike or key for data for the y-axis
:param data: source for the data of the plot (pandas Dataframe for example)
:param xlabel: label for the x-axis
:param ylabel: label for the y-axis
:param title: title of the figure
:param figure: bokeh figure (optional), if provided the plot will be added to this figure
:param saveas: filename of the output file
:param copy_data: bool, if True the data argument will be copied
Kwargs will be passed on to :py:class:`masci_tools.vis.bokeh_plotter.BokehPlotter`.
If the arguments are not recognized they are passed on to the bokeh function `scatter`
"""
from bokeh.models import ColumnDataSource
if isinstance(x, (dict, pd.DataFrame, ColumnDataSource)) or x is None:
warnings.warn(
'Passing the source as first argument is deprecated. Please pass in source by the keyword data '
'and xdata and ydata as the first arguments', DeprecationWarning)
data = x
x = kwargs.pop('xdata', 'x')
y = kwargs.pop('ydata', 'y')
plot_data = process_data_arguments(data=data,
x=x,
y=y,
copy_data=copy_data,
single_plot=True,
same_length=True,
use_column_source=True)
entry, source = plot_data.items(first=True)
plot_params.set_defaults(default_type='function', name=entry.y)
kwargs = plot_params.set_parameters(continue_on_error=True, **kwargs)
p = plot_params.prepare_figure(title, xlabel, ylabel, figure=figure)
plot_kwargs = plot_params.plot_kwargs(plot_type='scatter')
res = p.scatter(x=entry.x, y=entry.y, source=source, **plot_kwargs, **kwargs)
plot_params.add_tooltips(p, res, entry)
if plot_params['level'] is not None:
res.level = plot_params['level']
plot_params.draw_straight_lines(p)
plot_params.set_limits(p)
plot_params.save_plot(p, saveas)
return p
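# Illustrative usage sketch for bokeh_scatter; the data values are made up for
# illustration only:
#
#   x = np.linspace(0, 2 * np.pi, 50)
#   fig = bokeh_scatter(x, np.sin(x), xlabel='x', ylabel='sin(x)',
#                       title='demo', saveas='sin_scatter')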
@ensure_plotter_consistency(plot_params)
def bokeh_multi_scatter(x,
y=None,
*,
data=None,
figure=None,
xlabel='x',
ylabel='y',
title='',
saveas='scatter',
copy_data=False,
set_default_legend=True,
**kwargs):
"""
Create an interactive scatter (muliple data sets possible) plot with bokeh
:param x: arraylike or key for data for the x-axis
:param y: arraylike or key for data for the y-axis
:param data: source for the data of the plot (pandas Dataframe for example)
:param xlabel: label for the x-axis
:param ylabel: label for the y-axis
:param title: title of the figure
:param figure: bokeh figure (optional), if provided the plot will be added to this figure
:param saveas: filename of the output file
:param copy_data: bool, if True the data argument will be copied
:param set_default_legend: bool if True the data names are used to generate default legend labels
Kwargs will be passed on to :py:class:`masci_tools.vis.bokeh_plotter.BokehPlotter`.
If the arguments are not recognized they are passed on to the bokeh function `scatter`
"""
from bokeh.models import ColumnDataSource
if isinstance(x, (dict, pd.DataFrame, ColumnDataSource)) or x is None:
warnings.warn(
'Passing the source as first argument is deprecated. Please pass in source by the keyword data '
'and xdata and ydata as the first arguments', DeprecationWarning)
data = x
x = kwargs.pop('xdata', 'x')
y = kwargs.pop('ydata', 'y')
plot_data = process_data_arguments(data=data,
x=x,
y=y,
same_length=True,
copy_data=copy_data,
use_column_source=True)
plot_params.single_plot = False
plot_params.num_plots = len(plot_data)
if plot_data.distinct_datasets('x') == 1:
default_legend_label = plot_data.get_keys('y')
else:
default_legend_label = plot_data.get_keys('x')
if set_default_legend:
plot_params.set_defaults(default_type='function', legend_label=default_legend_label)
plot_params.set_defaults(default_type='function', name=default_legend_label)
kwargs = plot_params.set_parameters(continue_on_error=True, **kwargs)
p = plot_params.prepare_figure(title, xlabel, ylabel, figure=figure)
#Process the given color arguments
plot_params.set_color_palette_by_num_plots()
plot_kwargs = plot_params.plot_kwargs(plot_type='scatter')
for indx, ((entry, source), plot_kw) in enumerate(zip(plot_data.items(), plot_kwargs)):
res = p.scatter(x=entry.x, y=entry.y, source=source, **plot_kw, **kwargs)
plot_params.add_tooltips(p, res, entry)
if plot_params[('level', indx)] is not None:
res.level = plot_params[('level', indx)]
plot_params.draw_straight_lines(p)
plot_params.set_limits(p)
plot_params.set_legend(p)
plot_params.save_plot(p, saveas)
return p
@ensure_plotter_consistency(plot_params)
def bokeh_line(x,
y=None,
*,
data=None,
figure=None,
xlabel='x',
ylabel='y',
title='',
saveas='line',
plot_points=False,
area_curve=0,
copy_data=False,
set_default_legend=True,
**kwargs):
"""
Create an interactive multi-line plot with bokeh
:param x: arraylike or key for data for the x-axis
:param y: arraylike or key for data for the y-axis
:param data: source for the data of the plot (optional) (pandas Dataframe for example)
:param xlabel: label for the x-axis
:param ylabel: label for the y-axis
:param title: title of the figure
:param figure: bokeh figure (optional), if provided the plot will be added to this figure
:param saveas: filename of the output file
:param plot_points: bool, if True also plot the points with a scatterplot on top
:param copy_data: bool, if True the data argument will be copied
:param set_default_legend: bool if True the data names are used to generate default legend labels
Kwargs will be passed on to :py:class:`masci_tools.vis.bokeh_plotter.BokehPlotter`.
If the arguments are not recognized they are passed on to the bokeh function `line`
"""
from bokeh.models import ColumnDataSource
if isinstance(x, (dict, pd.DataFrame, ColumnDataSource)) or x is None:
warnings.warn(
'Passing the source as first argument is deprecated. Please pass in source by the keyword data '
'and xdata and ydata as the first arguments', DeprecationWarning)
data = x
x = kwargs.pop('xdata', 'x')
y = kwargs.pop('ydata', 'y')
plot_data = process_data_arguments(data=data,
x=x,
y=y,
shift=area_curve,
same_length=True,
copy_data=copy_data,
use_column_source=True)
plot_params.single_plot = False
plot_params.num_plots = len(plot_data)
if plot_data.distinct_datasets('x') == 1:
default_legend_label = plot_data.get_keys('y')
else:
default_legend_label = plot_data.get_keys('x')
if set_default_legend:
plot_params.set_defaults(default_type='function', legend_label=default_legend_label)
plot_params.set_defaults(default_type='function', name=default_legend_label)
kwargs = plot_params.set_parameters(continue_on_error=True, **kwargs)
p = plot_params.prepare_figure(title, xlabel, ylabel, figure=figure)
#Process the given color arguments
plot_params.set_color_palette_by_num_plots()
plot_kw_line = plot_params.plot_kwargs(plot_type='line')
plot_kw_scatter = plot_params.plot_kwargs(plot_type='scatter')
plot_kw_area = plot_params.plot_kwargs(plot_type='area')
area_curve = kwargs.pop('area_curve', None)
for indx, ((entry, source), kw_line, kw_scatter,
kw_area) in enumerate(zip(plot_data.items(), plot_kw_line, plot_kw_scatter, plot_kw_area)):
if plot_params[('area_plot', indx)]:
if plot_params[('area_vertical', indx)]:
p.harea(y=entry.y, x1=entry.x, x2=entry.shift, **kw_area, source=source)
else:
p.varea(x=entry.x, y1=entry.y, y2=entry.shift, **kw_area, source=source)
res = p.line(x=entry.x, y=entry.y, source=source, **kw_line, **kwargs)
plot_params.add_tooltips(p, res, entry)
res2 = None
if plot_points:
res2 = p.scatter(x=entry.x, y=entry.y, source=source, **kw_scatter)
if plot_params[('level', indx)] is not None:
res.level = plot_params[('level', indx)]
if res2 is not None:
res2.level = plot_params[('level', indx)]
plot_params.draw_straight_lines(p)
plot_params.set_limits(p)
plot_params.set_legend(p)
plot_params.save_plot(p, saveas)
return p
@ensure_plotter_consistency(plot_params)
def bokeh_dos(energy_grid,
dos_data=None,
*,
data=None,
energy_label=r'E-E_F [eV]',
dos_label=r'DOS [1/eV]',
title=r'Density of states',
xyswitch=False,
e_fermi=0,
saveas='dos_plot',
copy_data=False,
**kwargs):
"""
Create an interactive dos plot (non-spinpolarized) with bokeh
Both horizontal and vertical orientations are possible
:param energy_grid: arraylike or key data for the energy grid
:param dos_data: arraylike or key data for the DOS
:param data: source for the DOS data (optional) of the plot (pandas Dataframe for example)
:param energy_label: label for the energy-axis
:param dos_label: label for the dos-axis
:param title: title of the figure
:param xyswitch: bool if True, the energy will be plotted along the y-direction
:param e_fermi: float, determines, where to put the line for the fermi energy
:param saveas: filename of the output file
:param copy_data: bool, if True the data argument will be copied
Kwargs will be passed on to :py:func:`bokeh_line()`
"""
from bokeh.models import ColumnDataSource
if isinstance(energy_grid, (dict, pd.DataFrame, ColumnDataSource)) or energy_grid is None:
warnings.warn(
'Passing the dataframe as first argument is deprecated. Please pass in source by the keyword data '
'and energy_grid and dos_data as the first arguments', DeprecationWarning)
data = energy_grid
energy_grid = kwargs.pop('energy', 'energy_grid')
dos_data = kwargs.pop('ynames', None)
if dos_data is None and data is not None:
dos_data = set(data.keys()) - set([energy_grid] if isinstance(energy_grid, str) else energy_grid)
dos_data = sorted(dos_data)
plot_data = process_data_arguments(data=data,
energy=energy_grid,
dos=dos_data,
same_length=True,
copy_data=copy_data,
use_column_source=True)
plot_params.single_plot = False
plot_params.num_plots = len(plot_data)
if 'limits' in kwargs:
limits = kwargs.pop('limits')
if 'x' not in limits and 'y' not in limits:
if xyswitch:
limits['x'], limits['y'] = limits.pop('dos', None), limits.pop('energy', None)
else:
limits['x'], limits['y'] = limits.pop('energy', None), limits.pop('dos', None)
kwargs['limits'] = {k: v for k, v in limits.items() if v is not None}
lines = {'horizontal': 0}
lines['vertical'] = e_fermi
if xyswitch:
lines['vertical'], lines['horizontal'] = lines['horizontal'], lines['vertical']
plot_params.set_defaults(default_type='function',
straight_lines=lines,
tooltips=[('Name', '$name'), ('Energy', '@{x}{{0.0[00]}}'),
('DOS value', '@$name{{0.00}}')],
figure_kwargs={
'width': 1000,
})
if xyswitch:
x, y = plot_data.get_keys('dos'), plot_data.get_keys('energy')
xlabel, ylabel = dos_label, energy_label
plot_params.set_defaults(default_type='function', area_vertical=True)
else:
xlabel, ylabel = energy_label, dos_label
x, y = plot_data.get_keys('energy'), plot_data.get_keys('dos')
p = bokeh_line(x,
y,
data=plot_data.data,
xlabel=xlabel,
ylabel=ylabel,
title=title,
name=y,
saveas=saveas,
**kwargs)
return p
@ensure_plotter_consistency(plot_params)
def bokeh_spinpol_dos(energy_grid,
spin_up_data=None,
spin_dn_data=None,
*,
data=None,
spin_dn_negative=True,
energy_label=r'E-E_F [eV]',
dos_label=r'DOS [1/eV]',
title=r'Density of states',
xyswitch=False,
e_fermi=0,
spin_arrows=True,
saveas='dos_plot',
copy_data=False,
**kwargs):
"""
Create an interactive dos plot (spinpolarized) with bokeh
Both horizontal or vertical orientation are possible
:param energy_grid: arraylike or key data for the energy grid
:param spin_up_data: arraylike or key data for the DOS spin-up
:param spin_dn_data: arraylike or key data for the DOS spin-dn
:param data: source for the DOS data (optional) of the plot (pandas Dataframe for example)
:param spin_dn_negative: bool, if True (default), the spin down components are plotted downwards
:param energy_label: label for the energy-axis
:param dos_label: label for the dos-axis
:param title: title of the figure
:param xyswitch: bool if True, the energy will be plotted along the y-direction
:param e_fermi: float, determines, where to put the line for the fermi energy
:param spin_arrows: bool, if True (default) small arrows will be plotted on the left side of the plot indicating
the spin directions (if spin_dn_negative is True)
:param saveas: filename of the output file
:param copy_data: bool, if True the data argument will be copied
Kwargs will be passed on to :py:func:`bokeh_line()`
"""
from bokeh.models import NumeralTickFormatter, Arrow, NormalHead
from bokeh.models import ColumnDataSource
if isinstance(energy_grid, (dict, pd.DataFrame, ColumnDataSource)) or energy_grid is None:
warnings.warn(
'Passing the dataframe as first argument is deprecated. Please pass in source by the keyword data '
'and energy_grid and dos_data as the first arguments', DeprecationWarning)
data = energy_grid
energy_grid = kwargs.pop('energy', 'energy_grid')
spin_up_data = kwargs.pop('ynames', None)
spin_up_data, spin_dn_data = spin_up_data[:len(spin_up_data) // 2], spin_up_data[len(spin_up_data) // 2:]
if spin_up_data is None and data is not None:
spin_up_data = set(key for key in data.keys() if '_up' in key)
spin_up_data = sorted(spin_up_data)
spin_dn_data = set(key for key in data.keys() if '_dn' in key)
spin_dn_data = sorted(spin_dn_data)
plot_data = process_data_arguments(data=data,
energy=energy_grid,
spin_up=spin_up_data,
spin_dn=spin_dn_data,
same_length=True,
copy_data=copy_data,
use_column_source=True)
plot_params.single_plot = False
plot_params.num_plots = len(plot_data)
if 'limits' in kwargs:
limits = kwargs.pop('limits')
if 'x' not in limits and 'y' not in limits:
if xyswitch:
limits['x'], limits['y'] = limits.pop('dos', None), limits.pop('energy', None)
else:
limits['x'], limits['y'] = limits.pop('energy', None), limits.pop('dos', None)
kwargs['limits'] = {k: v for k, v in limits.items() if v is not None}
lines = {'horizontal': 0}
lines['vertical'] = e_fermi
if spin_dn_negative:
plot_data.apply('spin_dn', lambda x: -x)
if xyswitch:
lines['vertical'], lines['horizontal'] = lines['horizontal'], lines['vertical']
plot_params.set_defaults(default_type='function',
straight_lines=lines,
tooltips=[('DOS Name', '$name'), ('Energy', '@{x}{{0.0[00]}}'),
('Value', '@$name{{(0,0.00)}}')],
figure_kwargs={'width': 1000})
#Create the full data for the scatterplot
energy_entries = plot_data.get_keys('energy') * 2
dos_entries = plot_data.get_keys('spin_up') + plot_data.get_keys('spin_dn')
sources = plot_data.data
if isinstance(sources, list):
sources = sources * 2
if xyswitch:
x, y = dos_entries, energy_entries
xlabel, ylabel = dos_label, energy_label
plot_params.set_defaults(default_type='function',
area_vertical=True,
x_axis_formatter=NumeralTickFormatter(format='(0,0)'))
else:
xlabel, ylabel = energy_label, dos_label
x, y = energy_entries, dos_entries
plot_params.set_defaults(default_type='function',
area_vertical=True,
y_axis_formatter=NumeralTickFormatter(format='(0,0)'))
plot_params.set_parameters(color=kwargs.pop('color', None), color_palette=kwargs.pop('color_palette', None))
plot_params.set_color_palette_by_num_plots()
#Double the colors for spin up and down
kwargs['color'] = list(plot_params['color'].copy())
kwargs['color'].extend(kwargs['color'])
if 'legend_label' not in kwargs:
kwargs['legend_label'] = dos_entries
else:
if isinstance(kwargs['legend_label'], list):
if len(kwargs['legend_label']) == len(plot_data):
kwargs['legend_label'].extend(kwargs['legend_label'])
if 'show' in kwargs:
plot_params.set_parameters(show=kwargs.pop('show'))
if 'save_plots' in kwargs:
plot_params.set_parameters(save_plots=kwargs.pop('save_plots'))
with NestedPlotParameters(plot_params):
p = bokeh_line(x,
y,
xlabel=xlabel,
ylabel=ylabel,
title=title,
data=sources,
name=dos_entries,
show=False,
save_plots=False,
**kwargs)
if spin_arrows and spin_dn_negative:
#These are hardcoded because the parameters are not
#reused anywhere (for now)
x_pos = 50
length = 70
pad = 30
height = p.plot_height - 100
alpha = 0.5
p.add_layout(
Arrow(x_start=x_pos,
x_end=x_pos,
y_start=height - pad - length,
y_end=height - pad,
start_units='screen',
end_units='screen',
line_width=2,
line_alpha=alpha,
end=NormalHead(line_width=2, size=10, fill_alpha=alpha, line_alpha=alpha)))
p.add_layout(
Arrow(x_start=x_pos,
x_end=x_pos,
y_start=pad + length,
y_end=pad,
start_units='screen',
end_units='screen',
line_width=2,
line_alpha=alpha,
end=NormalHead(line_width=2, size=10, fill_alpha=alpha, line_alpha=alpha)))
plot_params.save_plot(p, saveas)
return p
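# Minimal usage sketch (column names are illustrative assumptions):
#
#   fig = bokeh_spinpol_dos('energy_grid', ['Total_up'], ['Total_dn'],
#                           data=df, spin_dn_negative=True)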
@ensure_plotter_consistency(plot_params)
def bokeh_bands(kpath,
bands=None,
*,
data=None,
size_data=None,
color_data=None,
xlabel='',
ylabel=r'E-E_F [eV]',
title='',
special_kpoints=None,
markersize_min=3.0,
markersize_scaling=10.0,
saveas='bands_plot',
scale_color=True,
separate_bands=False,
line_plot=False,
band_index=None,
copy_data=False,
**kwargs):
"""
Create an interactive bandstructure plot (non-spinpolarized) with bokeh
Can make a simple plot or weight the size and color of the points against a given weight
:param kpath: arraylike or key data for the kpoint data
:param bands: arraylike or key data for the eigenvalues
:param size_data: arraylike or key data the weights to emphasize (optional)
:param color_data: str or arraylike, data for the color values with a colormap (optional)
:param data: source for the bands data (optional) of the plot (pandas Dataframe for example)
:param xlabel: label for the x-axis (default no label)
:param ylabel: label for the y-axis
:param title: title of the figure
:param special_kpoints: list of tuples (str, float), place vertical lines at the given values
and mark them on the x-axis with the given label
:param e_fermi: float, determines, where to put the line for the fermi energy
:param markersize_min: minimum value used in scaling points for weight
:param markersize_scaling: factor used in scaling points for weight
:param saveas: filename of the output file
:param scale_color: bool, if True (default) the weight will be additionally shown via a colormapping
:param line_plot: bool, if True the bandstructure will be plotted with lines
Here no weights are supported
:param separate_bands: bool, if True the bandstructure will be separately plotted for each band
allows more specific parametrization
:param band_index: data for which eigenvalue belongs to which band (needed for line_plot and separate_bands)
:param copy_data: bool, if True the data argument will be copied
Kwargs will be passed on to :py:func:`bokeh_multi_scatter()` or :py:func:`bokeh_line()`
"""
from bokeh.transform import linear_cmap
from bokeh.models import ColumnDataSource
if 'size_scaling' in kwargs:
warnings.warn('size_scaling is deprecated. Use markersize_scaling instead', DeprecationWarning)
markersize_scaling = kwargs.pop('size_scaling')
if 'size_min' in kwargs:
warnings.warn('size_min is deprecated. Use markersize_min instead', DeprecationWarning)
markersize_min = kwargs.pop('size_min')
if isinstance(kpath, (dict, pd.DataFrame, ColumnDataSource)) or kpath is None:
warnings.warn(
'Passing the dataframe as first argument is deprecated. Please pass in source by the keyword data'
'and kpath and bands as the first arguments', DeprecationWarning)
data = kpath
kpath = kwargs.pop('k_label', 'kpath')
bands = kwargs.pop('eigenvalues', 'eigenvalues_up')
if 'weight' in kwargs:
warnings.warn('The weight argument is deprecated. Use size_data and color_data instead', DeprecationWarning)
size_data = kwargs.pop('weight')
plot_data = process_data_arguments(single_plot=True,
data=data,
kpath=kpath,
bands=bands,
size=size_data,
color=color_data,
band_index=band_index,
copy_data=copy_data,
use_column_source=True)
if line_plot and size_data is not None:
raise ValueError('Bandstructure with lines and size scaling not supported')
if line_plot and color_data is not None:
raise ValueError('Bandstructure with lines and color mapping not supported')
if line_plot or separate_bands:
if band_index is None:
raise ValueError('The data for band indices are needed for separate_bands and line_plot')
plot_data.group_data('band_index')
plot_data.sort_data('kpath')
if scale_color and size_data is not None:
if color_data is not None:
raise ValueError('color_data should not be provided when scale_color is True')
plot_data.copy_data('size', 'color', rename_original=True)
if color_data is not None:
kwargs['color'] = plot_data.get_keys('color')
entries = plot_data.keys(first=True)
if entries.size is not None:
ylimits = (-15, 15)
if 'limits' in kwargs:
if 'y' in kwargs['limits']:
ylimits = kwargs['limits']['y']
data = plot_data.values(first=True)
mask = np.logical_and(data.bands > ylimits[0], data.bands < ylimits[1])
weight_max = plot_data.max('size', mask=mask)
plot_params.set_defaults(default_type='function', marker_size=entries.size)
if scale_color:
plot_params.set_defaults(default_type='function',
color=linear_cmap(entries.color, 'Blues256', weight_max, -0.05))
transform = lambda size: markersize_min + markersize_scaling * size / weight_max
plot_data.apply('size', transform)
else:
plot_params.set_defaults(default_type='function', color='black')
if special_kpoints is None:
special_kpoints = []
xticks = []
xticklabels = {}
for label, pos in special_kpoints:
#if label in ('Gamma', 'g'): Latex label missing for bokeh
# label = r'$\Gamma$'
if pos.is_integer():
xticklabels[int(pos)] = label
xticklabels[pos] = label
xticks.append(pos)
lines = {'horizontal': 0}
lines['vertical'] = xticks
limits = {'y': (-15, 15)}
plot_params.set_defaults(default_type='function',
straight_lines=lines,
x_ticks=xticks,
x_ticklabels_overwrite=xticklabels,
figure_kwargs={
'width': 1280,
'height': 720
},
x_range_padding=0.0,
y_range_padding=0.0,
legend_label='Eigenvalues',
limits=limits)
if line_plot:
return bokeh_line(plot_data.get_keys('kpath'),
plot_data.get_keys('bands'),
data=plot_data.data,
xlabel='',
ylabel=ylabel,
title=title,
set_default_legend=False,
saveas=saveas,
**kwargs)
return bokeh_multi_scatter(plot_data.get_keys('kpath'),
plot_data.get_keys('bands'),
data=plot_data.data,
xlabel='',
ylabel=ylabel,
title=title,
set_default_legend=False,
saveas=saveas,
**kwargs)
@ensure_plotter_consistency(plot_params)
def bokeh_spinpol_bands(kpath,
bands_up=None,
bands_dn=None,
*,
size_data=None,
color_data=None,
data=None,
xlabel='',
ylabel=r'E-E_F [eV]',
title='',
special_kpoints=None,
markersize_min=3.0,
markersize_scaling=10.0,
saveas='bands_plot',
scale_color=True,
line_plot=False,
separate_bands=False,
band_index=None,
copy_data=False,
**kwargs):
"""
Create an interactive bandstructure plot (spinpolarized) with bokeh
Can make a simple plot or weight the size and color of the points against a given weight
:param kpath: arraylike or key data for the kpoint data
:param bands_up: arraylike or key data for the eigenvalues spin-up
:param bands_dn: arraylike or key data for the eigenvalues spin-dn
:param size_data: arraylike or key data the weights to emphasize (optional)
:param color_data: str or arraylike, data for the color values with a colormap (optional)
:param data: source for the bands data (optional) of the plot (pandas Dataframe for example)
:param xlabel: label for the x-axis (default no label)
:param ylabel: label for the y-axis
:param title: title of the figure
:param special_kpoints: list of tuples (str, float), place vertical lines at the given values
and mark them on the x-axis with the given label
:param e_fermi: float, determines, where to put the line for the fermi energy
:param markersize_min: minimum value used in scaling points for weight
:param markersize_scaling: factor used in scaling points for weight
:param saveas: filename of the output file
:param scale_color: bool, if True (default) the weight will be additionally shown via a colormapping
:param line_plot: bool, if True the bandstructure will be plotted with lines
Here no weights are supported
:param separate_bands: bool, if True the bandstructure will be separately plotted for each band
allows more specific parametrization
:param band_index: data for which eigenvalue belongs to which band (needed for line_plot and separate_bands)
:param copy_data: bool, if True the data argument will be copied
Kwargs will be passed on to :py:func:`bokeh_multi_scatter()` or :py:func:`bokeh_line()`
"""
from bokeh.transform import linear_cmap
from bokeh.models import ColumnDataSource
if 'size_scaling' in kwargs:
warnings.warn('size_scaling is deprecated. Use markersize_scaling instead', DeprecationWarning)
markersize_scaling = kwargs.pop('size_scaling')
if 'size_min' in kwargs:
warnings.warn('size_min is deprecated. Use markersize_min instead', DeprecationWarning)
markersize_min = kwargs.pop('size_min')
if isinstance(kpath, (dict, pd.DataFrame, ColumnDataSource)) or kpath is None:
warnings.warn(
'Passing the dataframe as first argument is deprecated. Please pass in source by the keyword data'
'and kpath and bands_up and bands_dn as the first arguments', DeprecationWarning)
data = kpath
kpath = kwargs.pop('k_label', 'kpath')
bands_up = kwargs.pop('eigenvalues', ['eigenvalues_up', 'eigenvalues_down'])
bands_up, bands_dn = bands_up[0], bands_up[1]
if 'weight' in kwargs:
warnings.warn('The weight argument is deprecated. Use size_data and color_data instead', DeprecationWarning)
size_data = kwargs.pop('weight')
plot_data = process_data_arguments(data=data,
kpath=kpath,
bands=[bands_up, bands_dn],
size=size_data,
color=color_data,
band_index=band_index,
copy_data=copy_data,
use_column_source=True)
plot_params.single_plot = False
plot_params.num_plots = len(plot_data)
if len(plot_data) != 2:
raise ValueError('Wrong number of plots specified (Only 2 permitted)')
if line_plot and size_data is not None:
raise ValueError('Bandstructure with lines and size scaling not supported')
if line_plot and color_data is not None:
raise ValueError('Bandstructure with lines and color mapping not supported')
if line_plot or separate_bands:
if band_index is None:
raise ValueError('The data for band indices are needed for separate_bands and line_plot')
plot_data.group_data('band_index')
plot_data.sort_data('kpath')
if scale_color and size_data is not None:
if color_data is not None:
raise ValueError('color_data should not be provided when scale_color is True')
plot_data.copy_data('size', 'color', rename_original=True)
if color_data is not None:
kwargs['color'] = plot_data.get_keys('color')
if any(entry.size is not None for entry in plot_data.keys()):
ylimits = (-15, 15)
if 'limits' in kwargs:
if 'y' in kwargs['limits']:
ylimits = kwargs['limits']['y']
data = plot_data.values()
mask = [np.logical_and(col.bands > ylimits[0], col.bands < ylimits[1]) for col in data]
import datetime
import numpy as np
import matplotlib.pyplot as plt
from numpy.lib.function_base import append
import sympy as sp
from multiprocessing import Pool
import os
import cppsolver as cs
from tqdm import tqdm
from ..filter import Magnet_UKF, Magnet_KF
from ..solver import Solver, Solver_jac
class Simu_Data:
def __init__(self, gt, snr, result):
self.gt = gt
self.snr = snr
self.result = result
def __len__(self):
return self.gt.shape[0]
def store(self):
np.savez('result/test.npz', gt=self.gt, data=self.result)
class expression:
def __init__(self, mag_count=1):
if mag_count == 1:
x, y, z, M, theta, phy, gx, gy, gz, xs, ys, zs = sp.symbols(
'x, y, z, M, theta, phy, gx, gy, gz, xs, ys, zs', real=True)
G = sp.Matrix([[gx], [gy], [gz]])
# theta2 = sp.tanh(theta)
# phy2 = sp.tanh(phy)
vecR = sp.Matrix([xs - x, ys - y, zs - z]).reshape(3, 1)
# vecR = sp.Matrix([x, y, z]).reshape(3, 1)
dis = sp.sqrt(vecR[0]**2 + vecR[1]**2 + vecR[2]**2)
# VecM = M*sp.Matrix([sp.sin(theta2)*sp.cos(phy2),
# sp.sin(theta2)*sp.sin(phy2), sp.cos(theta2)])
VecM = 1e-7 * sp.exp(M) * sp.Matrix([
sp.sin(theta) * sp.cos(phy),
sp.sin(theta) * sp.sin(phy),
sp.cos(theta)
])
VecB = 3 * vecR * (VecM.T * vecR) / dis**5 - VecM / dis**3 + G
VecB *= 1e6
# convert to function for faster evaluation
self.VecB = sp.lambdify(
[gx, gy, gz, xs, ys, zs, x, y, z, M, theta, phy],
VecB, 'numpy')
elif mag_count == 2:
x0, y0, z0, M0, theta0, phy0, x1, y1, z1, M1, theta1, phy1, gx, gy, gz, xs, ys, zs = sp.symbols(
'x0, y0, z0, M0, theta0, phy0, x1, y1, z1, M1, theta1, phy1, gx, gy, gz, xs, ys, zs', real=True)
G = sp.Matrix([[gx], [gy], [gz]])
# theta2 = sp.tanh(theta)
# phy2 = sp.tanh(phy)
x = [x0, x1]
y = [y0, y1]
z = [z0, z1]
M = [M0, M1]
theta = [theta0, theta1]
phy = [phy0, phy1]
VecB = G
for i in range(mag_count):
vecR = sp.Matrix(
[xs - x[i], ys - y[i], zs - z[i]]).reshape(3, 1)
# vecR = sp.Matrix([x, y, z]).reshape(3, 1)
dis = sp.sqrt(vecR[0] ** 2 + vecR[1] ** 2 + vecR[2] ** 2)
# VecM = M*sp.Matrix([sp.sin(theta2)*sp.cos(phy2),
# sp.sin(theta2)*sp.sin(phy2), sp.cos(theta2)])
VecMi = 1e-7 * sp.exp(M[i]) * sp.Matrix([sp.sin(theta[i]) * sp.cos(
phy[i]), sp.sin(theta[i]) * sp.sin(phy[i]), sp.cos(theta[i])])
VecBi = 3 * vecR * (VecMi.T * vecR) / \
dis ** 5 - VecMi / dis ** 3
VecB += VecBi
VecB = 1e6 * VecB
# convert to function for faster evaluation
self.VecB = sp.lambdify(
[gx, gy, gz, xs, ys, zs, x0, y0, z0, M0, theta0, phy0, x1, y1,
z1, M1, theta1, phy1],
VecB, 'numpy')
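# Illustrative evaluation of the compiled field function (values are made up):
#
#   exp = expression(mag_count=1)
#   B = exp.VecB(0, 0, 0,                  # background field gx, gy, gz
#                0.05, 0.05, 0.05,         # sensor position xs, ys, zs (m)
#                0.0, 0.0, 0.1,            # magnet position x, y, z (m)
#                np.log(2.7), 0.0, 0.0)    # log magnetic moment, theta, phy
#   # B has shape (3, 1): the simulated flux density at the sensor in uT.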
class Result_Handler:
def __init__(self, simu_data, scale):
self.track_result = []
self.simu_data = simu_data
self.scale = scale
def __add__(self, new):
self.track_result.append(new)
return self
def get_gt_result(self):
a = self.simu_data.gt
b = []
for i in range(len(self.track_result)):
b.append(np.array([
self.track_result[i]['X0'], self.track_result[i]['Y0'],
self.track_result[i]['Z0']
]))
b = np.stack(b)
return [a, b]
def cal_loss(self):
dist = []
loss = []
for i in range(len(self.simu_data)):
point_gt = self.simu_data.gt[i]
point_estimate = np.array([
self.track_result[i]['X0'], self.track_result[i]['Y0'],
self.track_result[i]['Z0']
])
dist.append(np.linalg.norm(point_gt, 2))
loss.append(np.linalg.norm(point_gt - point_estimate, 2))
dist = 1e2 * np.array(dist)
loss = 1e2 * np.array(loss)
return [self.scale, dist, loss]
def gt_and_route(self):
dist = []
route = []
for i in range(len(self.simu_data)):
point_gt = self.simu_data.gt[i]
dist.append(np.linalg.norm(point_gt, 2))
route.append(np.array([
self.track_result[i]['X0'], self.track_result[i]['Y0'],
self.track_result[i]['Z0']
]))
dist = np.array(dist)
route = np.stack(route, axis=0)
idx = np.argsort(dist)
gt = self.simu_data.gt[idx]
route = route[idx]
return [gt, route]
# plt.plot(dist, loss, label='scale = {}'.format(self.scale))
# plt.legend()
# print('debug')
class Simu_Test:
def __init__(self, start, stop, scales, pSensor=None, resolution=100):
self.scales = scales
self.M = 2.7
self.build_route(start, stop, resolution)
if pSensor is None:
self.build_psensor()
else:
self.pSensor = pSensor
# self.build_expression()
self.params = {
'm': np.log(self.M),
'theta': 0,
'phy': 0,
'gx': 50 / np.sqrt(2) * 1e-6,
'gy': 50 / np.sqrt(2) * 1e-6,
'gz': 0,
}
def build_expression(self):
x, y, z, M, theta, phy, gx, gy, gz, xs, ys, zs = sp.symbols(
'x, y, z, M, theta, phy, gx, gy, gz, xs, ys, zs', real=True)
G = sp.Matrix([[gx], [gy], [gz]])
# theta2 = sp.tanh(theta)
# phy2 = sp.tanh(phy)
vecR = sp.Matrix([xs - x, ys - y, zs - z]).reshape(3, 1)
# vecR = sp.Matrix([x, y, z]).reshape(3, 1)
dis = sp.sqrt(vecR[0]**2 + vecR[1]**2 + vecR[2]**2)
# VecM = M*sp.Matrix([sp.sin(theta2)*sp.cos(phy2),
# sp.sin(theta2)*sp.sin(phy2), sp.cos(theta2)])
VecM = 1e-7 * sp.exp(M) * sp.Matrix([
sp.sin(theta) * sp.cos(phy),
sp.sin(theta) * sp.sin(phy),
sp.cos(theta)
])
VecB = 3 * vecR * (VecM.T * vecR) / dis**5 - VecM / dis**3 + G
VecB *= 1e6
# convert to function for faster evaluation
self.VecB = sp.lambdify(
[gx, gy, gz, xs, ys, zs, x, y, z, M, theta, phy], VecB, 'numpy')
def build_route(self, start, stop, resolution):
# linear route
theta = 90 / 180.0 * np.pi
route = np.linspace(start, stop, resolution)
route = np.stack([route * np.cos(theta), route * np.sin(theta)]).T
route = np.pad(route, ((0, 0), (1, 0)),
mode='constant',
constant_values=0)
self.route = 1e-2 * route
# curvy route
tmp = np.linspace(start, stop, resolution)
route = np.stack([np.sin((tmp-start)/(stop-start) * np.pi * 5),
np.cos((tmp-start)/(stop-start) * np.pi * 5), tmp], axis=0).T
self.route = 1e-2 * route
def build_psensor(self):
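# Eight sensors on the corners of a 2 cm cube centred at the origin
# (coordinates are converted to metres by the 1e-2 factor below).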
self.pSensor = 1e-2 * np.array([
[1, 1, 1],
[-1, 1, 1],
[-1, -1, 1],
[1, -1, 1],
[1, 1, -1],
[-1, 1, -1],
[-1, -1, -1],
[1, -1, -1],
])
def simulate_process(self, scale):
print(scale)
pSensori = scale * self.pSensor
simu = self.estimate_B(pSensori)
simu.store()
model = Solver_jac(1, self.route[0, 0], self.route[0, 1],
self.route[0, 2])
model.fit_params['m0'].value = np.log(self.M)
model.fit_params['m0'].vary = False
results = Result_Handler(simu, scale)
for i in range(simu.result.shape[0]):
datai = simu.result[i].reshape(-1, 3)
result = model.solve(datai, pSensori,
not model.fit_params['m0'].vary)
results += result
return results.cal_loss()
def gt_and_result(self):
pSensori = 1 * self.pSensor
simu = self.estimate_B(pSensori)
simu.store()
model = Solver_jac(1, self.route[0, 0], self.route[0, 1],
self.route[0, 2])
model.fit_params['m0'].value = np.log(self.M)
model.fit_params['m0'].vary = False
results = Result_Handler(simu, 1)
for i in range(simu.result.shape[0]):
datai = simu.result[i].reshape(-1, 3)
result = model.solve(datai, pSensori,
not model.fit_params['m0'].vary)
results += result
return results.get_gt_result()
def compare_noise_thread(self, choice):
scale = 5
pSensori = scale * self.pSensor
if choice == 1:
simu = self.estimate_B(pSensori)
elif choice == 0:
simu = self.estimate_B_even_noise(pSensori)
elif choice == 2:
simu = self.estimate_B_singular_noise(pSensori)
model = Solver_jac(1, self.route[0, 0], self.route[0, 1],
self.route[0, 2])
model.fit_params['m0'].value = np.log(self.M)
model.fit_params['m0'].vary = False
results = Result_Handler(simu, scale)
for i in range(simu.result.shape[0]):
datai = simu.result[i].reshape(-1, 3)
result = model.solve(datai, pSensori,
not model.fit_params['m0'].vary)
results += result
[tmp, dist, loss] = results.cal_loss()
return [choice, dist, loss]
def compare_3_noise(self, loop):
results = []
pool = Pool()
for i in range(loop):
# self.calculate_process(scale)
results.append(
pool.apply_async(self.compare_noise_thread, args=(0, )))
results.append(
pool.apply_async(self.compare_noise_thread, args=(1, )))
results.append(
pool.apply_async(self.compare_noise_thread, args=(2, )))
pool.close()
pool.join()
# print('debug')
loss_dict = {}
dist_dict = {}
for result in results:
[scale, dist, loss] = result.get()
if not str(scale) in loss_dict.keys():
loss_dict[str(scale)] = loss
dist_dict[str(scale)] = dist
else:
loss_dict[str(scale)] += loss
msg = ['Even Noise', 'Raw Noise', 'Single Noise']
for key in dist_dict.keys():
plt.plot(dist_dict[key],
loss_dict[key] / loop,
label=msg[int(key)])
plt.legend()
plt.ylabel('Error(cm)')
plt.xlabel('Distance(cm)')
plt.savefig('result/butterfly.jpg', dpi=900)
def compare_noise_type(self, loop):
results = []
pool = Pool()
for i in range(loop):
# self.calculate_process(scale)
results.append(
pool.apply_async(self.compare_noise_type_thread, args=(0, )))
results.append(
pool.apply_async(self.compare_noise_type_thread, args=(1, )))
results.append(
pool.apply_async(self.compare_noise_type_thread, args=(2, )))
pool.close()
pool.join()
# print('debug')
loss_dict = {}
dist_dict = {}
for result in results:
[scale, dist, loss] = result.get()
if not str(scale) in loss_dict.keys():
loss_dict[str(scale)] = loss
dist_dict[str(scale)] = dist
else:
loss_dict[str(scale)] += loss
msg = ['ALL Noise', 'Only Noise', 'Only Precision']
for key in dist_dict.keys():
plt.plot(dist_dict[key],
loss_dict[key] / loop,
label=msg[int(key)])
plt.legend()
plt.ylabel('Error(cm)')
plt.xlabel('Distance(cm)')
plt.savefig('result/compare_noise_type.jpg', dpi=900)
def compare_noise_type_thread(self, choice):
scale = 5
pSensori = scale * self.pSensor
simu = self.estimate_B(pSensori, choice)
model = Solver_jac(1, self.route[0, 0], self.route[0, 1],
self.route[0, 2])
model.fit_params['m0'].value = np.log(self.M)
model.fit_params['m0'].vary = False
results = Result_Handler(simu, scale)
for i in range(simu.result.shape[0]):
datai = simu.result[i].reshape(-1, 3)
result = model.solve(datai, pSensori,
not model.fit_params['m0'].vary)
results += result
[tmp, dist, loss] = results.cal_loss()
return [choice, dist, loss]
def simulate(self, loop=1):
results = []
pool = Pool()
for scale in self.scales:
# self.calculate_process(scale)
# test(self, scale)
for i in range(loop):
# self.simulate_process(scale)
results.append(
pool.apply_async(self.simulate_process, args=(scale, )))
pool.close()
pool.join()
# print('debug')
loss_dict = {}
dist_dict = {}
for result in results:
[scale, dist, loss] = result.get()
if not str(scale) in loss_dict.keys():
loss_dict[str(scale)] = loss
dist_dict[str(scale)] = dist
else:
loss_dict[str(scale)] += loss
for key in dist_dict.keys():
plt.plot(dist_dict[key],
loss_dict[key] / loop,
label='scale = {} cm'.format(int(key) * 2))
plt.legend()
plt.ylabel('Error(cm)')
plt.xlabel('Distance(cm)')
name = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
plt.savefig('result/compare_scale/{}.jpg'.format(name), dpi=900)
def simu_readings(self, pSensor):
simu = self.estimate_B(pSensor, noise_type=3)
simu.store()
def simu_gt_and_result(self, pSensor, route, path, name):
pSensori = pSensor
simu = self.estimate_B(pSensori, route=route)
# simu.store()
# params = np.array([40 / np.sqrt(2) * 1e-6, 40 / np.sqrt(2) * 1e-6, 0, np.log(
# self.M), 1e-2 * route[0, 0], 1e-2 * (route[0, 1]), 1e-2 * (route[0,
# 2]), 0, 0])
model = Solver_jac(1, route[0, 0], route[0, 1], route[0, 2])
model.fit_params['m0'].value = np.log(self.M)
model.fit_params['m0'].vary = False
gt_ang = []
rec_ang = []
results = Result_Handler(simu, 1)
for i in tqdm(range(simu.result.shape[0])):
datai = simu.result[i].reshape(-1, 3)
result = model.solve(datai, pSensori,
not model.fit_params['m0'].vary)
results += result
gt_ang.append(np.array([0, 0, 1]))
t1 = result['theta0'].value
t2 = result['phy0'].value
rec_ang.append(
np.array(
[np.sin(t1) * np.cos(t2),
np.sin(t1) * np.sin(t2),
np.cos(t1)]))
[gt, route] = results.gt_and_route()
gt_ang = np.stack(gt_ang)
rec_ang = np.stack(rec_ang)
if not os.path.exists(path):
os.makedirs(path)
np.savez(os.path.join(path, name), gt=gt * 1e2, result=route *
1e2, gt_ang=gt_ang, result_ang=rec_ang)
def compare_layout_thread(self, index, pSensori):
overall_noise = np.random.randn(3)
simu = self.estimate_B(pSensori)
model = Solver_jac(1, self.route[0, 0], self.route[0, 1],
self.route[0, 2])
model.fit_params['m0'].value = np.log(self.M)
model.fit_params['m0'].vary = False
results = Result_Handler(simu, 1)
for i in range(simu.result.shape[0]):
datai = simu.result[i].reshape(-1, 3)
result = model.solve(datai, pSensori,
not model.fit_params['m0'].vary)
results += result
[tmp, dist, loss] = results.cal_loss()
return [index, dist, loss]
def compare_layouts(self, pSensors, loop=1):
results = []
pool = Pool()
for index, pSensor in enumerate(pSensors):
# self.calculate_process(scale)
# test(self, scale)
for i in range(loop):
# self.calculate_process(scale)
# self.compare_layout_thread(index, pSensor)
results.append(
pool.apply_async(self.compare_layout_thread,
args=(index, pSensor)))
pool.close()
pool.join()
# print('debug')
loss_dict = {}
dist_dict = {}
for result in results:
[scale, dist, loss] = result.get()
if not str(scale) in loss_dict.keys():
loss_dict[str(scale)] = loss
dist_dict[str(scale)] = dist
else:
loss_dict[str(scale)] += loss
# msg = ['Plane Layout(MIT)', 'Our Current Layout', 'Cube Layout']
msg = ['Best Layout', 'Current Layout']
for key in dist_dict.keys():
plt.plot(dist_dict[key],
loss_dict[key] / loop,
label=msg[int(key)])
plt.legend()
plt.ylabel('Error(cm)')
plt.xlabel('Distance(cm)')
name = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
# plt.savefig('result/compare_layout/{}.jpg'.format(name), dpi=900)
plt.show()
def estimate_B(
self,
pSensor,
route=None,
noise_type=0,
overall_noise=None):
# noise type: 0: noise+precision, 1:only noise, 2: only precision
# 3:none
result = []
exp = expression()
if route is None:
route = self.route
for i in range(route.shape[0]):
routei = route[i]
tmp = []
for j in range(pSensor.shape[0]):
param = [
self.params['gx'], self.params['gy'], self.params['gz'],
pSensor[j][0], pSensor[j][1], pSensor[j][2], routei[0],
routei[1], routei[2], self.params['m'],
self.params['theta'], self.params['phy']
]
tmp.append(exp.VecB(*param).squeeze())
tmp = np.concatenate(tmp, axis=0).reshape(-1)
result.append(tmp)
result = np.concatenate(result, axis=0).reshape(-1, 3)
Noise_x = 0.8 * np.random.randn(result.shape[0])
Noise_y = 0.8 * np.random.randn(result.shape[0])
Noise_z = 1.2 * np.random.randn(result.shape[0])
Noise = np.stack([Noise_x, Noise_y, Noise_z]).T
if noise_type != 3:
if noise_type != 2:
result += Noise
if overall_noise is not None:
result += overall_noise
# add sensor resolution
if noise_type != 1:
result = np.floor(result * 100.0)
result = result - np.mod(result, 15)
result = 1e-2 * result
# compute SNR
G = 1e6 * np.array(
[self.params['gx'], self.params['gy'], self.params['gz']])
signal_power = np.sum(np.power(result - Noise, 2), 1)
noise_power = np.sum(np.power(G + Noise, 2), 1)
SNR = 10 * np.log(signal_power / noise_power)
return Simu_Data(route, SNR, result)
#
#
import numpy as np
import pandas as pd
def test_lib():
'''To test if the library was properly loaded.'''
print('Library loaded and ready!')
def join_trim(l):
'''Join nested array.'''
return ' '.join(' '.join(l).split())
def assign_to_rois(
rois, bed,
keep_unassigned_rows,
keep_marginal_overlaps,
keep_including,
use_name,
collapse_method = None,
floatValues = None
):
'''Assign rows from bed to regions in rois.
Args:
rois (pd.DataFrame): bed file with regions of interest.
bed (pd.DataFrame): bed file with regions to be assigned to ROIs.
keep_unassigned_rows (bool): keep rows that do not belong to any ROI.
keep_marginal_overlaps (bool): assign to partial overlaps.
keep_including (bool): assign to included ROIs.
use_name (bool): also use ROIs name.
collapse_method (string): collapse method, default: sum.
Returns:
pd.DataFrame: bed file with added rois column or collapsed.
'''
# Assign collapse method for np.ndarray
collapse_methods = {
'min' : lambda x: np.min(x),
'mean' : lambda x: np.mean(x),
'median' : lambda x: np.median(x),
'max' : lambda x: np.max(x),
'count' : lambda x: x.shape[0],
'sum' : lambda x: np.sum(x)
}
if not type(None) == type(collapse_method):
collapse = collapse_methods[collapse_method]
if type(None) == type(floatValues):
floatValues = False
# Add regions column
bed['rois'] = pd.Series(np.array(['' for i in range(bed.shape[0])]),
index = bed.index)
# Assign reads to rows
chr_set = set(bed['chr'])
for chri in chr_set:
# Select rois and rows
chr_bed = bed.iloc[np.where(bed['chr'] == chri)[0], :]
chr_roi = rois.iloc[np.where(rois['chr'] == chri)[0], :]
if 0 == chr_bed.shape[0] or 0 == chr_roi.shape[0]:
continue
# Prepare roi label
chr_roi_labels = np.array(chr_roi['chr']).astype('str').tolist()
chr_roi_labels = np.core.defchararray.add(chr_roi_labels, ':')
chr_roi_labels = np.core.defchararray.add(chr_roi_labels,
np.array(chr_roi['start']).astype('str').tolist())
chr_roi_labels = np.core.defchararray.add(chr_roi_labels, '-')
chr_roi_labels = np.core.defchararray.add(chr_roi_labels,
np.array(chr_roi['end']).astype('str').tolist())
if use_name:
chr_roi_labels = np.core.defchararray.add(chr_roi_labels, ':')
chr_roi_labels = np.core.defchararray.add(chr_roi_labels,
np.array(chr_roi['name']).astype('str').tolist())
# Build matrix ---------------------------------------------------------
bed_hash_start = [hash(i) for i in chr_bed['start']]
bed_hash_end = [hash(i) for i in chr_bed['end']]
roi_hash_start = [hash(i) for i in chr_roi['start']]
roi_hash_end = [hash(i) for i in chr_roi['end']]
# Start should be higher than the region start
condition_start = np.greater_equal.outer(bed_hash_start, roi_hash_start)
# End should be lower than the region end
condition_end = np.logical_not(
np.greater.outer(bed_hash_end, roi_hash_end))
# Perfectly contained (in)
condition_in = np.logical_and(condition_start, condition_end)
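# condition_in[i, j] is True when bed row i lies fully inside ROI j
# (start_i >= start_j and end_i <= end_j); the .outer comparisons build
# the full (n_bed x n_roi) boolean matrices in a single vectorized step.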
if keep_marginal_overlaps or keep_including:
# Start should be lower than the region start
condition_left_start = np.logical_not(condition_start)
# End should be higher than the region end
condition_right_end = np.logical_not(condition_end)
import sys
import numpy as np
import random
from os.path import join
from seisflows.tools import unix
from seisflows.workflow.inversion import inversion
from scipy.fftpack import fft, fftfreq
from seisflows.tools.array import loadnpy, savenpy
from seisflows.tools.seismic import setpar, setpararray
PAR = sys.modules['seisflows_parameters']
PATH = sys.modules['seisflows_paths']
system = sys.modules['seisflows_system']
solver = sys.modules['seisflows_solver']
optimize = sys.modules['seisflows_optimize']
class inversion_se(inversion):
""" Waveform inversion with source encoding
"""
def check(self):
super().check()
# get random source
if 'RANDOM_OVER_IT' not in PAR:
setattr(PAR, 'RANDOM_OVER_IT', 1)
# increase frequency over iterations
if 'FREQ_INCREASE_PER_IT' not in PAR:
setattr(PAR, 'FREQ_INCREASE_PER_IT', 0)
# maximum frequency shift over iterations
if 'MAX_FREQ_SHIFT' not in PAR:
setattr(PAR, 'MAX_FREQ_SHIFT', None)
# number of frequency per event
if 'NFREQ_PER_EVENT' not in PAR:
setattr(PAR, 'NFREQ_PER_EVENT', 1)
# default number of super source
if 'NSRC' not in PAR:
setattr(PAR, 'NSRC', 1)
# number of timesteps after steady state
NTPSS = int(round(1/((PAR.FREQ_MAX-PAR.FREQ_MIN)/PAR.NEVT/PAR.NFREQ_PER_EVENT)/PAR.DT))
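# NTPSS is chosen so that the FFT bin spacing 1 / (NTPSS * DT) equals the
# frequency separation between encoded sources,
# (FREQ_MAX - FREQ_MIN) / (NEVT * NFREQ_PER_EVENT).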
if 'NTPSS' in PAR:
assert(PAR.NTPSS == NTPSS)
else:
setattr(PAR, 'NTPSS', NTPSS)
print('Number of timesteps after steady state:', NTPSS)
def setup(self):
super().setup()
unix.mkdir(join(PATH.FUNC, 'residuals'))
unix.mkdir(join(PATH.GRAD, 'residuals'))
def initialize(self):
""" Prepares for next model update iteration
"""
self.write_model(path=PATH.GRAD, suffix='new')
if PAR.RANDOM_OVER_IT or optimize.iter == 1:
self.get_random_frequencies()
print('Generating synthetics')
system.run('solver', 'eval_func',
hosts='all',
path=PATH.GRAD)
self.write_misfit(path=PATH.GRAD, suffix='new')
def clean(self):
super().clean()
unix.mkdir(join(PATH.FUNC, 'residuals'))
unix.mkdir(join(PATH.GRAD, 'residuals'))
def get_random_frequencies(self):
""" Randomly assign a unique frequency for each source
"""
# ref preprocess/ortho.py setup()
ntpss = PAR.NTPSS
dt = PAR.DT
nt = PAR.NT
nrec = PAR.NREC
nevt = PAR.NEVT
nfpe = PAR.NFREQ_PER_EVENT
nsrc = nevt * nfpe
freq_min = float(PAR.FREQ_MIN)
freq_max = float(PAR.FREQ_MAX)
# read data processed py ortho
freq_idx = loadnpy(PATH.ORTHO + '/freq_idx')
freq = loadnpy(PATH.ORTHO + '/freq')
sff_obs = loadnpy(PATH.ORTHO + '/sff_obs')
ft_obs = loadnpy(PATH.ORTHO + '/ft_obs')
nfreq = len(freq_idx)
# ntrace = ft_obs.shape[3]
# declaring arrays
ft_obs_se = np.zeros((nfreq, nrec), dtype=complex) # encoded frequency of observed seismogram
# frequency processing
# TODO freq_mask
freq_mask_se = np.ones((nfreq, nrec))
freq_shift = (optimize.iter - 1) * PAR.FREQ_INCREASE_PER_IT
if PAR.MAX_FREQ_SHIFT != None:
freq_shift = min(freq_shift, PAR.MAX_FREQ_SHIFT)
# random frequency
freq_range = np.linspace(freq_min + freq_shift, freq_max + freq_shift, nsrc + 1)[:-1]
freq_thresh = (freq_max - freq_min) / nsrc / 20
rdm_idx = random.sample(range(0, nsrc), nsrc) # randomly assign frequencies
freq_rdm = freq_range[rdm_idx]
# assign frequencies
stf_filenames = [None] * nsrc
for ifpe in range(nfpe):
for ievt in range(nevt):
isrc = ifpe * nevt + ievt # index of source
f0 = freq_rdm[isrc] # central frequency of source
# get sinus source time function
T = 2 * np.pi * dt * np.linspace(0, nt - 1, nt) * f0
sinus = 1000 * np.sin(T) # synthetic sinus source
sff_syn = fft(sinus[-ntpss:])[freq_idx]
# find and encode matching frequencies
for ifreq in range(nfreq):
if abs(abs(f0) - abs(freq[ifreq])) < freq_thresh:
# TODO freq_mask
pshift = sff_syn[ifreq] / sff_obs[ifreq, ievt]
pshift /= abs(pshift)
ft_obs_se[ifreq, :] = ft_obs[ifreq, ievt, :] * pshift
# determine the filename to save current sinus source time function
# make sure that source time function files does not change over iterations
jevt = rdm_idx[isrc] % nevt
jfpe = int((rdm_idx[isrc] - jevt) / nevt)
jsrc = jfpe * nevt + jevt
filename = PATH.SOLVER + '/000000/DATA/STF_' + str(jevt) + '_' + str(jfpe)
stf_filenames[isrc] = filename
# save source time function file
if optimize.iter == 1:
stf_syn = np.zeros([nt, 2])
#!/usr/bin/env python3
import time
import math
from datetime import datetime
from time import sleep
import numpy as np
import random
import cv2
import os
import argparse
import torch
from math import sin,cos,acos
import matplotlib.pyplot as plt
import sys
sys.path.append('./Eval')
sys.path.append('./')
from env import Engine
from utils_env import get_view,safe_path,cut_frame,point2traj,get_gripper_pos,backup_code
def angleaxis2quaternion(angleaxis):
angle = np.linalg.norm(angleaxis)
axis = angleaxis / (angle + 0.00001)
q0 = cos(angle/2)
qx,qy,qz = axis * sin(angle/2)
return np.array([qx,qy,qz,q0])
def quaternion2angleaxis(quater):
angle = 2 * acos(quater[3])
axis = quater[:3]/(sin(angle/2)+0.00001)
angleaxis = axis * angle
return np.array(angleaxis)
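# Round-trip sanity check (illustrative; tolerance is loose because of the
# 1e-5 regularisation terms used above):
#
#   aa = np.array([0.0, 0.0, np.pi / 2])        # 90 degrees about z
#   q = angleaxis2quaternion(aa)                # ~ [0, 0, 0.707, 0.707]
#   assert np.allclose(quaternion2angleaxis(q), aa, atol=1e-3)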
class Engine108(Engine):
def __init__(self, worker_id, opti, p_id, taskId=5, maxSteps=15, n_dmps=3, cReward=True):
super(Engine108,self).__init__(opti, wid=worker_id, p_id=p_id, maxSteps=maxSteps, taskId=taskId, n_dmps=n_dmps, cReward=cReward,robot_model=None)
self.opti = opti
def init_obj(self):
self.obj_file = os.path.join(self.resources_dir,"urdf/objmodels/nut.urdf")
self.obj_position = [0.3637 + 0.06, -0.06, 0.35]
self.obj_scaling = 2
self.obj_orientation = self.p.getQuaternionFromEuler([math.pi/2+0.2, -math.pi/2, -0.3])
self.obj_id = self.p.loadURDF(fileName=self.obj_file, basePosition=self.obj_position,baseOrientation=self.obj_orientation,
globalScaling=self.obj_scaling)#,physicsClientId=self.physical_id
self.box_file = os.path.join (self.resources_dir, "urdf/openbox/openbox.urdf")
self.box_position = [0.27, 0.00, -0.30]
self.box_scaling = 0.00044
self.box_orientation = self.p.getQuaternionFromEuler ([0, math.pi, -math.pi/2])
self.box_id = self.p.loadURDF (fileName=self.box_file, basePosition=self.box_position,
baseOrientation=self.box_orientation,
globalScaling=self.box_scaling,useFixedBase=True)
self.p.changeVisualShape (self.obj_id, -1, rgbaColor=[38/255.,0.,128/255.0,1])
self.p.changeDynamics(self.obj_id,-1,mass=2.0)
def reset_obj(self):
self.p.resetBasePositionAndOrientation(self.obj_id, self.obj_position, self.obj_orientation)
def init_motion(self):
self.data_q = np.load (os.path.join(self.robot_recordings_dir,"47-4/q.npy"))
self.data_gripper = np.load (self.configs_dir + '/init/gripper.npy')
self.robot.setJointValue(self.data_q[0],gripper=self.data_gripper[0])
def init_grasp(self):
self.box_position[2] = -.30
self.p.resetBasePositionAndOrientation(self.box_id,self.box_position,self.box_orientation)
self.robot.gripperControl(0)
qlist = np.load( os.path.join(self.robot_recordings_dir, "47-4/q.npy"))
glist = np.load( os.path.join(self.robot_recordings_dir, "47-4/gripper.npy"))
num_q = len(qlist[0])
self.fix_orn = np.load (os.path.join (self.configs_dir, 'init', 'orn.npy'))
self.null_q = qlist[180]
self.robot.setJointValue(qlist[40],glist[40])
for i in range(40,180,1):
glist[i] = min(120,glist[i])
self.robot.jointPositionControl(qlist[i],gripper=glist[i])
pos = self.robot.getEndEffectorPos()
pos[2] += 0.15
orn = self.robot.getEndEffectorOrn()
for i in range(109):
self.robot.operationSpacePositionControl(pos,orn,null_pose=self.null_q,gripperPos=130)
# time.sleep(3)
self.start_pos = self.p.getLinkState (self.robotId, 7)[0]
self.box_position[2] *= -1.0
self.p.resetBasePositionAndOrientation(self.box_id,self.box_position,self.box_orientation)
def step_dmp(self,action,f_w,coupling,reset):
if reset:
action = action.squeeze()
self.start_pos = self.robot.getEndEffectorPos()
self.start_orn = quaternion2angleaxis(self.robot.getEndEffectorOrn())
self.start_gripper_pos = self.robot.getGripperPos()
self.start_status = np.array([self.start_pos[0],self.start_pos[1],self.start_pos[2],self.start_orn[0],self.start_orn[1],self.start_orn[2],0.0]).reshape((-1,))
self.dmp.set_start(np.array(self.start_status)[:self.dmp.n_dmps])
dmp_end_pos = [x+y for x,y in zip(self.start_status,action)]
self.dmp.set_goal(dmp_end_pos)
if f_w is not None:
self.dmp.set_force(f_w)
self.dmp.reset_state()
#self.traj = self.dmp.gen_traj()
self.actual_traj = []
p1 = self.start_pos
p1 = np.array(p1)
self.dmp.timestep = 0
small_observation = self.step_within_dmp (coupling)
#for idx, small_action in enumerate(self.traj):
# if idx < 7:
# for i in range(4):
# small_observation = self.step_within_dmp (small_action)
# else:
# small_observation = self.step_within_dmp (small_action)
#self.actual_traj.append(tmp_pos)
#self.a_traj = np.array(self.actual_traj)
#p2 = self.robot.getEndEffectorPos()
#p2 = np.array(p2)
lenT = len(self.dmp.force[:,0])
if self._wid == 0:
fig = plt.figure(1)
# plt.plot(np.arange(0,lenT),self.traj[:,0],'--',color='r')
# plt.plot(np.arange(0,lenT),self.traj[:,1],'--',color='g')
# plt.plot(np.arange(0,lenT),self.traj[:,2],'--',color='b')
# plt.plot(np.arange(0,lenT),self.a_traj[:,0],color='red')
# plt.plot(np.arange(0,lenT),self.a_traj[:,1],color='green')
# plt.plot(np.arange(0,lenT),self.a_traj[:,2],color='blue')
plt.plot(np.arange(0,lenT),self.dmp.force[:,0],color='red')
plt.plot(np.arange(0,lenT),self.dmp.force[:,1],color='green')
"""asg4.py
This assignment is based on the Linear Regression - Least Squares
theory. The present script is associated with the text document
available from the course web page.
Do not touch the imports, and specifically, do not import matplotlib
in this file! Use the provided file draw.py for visualization. You can
run it by executing the script from command line:
python3 draw.py
The imports listed below should be enough to accomplish all tasks.
The functions /docstring/s contain some real examples and usage of the
functions. You can run these examples by executing the script from
command line:
python3 asg4.py
Note that the unit tests for the final grading may contain different
tests, and that certain requirements given below are not tested in the
testing before the final testing.
"""
import numpy as np
np.set_printoptions(precision=3)
from scipy import linalg as la
def least_squares(A, b):
"""Calculate the least squares solutions to Ax = b.
You should do this by using the QR decomposition.
YOU ARE NOT ALLOWED TO USE THE FUNCTION lstsq() FROM NUMPY or SCIPY
Parameters:
A ((m,n) ndarray): A matrix
b ((m, ) ndarray): A vector of length m.
Returns:
x ((n, ) ndarray): The solution to the normal equations.
Examples
--------
>>> A = np.array([[4, 1],[1, 1],[8, 9],[6, 9],[5, 2],[7, 7],[7, 1],[5, 1]])
>>> b = np.array([[8],[4],[1],[8],[4],[6],[7],[8]])
>>> np.array(least_squares(A,b))
array([[ 1.236],
[-0.419]])
"""
M = np.asmatrix(A); #Stores A as a matrix in M without copying it
return np.asarray(((M.T @ M)**-1) @ M.T @ b); #Calculate solution to least squares, cast as ndarray
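# A QR-based variant, as the docstring asks for, could look like the sketch
# below (kept as a comment so the behaviour of the version above is unchanged):
#
#   def least_squares_qr(A, b):
#       Q, R = la.qr(A, mode='economic')
#       return la.solve_triangular(R, Q.T @ b)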
def linear_model(x,y):
"""Find the a and b coefficients of the least squares line y = ax + b.
Parameters
----------
x : np.ndarray : a numpy array of floats for the input (predictor variables)
y : np.ndarray : a numpy array of floats for the output (response variable)
Returns
-------
(a,b) : a tuple containing the coefficients of the line y = ax + b.
Examples
--------
>>> x = np.array([2, 3, 4, 5, 6, 7, 8, 9])
>>> y = np.array([1.75, 1.91, 2.03, 2.13, 2.22, 2.30, 2.37, 2.43])
>>> np.array(linear_model(x,y))
array([0.095, 1.621])
"""
coefm = least_squares(np.vander(x, 2), y)
return (coefm[0,0], coefm[0,1])
def exponential_model(x,y):
"""Find the a and b coefficients of the best fitting curve y = ae^(bx).
Parameters
----------
x : np.ndarray : a numpy array of floats for the input (predictor variables)
y : np.ndarray : a numpy array of floats for the output (response variable)
Returns
-------
(a,b) : a tuple containing the coefficients of the model y = ae^(bx).
Examples
--------
>>> x = np.array([2, 3, 4, 5, 6, 7, 8, 9])
>>> y = np.array([1.75, 1.91, 2.03, 2.13, 2.22, 2.30, 2.37, 2.43])
>>> np.array(exponential_model(x,y))
array([1.662, 0.045])
"""
yl = np.log(y)
out = least_squares(np.vander(x,2),yl)
return (np.exp(out[0,1]), out[0,0])
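# The exponential fit works by log-linearising the model:
#   y = a * e^(b*x)  =>  ln(y) = b*x + ln(a),
# so a straight-line least-squares fit of ln(y) against x gives the slope b
# and the intercept ln(a); the same trick with ln(x) is used for the power
# model below.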
def power_model(x,y):
"""Find the a and b coefficients of the best fitting curve y = a x^b.
Parameters
----------
x : np.ndarray : a numpy array of floats for the input (predictor variables)
y : np.ndarray : a numpy array of floats for the output (response variable)
Returns
-------
(a,b) : a tuple containing the coefficients of the model y = a x^b.
Examples
--------
>>> x = np.array([2, 3, 4, 5, 6, 7, 8, 9])
>>> y = np.array([1.75, 1.91, 2.03, 2.13, 2.22, 2.30, 2.37, 2.43])
>>> np.array(power_model(x,y))
array([1.501, 0.219])
"""
xl = np.log(x)
yl = np.log(y)
out = least_squares(np.vander(xl,2), yl)
return (np.exp(out[0,1]), out[0,0])
'''
Author: jianzhnie
Date: 2021-11-15 18:31:40
LastEditTime: 2022-02-24 12:10:09
LastEditors: jianzhnie
Description:
'''
import math
from typing import Callable, Dict
import numpy as np
from scipy.special import softmax
from sklearn.metrics import auc, confusion_matrix, f1_score, matthews_corrcoef, mean_absolute_error, mean_squared_error, precision_recall_curve, roc_auc_score
from transformers import EvalPrediction
def build_compute_metrics_fn(
task_name: str) -> Callable[[EvalPrediction], Dict]:
def compute_metrics_fn(p: EvalPrediction):
if task_name == 'classification':
preds_labels = np.argmax(p.predictions, axis=1)
if p.predictions.shape[-1] == 2:
pred_scores = softmax(p.predictions, axis=1)[:, 1]
else:
pred_scores = softmax(p.predictions, axis=1)
return calc_classification_metrics(pred_scores, preds_labels,
p.label_ids)
elif task_name == 'regression':
preds = np.squeeze(p.predictions)
return calc_regression_metrics(preds, p.label_ids)
else:
return {}
return compute_metrics_fn
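# Hypothetical usage with a HuggingFace Trainer (argument names illustrative):
#
#   trainer = Trainer(model=model,
#                     args=training_args,
#                     compute_metrics=build_compute_metrics_fn('classification'))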
def calc_classification_metrics(pred_scores, pred_labels, labels):
if len(np.unique(labels)) == 2:  # binary classification
from unittest import TestCase
import numpy as np
import nibabel as nib
from unet3d.utils.resample import resample
from unet3d.utils.augment import scale_affine, generate_permutation_keys, permute_data
class TestAugmentation(TestCase):
def setUp(self):
self.shape = (4, 4, 4)
self.affine = np.diag(np.ones(4))
self.data = np.arange(np.prod(self.shape), dtype=float).reshape(self.shape)
self.image = nib.Nifti1Image(self.data, self.affine)
def test_scale_affine(self):
scale = (0.5, 0.5, 0.5)
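# Halving the scale zooms the image out, so after resampling the border
# voxels fall outside the original field of view and should be zero-filled.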
new_affine = scale_affine(self.affine, self.shape, scale)
new_image = resample(self.image, target_affine=new_affine, target_shape=self.shape)
new_data = new_image.get_data()
self.assertEqual(np.sum(new_data[:1]), 0)
self.assertEqual(np.sum(new_data[-1:]), 0)
self.assertEqual(np.sum(new_data[:, :1]), 0)
self.assertEqual(np.sum(new_data[:, -1:]), 0)
self.assertEqual(np.sum(new_data[..., :1]), 0)
self.assertEqual(np.sum(new_data[..., -1:]), 0)
import os, numpy as np, matplotlib.pyplot as plt
import torch, torch.nn as nn, torchvision as tv
import numpy as np
import random
"""==================================================="""
seed = 1
torch.backends.cudnn.deterministic=True; np.random.seed(seed); random.seed(seed)
torch.manual_seed(seed); torch.cuda.manual_seed(seed); torch.cuda.manual_seed_all(seed)
"""==================================================="""
ppline = 100
n_lines = 4
noise_perc = 0.15
intervals = [(0.1,0.3), (0.35,0.55), (0.6,0.8), (0.85,1.05)]
lines = [np.stack([np.linspace(intv[0],intv[1],ppline), np.linspace(intv[0],intv[1],ppline)])[:,np.random.choice(ppline, int(ppline*noise_perc), replace=False)] for intv in intervals]
cls = [x*np.ones(int(ppline*noise_perc)) for x in range(n_lines)]
train_lines = np.concatenate(lines, axis=1).T
train_cls = np.concatenate(cls)
x_test_line1 = np.stack([0.2*np.ones(ppline), np.linspace(0.2,0.4,ppline)])[:,np.random.choice(ppline, int(ppline*noise_perc), replace=False)]
x_test_line2 = np.stack([0.2*np.ones(ppline), np.linspace(0.55,0.85,ppline)])[:,np.random.choice(ppline, int(ppline*noise_perc), replace=False)]
y_test_line1 = np.stack([np.linspace(0.4,0.6,ppline), 0.2*np.ones(ppline)])[:,np.random.choice(ppline, int(ppline*noise_perc), replace=False)]
y_test_line2 = np.stack([np.linspace(0.7,0.9,ppline), 0.2*np.ones(ppline)])[:,np.random.choice(ppline, int(ppline*noise_perc), replace=False)]
# for line in lines:
# plt.plot(line[0,:], line[1,:], '.', markersize=6)
# plt.plot(x_test_line1[0,:], x_test_line1[1,:])
# plt.plot(x_test_line2[0,:], x_test_line2[1,:])
# plt.plot(y_test_line1[0,:], y_test_line1[1,:])
# plt.plot(y_test_line2[0,:], y_test_line2[1,:])
###############
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]= '1'
###############
import itertools as it
from tqdm import tqdm
import torch.nn.functional as F
bs = 24
lr = 0.03
neg_margin = 0.1
train_iter = 200
device = torch.device('cpu')
###############
class Backbone(nn.Module):
def __init__(self):
super().__init__()
self.backbone = nn.Sequential(nn.Linear(2,30), nn.ReLU(), nn.Linear(30,30), nn.ReLU(), nn.Linear(30,2))
def forward(self, x):
return torch.nn.functional.normalize(self.backbone(x),dim=1)
###############
base_net = Backbone()
main_reg_net = Backbone()
###############
def train(net2train, p_switch=0):
device = torch.device('cpu')
_ = net2train.train()
_ = net2train.to(device)
optim = torch.optim.Adam(net2train.parameters(), lr=lr)
loss_collect = []
for i in range(train_iter):
idxs = np.random.choice(len(train_lines), bs, replace=False)
batch = torch.from_numpy(train_lines[idxs,:]).to(torch.float).to(device)
train_labels = train_cls[idxs]
embed = net2train(batch)
unique_cls = np.unique(train_labels)
indices = np.arange(len(batch))
class_dict = {i:indices[train_labels==i] for i in unique_cls}
sampled_triplets = [list(it.product([x],[x],[y for y in unique_cls if x!=y])) for x in unique_cls]
sampled_triplets = [x for y in sampled_triplets for x in y]
sampled_triplets = [[x for x in list(it.product(*[class_dict[j] for j in i])) if x[0]!=x[1]] for i in sampled_triplets]
sampled_triplets = [x for y in sampled_triplets for x in y]
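# The three comprehensions above enumerate every (anchor, positive, negative)
# index triplet in the batch: first all (class, class, other-class) label
# combinations, then all index combinations drawn from those classes,
# discarding pairs where anchor == positive.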
anchors = [triplet[0] for triplet in sampled_triplets]
positives = [triplet[1] for triplet in sampled_triplets]
negatives = [triplet[2] for triplet in sampled_triplets]
if p_switch>0:
negatives = [p if np.random.choice(2, p=[1-p_switch, p_switch]) else n for n,p in zip(negatives, positives)]
neg_dists = torch.mean(F.relu(neg_margin - nn.PairwiseDistance(p=2)(embed[anchors,:], embed[negatives,:])))
loss = neg_dists
else:
pos_dists = torch.mean(F.relu(nn.PairwiseDistance(p=2)(embed[anchors,:], embed[positives,:])))
neg_dists = torch.mean(F.relu(neg_margin - nn.PairwiseDistance(p=2)(embed[anchors,:], embed[negatives,:])))
loss = pos_dists + neg_dists
optim.zero_grad()
loss.backward()
optim.step()
loss_collect.append(loss.item())
return loss_collect
###############
base_loss = train(base_net)
_ = train(main_reg_net, p_switch=0.001)
###############
def get_embeds(net):
_ = net.eval()
with torch.no_grad():
train_embed = net(torch.from_numpy(train_lines).to(torch.float).to(device)).cpu().detach().numpy()
x_embed_test_line1 = net(torch.from_numpy(x_test_line1.T).to(torch.float).to(device)).cpu().detach().numpy()
x_embed_test_line2 = net(torch.from_numpy(x_test_line2.T).to(torch.float).to(device)).cpu().detach().numpy()
y_embed_test_line1 = net(torch.from_numpy(y_test_line1.T).to(torch.float).to(device)).cpu().detach().numpy()
y_embed_test_line2 = net(torch.from_numpy(y_test_line2.T).to(torch.float).to(device)).cpu().detach().numpy()
_, s, _ = np.linalg.svd(train_embed)
s = s/np.sum(s)
return train_embed, x_embed_test_line1, x_embed_test_line2, y_embed_test_line1, y_embed_test_line2, s
###############
base_embed, x_base_t1, x_base_t2, y_base_t1, y_base_t2, base_s = get_embeds(base_net)
sp = get_embeds(main_reg_net)
###
theta = np.radians(np.linspace(0,360,300))
x_2 = np.cos(theta)
y_2 = np.sin(theta)
import warnings
import cvxpy as cp
import numpy as np
import numpy.linalg as la
import pandas as pd
import scipy.stats as st
from _solver_fast import _cd_solver
from linearmodels.iv import IV2SLS, compare
from patsy import dmatrices
from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin
from sklearn.utils.validation import (
check_array,
check_is_fitted,
check_random_state,
check_X_y,
)
from statsmodels.api import add_constant
class Rlasso(BaseEstimator, RegressorMixin):
"""
Rigorous Lasso and Sqrt-Lasso estimator with
theoretically motivated and data-driven penalty level.
Parameters
----------
post: bool, default=True
If True, post-lasso is used to estimate betas,
meaning that features selected by rlasso are
estimated by OLS in the final model, as outlined
in [2]_.
sqrt: bool, default=False
If True, square-root lasso criterion is minimized
is minimized instead of normal lasso. See [1]_ and
notes below for details.
fit_intercept: bool, default=True
If True, an unpenalized intercept is estimated
by mean centering the data prior to estimation.
cov_type: str, default="nonrobust"
Type of covariance matrix. Right now the
supported types are: "nonrobust", "robust".
x_dependent: bool, default=False
If True, the alternative and less conservative lambda
is estimated by simulation using the conditional
distribution of the design matrix.
n_sim: int, default=5000
Number of simulations to be performed for x-dependent
lambda calculation.
random_state: int, default=None
Random seed used for simulations if `x_dependent` is
set to `True`.
lasso_psi: bool, default=False
If True, lasso rather than post-lasso is used to obtain the
residuals for updating the penalty loadings.
prestd: bool, default=False
If True, the data is prestandardized up front instead of being
standardized on the fly by the penalty loadings. Currently only
supports the homoscedastic case.
n_corr: int, default=5
Number of most correlated variables to be used for the initial
calculation of the residuals.
c: float, default=1.1
Slack parameter used in the lambda calculation. From
[3]_ "c needs to be greater than 1 for the regularization
event to hold asymptotically, but not too high as the
shrinkage bias is increasing in c."
gamma: float, optional=None
Regularization parameter, where the probability of
selecting the correct model is given by 1-gamma.
If not specified, the the value is set to:
0.1 / np.log(n)
max_iter: int, default=2
Maximum number of iterations to perform in the iterative
estimation procedure to obtain the Rlasso estimates.
conv_tol: float, default=1e-4
Tolerance for the convergence of the iterative estimation
procedure.
solver: str, default="cd"
Solver to be used for the iterative estimation procedure.
Alternatives are:
"cd" - coordinate descent method.
"cvxpy" - cvxpy solver.
cd_max_iter: int, default=10000
Maximum number of iterations to be perform by the coordinate
descent algorithm before stopping.
cd_tol: float, default=1e-10
Convergence tolerance for the coordinate descent algorithm.
cvxpy_opts: dict, default=None
Additional options to be passed to the cvxpy solver. See cvxpy
documentation for more details:
https://www.cvxpy.org/tutorial/advanced/index.html#solve-method-options
zero_tol: float, default=1e-4
Tolerance for the rounding of estimated coefficients to zero.
Attributes
----------
coef_: numpy.array, shape (n_features,)
Estimated coefficients.
intercept_: float
Estimated intercept.
lambd_: float
Estimated lambda/overall penalty level.
psi_: numpy.array, shape (n_features, n_features)
Estimated penalty loadings.
n_iter_: int
Number of iterations performed by the rlasso algorithm.
n_features_in_: int
Number of features in the input data.
n_samples_: int
Number of samples/observations in the input data.
feature_names_in_: str
Feature names of ``X``. Only stored if
the input data is of type ``pd.DataFrame``.
Notes
-----
Rlasso minimizes the following loss function:
.. math:: \widehat{\\beta} = \\arg \min \\frac{1}{n} \lVert y_i - x_i'\\beta \\rVert_2^2 +\\frac{\lambda}{n} \sum^p_{j=1}\psi_j|\\beta_j|
Or in the case of square-root lasso when ``sqrt=True``:
.. math:: \widehat{\\beta} = \\arg \min \\frac{1}{\sqrt{n}} \lVert y_i - x_i'\\beta \\rVert_2 + \\frac{\lambda}{n} \sum^p_{j=1}\psi_j|\\beta_j|
Where :math:`\psi_{j}` are regressor specific penalty loadings and
:math:`\lambda` is the overall penalty level. For an introduction to
the rigorous lasso algorithm to estimate the penalty loadings and
the overall penalty level see [3]_ and [4]_.
References
----------
.. [1] Belloni, A., Chernozhukov, V., & Wang, L. (2011).
Square-root lasso: pivotal recovery of sparse signals via conic programming.
Biometrika, 98(4), 791-806.
.. [2] Belloni, A., & Chernozhukov, V. (2013). Least squares after model selection
in high-dimensional sparse models. Bernoulli, 19(2), 521-547.
.. [3] Ahrens, A., Hansen, C. B., & Schaffer, M. E. (2020). lassopack: Model
selection and prediction with regularized regression in Stata.
The Stata Journal, 20(1), 176-235.
.. [4] Chernozhukov, V., Hansen, C., & Spindler, M. (2016).
hdm: High-dimensional metrics. arXiv preprint arXiv:1608.00354.
Examples
--------
>>> import numpy as np
>>> from rlasso import Rlasso
>>> X = np.random.randn(100, 5)
>>> y = np.random.randn(100)
>>> rlasso = Rlasso()
>>> rlasso.fit(X, y)
"""
def __init__(
self,
*,
post=True,
sqrt=False,
fit_intercept=True,
cov_type="nonrobust",
x_dependent=False,
random_state=None,
lasso_psi=False,
prestd=False,
n_corr=5,
max_iter=2,
conv_tol=1e-4,
n_sim=5000,
c=1.1,
gamma=None,
solver="cd",
cd_max_iter=1000,
cd_tol=1e-10,
cvxpy_opts=None,
zero_tol=1e-4,
):
self.post = post
self.sqrt = sqrt
self.fit_intercept = fit_intercept
self.cov_type = cov_type
self.x_dependent = x_dependent
self.random_state = random_state
self.lasso_psi = lasso_psi
self.prestd = prestd
self.n_corr = n_corr
self.max_iter = max_iter
self.conv_tol = conv_tol
self.n_sim = n_sim
self.c = c
self.gamma = gamma
self.solver = solver
self.cd_max_iter = cd_max_iter
self.cd_tol = cd_tol
self.zero_tol = zero_tol
self.cvxpy_opts = cvxpy_opts
def _psi_calc(self, X, n, v=None):
"""Calculate the penalty loadings."""
# TODO Implement cluster robust covariance
# if prestandardized X, set loadings to ones
if self.prestd:
psi = np.ones(self.n_features_in_)
# sqrt case
elif self.sqrt:
if self.cov_type == "nonrobust":
psi = np.sqrt(np.mean(X**2, axis=0))
# heteroscedastic robust case
elif self.cov_type == "robust" and v is not None:
Xv2 = np.einsum("ij, i -> j", X**2, v**2)
psi_1 = np.sqrt(np.mean(X**2, axis=0))
psi_2 = np.sqrt(Xv2 / np.sum(v**2))
psi = np.maximum(psi_1, psi_2)
# clustered
else:
raise NotImplementedError("Cluster robust loadings not implemented")
elif self.cov_type == "nonrobust":
psi = np.sqrt(np.mean(X**2, axis=0))
elif self.cov_type == "robust" and v is not None:
Xe2 = np.einsum("ij, i -> j", X**2, v**2)
psi = np.sqrt(Xe2 / n)
else:
raise NotImplementedError("Cluster robust loadings not implemented")
if self.nopen_idx_ is not None and len(self.nopen_idx_) > 0:
psi[self.nopen_idx_] = 0.0
return psi
def _lambd_calc(
self,
n,
p,
X,
*,
v=None,
s1=None,
psi=None,
): # sourcery skip: remove-redundant-if
"""Calculate the lambda/overall penalty level."""
# TODO Always return both lambda and lambda scaled by RMSE
# for the purpose of comparison between specifications.
# TODO: Implement cluster robust case
# empirical gamma if not provided
gamma = self.gamma or 0.1 / np.log(n)
if psi is not None:
psi = np.diag(psi)
if self.sqrt:
lf = self.c
# x-independent (same for robust and nonrobust)
if not self.x_dependent:
prob = st.norm.ppf(1 - (gamma / (2 * p)))
lambd = lf * np.sqrt(n) * prob
elif self.cov_type == "nonrobust":
Xpsi = X @ la.inv(psi)
sims = np.empty(self.n_sim)
for r in range(self.n_sim):
g = self.random_state_.normal(size=(n, 1))
sg = np.mean(g**2)
sims[r] = sg * np.max(np.abs(np.sum(Xpsi * g, axis=0)))
lambd = lf * np.quantile(sims, 1 - gamma)
elif self.cov_type == "robust":
Xpsi = X @ la.inv(psi)
sims = np.empty(self.n_sim)
for r in range(self.n_sim):
g = self.random_state_.normal(size=(n, 1))
sg = np.mean(g**2)
sims[r] = sg * np.max(np.abs(np.sum(Xpsi * v[:, None] * g, axis=0)))
lambd = lf * np.quantile(sims, 1 - gamma)
else:
raise NotImplementedError("Cluster robust penalty not implemented")
else:
lf = 2 * self.c
# homoscedasticity and x-independent case
if self.cov_type == "nonrobust" and not self.x_dependent:
assert s1 is not None
proba = st.norm.ppf(1 - (gamma / (2 * p)))
# homoscedastic/non-robust case
lambd = lf * s1 * np.sqrt(n) * proba
elif self.cov_type == "nonrobust" and self.x_dependent:
assert psi is not None
sims = np.empty(self.n_sim)
Xpsi = X @ la.inv(psi)
for r in range(self.n_sim):
g = self.random_state_.normal(size=(n, 1))
sims[r] = np.max(np.abs(np.sum(Xpsi * g, axis=0)))
lambd = lf * s1 * np.quantile(sims, 1 - gamma)
# heteroscedastic/cluster robust and x-independent case
elif self.cov_type in ("robust", "cluster") and not self.x_dependent:
proba = st.norm.ppf(1 - (gamma / (2 * p)))
lambd = lf * np.sqrt(n) * proba
# heteroscedastic/cluster robust and x-dependent case
elif self.cov_type == "robust" and self.x_dependent:
sims = np.empty(self.n_sim)
Xpsi = X @ la.inv(psi)
for r in range(self.n_sim):
g = self.random_state_.normal(size=(n, 1))
sims[r] = np.max(np.abs(np.sum(Xpsi * v[:, None] * g, axis=0)))
lambd = lf * np.quantile(sims, 1 - gamma)
# heteroscedastic/cluster robust and x-dependent case
else:
raise NotImplementedError("Cluster robust penalty not implemented")
return lambd
def _cvxpy_solver(
self,
X,
y,
lambd,
psi,
n,
p,
):
"""
Solve the lasso problem using cvxpy
"""
beta = cp.Variable(p)
if self.sqrt:
loss = cp.norm2(y - X @ beta) / cp.sqrt(n)
else:
loss = cp.sum_squares(y - X @ beta) / n
reg = (lambd / n) * cp.norm1(np.diag(psi) @ beta)
objective = cp.Minimize(loss + reg)
prob = cp.Problem(objective)
prob.solve(**self.cvxpy_opts or {})
# round beta to zero if below threshold
beta = beta.value
beta[np.abs(beta) < self.zero_tol] = 0.0
return beta
def _OLS(self, X, y):
"""
Solve the OLS problem
"""
# add dim if X is 1-d
if X.ndim == 1:
X = X[:, None]
try:
return la.solve(X.T @ X, X.T @ y)
except la.LinAlgError:
warnings.warn("Singular matrix encountered. invoking lstsq solver for OLS")
return la.lstsq(X, y, rcond=None)[0]
def _post_lasso(self, beta, X, y):
"""Replace the non-zero lasso coefficients by OLS."""
nonzero_idx = np.where(beta != 0)[0]
X_sub = X[:, nonzero_idx]
post_beta = self._OLS(X_sub, y)
beta[nonzero_idx] = post_beta
return beta
def _starting_values(self, XX, Xy, lambd, psi):
"""Calculate starting values for the lasso."""
if self.sqrt:
return la.solve(XX + lambd * np.diag(psi**2), Xy)
else:
return la.solve(XX * 2 + lambd * np.diag(psi**2), Xy * 2)
def _fit(self, X, y, *, nopen_idx=None):
"""Helper function to fit the model."""
if self.max_iter < 0:
raise ValueError("`max_iter` cannot be negative")
if self.cov_type not in ("nonrobust", "robust"):
raise ValueError("cov_type must be one of 'nonrobust', 'robust'")
if self.solver not in ("cd", "cvxpy"):
raise ValueError("solver must be one of 'cd', 'cvxpy'")
if self.c < 1:
warnings.warn(
"c should be greater than 1 for the regularization"
" event to hold asymptotically"
)
if self.prestd and self.cov_type in ("robust", "cluster"):
warnings.warn(
"prestd is not implemented for robust penalty. "
"Data is assumed to be homoscedastic."
)
if nopen_idx is not None and not isinstance(nopen_idx, (list, np.ndarray)):
raise ValueError("nopen_idx must be a list or numpy array")
X, y = check_X_y(X, y, accept_sparse=False, ensure_min_samples=2)
self.nopen_idx_ = nopen_idx
p = self.n_features_in_ = X.shape[1]
n = X.shape[0]
# check random state
self.random_state_ = check_random_state(self.random_state)
# intercept and pre-standardization handling
if self.fit_intercept or self.prestd:
X_mean, y_mean = np.mean(X, axis=0), np.mean(y)
X, y = X - X_mean, y - y_mean
if self.prestd:
X_std, y_std = np.std(X, axis=0), np.std(y)
X, y = X / X_std, y / y_std
# pre-allocate arrays for coordinate descent solver
if self.solver == "cd":
# precompute XX and Xy crossprods
XX = X.T @ X
Xy = X.T @ y
# make matrices fortran contiguous
XX = np.asfortranarray(XX, dtype=np.float64)
X = np.asfortranarray(X, dtype=np.float64)
Xy = np.asfortranarray(Xy, dtype=np.float64)
y = np.asfortranarray(y, dtype=np.float64)
# sqrt lasso under homoscedasticity with x-independent penalty is a one-step estimator
if self.sqrt and self.cov_type == "nonrobust" and not self.x_dependent:
psi = self._psi_calc(X, n)
lambd = self._lambd_calc(n=n, p=p, X=X)
if self.solver == "cd":
beta_ridge = self._starting_values(XX, Xy, lambd, psi)
beta = _cd_solver(
X=X,
y=y,
XX=XX,
Xy=Xy,
lambd=lambd,
psi=psi,
starting_values=beta_ridge,
sqrt=self.sqrt,
fit_intercept=self.fit_intercept,
max_iter=self.cd_max_iter,
opt_tol=self.cd_tol,
zero_tol=self.zero_tol,
)
else:
beta = self._cvxpy_solver(
X=X,
y=y,
lambd=lambd,
psi=psi,
n=n,
p=p,
)
if self.post:
beta = self._post_lasso(beta, X, y)
# rescale beta
if self.prestd:
beta *= y_std / X_std
self.intercept_ = y_mean - X_mean @ beta if self.fit_intercept else 0.0
self.nonzero_idx_ = np.where(beta != 0)[0]
self.coef_ = beta
self.n_iter_ = 1
self.lambd_ = lambd
self.psi_ = psi
return
# calculate error based on initial
# highly correlated vars
r = np.empty(p)
for k in range(p):
r[k] = np.abs(st.pearsonr(X[:, k], y)[0])
X_top = X[:, np.argsort(r)[-self.n_corr :]]
beta0 = self._OLS(X_top, y)
v = y - X_top @ beta0
s1 = np.sqrt(np.mean(v**2))
psi = self._psi_calc(X=X, v=v, n=n)
lambd = self._lambd_calc(
n=n,
p=p,
v=v,
s1=s1,
X=X,
psi=psi,
)
# get initial estimates k=0
if self.solver == "cd":
beta_ridge = self._starting_values(XX, Xy, lambd, psi)
beta = _cd_solver(
X=X,
y=y,
XX=XX,
Xy=Xy,
lambd=lambd,
psi=psi,
starting_values=beta_ridge,
sqrt=self.sqrt,
fit_intercept=self.fit_intercept,
max_iter=self.cd_max_iter,
opt_tol=self.cd_tol,
zero_tol=self.zero_tol,
)
else:
beta = self._cvxpy_solver(
X=X,
y=y,
lambd=lambd,
psi=psi,
n=n,
p=p,
)
for k in range(self.max_iter):
s0 = s1
# post lasso handling
if not self.lasso_psi:
beta = self._post_lasso(beta, X, y)
# error refinement
v = y - X @ beta
s1 = np.sqrt(np.mean(v**2))
# if convergence not reached get new estimates of lambd and psi
psi = self._psi_calc(X=X, v=v, n=n)
lambd = self._lambd_calc(
n=n,
p=p,
v=v,
s1=s1,
X=X,
psi=psi,
)
if self.solver == "cd":
beta = _cd_solver(
X=X,
y=y,
XX=XX,
Xy=Xy,
lambd=lambd,
psi=psi,
starting_values=beta_ridge,
sqrt=self.sqrt,
fit_intercept=self.fit_intercept,
max_iter=self.cd_max_iter,
opt_tol=self.cd_tol,
zero_tol=self.zero_tol,
)
else:
beta = self._cvxpy_solver(
X=X,
y=y,
lambd=lambd,
psi=psi,
n=n,
p=p,
)
# check convergence
if np.abs(s1 - s0) < self.conv_tol:
break
# end of algorithm
if self.post and not self.lasso_psi:
beta = self._post_lasso(beta, X, y)
# rescale beta if standardized
if self.prestd:
beta *= y_std / X_std
self.intercept_ = y_mean - X_mean @ beta if self.fit_intercept else 0.0
self.nonzero_idx_ = np.where(beta != 0)[0]
self.coef_ = beta
self.n_iter_ = k + 1 if self.max_iter > 0 else 1
self.lambd_ = lambd
self.psi_ = psi
def fit(self, X, y, *, nopen_idx=None):
"""
Fit the model to the data.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Design matrix.
y: array-like, shape (n_samples,)
Target vector.
Returns
-------
self: object
Returns self.
"""
# store feature names if dataset is pandas
if isinstance(X, pd.DataFrame):
self.feature_names_ = X.columns
self._fit(X, y, nopen_idx=nopen_idx)
# sklearn estimator must return self
return self
def fit_formula(self, formula, data):
"""
Fit the model to the data using formula language.
Parameters
----------
formula: str
Formula to fit the model. Ex: "y ~ x1 + x2 + x3"
data: Union[pandas.DataFrame, numpy.recarray, dict]
Dataset to fit the model.
Returns
-------
self: object
Returns self.
"""
y, X = dmatrices(formula, data)
self.feature_names_in_ = X.design_info.column_names
X, y = np.asarray(X), np.asarray(y)
y = y.flatten()
# check if intercept is in data
if "Intercept" in self.feature_names_in_:
if not self.fit_intercept:
raise ValueError(
(
"Intercept is in data but fit_intercept is False."
" Set fit_intercept to True to fit intercept or"
" update the formula to remove the intercept"
)
)
# drop column of ones from X
# since intercept calculated in _fit
# by partialing out
X = X[:, 1:]
self._fit(X, y)
# sklearn estimator must return self
return self
def predict(self, X):
"""
Use fitted model to predict on new data.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Design matrix.
Returns
-------
y_pred: array-like, shape (n_samples,)
Predicted target values.
"""
# check if fitted
check_is_fitted(self)
X = check_array(X)
return self.intercept_ + X @ self.coef_
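# Hedged usage sketch (not part of the original module): illustrates the
# ``fit_formula`` interface documented above on synthetic data. The column
# names and coefficients are illustrative assumptions; patsy adds an
# "Intercept" column which fit_formula drops before calling _fit.
def _rlasso_formula_example():
    rng = np.random.default_rng(0)
    x1 = rng.standard_normal(200)
    x2 = rng.standard_normal(200)
    y = 2.0 * x1 + 0.1 * rng.standard_normal(200)
    df = pd.DataFrame({"y": y, "x1": x1, "x2": x2})
    rl = Rlasso(post=True).fit_formula("y ~ x1 + x2", data=df)
    # x1 should be selected with a coefficient near 2, x2 near 0
    return rl.coef_, rl.intercept_, rl.lambd_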
class RlassoLogit(BaseEstimator, ClassifierMixin):
def __init__(
self,
post=True,
fit_intercept=True,
c=1.1,
gamma=0.05,
zero_tol=1e-4,
solver_opts=None,
):
"""Rigorous Lasso Logistic Regression."""
self.post = post
self.fit_intercept = fit_intercept
self.c = c
self.gamma = gamma
self.zero_tol = zero_tol
self.solver_opts = solver_opts
def _criterion_function(self, X, y, beta, lambd, n, regularization=True):
"""Criterion function for the penalized Lasso Logistic Regression."""
ll = cp.sum(cp.multiply(y, X @ beta) - cp.logistic(X @ beta)) / n
if not regularization:
return -ll
reg = (lambd / n) * cp.norm1(beta)
return -(ll - reg)
def _cvxpy_solve(self, X, y, lambd, n, p):
"""Solve the problem using cvxpy."""
beta = cp.Variable(p)
obj = cp.Minimize(self._criterion_function(X, y, beta, lambd, n))
prob = cp.Problem(obj)
# solve problem and return beta
prob.solve(**self.solver_opts or {})
beta = beta.value
beta[np.abs(beta) < self.zero_tol] = 0.0
return beta
def _decision_function(self, X, beta):
"""Compute the decision function of the model."""
return 1 / (1 + np.exp(-X @ beta))
def _lambd_calc(self, n, p):
lambd0 = (self.c / 2) * np.sqrt(n) * st.norm.ppf(1 - self.gamma / (2 * p))
lambd = lambd0 / (2 * n)
return lambd0, lambd
def _fit(self, X, y, *, gamma=None):
n, p = X.shape
if gamma is None:
gamma = 0.1 / np.log(n)
lambd0, lambd = self._lambd_calc(n, p)
beta = self._cvxpy_solve(X, y, lambd, n, p)
return {"beta": beta, "lambd0": lambd0, "lambd": lambd}
def fit(self, X, y, *, gamma=None):
"""Fit the model to the data.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Design matrix.
y: array-like, shape (n_samples,)
Target vector.
gamma: float, optional (default: 0.1 / np.log(n_samples))
Returns
-------
self: object
Returns self.
"""
# check inputs
X, y = check_X_y(X, y, accept_sparse=True, ensure_2d=True)
# assert y is binary
if np.unique(y).shape[0] != 2:
    raise ValueError("y must be binary")
from __future__ import division
import numpy as np
import scipy
import scipy.stats
import scipy.fftpack
import scipy.optimize
import stingray.lightcurve as lightcurve
import stingray.utils as utils
from stingray.exceptions import StingrayError
from stingray.gti import cross_two_gtis, bin_intervals_from_gtis, check_gtis
__all__ = ["Crossspectrum", "AveragedCrossspectrum", "coherence"]
def coherence(lc1, lc2):
"""
Estimate coherence function of two light curves.
Parameters
----------
lc1: lightcurve.Lightcurve object
The first light curve data for the channel of interest.
lc2: lightcurve.Lightcurve object
The light curve data for reference band
Returns
-------
coh : np.ndarray
Coherence function
"""
if not isinstance(lc1, lightcurve.Lightcurve):
raise TypeError("lc1 must be a lightcurve.Lightcurve object")
if not isinstance(lc2, lightcurve.Lightcurve):
raise TypeError("lc2 must be a lightcurve.Lightcurve object")
cs = Crossspectrum(lc1, lc2, norm='none')
return cs.coherence()
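# Hedged usage sketch (not part of the original module): two toy light curves
# on the same time grid passed through the coherence helper above. Bin size,
# count levels and the phase offset are arbitrary illustrations.
def _coherence_usage_example():
    times = np.arange(0.0, 8.0, 0.5)
    counts1 = 100 + 10 * np.sin(2 * np.pi * times / 4.0)
    counts2 = 100 + 10 * np.sin(2 * np.pi * times / 4.0 + 0.3)
    lc1 = lightcurve.Lightcurve(times, counts1)
    lc2 = lightcurve.Lightcurve(times, counts2)
    return coherence(lc1, lc2)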
class Crossspectrum(object):
def __init__(self, lc1=None, lc2=None, norm='none', gti=None):
"""
Make a cross spectrum from a (binned) light curve.
You can also make an empty Crossspectrum object to populate with your
own fourier-transformed data (this can sometimes be useful when making
binned periodograms).
Parameters
----------
lc1: lightcurve.Lightcurve object, optional, default None
The first light curve data for the channel/band of interest.
lc2: lightcurve.Lightcurve object, optional, default None
The light curve data for the reference band.
norm: {'frac', 'abs', 'leahy', 'none'}, default 'none'
The normalization of the (real part of the) cross spectrum.
Other Parameters
----------------
gti: 2-d float array
[[gti0_0, gti0_1], [gti1_0, gti1_1], ...] -- Good Time intervals.
This choice overrides the GTIs in the single light curves. Use with
care!
Attributes
----------
freq: numpy.ndarray
The array of mid-bin frequencies that the Fourier transform samples
power: numpy.ndarray
The array of cross spectra (complex numbers)
df: float
The frequency resolution
m: int
The number of averaged cross-spectra amplitudes in each bin.
n: int
The number of data points/time bins in one segment of the light
curves.
nphots1: float
The total number of photons in light curve 1
nphots2: float
The total number of photons in light curve 2
"""
if isinstance(norm, str) is False:
raise TypeError("norm must be a string")
if norm.lower() not in ["frac", "abs", "leahy", "none"]:
raise ValueError("norm must be 'frac', 'abs', 'leahy', or 'none'!")
self.norm = norm.lower()
# check if input data is a Lightcurve object, if not make one or
# make an empty Crossspectrum object if lc1 == None or lc2 == None
if lc1 is None or lc2 is None:
if lc1 is not None or lc2 is not None:
raise TypeError("You can't do a cross spectrum with just one "
"light curve!")
else:
self.freq = None
self.power = None
self.df = None
self.nphots1 = None
self.nphots2 = None
self.m = 1
self.n = None
return
self.gti = gti
self.lc1 = lc1
self.lc2 = lc2
self._make_crossspectrum(lc1, lc2)
def _make_crossspectrum(self, lc1, lc2):
# make sure the inputs work!
if not isinstance(lc1, lightcurve.Lightcurve):
raise TypeError("lc1 must be a lightcurve.Lightcurve object")
if not isinstance(lc2, lightcurve.Lightcurve):
raise TypeError("lc2 must be a lightcurve.Lightcurve object")
# Then check that GTIs make sense
if self.gti is None:
self.gti = cross_two_gtis(lc1.gti, lc2.gti)
check_gtis(self.gti)
if self.gti.shape[0] != 1:
raise TypeError("Non-averaged Cross Spectra need "
"a single Good Time Interval")
lc1 = lc1.split_by_gti()[0]
lc2 = lc2.split_by_gti()[0]
# total number of photons is the sum of the
# counts in the light curve
self.nphots1 = np.float64(np.sum(lc1.counts))
self.nphots2 = np.float64(np.sum(lc2.counts))
self.meancounts1 = np.mean(lc1.counts)
self.meancounts2 = np.mean(lc2.counts)
# the number of data points in the light curve
if lc1.n != lc2.n:
raise StingrayError("Light curves do not have same number "
"of time bins per segment.")
if lc1.dt != lc2.dt:
raise StingrayError("Light curves do not have "
"same time binning dt.")
self.n = lc1.n
# the frequency resolution
self.df = 1.0/lc1.tseg
# the number of averaged periodograms in the final output
# This should *always* be 1 here
self.m = 1
# make the actual Fourier transform and compute cross spectrum
self.freq, self.unnorm_power = self._fourier_cross(lc1, lc2)
# If co-spectrum is desired, normalize here. Otherwise, get raw back
# with the imaginary part still intact.
self.power = self._normalize_crossspectrum(self.unnorm_power, lc1.tseg)
def _fourier_cross(self, lc1, lc2):
"""
Fourier transform the two light curves, then compute the cross spectrum.
Computed as CS = lc1 x lc2* (where lc2 is the one that gets
complex-conjugated)
Parameters
----------
lc1: lightcurve.Lightcurve object
One light curve to be Fourier transformed. This is the band of
interest or channel of interest.
lc2: lightcurve.Lightcurve object
Another light curve to be Fourier transformed.
This is the reference band.
Returns
-------
freq: numpy.ndarray
    The positive Fourier frequencies
cross: numpy.ndarray
    The unnormalized cross spectrum at the positive frequencies
"""
fourier_1 = scipy.fftpack.fft(lc1.counts) # do Fourier transform 1
fourier_2 = scipy.fftpack.fft(lc2.counts) # do Fourier transform 2
freqs = scipy.fftpack.fftfreq(lc1.n, lc1.dt)
cross = fourier_1[freqs > 0] * np.conj(fourier_2[freqs > 0])
return freqs[freqs > 0], cross
def rebin(self, df, method="mean"):
"""
Rebin the cross spectrum to a new frequency resolution df.
Parameters
----------
df: float
The new frequency resolution
Returns
-------
bin_cs = Crossspectrum object
The newly binned cross spectrum
"""
# rebin cross spectrum to new resolution
binfreq, bincs, step_size = utils.rebin_data(self.freq,
self.power, df,
method=method)
# make an empty cross spectrum object
# note: syntax deliberate to work with subclass Powerspectrum
bin_cs = self.__class__()
# store the binned periodogram in the new object
bin_cs.freq = binfreq
bin_cs.power = bincs
bin_cs.df = df
bin_cs.n = self.n
bin_cs.norm = self.norm
bin_cs.nphots1 = self.nphots1
bin_cs.nphots2 = self.nphots2
bin_cs.m = int(step_size)*self.m
return bin_cs
def _normalize_crossspectrum(self, unnorm_power, tseg):
"""
Normalize the real part of the cross spectrum to Leahy, absolute rms^2,
fractional rms^2 normalization, or not at all.
Parameters
----------
unnorm_power: numpy.ndarray
The unnormalized cross spectrum.
tseg: int
The length of the Fourier segment, in seconds.
Returns
-------
power: numpy.nd.array
The normalized co-spectrum (real part of the cross spectrum). For
'none' normalization, imaginary part is returned as well.
"""
# The "effective" counst/bin is the geometrical mean of the counts/bin
# of the two light curves
log_nphots1 = np.log(self.nphots1)
log_nphots2 = np.log(self.nphots2)
actual_nphots = np.float64(np.sqrt(np.exp(log_nphots1 + log_nphots2)))
actual_mean = np.sqrt(self.meancounts1 * self.meancounts2)
assert actual_mean > 0.0, \
"Mean count rate is <= 0. Something went wrong."
if self.norm.lower() == 'leahy':
c = unnorm_power.real
power = c * 2. / actual_nphots
elif self.norm.lower() == 'frac':
c = unnorm_power.real / float(self.n**2.)
power = c * 2. * tseg / (actual_mean**2.0)
elif self.norm.lower() == 'abs':
c = unnorm_power.real / float(self.n**2.)
power = c * (2. * tseg)
elif self.norm.lower() == 'none':
power = unnorm_power
else:
raise Exception("Normalization not recognized!")
return power
def rebin_log(self, f=0.01):
"""
Logarithmic rebin of the periodogram.
The new frequency depends on the previous frequency
modified by a factor f:
dnu_j = dnu_{j-1}*(1+f)
Parameters
----------
f: float, optional, default 0.01
parameter that steers the frequency resolution
Returns
-------
binfreq: numpy.ndarray
the binned frequencies
binpower: numpy.ndarray
the binned powers
nsamples: numpy.ndarray
the samples of the original periodogram included in each
frequency bin
"""
minfreq = self.freq[1] * 0.5 # frequency to start from
maxfreq = self.freq[-1] # maximum frequency to end
binfreq = [minfreq, minfreq + self.df] # first
df = self.freq[1] # the frequency resolution of the first bin
# until we reach the maximum frequency, increase the width of each
# frequency bin by f
while binfreq[-1] <= maxfreq:
binfreq.append(binfreq[-1] + df*(1.0+f))
df = binfreq[-1] - binfreq[-2]
# compute the mean of the powers that fall into each new frequency bin.
# we cast to np.double due to scipy's bad handling of longdoubles
binpower, bin_edges, binno = scipy.stats.binned_statistic(
self.freq.astype(np.double), self.power.astype(np.double),
statistic="mean", bins=binfreq)
# compute the number of powers in each frequency bin
nsamples = np.array([len(binno[np.where(binno == i)[0]])
for i in range(np.max(binno))])
# the frequency resolution
df = np.diff(binfreq)
# shift the lower bin edges to the middle of the bin and drop the
# last right bin edge
binfreq = binfreq[:-1] + df/2
return binfreq, binpower, nsamples
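# Hedged illustration of the geometric growth implemented above: the j-th bin
# width is roughly df * (1 + f)**j, so bins are near-constant at low frequency
# and widen smoothly toward high frequency. Assuming ``cs`` is an existing
# Crossspectrum instance:
#
#     binfreq, binpower, nsamples = cs.rebin_log(f=0.05)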
def coherence(self):
"""
Compute Coherence function of the cross spectrum. Coherence is a
Fourier frequency dependent measure of the linear correlation
between time series measured simultaneously in two energy channels.
Returns
-------
coh : numpy.ndarray
Coherence function
References
----------
.. [1] http://iopscience.iop.org/article/10.1086/310430/pdf
"""
# this computes the averaged power spectrum, but using the
# cross spectrum code to avoid circular imports
ps1 = Crossspectrum(self.lc1, self.lc1)
ps2 = Crossspectrum(self.lc2, self.lc2)
return self.unnorm_power/(ps1.unnorm_power * ps2.unnorm_power)
def _phase_lag(self):
"""Return the fourier phase lag of the cross spectrum."""
return np.angle(self.power)
def time_lag(self):
"""
Calculate the fourier time lag of the cross spectrum. The time lag is
calculated using the centers of the frequency bins.
"""
if self.__class__ in [Crossspectrum, AveragedCrossspectrum]:
ph_lag = self._phase_lag()
return ph_lag / (2 * np.pi * self.freq)
else:
raise AttributeError("Object has no attribute named 'time_lag' !")
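# Hedged usage sketch (not part of the original module): a minimal cross
# spectrum between two toy light curves, with Leahy normalization and the
# frequency-dependent time lags. The injected 0.05 s shift is illustrative.
def _crossspectrum_usage_example():
    dt = 0.1
    times = np.arange(0.0, 20.0, dt)
    counts1 = 100 + 20 * np.sin(2 * np.pi * times)
    counts2 = 100 + 20 * np.sin(2 * np.pi * (times - 0.05))
    lc1 = lightcurve.Lightcurve(times, counts1)
    lc2 = lightcurve.Lightcurve(times, counts2)
    cs = Crossspectrum(lc1, lc2, norm='leahy')
    return cs.freq, cs.power, cs.time_lag()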
class AveragedCrossspectrum(Crossspectrum):
def __init__(self, lc1=None, lc2=None, segment_size=None,
norm='none', gti=None):
"""
Make an averaged cross spectrum from a light curve by segmenting two
light curves, Fourier-transforming each segment and then averaging the
resulting cross spectra.
Parameters
----------
lc1: lightcurve.Lightcurve object OR
iterable of lightcurve.Lightcurve objects
One light curve data to be Fourier-transformed. This is the band
of interest or channel of interest.
lc2: lightcurve.Lightcurve object OR
iterable of lightcurve.Lightcurve objects
Second light curve data to be Fourier-transformed. This is the
reference band.
segment_size: float
The size of each segment to average. Note that if the total
duration of each Lightcurve object in lc1 or lc2 is not an
integer multiple of the segment_size, then any fraction left over
at the end of the time series will be dropped; keeping it would
introduce artefacts.
norm: {'frac', 'abs', 'leahy', 'none'}, default 'none'
The normalization of the (real part of the) cross spectrum.
Other Parameters
----------------
gti: 2-d float array
[[gti0_0, gti0_1], [gti1_0, gti1_1], ...] -- Good Time intervals.
This choice overrides the GTIs in the single light curves. Use with
care!
Attributes
----------
freq: numpy.ndarray
The array of mid-bin frequencies that the Fourier transform samples
power: numpy.ndarray
The array of cross spectra
df: float
The frequency resolution
m: int
The number of averaged cross spectra
n: int
The number of time bins in one segment of the light curves
nphots1: float
The total number of photons in the first (interest) light curve
nphots2: float
The total number of photons in the second (reference) light curve
gti: 2-d float array
[[gti0_0, gti0_1], [gti1_0, gti1_1], ...] -- Good Time intervals.
They are calculated by taking the common GTI between the
two light curves
"""
self.type = "crossspectrum"
if segment_size is not None:
if not np.isfinite(segment_size):
raise ValueError("segment_size must be finite")
self.segment_size = segment_size
Crossspectrum.__init__(self, lc1, lc2, norm, gti=gti)
return
def _make_segment_spectrum(self, lc1, lc2, segment_size):
# TODO: need to update this for making cross spectra.
assert isinstance(lc1, lightcurve.Lightcurve)
assert isinstance(lc2, lightcurve.Lightcurve)
if lc1.dt != lc2.dt:
raise ValueError("Light curves do not have same time binning dt.")
if lc1.tseg != lc2.tseg:
raise ValueError("Lightcurves do not have same tseg.")
if self.gti is None:
self.gti = cross_two_gtis(lc1.gti, lc2.gti)
check_gtis(self.gti)
cs_all = []
nphots1_all = []
nphots2_all = []
start_inds, end_inds = \
bin_intervals_from_gtis(self.gti, segment_size, lc1.time)
for start_ind, end_ind in zip(start_inds, end_inds):
time_1 = lc1.time[start_ind:end_ind]
counts_1 = lc1.counts[start_ind:end_ind]
time_2 = lc2.time[start_ind:end_ind]
counts_2 = lc2.counts[start_ind:end_ind]
lc1_seg = lightcurve.Lightcurve(time_1, counts_1)
lc2_seg = lightcurve.Lightcurve(time_2, counts_2)
cs_seg = Crossspectrum(lc1_seg, lc2_seg, norm=self.norm)
cs_all.append(cs_seg)
nphots1_all.append(np.sum(lc1_seg.counts))
nphots2_all.append(np.sum(lc2_seg.counts))
return cs_all, nphots1_all, nphots2_all
def _make_crossspectrum(self, lc1, lc2):
# chop light curves into segments
if isinstance(lc1, lightcurve.Lightcurve) and \
isinstance(lc2, lightcurve.Lightcurve):
if self.type == "crossspectrum":
self.cs_all, nphots1_all, nphots2_all = \
self._make_segment_spectrum(lc1, lc2, self.segment_size)
elif self.type == "powerspectrum":
self.cs_all, nphots1_all = \
self._make_segment_spectrum(lc1, self.segment_size)
else:
self.cs_all, nphots1_all, nphots2_all = [], [], []
# TODO: should be using izip from iterables if lc1 or lc2 could
# be long
for lc1_seg, lc2_seg in zip(lc1, lc2):
if self.type == "crossspectrum":
cs_sep, nphots1_sep, nphots2_sep = \
self._make_segment_spectrum(lc1_seg, lc2_seg,
self.segment_size)
nphots2_all.append(nphots2_sep)
elif self.type == "powerspectrum":
cs_sep, nphots1_sep = \
self._make_segment_spectrum(lc1_seg, self.segment_size)
else:
raise Exception("Type of spectrum not recognized!")
self.cs_all.append(cs_sep)
nphots1_all.append(nphots1_sep)
self.cs_all = np.hstack(self.cs_all)
nphots1_all = np.hstack(nphots1_all)
if self.type == "crossspectrum":
nphots2_all = np.hstack(nphots2_all)
m = len(self.cs_all)
nphots1 = np.mean(nphots1_all)
power_avg = np.zeros_like(self.cs_all[0].power)
for cs in self.cs_all:
power_avg += cs.power
power_avg /= float(m)
self.freq = self.cs_all[0].freq
self.power = power_avg
self.m = m
self.df = self.cs_all[0].df
self.n = self.cs_all[0].n
self.nphots1 = nphots1
if self.type == "crossspectrum":
self.nphots1 = nphots1
nphots2 = np.mean(nphots2_all)
self.nphots2 = nphots2
def coherence(self):
"""
Compute an averaged Coherence function of cross spectrum by computing
coherence function of each segment and averaging them. The return type
is a tuple with first element as the coherence function and the second
element as the corresponding uncertainty[1] associated with it.
Note : The uncertainty in coherence function is strictly valid for
Gaussian statistics only.
Returns
-------
tuple : tuple of np.ndarray
Tuple of coherence function and uncertainty.
References
----------
.. [1] http://iopscience.iop.org/article/10.1086/310430/pdf
"""
if self.m < 50:
utils.simon("Number of segments used in averaging is "
"significantly low. The result might not follow the "
"expected statistical distributions.")
# Calculate average coherence
unnorm_power_avg = np.zeros_like(self.cs_all[0].unnorm_power)
for cs in self.cs_all:
unnorm_power_avg += cs.unnorm_power
unnorm_power_avg /= self.m
num = np.abs(unnorm_power_avg)**2
# this computes the averaged power spectrum, but using the
# cross spectrum code to avoid circular imports
aps1 = AveragedCrossspectrum(self.lc1, self.lc1,
segment_size=self.segment_size)
aps2 = AveragedCrossspectrum(self.lc2, self.lc2,
segment_size=self.segment_size)
unnorm_powers_avg_1 = np.zeros_like(aps1.cs_all[0].unnorm_power)
for ps in aps1.cs_all:
unnorm_powers_avg_1 += ps.unnorm_power
unnorm_powers_avg_2 = np.zeros_like(aps2.cs_all[0].unnorm_power)
import math
import numpy as np
import pandas as pd
from multiprocessing import Pool
from scipy.special import expit
from scipy.stats import beta
from opaque.beta_regression import BetaRegressor
from opaque.stats import equal_tailed_interval, KL_beta
from opaque.simulations.prevalence import run_trial_for_theta
class EndtoEndSimulator:
def __init__(
self,
sens_coefs_mean,
sens_coefs_disp,
spec_coefs_mean,
spec_coefs_disp,
sens_noise_mean=0.0,
sens_noise_disp=0.0,
spec_noise_mean=0.0,
spec_noise_disp=0.0,
cov=None,
n_mean=6.0,
n_sigma=1.0,
random_state=None,
n_jobs=1,
):
if cov is None:
cov = np.diag(np.full(len(sens_coefs_mean) - 1, 1.0))
else:
cov = np.array(cov)
if random_state is None:
self.random_state = np.random.RandomState()
elif isinstance(random_state, int):
self.random_state = np.random.RandomState(random_state)
else:
self.random_state = random_state
assert len(sens_coefs_mean) == len(sens_coefs_disp) == cov.shape[0] + 1
assert len(spec_coefs_mean) == len(spec_coefs_disp) == cov.shape[0] + 1
self.sens_coefs_mean = np.array(sens_coefs_mean)
from __future__ import division
import numpy as np
from resample.utils import eqf
from scipy.stats import (norm, laplace,
gamma, f as F,
t, beta, lognorm,
pareto, logistic,
invgauss, poisson)
def jackknife(a, f=None):
"""
Calculate jackknife estimates for a given sample
and estimator, return leave-one-out samples
if estimator is not specified
Parameters
----------
a : array-like
Sample
f : callable
Estimator
Returns
-------
y | X : np.array
Jackknife estimates
"""
arr = np.asarray([a] * len(a))
X = np.asarray([np.delete(x, i, 0) for i, x in enumerate(arr)])
if f is None:
return X
else:
return np.asarray([f(x) for x in X])
def jackknife_bias(a, f):
"""
Calculate jackknife estimate of bias
Parameters
----------
a : array-like
Sample
f : callable
Estimator
Returns
-------
y : float
Jackknife estimate of bias
"""
return (len(a) - 1) * np.mean(jackknife(a, f) - f(a))
def jackknife_variance(a, f):
"""
Calculate jackknife estimate of variance
Parameters
----------
a : array-like
Sample
f : callable
Estimator
Returns
-------
y : float
Jackknife estimate of variance
"""
x = jackknife(a, f)
return (len(a) - 1) * np.mean((x - np.mean(x))**2)
def empirical_influence(a, f):
"""
Calculate the empirical influence function for a given
sample and estimator using the jackknife method
Parameters
----------
a : array-like
Sample
f : callable
Estimator
Returns
-------
y : np.array
Empirical influence values
"""
return (len(a) - 1) * (f(a) - jackknife(a, f))
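# Hedged usage sketch (not part of the original module): the jackknife helpers
# above applied to a toy sample, with np.mean as an illustrative estimator.
def _jackknife_usage_example():
    a = np.arange(10.0)
    loo = jackknife(a)                      # 10 leave-one-out samples of size 9
    bias = jackknife_bias(a, np.mean)       # ~0, the sample mean is unbiased
    var = jackknife_variance(a, np.mean)    # jackknife estimate of Var(mean)
    infl = empirical_influence(a, np.mean)  # influence of each observation
    return loo.shape, bias, var, infl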
def bootstrap(a, f=None, b=100, method="balanced", family=None,
strata=None, smooth=False, random_state=None):
"""
Calculate function values from bootstrap samples or
optionally return bootstrap samples themselves
Parameters
----------
a : array-like
Original sample
f : callable or None
Function to be bootstrapped
b : int
Number of bootstrap samples
method : string
* 'ordinary'
* 'balanced'
* 'parametric'
family : string or None
* 'gaussian'
* 't'
* 'laplace'
* 'logistic'
* 'F'
* 'gamma'
* 'log-normal'
* 'inverse-gaussian'
* 'pareto'
* 'beta'
* 'poisson'
strata : array-like or None
Stratification labels, ignored when method
is parametric
smooth : boolean
Whether or not to add noise to bootstrap
samples, ignored when method is parametric
random_state : int or None
Random number seed
Returns
-------
y | X : np.array
Function applied to each bootstrap sample
or bootstrap samples if f is None
"""
np.random.seed(random_state)
a = np.asarray(a)
n = len(a)
# stratification not meaningful for parametric sampling
if strata is not None and (method != "parametric"):
strata = np.asarray(strata)
if len(strata) != len(a):
raise ValueError("a and strata must have"
" the same length")
# recursively call bootstrap without stratification
# on the different strata
masks = [strata == x for x in np.unique(strata)]
boot_strata = [bootstrap(a=a[m],
f=None,
b=b,
method=method,
strata=None,
random_state=random_state) for m in masks]
# concatenate resampled strata along first column axis
X = np.concatenate(boot_strata, axis=1)
else:
if method == "ordinary":
# i.i.d. sampling from ecdf of a
X = np.reshape(a[np.random.choice(range(a.shape[0]),
a.shape[0] * b)],
newshape=(b,) + a.shape)
elif method == "balanced":
# permute b concatenated copies of a
r = np.reshape([a] * b,
newshape=(b * a.shape[0],) + a.shape[1:])
X = np.reshape(r[np.random.permutation(range(r.shape[0]))],
newshape=(b,) + a.shape)
elif method == "parametric":
if len(a.shape) > 1:
raise ValueError("a must be one-dimensional")
# fit parameters by maximum likelihood and sample
if family == "gaussian":
theta = norm.fit(a)
arr = norm.rvs(size=n*b,
loc=theta[0],
scale=theta[1],
random_state=random_state)
elif family == "t":
theta = t.fit(a, fscale=1)
arr = t.rvs(size=n*b,
df=theta[0],
loc=theta[1],
scale=theta[2],
random_state=random_state)
elif family == "laplace":
theta = laplace.fit(a)
arr = laplace.rvs(size=n*b,
loc=theta[0],
scale=theta[1],
random_state=random_state)
elif family == "logistic":
theta = logistic.fit(a)
arr = logistic.rvs(size=n*b,
loc=theta[0],
scale=theta[1],
random_state=random_state)
elif family == "F":
theta = F.fit(a, floc=0, fscale=1)
arr = F.rvs(size=n*b,
dfn=theta[0],
dfd=theta[1],
loc=theta[2],
scale=theta[3],
random_state=random_state)
elif family == "gamma":
theta = gamma.fit(a, floc=0)
arr = gamma.rvs(size=n*b,
a=theta[0],
loc=theta[1],
scale=theta[2],
random_state=random_state)
elif family == "log-normal":
theta = lognorm.fit(a, floc=0)
arr = lognorm.rvs(size=n*b,
s=theta[0],
loc=theta[1],
scale=theta[2],
random_state=random_state)
elif family == "inverse-gaussian":
theta = invgauss.fit(a, floc=0)
arr = invgauss.rvs(size=n*b,
mu=theta[0],
loc=theta[1],
scale=theta[2],
random_state=random_state)
elif family == "pareto":
theta = pareto.fit(a, floc=0)
arr = pareto.rvs(size=n*b,
b=theta[0],
loc=theta[1],
scale=theta[2],
random_state=random_state)
elif family == "beta":
theta = beta.fit(a)
arr = beta.rvs(size=n*b,
a=theta[0],
b=theta[1],
loc=theta[2],
scale=theta[3],
random_state=random_state)
elif family == "poisson":
theta = np.mean(a)
arr = poisson.rvs(size=n*b,
mu=theta,
random_state=random_state)
else:
raise ValueError("Invalid family")
X = np.reshape(arr, newshape=(b, n))
"""
Script to compute the number of JSON files created for each
word. The output is a .csv file that lists, for each word,
the number of JSON files in the train, val and test subsets.
import os
import json
import re
import numpy as np
def get_rect_and_landmarks(rect, landmarks):
# converts and returns the face rectangle and landmarks
# in formats appropriate for the display function
x = rect["left"]
y = rect["top"]
w = rect["width"]
h = rect["height"]
if landmarks is not None:
temp_agg = list()
for i in range(len(landmarks)):
temp = list()
temp.append(landmarks["point-" + str(i+1)]["x"])
temp.append(landmarks["point-" + str(i+1)]["y"])
temp_agg.append(temp)
return (x, y, w, h), np.asarray(temp_agg)
else:
return (x, y, w, h), np.empty((0, 0))
"""
Geographic Index
----------------
"""
from typing import Any, Dict, Iterable, List, Optional, Tuple
import json
import numpy
import xarray
from . import lock
from . import storage
from . import converter
from .. import geodetic
from ..core import geohash
class GeoHash:
"""Geogrophic index based on GeoHash encoding.
Args:
store (AbstractMutableMapping): Object managing the storage of the
index.
precision (int): Accuracy of the index. By default the precision is 3
characters. The table below gives the correspondence between the
number of characters (i.e. the ``precision`` parameter of this
constructor), the size of the boxes of the grid at the equator and
the total number of boxes.
========= =============== ==========
precision lng/lat (km) samples
========= =============== ==========
1 4950/4950 32
2 618.75/1237.50 1024
3 154.69/154.69 32768
4 19.34/38.67 1048576
5 4.83/4.83 33554432
6 0.60/1.21 1073741824
========= =============== ==========
synchronizer (lock.Synchronizer, optional): Write synchronizer.
"""
PROPERTIES = b'.properties'
def __init__(self,
store: storage.AbstractMutableMapping,
precision: int = 3,
synchronizer: Optional[lock.Synchronizer] = None) -> None:
self._store = store
self._precision = precision
self._synchronizer = synchronizer or lock.PuppetSynchronizer()
@property
def store(self) -> storage.AbstractMutableMapping:
"""Gets the object hndling the storage of this instance."""
return self._store
@property
def precision(self) -> int:
"""Accuracy of this instance."""
return self._precision
def set_properties(self) -> None:
"""Definition of index properties."""
if self.PROPERTIES in self._store:
raise RuntimeError("index already initialized")
self._store[self.PROPERTIES] = json.dumps(
{'precision': self._precision})
@classmethod
def get_properties(cls, store) -> Dict[str, Any]:
"""Reading index properties.
Returns:
dict: Index properties (number of character used to encode a
position).
"""
precision = store[cls.PROPERTIES]
if isinstance(precision, list):
precision = precision[0]
return json.loads(precision)
def encode(self,
lon: numpy.ndarray,
lat: numpy.ndarray,
normalize: bool = True,
unicode: bool = False) -> numpy.ndarray:
"""Encode points into geohash with the given precision
Args:
lon (numpy.ndarray): Longitudes in degrees of the positions to be
encoded.
lat (numpy.ndarray): Latitudes in degrees of the positions to be
encoded.
normalize (bool): If true, normalize longitude between [-180, 180[
unicode (bool): If true, transforms GeoHash codes into unicode
strings.
Returns:
numpy.ndarray: geohash code for each coordinates of the points
read from the vectors provided.
"""
if normalize:
lon = (lon + 180) % 360 - 180
result = geohash.encode(lon, lat, precision=self._precision)
if unicode:
return result.astype('U')
return result
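# Hedged usage sketch of the encoder above; the store object and the
# coordinates are hypothetical, any storage.AbstractMutableMapping works:
#
#     idx = GeoHash(store, precision=3)
#     codes = idx.encode(numpy.array([2.35, -74.0]),
#                        numpy.array([48.85, 40.71]))
#     idx.update(zip(codes, [dict(city="Paris"), dict(city="New York")]))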
def update(self, other: Iterable[Tuple[bytes, Any]]) -> None:
"""Update the index with the key/value pairs from data, overwriting
existing keys.
Args:
other (iterable): Geohash codes associated with the values to be
stored in the database.
"""
with self._synchronizer:
geohash_map = dict()
geohash.update_dict(geohash_map, other)
self._store.update(geohash_map.items())
def extend(self, other: Iterable[Tuple[bytes, Any]]) -> None:
"""Update the index with the key/value pairs from data, appending
existing keys with the new data.
Args:
other (iterable): Geohash codes associated with the values to be
updated in the database.
"""
with self._synchronizer:
geohash_map = dict()
geohash.update_dict(geohash_map, other)
self._store.extend(geohash_map.items())
def keys(self, box: Optional[geodetic.Box] = None) -> Iterable[bytes]:
"""Returns all hash defined in the index.
Args:
box (pyinterp.geodetic.Box, optional): If true, the method returns
the codes defined in the supplied area, otherwise all the codes
stored in the index.
Returns:
iterable: keys selected in the index.
"""
result = filter(lambda item: item != self.PROPERTIES,
self._store.keys())
if box is None:
return result
return set(geohash.bounding_boxes(
box, precision=self._precision)).intersection(set(result))
def box(self, box: Optional[geodetic.Box] = None) -> List[Any]:
"""Selection of all data within the defined geographical area.
Args:
box (pyinterp.geodetic.Box): Bounding box used for data selection.
Returns:
list: List of data contained in the database for all positions
located in the selected geographic region.
"""
return list(
filter(
lambda item: len(item) != 0,
self._store.values(
list(geohash.bounding_boxes(box,
precision=self._precision)))))
def values(self, keys: Optional[Iterable[bytes]] = None) -> List[Any]:
"""Returns the list of values defined in the index.
Args:
keys (iterable, optional): The list of keys to be selected. If
this parameter is undefined, the method returns all values
defined in the index.
Returns:
list: values selected in the index.
"""
keys = keys or self.keys()
return self._store.values(list(keys))
def items(
self,
keys: Optional[Iterable[bytes]] = None) -> List[Tuple[bytes, Any]]:
"""Returns the list of pair (key, value) defined in the index.
Args:
keys (iterable, optional): The list of keys to be selected. If
this parameter is undefined, the method returns all items
defined in the index.
Returns:
list: items selected in the index.
"""
keys = keys or self.keys()
return self._store.items(list(keys))
def to_xarray(self,
box: Optional[geodetic.Box] = None) -> xarray.DataArray:
"""Get the XArray containing the data selected in the index.
Args:
box (pyinterp.geodetic.Box): Bounding box used for data selection.
Returns:
list: items selected in the index.
"""
keys = list(self.keys(box))
if len(keys) == 0:
hashs = numpy.array([], dtype="S1")
data = numpy.array([])
import numpy as np
from os.path import join
import cv2
import kdtree as KDT
import os
import glob
import time
def hsv_to_rgb(h, s, v):
if s == 0.0:
return v, v, v
i = int(h * 6.0)
f = (h * 6.0) - i
p = v * (1.0 - s)
q = v * (1.0 - s * f)
t = v * (1.0 - s * (1.0 - f))
i = i % 6
if i == 0:
return v, t, p
if i == 1:
return q, v, p
if i == 2:
return p, v, t
if i == 3:
return p, q, v
if i == 4:
return t, p, v
if i == 5:
return v, p, q
def map_lidar_points_onto_image(image_orig, lidar, flag, pixel_size=3):
image = np.copy(image_orig)
# get rows and cols
index = lidar['points'][:, 2] < -1.4
rows = lidar['row'][index].astype(np.int)
cols = lidar['col'][index].astype(np.int)
if flag=='height':
points_ = lidar['points'][:,2]
else:
points_ = lidar[flag]
MIN_DISTANCE = np.min(points_)
MAX_DISTANCE = np.max(points_)
distances = points_[index]
# determine point colours from distance
colours = (distances - MIN_DISTANCE) / (MAX_DISTANCE - MIN_DISTANCE)
colours = np.asarray([np.asarray(hsv_to_rgb(0.75 * c, 1.0, 1.0)) for c in colours])
pixel_rowoffs = np.indices([pixel_size, pixel_size])[0] - pixel_size // 2
pixel_coloffs = np.indices([pixel_size, pixel_size])[1] - pixel_size // 2
canvas_rows = image.shape[0]
canvas_cols = image.shape[1]
for i in range(len(rows)):
pixel_rows = np.clip(rows[i] + pixel_rowoffs, 0, canvas_rows - 1)
import numpy as np
import matplotlib.pyplot as plt
from py_wake.deficit_models.gaussian import BastankhahGaussian, ZongGaussian, ZongGaussianDeficit
from py_wake.deflection_models import GCLHillDeflection
from py_wake.examples.data.hornsrev1 import V80
from py_wake.flow_map import XYGrid
from py_wake.site.xrsite import UniformSite
from py_wake.turbulence_models.crespo import CrespoHernandez
from py_wake.wind_turbines._wind_turbines import WindTurbine
from py_wake.wind_turbines.power_ct_functions import PowerCtFunction
from py_wake.tests import npt
from py_wake.tests.check_speed import timeit
def test_torque_result():
"""Reproduce case from
<NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2020).
Yaw induced wake deflection - a full-scale validation study.
Journal of Physics - Conference Series, 1618, [062047].
https://doi.org/10.1088/1742-6596/1618/6/062047
Note that the implementation used in the paper differs from the actual implementation by:
- using rotor average deficit instead of peak deficit
- using ainslie deficit model + static "meander distribution"
- dismiss Ua (downwind distance reduction) term in integration formula
Hence the result is not expected to match
"""
site = UniformSite(p_wd=[1], ti=0.06)
x, y = [0], [0]
def power_ct_function(ws, yaw, run_only):
return (np.zeros_like(ws), np.where(yaw == 17.5, .86, 0.83))[run_only]
D = 52
v52 = WindTurbine(name="V52", diameter=D, hub_height=44,
powerCtFunction=PowerCtFunction(input_keys=['ws', 'yaw'],
power_ct_func=power_ct_function,
power_unit='w', additional_models=[]))
wfm = ZongGaussian(site, v52,
deflectionModel=GCLHillDeflection(),
turbulenceModel=CrespoHernandez())
x_ref = np.arange(2, 12, 2)
from torchvision.utils import draw_bounding_boxes
from torch import Tensor, max, from_numpy, as_tensor, int64
from torchvision.transforms import ToTensor, ToPILImage
from numpy import ndarray, array
from PIL.Image import Image
from torchvision.transforms import Compose, RandomPosterize, \
RandomAdjustSharpness, RandomAutocontrast, GaussianBlur
import matplotlib.pyplot as plt
from numpy.random import uniform
from torch.nn import Module
from torchvision.transforms.functional import pil_to_tensor, convert_image_dtype
def obj_detect_visualization(image,
coord,
wh_to_diag_coord = True,
input_labels = None,
input_colors = None,
input_fill = False,
input_width = 1,
input_font = None,
input_font_size = 10):
"""
Visualization function for object detection:
-image(torch.Tensor/np.ndarray/PIL.Image.Image/list): PIL image, numpy tensor, python built-in list or torch tensor.
-coord(torch.Tensor/np.ndarray/list): coordinate of bounding boxes in form of top left coordinate
and lower right coordinate, input using 2D torch tensor, 2D numpy
array or Python 3 array.
-wh_to_diag_coord (bool, Default = True): convert coord from top left
coordinate with width and height of bounding box to top left coordinate
and lower right coordinate.
-input_labels (List[str]): List containing the labels of bounding boxes.
-input_colors (List[Union[str, Tuple[int, int, int]]]): List containing the colors of bounding boxes. The colors can
be represented as `str` or `Tuple[int, int, int]`.
-input_fill (bool): If `True` fills the bounding box with specified color.
-input_width (int): Width of bounding box.
-input_font (str): A filename containing a TrueType font. If the file is not found in this filename, the loader may
also search in other directories, such as the `fonts/` directory on Windows or `/Library/Fonts/`,
`/System/Library/Fonts/` and `~/Library/Fonts/` on macOS.
-input_font_size (int): The requested font size in points.
Returns:
PIL image with ploted box.
"""
def to_diag_coord(tensors):
for tensor in tensors:
tensor[2] = tensor[2].add(tensor[0])
tensor[3] = tensor[3].add(tensor[1])
return tensors
#Convert data type to uint8 torch.Tensor
# For image
if isinstance(image, list):
true_input = (Tensor(image)*255).byte()
elif isinstance(image, Image):
true_input = (ToTensor()(image)*255).byte()
elif isinstance(image, Tensor):
if (max(image)>1): true_input = image.byte()
else: true_input = (image*255).byte()
elif isinstance(image, ndarray):
temp = from_numpy(image)
if (max(temp)>1): true_input = temp.byte()
else: true_input = (temp*255).byte()
#For coordinate
if isinstance(coord, list):
coordinate = Tensor(coord)
elif isinstance(coord, ndarray):
coordinate = from_numpy(coord)
elif isinstance(image, Tensor):
coordinate = coord
#Coordinate transformation
if wh_to_diag_coord:
true_coord = to_diag_coord(coordinate)
#Apply bounding box
result = draw_bounding_boxes(true_input,
true_coord,
labels = input_labels,
colors = input_colors,
fill = input_fill,
width = input_width,
font = input_font,
font_size = input_font_size)
return ToPILImage()(result)
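# Hedged usage sketch (not part of the original module): draws one (x, y, w, h)
# box on a random image; the label and colour are illustrative values.
def _obj_detect_visualization_example():
    from torch import rand
    img = rand(3, 224, 224)              # float image in [0, 1]
    boxes = [[30.0, 40.0, 100.0, 80.0]]  # x, y, width, height
    return obj_detect_visualization(img, boxes,
                                    input_labels=["object"],
                                    input_colors=["red"],
                                    input_width=2)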
class RandomGausianBlur(Module):
def __init__(self,
input_kernel_size,
input_sigma,
random_threshold=0.5):
super().__init__()
self.g_blur = GaussianBlur(kernel_size=input_kernel_size,
sigma=input_sigma)
self.random_threshold = random_threshold
def forward(self, img):
# apply the blur with probability given by random_threshold (assumed
# convention: draws below the threshold trigger the blur)
if uniform() < self.random_threshold:
    return self.g_blur(img)
return img
# Implement code for Locality Sensitive Hashing here!
import numpy as np
from collections import defaultdict, Counter
import utils
def gen_hash(length, bucket_width=None, hashing_type='min'):
if hashing_type == 'min':
mapper = min_hash_mapper(length)
return gen_min_hash(length, mapper)
elif hashing_type == 'hamming':
return gen_hamming_hash(length)
elif hashing_type == 'e2lsh':
assert bucket_width is not None, "E2LSH hash requires a bucket width"
return gen_e2lsh_hash(length, bucket_width=bucket_width)
def gen_hamming_hash(length):
c = np.random.choice(length, 1)[0]
return lambda x: x[c]
def gen_hash_band(r, length, bucket_width=None, hashing_type='min'):
b = [gen_hash(length, hashing_type=hashing_type, bucket_width=bucket_width) for _ in range(r)]
return lambda x: [f(x) for f in b]
def min_hash_mapper(length):
return np.random.choice(int(np.log2(length))**2, length)
def gen_min_hash(length, mapper):
order = np.arange(length)
np.random.shuffle(order)
return lambda x: mapper[order[np.min(np.where(x[order] == 1))]]
def gen_e2lsh_hash(length, bucket_width):
r = np.random.normal(size=(length,))
b = np.random.uniform(bucket_width)
return lambda x: np.round((np.dot(x, r) + b) / bucket_width)
def gen_bandfs(length, b, r, hashing_type='min', bucket_width=None):
return [gen_hash_band(r, length, hashing_type=hashing_type, bucket_width=bucket_width) for _ in range(b)]
def init_bands(x, y, b, r, hashing_type='min', sep='|', bucket_width=None):
bandfs = gen_bandfs(x.shape[1], b, r, hashing_type=hashing_type, bucket_width=bucket_width)
bands = []
for band in bandfs:
item = {'bandfn': band, 'hashes': defaultdict(list)}
for ind, i in enumerate(x):
ihash = sep.join(str(j) for j in band(i))
item['hashes'][ihash].append(ind)
bands.append(item)
return bands
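# Hedged usage sketch (not part of the original module): index a small binary
# dataset with min-hash bands and query one point. The shapes and the b/r
# choices are illustrative only.
def _lsh_usage_example():
    rng = np.random.RandomState(0)
    x = (rng.rand(50, 64) > 0.5).astype(int)  # 50 binary vectors of length 64
    y = rng.randint(0, 2, size=50)            # two illustrative class labels
    bands = init_bands(x, y, b=4, r=3, hashing_type='min')
    return classify(x[0], y, bands)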
def classify(q, y, bands, sep='|'):
apx_neighbors = []
missed_points = 0
for band in bands:
ihash = sep.join(str(j) for j in band['bandfn'](q))
apx_neighbors.extend(band['hashes'][ihash])
if not apx_neighbors:
return
res = Counter(y[np.unique(apx_neighbors)])
# majority vote among the approximate neighbours (assumed return convention)
return res.most_common(1)[0][0]
#!/usr/bin/env python
# coding: utf-8
import os
import numpy as np
import pandas as pd
import matplotlib.pylab as pylab
import matplotlib.pyplot as plt
import matplotlib.artist as martist
from matplotlib.offsetbox import AnchoredText
from math import pi
import seaborn as sns
os.chdir('/Users/pauline/Documents/Python')
df = pd.read_csv("Tab-Morph.csv")
params = {'figure.figsize': (10, 5),
'figure.dpi': 300,
'figure.titlesize': 14,
'font.family': 'Palatino',
'axes.grid': True,
'axes.labelsize': 8,
'polaraxes.grid': True,
}
pylab.rcParams.update(params)
def add_at(ax, t, loc=2):
fp = dict(size=11)
_at = AnchoredText(t, loc=loc, prop=fp)
ax.add_artist(_at)
return _at
# we show 6 different variables on the radar chart, so extract them and store as a np.array.
labels=np.array(['Median', 'Max', '1stQ', '3rdQ', 'Min', 'Mean'])
stats1=df.loc[1, labels].values
stats2=df.loc[5, labels].values
stats3=df.loc[12, labels].values
stats4=df.loc[15, labels].values
stats5=df.loc[18, labels].values
stats6=df.loc[23, labels].values
# close the plot
angles=np.linspace(0, 2*np.pi, len(labels), endpoint=False)
angles=np.concatenate((angles,[angles[0]]))
stats1=np.concatenate((stats1,[stats1[0]]))
stats2=np.concatenate((stats2,[stats2[0]]))
stats3= | np.concatenate((stats3,[stats3[0]])) | numpy.concatenate |
# Copyright (C) 2021 <NAME>
'''
Reference
---------
Eq. 10, Fig. 4
Mesh size functions for implicit geometries and PDE-based
gradient limiting, Persson, PO. https://doi.org/10.1007/s00366-006-0014-1
Section 2.4
Automatic feature-preserving size field for 3D mesh generation,
<NAME>, https://arxiv.org/abs/2009.03984
'''
import numpy as np
from numba import jit
def hgrad_limit(hfield, dx=1.0, hgradation=1.1, guess_h0=False):
"""Regularize the mesh size function based on length ratio of two adjacent edges
Reference
---------
Eq. 10, Fig. 4
Mesh size functions for implicit geometries and PDE-based
gradient limiting, Persson, PO. https://doi.org/10.1007/s00366-006-0014-1
Section 2.4
Automatic feature-preserving size field for 3D mesh generation,
<NAME>, https://arxiv.org/abs/2009.03984
Currently only supports Cartesian meshes; Octree could be added in the future
Parameters
----------
hfield : object
Input mesh size function object using cartesian mesh
Cartesian mesh -> numpy NxNxN array
dx : float
unit mesh size in the Cartesian mesh
hgradation : float
length ratio of two adjacent edges in the final mesh
guess_h0 : bool
we could give an initial solution using numpy.gradient to locate sharp regions;
this may accelerate the function for large problems
Author: <NAME> (<EMAIL>)
"""
grad_limit = hgradation-1.0
aset=None
if(guess_h0): #Using numpy gradient to prepare the initial condition
grads = np.gradient(hfield)
grad_limit_mask=[np.abs(dh)>grad_limit for dh in grads]
grad_limit_mask=np.logical_or.reduce(grad_limit_mask)
aset = np.zeros_like(grad_limit_mask,dtype=np.int32)
aset[grad_limit_mask]=1
aset-=1
aset = aset.flatten()
#Convert 2D shape list into 3D
dim = hfield.ndim
dims = hfield.shape
if dim == 2: dims = (dims[0], dims[1], 1)
return fastHJ(hfield, dims, dx=dx, hgradation=hgradation, aset0=aset)
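# Hedged usage sketch (not part of the original module): limit the gradient of
# a toy 2-D size field that contains a sharp jump; the values are illustrative.
def _hgrad_limit_example():
    h = np.ones((32, 32))
    h[12:20, 12:20] = 5.0  # sharp feature in the size field
    return hgrad_limit(h, dx=1.0, hgradation=1.2)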
##---------------Numba Accelerated C-like Funcs------------------
@jit(nopython=True)
def getI_J_K(ijk,shape):
#Find index [i,j,k] from a flat 3D matrix index [i,j,k]
NX,NY,NZ = shape
i,j,k=0,0,0
#Col major
#i=ijk%NX
#j=((int)(ijk / NX)) % NY
#k=(int)(ijk / (NX*NY))
#Row major
k = ijk%NZ
j = ((int)(ijk / NZ)) % NY
i = (int)(ijk / (NZ*NY))
return i,j,k
@jit(nopython=True)
def getIJK(i,j,k,shape):
#Convert index [i,j,k] to a flat 3D matrix index [ijk]
NX,NY,NZ = shape
#Col major
#return i + (NX)*(j + k*(NY))
#Row major
return k + NZ*(j+i*NY)
@jit(nopython=True)
def fastHJ(ffun, dims, dx, hgradation, imax=10000, aset0=None):
ftol = np.min(ffun)*np.sqrt(1e-9)
dfdx = hgradation-1.0
elen = dx
npos = np.zeros(7,dtype=np.int32)
#output field, convert into 1d for generic nD indexing
ffun_s = np.empty_like(ffun.size,dtype=np.float64)
import numpy as np
import xarray as xr
import laspy
import os
from time import perf_counter
from datetime import datetime
from HSTB.kluster.pydro_helpers import is_pydro
from HSTB.kluster.pdal_entwine import build_entwine_points
class FqprExport:
"""
Export methods built on top of the FQPR class. Writes the georeferenced soundings held by the Fqpr
instance to csv, las or entwine outputs.
Processed fqpr_generation.Fqpr instance is passed in as argument
"""
def __init__(self, fqpr):
"""
Parameters
----------
fqpr
Fqpr instance to export from
"""
self.fqpr = fqpr
def _generate_export_data(self, ping_dataset: xr.Dataset, filter_by_detection: bool = True, z_pos_down: bool = True):
"""
Take the georeferenced data in the multibeam.raw_ping datasets held by fqpr_generation.Fqpr (ping_dataset is one of those
raw_ping datasets) and build the necessary arrays for exporting.
Parameters
----------
ping_dataset
one of the multibeam.raw_ping xarray Datasets, must contain the x,y,z variables generated by georeferencing
filter_by_detection
if True, will filter the xyz data by the detection info flag (rejected by multibeam system)
z_pos_down
if True, will export soundings with z positive down (this is the native Kluster convention)
Returns
-------
xr.DataArray
x variable stacked in the time/beam dimension to create 1 dim representation. rejected soundings removed
if filter_by_detection
xr.DataArray
y variable stacked in the time/beam dimension to create 1 dim representation. rejected soundings removed
if filter_by_detection
xr.DataArray
z variable stacked in the time/beam dimension to create 1 dim representation. rejected soundings removed
if filter_by_detection
xr.DataArray
uncertainty variable stacked in the time/beam dimension to create 1 dim representation. rejected soundings removed
if filter_by_detection
np.array
indexes of the original z data before stacking, used to unstack x
np.array
if detectioninfo exists, this is the integer classification for each sounding
np.array
if detectioninfo exists, boolean mask for the valid detections
bool
if tvu exists, True
"""
uncertainty_included = False
nan_mask = ~np.isnan(ping_dataset['x'])
x_stck = ping_dataset['x'][nan_mask]
y_stck = ping_dataset['y'][nan_mask]
z_stck = ping_dataset['z'][nan_mask]
if 'tvu' in ping_dataset:
uncertainty_included = True
unc_stck = ping_dataset['tvu'][nan_mask]
# build mask with kongsberg detection info
classification = None
valid_detections = None
if 'detectioninfo' in ping_dataset:
dinfo = ping_dataset.detectioninfo
filter_stck = dinfo.values[nan_mask]
# filter_idx, filter_stck = stack_nan_array(dinfo, stack_dims=('time', 'beam'))
valid_detections = filter_stck != 2
tot = len(filter_stck)
tot_valid = np.count_nonzero(valid_detections)
tot_invalid = tot - tot_valid
self.fqpr.logger.info(
'{}: {} total soundings, {} retained, {} filtered'.format(ping_dataset.system_identifier, tot, tot_valid,
tot_invalid))
# filter points by mask
unc = None
if filter_by_detection and valid_detections is not None:
x = x_stck[valid_detections]
y = y_stck[valid_detections]
z = z_stck[valid_detections]
classification = filter_stck[valid_detections]
if uncertainty_included:
unc = unc_stck[valid_detections]
else:
x = x_stck
y = y_stck
z = z_stck
if 'detectioninfo' in ping_dataset:
classification = filter_stck
if uncertainty_included:
unc = unc_stck
# z positive down is the native convention in Kluster, if you want positive up, gotta flip
if not z_pos_down:
z = z * -1
return x, y, z, unc, nan_mask, classification, valid_detections, uncertainty_included
def export_pings_to_file(self, output_directory: str = None, file_format: str = 'csv', csv_delimiter=' ',
filter_by_detection: bool = True, z_pos_down: bool = True, export_by_identifiers: bool = True):
"""
Uses the output of georef_along_across_depth to build sounding exports. Currently you can export to csv, las or
entwine file formats, see file_format argument.
If you export to las and want to retain rejected soundings under the noise classification, set
filter_by_detection to False.
Filters using the detectioninfo variable if present in multibeam and filter_by_detection is set. Set z_pos_down
to False if you want positive up. Otherwise you get positive down.
Will generate an xyz file for each sector in multibeam. Results in one xyz file for each freq/sector id/serial
number combination.
entwine export will build las first, and then entwine from las
Parameters
----------
output_directory
optional, destination directory for the xyz exports, otherwise will auto export next to converted data
file_format
optional, destination file format, default is csv file, options include ['csv', 'las', 'entwine']
csv_delimiter
optional, if you choose file_format=csv, this will control the delimiter
filter_by_detection
optional, if True will only write soundings that are not rejected
z_pos_down
if True, will export soundings with z positive down (this is the native Kluster convention)
export_by_identifiers
if True, will generate separate files for each combination of serial number/sector/frequency
Returns
-------
list
list of written file paths
"""
if 'x' not in self.fqpr.multibeam.raw_ping[0]:
self.fqpr.logger.error('export_pings_to_file: No xyz data found, please run All Processing - Georeference Soundings first.')
return
if file_format not in ['csv', 'las', 'entwine']:
self.fqpr.logger.error('export_pings_to_file: Only csv, las and entwine format options supported at this time')
return
if file_format == 'entwine' and not is_pydro():
self.fqpr.logger.error(
'export_pings_to_file: Only pydro environments support entwine tile building. Please see https://entwine.io/configuration.html for instructions on installing entwine if you wish to use entwine outside of Kluster. Kluster exported las files will work with the entwine build command')
if output_directory is None:
output_directory = self.fqpr.multibeam.converted_pth
self.fqpr.logger.info('****Exporting xyz data to {}****'.format(file_format))
if file_format == 'csv':
fldr_path = _create_folder(output_directory, 'csv_export')
written_files = self._export_pings_to_csv(output_directory=fldr_path, csv_delimiter=csv_delimiter,
filter_by_detection=filter_by_detection, z_pos_down=z_pos_down,
export_by_identifiers=export_by_identifiers)
elif file_format == 'las':
fldr_path = _create_folder(output_directory, 'las_export')
written_files = self._export_pings_to_las(output_directory=fldr_path, filter_by_detection=filter_by_detection,
z_pos_down=z_pos_down, export_by_identifiers=export_by_identifiers)
elif file_format == 'entwine':
fldr_path = _create_folder(output_directory, 'las_export')
entwine_fldr_path = _create_folder(output_directory, 'entwine_export')
written_files = self.export_pings_to_entwine(output_directory=entwine_fldr_path, las_export_folder=fldr_path,
filter_by_detection=filter_by_detection, z_pos_down=z_pos_down,
export_by_identifiers=export_by_identifiers)
else:
raise NotImplementedError('export_pings_to_file: {} is not a supported file format'.format(file_format))
return written_files
def _export_pings_to_csv(self, output_directory: str = None, csv_delimiter=' ', filter_by_detection: bool = True,
z_pos_down: bool = True, export_by_identifiers: bool = True):
"""
Method for exporting pings to csv files. See export_pings_to_file to use.
Parameters
----------
output_directory
destination directory for the xyz exports, otherwise will auto export next to converted data
csv_delimiter
optional, if you choose file_format=csv, this will control the delimiter
filter_by_detection
optional, if True will only write soundings that are not rejected
z_pos_down
if True, will export soundings with z positive down (this is the native Kluster convention)
export_by_identifiers
if True, will generate separate files for each combination of serial number/sector/frequency
Returns
-------
list
list of written file paths
"""
starttime = perf_counter()
written_files = []
for rp in self.fqpr.multibeam.raw_ping:
self.fqpr.logger.info('Operating on system {}'.format(rp.system_identifier))
if filter_by_detection and 'detectioninfo' not in rp:
self.fqpr.logger.error('_export_pings_to_csv: Unable to filter by detection type, detectioninfo not found')
return
rp = rp.stack({'sounding': ('time', 'beam')})
if export_by_identifiers:
for freq in np.unique(rp.frequency):
subset_rp = rp.where(rp.frequency == freq, drop=True)
for secid in np.unique(subset_rp.txsector_beam).astype(np.int):
sec_subset_rp = subset_rp.where(subset_rp.txsector_beam == secid, drop=True)
dest_path = os.path.join(output_directory, '{}_{}_{}.csv'.format(rp.system_identifier, secid, freq))
self.fqpr.logger.info('writing to {}'.format(dest_path))
export_data = self._generate_export_data(sec_subset_rp, filter_by_detection=filter_by_detection, z_pos_down=z_pos_down)
self._csv_write(export_data[0], export_data[1], export_data[2], export_data[3], export_data[7],
dest_path, csv_delimiter)
written_files.append(dest_path)
else:
dest_path = os.path.join(output_directory, rp.system_identifier + '.csv')
self.fqpr.logger.info('writing to {}'.format(dest_path))
export_data = self._generate_export_data(rp, filter_by_detection=filter_by_detection, z_pos_down=z_pos_down)
self._csv_write(export_data[0], export_data[1], export_data[2], export_data[3], export_data[7],
dest_path, csv_delimiter)
written_files.append(dest_path)
endtime = perf_counter()
self.fqpr.logger.info('****Exporting xyz data to csv complete: {}s****\n'.format(round(endtime - starttime, 1)))
return written_files
def _csv_write(self, x: xr.DataArray, y: xr.DataArray, z: xr.DataArray, uncertainty: xr.DataArray,
uncertainty_included: bool, dest_path: str, delimiter: str):
"""
Write the data to csv
Parameters
----------
x
x variable stacked in the time/beam dimension to create 1 dim representation. rejected soundings removed
if filter_by_detection
y
y variable stacked in the time/beam dimension to create 1 dim representation. rejected soundings removed
if filter_by_detection
z
z variable stacked in the time/beam dimension to create 1 dim representation. rejected soundings removed
if filter_by_detection
uncertainty
uncertainty variable stacked in the time/beam dimension to create 1 dim representation. rejected soundings removed
if filter_by_detection
uncertainty_included
if tvu exists, True
dest_path
output path to write to
delimiter
csv delimiter to use
"""
if uncertainty_included:
np.savetxt(dest_path, np.c_[x, y, z, uncertainty],
fmt=['%3.3f', '%2.3f', '%4.3f', '%4.3f'],
delimiter=delimiter,
header='easting{}northing{}depth{}uncertainty'.format(delimiter, delimiter, delimiter),
comments='')
else:
np.savetxt(dest_path, np.c_[x, y, z],
fmt=['%3.3f', '%2.3f', '%4.3f'],
delimiter=delimiter,
header='easting{}northing{}depth'.format(delimiter, delimiter),
comments='')
def _export_pings_to_las(self, output_directory: str = None, filter_by_detection: bool = True, z_pos_down: bool = True,
export_by_identifiers: bool = True):
"""
Uses the output of georef_along_across_depth to build sounding exports. Currently you can export to csv or las
file formats, see file_format argument.
If you export to las and want to retain rejected soundings under the noise classification, set
filter_by_detection to False.
Filters using the detectioninfo variable if present in multibeam and filter_by_detection is set.
Will generate an xyz file for each sector in multibeam. Results in one xyz file for each freq/sector id/serial
number combination.
entwine export will build las first, and then entwine from las
Parameters
----------
output_directory
destination directory for the xyz exports, otherwise will auto export next to converted data
filter_by_detection
optional, if True will only write soundings that are not rejected
z_pos_down
if True, will export soundings with z positive down (this is the native Kluster convention)
export_by_identifiers
if True, will generate separate files for each combination of serial number/sector/frequency
Returns
-------
list
list of written file paths
"""
starttime = perf_counter()
written_files = []
for rp in self.fqpr.multibeam.raw_ping:
self.fqpr.logger.info('Operating on system {}'.format(rp.system_identifier))
if filter_by_detection and 'detectioninfo' not in rp:
self.fqpr.logger.error('_export_pings_to_las: Unable to filter by detection type, detectioninfo not found')
return
rp = rp.stack({'sounding': ('time', 'beam')})
if export_by_identifiers:
for freq in np.unique(rp.frequency):
subset_rp = rp.where(rp.frequency == freq, drop=True)
for secid in np.unique(subset_rp.txsector_beam).astype(np.int):
sec_subset_rp = subset_rp.where(subset_rp.txsector_beam == secid, drop=True)
dest_path = os.path.join(output_directory, '{}_{}_{}.las'.format(rp.system_identifier, secid, freq))
self.fqpr.logger.info('writing to {}'.format(dest_path))
export_data = self._generate_export_data(sec_subset_rp, filter_by_detection=filter_by_detection, z_pos_down=z_pos_down)
self._las_write(export_data[0], export_data[1], export_data[2], export_data[3],
export_data[5], export_data[7], dest_path)
written_files.append(dest_path)
else:
dest_path = os.path.join(output_directory, rp.system_identifier + '.las')
self.fqpr.logger.info('writing to {}'.format(dest_path))
export_data = self._generate_export_data(rp, filter_by_detection=filter_by_detection, z_pos_down=z_pos_down)
self._las_write(export_data[0], export_data[1], export_data[2], export_data[3],
export_data[5], export_data[7], dest_path)
written_files.append(dest_path)
endtime = perf_counter()
self.fqpr.logger.info('****Exporting xyz data to las complete: {}s****\n'.format(round(endtime - starttime, 1)))
return written_files
def _las_write(self, x: xr.DataArray, y: xr.DataArray, z: xr.DataArray, uncertainty: xr.DataArray,
classification: np.array, uncertainty_included: bool, dest_path: str):
"""
Write the data to LAS format
Parameters
----------
x
x variable stacked in the time/beam dimension to create 1 dim representation. rejected soundings removed
if filter_by_detection
y
y variable stacked in the time/beam dimension to create 1 dim representation. rejected soundings removed
if filter_by_detection
z
z variable stacked in the time/beam dimension to create 1 dim representation. rejected soundings removed
if filter_by_detection
uncertainty
uncertainty variable stacked in the time/beam dimension to create 1 dim representation. rejected soundings removed
if filter_by_detection
classification
if detectioninfo exists, this is the integer classification for each sounding
uncertainty_included
if tvu exists, True
dest_path
output path to write to
"""
x = np.round(x.values, 2)
        y = np.round(y.values, 2)
import sys
import os
import cv2
from collections import namedtuple
Batch = namedtuple('Batch', ['data'])
import numpy as np
import mxnet as mx
input_path = sys.argv[1].rstrip(os.sep)
mod = mx.mod.Module.load('mnist_lenet', 35, context=mx.gpu(2))
mod.bind(
data_shapes=[('data', (1, 1, 28, 28))],
for_training=False)
filenames = os.listdir(input_path)
for filename in filenames:
filepath = os.sep.join([input_path, filename])
img = cv2.imread(filepath, cv2.IMREAD_GRAYSCALE)
img = (img.astype(np.float)-128) * 0.00390625
img = img.reshape((1, 1)+img.shape)
mod.forward(Batch([mx.nd.array(img)]))
prob = mod.get_outputs()[0].asnumpy()
prob = np.squeeze(prob)
    pred_label = np.argmax(prob)
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 3 15:10:24 2020
@author: Nicolai
----------------
"""
import numpy as np
import time
from scipy.stats import cauchy
import testFunctions as tf
def L_SHADE(population, p, H, function, minError, maxGeneration):
'''
implementation of L-SHADE based on: \n
Improving the Search Performance of SHADE Using Linear Population Size Reduction\n
by Tanabe and Fukunaga\n
    adaptations:
        * no constraint handling implemented
        * population size reduction based on generation instead of function evaluations
Parameters
----------
population: numpy array
        2D numpy array where rows are candidates and columns are the dimensions
p: float ]0,1]
percentage of best individuals for current-to-p-best mutation
H: int
size of the memory
function: function
fitness function that is optimised
minError: float
stopping condition on function value
maxGeneration: int
stopping condition on max number of generation
Returns
-------
history: tuple
        tuple[0] - popDynamic\n
        tuple[1] - FEDynamic\n
        tuple[2] - FDynamic\n
        tuple[3] - CRDynamic\n
Examples
--------
>>> import numpy as np
>>> def sphere(x):
return np.dot(x,x)
>>> maxError = -1*np.inf
>>> maxGen = 10**3
>>> H = 50
>>> population = 100*np.random.rand(50,2)
>>> p = 0.1
>>> (popDynamic, FEDynamic, FDynamic, CRDynamic) =
L_SHADE(population, p, H, sphere, maxError, maxGen)
'''
# initialisation of variables
populationSize, dimension = population.shape
functionValue = np.asarray([function(candidate) for candidate in population])
genCount = 1
F = 0.5
CR = 0.5
archive = np.array([population[0]])
    # temporary arrays for holding the population and its function values
# during a generation
trailPopulation = np.copy(population)
trailFunctionValue = np.copy(functionValue)
# memory for control parameters
mCR = 0.5*np.ones(H)
mF = 0.5*np.ones(H)
# k is the running memory index
k = 0
# population size reduction parameter
NGmin = int(np.ceil(1/p))
NGinit = populationSize
popDynamic = []
FEDynamic = []
FDynamic = []
CRDynamic = []
popDynamic.append(np.copy(population))
FEDynamic.append(np.copy(functionValue))
FDynamic.append(np.copy(mF))
CRDynamic.append(np.copy(mCR))
while(genCount < maxGeneration and np.min(functionValue) > minError):
# success history S for control parameters
sCR = []
sF = []
sCRtemp = []
sFtemp = []
for i in range(populationSize):
F = selectF(mF)
sFtemp.append(F)
vi = mutationCurrentToPBest1(population, archive, i, functionValue, F, p)
CR = selectCR(mCR)
sCRtemp.append(CR)
ui = crossoverBIN(np.array([population[i]]), vi, CR)
trailPopulation[i] = ui
#######################################################
# for actual L-SHADE missing constraint handling here #
#######################################################
trailFunctionValue[i] = function(ui)
functionValueDifference = []
for i in range(populationSize):
if(trailFunctionValue[i] <= functionValue[i]):
# build and remove archive
archLength, _ = archive.shape
if (archLength >= populationSize):
randIndex = np.random.randint(0, high=archLength)
archive = np.delete(archive, randIndex, 0)
archive = np.vstack([archive, population[i]])
# create parameter success history and weights for lehmer mean
sF.append(sFtemp[i])
sCR.append(sCRtemp[i])
# equation 9 in paper
functionValueDifference.append(np.abs(trailFunctionValue[i] - functionValue[i]))
# perform selection
population[i] = trailPopulation[i]
functionValue[i] = trailFunctionValue[i]
# calculate lehmer weights
weights = []
sDF = np.sum(functionValueDifference)
for df in functionValueDifference:
if sDF == 0.0:
weights.append(0)
else:
weights.append(df/sDF)
# update parameter memory with success history
if len(sCR) != 0 and len(sF) != 0:
            if mCR[k] == np.inf or np.max(mCR)
# Licence?
import os
import atexit
import signal
import timeit
import warnings
from sys import exc_info
import numpy as np
def save():
np.save("saved/state.npy", xx, allow_pickle=True)
warnings.warn("Saving plant state")
atexit.register(save)
try:
    savedstate = np.load("saved/state.npy")
except:
savedstate = None
N = 1
try:
if "NUM_PLANTS" in os.environ.keys():
N = int(os.environ["NUM_PLANTS"])
warnings.warn("Using %s plants" % N)
except:
warnings.warn("Not able to access the number of plants to generate, using %s" % N)
h = 0.05
try:
if "SAMPLE_PERIOD" in os.environ.keys():
_h = float(os.environ["SAMPLE_PERIOD"])
if 0 < _h < 2:
h = _h
else:
            raise ValueError("Requested duration %s is not acceptable" % _h)
warnings.warn("Using %s as sample period" % h)
except:
warnings.warn("Not able to access the suggested sample period. Using %s" % h)
clock = timeit.default_timer
# Set model parameters
R = 4.67 # ohm
L = 170e-3 # H
J = 42.6e-6 # Kg-m^2
f = 47.3e-6 # N-m/rad/sec
K = 14.7e-3 # N-m/A
Kb = 14.7e-3 # V-sec/rad
# motor_state = [theta, thetadot, i]
x = np.zeros((3, 1))
# motor_output = [velocity]
y = np.zeros((1, 1))
u = np.zeros((1, 1))
A = np.array([[0, 1, 0], [0, -f/J, K/J], [0, -K/L, -R/L]])
B = np.array([[0], [0], [1/L]])
C = np.array([[1, 0, 0],[0, 1, 0]])
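# Continuous-time state-space form of the DC motor: xdot = A x + B u (u = armature voltage), y = C x,
# with state x = [theta, thetadot, i]; note that C as written selects theta and thetadot.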
xx = np.tile(x, (N, 1))
yy = np.tile(y, (N, 1))
# coding: utf-8
# Distributed under the terms of the MIT License.
import itertools
import numpy as np
import spglib
from functools import reduce
from progressbar import ProgressBar
# import pdb
from itertools import product
from ababe.stru.scaffold import GeneralCell
class SuperLatticeCell(object):
"""
    The class produces an instance representing a superlattice whose
    coefficient matrix is in Hermite Normal Form. It can be used to generate a GeneralCell.
    TODO: at present unit_cell uses row vectors, while lat_coeff
    uses column vectors. SHOULD CHANGE code to make lat_coeff use
    row vectors.
"""
def __init__(self, unit_gcell, lat_coeff):
self.ub = unit_gcell.lattice
self.upositions = unit_gcell.positions
self.unumbers = unit_gcell.numbers
self.lat_coeff = lat_coeff
self.unit_cell = unit_gcell.spg_cell
self.sym = spglib.get_symmetry(self.unit_cell, symprec=1e-3)
def __eq__(self, other):
# H_j * R.T ^-1 * H ^-1 should be an int matrix
inv = np.linalg.inv
mul = np.matmul
for r in self.sym['rotations']:
h_inv = mul(other.lat_coeff,
inv(r.T))
# h = mul(r, other.lat_coeff)
h_mat = mul(h_inv, inv(self.lat_coeff))
h_mat = np.around(h_mat, decimals=3)
if np.all(np.mod(h_mat, 1) == 0):
return True
return False
def to_general_cell(self):
"""
The function used to convert the superlattice
HermiteLattice to a GeneralCell instance.
        input aps stands for atom_positions, which are the atom positions
        of the unit basis.
        input numbers are the element numbers of the corresponding positions.
"""
# lat_coeff is represent as column vector
# while ub and latt is row vector
latt = np.matmul(self.lat_coeff,
self.ub)
# coor_unit_pos = np.matmul(self.upositions, self.ub)
# o_unit_pos = np.matmul(coor_unit_pos, np.linalg.inv(self.lat_coeff))
########################################################
# Tag: algorithm changed May be harmful!!!!!!!!
########################################################
o_unit_pos = np.matmul(self.upositions, np.linalg.inv(self.lat_coeff))
o_pos = self.get_frac_from_H(self.lat_coeff)
l_of_positions = [i for i in map(lambda x: x+o_pos, list(o_unit_pos))]
# pdb.set_trace()
pos = np.concatenate(l_of_positions, axis=0)
n = self.lat_coeff.diagonal().prod()
        numbers = np.repeat(self.unumbers, n)
# Licensed with the 3-clause BSD license. See LICENSE for details.
import os
from glob import glob
import argparse
import numpy as np
from astropy.io import fits
from astropy.wcs import WCS
import pds3
from catch import Catch
from catch.schema import NEATPalomar
from catch.config import Config
from sbsearch.util import FieldOfView, RADec
parser = argparse.ArgumentParser('add-neat-palomar')
parser.add_argument(
'path', help='directory containing NEAT PDS3 labels (.lbl suffix)')
args = parser.parse_args()
# Files to skip, based on file name and PRODUCT_CREATION_TIME. See catch README for notes.
skip = {
'20020814063615d.lbl': '2014-12-03T19:42:48.000',
'20020814063615e.lbl': '2014-12-03T19:42:48.000',
'20020814063615f.lbl': '2014-12-03T19:42:48.000',
'20020626063738d.lbl': '2014-12-03T19:42:07.000',
'20020626063738e.lbl': '2014-12-03T19:42:07.000',
'20020626063738f.lbl': '2014-12-03T19:42:07.000',
}
def product_id_to_int_id(pid):
s = pid.split('_')[-1]
s = s[:-1] + str(ord(s[-1]) - 65)
return int(s[2:])
with Catch(Config.from_file(), save_log=True, debug=True) as catch:
obs = []
for labelfn in glob(os.path.join(args.path, '*.lbl')):
path = os.path.dirname(labelfn)
label = pds3.PDS3Label(labelfn)
if os.path.basename(labelfn) in skip:
if label['PRODUCT_CREATION_TIME'] == skip[os.path.basename(labelfn)]:
continue
# local archive has compressed data:
datafn = os.path.join(path, label['^IMAGE'][0]) + '.fz'
h = fits.getheader(datafn, ext=1)
# hardcoded because Palomar Tricam part 1 labels are wrong
# shape = np.array((label['IMAGE']['LINES'],
# label['IMAGE']['SAMPLES']))
        shape = np.array((4080, 4080))
import os
import numpy as np
import h5py
import tempfile
import pytest
from keras import backend as K
from keras.layers import Input, Convolution3D, concatenate
from keras.models import Model
from keras.optimizers import Adam
import pybel
from tfbio.data import Featurizer
from kalasanty.net import dice_np, dice, dice_loss, ovl_np, ovl, ovl_loss, DataWrapper, UNet
path = os.path.dirname(os.path.realpath(__file__))
test_dataset = os.path.join(path, 'test_data.hdf')
protein_file = os.path.join(path, 'datasets', 'scpdb', '2qfo_1', 'protein.mol2')
featurizer = Featurizer(save_molecule_codes=False)
num_features = len(featurizer.FEATURE_NAMES)
input_shape = (1, 4, 2, 3, 1)
arr_zeros = np.zeros(input_shape)
arr_ones = np.ones(input_shape)
def teardown_function(function):
K.clear_session()
@pytest.fixture(scope='function')
def data():
data = DataWrapper(test_dataset, test_set=0.2, max_dist=52, scale=0.33)
yield data
data.close()
@pytest.mark.parametrize('smoothing', (0, 0.1, 0.001),
ids=lambda x: 'smoothing %s' % x)
def test_dice(smoothing):
x = Input(input_shape[1:])
m = Model(inputs=x, outputs=x)
arr_random = np.random.choice([0, 1], size=input_shape,
p=[0.75, 0.25])
arrays = (arr_random, arr_zeros, arr_ones)
arr_sum = arr_random.sum()
ones_sum = arr_ones.sum()
scores = (1.0, smoothing / (arr_sum + smoothing),
(2 * arr_sum + smoothing) / (arr_sum + ones_sum + smoothing))
m.compile(Adam(), lambda x, y: dice(x, y, smoothing_factor=smoothing))
for array, score in zip(arrays, scores):
score_keras = m.evaluate(arr_random, array, verbose=0)
score_np = dice_np(arr_random, array, smoothing_factor=smoothing)
assert np.allclose(score_keras, score_np, 6)
assert np.allclose(score_keras, score, 6)
@pytest.mark.parametrize('smoothing', (0, 0.1, 0.001),
ids=lambda x: 'smoothing %s' % x)
def test_ovl(smoothing):
x = Input(input_shape[1:])
m = Model(inputs=x, outputs=x)
arr_random = np.random.choice([0, 1], size=input_shape,
p=[0.75, 0.25])
arr_sum = arr_random.sum()
ones_sum = arr_ones.sum()
arrays = (arr_random, arr_zeros, arr_ones)
scores = (1.0, smoothing / (arr_sum + smoothing),
(arr_sum + smoothing) / (ones_sum + smoothing))
m.compile(Adam(), lambda x, y: ovl(x, y, smoothing_factor=smoothing))
for array, score in zip(arrays, scores):
score_keras = m.evaluate(arr_random, array, verbose=0)
score_np = ovl_np(arr_random, array, smoothing_factor=smoothing)
assert np.allclose(score_keras, score_np, 6)
assert np.allclose(score_keras, score, 6)
def test_unet_from_data_handle(data):
with pytest.raises(ValueError, match='you must either provide'):
UNet()
with pytest.raises(TypeError, match='data_handle should be a DataWrapper'):
UNet(data_handle='10gs')
model = UNet(data_handle=data)
assert model.data_handle == data
assert model.scale == data.scale
assert model.max_dist == data.max_dist
assert len(model.inputs) == 1
assert model.inputs[0].shape[-1] == data.x_channels
assert len(model.outputs) == 1
assert model.outputs[0].shape[-1] == data.y_channels
@pytest.mark.parametrize('box_size', (4, 16), ids=lambda x: 'box=%s' % x)
@pytest.mark.parametrize('i', (5, 1), ids=lambda x: 'i=%s' % x)
@pytest.mark.parametrize('o', (2, 1), ids=lambda x: 'o=%s' % x)
def test_unet_from_layers(box_size, i, o):
inputs = Input([box_size] * 3 + [i])
conv1 = Convolution3D(filters=3, kernel_size=1, activation='elu',
padding='same')(inputs)
outputs = Convolution3D(filters=o, kernel_size=1, activation='sigmoid',
padding='same')(conv1)
model = UNet(inputs=inputs, outputs=outputs, box_size=box_size,
input_channels=i, output_channels=o)
assert hasattr(model, 'data_handle')
assert model.data_handle is None
with pytest.raises(ValueError, match='input should be 5D'):
UNet(inputs=inputs[0], outputs=inputs)
with pytest.raises(ValueError, match='output should be 5D'):
UNet(inputs=inputs, outputs=outputs[1])
with pytest.raises(ValueError, match='input and output shapes do not match'):
UNet(inputs=inputs, outputs=concatenate([outputs, outputs], 1))
@pytest.mark.parametrize('box_size', (36, 144), ids=lambda x: 'box=%s' % x)
@pytest.mark.parametrize('o', (4, 2), ids=lambda x: 'o=%s' % x)
def test_unet_with_featurizer(box_size, o):
f = Featurizer()
i = len(f.FEATURE_NAMES)
with pytest.raises(TypeError, match='should be a tfbio.data.Featurize'):
UNet(box_size=box_size, input_channels=i, output_channels=o,
scale=0.5, featurizer=1)
model = UNet(box_size=box_size, input_channels=i, output_channels=o,
scale=0.5, featurizer=f)
assert hasattr(model, 'data_handle')
assert model.data_handle is None
assert hasattr(model, 'featurizer')
assert isinstance(model.featurizer, Featurizer)
@pytest.mark.parametrize('box_size', (8, 16), ids=lambda x: 'box=%s' % x)
@pytest.mark.parametrize('i_channels', ([5, 3], [2, 1, 1]),
ids=lambda x: 'i=' + ','.join([str(i) for i in x]))
@pytest.mark.parametrize('o_channels', ([3, 3], [2, 1, 4]),
ids=lambda x: 'o=' + ','.join([str(i) for i in x]))
def test_multiple_inputs_outputs(box_size, i_channels, o_channels):
inputs = [Input([box_size] * 3 + [i]) for i in i_channels]
conv1 = [Convolution3D(filters=3, kernel_size=1, activation='elu',
padding='same')(inp) for inp in inputs]
conv1 = concatenate(conv1, axis=-1)
outputs = [Convolution3D(filters=o, kernel_size=1, activation='sigmoid',
padding='same')(conv1) for o in o_channels]
model = UNet(inputs=inputs, outputs=outputs, box_size=box_size,
input_channels=sum(i_channels),
output_channels=sum(o_channels))
assert len(model.inputs) == len(i_channels)
assert len(model.outputs) == len(o_channels)
@pytest.mark.parametrize('loss', (dice_loss, ovl_loss))
def test_training(data, loss):
train_gen = data.batch_generator(batch_size=5)
eval_gen = data.batch_generator(batch_size=5)
test_gen = data.batch_generator(batch_size=2, subset='test')
num_epochs = 2
box_size = data.box_size
input_channels = data.x_channels
output_channels = data.y_channels
inputs = Input((box_size, box_size, box_size, input_channels))
outputs = Convolution3D(filters=output_channels, kernel_size=1,
activation='sigmoid')(inputs)
model = UNet(inputs=inputs, outputs=outputs)
model.compile(optimizer=Adam(lr=1e-6), loss=loss,
metrics=[dice, dice_loss, ovl, ovl_loss])
model.fit_generator(train_gen, steps_per_epoch=2,
epochs=num_epochs, verbose=0)
for scores in (model.evaluate_generator(eval_gen, steps=2),
model.evaluate_generator(test_gen, steps=1)):
assert np.allclose(scores[1], -scores[2])
assert np.allclose(scores[3], -scores[4])
loss_change = model.history.history['loss']
assert len(loss_change) == num_epochs
assert (loss_change[0] != loss_change[1:]).all()
@pytest.mark.parametrize('kwargs, err', (
({'scale': 1.0}, ValueError),
({'max_dist': 35}, ValueError),
({'featurizer': 123}, TypeError),
({'featurizer': Featurizer()}, ValueError)
), ids=('wrong scale', 'wrong dist', 'wrong featurizer type',
'wrong featurizer shape'))
@pytest.mark.parametrize('compiled', (True, False),
ids=('compiled', 'not compiled'))
@pytest.mark.filterwarnings('ignore:No training configuration found')
def test_load_wrong_args(data, kwargs, err, compiled):
box_size = data.box_size
i = data.x_channels
o = data.y_channels
model1 = UNet(box_size=box_size, input_channels=i,
output_channels=o, scale=data.scale,
data_handle=data)
if compiled:
model1.compile(optimizer=Adam(lr=1e-6),
loss='binary_crossentropy',
metrics=[dice, dice_loss, ovl, ovl_loss])
with tempfile.NamedTemporaryFile(suffix='.hdf') as f:
model1.save(f.name)
with pytest.raises(err, match=list(kwargs)[0]):
UNet.load_model(f.name, data_handle=data, **kwargs)
@pytest.mark.parametrize('kwargs', (
{},
{'max_dist': 52, 'scale': 0.33, 'featurizer': featurizer},
), ids=('no args', 'scale 1:3, dist=52, featurizer'))
@pytest.mark.parametrize('compiled', (True, False),
ids=('compiled', 'not compiled'))
@pytest.mark.filterwarnings('ignore:No training configuration found')
def test_save_load(data, kwargs, compiled):
from keras.models import load_model as keras_load
box_size = data.box_size
i = data.x_channels
o = data.y_channels
model1 = UNet(box_size=box_size, input_channels=i,
output_channels=o, scale=data.scale,
data_handle=data)
if compiled:
model1.compile(optimizer=Adam(lr=1e-6),
loss='binary_crossentropy',
metrics=[dice, dice_loss, ovl, ovl_loss])
weights1 = model1.get_weights()
with tempfile.NamedTemporaryFile(suffix='.hdf') as f:
model1.save(f.name)
model2 = UNet.load_model(f.name, data_handle=data, **kwargs)
weights2 = model2.get_weights()
assert model1.to_json() == model2.to_json()
for w1, w2 in zip(weights1, weights2):
assert np.allclose(w1, w2)
with tempfile.NamedTemporaryFile(suffix='.hdf') as f:
model1.save_keras(f.name)
model2 = keras_load(f.name)
weights2 = model2.get_weights()
for w1, w2 in zip(weights1, weights2):
assert np.allclose(w1, w2)
@pytest.mark.parametrize('kwargs', (
{'box_size': 30},
{'input_channels': 1},
{'output_channels': 4},
{'scale': 2.0},
{'featurizer': Featurizer()},
{'inputs': Input([36] * 3 + [1])},
{'outputs': Convolution3D(filters=3, kernel_size=1, activation='elu',
padding='same')(Input([36] * 3 + [1]))}
), ids=('box_size', 'input_channels', 'output_channels', 'scale', 'featurizer',
'inputs, no outputs', 'outputs, no inputs'))
def test_incompatible_with_data_handle(data, kwargs):
with pytest.raises(ValueError, match=list(kwargs)[0]):
UNet(data_handle=data, **kwargs)
@pytest.mark.parametrize('input_shape, strides, message', (
([10] * 3 + [1], 1, 'input shape does not match box_size'),
([20] * 5 + [1], 1, 'input should be 5D'),
([20] * 3 + [1], 2, 'input and output shapes do not match'),
), ids=('box size', 'not 3D image', 'different shapes'))
def test_incompatible_layers_shapes(input_shape, strides, message):
inputs = Input(input_shape)
if message == 'input should be 5D':
outputs = inputs
else:
outputs = Convolution3D(filters=3, kernel_size=1, activation='sigmoid',
padding='same', strides=strides)(inputs)
with pytest.raises(ValueError, match=message):
UNet(inputs=inputs, outputs=outputs, box_size=20)
@pytest.mark.parametrize('kwargs', (
{'box_size': 30},
{'input_channels': 1},
{'output_channels': 4},
{'featurizer': Featurizer()},
), ids=lambda x: ', '.join(str(k) for k in x))
def test_incompatible_with_layers(kwargs):
inputs = Input([10] * 3 + [3])
conv1 = Convolution3D(filters=3, kernel_size=1, activation='elu',
padding='same')(inputs)
outputs = Convolution3D(filters=5, kernel_size=1, activation='sigmoid',
padding='same')(conv1)
with pytest.raises(ValueError, match=list(kwargs)[0]):
UNet(inputs=inputs, outputs=outputs, **kwargs)
def test_get_pockets_segmentation(data):
with pytest.raises(ValueError, match='data_handle must be set'):
model = UNet(box_size=data.box_size,
input_channels=data.x_channels,
output_channels=data.y_channels,
l2_lambda=1e-7)
model.pocket_density_from_grid('10gs')
with pytest.raises(ValueError, match='scale must be set'):
model = UNet(box_size=data.box_size,
input_channels=data.x_channels,
output_channels=data.y_channels,
l2_lambda=1e-7, data_handle=data)
model.scale = None
model.pocket_density_from_grid('10gs')
np.random.seed(42)
model = UNet(box_size=data.box_size,
input_channels=data.x_channels,
output_channels=data.y_channels,
l2_lambda=1e-7, data_handle=data)
model.compile(optimizer=Adam(lr=1e-6), loss='binary_crossentropy')
density, *_ = model.pocket_density_from_grid('10gs')
with pytest.raises(ValueError, match='not supported'):
model.get_pockets_segmentation(np.array([density] * 2), 0.6)
pocket = model.get_pockets_segmentation(density, 0.6)
assert pocket.shape == (data.box_size,) * 3
assert pocket.max() > 0
assert len(np.unique(pocket)) - 1 <= pocket.max()
def test_save_pockets_cmap(data):
model = UNet(data_handle=data, l2_lambda=1e-7)
model.compile(optimizer=Adam(lr=1e-6), loss='binary_crossentropy')
density, origin, step = model.pocket_density_from_grid('10gs')
with pytest.raises(ValueError, match='saving more than one prediction'):
model.save_density_as_cmap(np.concatenate((density, density)), origin,
step)
with tempfile.NamedTemporaryFile(suffix='.cmap') as cmap_file:
fname = cmap_file.name
model.save_density_as_cmap(density, origin, step, fname=fname)
with h5py.File(fname, 'r') as f:
assert 'Chimera' in f
group = f['Chimera']
assert len(group.keys()) == data.y_channels
for i in range(data.y_channels):
key = 'image%s' % (i + 1)
assert key in group
assert 'data_zyx' in group[key]
dataset = group[key]['data_zyx'][:]
assert np.allclose(density[0, ..., i].transpose([2, 1, 0]),
dataset[:])
def test_save_pockets_cube(data):
model = UNet(data_handle=data, l2_lambda=1e-7)
model.compile(optimizer=Adam(lr=1e-6), loss='binary_crossentropy')
density, origin, step = model.pocket_density_from_grid('10gs')
with pytest.raises(ValueError, match='saving more than one prediction'):
model.save_density_as_cube(np.concatenate((density, density)), origin,
step)
with pytest.raises(NotImplementedError, match='saving multichannel'):
model.save_density_as_cube(density, origin, step)
density = density[..., [0]]
with tempfile.NamedTemporaryFile(suffix='.cube') as cmap_file:
fname = cmap_file.name
model.save_density_as_cube(density, origin, step, fname=fname)
with open(fname, 'r') as f:
# skip header
for _ in range(7):
f.readline()
values = np.array(f.read().split()).reshape(density.shape)
assert np.allclose(density, values.astype(float))
@pytest.mark.parametrize('box_size', (36, 72), ids=lambda x: 'box=%s' % x)
@pytest.mark.parametrize('o', (1, 3), ids=lambda x: 'o=%s' % x)
def test_predict_mol(box_size, o):
mol = next(pybel.readfile('mol2', protein_file))
with pytest.raises(ValueError, match='featurizer must be set'):
model = UNet(box_size=box_size, scale=0.5, input_channels=num_features,
output_channels=o)
model.pocket_density_from_mol(mol)
with pytest.raises(ValueError, match='scale must be set'):
model = UNet(featurizer=featurizer, box_size=box_size,
input_channels=num_features, output_channels=o)
model.pocket_density_from_mol(mol)
model = UNet(featurizer=featurizer, box_size=box_size, scale=0.5,
output_channels=o)
model.compile(optimizer=Adam(lr=1e-6), loss='binary_crossentropy')
with pytest.raises(TypeError, match='pybel.Molecule'):
model.pocket_density_from_mol(protein_file)
density, origin, step = model.pocket_density_from_mol(mol)
assert (density > 0).any()
@pytest.mark.parametrize('box_size', (36, 72), ids=lambda x: 'box=%s' % x)
@pytest.mark.parametrize('o', (1, 2), ids=lambda x: 'o=%s' % x)
def test_predict_pocket_atoms(box_size, o):
    np.random.seed(42)
# Copyright Materialize, Inc. and contributors. All rights reserved.
#
# Use of this software is governed by the Business Source License
# included in the LICENSE file at the root of this repository.
#
# As of the Change Date specified in that file, in accordance with
# the Business Source License, use of this software will be governed
# by the Apache License, Version 2.0.
import statistics
from typing import Any, Callable, List
import numpy as np
class Aggregation:
def __init__(self) -> None:
self._data: List[float] = []
def append(self, measurement: float) -> None:
self._data.append(measurement)
def aggregate(self) -> Any:
return self.func()(*self._data)
def func(self) -> Callable:
assert False
class MinAggregation(Aggregation):
def func(self) -> Callable:
return min
class MeanAggregation(Aggregation):
def func(self) -> Callable:
return np.mean
class StdDevAggregation(Aggregation):
def __init__(self, num_stdevs: float) -> None:
self._data = []
self._num_stdevs = num_stdevs
def aggregate(self) -> float:
stdev: float = np.std(self._data)
        mean: float = np.mean(self._data)
import numpy as np
from numpy import linalg as LA
import scipy.sparse as sparse
from scipy.sparse import csc_matrix
from scipy.sparse import dia_matrix
import itertools
import operator
"""
A few functions used in PDE-FIND
<NAME>. 2016
"""
##################################################################################
##################################################################################
#
# Functions for taking derivatives.
# When in doubt / nice data ===> finite differences
# \ noisy data ===> polynomials
#
##################################################################################
##################################################################################
def TikhonovDiff(f, dx, lam, d = 1):
"""
Tikhonov differentiation.
return argmin_g \|Ag-f\|_2^2 + lam*\|Dg\|_2^2
    where A is trapezoidal integration and D is finite differences for first derivative
It looks like it will work well and does for the ODE case but
tends to introduce too much bias to work well for PDEs. If the data is noisy, try using
polynomials instead.
"""
# Initialize a few things
n = len(f)
f = np.matrix(f - f[0]).reshape((n,1))
# Get a trapezoidal approximation to an integral
A = np.zeros((n,n))
for i in range(1, n):
A[i,i] = dx/2
A[i,0] = dx/2
for j in range(1,i): A[i,j] = dx
e = np.ones(n-1)
D = sparse.diags([e, -e], [1, 0], shape=(n-1, n)).todense() / dx
# Invert to find derivative
g = np.squeeze(np.asarray(np.linalg.lstsq(A.T.dot(A) + lam*D.T.dot(D),A.T.dot(f))[0]))
if d == 1: return g
# If looking for a higher order derivative, this one should be smooth so now we can use finite differences
else: return FiniteDiff(g, dx, d-1)
def FiniteDiff(u, dx, d):
"""
Takes dth derivative data using 2nd order finite difference method (up to d=3)
Works but with poor accuracy for d > 3
Input:
u = data to be differentiated
dx = Grid spacing. Assumes uniform spacing
"""
n = u.size
ux = np.zeros(n, dtype=np.complex64)
if d == 1:
for i in range(1,n-1):
ux[i] = (u[i+1]-u[i-1]) / (2*dx)
ux[0] = (-3.0/2*u[0] + 2*u[1] - u[2]/2) / dx
ux[n-1] = (3.0/2*u[n-1] - 2*u[n-2] + u[n-3]/2) / dx
return ux
if d == 2:
for i in range(1,n-1):
ux[i] = (u[i+1]-2*u[i]+u[i-1]) / dx**2
ux[0] = (2*u[0] - 5*u[1] + 4*u[2] - u[3]) / dx**2
ux[n-1] = (2*u[n-1] - 5*u[n-2] + 4*u[n-3] - u[n-4]) / dx**2
return ux
if d == 3:
for i in range(2,n-2):
ux[i] = (u[i+2]/2-u[i+1]+u[i-1]-u[i-2]/2) / dx**3
ux[0] = (-2.5*u[0]+9*u[1]-12*u[2]+7*u[3]-1.5*u[4]) / dx**3
ux[1] = (-2.5*u[1]+9*u[2]-12*u[3]+7*u[4]-1.5*u[5]) / dx**3
ux[n-1] = (2.5*u[n-1]-9*u[n-2]+12*u[n-3]-7*u[n-4]+1.5*u[n-5]) / dx**3
ux[n-2] = (2.5*u[n-2]-9*u[n-3]+12*u[n-4]-7*u[n-5]+1.5*u[n-6]) / dx**3
return ux
if d > 3:
return FiniteDiff(FiniteDiff(u,dx,3), dx, d-3)
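# Quick example (not part of the original module): finite differences of x**2 on a uniform grid
#   x = np.linspace(0, 1, 101)
#   ux = FiniteDiff(x**2, x[1] - x[0], 1)   # approximately 2*x
#   uxx = FiniteDiff(x**2, x[1] - x[0], 2)  # approximately 2 everywhere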
def ConvSmoother(x, p, sigma):
"""
Smoother for noisy data
    Inputs: x, p, sigma
x = one dimensional series to be smoothed
p = width of smoother
sigma = standard deviation of gaussian smoothing kernel
"""
n = len(x)
y = np.zeros(n, dtype=np.complex64)
g = np.exp(-np.power(np.linspace(-p,p,2*p),2)/(2.0*sigma**2))
for i in range(n):
a = max([i-p,0])
b = min([i+p,n])
c = max([0, p-i])
d = min([2*p,p+n-i])
y[i] = np.sum(np.multiply(x[a:b], g[c:d]))/np.sum(g[c:d])
return y
def PolyDiff(u, x, deg = 3, diff = 1, width = 5):
"""
u = values of some function
x = x-coordinates where values are known
deg = degree of polynomial to use
diff = maximum order derivative we want
width = width of window to fit to polynomial
This throws out the data close to the edges since the polynomial derivative only works
well when we're looking at the middle of the points fit.
"""
u = u.flatten()
x = x.flatten()
n = len(x)
du = np.zeros((n - 2*width,diff))
# Take the derivatives in the center of the domain
for j in range(width, n-width):
points = np.arange(j - width, j + width)
# Fit to a Chebyshev polynomial
# this is the same as any polynomial since we're on a fixed grid but it's better conditioned :)
poly = np.polynomial.chebyshev.Chebyshev.fit(x[points],u[points],deg)
# Take derivatives
for d in range(1,diff+1):
du[j-width, d-1] = poly.deriv(m=d)(x[j])
return du
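# Quick example (not part of the original module): PolyDiff drops `width` points at each edge
#   x = np.linspace(0, 10, 501)
#   du = PolyDiff(np.sin(x), x, deg=4, diff=1, width=10)
#   # du[:, 0] approximates cos(x[10:-10])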
def PolyDiffPoint(u, x, deg = 3, diff = 1, index = None):
"""
Same as above but now just looking at a single point
u = values of some function
x = x-coordinates where values are known
deg = degree of polynomial to use
diff = maximum order derivative we want
"""
n = len(x)
    if index is None: index = (n-1)//2
# Fit to a Chebyshev polynomial
# better conditioned than normal polynomials
poly = np.polynomial.chebyshev.Chebyshev.fit(x,u,deg)
# Take derivatives
derivatives = []
for d in range(1,diff+1):
derivatives.append(poly.deriv(m=d)(x[index]))
return derivatives
##################################################################################
##################################################################################
#
# Functions specific to PDE-FIND
#
##################################################################################
##################################################################################
def build_Theta(data, derivatives, derivatives_description, P, data_description = None):
"""
    builds a matrix with columns representing polynomials up to degree P of all variables
This is used when we subsample and take all the derivatives point by point or if there is an
extra input (Q in the paper) to put in.
input:
data: column 0 is U, and columns 1:end are Q
derivatives: a bunch of derivatives of U and maybe Q, should start with a column of ones
derivatives_description: description of what derivatives have been passed in
P: max power of polynomial function of U to be included in Theta
returns:
Theta = Theta(U,Q)
descr = description of what all the columns in Theta are
"""
n,d = data.shape
m, d2 = derivatives.shape
if n != m: raise Exception('dimension error')
if data_description is not None:
        if len(data_description) != d: raise Exception('data description error')
# Create a list of all polynomials in d variables up to degree P
rhs_functions = {}
f = lambda x, y : np.prod(np.power(list(x), list(y)))
powers = []
for p in range(1,P+1):
size = d + p - 1
for indices in itertools.combinations(list(range(size)), d-1):
starts = [0] + [index+1 for index in indices]
stops = indices + (size,)
powers.append(tuple(map(operator.sub, stops, starts)))
for power in powers: rhs_functions[power] = [lambda x, y = power: f(x,y), power]
# First column of Theta is just ones.
Theta = np.ones((n,1), dtype=np.complex64)
descr = ['']
# Add the derivaitves onto Theta
for D in range(1,derivatives.shape[1]):
Theta = np.hstack([Theta, derivatives[:,D].reshape(n,1)])
descr.append(derivatives_description[D])
# Add on derivatives times polynomials
for D in range(derivatives.shape[1]):
for k in list(rhs_functions.keys()):
func = rhs_functions[k][0]
new_column = np.zeros((n,1), dtype=np.complex64)
for i in range(n):
new_column[i] = func(data[i,:])*derivatives[i,D]
Theta = np.hstack([Theta, new_column])
if data_description is None: descr.append(str(rhs_functions[k][1]) + derivatives_description[D])
else:
function_description = ''
for j in range(d):
if rhs_functions[k][1][j] != 0:
if rhs_functions[k][1][j] == 1:
function_description = function_description + data_description[j]
else:
function_description = function_description + data_description[j] + '^' + str(rhs_functions[k][1][j])
descr.append(function_description + derivatives_description[D])
return Theta, descr
def build_linear_system(u, dt, dx, D = 3, P = 3,time_diff = 'poly',space_diff = 'poly',lam_t = None,lam_x = None, width_x = None,width_t = None, deg_x = 5,deg_t = None,sigma = 2):
"""
Constructs a large linear system to use in later regression for finding PDE.
This function works when we are not subsampling the data or adding in any forcing.
Input:
Required:
u = data to be fit to a pde
dt = temporal grid spacing
dx = spatial grid spacing
Optional:
D = max derivative to include in rhs (default = 3)
P = max power of u to include in rhs (default = 3)
time_diff = method for taking time derivative
options = 'poly', 'FD', 'FDconv','TV'
'poly' (default) = interpolation with polynomial
'FD' = standard finite differences
'FDconv' = finite differences with convolutional smoothing
before and after along x-axis at each timestep
'Tik' = Tikhonov (takes very long time)
space_diff = same as time_diff with added option, 'Fourier' = differentiation via FFT
lam_t = penalization for L2 norm of second time derivative
only applies if time_diff = 'TV'
default = 1.0/(number of timesteps)
lam_x = penalization for L2 norm of (n+1)st spatial derivative
default = 1.0/(number of gridpoints)
width_x = number of points to use in polynomial interpolation for x derivatives
or width of convolutional smoother in x direction if using FDconv
width_t = number of points to use in polynomial interpolation for t derivatives
deg_x = degree of polynomial to differentiate x
deg_t = degree of polynomial to differentiate t
sigma = standard deviation of gaussian smoother
only applies if time_diff = 'FDconv'
default = 2
Output:
ut = column vector of length u.size
        R = matrix with (D+1)*(P+1) columns, each as long as ut
rhs_description = description of what each column in R is
"""
n, m = u.shape
    if width_x is None: width_x = n//10
    if width_t is None: width_t = m//10
if deg_t == None: deg_t = deg_x
# If we're using polynomials to take derviatives, then we toss the data around the edges.
if time_diff == 'poly':
m2 = m-2*width_t
offset_t = width_t
else:
m2 = m
offset_t = 0
if space_diff == 'poly':
n2 = n-2*width_x
offset_x = width_x
else:
n2 = n
offset_x = 0
if lam_t == None: lam_t = 1.0/m
if lam_x == None: lam_x = 1.0/n
########################
    # First take the time derivative for the left hand side of the equation
########################
ut = np.zeros((n2,m2), dtype=np.complex64)
if time_diff == 'FDconv':
Usmooth = np.zeros((n,m), dtype=np.complex64)
# Smooth across x cross-sections
for j in range(m):
Usmooth[:,j] = ConvSmoother(u[:,j],width_t,sigma)
# Now take finite differences
for i in range(n2):
ut[i,:] = FiniteDiff(Usmooth[i + offset_x,:],dt,1)
elif time_diff == 'poly':
T= np.linspace(0,(m-1)*dt,m)
for i in range(n2):
ut[i,:] = PolyDiff(u[i+offset_x,:],T,diff=1,width=width_t,deg=deg_t)[:,0]
elif time_diff == 'Tik':
for i in range(n2):
ut[i,:] = TikhonovDiff(u[i + offset_x,:], dt, lam_t)
else:
for i in range(n2):
ut[i,:] = FiniteDiff(u[i + offset_x,:],dt,1)
ut = np.reshape(ut, (n2*m2,1), order='F')
########################
# Now form the rhs one column at a time, and record what each one is
########################
u2 = u[offset_x:n-offset_x,offset_t:m-offset_t]
Theta = np.zeros((n2*m2, (D+1)*(P+1)), dtype=np.complex64)
ux = np.zeros((n2,m2), dtype=np.complex64)
rhs_description = ['' for i in range((D+1)*(P+1))]
if space_diff == 'poly':
Du = {}
for i in range(m2):
Du[i] = PolyDiff(u[:,i+offset_t],np.linspace(0,(n-1)*dx,n),diff=D,width=width_x,deg=deg_x)
if space_diff == 'Fourier': ik = 1j*np.fft.fftfreq(n)*n
for d in range(D+1):
if d > 0:
for i in range(m2):
if space_diff == 'Tik': ux[:,i] = TikhonovDiff(u[:,i+offset_t], dx, lam_x, d=d)
elif space_diff == 'FDconv':
Usmooth = ConvSmoother(u[:,i+offset_t],width_x,sigma)
ux[:,i] = FiniteDiff(Usmooth,dx,d)
elif space_diff == 'FD': ux[:,i] = FiniteDiff(u[:,i+offset_t],dx,d)
elif space_diff == 'poly': ux[:,i] = Du[i][:,d-1]
elif space_diff == 'Fourier': ux[:,i] = np.fft.ifft(ik**d*np.fft.fft(ux[:,i]))
else: ux = np.ones((n2,m2), dtype=np.complex64)
for p in range(P+1):
Theta[:, d*(P+1)+p] = np.reshape(np.multiply(ux, np.power(u2,p)), (n2*m2), order='F')
if p == 1: rhs_description[d*(P+1)+p] = rhs_description[d*(P+1)+p]+'u'
elif p>1: rhs_description[d*(P+1)+p] = rhs_description[d*(P+1)+p]+'u^' + str(p)
if d > 0: rhs_description[d*(P+1)+p] = rhs_description[d*(P+1)+p]+\
'u_{' + ''.join(['x' for _ in range(d)]) + '}'
return ut, Theta, rhs_description
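# A minimal end-to-end sketch of the PDE-FIND workflow using the functions in this module
# (parameter values are illustrative, not prescriptive):
#   ut, Theta, desc = build_linear_system(u, dt, dx, D=3, P=3, time_diff='FD', space_diff='FD')
#   w = TrainSTRidge_correct(Theta, ut, lam=1e-5, d_tol=1)
#   print_pde(w, desc)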
def print_pde(w, rhs_description, ut = 'u_t'):
pde = ut + ' = '
first = True
for i in range(len(w)):
if w[i] != 0:
if not first:
pde = pde + ' + '
pde = pde + "(%05f %+05fi)" % (w[i].real, w[i].imag) + rhs_description[i] + "\n "
first = False
print(pde)
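# e.g. print_pde(np.array([0, 0.1 + 0j]), ['', 'u_{xx}']) prints "u_t = (0.100000 +0.000000i)u_{xx}"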
##################################################################################
##################################################################################
#
# Functions for sparse regression.
#
##################################################################################
##################################################################################
def TrainSTRidge_bad(R, Ut, lam, d_tol, maxit = 25, STR_iters = 10, l0_penalty = None, normalize = 2, split = 0.8, print_best_tol = False):
"""
This function trains a predictor using STRidge.
It runs over different values of tolerance and trains predictors on a training set, then evaluates them
using a loss function on a holdout set.
Please note published article has typo. Loss function used here for model selection evaluates fidelity using 2-norm,
not squared 2-norm.
"""
    # Split data into 80% training and 20% test, then search for the best tolerance.
    np.random.seed(0) # for consistency
n,_ = R.shape
train = np.random.choice(n, int(n*split), replace = False)
test = [i for i in np.arange(n) if i not in train]
TrainR = R[train,:]
TestR = R[test,:]
TrainY = Ut[train,:]
TestY = Ut[test,:]
D = TrainR.shape[1]
# Set up the initial tolerance and l0 penalty
d_tol = float(d_tol)
tol = d_tol
if l0_penalty == None: l0_penalty = 0.001*np.linalg.cond(R)
# Get the standard least squares estimator
w = np.zeros((D,1))
w_best = np.linalg.lstsq(TrainR, TrainY)[0]
err_best = np.linalg.norm(TestY - TestR.dot(w_best), 2) + l0_penalty*np.count_nonzero(w_best)
tol_best = 0
# Now increase tolerance until test performance decreases
for iter in range(maxit):
# Get a set of coefficients and error
w = STRidge(R,Ut,lam,STR_iters,tol,normalize = normalize)
err = np.linalg.norm(TestY - TestR.dot(w), 2) + l0_penalty*np.count_nonzero(w)
# Has the accuracy improved?
if err <= err_best:
err_best = err
w_best = w
tol_best = tol
tol = tol + d_tol
else:
tol = max([0,tol - 2*d_tol])
d_tol = 2*d_tol / (maxit - iter)
tol = tol + d_tol
if print_best_tol: print("Optimal tolerance:", tol_best)
return w_best
def TrainSTRidge_correct(R, Ut, lam, d_tol, maxit = 25, STR_iters = 10, l0_penalty = None, normalize = 2, split = 0.8, print_best_tol = False):
"""
This function trains a predictor using STRidge.
It runs over different values of tolerance and trains predictors on a training set, then evaluates them
using a loss function on a holdout set.
Please note published article has typo. Loss function used here for model selection evaluates fidelity using 2-norm,
not squared 2-norm.
"""
    # Split data into 80% training and 20% test, then search for the best tolerance.
    np.random.seed(0) # for consistency
n,_ = R.shape
train = np.random.choice(n, int(n*split), replace = False)
test = [i for i in np.arange(n) if i not in train]
TrainR = R[train,:]
TestR = R[test,:]
TrainY = Ut[train,:]
TestY = Ut[test,:]
D = TrainR.shape[1]
# Set up the initial tolerance and l0 penalty
d_tol = float(d_tol)
tol = d_tol
if l0_penalty == None: l0_penalty = 0.001*np.linalg.cond(R)
# Get the standard least squares estimator
w = np.zeros((D,1))
w_best = np.linalg.lstsq(TrainR, TrainY)[0]
err_best = np.linalg.norm(TestY - TestR.dot(w_best), 2) + l0_penalty*np.count_nonzero(w_best)
tol_best = 0
# Now increase tolerance until test performance decreases
for iter in range(maxit):
# Get a set of coefficients and error
w = STRidge(TrainR,TrainY,lam,STR_iters,tol,normalize = normalize)
err = np.linalg.norm(TestY - TestR.dot(w), 2) + l0_penalty*np.count_nonzero(w)
# Has the accuracy improved?
if err <= err_best:
err_best = err
w_best = w
tol_best = tol
tol = tol + d_tol
else:
tol = max([0,tol - 2*d_tol])
d_tol = 2*d_tol / (maxit - iter)
tol = tol + d_tol
if print_best_tol: print("Optimal tolerance:", tol_best)
return w_best
def Lasso(X0, Y, lam, w = np.array([0]), maxit = 100, normalize = 2):
"""
Uses accelerated proximal gradient (FISTA) to solve Lasso
argmin (1/2)*||Xw-Y||_2^2 + lam||w||_1
"""
# Obtain size of X
n,d = X0.shape
X = np.zeros((n,d), dtype=np.complex64)
Y = Y.reshape(n,1)
# Create w if none is given
if w.size != d:
w = np.zeros((d,1), dtype=np.complex64)
w_old = np.zeros((d,1), dtype=np.complex64)
# Initialize a few other parameters
converge = 0
objective = np.zeros((maxit,1))
# First normalize data
if normalize != 0:
Mreg = np.zeros((d,1))
for i in range(0,d):
Mreg[i] = 1.0/(np.linalg.norm(X0[:,i],normalize))
X[:,i] = Mreg[i]*X0[:,i]
else: X = X0
# Lipschitz constant of gradient of smooth part of loss function
L = np.linalg.norm(X.T.dot(X),2)
# Now loop until converged or max iterations
for iters in range(0, maxit):
# Update w
z = w + iters/float(iters+1)*(w - w_old)
w_old = w
z = z - X.T.dot(X.dot(z)-Y)/L
for j in range(d): w[j] = np.multiply(np.sign(z[j]), np.max([abs(z[j])-lam/L,0]))
# Could put in some sort of break condition based on convergence here.
# Now that we have the sparsity pattern, used least squares.
biginds = np.where(w != 0)[0]
    if biginds.size > 0: w[biginds] = np.linalg.lstsq(X[:, biginds],Y)[0]
# Finally, reverse the regularization so as to be able to use with raw data
if normalize != 0: return np.multiply(Mreg,w)
else: return w
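# Example (synthetic data, for illustration only): recover a 2-sparse coefficient vector
#   X = np.random.randn(200, 50)
#   w_true = np.zeros((50, 1)); w_true[[3, 17]] = 1.0
#   w_hat = Lasso(X, X.dot(w_true), lam=0.1)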
def ElasticNet(X0, Y, lam1, lam2, w = np.array([0]), maxit = 100, normalize = 2):
"""
Uses accelerated proximal gradient (FISTA) to solve elastic net
argmin (1/2)*||Xw-Y||_2^2 + lam_1||w||_1 + (1/2)*lam_2||w||_2^2
"""
# Obtain size of X
n,d = X0.shape
X = np.zeros((n,d), dtype=np.complex64)
Y = Y.reshape(n,1)
# Create w if none is given
if w.size != d:
w = np.zeros((d,1), dtype=np.complex64)
w_old = np.zeros((d,1), dtype=np.complex64)
# Initialize a few other parameters
converge = 0
objective = np.zeros((maxit,1))
# First normalize data
if normalize != 0:
        Mreg = np.zeros((d,1))
# -*- coding: utf-8 -*-
"""Top-level package for gnpy."""
__author__ = """<TBD>"""
__email__ = '<<EMAIL>'
__version__ = '0.1.0'
import numpy as np
import multiprocessing as mp
import scipy.interpolate as interp
"""
GNPy: a Python 3 implementation of the Gaussian Noise (GN) Model of nonlinear
propagation, developed by the OptCom group, Department of Electronics and
Telecommunications, Politecnico di Torino, Italy
"""
__credits__ = ["<NAME>", "<NAME>", "<NAME>"]
def raised_cosine_comb(f, rs, roll_off, center_freq, power):
""" Returns an array storing the PSD of a WDM comb of raised cosine shaped
channels at the input frequencies defined in array f
:param f: Array of frequencies in THz
:param rs: Array of Symbol Rates in TBaud. One Symbol rate for each channel
:param roll_off: Array of roll-off factors [0,1). One per channel
:param center_freq: Array of channels central frequencies in THz. One per channel
:param power: Array of channel powers in W. One per channel
:return: PSD of the WDM comb evaluated over f
"""
ts_arr = 1.0 / rs
passband_arr = (1.0 - roll_off) / (2.0 * ts_arr)
stopband_arr = (1.0 + roll_off) / (2.0 * ts_arr)
g = power / rs
psd = np.zeros(np.shape(f))
for ind in range(np.size(center_freq)):
f_nch = center_freq[ind]
g_ch = g[ind]
ts = ts_arr[ind]
passband = passband_arr[ind]
stopband = stopband_arr[ind]
ff = np.abs(f - f_nch)
tf = ff - passband
if roll_off[ind] == 0:
psd = np.where(tf <= 0, g_ch, 0.) + psd
else:
psd = g_ch * (np.where(tf <= 0, 1., 0.) + 1.0 / 2.0 * (1 + np.cos(np.pi * ts / roll_off[ind] *
tf)) * np.where(tf > 0, 1., 0.) *
np.where(np.abs(ff) <= stopband, 1., 0.)) + psd
return psd
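# Example (hypothetical WDM comb, values for illustration only): two 32-GBaud channels on a 50-GHz grid
#   f = np.linspace(-0.1, 0.1, 2001)                                   # THz
#   psd = raised_cosine_comb(f, np.array([0.032, 0.032]), np.array([0.1, 0.1]),
#                            np.array([-0.025, 0.025]), np.array([1e-3, 1e-3]))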
def fwm_eff(a, Lspan, b2, ff):
""" Computes the four-wave mixing efficiency given the fiber characteristics
over a given frequency set ff
:param a: Fiber loss coefficient in 1/km
:param Lspan: Fiber length in km
:param b2: Fiber Dispersion coefficient in ps/THz/km
:param ff: Array of Frequency points in THz
:return: FWM efficiency rho
"""
rho = np.power(np.abs((1.0 - np.exp(-2.0 * a * Lspan + 1j * 4.0 * np.pi * np.pi * b2 * Lspan * ff)) / (
2.0 * a - 1j * 4.0 * np.pi * np.pi * b2 * ff)), 2)
return rho
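# Example (hypothetical fiber parameters): FWM efficiency over a 100 km span
# with 0.2 dB/km loss and ~21 ps/THz/km dispersion.
#   a_lin = 0.2 / 20.0 / np.log10(np.e) # dB/km -> 1/km, same conversion as in GN_integral below
#   rho = fwm_eff(a_lin, 100.0, 21.3, np.linspace(-2.0, 2.0, 1001))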
def get_freqarray(f, Bopt, fmax, max_step, f_dense_low, f_dense_up, df_dense):
""" Returns a non-uniformly spaced frequency array useful for fast GN-model.
integration. The frequency array is made of a denser area, sided by two
log-spaced arrays
:param f: Central frequency at which NLI is evaluated in THz
:param Bopt: Total optical bandwidth of the system in THz
:param fmax: Upper limit of the integration domain in THz
:param max_step: Maximum step size for frequency array definition in THz
:param f_dense_low: Lower limit of denser frequency region in THz
:param f_dense_up: Upper limit of denser frequency region in THz
:param df_dense: Step size to be used in the denser frequency region in THz
:return: Non uniformly defined frequency array
"""
f_dense = np.arange(f_dense_low, f_dense_up, df_dense)
k = Bopt / 2.0 / (Bopt / 2.0 - max_step) # Compute Step ratio for log-spaced array definition
if f < 0:
Nlog_short = np.ceil(np.log(fmax / np.abs(f_dense_low)) / np.log(k) + 1.0)
f1_short = -(np.abs(f_dense_low) * np.power(k, np.arange(Nlog_short, 0.0, -1.0) - 1.0))
k = (Bopt / 2 + (np.abs(f_dense_up) - f_dense_low)) / (Bopt / 2.0 - max_step + (np.abs(f_dense_up) - f_dense_up))
Nlog_long = np.ceil(np.log((fmax + (np.abs(f_dense_up) - f_dense_up)) / abs(f_dense_up)) * 1.0 / np.log(k) + 1.0)
f1_long = np.abs(f_dense_up) * np.power(k, (np.arange(1, Nlog_long + 1) - 1.0)) - (
np.abs(f_dense_up) - f_dense_up)
f1_array = np.concatenate([f1_short, f_dense[1:], f1_long])
else:
Nlog_short = np.ceil(np.log(fmax / np.abs(f_dense_up)) / np.log(k) + 1.0)
f1_short = f_dense_up * np.power(k, np.arange(1, Nlog_short + 1.0) - 1.0)
k = (Bopt / 2.0 + (abs(f_dense_low) + f_dense_low)) / (Bopt / 2.0 - max_step + (abs(f_dense_low) + f_dense_low))
Nlog_long = np.ceil(np.log((fmax + (np.abs(f_dense_low) + f_dense_low)) / np.abs(f_dense_low)) / np.log(k) + 1)
f1_long = -(np.abs(f_dense_low) * np.power(k, np.arange(Nlog_long, 0, -1) - 1.0)) + (
abs(f_dense_low) + f_dense_low)
f1_array = np.concatenate([f1_long, f_dense[1:], f1_short])
return f1_array
def GN_integral(b2, Lspan, a_db, gam, f_ch, b_ch, roll_off, power, Nch, model_param):
""" GN_integral computes the GN reference formula via smart brute force integration. The Gaussian Noise model is
applied in its incoherent form (phased-array factor =1). The function computes the integral by columns: for each f1,
a non-uniformly spaced f2 array is generated, and the integrand function is computed there. At the end of the loop
on f1, the overall GNLI is computed. Accuracy can be tuned by operating on model_param argument.
:param b2: Fiber dispersion coefficient in ps/THz/km. Scalar
:param Lspan: Fiber Span length in km. Scalar
:param a_db: Fiber loss coefficient in dB/km. Scalar
:param gam: Fiber nonlinear coefficient in 1/W/km. Scalar
:param f_ch: Baseband channels center frequencies in THz. Array of size 1xNch
:param b_ch: Channels' -3 dB bandwidth. Array of size 1xNch
:param roll_off: Channels' Roll-off factors [0,1). Array of size 1xNch
:param power: Channels' power values in W. Array of size 1xNch
:param Nch: Number of channels. Scalar
:param model_param: Dictionary with model parameters for accuracy tuning
model_param['min_FWM_inv']: Minimum FWM efficiency value to be considered for high density
integration in dB
model_param['n_grid']: Maximum Number of integration points to be used in each frequency slot of
the spectrum
model_param['n_grid_min']: Minimum Number of integration points to be used in each frequency
slot of the spectrum
model_param['f_array']: Frequencies at which evaluate GNLI, expressed in THz
:return: GNLI: power spectral density in W/THz of the nonlinear interference at frequencies model_param['f_array']
"""
alpha_lin = a_db / 20.0 / np.log10(np.e) # Conversion in linear units 1/km
min_FWM_inv = np.power(10, model_param['min_FWM_inv'] / 10)
from __future__ import print_function
"""
Classes that provide support functions for minis_methods,
including fitting, smoothing, filtering, and some analysis.
Test run timing:
cb: 0.175 s (with cython version of algorithm); misses overlapping events
aj: 0.028 s, plus gets overlapping events
July 2017
Note: all values are MKS (Seconds, plus Volts, Amps)
per acq4 standards...
"""
import numpy as np
import scipy.signal
from dataclasses import dataclass, field
import traceback
from typing import Union, List
import timeit
from scipy.optimize import curve_fit
import lmfit
import pylibrary.tools.digital_filters as dfilt
from pylibrary.tools.cprint import cprint
@dataclass
class Filtering:
LPF_applied: bool=False
HPF_applied: bool=False
LPF_frequency: Union[float, None]= None
HPF_frequency: Union[float, None]= None
def def_empty_list():
return [0] # [0.0003, 0.001] # in seconds (was 0.001, 0.010)
def def_empty_list2():
return [[None]] # [0.0003, 0.001] # in seconds (was 0.001, 0.010)
@dataclass
class AverageEvent:
"""
The AverageEvent class holds the averaged events from all
traces/trials
"""
averaged : bool= False # set flags in case of no events found
avgeventtb:Union[List, np.ndarray] = field(
default_factory=def_empty_list)
avgevent: Union[List, np.ndarray] =field(
default_factory=def_empty_list)
Nevents: int = 0
avgnpts: int = 0
fitted :bool = False
fitted_tau1 :float = np.nan
fitted_tau2 :float = np.nan
Amplitude :float = np.nan
avg_fiterr :float = np.nan
risetenninety:float = np.nan
decaythirtyseven:float = np.nan
@dataclass
class Summaries:
"""
The Summaries dataclass holdes the results of the
individual events that were detected,
as well as the results of various fits
and the averge fit
"""
onsets: Union[List, np.ndarray] = field(
default_factory=def_empty_list2)
peaks: Union[List, np.ndarray] = field(
default_factory=def_empty_list)
smpkindex: Union[List, np.ndarray] = field(
default_factory=def_empty_list)
smoothed_peaks : Union[List, np.ndarray] = field(
default_factory=def_empty_list)
amplitudes : Union[List, np.ndarray] = field(
default_factory=def_empty_list)
Qtotal : Union[List, np.ndarray] = field(
default_factory=def_empty_list)
individual_events: bool = False
average: object = AverageEvent()
allevents: Union[List, np.ndarray] = field(
default_factory=def_empty_list)
event_trace_list : Union[List] = field(
default_factory=def_empty_list)
class MiniAnalyses:
def __init__(self):
"""
Base class for Clements-Bekkers and Andrade-Jonas methods
Provides template generation, and summary analyses
Allows use of common methods between different algorithms
"""
self.verbose = False
self.ntraces = 1
self.filtering = Filtering()
self.risepower = 4.0
self.min_event_amplitude = 5.0e-12 # pA default
self.Criterion = [None]
self.template = None
self.template_tmax = 0.
self.analysis_window=[None, None] # specify window or entire data set
super().__init__()
def setup(
self,
ntraces: int = 1,
tau1: Union[float, None] = None,
tau2: Union[float, None] = None,
template_tmax: float = 0.05,
dt_seconds: Union[float, None] = None,
delay: float = 0.0,
sign: int = 1,
eventstartthr: Union[float, None] = None,
risepower: float = 4.0,
min_event_amplitude: float = 5.0e-12,
threshold:float = 2.5,
global_SD:Union[float, None] = None,
analysis_window:[Union[float, None], Union[float, None]] = [None, None],
lpf:Union[float, None] = None,
hpf:Union[float, None] = None,
notch:Union[float, None] = None,
) -> None:
"""
Just store the parameters - will compute when needed
Use of globalSD and threshold:
If global SD is None, we use the threshold as is.
If Global SD has a value, then we use that rather than the
current trace SD for threshold determinations
"""
cprint('r', 'SETUP***')
assert sign in [-1, 1] # must be selective, positive or negative events only
self.ntraces = ntraces
self.Criterion = [[] for x in range(ntraces)]
self.sign = sign
self.taus = [tau1, tau2]
self.dt_seconds = dt_seconds
self.template_tmax = template_tmax
self.idelay = int(delay / self.dt_seconds) # points delay in template with zeros
self.template = None # reset the template if needed.
self.eventstartthr = eventstartthr
self.risepower = risepower
self.min_event_amplitude = min_event_amplitude
self.threshold = threshold
self.sdthr = self.threshold # for starters
self.analysis_window = analysis_window
self.lpf = lpf
self.hpf = hpf
self.notch = notch
self.reset_filtering()
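# Typical use (illustrative values; AndradeJonas is a hypothetical subclass name
# standing in for one of the detector classes built on this base):
#   aj = AndradeJonas()
#   aj.setup(ntraces=1, tau1=1e-3, tau2=5e-3, dt_seconds=5e-5, sign=-1,
#            template_tmax=0.05, threshold=3.0)
#   aj.prepare_data(trace) # trace: 1-D numpy array of current, in A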
def set_sign(self, sign: int = 1):
self.sign = sign
def set_dt_seconds(self, dt_seconds:Union[None, float] = None):
self.dt_seconds = dt_seconds
def set_risepower(self, risepower: float = 4):
if risepower > 0 and risepower <= 8:
self.risepower = risepower
else:
raise ValueError("Risepower must be 0 < n <= 8")
# def set_notch(self, notches):
# if isinstance(nothce, float):
# notches = [notches]
# elif isinstance(notches, None):
# self.notch = None
# self.Notch_applied = False
# return
# elif isinstance(notches, list):
# self.notch = notches
# else:
# raise ValueError("set_notch: Notch must be list, float or None")
def _make_template(self):
"""
Private function: make template when it is needed
"""
tau_1, tau_2 = self.taus # use the predefined taus
t_psc = np.arange(0, self.template_tmax, self.dt_seconds)
self.t_template = t_psc
Aprime = (tau_2 / tau_1) ** (tau_1 / (tau_1 - tau_2))
self.template = np.zeros_like(t_psc)
tm = (
1.0
/ Aprime
* (
(1 - (np.exp(-t_psc / tau_1))) ** self.risepower
* np.exp((-t_psc / tau_2))
)
)
# tm = 1./2. * (np.exp(-t_psc/tau_1) - np.exp(-t_psc/tau_2))
if self.idelay > 0:
self.template[self.idelay :] = tm[: -self.idelay] # shift the template
else:
self.template = tm
if self.sign > 0:
self.template_amax = np.max(self.template)
else:
self.template = -self.template
self.template_amax = np.min(self.template)
def reset_filtering(self):
self.filtering.LPF_applied = False
self.filtering.HPF_applied = False
self.filtering.Notch_applied = False
def LPFData(
self, data: np.ndarray, lpf: Union[float, None] = None, NPole: int = 8
) -> np.ndarray:
assert (not self.filtering.LPF_applied) # block repeated application of filtering
cprint('y', f"minis_methods_common, LPF data: {lpf:f}")
# old_data = data.copy()
if lpf is not None :
# cprint('y', f" ... lpf at {lpf:f}")
if lpf > 0.49 / self.dt_seconds:
raise ValueError(
"lpf > Nyquist: ", lpf, 0.49 / self.dt_seconds, self.dt_seconds, 1.0 / self.dt_seconds
)
data = dfilt.SignalFilter_LPFButter(data, lpf, 1./self.dt_seconds, NPole=8)
self.filtering.LPF = lpf
self.filtering.LPF_applied = True
# import matplotlib.pyplot as mpl
# print(old_data.shape[0]*self.dt_seconds)
# tb = np.arange(0, old_data.shape[0]*self.dt_seconds, self.dt_seconds)
# print(tb.shape)
# mpl.plot(tb, old_data, 'b-')
# mpl.plot(tb, data, 'k-')
# mpl.show()
# exit()
return data
def HPFData(self, data:np.ndarray, hpf: Union[float, None] = None, NPole: int = 8) -> np.ndarray:
assert (not self.filtering.HPF_applied) # block repeated application of filtering
if hpf is None or hpf == 0.0 :
return data
if len(data.shape) == 1:
ndata = data.shape[0]
else:
ndata = data.shape[1]
nyqf = 0.5 * ndata * self.dt_seconds
# cprint('y', f"minis_methods: hpf at {hpf:f}")
if hpf < 1.0 / nyqf: # duration of a trace
raise ValueError(
"hpf < Nyquist: ",
hpf,
"nyquist",
1.0 / nyqf,
"ndata",
ndata,
"dt in seconds",
self.dt_seconds,
"sampelrate",
1.0 / self.dt,
)
data = dfilt.SignalFilter_HPFButter(data-data[0], hpf, 1.0 / self.dt_seconds, NPole=4)
self.filtering.HPF = hpf
self.filtering.HPF_applied = True
return data
# def NotchData(self, data:np.ndarray, notch: Union[list, None] = None) -> np.ndarray:
# assert (not self.filtering.notch_applied) # block repeated application of filtering
# if notch is None or len(notch) == 0 :
# return data
# if len(data.shape) == 1:
# ndata = data.shape[0]
# else:
# ndata = data.shape[1]
#
# data[i] = dfilt.NotchFilter(
# data[i]-data[0],
# notch,
# Q=20.0,
# samplefreq=1.0 / self.dt_seconds,
# )
# self.filtering.notch = notch
# self.filtering.Notch_applied = True
#
# return data
def prepare_data(self, data):
"""
This function prepares the incoming data for the mini analyses.
1. Clip the data in time (remove sections with current or voltage steps)
2. Filter the data (LPF, HPF)
"""
# cprint('r', 'Prepare data')
self.timebase = np.arange(0.0, data.shape[0] * self.dt_seconds, self.dt_seconds)
if self.analysis_window[1] is not None:
jmax = np.argmin(np.fabs(self.timebase - self.analysis_window[1]))
else:
jmax = len(self.timebase)
if self.analysis_window[0] is not None:
jmin = np.argmin(np.fabs(self.timebase - self.analysis_window[0]))
else:
jmin = 0
data = data[jmin:jmax]
if self.verbose:
if self.lpf is not None:
cprint('y', f"minis_methods_common, prepare_data: LPF: {self.lpf:.1f} Hz")
else:
cprint('r', f"minis_methods_common, no LPF applied")
if self.hpf is not None:
cprint('y', f"minis_methods_common, prepare_data: HPF: {self.hpf:.1f} Hz")
else:
cprint('r', f"minis_methods_common, no HPF applied")
if isinstance(self.lpf, float):
data = self.LPFData(data, lpf=self.lpf)
if isinstance(self.hpf, float):
data = self.HPFData(data, hpf=self.hpf)
# if isinstance(self.notch, list):
# data = self.HPFData(data, notch=self.notch)
self.data = data
self.timebase = self.timebase[jmin:jmax]
def moving_average(self, a, n: int = 3) -> (np.array, int):
ret = np.cumsum(a, dtype=float)
ret[n:] = ret[n:] - ret[:-n]
# return ret[n - 1 :] / n, n
return ret[int(n/2):] / n, n # re-align array
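# Worked example (illustrative): moving_average(np.array([0., 0., 3., 0., 0.]), n=3)
# returns (array([0., 1., 1., 1.]), 3); slicing from n//2 keeps the smoothed trace
# roughly aligned with the raw data, at the cost of a partial sum at the leading edge.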
def remove_outliers(self, x:np.ndarray, scale:float=3.0) -> np.ndarray:
a = np.array(x)
upper_quartile = np.percentile(a, 75)
lower_quartile = np.percentile(a, 25)
IQR = (upper_quartile - lower_quartile) * scale
quartileSet = (lower_quartile - IQR, upper_quartile + IQR)
result = np.where(((a >= quartileSet[0]) & (a <= quartileSet[1])), a, np.nan)
# import matplotlib.pyplot as mpl
# mpl.plot(x)
# mpl.plot(result)
# mpl.show()
return result
def summarize(self, data, order: int = 11, verbose: bool = False) -> None:
"""
compute intervals, peaks and amplitudes for all found events in a
trace or a group of traces
filter out events that are less than min_event_amplitude
"""
i_decay_pts = int(2.0 * self.taus[1] / self.dt_seconds) # decay window time (points) Units all seconds
assert i_decay_pts > 5
self.Summary = Summaries() # a single summary class is created
ndata = len(data)
# set up arrays : note construction to avoid "same memory but different index" problem
self.Summary.onsets = [[] for x in range(ndata)]
self.Summary.peaks = [[] for x in range(ndata)]
self.Summary.smoothed_peaks = [[] for x in range(ndata)]
self.Summary.smpkindex = [[] for x in range(ndata)]
self.Summary.amplitudes = [[] for x in range(ndata)]
self.Summary.filtered_traces = [[] for x in range(ndata)]
avgwin = (
5 # int(1.0/self.dt_seconds) # 5 point moving average window for peak detection
)
mwin = int((0.50) / self.dt_seconds)
if self.sign > 0:
nparg = np.greater
else:
nparg = np.less
self.intervals = []
self.timebase = np.arange(0., data.shape[1]*self.dt_seconds, self.dt_seconds)
nrejected_too_small = 0
for itrial, dataset in enumerate(data): # each trial/trace
if len(self.onsets[itrial]) == 0: # original events
continue
# cprint('c', f"Onsets found: {len(self.onsets[itrial]):d} in trial {itrial:d}")
acceptlist_trial = []
self.intervals.append(np.diff(self.timebase[self.onsets[itrial]])) # event intervals
# cprint('y', f"Summarize: trial: {itrial:d} onsets: {len(self.onsets[itrial]):d}")
# print('onsets: ', self.onsets[itrial])
ev_accept = []
for j, onset in enumerate(self.onsets[itrial]): # for all of the events in this trace
if self.sign > 0 and self.eventstartthr is not None:
if dataset[onset] < self.eventstartthr:
# print('pos sign: data onset < eventstartthr')
continue
if self.sign < 0 and self.eventstartthr is not None:
if dataset[onset] > -self.eventstartthr:
# print('neg sign: data onset > eventstartthr')
continue
event_data = dataset[onset : (onset + mwin)] # get this event
# print('onset, mwin: ', onset, mwin)
svwinlen = event_data.shape[0]
if svwinlen > 11:
svn = 11
else:
svn = svwinlen
if (
svn % 2 == 0
): # if even, decrease by 1 point to meet odd requirement for savgol_filter
svn -= 1
if svn > 3: # go ahead and filter
p = scipy.signal.argrelextrema(
scipy.signal.savgol_filter(
event_data, svn, 2
),
nparg,
order=order,
)[0]
else: # skip filtering
p = scipy.signal.argrelextrema(
event_data,
nparg,
order=order,
)[0]
# print('len(p): ', len(p), svn, event_data)
if len(p) > 0:
# print('p, idecay onset: ', len(p), i_decay_pts, onset)
i_end = i_decay_pts + onset # distance from peak to end
i_end = min(dataset.shape[0], i_end) # keep within the array limits
if j < len(self.onsets[itrial]) - 1:
if i_end > self.onsets[itrial][j + 1]:
i_end = (
self.onsets[itrial][j + 1] - 1
) # only go to next event start
windowed_data = dataset[onset : i_end]
# print('onset, iend: ', onset, i_end)
# import matplotlib.pyplot as mpl
# fx, axx = mpl.subplots(1,1)
# axx.plot(self.timebase[onset:i_end], dataset[onset:i_end], 'g-')
# mpl.show()
move_avg, n = self.moving_average(
windowed_data,
n=min(avgwin, len(windowed_data)),
)
# print('moveavg: ', move_avg)
# print(avgwin, len(windowed_data))
# print('windowed_data: ', windowed_data)
if self.sign > 0:
smpk = np.argmax(move_avg) # find peak of smoothed data
rawpk = np.argmax(windowed_data) # non-smoothed
else:
smpk = np.argmin(move_avg)
rawpk = np.argmin(windowed_data)
if self.sign*(move_avg[smpk] - windowed_data[0]) < self.min_event_amplitude:
nrejected_too_small += 1
# print(f"Event too small: {1e12*self.sign*(move_avg[smpk] - windowed_data[0]):6.1f} vs. thresj: {1e12*self.min_event_amplitude:6.1f} pA")
continue # filter out events smaller than the amplitude
else:
# print('accept: ', j)
ev_accept.append(j)
# cprint('m', f"Extending for trial: {itrial:d}, {len(self.Summary.onsets[itrial]):d}, onset={onset}")
self.Summary.onsets[itrial].append(onset)
self.Summary.peaks[itrial].append(onset + rawpk)
self.Summary.amplitudes[itrial].append(windowed_data[rawpk])
self.Summary.smpkindex[itrial].append(onset + smpk)
self.Summary.smoothed_peaks[itrial].append(move_avg[smpk])
acceptlist_trial.append(j)
self.onsets[itrial] = self.onsets[itrial][ev_accept] # reduce to the accepted values only
# self.Summary.smoothed_peaks = np.array(self.Summary.smoothed_peaks)
# self.Summary.amplitudes = np.array(self.Summary.amplitudes)
print(f"Rejected {nrejected_too_small:6d} events (threshold = {1e12*self.min_event_amplitude:6.1f} pA)")
self.average_events(
data,
)
# print(self.Summary.average.avgevent)
if self.Summary.average.averaged:
self.fit_average_event(
tb=self.Summary.average.avgeventtb,
avgevent=self.Summary.average.avgevent,
initdelay=0.,
debug=False)
else:
if verbose:
print("No events found")
return
def measure_events(self, data:object, eventlist: list) -> dict:
# compute simple measurements of events (area, amplitude, half-width)
#
# cprint('r', 'MEASURE EVENTS')
assert data.ndim == 1
self.measured = False
# treat like averaging
tdur = np.max((np.max(self.taus) * 5.0, 0.010)) # go 5 taus or 10 ms past event
tpre = 0.0 # self.taus[0]*10.
self.avgeventdur = tdur
self.tpre = tpre
self.avgnpts = int((tpre + tdur) / self.dt_seconds) # points for the average
npre = int(tpre / self.dt_seconds) # points for the pre time
npost = int(tdur / self.dt_seconds)
avg = np.zeros(self.avgnpts)
avgeventtb = np.arange(self.avgnpts) * self.dt_seconds
# assert True == False
allevents = np.zeros((len(eventlist), self.avgnpts))
k = 0
pkt = 0 # np.argmax(self.template) # accumulate
meas = {"Q": [], "A": [], "HWup": [], "HWdown": [], "HW": []}
for j, i in enumerate(eventlist):
ix = i + pkt # self.idelay
if (ix + npost) < len(self.data) and (ix - npre) >= 0:
allevents[k, :] = data[ix - npre : ix + npost]
k = k + 1
if k > 0:
allevents = allevents[0:k, :] # trim unused
for j in range(k):
ev_j = scipy.signal.savgol_filter(
self.sign * allevents[j, :], 7, 2, mode="nearest"
) # flip sign if negative
ai = np.argmax(ev_j)
if ai == 0:
continue # skip events where max is first point
q = np.sum(ev_j) * tdur
meas["Q"].append(q)
meas["A"].append(ev_j[ai])
hw_up = self.dt_seconds * np.argmin(np.fabs((ev_j[ai] / 2.0) - ev_j[:ai]))
hw_down = self.dt_seconds * np.argmin(np.fabs(ev_j[ai:] - (ev_j[ai] / 2.0)))
meas["HWup"].append(hw_up)
meas["HWdown"].append(hw_down)
meas["HW"].append(hw_up + hw_down)
self.measured = True
self.Summary.allevents = allevents
else:
self.measured = False
self.Summary.allevents = None
return meas
def average_events(self, data: np.ndarray) -> tuple:
"""
compute average event with length of template
Parameters
----------
eventlist : list
List of event onset indices into the arrays
Expect a 2-d list (traces x onsets)
"""
# cprint('r', 'AVERAGE EVENTS')
self.Summary.average.averaged = False
tdur = np.max((np.max(self.taus) * 5.0, 0.010)) # go 5 taus or 10 ms past event
tpre = 1e-3 # self.taus[0]*10.
avgeventdur = tdur
self.tpre = tpre
avgnpts = int((tpre + tdur) / self.dt_seconds) # points for the average
npre = int(tpre / self.dt_seconds) # points for the pre time
npost = int(tdur / self.dt_seconds)
print('npre, npost avgnpts: ', npre, npost, avgnpts)
avg = np.zeros(avgnpts)
avgeventtb = np.arange(avgnpts) * self.dt_seconds
n_events = sum([len(events) for events in self.Summary.onsets])
allevents = np.zeros((n_events, avgnpts))
event_trace = [[]]*n_events
k = 0
pkt = 0
n_incomplete_events = 0
for itrace, onsets in enumerate(self.Summary.onsets):
# cprint('c', f"Trace: {itrace: d}, # onsets: {len(onsets):d}")
for j, event_onset in enumerate(onsets):
ix = event_onset + pkt # self.idelay
# print('itrace, ix, npre, npost: ', itrace, ix, npre, npost, data[itrace].shape[0])
if (ix + npost) < data[itrace].shape[0] and (ix - npre) >= 0:
allevents[k, :] = data[itrace, (ix - npre) : (ix + npost)]
allevents[k, :] -= np.mean(allevents[k, 0:npre])
else:
allevents[k, :] = np.nan*allevents[k,:]
n_incomplete_events += 1
event_trace[k] = [itrace, j]
k = k + 1
if n_incomplete_events > 0:
cprint("y", f"{n_incomplete_events:d} were excluded because they were incomplete (too close to end of trace)")
# tr_incl = [u[0] for u in event_trace]
# print(set(tr_incl), len(set(tr_incl)), len(event_trace))
# exit()
# print('k: ', k)
if k > 0:
self.Summary.average.averaged = True
self.Summary.average.avgnpts = avgnpts
self.Summary.average.Nevents = k
self.Summary.allevents = allevents
self.Summary.average.avgeventtb = avgeventtb
avgevent = np.nanmean(allevents, axis=0)
# print(allevents)
# import matplotlib.pyplot as mpl
# f, ax = mpl.subplots(1,1)
# ax.plot(allevents.T, 'k', alpha=0.3)
# ax.plot(avgevent, 'r', linewidth=3)
# mpl.show()
# print(avgevent)
# exit(1)
self.Summary.average.avgevent = avgevent# - np.mean(avgevent[:3])
self.Summary.event_trace_list = event_trace
return
else:
self.Summary.average.avgnpts = 0
self.Summary.average.avgevent = []
self.Summary.average.allevents = []
self.Summary.average.avgeventtb = []
self.Summary.average.averaged = False
self.Summary.event_trace_list = []
return
def average_events_subset(self, data: np.ndarray, eventlist:list) -> tuple:
"""
compute average event with length of template
Parameters
----------
data:
1-d numpy array of the data
eventlist : list
List of event onset indices into the arrays
Expect a 1-d list (traces x onsets)
"""
assert data.ndim == 1
# cprint('r', 'AVERAGE EVENTS')
tdur = np.max((np.max(self.taus) * 5.0, 0.010)) # go 5 taus or 10 ms past event
tpre = 0.0 # self.taus[0]*10.
avgeventdur = tdur
self.tpre = tpre
avgnpts = int((tpre + tdur) / self.dt_seconds) # points for the average
npre = int(tpre / self.dt_seconds) # points for the pre time
npost = int(tdur / self.dt_seconds)
avg = np.zeros(avgnpts)
avgeventtb = np.arange(avgnpts) * self.dt_seconds
n_events = sum([len(events) for events in self.Summary.onsets])
allevents = np.zeros((n_events, avgnpts))
event_trace = [None]*n_events
k = 0
pkt = 0
for itrace, event_onset in enumerate(eventlist):
# cprint('c', f"Trace: {itrace: d}, # onsets: {len(onsets):d}")
ix = event_onset + pkt # self.idelay
# print('itrace, ix, npre, npost: ', itrace, ix, npre, npost)
if (ix + npost) < data.shape[0] and (ix - npre) >= 0:
allevents[k, :] = data[(ix - npre) : (ix + npost)]
k = k + 1
return np.mean(allevents, axis=0), avgeventtb, allevents
def doubleexp(
self,
p: list,
x: np.ndarray,
y: Union[None, np.ndarray],
risepower: float,
fixed_delay: float = 0.0,
mode: int = 0,
) -> np.ndarray:
"""
Calculate a double exponential EPSC-like waveform with the rise to a power
to make it sigmoidal
"""
# fixed_delay = p[3] # allow to adjust; ignore input value
ix = np.argmin(np.fabs(x - fixed_delay))
tm = np.zeros_like(x)
tm[ix:] = p[0] * (1.0 - np.exp(-(x[ix:] - fixed_delay) / p[1])) ** risepower
tm[ix:] *= np.exp(-(x[ix:] - fixed_delay) / p[2])
if mode == 0:
return tm - y
elif mode == 1:
return np.linalg.norm(tm - y)
elif mode == -1:
return tm
else:
raise ValueError(
"doubleexp: Mode must be 0 (diff), 1 (linalg.norm) or -1 (just value)"
)
def risefit(
self,
p: list,
x: np.ndarray,
y: Union[None, np.ndarray],
risepower: float,
mode: int = 0,
) -> np.ndarray:
"""
Calculate a delayed EPSC-like waveform rise shape with the rise to a power
to make it sigmoidal, and an adjustable delay
input data should only be the rising phase.
p is in order: [amplitude, tau, delay]
"""
assert mode in [-1, 0, 1]
# if np.isnan(p[0]):
# try:
# x = 1./p[0]
# except Exception as e:
# track = traceback.format_exc()
# print(track)
# exit(0)
# # assert not np.isnan(p[0])
ix = np.argmin(np.fabs(x - p[2]))
tm = np.zeros_like(x)
expf = (x[ix:] - p[2]) / p[1]
pclip = 1.0e3
nclip = 0.0
try:
expf[expf > pclip] = pclip
expf[expf < -nclip] = -nclip
except:
print(pclip, nclip)
print(expf)
exit(1)
tm[ix:] = p[0] * (1.0 - np.exp(-expf)) ** risepower
if mode == 0:
return tm - y
elif mode == 1:
return np.linalg.norm(tm - y)
elif mode == -1:
return tm
else:
raise ValueError(
"doubleexp: Mode must be 0 (diff), 1 (linalg.norm) or -1 (just value)"
)
def decayexp(
self,
p: list,
x: np.ndarray,
y: Union[None, np.ndarray],
fixed_delay: float = 0.0,
mode: int = 0,
):
"""
Calculate an exponential decay (falling phase fit)
"""
tm = p[0] * np.exp(-(x - fixed_delay) / p[1])
if mode == 0:
return tm - y
elif mode == 1:
return np.linalg.norm(tm - y)
elif mode == -1:
return tm
else:
raise ValueError(
"doubleexp: Mode must be 0 (diff), 1 (linalg.norm) or -1 (just value)"
)
def fit_average_event(
self,
tb,
avgevent,
debug: bool = False,
label: str = "",
inittaus: List = [0.001, 0.005],
initdelay: Union[float, None] = None,
) -> None:
"""
Fit the averaged event to a double exponential epsc-like function
Operates on the AverageEvent data structure
"""
# tsel = np.argwhere(self.avgeventtb > self.tpre)[0] # only fit data in event, not baseline
tsel = 0 # use whole averaged trace
self.tsel = tsel
self.tau1 = inittaus[0]
self.tau2 = inittaus[1]
self.tau2_range = 10.0
self.tau1_minimum_factor = 5.0
time_past_peak = 2.5e-4
self.fitted_tau1 = np.nan
self.fitted_tau2 = np.nan
self.Amplitude = np.nan
# peak_pos = np.argmax(self.sign*self.avgevent[self.tsel:])
# decay_fit_start = peak_pos + int(time_past_peak/self.dt_seconds)
# init_vals = [self.sign*10., 1.0, 4., 0.]
# init_vals_exp = [20., 5.0]
# bounds_exp = [(0., 0.5), (10000., 50.)]
cprint('m', 'Fitting average event')
res, rdelay = self.event_fitter(
tb,
avgevent,
time_past_peak=time_past_peak,
initdelay=initdelay,
debug=debug,
label=label,
)
# print('rdelay: ', rdelay)
if res is None:
cprint('r', 'average fit result is None')
self.fitted = False
return
self.fitresult = res.x
self.Amplitude = self.fitresult[0]
self.fitted_tau1 = self.fitresult[1]
self.fitted_tau2 = self.fitresult[2]
self.bfdelay = rdelay
self.avg_best_fit = self.doubleexp(
self.fitresult,
tb[self.tsel :],
np.zeros_like(tb[self.tsel :]),
risepower=self.risepower,
mode=0,
fixed_delay=self.bfdelay,
)
self.avg_best_fit = self.sign * self.avg_best_fit
fiterr = np.linalg.norm(self.avg_best_fit -
avgevent[self.tsel :])
self.avg_fiterr = fiterr
ave = self.sign * avgevent
ipk = np.argmax(ave)
pk = ave[ipk]
p10 = 0.1 * pk
p90 = 0.9 * pk
p37 = 0.37 * pk
try:
i10 = np.argmin(np.fabs(ave[:ipk] - p10))
except:
self.fitted = False
return
i90 = np.argmin(np.fabs(ave[:ipk] - p90))
i37 = np.argmin(np.fabs(ave[ipk:] - p37))
self.risetenninety = self.dt_seconds * (i90 - i10)
self.decaythirtyseven = self.dt_seconds * (i37 - ipk)
self.Qtotal = self.dt_seconds * np.sum(avgevent[self.tsel :])
self.fitted = True
def fit_individual_events(self, onsets: np.ndarray) -> None:
"""
Fitting individual events
Events to be fit are selected from the entire event pool as:
1. events that are completely within the trace, AND
2. events that do not overlap other events
Fit events are further classified according to the fit error
"""
if (
not self.averaged or not self.fitted
): # averaging should be done first: stores events for convenience and gives some tau estimates
print("Require fit of averaged events prior to fitting individual events")
raise (ValueError)
time_past_peak = 0.75 # msec - time after peak to start fitting
# allocate arrays for results. Arrays have space for ALL events
# okevents, notok, and evok are indices
nevents = len(self.Summary.allevents) # onsets.shape[0]
self.ev_fitamp = np.zeros(nevents) # measured peak amplitude from the fit
self.ev_A_fitamp = np.zeros(
nevents
) # fit amplitude - raw value can be quite different than true amplitude.....
self.ev_tau1 = np.zeros(nevents)
self.ev_tau2 = np.zeros(nevents)
self.ev_1090 = np.zeros(nevents)
self.ev_2080 = np.zeros(nevents)
self.ev_amp = np.zeros(nevents) # measured peak amplitude from the event itself
self.ev_Qtotal = np.zeros(
nevents
) # measured charge of the event (integral of current * dt)
self.fiterr = np.zeros(nevents)
self.bfdelay = np.zeros(nevents)
self.best_fit = np.zeros((nevents, self.avgeventtb.shape[0]))
self.best_decay_fit = np.zeros((nevents, self.avgeventtb.shape[0]))
self.tsel = 0
self.tau2_range = 10.0
self.tau1_minimum_factor = 5.0
# prescreen events
minint = self.avgeventdur # msec minimum interval between events.
self.fitted_events = (
[]
) # events that can be used (may not be all events, but these are the events that were fit)
for i in range(nevents):
te = self.timebase[onsets[i]] # get current event
try:
tn = self.timebase[onsets[i + 1]] # check time to next event
if tn - te < minint: # event is followed too soon by another event
continue
except:
pass # just handle trace end condition
try:
tp = self.timebase[onsets[i - 1]] # check previous event
if (
te - tp < minint
): # if current event too close to a previous event, skip
continue
self.fitted_events.append(i) # passes test, include in ok events
except:
pass
for n, i in enumerate(self.fitted_events):
try:
max_event = np.max(self.sign * self.Summary.allevents[i, :])
except:
print("minis_methods eventfitter")
print("fitted: ", self.fitted_events)
print("i: ", i)
print("allev: ", self.Summary.allevents)
print("len allev: ", len(self.Summary.allevents), len(onsets))
raise ValueError('Fit failed')
res, rdelay = self.event_fitter(
self.avgeventtb, self.Summary.allevents[i, :], time_past_peak=time_past_peak
)
if res is None: # skip events that won't fit
continue
self.fitresult = res.x
# lmfit version - fails for odd reason
# dexpmodel = Model(self.doubleexp)
# params = dexpmodel.make_params(A=-10., tau_1=0.5, tau_2=4.0, dc=0.)
# self.fitresult = dexpmodel.fit(self.avgevent[tsel:], params, x=self.avgeventtb[tsel:])
self.ev_A_fitamp[i] = self.fitresult[0]
self.ev_tau1[i] = self.fitresult[1]
self.ev_tau2[i] = self.fitresult[2]
self.bfdelay[i] = rdelay
self.fiterr[i] = self.doubleexp(
self.fitresult,
self.avgeventtb,
self.sign * self.Summary.allevents[i, :],
risepower=self.risepower,
fixed_delay=self.bfdelay[i],
mode=1,
)
self.best_fit[i] = self.doubleexp(
self.fitresult,
self.avgeventtb,
np.zeros_like(self.avgeventtb),
risepower=self.risepower,
fixed_delay=self.bfdelay[i],
mode=0,
)
self.best_decay_fit[i] = self.decay_fit # from event_fitter
self.ev_fitamp[i] = np.max(self.best_fit[i])
self.ev_Qtotal[i] = self.dt_seconds * np.sum(self.sign * self.Summary.allevents[i, :])
self.ev_amp[i] = np.max(self.sign * self.Summary.allevents[i, :])
self.individual_event_screen(fit_err_limit=2000.0, tau2_range=10.0)
self.individual_events = True # we did this step
def event_fitter(
self,
timebase: np.ndarray,
event: np.ndarray,
time_past_peak: float = 0.0001,
initdelay: Union[float, None] = None,
debug: bool = False,
label: str = "",
) -> (dict, float):
"""
Fit the event
Procedure:
First we fit the rising phase (to the peak) with (1-exp(t)^n), allowing
the onset of the function to slide in time. This onset time is locked after this step
to minimize trading in the error surface between the onset and the tau values.
Second, we fit the decay phase, starting just past the peak (and accounting for the fixed delay)
Finally, we combine the parameters and do a final optimization with somewhat narrow
bounds.
Fits are good on noiseless test data.
Fits are affected by noise on the events (of course), but there is no "systematic"
variation that is present in terms of rise-fall tau tradeoffs.
"""
if len(event) == 0: # just check against empty event
cprint('m', 'Event length is 0? ')
# assert 1 == 0
return None, None
else:
cprint('c', f'event length is {len(event):d}')
debug = False
if debug:
import matplotlib.pyplot as mpl
if initdelay in [0, None]:
init_delay = 1
else:
init_delay = int(initdelay / self.dt_seconds)
ev_bl = 0. # np.mean(event[: init_delay]) # just first point...
# print("sign, event, bl: ", self.sign, len(event), init_delay)
# print('event: ', event)
# print('bl: ', ev_bl)
evfit = self.sign * (event - ev_bl)
# print('evfit0: ', evfit)
maxev = np.max(evfit)
if maxev == 0:
maxev = 1
# if peak_pos == 0:
# peak_pos = int(0.001/self.dt_seconds) # move to 1 msec later
evfit = evfit / maxev # scale to max of 1
peak_pos = np.argmax(evfit) + 1
if peak_pos == 1:
peak_pos = 5
amp_bounds = [0.0, 1.0]
# set reasonable, but wide bounds, and make sure init values are within bounds
# (and off center, but not at extremes)
bounds_rise = [amp_bounds, (self.dt_seconds, 4.0 * self.dt_seconds * peak_pos), (0.0, 0.005)]
if initdelay is None or initdelay < self.dt_seconds:
fdelay = 0.2 * np.mean(bounds_rise[2])
else:
fdelay = initdelay
if fdelay > self.dt_seconds * peak_pos:
fdelay = 0.2 * self.dt_seconds * peak_pos
init_vals_rise = [0.9, self.dt_seconds * peak_pos, fdelay]
cprint("r", "event_fitter: rise")
# print(' initvals: ', init_vals_rise)
# print(' bounds: ', bounds_rise)
# print(' peak_pos: ', peak_pos)
# print(' evfit: ', evfit[:peak_pos])
# print('maxev: ', maxev)
try:
res_rise = scipy.optimize.minimize(
self.risefit,
init_vals_rise,
bounds=bounds_rise,
method="SLSQP", # x_scale=[1e-12, 1e-3, 1e-3],
args=(
timebase[:peak_pos], # x
evfit[:peak_pos], # 'y
self.risepower,
1,
), # risepower, mode
)
except:
import matplotlib.pyplot as mpl
mpl.plot(self.timebase[:peak_pos], evfit[:peak_pos], 'k-')
mpl.show()
print('risefit: ', self.risefit)
print('init_vals_rise: ', init_vals_rise)
print('bounds rise: ', bounds_rise)
print('peak_pos: ', peak_pos)
return None, None
# raise ValueError()
if debug:
import matplotlib.pyplot as mpl
f, ax = mpl.subplots(2, 1)
ax[0].plot(timebase, evfit, "-k")
ax[1].plot(timebase[:peak_pos], evfit[:peak_pos], "-k")
print("\nrise fit:")
ax[1].set_title('To peak (black), to end (red)')
print("dt: ", self.dt_second, " maxev: ", maxev, " peak_pos: ", peak_pos)
print("bounds: ", bounds_rise)
print("init values: ", init_vals_rise)
print("result: ", res_rise.x)
rise_tb = timebase[:peak_pos]
rise_yfit = self.risefit(
res_rise.x, rise_tb, np.zeros_like(rise_tb), self.risepower, -1
)
ax[0].plot(rise_tb, rise_yfit, "r-")
ax[1].plot(rise_tb, rise_yfit, "r-")
# mpl.show()
# cprint('c', f"Res_rise: {str(res_rise):s}")
self.res_rise = res_rise
# fit decay exponential next:
bounds_decay = [
amp_bounds,
(self.dt_seconds, self.tau2 * 20.0),
] # be sure init values are inside bounds
init_vals_decay = [0.9 * np.mean(amp_bounds), self.tau2]
# print('peak, tpast, tdel', peak_pos , int(time_past_peak/self.dt_seconds) , int(res_rise.x[2]/self.dt_seconds))
decay_fit_start = peak_pos + int(
time_past_peak / self.dt_seconds
) # + int(res_rise.x[2]/self.dt_seconds)
# print('decay start: ', decay_fit_start, decay_fit_start*self.dt_seconds, len(event[decay_fit_start:]))
cprint("r", "event_fitter: decay")
res_decay = scipy.optimize.minimize(
self.decayexp,
init_vals_decay,
bounds=bounds_decay,
method="L-BFGS-B",
# bounds=bounds_decay, method='L-BFGS-B',
args=(
timebase[decay_fit_start:] - decay_fit_start * self.dt_seconds,
evfit[decay_fit_start:],
res_rise.x[2],
1,
),
) # res_rise.x[2], 1))
self.res_decay = res_decay
if debug:
decay_tb = timebase[decay_fit_start:]
decay_ev = evfit[decay_fit_start:]
# f, ax = mpl.subplots(2, 1)
# ax[0].plot(timebase, evfit)
ax[1].plot(decay_tb, decay_ev, "g-")
ax[1].set_title('Decay fit (green)')
print("\ndecay fit:")
print("dt: ", self.dt_seconds, " maxev: ", maxev, " peak_pos: ", peak_pos)
print("bounds: ", bounds_decay)
print("init values: ", init_vals_decay)
print("result: ", res_decay.x)
y = self.decayexp(
res_decay.x,
decay_tb,
np.zeros_like(decay_tb),
fixed_delay=decay_fit_start * self.dt_seconds,
mode=-1,
)
# print(y)
# ax[1].plot(decay_tb, y, 'bo', markersize=3)
ax[1].plot(decay_tb, y, "g-")
if res_rise.x[2] == 0.0:
res_rise.x[2] = 2.0*self.dt_seconds
# now tune by fitting the whole trace, allowing some (but not too much) flexibility
bounds_full = [
[a * 10.0 for a in amp_bounds], # overall amplitude
(0.2 * res_rise.x[1], 5.0 * res_rise.x[1]), # rise tau
(0.2 * res_decay.x[1], 50.0 * res_decay.x[1]), # decay tau
(0.3 * res_rise.x[2], 20.0 * res_rise.x[2]), # delay
# (0, 1), # amplitude of decay component
]
init_vals = [
amp_bounds[1],
res_rise.x[1],
res_decay.x[1],
res_rise.x[2],
] # be sure init values are inside bounds
# if len(label) > 0:
# print('Label: ', label)
# print('bounds full: ', bounds_full)
# print('init_vals: ', init_vals)
cprint("r", "event_fitter: full")
try:
res = scipy.optimize.minimize(
self.doubleexp,
init_vals,
method="L-BFGS-B",
# method="Nelder-Mead",
args=(timebase, evfit, self.risepower, res_rise.x[2], 1),
bounds=bounds_full,
options={"maxiter": 100000},
)
except:
print('Fitting failed in event fitter')
print('evfit: ', evfit)
return None, None
# print('timebase: ', timebase)
# import matplotlib.pyplot as mpl
# mpl.plot(timebase, evfit)
# mpl.show()
# print('risepower: ', self.risepower, res_rise.x[2], bounds_full)
# raise ValueError()
if debug:
print("\nFull fit:")
print("dt: ", self.dt_seconds, " maxev: ", maxev, " peak_pos: ", peak_pos)
print("bounds: ", bounds_full)
print("init values: ", init_vals)
print("result: ", res.x, res_rise.x[2])
f, ax = mpl.subplots(2, 1)
ax[0].plot(timebase, evfit, "k-")
ax[1].plot(timebase, evfit, "k-")
y = self.doubleexp(
res.x,
timebase,
event,
risepower=self.risepower,
fixed_delay=res_rise.x[2],
mode=-1,
)
ax[1].plot(timebase, y, "bo", markersize=3)
f.suptitle("Full fit")
mpl.show()
self.rise_fit = self.risefit(
res_rise.x, timebase, np.zeros_like(timebase), self.risepower, mode=0
)
self.rise_fit[peak_pos:] = 0
self.rise_fit = self.rise_fit * maxev
self.decay_fit = self.decayexp(
self.res_decay.x,
timebase,
np.zeros_like(timebase),
fixed_delay=self.res_rise.x[2],
mode=0,
)
self.decay_fit[:decay_fit_start] = 0 # clip the initial part
self.decay_fit = self.decay_fit * maxev
self.bferr = self.doubleexp(
res.x,
timebase,
event,
risepower=self.risepower,
fixed_delay=decay_fit_start * self.dt_seconds,
mode=1,
)
# print('fit result: ', res.x, res_rise.x[2])
res.x[0] = res.x[0] * maxev # correct for factor
self.peak_val = maxev
return res, res_rise.x[2]
def individual_event_screen(
self, fit_err_limit: float = 2000.0, tau2_range: float = 2.5
) -> None:
"""
Screen events:
error of the fit must be less than a limit,
and
tau2 must fall within a range of the default tau2
and
tau1 must be greater than a minimum tau1
sets:
self.events_ok : the list of fitted events that pass
self.events_notok : the list of fitted events that did not pass
"""
self.events_ok = []
for i in self.fitted_events: # these are the events that were fit
if self.fiterr[i] <= fit_err_limit:
if self.ev_tau2[i] <= self.tau2_range * self.tau2:
if self.ev_fitamp[i] > self.min_event_amplitude:
if self.ev_tau1[i] > self.tau1 / self.tau1_minimum_factor:
self.events_ok.append(i)
self.events_notok = list(set(self.fitted_events).difference(self.events_ok))
def plot_individual_events(
self, fit_err_limit: float = 1000.0, tau2_range: float = 2.5, show: bool = True
) -> None:
if not self.individual_events:
raise
P = PH.regular_grid(
3,
3,
order="columns",
figsize=(8.0, 8.0),
showgrid=False,
verticalspacing=0.1,
horizontalspacing=0.12,
margins={
"leftmargin": 0.12,
"rightmargin": 0.12,
"topmargin": 0.03,
"bottommargin": 0.1,
},
labelposition=(-0.12, 0.95),
)
self.P = P
# evok, notok = self.individual_event_screen(fit_err_limit=fit_err_limit, tau2_range=tau2_range)
evok = self.events_ok
notok = self.events_notok
P.axdict["A"].plot(self.ev_tau1[evok], self.ev_amp[evok], "ko", markersize=4)
P.axdict["A"].set_xlabel(r"$tau_1$ (ms)")
P.axdict["A"].set_ylabel(r"Amp (pA)")
P.axdict["B"].plot(self.ev_tau2[evok], self.ev_amp[evok], "ko", markersize=4)
P.axdict["B"].set_xlabel(r"$tau_2$ (ms)")
P.axdict["B"].set_ylabel(r"Amp (pA)")
P.axdict["C"].plot(self.ev_tau1[evok], self.ev_tau2[evok], "ko", markersize=4)
P.axdict["C"].set_xlabel(r"$\tau_1$ (ms)")
P.axdict["C"].set_ylabel(r"$\tau_2$ (ms)")
P.axdict["D"].plot(self.ev_amp[evok], self.fiterr[evok], "ko", markersize=3)
P.axdict["D"].plot(self.ev_amp[notok], self.fiterr[notok], "ro", markersize=3)
P.axdict["D"].set_xlabel(r"Amp (pA)")
P.axdict["D"].set_ylabel(r"Fit Error (cost)")
for i in notok:
ev_bl = np.mean(self.Summary.allevents[i, 0:5])
P.axdict["E"].plot(
self.avgeventtb, self.Summary.allevents[i] - ev_bl, "b-", linewidth=0.75
)
# P.axdict['E'].plot()
P.axdict["F"].plot(
self.avgeventtb, self.Summary.allevents[i] - ev_bl, "r-", linewidth=0.75
)
P2 = PH.regular_grid(
1,
1,
order="columns",
figsize=(8.0, 8.0),
showgrid=False,
verticalspacing=0.1,
horizontalspacing=0.12,
margins={
"leftmargin": 0.12,
"rightmargin": 0.12,
"topmargin": 0.03,
"bottommargin": 0.1,
},
labelposition=(-0.12, 0.95),
)
P3 = PH.regular_grid(
1,
5,
order="columns",
figsize=(12, 8.0),
showgrid=False,
verticalspacing=0.1,
horizontalspacing=0.12,
margins={
"leftmargin": 0.12,
"rightmargin": 0.12,
"topmargin": 0.03,
"bottommargin": 0.1,
},
labelposition=(-0.12, 0.95),
)
idx = [a for a in P3.axdict.keys()]
ncol = 5
offset2 = 0.0
k = 0
for i in evok:
# print(self.ev_tau1, self.ev_tau2)
offset = i * 3.0
ev_bl = np.mean(self.Summary.allevents[i, 0:5])
# Copyright 2017 SAS Project Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pygc
import random
import unittest
from reference_models.geo import vincenty
def geodesic_iterative(lat1, lon1, lat2, lon2, num_points):
"""Original function using a dual iterative approach.
"""
geodesic = [(lat1, lon1)]
lat, lon = lat1, lon1
dist, bearing, _ = vincenty.GeodesicDistanceBearing(lat, lon, lat2, lon2)
step_km = dist / (float(num_points-1))
for _ in xrange(1, num_points-1):
lat, lon, _ = vincenty.GeodesicPoint(lat, lon, step_km, bearing)
geodesic.append((lat, lon))
_, bearing, _ = vincenty.GeodesicDistanceBearing(lat, lon, lat2, lon2)
geodesic.append((lat2, lon2))
return geodesic
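# Example (illustrative coordinates): 10 points along the geodesic from
# Washington, DC to Seattle.
#   pts = geodesic_iterative(38.9, -77.0, 47.6, -122.3, 10)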
class TestVincenty(unittest.TestCase):
def test_distbear_whensamepoints(self):
self.assertEqual(vincenty.GeodesicDistanceBearing(38, -80, 38, -80),
(0, 0, 0))
def test_distbear(self):
random.seed(69)
for _ in range(1000):
lat1 = random.uniform(-70, 70)
lng1 = random.uniform(-170, 170)
lat2 = lat1 + random.uniform(-10, 10) # up to about 1000km
lng2 = lng1 + random.uniform(-10, 10)
d, az, rev_az = vincenty.GeodesicDistanceBearing(lat1, lng1, lat2, lng2)
self.assertTrue(az >= 0.0 and az < 360 and rev_az >= 0 and rev_az < 360)
p = pygc.great_distance(start_latitude=lat1, start_longitude=lng1,
end_latitude=lat2, end_longitude=lng2)
self.assertAlmostEqual(d*1000.0, p['distance'], 2) # cm precision
self.assertAlmostEqual(az, p['azimuth'], 9)
self.assertAlmostEqual(rev_az, p['reverse_azimuth'], 9)
def test_point(self):
random.seed(69)
for _ in range(1000):
lat = random.uniform(-80, 80)
lng = random.uniform(-180, 180)
dist = random.uniform(10, 1000000)
bearing = random.uniform(-180, 180)
latd, lngd, rev_az = vincenty.GeodesicPoint(lat, lng, dist/1000.0, bearing)
self.assertTrue(rev_az >= 0 and rev_az < 360)
p = pygc.great_circle(latitude=lat, longitude=lng, distance=dist, azimuth=bearing)
self.assertAlmostEqual(latd, p['latitude'])
self.assertAlmostEqual(lngd, p['longitude'])
self.assertAlmostEqual(rev_az, p['reverse_azimuth'])
def test_points(self):
distances = [5,10,50, 100, 500, 1000, 5000]
lat0 = 45
lng0 = -122
bearing = 33
lats, lngs, rev_azis = vincenty.GeodesicPoints(lat0, lng0, distances, bearing)
self.assertTrue(isinstance(lats, list))
# Test with ndarray
lats2, lngs2, rev_azis2 = vincenty.GeodesicPoints(lat0, lng0, np.array(distances), bearing)
self.assertTrue(isinstance(lats2, np.ndarray))
self.assertEqual(np.max(np.abs(np.array(lats) - lats2)), 0)
self.assertEqual(np.max(np.abs(np.array(lngs) - lngs2)), 0)
"""This module contains benchmarking utility.
"""
import gc
import numbers
import sys
import timeit
from typing import Callable, Iterable, List, Union
import numpy as np
from pywrangler.exceptions import NotProfiledError
from pywrangler.util._pprint import (
enumeration,
header,
pretty_file_size,
pretty_time_duration
)
from pywrangler.util.helper import get_param_names
def allocate_memory(size: float) -> np.ndarray:
"""Helper function to approximately allocate memory by creating numpy array
with given size in MiB.
Numpy is used deliberately to define the used memory via dtype.
Parameters
----------
size: float
Size in MiB to be occupied.
Returns
-------
memory_holder: np.ndarray
"""
if size <= 0:
return None
empty_size = sys.getsizeof(np.ones(0))
size_in_bytes = np.ceil(size * (2 ** 20)).astype(np.int64) - empty_size
memory_holder = np.ones(size_in_bytes, dtype=np.int8)
return memory_holder
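# Example (illustrative): hold ~10 MiB until the reference is dropped.
#   holder = allocate_memory(10) # np.int8 array of roughly 10 MiB
#   del holder # release it again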
class BaseProfiler:
"""Base class defining the interface for all profilers.
Subclasses have to implement `profile` (the actual profiling method) and
`less_is_better` (defining the ranking of profiling measurements).
The private attribute `_measurements` is assumed to be set by `profile`.
Attributes
----------
measurements: list
The actual profiling measurements.
best: float
The best measurement.
median: float
The median of measurements.
worst: float
The worst measurement.
std: float
The standard deviation of measurements.
runs: int
The number of measurements.
Methods
-------
profile
Contains the actual profiling implementation.
report
Print simple report consisting of best, median, worst, standard
deviation and the number of measurements.
profile_report
Calls profile and report in sequence.
"""
@property
def measurements(self) -> List[float]:
"""Return measurements of profiling.
"""
self._check_is_profiled(["_measurements"])
return self._measurements
@property
def best(self) -> float:
"""Returns the best measurement.
"""
if self.less_is_better:
return np.min(self.measurements)
else:
return np.max(self.measurements)
@property
def median(self) -> float:
"""Returns the median of measurements.
"""
return np.median(self.measurements)
@property
def worst(self) -> float:
"""Returns the worst measurement.
"""
if self.less_is_better:
return np.max(self.measurements)
else:
return np.min(self.measurements)
"""hear test util objects"""
from importlib_resources import files
import numpy as np
import soundfile as sf
data_files = files('hear') / 'tests' / 'data'
file_0123456789_wav = data_files / '0123456789.wav'
wf_0123456789 = np.arange(10).astype('int16')
def mk_test_files():
file_obj = file_0123456789_wav
n = 10
dtype = 'int16'
written_wf = np.arange(n).astype(dtype)
import torch
import numpy as np
import matplotlib.pyplot as plt
import pathlib
def writeIntermediateState(timeStep, model, dataset, epoch, fileWriter, csystem, identifier="PDE"):
"""
Functions that write intermediate solutions to tensorboard
"""
if fileWriter is None:
return
nx = csystem['nx']
ny = csystem['nx']
x, y, t = dataset.getInput(timeStep, csystem)
x = torch.Tensor(x).float().cuda()
y = torch.Tensor(y).float().cuda()
t = torch.Tensor(t).float().cuda()
inputX = torch.stack([x, y, t], 1)
UV = model.forward(inputX).detach().cpu().numpy()
u = UV[:, 0].reshape((nx, ny))
v = UV[:, 1].reshape((nx, ny))
h = u ** 2 + v ** 2
fig = plt.figure()
plt.imshow(u, extent=[-3,3,-3,3], cmap='jet')
plt.colorbar()
fileWriter.add_figure('%s-real/t%.2f' %
(identifier, t[0].cpu().numpy()), fig, epoch)
plt.close(fig)
fig = plt.figure()
plt.imshow(v, extent=[-3,3,-3,3], cmap='jet')
plt.colorbar()
fileWriter.add_figure('%s-imag/t%.2f' %
(identifier, t[0].cpu().numpy()), fig, epoch)
plt.close(fig)
fig = plt.figure()
plt.imshow(h, extent=[-3,3,-3,3], cmap='jet')
plt.colorbar()
fileWriter.add_figure('%s-norm/t%.2f' %
(identifier, t[0].cpu().numpy()), fig, epoch)
plt.close(fig)
def valLoss(model, dataset, timeStep, csystem):
x, y, t = dataset.getInput(timeStep, csystem)
x = torch.Tensor(x).float().cuda()
y = torch.Tensor(y).float().cuda()
t = torch.Tensor(t).float().cuda()
inputX = torch.stack([x, y, t], 1)
UV = model.forward(inputX).detach().cpu().numpy()
uPred = UV[:, 0].reshape(-1)
vPred = UV[:, 1].reshape(-1)
# load label data
uVal, vVal = dataset.getFrame(timeStep, csystem)
uVal = np.array(uVal).reshape(-1)
vVal = np.array(vVal).reshape(-1)
valLoss_u = np.max(abs(uVal - uPred))
valLoss_v = np.max(abs(vVal - vPred))
valSqLoss_u = np.sqrt(np.sum(np.power(uVal - uPred, 2)))
valSqLoss_v = np.sqrt(np.sum(np.power(vVal - vPred, 2)))
return valLoss_u, valLoss_v, valSqLoss_u, valSqLoss_v
def writeValidationLoss(timeStep, model, dataset, epoch, writer, csystem, identifier):
if writer is None:
return
_, _, t = dataset.getInput(timeStep, csystem)
t = torch.Tensor(t).float().cuda()
valLoss_u, valLoss_v, valSqLoss_u, valSqLoss_v = valLoss(
model, dataset, timeStep, csystem)
valLoss_uv = valLoss_u + valLoss_v
valSqLoss_uv = valSqLoss_u + valSqLoss_v
writer.add_scalar("inf: L_%s/u/t%.2f" %
(identifier, t[0].cpu().numpy()), valLoss_u, epoch)
writer.add_scalar("inf: L_%s/v/t%.2f" %
(identifier, t[0].cpu().numpy()), valLoss_v, epoch)
writer.add_scalar("inf: L_%s/uv/t%.2f" %
(identifier, t[0].cpu().numpy()), valLoss_uv, epoch)
writer.add_scalar("2nd: L_%s/u/t%.2f" %
(identifier, t[0].cpu().numpy()), valSqLoss_u, epoch)
writer.add_scalar("2nd: L_%s/v/t%.2f" %
(identifier, t[0].cpu().numpy()), valSqLoss_v, epoch)
writer.add_scalar("2nd: L_%s/uv/t%.2f" %
(identifier, t[0].cpu().numpy()), valSqLoss_uv, epoch)
def save_checkpoint(model, path, epoch):
# print(model.state_dict().keys())
pathlib.Path(path).mkdir(parents=True, exist_ok=True)
state = {
'model': model.state_dict(),
}
torch.save(state, path + 'model_' + str(epoch)+'.pt')
print("saving model to ---> %s" % (path + 'model_' + str(epoch)+'.pt'))
def load_checkpoint(model, path):
device = torch.device('cpu')
checkpoint = torch.load(path, map_location=device)
model.load_state_dict(checkpoint['model'])
def getDefaults():
# static parameter
nx = 200
ny = 200
nt = 1000
xmin = -3
xmax = 3
ymin = -3
ymax = 3
dt = 0.001
tmax = 1
numOfEnergySamplingPointsX = 100
numOfEnergySamplingPointsY = 100
coordinateSystem = {"x_lb": xmin, "x_ub": xmax, "y_lb": ymin,
"y_ub": ymax, "nx": nx, "ny": ny, "nt": nt, "dt": dt}
return coordinateSystem, numOfEnergySamplingPointsX, numOfEnergySamplingPointsY, tmax
def getVariables(model, dataset, cSystem, time, step=1):
x, y, t = dataset.getInput(time, cSystem, step)
x = torch.Tensor(x).float().cuda()
y = torch.Tensor(y).float().cuda()
t = torch.Tensor(t).float().cuda()
u, v, u_yy, v_yy, u_xx, v_xx, u_t, v_t = model.net_uv(x, y, t)
x = x.view(-1)
y = y.view(-1)
X = torch.stack([x, y, u, v, u_yy, v_yy, u_xx, v_xx], 1)
f = -model.forward_hpm(X)
dudt = f[:, 0]
dvdt = f[:, 1]
dudt = dudt.cpu().detach().numpy().reshape(-1, 1)
dvdt = dvdt.cpu().detach().numpy().reshape(-1, 1)
x = x.cpu().detach().numpy().reshape(-1)
y = y.cpu().detach().numpy().reshape(-1)
u = u.cpu().detach().numpy().reshape(-1)
v = v.cpu().detach().numpy().reshape(-1)
u_xx = u_xx.cpu().detach().numpy().reshape(-1)
u_yy = u_yy.cpu().detach().numpy().reshape(-1)
v_xx = v_xx.cpu().detach().numpy().reshape(-1)
v_yy = v_yy.cpu().detach().numpy().reshape(-1)
return x, y, u, v, u_xx, u_yy, v_xx, v_yy, dudt, dvdt
def write_coefficients(model, dataset, epoch, cSystem, time, fileWriter):
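# For every grid point, solve the 2x2 linear system
#   [ -(v_xx+v_yy)     (u_xx+u_yy)  ] [c1]   [du/dt]
#   [  v*(x^2+y^2)    -u*(x^2+y^2)  ] [c2] = [dv/dt]
# for the PDE coefficients c1 and c2, clip them to [-10, 10], and log the
# per-frame median/variance plus spatial maps of the coefficients to tensorboard.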
x, y, u, v, u_xx, u_yy, v_xx, v_yy, dudt, dvdt = getVariables(
model, dataset, cSystem, time)
l1 = []
l2 = []
for i in range(cSystem['nx']*cSystem['ny']):
if x[i] == 0 and y[i] == 0:
c1 = 0
c2 = 0
else:
a = np.array([[-v_xx[i]-v_yy[i], u_xx[i]+u_yy[i]], [v[i]
* x[i]**2 + v[i]*y[i]**2, -u[i]*x[i]**2 - u[i]*y[i]**2]])
b = np.array([dudt[i], dvdt[i]])
c1, c2 = np.linalg.solve(a, b).reshape(-1)
if c1 > 10:
c1 = 10
elif c1 < -10:
c1 = -10
if c2 > 10:
c2 = 10
elif c2 < -10:
c2 = -10
l1.append(c1)
l2.append(c2)
fileWriter.add_scalar("median(C1)", np.median(l1), epoch)
fileWriter.add_scalar("median(C2)", np.median(l2), epoch)
fileWriter.add_scalar("var(C1)", np.var(l1), epoch)
fileWriter.add_scalar("var(C2)", np.var(l2), epoch)
l1 = np.array(l1).reshape((cSystem['nx'], cSystem['ny']))
fig = plt.figure()
plt.imshow(l1, extent=[-3,3,-3,3], cmap='jet')
plt.colorbar()
fileWriter.add_figure('c1/t%.2f' % (time), fig, epoch)
plt.close(fig)
l2 = np.array(l2).reshape((cSystem['nx'], cSystem['ny']))