"""
This file is part of the package FUNtoFEM for coupled aeroelastic simulation
and design optimization.
Copyright (C) 2015 Georgia Tech Research Corporation.
Additional copyright (C) 2015 <NAME>, <NAME> and <NAME>.
All rights reserved.
FUNtoFEM is licensed under the Apache License, Version 2.0 (the "License");
you may not use this software except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
--------------------------------------------------------------------------------
The Works
--------------------------------------------------------------------------------
The following script demonstrates the displacement transfer's ability to
transfer a variety of displacement fields from a relatively simple structural
mesh to a relatively simple aerodynamic surface mesh.
"""
import numpy as np
from mpi4py import MPI
from funtofem import TransferScheme
import sys
sys.path.append('../')
from tecplot_output import writeOutputForTecplot
import meshpy.triangle as triangle
"""
--------------------------------------------------------------------------------
Creating meshes
--------------------------------------------------------------------------------
"""
# Create boundary of high aspect ratio, tapered plate for structure
struct_bound = [(1.791204, 0.654601),
(1.980463, 4.844049),
(3.535093, 4.533113),
(3.994722, 0.654601)]
def round_trip_connect(start, end):
result = []
for i in range(start, end):
result.append((i, i+1))
result.append((end, start))
return result
struct_facets = round_trip_connect(0, len(struct_bound)-1)
# Mesh the plate using Triangle
struct_info = triangle.MeshInfo()
struct_info.set_points(struct_bound)
struct_info.set_facets(struct_facets)
struct_mesh = triangle.build(struct_info, max_volume=1e-1, min_angle=25)
# triangle.write_gnuplot_mesh("triangles.dat", struct_mesh)
# Extracting points and connectivity
z_offset = 0.0
struct_X = []
for point in struct_mesh.points:
point += [z_offset]
struct_X.append(point)
struct_X = np.array(struct_X).flatten()
struct_nnodes = len(struct_X) // 3  # integer division: number of 3D nodes
struct_conn = []
for i, t in enumerate(struct_mesh.elements):
struct_conn += t
struct_conn = np.array(struct_conn) + 1
struct_nelems = len(struct_mesh.elements)
struct_ptr = np.arange(0, 3*struct_nelems+1, 3, dtype='intc')
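# struct_ptr is a CSR-style offset array: triangle i owns connectivity
# entries struct_conn[struct_ptr[i]:struct_ptr[i+1]] (three nodes each).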
# Create rectangular plate for aerodynamic surface
aero_bound = [(1.5, 0.0),
(1.5, 6.0),
(4.5, 6.0),
(4.5, 0.0)]
# Reuse round_trip_connect() defined above for the aerodynamic boundary
aero_facets = round_trip_connect(0, len(aero_bound)-1)
# Mesh the plate using Triangle
aero_info = triangle.MeshInfo()
aero_info.set_points(aero_bound)
aero_info.set_facets(aero_facets)
aero_mesh = triangle.build(aero_info, max_volume=1e-3, min_angle=25)
# Extracting points and connectivity
z_offset = 1.0
aero_X = []
for point in aero_mesh.points:
point += [z_offset]
aero_X.append(point)
aero_X = np.array(aero_X).flatten()
aero_nnodes = len(aero_X) // 3  # integer division: number of 3D nodes
aero_conn = []
for i, t in enumerate(aero_mesh.elements):
aero_conn += t
aero_conn = np.array(aero_conn) + 1
aero_nelems = len(aero_mesh.elements)
aero_ptr = np.arange(0, 3*aero_nelems+1, 3, dtype='intc')
"""
--------------------------------------------------------------------------------
Defining displacements
--------------------------------------------------------------------------------
"""
# STRETCH
st = 1.0 # stretch factor
stretch = np.array([[1.0, 0.0, 0.0],
[0.0, st, 0.0],
[0.0, 0.0, 1.0]])
stretched = np.dot(stretch, struct_X.reshape((-1,3)).T).T
# SHEAR
sh = 0.25  # shear factor (an alternative scaling would be 2. / b)
shear = np.array([[1.0, sh, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 1.0]])
sheared = np.dot(shear, stretched.T).T
# TWIST
theta_tip = -90.0 * np.pi / 180.0  # tip twist of -90 degrees, in radians
twisted = np.zeros(sheared.shape)
y = struct_X[1::3]
b = y.max() - y.min()
for k in range(struct_nnodes):
p = sheared[k,:]
y = p[1]
theta = theta_tip * y / b
    twist = np.array([[np.cos(theta), 0.0, np.sin(theta)],
                      [0.0, 1.0, 0.0],
                      [-np.sin(theta), 0.0, np.cos(theta)]])
    twisted[k,:] = np.dot(twist, p)
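# The deformed coordinates define a structural displacement field,
# u_s = x_deformed - x_undeformed, which is what a displacement transfer
# scheme consumes. (This reconstruction line is an illustrative addition,
# not part of the original script.)
struct_disps = twisted.flatten() - struct_X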
import torch
from torch.utils.data import Dataset
import numpy as np
import os
import cv2
from torchvision import transforms as T
from utils.util import load_pickle_file
def get_pixelcoords(H, W, mask=None, subsampletype='foreground_pixel', subsamplesize=32, fore_rate=0.9, fore_erode=3):
def sample(indx, indy, n_pixels):
select_indexs = np.random.choice(indx.shape[0], n_pixels, replace=True)
px = indx[select_indexs]
py = indy[select_indexs]
return px, py
    if subsampletype == 'pixel':
        indx, indy = np.meshgrid(np.arange(0, H), np.arange(0, W))
        # Assumed completion: sample uniformly over every pixel coordinate.
        px, py = sample(indx.flatten(), indy.flatten(),
                        subsamplesize * subsamplesize)
        return px, py
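# Hypothetical usage of get_pixelcoords (shape values are illustrative):
#   px, py = get_pixelcoords(H=256, W=256, subsampletype='pixel',
#                            subsamplesize=32)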
import mock
import numpy
from PIL import Image
from types import SimpleNamespace
import unittest
from machine_common_sense.mcs_action import MCS_Action
from machine_common_sense.mcs_goal import MCS_Goal
from machine_common_sense.mcs_object import MCS_Object
from machine_common_sense.mcs_pose import MCS_Pose
from machine_common_sense.mcs_return_status import MCS_Return_Status
from machine_common_sense.mcs_step_output import MCS_Step_Output
from .mock_mcs_controller_ai2thor import Mock_MCS_Controller_AI2THOR
class Test_MCS_Controller_AI2THOR(unittest.TestCase):
def setUp(self):
self.controller = Mock_MCS_Controller_AI2THOR()
self.controller.set_config({ 'metadata': '' })
def create_mock_scene_event(self, mock_scene_event_data):
        # Wrap the dict in a SimpleNamespace so its entries can be accessed
        # with dotted attribute notation, mirroring the real event class
        # (which is a class instance, not a dict).
return SimpleNamespace(**mock_scene_event_data)
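    # For example (hypothetical): create_mock_scene_event({'frame': f}).frame
    # returns f, just like attribute access on a real scene event object.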
def create_retrieve_object_list_scene_event(self):
return {
"events": [self.create_mock_scene_event({
"object_id_to_color": {
"testId1": (12, 34, 56),
"testId2": (98, 76, 54),
"testId3": (101, 102, 103)
}
})],
"metadata": {
"objects": [{
"colorsFromMaterials": ["c1"],
"direction": {
"x": 0,
"y": 0,
"z": 0
},
"distance": 0,
"distanceXZ": 0,
"isPickedUp": True,
"mass": 1,
"objectId": "testId1",
"position": {
"x": 1,
"y": 1,
"z": 2
},
"rotation": {
"x": 1.0,
"y": 2.0,
"z": 3.0
},
"salientMaterials": [],
"shape": "shape1",
"visibleInCamera": True
}, {
"colorsFromMaterials": ["c2", "c3"],
"direction": {
"x": 90,
"y": -30,
"z": 0
},
"distance": 1.5,
"distanceXZ": 1.1,
"isPickedUp": False,
"mass": 12.34,
"objectBounds": {
"objectBoundsCorners": ["p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8"]
},
"objectId": "testId2",
"position": {
"x": 1,
"y": 2,
"z": 3
},
"rotation": {
"x": 1.0,
"y": 2.0,
"z": 3.0
},
"salientMaterials": ["Foobar", "Metal", "Plastic"],
"shape": "shape2",
"visibleInCamera": True
}, {
"colorsFromMaterials": [],
"direction": {
"x": -90,
"y": 180,
"z": 270
},
"distance": 2.5,
"distanceXZ": 2,
"isPickedUp": False,
"mass": 34.56,
"objectBounds": {
"objectBoundsCorners": ["pA", "pB", "pC", "pD", "pE", "pF", "pG", "pH"]
},
"objectId": "testId3",
"position": {
"x": -3,
"y": -2,
"z": -1
},
"rotation": {
"x": 11.0,
"y": 12.0,
"z": 13.0
},
"salientMaterials": ["Wood"],
"shape": "shape3",
"visibleInCamera": False
}]
}
}
def create_wrap_output_scene_event(self):
image_data = numpy.array([[0]], dtype=numpy.uint8)
depth_mask_data = numpy.array([[128]], dtype=numpy.uint8)
object_mask_data = numpy.array([[192]], dtype=numpy.uint8)
return {
"events": [self.create_mock_scene_event({
"depth_frame": depth_mask_data,
"frame": image_data,
"instance_segmentation_frame": object_mask_data,
"object_id_to_color": {
"testId": (12, 34, 56),
"testWallId": (101, 102, 103)
}
})],
"metadata": {
"agent": {
"cameraHorizon": 12.34,
"position": {
"x": 0.12,
"y": -0.23,
"z": 4.5
},
"rotation": {
"x": 1.111,
"y": 2.222,
"z": 3.333
}
},
"cameraPosition": {
"y": 0.1234
},
"clippingPlaneFar": 25,
"clippingPlaneNear": 0,
"fov": 42.5,
"lastActionStatus": "SUCCESSFUL",
"lastActionSuccess": True,
"objects": [{
"colorsFromMaterials": ["c1"],
"direction": {
"x": 90,
"y": -30,
"z": 0
},
"distance": 1.5,
"distanceXZ": 1.1,
"isPickedUp": False,
"mass": 12.34,
"objectBounds": {
"objectBoundsCorners": ["p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8"]
},
"objectId": "testId",
"position": {
"x": 10,
"y": 11,
"z": 12
},
"rotation": {
"x": 1.0,
"y": 2.0,
"z": 3.0
},
"salientMaterials": ["Wood"],
"shape": "shape",
"visibleInCamera": True
}, {
"colorsFromMaterials": [],
"direction": {
"x": -90,
"y": 180,
"z": 270
},
"distance": 2.5,
"distanceXZ": 2.0,
"isPickedUp": False,
"mass": 34.56,
"objectBounds": {
"objectBoundsCorners": ["pA", "pB", "pC", "pD", "pE", "pF", "pG", "pH"]
},
"objectId": "testIdHidden",
"position": {
"x": -3,
"y": -2,
"z": -1
},
"rotation": {
"x": 11.0,
"y": 12.0,
"z": 13.0
},
"salientMaterials": ["Wood"],
"shape": "shapeHidden",
"visibleInCamera": False
}],
"structuralObjects": [{
"colorsFromMaterials": ["c2"],
"direction": {
"x": 180,
"y": -60,
"z": 0
},
"distance": 2.5,
"distanceXZ": 2.2,
"isPickedUp": False,
"mass": 56.78,
"objectBounds": {
"objectBoundsCorners": ["p11", "p12", "p13", "p14", "p15", "p16", "p17", "p18"]
},
"objectId": "testWallId",
"position": {
"x": 20,
"y": 21,
"z": 22
},
"rotation": {
"x": 4.0,
"y": 5.0,
"z": 6.0
},
"salientMaterials": ["Ceramic"],
"shape": "structure",
"visibleInCamera": True
}, {
"colorsFromMaterials": [],
"direction": {
"x": -180,
"y": 60,
"z": 90
},
"distance": 3.5,
"distanceXZ": 3.3,
"isPickedUp": False,
"mass": 78.90,
"objectBounds": {
"objectBoundsCorners": ["pAA", "pBB", "pCC", "pDD", "pEE", "pFF", "pGG", "pHH"]
},
"objectId": "testWallIdHidden",
"position": {
"x": 30,
"y": 31,
"z": 32
},
"rotation": {
"x": 14.0,
"y": 15.0,
"z": 16.0
},
"salientMaterials": ["Ceramic"],
"shape": "structureHidden",
"visibleInCamera": False
}]
}
}, image_data, depth_mask_data, object_mask_data
def test_end_scene(self):
# TODO When this function actually does anything
pass
def test_start_scene(self):
# TODO MCS-15
pass
def test_step(self):
# TODO MCS-15
pass
def test_restrict_goal_output_metadata(self):
goal = MCS_Goal(metadata={
'target': { 'image': [0] },
'target_1': { 'image': [1] },
'target_2': { 'image': [2] }
})
actual = self.controller.restrict_goal_output_metadata(goal)
self.assertEqual(actual.metadata, {
'target': { 'image': [0] },
'target_1': { 'image': [1] },
'target_2': { 'image': [2] }
})
def test_restrict_goal_output_metadata_full(self):
self.controller.set_config({ 'metadata': 'full' })
goal = MCS_Goal(metadata={
'target': { 'image': [0] },
'target_1': { 'image': [1] },
'target_2': { 'image': [2] }
})
actual = self.controller.restrict_goal_output_metadata(goal)
self.assertEqual(actual.metadata, {
'target': { 'image': [0] },
'target_1': { 'image': [1] },
'target_2': { 'image': [2] }
})
def test_restrict_goal_output_metadata_no_navigation(self):
self.controller.set_config({ 'metadata': 'no_navigation' })
goal = MCS_Goal(metadata={
'target': { 'image': [0] },
'target_1': { 'image': [1] },
'target_2': { 'image': [2] }
})
actual = self.controller.restrict_goal_output_metadata(goal)
self.assertEqual(actual.metadata, {
'target': { 'image': [0] },
'target_1': { 'image': [1] },
'target_2': { 'image': [2] }
})
def test_restrict_goal_output_metadata_no_vision(self):
self.controller.set_config({ 'metadata': 'no_vision' })
goal = MCS_Goal(metadata={
'target': { 'image': [0] },
'target_1': { 'image': [1] },
'target_2': { 'image': [2] }
})
actual = self.controller.restrict_goal_output_metadata(goal)
self.assertEqual(actual.metadata, {
'target': { 'image': None },
'target_1': { 'image': None },
'target_2': { 'image': None }
})
def test_restrict_goal_output_metadata_none(self):
self.controller.set_config({ 'metadata': 'none' })
goal = MCS_Goal(metadata={
'target': { 'image': [0] },
'target_1': { 'image': [1] },
'target_2': { 'image': [2] }
})
actual = self.controller.restrict_goal_output_metadata(goal)
self.assertEqual(actual.metadata, {
'target': { 'image': None },
'target_1': { 'image': None },
'target_2': { 'image': None }
})
def test_restrict_object_output_metadata(self):
test_object = MCS_Object(
color={ 'r': 1, 'g': 2, 'b': 3 },
dimensions={ 'x': 1, 'y': 2, 'z': 3 },
distance=12.34,
distance_in_steps=34.56,
distance_in_world=56.78,
position={ 'x': 4, 'y': 5, 'z': 6 },
rotation={ 'x': 7, 'y': 8, 'z': 9 },
shape='sofa',
texture_color_list=['c1', 'c2']
)
actual = self.controller.restrict_object_output_metadata(test_object)
self.assertEqual(actual.color, { 'r': 1, 'g': 2, 'b': 3 })
self.assertEqual(actual.dimensions, { 'x': 1, 'y': 2, 'z': 3 })
self.assertEqual(actual.distance, 12.34)
self.assertEqual(actual.distance_in_steps, 34.56)
self.assertEqual(actual.distance_in_world, 56.78)
self.assertEqual(actual.position, { 'x': 4, 'y': 5, 'z': 6 })
self.assertEqual(actual.rotation, { 'x': 7, 'y': 8, 'z': 9 })
self.assertEqual(actual.shape, 'sofa')
self.assertEqual(actual.texture_color_list, ['c1', 'c2'])
def test_restrict_object_output_metadata_full(self):
self.controller.set_config({ 'metadata': 'full' })
test_object = MCS_Object(
color={ 'r': 1, 'g': 2, 'b': 3 },
dimensions={ 'x': 1, 'y': 2, 'z': 3 },
distance=12.34,
distance_in_steps=34.56,
distance_in_world=56.78,
position={ 'x': 4, 'y': 5, 'z': 6 },
rotation={ 'x': 7, 'y': 8, 'z': 9 },
shape='sofa',
texture_color_list=['c1', 'c2']
)
actual = self.controller.restrict_object_output_metadata(test_object)
self.assertEqual(actual.color, { 'r': 1, 'g': 2, 'b': 3 })
self.assertEqual(actual.dimensions, { 'x': 1, 'y': 2, 'z': 3 })
self.assertEqual(actual.distance, 12.34)
self.assertEqual(actual.distance_in_steps, 34.56)
self.assertEqual(actual.distance_in_world, 56.78)
self.assertEqual(actual.position, { 'x': 4, 'y': 5, 'z': 6 })
self.assertEqual(actual.rotation, { 'x': 7, 'y': 8, 'z': 9 })
self.assertEqual(actual.shape, 'sofa')
self.assertEqual(actual.texture_color_list, ['c1', 'c2'])
def test_restrict_object_output_metadata_no_navigation(self):
self.controller.set_config({ 'metadata': 'no_navigation' })
test_object = MCS_Object(
color={ 'r': 1, 'g': 2, 'b': 3 },
dimensions={ 'x': 1, 'y': 2, 'z': 3 },
distance=12.34,
distance_in_steps=34.56,
distance_in_world=56.78,
position={ 'x': 4, 'y': 5, 'z': 6 },
rotation={ 'x': 7, 'y': 8, 'z': 9 },
shape='sofa',
texture_color_list=['c1', 'c2']
)
actual = self.controller.restrict_object_output_metadata(test_object)
self.assertEqual(actual.color, { 'r': 1, 'g': 2, 'b': 3 })
self.assertEqual(actual.dimensions, { 'x': 1, 'y': 2, 'z': 3 })
self.assertEqual(actual.distance, 12.34)
self.assertEqual(actual.distance_in_steps, 34.56)
self.assertEqual(actual.distance_in_world, 56.78)
self.assertEqual(actual.position, None)
self.assertEqual(actual.rotation, None)
self.assertEqual(actual.shape, 'sofa')
self.assertEqual(actual.texture_color_list, ['c1', 'c2'])
def test_restrict_object_output_metadata_no_vision(self):
self.controller.set_config({ 'metadata': 'no_vision' })
test_object = MCS_Object(
color={ 'r': 1, 'g': 2, 'b': 3 },
dimensions={ 'x': 1, 'y': 2, 'z': 3 },
distance=12.34,
distance_in_steps=34.56,
distance_in_world=56.78,
position={ 'x': 4, 'y': 5, 'z': 6 },
rotation={ 'x': 7, 'y': 8, 'z': 9 },
shape='sofa',
texture_color_list=['c1', 'c2']
)
actual = self.controller.restrict_object_output_metadata(test_object)
self.assertEqual(actual.color, None)
self.assertEqual(actual.dimensions, None)
self.assertEqual(actual.distance, None)
self.assertEqual(actual.distance_in_steps, None)
self.assertEqual(actual.distance_in_world, None)
self.assertEqual(actual.position, { 'x': 4, 'y': 5, 'z': 6 })
self.assertEqual(actual.rotation, { 'x': 7, 'y': 8, 'z': 9 })
self.assertEqual(actual.shape, None)
self.assertEqual(actual.texture_color_list, None)
def test_restrict_object_output_metadata_none(self):
self.controller.set_config({ 'metadata': 'none' })
test_object = MCS_Object(
color={ 'r': 1, 'g': 2, 'b': 3 },
dimensions={ 'x': 1, 'y': 2, 'z': 3 },
distance=12.34,
distance_in_steps=34.56,
distance_in_world=56.78,
position={ 'x': 4, 'y': 5, 'z': 6 },
rotation={ 'x': 7, 'y': 8, 'z': 9 },
shape='sofa',
texture_color_list=['c1', 'c2']
)
actual = self.controller.restrict_object_output_metadata(test_object)
self.assertEqual(actual.color, None)
self.assertEqual(actual.dimensions, None)
self.assertEqual(actual.distance, None)
self.assertEqual(actual.distance_in_steps, None)
self.assertEqual(actual.distance_in_world, None)
self.assertEqual(actual.position, None)
self.assertEqual(actual.rotation, None)
self.assertEqual(actual.shape, None)
self.assertEqual(actual.texture_color_list, None)
def test_restrict_step_output_metadata(self):
step = MCS_Step_Output(
camera_aspect_ratio=(1, 2),
camera_clipping_planes=(3, 4),
camera_field_of_view=5,
camera_height=6,
depth_mask_list=[7],
object_mask_list=[8],
position={ 'x': 4, 'y': 5, 'z': 6 },
rotation={ 'x': 7, 'y': 8, 'z': 9 }
)
actual = self.controller.restrict_step_output_metadata(step)
self.assertEqual(actual.camera_aspect_ratio, (1, 2))
self.assertEqual(actual.camera_clipping_planes, (3, 4))
self.assertEqual(actual.camera_field_of_view, 5)
self.assertEqual(actual.camera_height, 6)
self.assertEqual(actual.depth_mask_list, [7])
self.assertEqual(actual.object_mask_list, [8])
self.assertEqual(actual.position, { 'x': 4, 'y': 5, 'z': 6 })
self.assertEqual(actual.rotation, { 'x': 7, 'y': 8, 'z': 9 })
def test_restrict_step_output_metadata_full(self):
self.controller.set_config({ 'metadata': 'full' })
step = MCS_Step_Output(
camera_aspect_ratio=(1, 2),
camera_clipping_planes=(3, 4),
camera_field_of_view=5,
camera_height=6,
depth_mask_list=[7],
object_mask_list=[8],
position={ 'x': 4, 'y': 5, 'z': 6 },
rotation={ 'x': 7, 'y': 8, 'z': 9 }
)
actual = self.controller.restrict_step_output_metadata(step)
self.assertEqual(actual.camera_aspect_ratio, (1, 2))
self.assertEqual(actual.camera_clipping_planes, (3, 4))
self.assertEqual(actual.camera_field_of_view, 5)
self.assertEqual(actual.camera_height, 6)
self.assertEqual(actual.depth_mask_list, [7])
self.assertEqual(actual.object_mask_list, [8])
self.assertEqual(actual.position, { 'x': 4, 'y': 5, 'z': 6 })
self.assertEqual(actual.rotation, { 'x': 7, 'y': 8, 'z': 9 })
def test_restrict_step_output_metadata_no_navigation(self):
self.controller.set_config({ 'metadata': 'no_navigation' })
step = MCS_Step_Output(
camera_aspect_ratio=(1, 2),
camera_clipping_planes=(3, 4),
camera_field_of_view=5,
camera_height=6,
depth_mask_list=[7],
object_mask_list=[8],
position={ 'x': 4, 'y': 5, 'z': 6 },
rotation={ 'x': 7, 'y': 8, 'z': 9 }
)
actual = self.controller.restrict_step_output_metadata(step)
self.assertEqual(actual.camera_aspect_ratio, (1, 2))
self.assertEqual(actual.camera_clipping_planes, (3, 4))
self.assertEqual(actual.camera_field_of_view, 5)
self.assertEqual(actual.camera_height, 6)
self.assertEqual(actual.depth_mask_list, [7])
self.assertEqual(actual.object_mask_list, [8])
self.assertEqual(actual.position, None)
self.assertEqual(actual.rotation, None)
def test_restrict_step_output_metadata_no_vision(self):
self.controller.set_config({ 'metadata': 'no_vision' })
step = MCS_Step_Output(
camera_aspect_ratio=(1, 2),
camera_clipping_planes=(3, 4),
camera_field_of_view=5,
camera_height=6,
depth_mask_list=[7],
object_mask_list=[8],
position={ 'x': 4, 'y': 5, 'z': 6 },
rotation={ 'x': 7, 'y': 8, 'z': 9 }
)
actual = self.controller.restrict_step_output_metadata(step)
self.assertEqual(actual.camera_aspect_ratio, None)
self.assertEqual(actual.camera_clipping_planes, None)
self.assertEqual(actual.camera_field_of_view, None)
self.assertEqual(actual.camera_height, None)
self.assertEqual(actual.depth_mask_list, [])
self.assertEqual(actual.object_mask_list, [])
self.assertEqual(actual.position, { 'x': 4, 'y': 5, 'z': 6 })
self.assertEqual(actual.rotation, { 'x': 7, 'y': 8, 'z': 9 })
def test_restrict_step_output_metadata_none(self):
self.controller.set_config({ 'metadata': 'none' })
step = MCS_Step_Output(
camera_aspect_ratio=(1, 2),
camera_clipping_planes=(3, 4),
camera_field_of_view=5,
camera_height=6,
depth_mask_list=[7],
object_mask_list=[8],
position={ 'x': 4, 'y': 5, 'z': 6 },
rotation={ 'x': 7, 'y': 8, 'z': 9 }
)
actual = self.controller.restrict_step_output_metadata(step)
self.assertEqual(actual.camera_aspect_ratio, None)
self.assertEqual(actual.camera_clipping_planes, None)
self.assertEqual(actual.camera_field_of_view, None)
self.assertEqual(actual.camera_height, None)
self.assertEqual(actual.depth_mask_list, [])
self.assertEqual(actual.object_mask_list, [])
self.assertEqual(actual.position, None)
self.assertEqual(actual.rotation, None)
def test_retrieve_action_list(self):
self.assertEqual(self.controller.retrieve_action_list(MCS_Goal(), 0), self.controller.ACTION_LIST)
self.assertEqual(self.controller.retrieve_action_list(MCS_Goal(action_list=[]), 0), \
self.controller.ACTION_LIST)
self.assertEqual(self.controller.retrieve_action_list(MCS_Goal(action_list=[[]]), 0), \
self.controller.ACTION_LIST)
self.assertEqual(self.controller.retrieve_action_list(MCS_Goal(action_list=[['MoveAhead',\
'RotateLook,rotation=180']]), 0), ['MoveAhead', 'RotateLook,rotation=180'])
self.assertEqual(self.controller.retrieve_action_list(MCS_Goal(action_list=[['MoveAhead',\
'RotateLook,rotation=180']]), 1), self.controller.ACTION_LIST)
self.assertEqual(self.controller.retrieve_action_list(MCS_Goal(action_list=[['MoveAhead',\
'RotateLook,rotation=180'], []]), 1), self.controller.ACTION_LIST)
self.assertEqual(self.controller.retrieve_action_list(MCS_Goal(action_list=[[],['MoveAhead',\
'RotateLook,rotation=180']]), 0), self.controller.ACTION_LIST)
self.assertEqual(self.controller.retrieve_action_list(MCS_Goal(action_list=[[],['MoveAhead',\
'RotateLook,rotation=180']]), 1), ['MoveAhead', 'RotateLook,rotation=180'])
def test_retrieve_goal(self):
goal_1 = self.controller.retrieve_goal({})
self.assertEqual(goal_1.action_list, None)
self.assertEqual(goal_1.info_list, [])
self.assertEqual(goal_1.last_step, None)
self.assertEqual(goal_1.task_list, [])
self.assertEqual(goal_1.type_list, [])
self.assertEqual(goal_1.metadata, {})
goal_2 = self.controller.retrieve_goal({
"goal": {
}
})
self.assertEqual(goal_2.action_list, None)
self.assertEqual(goal_2.info_list, [])
self.assertEqual(goal_2.last_step, None)
self.assertEqual(goal_2.task_list, [])
self.assertEqual(goal_2.type_list, [])
self.assertEqual(goal_2.metadata, {})
goal_3 = self.controller.retrieve_goal({
"goal": {
"action_list": [["action1"], [], ["action2", "action3", "action4"]],
"info_list": ["info1", "info2", 12.34],
"last_step": 10,
"task_list": ["task1", "task2"],
"type_list": ["type1", "type2"],
"metadata": {
"key": "value"
}
}
})
self.assertEqual(goal_3.action_list, [["action1"], [], ["action2", "action3", "action4"]])
self.assertEqual(goal_3.info_list, ["info1", "info2", 12.34])
self.assertEqual(goal_3.last_step, 10)
self.assertEqual(goal_3.task_list, ["task1", "task2"])
self.assertEqual(goal_3.type_list, ["type1", "type2"])
self.assertEqual(goal_3.metadata, {
"key": "value"
})
def test_retrieve_goal_with_config_metadata(self):
self.controller.set_config({ 'metadata': 'full' })
actual = self.controller.retrieve_goal({
'goal': {
'metadata': {
'target': { 'image': [0] },
'target_1': { 'image': [1] },
'target_2': { 'image': [2] }
}
}
})
self.assertEqual(actual.metadata, {
'target': { 'image': [0] },
'target_1': { 'image': [1] },
'target_2': { 'image': [2] }
})
self.controller.set_config({ 'metadata': 'no_navigation' })
actual = self.controller.retrieve_goal({
'goal': {
'metadata': {
'target': { 'image': [0] },
'target_1': { 'image': [1] },
'target_2': { 'image': [2] }
}
}
})
self.assertEqual(actual.metadata, {
'target': { 'image': [0] },
'target_1': { 'image': [1] },
'target_2': { 'image': [2] }
})
self.controller.set_config({ 'metadata': 'no_vision' })
actual = self.controller.retrieve_goal({
'goal': {
'metadata': {
'target': { 'image': [0] },
'target_1': { 'image': [1] },
'target_2': { 'image': [2] }
}
}
})
self.assertEqual(actual.metadata, {
'target': { 'image': None },
'target_1': { 'image': None },
'target_2': { 'image': None }
})
self.controller.set_config({ 'metadata': 'none' })
actual = self.controller.retrieve_goal({
'goal': {
'metadata': {
'target': { 'image': [0] },
'target_1': { 'image': [1] },
'target_2': { 'image': [2] }
}
}
})
self.assertEqual(actual.metadata, {
'target': { 'image': None },
'target_1': { 'image': None },
'target_2': { 'image': None }
})
def test_retrieve_head_tilt(self):
mock_scene_event_data = {
"metadata": {
"agent": {
"cameraHorizon": 12.34
}
}
}
actual = self.controller.retrieve_head_tilt(self.create_mock_scene_event(mock_scene_event_data))
self.assertEqual(actual, 12.34)
mock_scene_event_data = {
"metadata": {
"agent": {
"cameraHorizon": -56.78
}
}
}
actual = self.controller.retrieve_head_tilt(self.create_mock_scene_event(mock_scene_event_data))
self.assertEqual(actual, -56.78)
def test_retrieve_object_list(self):
mock_scene_event_data = self.create_retrieve_object_list_scene_event()
actual = self.controller.retrieve_object_list(self.create_mock_scene_event(mock_scene_event_data))
self.assertEqual(len(actual), 2)
self.assertEqual(actual[0].uuid, "testId1")
self.assertEqual(actual[0].color, {
"r": 12,
"g": 34,
"b": 56
})
self.assertEqual(actual[0].dimensions, {})
self.assertEqual(actual[0].direction, {
"x": 0,
"y": 0,
"z": 0
})
self.assertEqual(actual[0].distance, 0)
self.assertEqual(actual[0].distance_in_steps, 0)
self.assertEqual(actual[0].distance_in_world, 0)
self.assertEqual(actual[0].held, True)
self.assertEqual(actual[0].mass, 1)
self.assertEqual(actual[0].material_list, [])
self.assertEqual(actual[0].position, { "x": 1, "y": 1, "z": 2 })
self.assertEqual(actual[0].rotation, 2.0)
self.assertEqual(actual[0].shape, 'shape1')
self.assertEqual(actual[0].texture_color_list, ['c1'])
self.assertEqual(actual[0].visible, True)
self.assertEqual(actual[1].uuid, "testId2")
self.assertEqual(actual[1].color, {
"r": 98,
"g": 76,
"b": 54
})
self.assertEqual(actual[1].dimensions, ["p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8"])
self.assertEqual(actual[1].direction, {
"x": 90,
"y": -30,
"z": 0
})
self.assertEqual(actual[1].distance, 2.2)
self.assertEqual(actual[1].distance_in_steps, 2.2)
self.assertEqual(actual[1].distance_in_world, 1.5)
self.assertEqual(actual[1].held, False)
self.assertEqual(actual[1].mass, 12.34)
self.assertEqual(actual[1].material_list, ["METAL", "PLASTIC"])
self.assertEqual(actual[1].position, { "x": 1, "y": 2, "z": 3 })
self.assertEqual(actual[1].rotation, 2)
self.assertEqual(actual[1].shape, 'shape2')
self.assertEqual(actual[1].texture_color_list, ['c2', 'c3'])
self.assertEqual(actual[1].visible, True)
def test_retrieve_object_list_with_config_metadata_full(self):
self.controller.set_config({ 'metadata': 'full' })
mock_scene_event_data = self.create_retrieve_object_list_scene_event()
actual = self.controller.retrieve_object_list(self.create_mock_scene_event(mock_scene_event_data))
self.assertEqual(len(actual), 3)
self.assertEqual(actual[0].uuid, "testId1")
self.assertEqual(actual[0].color, {
"r": 12,
"g": 34,
"b": 56
})
self.assertEqual(actual[0].dimensions, {})
self.assertEqual(actual[0].direction, {
"x": 0,
"y": 0,
"z": 0
})
self.assertEqual(actual[0].distance, 0)
self.assertEqual(actual[0].distance_in_steps, 0)
self.assertEqual(actual[0].distance_in_world, 0)
self.assertEqual(actual[0].held, True)
self.assertEqual(actual[0].mass, 1)
self.assertEqual(actual[0].material_list, [])
self.assertEqual(actual[0].position, { "x": 1, "y": 1, "z": 2 })
self.assertEqual(actual[0].rotation, 2.0)
self.assertEqual(actual[0].shape, 'shape1')
self.assertEqual(actual[0].texture_color_list, ['c1'])
self.assertEqual(actual[0].visible, True)
self.assertEqual(actual[1].uuid, "testId2")
self.assertEqual(actual[1].color, {
"r": 98,
"g": 76,
"b": 54
})
self.assertEqual(actual[1].dimensions, ["p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8"])
self.assertEqual(actual[1].direction, {
"x": 90,
"y": -30,
"z": 0
})
self.assertEqual(actual[1].distance, 2.2)
self.assertEqual(actual[1].distance_in_steps, 2.2)
self.assertEqual(actual[1].distance_in_world, 1.5)
self.assertEqual(actual[1].held, False)
self.assertEqual(actual[1].mass, 12.34)
self.assertEqual(actual[1].material_list, ["METAL", "PLASTIC"])
self.assertEqual(actual[1].position, { "x": 1, "y": 2, "z": 3 })
self.assertEqual(actual[1].rotation, 2)
self.assertEqual(actual[1].shape, 'shape2')
self.assertEqual(actual[1].texture_color_list, ['c2', 'c3'])
self.assertEqual(actual[1].visible, True)
self.assertEqual(actual[2].uuid, "testId3")
self.assertEqual(actual[2].color, {
"r": 101,
"g": 102,
"b": 103
})
self.assertEqual(actual[2].dimensions, ["pA", "pB", "pC", "pD", "pE", "pF", "pG", "pH"])
self.assertEqual(actual[2].direction, {
"x": -90,
"y": 180,
"z": 270
})
self.assertEqual(actual[2].distance, 4)
self.assertEqual(actual[2].distance_in_steps, 4)
self.assertEqual(actual[2].distance_in_world, 2.5)
self.assertEqual(actual[2].held, False)
self.assertEqual(actual[2].mass, 34.56)
self.assertEqual(actual[2].material_list, ["WOOD"])
self.assertEqual(actual[2].position, { "x": -3, "y": -2, "z": -1 })
self.assertEqual(actual[2].rotation, 12)
self.assertEqual(actual[2].shape, 'shape3')
self.assertEqual(actual[2].texture_color_list, [])
self.assertEqual(actual[2].visible, False)
def test_retrieve_object_list_with_config_metadata_no_navigation(self):
self.controller.set_config({ 'metadata': 'no_navigation' })
mock_scene_event_data = self.create_retrieve_object_list_scene_event()
actual = self.controller.retrieve_object_list(self.create_mock_scene_event(mock_scene_event_data))
self.assertEqual(len(actual), 2)
self.assertEqual(actual[0].uuid, "testId1")
self.assertEqual(actual[0].color, {
"r": 12,
"g": 34,
"b": 56
})
self.assertEqual(actual[0].dimensions, {})
self.assertEqual(actual[0].direction, {
"x": 0,
"y": 0,
"z": 0
})
self.assertEqual(actual[0].distance, 0)
self.assertEqual(actual[0].distance_in_steps, 0)
self.assertEqual(actual[0].distance_in_world, 0)
self.assertEqual(actual[0].held, True)
self.assertEqual(actual[0].mass, 1)
self.assertEqual(actual[0].material_list, [])
self.assertEqual(actual[0].position, None)
self.assertEqual(actual[0].rotation, None)
self.assertEqual(actual[0].shape, 'shape1')
self.assertEqual(actual[0].texture_color_list, ['c1'])
self.assertEqual(actual[0].visible, True)
self.assertEqual(actual[1].uuid, "testId2")
self.assertEqual(actual[1].color, {
"r": 98,
"g": 76,
"b": 54
})
self.assertEqual(actual[1].dimensions, ["p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8"])
self.assertEqual(actual[1].direction, {
"x": 90,
"y": -30,
"z": 0
})
self.assertEqual(actual[1].distance, 2.2)
self.assertEqual(actual[1].distance_in_steps, 2.2)
self.assertEqual(actual[1].distance_in_world, 1.5)
self.assertEqual(actual[1].held, False)
self.assertEqual(actual[1].mass, 12.34)
self.assertEqual(actual[1].material_list, ["METAL", "PLASTIC"])
self.assertEqual(actual[1].position, None)
self.assertEqual(actual[1].rotation, None)
self.assertEqual(actual[1].shape, 'shape2')
self.assertEqual(actual[1].texture_color_list, ['c2', 'c3'])
self.assertEqual(actual[1].visible, True)
def test_retrieve_object_list_with_config_metadata_no_vision(self):
self.controller.set_config({ 'metadata': 'no_vision' })
mock_scene_event_data = self.create_retrieve_object_list_scene_event()
actual = self.controller.retrieve_object_list(self.create_mock_scene_event(mock_scene_event_data))
self.assertEqual(len(actual), 2)
self.assertEqual(actual[0].uuid, "testId1")
self.assertEqual(actual[0].color, None)
self.assertEqual(actual[0].dimensions, None)
self.assertEqual(actual[0].direction, None)
self.assertEqual(actual[0].distance, None)
self.assertEqual(actual[0].distance_in_steps, None)
self.assertEqual(actual[0].distance_in_world, None)
self.assertEqual(actual[0].held, True)
self.assertEqual(actual[0].mass, 1)
self.assertEqual(actual[0].material_list, [])
self.assertEqual(actual[0].position, { "x": 1, "y": 1, "z": 2 })
self.assertEqual(actual[0].rotation, 2.0)
self.assertEqual(actual[0].shape, None)
self.assertEqual(actual[0].texture_color_list, None)
self.assertEqual(actual[0].visible, True)
self.assertEqual(actual[1].uuid, "testId2")
self.assertEqual(actual[1].color, None)
self.assertEqual(actual[1].dimensions, None)
self.assertEqual(actual[1].direction, None)
self.assertEqual(actual[1].distance, None)
self.assertEqual(actual[1].distance_in_steps, None)
self.assertEqual(actual[1].distance_in_world, None)
self.assertEqual(actual[1].held, False)
self.assertEqual(actual[1].mass, 12.34)
self.assertEqual(actual[1].material_list, ["METAL", "PLASTIC"])
self.assertEqual(actual[1].position, { "x": 1, "y": 2, "z": 3 })
self.assertEqual(actual[1].rotation, 2)
self.assertEqual(actual[1].shape, None)
self.assertEqual(actual[1].texture_color_list, None)
self.assertEqual(actual[1].visible, True)
def test_retrieve_object_list_with_config_metadata_none(self):
self.controller.set_config({ 'metadata': 'none' })
mock_scene_event_data = self.create_retrieve_object_list_scene_event()
actual = self.controller.retrieve_object_list(self.create_mock_scene_event(mock_scene_event_data))
self.assertEqual(len(actual), 2)
self.assertEqual(actual[0].uuid, "testId1")
self.assertEqual(actual[0].color, None)
self.assertEqual(actual[0].dimensions, None)
self.assertEqual(actual[0].direction, None)
self.assertEqual(actual[0].distance, None)
self.assertEqual(actual[0].distance_in_steps, None)
self.assertEqual(actual[0].distance_in_world, None)
self.assertEqual(actual[0].held, True)
self.assertEqual(actual[0].mass, 1)
self.assertEqual(actual[0].material_list, [])
self.assertEqual(actual[0].position, None)
self.assertEqual(actual[0].rotation, None)
self.assertEqual(actual[0].shape, None)
self.assertEqual(actual[0].texture_color_list, None)
self.assertEqual(actual[0].visible, True)
self.assertEqual(actual[1].uuid, "testId2")
self.assertEqual(actual[1].color, None)
self.assertEqual(actual[1].dimensions, None)
self.assertEqual(actual[1].direction, None)
self.assertEqual(actual[1].distance, None)
self.assertEqual(actual[1].distance_in_steps, None)
self.assertEqual(actual[1].distance_in_world, None)
self.assertEqual(actual[1].held, False)
self.assertEqual(actual[1].mass, 12.34)
self.assertEqual(actual[1].material_list, ["METAL", "PLASTIC"])
self.assertEqual(actual[1].position, None)
self.assertEqual(actual[1].rotation, None)
self.assertEqual(actual[1].shape, None)
self.assertEqual(actual[1].texture_color_list, None)
self.assertEqual(actual[1].visible, True)
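    # The four metadata config tiers exercised above: 'full' exposes every
    # field; 'no_navigation' hides position/rotation; 'no_vision' hides
    # visual properties; 'none' hides both groups.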
def test_retrieve_pose(self):
# TODO MCS-18
pass
def test_retrieve_return_status(self):
mock_scene_event_data = {
"metadata": {
"lastActionStatus": "SUCCESSFUL"
}
}
actual = self.controller.retrieve_return_status(self.create_mock_scene_event(mock_scene_event_data))
self.assertEqual(actual, MCS_Return_Status.SUCCESSFUL.name)
mock_scene_event_data = {
"metadata": {
"lastActionStatus": "FAILED"
}
}
actual = self.controller.retrieve_return_status(self.create_mock_scene_event(mock_scene_event_data))
self.assertEqual(actual, MCS_Return_Status.FAILED.name)
mock_scene_event_data = {
"metadata": {
"lastActionStatus": "INVALID_STATUS"
}
}
actual = self.controller.retrieve_return_status(self.create_mock_scene_event(mock_scene_event_data))
self.assertEqual(actual, MCS_Return_Status.UNDEFINED.name)
mock_scene_event_data = {
"metadata": {
"lastActionStatus": None
}
}
actual = self.controller.retrieve_return_status(self.create_mock_scene_event(mock_scene_event_data))
self.assertEqual(actual, MCS_Return_Status.UNDEFINED.name)
def test_save_images(self):
image_data = numpy.array([[0]], dtype=numpy.uint8)
depth_mask_data = numpy.array([[128]], dtype=numpy.uint8)
object_mask_data = numpy.array([[192]], dtype=numpy.uint8)
mock_scene_event_data = {
"events": [self.create_mock_scene_event({
"depth_frame": depth_mask_data,
"frame": image_data,
"instance_segmentation_frame": object_mask_data
})]
}
image_list, depth_mask_list, object_mask_list = self.controller.save_images(self.create_mock_scene_event(
mock_scene_event_data))
self.assertEqual(len(image_list), 1)
self.assertEqual(len(depth_mask_list), 1)
self.assertEqual(len(object_mask_list), 1)
self.assertEqual(numpy.array(image_list[0]), image_data)
self.assertEqual(numpy.array(depth_mask_list[0]), depth_mask_data)
self.assertEqual(numpy.array(object_mask_list[0]), object_mask_data)
def test_save_images_with_multiple_images(self):
image_data_1 = numpy.array([[64]], dtype=numpy.uint8)
depth_mask_data_1 = numpy.array([[128]], dtype=numpy.uint8)
object_mask_data_1 = numpy.array([[192]], dtype=numpy.uint8)
        image_data_2 = numpy.array([[32]], dtype=numpy.uint8)
# -*- coding: utf-8 -*-
# pylint: disable=E1101, C0330, C0103
# E1101: Module X has no Y member
# C0330: Wrong continued indentation
# C0103: Invalid attribute/variable/method name
"""
polyobjects.py
==============
This contains all of the PolyXXX objects used by :mod:`wx.lib.plot`.
"""
__docformat__ = "restructuredtext en"
# Standard Library
import time as _time
import wx
import warnings
from collections import namedtuple
# Third-Party
try:
import numpy as np
except ImportError:
msg = """
This module requires the NumPy module, which could not be
imported. It probably is not installed (it's not part of the
standard Python distribution). See the Numeric Python site
(http://numpy.scipy.org) for information on downloading source or
binaries."""
raise ImportError("NumPy not found.\n" + msg)
# Package
from .utils import pendingDeprecation
from .utils import TempStyle
from .utils import pairwise
class PolyPoints(object):
"""
Base Class for lines and markers.
:param points: The points to plot
:type points: list of ``(x, y)`` pairs
:param attr: Additional attributes
:type attr: dict
.. warning::
All methods are private.
"""
def __init__(self, points, attr):
self._points = np.array(points).astype(np.float64)
self._logscale = (False, False)
self._absScale = (False, False)
self._symlogscale = (False, False)
self._pointSize = (1.0, 1.0)
self.currentScale = (1, 1)
self.currentShift = (0, 0)
self.scaled = self.points
self.attributes = {}
self.attributes.update(self._attributes)
for name, value in attr.items():
if name not in self._attributes.keys():
err_txt = "Style attribute incorrect. Should be one of {}"
raise KeyError(err_txt.format(self._attributes.keys()))
self.attributes[name] = value
@property
def logScale(self):
"""
A tuple of ``(x_axis_is_log10, y_axis_is_log10)`` booleans. If a value
is ``True``, then that axis is plotted on a logarithmic base 10 scale.
:getter: Returns the current value of logScale
:setter: Sets the value of logScale
:type: tuple of bool, length 2
:raises ValueError: when setting an invalid value
"""
return self._logscale
@logScale.setter
def logScale(self, logscale):
if not isinstance(logscale, tuple) or len(logscale) != 2:
raise ValueError("`logscale` must be a 2-tuple of bools")
self._logscale = logscale
def setLogScale(self, logscale):
"""
Set to change the axes to plot Log10(values)
Value must be a tuple of booleans (x_axis_bool, y_axis_bool)
.. deprecated:: Feb 27, 2016
Use the :attr:`~wx.lib.plot.polyobjects.PolyPoints.logScale`
property instead.
"""
pendingDeprecation("self.logScale property")
self._logscale = logscale
@property
def symLogScale(self):
"""
.. warning::
Not yet implemented.
A tuple of ``(x_axis_is_SymLog10, y_axis_is_SymLog10)`` booleans.
If a value is ``True``, then that axis is plotted on a symmetric
logarithmic base 10 scale.
A Symmetric Log10 scale means that values can be positive and
negative. Any values less than
        :attr:`~wx.lib.plot.PolyPoints.symLogThresh` will be plotted on
a linear scale to avoid the plot going to infinity near 0.
:getter: Returns the current value of symLogScale
:setter: Sets the value of symLogScale
:type: tuple of bool, length 2
:raises ValueError: when setting an invalid value
        .. note::
            This is a simplified example of how SymLog works::
                if x >= thresh:
                    x = Log10(x)
                elif x <= -thresh:
                    x = -Log10(Abs(x))
                else:
                    x = x
.. seealso::
+ :attr:`~wx.lib.plot.PolyPoints.symLogThresh`
+ See http://matplotlib.org/examples/pylab_examples/symlog_demo.html
for an example.
"""
return self._symlogscale
# TODO: Implement symmetric log scale
@symLogScale.setter
def symLogScale(self, symlogscale, thresh):
raise NotImplementedError("Symmetric Log Scale not yet implemented")
if not isinstance(symlogscale, tuple) or len(symlogscale) != 2:
raise ValueError("`symlogscale` must be a 2-tuple of bools")
self._symlogscale = symlogscale
@property
def symLogThresh(self):
"""
.. warning::
Not yet implemented.
A tuple of ``(x_thresh, y_thresh)`` floats that define where the plot
changes to linear scale when using a symmetric log scale.
:getter: Returns the current value of symLogThresh
:setter: Sets the value of symLogThresh
:type: tuple of float, length 2
:raises ValueError: when setting an invalid value
        .. note::
            This is a simplified example of how SymLog works::
                if x >= thresh:
                    x = Log10(x)
                elif x <= -thresh:
                    x = -Log10(Abs(x))
                else:
                    x = x
.. seealso::
+ :attr:`~wx.lib.plot.PolyPoints.symLogScale`
+ See http://matplotlib.org/examples/pylab_examples/symlog_demo.html
for an example.
"""
return self._symlogscale
# TODO: Implement symmetric log scale threshold
@symLogThresh.setter
def symLogThresh(self, symlogscale, thresh):
raise NotImplementedError("Symmetric Log Scale not yet implemented")
if not isinstance(symlogscale, tuple) or len(symlogscale) != 2:
raise ValueError("`symlogscale` must be a 2-tuple of bools")
self._symlogscale = symlogscale
@property
def absScale(self):
"""
A tuple of ``(x_axis_is_abs, y_axis_is_abs)`` booleans. If a value
is ``True``, then that axis is plotted on an absolute value scale.
:getter: Returns the current value of absScale
:setter: Sets the value of absScale
:type: tuple of bool, length 2
:raises ValueError: when setting an invalid value
"""
return self._absScale
@absScale.setter
def absScale(self, absscale):
        if not isinstance(absscale, tuple) or len(absscale) != 2:
raise ValueError("`absscale` must be a 2-tuple of bools")
self._absScale = absscale
@property
def points(self):
"""
Get or set the plotted points.
:getter: Returns the current value of points, adjusting for the
various scale options such as Log, Abs, or SymLog.
:setter: Sets the value of points.
:type: list of `(x, y)` pairs
.. Note::
Only set unscaled points - do not perform the log, abs, or symlog
adjustments yourself.
"""
        data = np.array(self._points, copy=True)  # need the copy
        # TODO: get rid of the need for copy
# work on X:
if self.absScale[0]:
data = self._abs(data, 0)
if self.logScale[0]:
data = self._log10(data, 0)
if self.symLogScale[0]:
# TODO: implement symLogScale
# Should symLogScale override absScale? My vote is no.
# Should symLogScale override logScale? My vote is yes.
# - symLogScale could be a parameter passed to logScale...
pass
# work on Y:
if self.absScale[1]:
data = self._abs(data, 1)
if self.logScale[1]:
data = self._log10(data, 1)
if self.symLogScale[1]:
# TODO: implement symLogScale
pass
return data
@points.setter
def points(self, points):
self._points = points
def _log10(self, data, index):
""" Take the Log10 of the data, dropping any negative values """
data = np.compress(data[:, index] > 0, data, 0)
data[:, index] = np.log10(data[:, index])
return data
def _abs(self, data, index):
""" Take the Abs of the data """
data[:, index] = np.abs(data[:, index])
return data
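    # Worked example (hypothetical data): with absScale = (False, True) and
    # logScale = (False, True), the point (2.0, -100.0) is plotted at
    # (2.0, log10(abs(-100.0))) == (2.0, 2.0).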
def boundingBox(self):
"""
        Returns the bounding box for the entire dataset as a tuple with this
format::
((minX, minY), (maxX, maxY))
:returns: boundingbox
:rtype: numpy array of ``[[minX, minY], [maxX, maxY]]``
"""
if len(self.points) == 0:
# no curves to draw
# defaults to (-1,-1) and (1,1) but axis can be set in Draw
minXY = np.array([-1.0, -1.0])
maxXY = np.array([1.0, 1.0])
else:
minXY = np.minimum.reduce(self.points)
maxXY = np.maximum.reduce(self.points)
return minXY, maxXY
def scaleAndShift(self, scale=(1, 1), shift=(0, 0)):
"""
Scales and shifts the data for plotting.
:param scale: The values to scale the data by.
:type scale: list of floats: ``[x_scale, y_scale]``
:param shift: The value to shift the data by. This should be in scaled
units
:type shift: list of floats: ``[x_shift, y_shift]``
:returns: None
"""
if len(self.points) == 0:
# no curves to draw
return
        # TODO: Can we remove the if statement altogether? Does
# scaleAndShift ever get called when the current value equals
# the new value?
# cast everything to list: some might be np.ndarray objects
if (list(scale) != list(self.currentScale)
or list(shift) != list(self.currentShift)):
# update point scaling
self.scaled = scale * self.points + shift
self.currentScale = scale
self.currentShift = shift
# else unchanged use the current scaling
def getLegend(self):
return self.attributes['legend']
def getClosestPoint(self, pntXY, pointScaled=True):
"""
Returns the index of closest point on the curve, pointXY,
scaledXY, distance x, y in user coords.
if pointScaled == True, then based on screen coords
if pointScaled == False, then based on user coords
"""
if pointScaled:
# Using screen coords
p = self.scaled
pxy = self.currentScale * np.array(pntXY) + self.currentShift
else:
# Using user coords
p = self.points
pxy = np.array(pntXY)
# determine distance for each point
d = np.sqrt(np.add.reduce((p - pxy) ** 2, 1)) # sqrt(dx^2+dy^2)
pntIndex = np.argmin(d)
dist = d[pntIndex]
return [pntIndex,
self.points[pntIndex],
self.scaled[pntIndex] / self._pointSize,
dist]
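# Illustrative usage of the PolyXXX objects in this module (a sketch; the
# point data is made up):
#   line = PolyLine([(0, 0), (1, 1), (2, 4)], colour='red', width=2)
#   min_xy, max_xy = line.boundingBox()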
class PolyLine(PolyPoints):
"""
Creates PolyLine object
:param points: The points that make up the line
:type points: list of ``[x, y]`` values
:param **attr: keyword attributes
=========================== ============= ====================
Keyword and Default Description Type
=========================== ============= ====================
``colour='black'`` Line color :class:`wx.Colour`
``width=1`` Line width float
``style=wx.PENSTYLE_SOLID`` Line style :class:`wx.PenStyle`
``legend=''`` Legend string str
``drawstyle='line'`` see below str
=========================== ============= ====================
================== ==================================================
Draw style Description
================== ==================================================
    ``'line'`` Draws a straight line between consecutive points
``'steps-pre'`` Draws a line down from point A and then right to
point B
``'steps-post'`` Draws a line right from point A and then down
to point B
``'steps-mid-x'`` Draws a line horizontally to half way between A
and B, then draws a line vertically, then again
horizontally to point B.
``'steps-mid-y'`` Draws a line vertically to half way between A
    and B, then draws a line horizontally, then
again vertically to point B.
*Note: This typically does not look very good*
================== ==================================================
.. warning::
All methods except ``__init__`` are private.
"""
_attributes = {'colour': 'black',
'width': 1,
'style': wx.PENSTYLE_SOLID,
'legend': '',
'drawstyle': 'line',
}
_drawstyles = ("line", "steps-pre", "steps-post",
"steps-mid-x", "steps-mid-y")
def __init__(self, points, **attr):
PolyPoints.__init__(self, points, attr)
def draw(self, dc, printerScale, coord=None):
"""
Draw the lines.
:param dc: The DC to draw on.
:type dc: :class:`wx.DC`
:param printerScale:
:type printerScale: float
:param coord: The legend coordinate?
:type coord: ???
"""
colour = self.attributes['colour']
width = self.attributes['width'] * printerScale * self._pointSize[0]
style = self.attributes['style']
drawstyle = self.attributes['drawstyle']
if not isinstance(colour, wx.Colour):
colour = wx.Colour(colour)
pen = wx.Pen(colour, width, style)
pen.SetCap(wx.CAP_BUTT)
dc.SetPen(pen)
if coord is None:
if len(self.scaled): # bugfix for Mac OS X
for c1, c2 in zip(self.scaled, self.scaled[1:]):
self._path(dc, c1, c2, drawstyle)
else:
dc.DrawLines(coord) # draw legend line
def getSymExtent(self, printerScale):
"""
Get the Width and Height of the symbol.
:param printerScale:
:type printerScale: float
"""
h = self.attributes['width'] * printerScale * self._pointSize[0]
w = 5 * h
return (w, h)
def _path(self, dc, coord1, coord2, drawstyle):
"""
Calculates the path from coord1 to coord 2 along X and Y
:param dc: The DC to draw on.
:type dc: :class:`wx.DC`
:param coord1: The first coordinate in the coord pair
:type coord1: list, length 2: ``[x, y]``
:param coord2: The second coordinate in the coord pair
:type coord2: list, length 2: ``[x, y]``
:param drawstyle: The type of connector to use
:type drawstyle: str
"""
if drawstyle == 'line':
# Straight line between points.
line = [coord1, coord2]
elif drawstyle == 'steps-pre':
# Up/down to next Y, then right to next X
intermediate = [coord1[0], coord2[1]]
line = [coord1, intermediate, coord2]
elif drawstyle == 'steps-post':
# Right to next X, then up/down to Y
intermediate = [coord2[0], coord1[1]]
line = [coord1, intermediate, coord2]
elif drawstyle == 'steps-mid-x':
# need 3 lines between points: right -> up/down -> right
mid_x = ((coord2[0] - coord1[0]) / 2) + coord1[0]
intermediate1 = [mid_x, coord1[1]]
intermediate2 = [mid_x, coord2[1]]
line = [coord1, intermediate1, intermediate2, coord2]
elif drawstyle == 'steps-mid-y':
# need 3 lines between points: up/down -> right -> up/down
mid_y = ((coord2[1] - coord1[1]) / 2) + coord1[1]
intermediate1 = [coord1[0], mid_y]
intermediate2 = [coord2[0], mid_y]
line = [coord1, intermediate1, intermediate2, coord2]
else:
err_txt = "Invalid drawstyle '{}'. Must be one of {}."
raise ValueError(err_txt.format(drawstyle, self._drawstyles))
dc.DrawLines(line)
class PolySpline(PolyLine):
"""
Creates PolySpline object
:param points: The points that make up the spline
:type points: list of ``[x, y]`` values
:param **attr: keyword attributes
=========================== ============= ====================
Keyword and Default Description Type
=========================== ============= ====================
``colour='black'`` Line color :class:`wx.Colour`
``width=1`` Line width float
``style=wx.PENSTYLE_SOLID`` Line style :class:`wx.PenStyle`
``legend=''`` Legend string str
=========================== ============= ====================
.. warning::
All methods except ``__init__`` are private.
"""
_attributes = {'colour': 'black',
'width': 1,
'style': wx.PENSTYLE_SOLID,
'legend': ''}
def __init__(self, points, **attr):
PolyLine.__init__(self, points, **attr)
def draw(self, dc, printerScale, coord=None):
""" Draw the spline """
colour = self.attributes['colour']
width = self.attributes['width'] * printerScale * self._pointSize[0]
style = self.attributes['style']
if not isinstance(colour, wx.Colour):
colour = wx.Colour(colour)
pen = wx.Pen(colour, width, style)
pen.SetCap(wx.CAP_ROUND)
dc.SetPen(pen)
if coord is None:
if len(self.scaled) >= 3:
dc.DrawSpline(self.scaled)
else:
dc.DrawLines(coord) # draw legend line
class PolyMarker(PolyPoints):
"""
Creates a PolyMarker object.
:param points: The marker coordinates.
:type points: list of ``[x, y]`` values
:param **attr: keyword attributes
================================= ============= ====================
Keyword and Default Description Type
================================= ============= ====================
``marker='circle'`` see below str
``size=2`` Marker size float
``colour='black'`` Outline color :class:`wx.Colour`
``width=1`` Outline width float
``style=wx.PENSTYLE_SOLID`` Outline style :class:`wx.PenStyle`
``fillcolour=colour`` fill color :class:`wx.Colour`
``fillstyle=wx.BRUSHSTYLE_SOLID`` fill style :class:`wx.BrushStyle`
``legend=''`` Legend string str
================================= ============= ====================
=================== ==================================
Marker Description
=================== ==================================
``'circle'`` A circle of diameter ``size``
``'dot'`` A dot. Does not have a size.
``'square'`` A square with side length ``size``
``'triangle'`` An upward-pointed triangle
``'triangle_down'`` A downward-pointed triangle
``'cross'`` An "X" shape
``'plus'`` A "+" shape
=================== ==================================
.. warning::
All methods except ``__init__`` are private.
"""
_attributes = {'colour': 'black',
'width': 1,
'size': 2,
'fillcolour': None,
'fillstyle': wx.BRUSHSTYLE_SOLID,
'marker': 'circle',
'legend': ''}
def __init__(self, points, **attr):
PolyPoints.__init__(self, points, attr)
def draw(self, dc, printerScale, coord=None):
""" Draw the points """
colour = self.attributes['colour']
width = self.attributes['width'] * printerScale * self._pointSize[0]
size = self.attributes['size'] * printerScale * self._pointSize[0]
fillcolour = self.attributes['fillcolour']
fillstyle = self.attributes['fillstyle']
marker = self.attributes['marker']
if colour and not isinstance(colour, wx.Colour):
colour = wx.Colour(colour)
if fillcolour and not isinstance(fillcolour, wx.Colour):
fillcolour = wx.Colour(fillcolour)
dc.SetPen(wx.Pen(colour, width))
if fillcolour:
dc.SetBrush(wx.Brush(fillcolour, fillstyle))
else:
dc.SetBrush(wx.Brush(colour, fillstyle))
if coord is None:
if len(self.scaled): # bugfix for Mac OS X
self._drawmarkers(dc, self.scaled, marker, size)
else:
self._drawmarkers(dc, coord, marker, size) # draw legend marker
def getSymExtent(self, printerScale):
"""Width and Height of Marker"""
s = 5 * self.attributes['size'] * printerScale * self._pointSize[0]
return (s, s)
def _drawmarkers(self, dc, coords, marker, size=1):
f = getattr(self, "_{}".format(marker))
f(dc, coords, size)
def _circle(self, dc, coords, size=1):
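        # Build one (x, y, w, h) rectangle per marker, centred on each
        # coordinate, and draw the whole batch with a single wx list call.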
fact = 2.5 * size
wh = 5.0 * size
        rect = np.zeros((len(coords), 4), np.float64) + [0.0, 0.0, wh, wh]
rect[:, 0:2] = coords - [fact, fact]
dc.DrawEllipseList(rect.astype(np.int32))
def _dot(self, dc, coords, size=1):
dc.DrawPointList(coords)
def _square(self, dc, coords, size=1):
fact = 2.5 * size
wh = 5.0 * size
        rect = np.zeros((len(coords), 4), np.float64) + [0.0, 0.0, wh, wh]
rect[:, 0:2] = coords - [fact, fact]
dc.DrawRectangleList(rect.astype(np.int32))
def _triangle(self, dc, coords, size=1):
shape = [(-2.5 * size, 1.44 * size),
(2.5 * size, 1.44 * size), (0.0, -2.88 * size)]
poly = np.repeat(coords, 3, 0)
poly.shape = (len(coords), 3, 2)
poly += shape
dc.DrawPolygonList(poly.astype(np.int32))
def _triangle_down(self, dc, coords, size=1):
shape = [(-2.5 * size, -1.44 * size),
(2.5 * size, -1.44 * size), (0.0, 2.88 * size)]
poly = np.repeat(coords, 3, 0)
poly.shape = (len(coords), 3, 2)
poly += shape
dc.DrawPolygonList(poly.astype(np.int32))
def _cross(self, dc, coords, size=1):
fact = 2.5 * size
for f in [[-fact, -fact, fact, fact], [-fact, fact, fact, -fact]]:
            lines = np.concatenate((coords, coords), axis=1) + f
            dc.DrawLineList(lines.astype(np.int32))
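    # The marker table above also lists 'plus'; a matching handler (restored
    # by analogy with _cross, since the original excerpt ends here) would be:
    def _plus(self, dc, coords, size=1):
        fact = 2.5 * size
        for f in [[-fact, 0, fact, 0], [0, -fact, 0, fact]]:
            lines = np.concatenate((coords, coords), axis=1) + f
            dc.DrawLineList(lines.astype(np.int32))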
import numpy as np
from game import BouncyBalls
from model import *
class Env(object):
r"""The main OpenAI Gym class. It encapsulates an environment with
arbitrary behind-the-scenes dynamics. An environment can be
partially or fully observed.
The main API methods that users of this class need to know are:
step
reset
render
close
seed
And set the following attributes:
action_space: The Space object corresponding to valid actions
observation_space: The Space object corresponding to valid observations
reward_range: A tuple corresponding to the min and max possible rewards
Note: a default reward range set to [-inf,+inf] already exists. Set it if you want a narrower range.
The methods are accessed publicly as "step", "reset", etc.. The
non-underscored versions are wrapper methods to which we may add
functionality over time.
"""
# Set this in SOME subclasses
metadata = {'render.modes': []}
reward_range = (-float('inf'), float('inf'))
spec = None
# Set these in ALL subclasses
action_space = None
observation_space = None
def step(self, action):
"""Run one timestep of the environment's dynamics. When end of
episode is reached, you are responsible for calling `reset()`
to reset this environment's state.
Accepts an action and returns a tuple (observation, reward, done, info).
Args:
action (object): an action provided by the agent
Returns:
observation (object): agent's observation of the current environment
reward (float) : amount of reward returned after previous action
done (bool): whether the episode has ended, in which case further step() calls will return undefined results
info (dict): contains auxiliary diagnostic information (helpful for debugging, and sometimes learning)
"""
raise NotImplementedError
def reset(self):
"""Resets the state of the environment and returns an initial observation.
Returns:
observation (object): the initial observation.
"""
raise NotImplementedError
def render(self, mode='human'):
"""Renders the environment.
The set of supported modes varies per environment. (And some
environments do not support rendering at all.) By convention,
if mode is:
- human: render to the current display or terminal and
return nothing. Usually for human consumption.
        - rgb_array: Return a numpy.ndarray with shape (x, y, 3),
representing RGB values for an x-by-y pixel image, suitable
for turning into a video.
- ansi: Return a string (str) or StringIO.StringIO containing a
terminal-style text representation. The text can include newlines
and ANSI escape sequences (e.g. for colors).
Note:
Make sure that your class's metadata 'render.modes' key includes
the list of supported modes. It's recommended to call super()
in implementations to use the functionality of this method.
Args:
mode (str): the mode to render with
Example:
class MyEnv(Env):
metadata = {'render.modes': ['human', 'rgb_array']}
def render(self, mode='human'):
if mode == 'rgb_array':
return np.array(...) # return RGB frame suitable for video
elif mode == 'human':
... # pop up a window and render
else:
super(MyEnv, self).render(mode=mode) # just raise an exception
"""
raise NotImplementedError
def close(self):
"""Override close in your subclass to perform any necessary cleanup.
Environments will automatically close() themselves when
garbage collected or when the program exits.
"""
pass
def seed(self, seed=None):
"""Sets the seed for this env's random number generator(s).
Note:
Some environments use multiple pseudorandom number generators.
We want to capture all such seeds used in order to ensure that
there aren't accidental correlations between multiple generators.
Returns:
list<bigint>: Returns the list of seeds used in this env's random
number generators. The first value in the list should be the
"main" seed, or the value which a reproducer should pass to
'seed'. Often, the main seed equals the provided 'seed', but
this won't be true if seed=None, for example.
"""
return
@property
def unwrapped(self):
"""Completely unwrap this env.
Returns:
gym.Env: The base non-wrapped gym.Env instance
"""
return self
def __str__(self):
if self.spec is None:
return '<{} instance>'.format(type(self).__name__)
else:
return '<{}<{}>>'.format(type(self).__name__, self.spec.id)
def __enter__(self):
"""Support with-statement for the environment. """
return self
def __exit__(self, *args):
"""Support with-statement for the environment. """
self.close()
# propagate exception
return False
class dummy_env(Env):
def __init__(self):
super().__init__()
self.action_space = np.zeros(3)
self.observation_space = np.zeros(1)
print('created')
def step(self, action):
obs = np.zeros(1)
reward = action[0]
done = True
info = {}
#print(reward)
return obs, reward, done, info
def reset(self):
obs = np.zeros(1)
return obs
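# Minimal rollout sketch (added for illustration; the random action policy is
# an assumption, not part of the original file): drives the Env API above.
def _demo_rollout():
    env = dummy_env()
    obs = env.reset()
    done = False
    while not done:
        action = np.random.uniform(-1.0, 1.0, size=env.action_space.shape)
        obs, reward, done, info = env.step(action)
    env.close()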
class ball_env_1(Env):
"""
Ball environment #1
Only one time step.
"""
def __init__(self):
super().__init__()
self.action_space = np.zeros(6)
self.observation_space = np.zeros(1)
self.game = BouncyBalls()
print('created')
def step(self, action):
obs = np.zeros(1)
ball_posi = self.game.run_one_episode(action)
reward = self.posi_reward(ball_posi)
done = True
info = {}
#print(reward)
return obs, reward, done, info
def posi_reward(self, posi):
reward = 0.0
if posi[0] >= 550 and posi[1]> 150:
reward = 1.0
return reward
def reset(self):
obs = np.zeros(1)
return obs
class ball_env_2(Env):
"""
Ball environment #2
Three time steps, each for one platform.
"""
action_space = np.zeros(2)
observation_space = np.zeros(9)
def __init__(self):
super().__init__()
#self.action_space = np.array([0,0])
#self.observation_space = np.array([0,0,0,0,0,0,0,0,0])
self.game = BouncyBalls()
self.step_count = 0
self.cumulative_action = np.zeros(6)
print('created')
def step(self, action):
self.step_count += 1
if self.step_count >= 3:
# let the ball go
ball_posi = self.game.run_one_episode(self.cumulative_action)
reward = self.posi_reward(ball_posi)
done = True
# reset
self.step_count = 0
self.cumulative_action = np.zeros(6)
else:
self.cumulative_action[self.step_count*2-2:self.step_count*2] = action
reward = 0
done = False
obs = np.zeros(9)
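        # obs layout: three [placed-flag, x, y] triplets, one per platform;
        # note the loop below fills every placed slot with the current action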
for i in range(self.step_count):
obs[i*3] = 1
obs[i*3+1:i*3+3] = action
info = {}
#print(reward)
return obs, reward, done, info
def posi_reward(self, posi):
reward = 0.0
if posi[0] >= 550 and posi[1]> 150:
reward = 1.0
return reward
def reset(self):
obs = self.observation_space
return obs
class ball_env_3(Env):
"""
Ball environment #3
Three time steps, each for one platform. With predicted ball position as observation.
"""
action_space = np.zeros(2)
observation_space = np.zeros(11)
def __init__(self):
super().__init__()
#self.action_space = np.array([0,0])
#self.observation_space = np.array([0,0,0,0,0,0,0,0,0])
self.game = BouncyBalls()
self.step_count = 0
self.cumulative_action = np.zeros(6)
print('created')
def step(self, action):
self.step_count += 1
if self.step_count >= 3:
# let the ball go
ball_posi = self.game.run_one_episode(self.cumulative_action)
reward = self.posi_reward(ball_posi)
done = True
# reset
self.step_count = 0
self.cumulative_action = np.array([0,0,0,0,0,0])
else:
self.cumulative_action[self.step_count*2-2:self.step_count*2] = action
reward = 0
done = False
obs = np.array([0,0,0,0,0,0,0,0,0])
for i in range(self.step_count):
obs[i*3] = 1
obs[i*3+1:i*3+3] = action
info = {}
#print(reward)
return obs, reward, done, info
def posi_reward(self, posi):
reward = 0.0
if posi[0] >= 550 and posi[1]> 150:
reward = 1.0
return reward
def reset(self):
obs = self.observation_space
return obs
class ball_env_4(Env):
"""
Ball environment #4
Ten time steps. At each time step, change position or let the ball go. No prediction model.
"""
action_space = np.zeros(6+1)# posi for three platforms + let the ball go
observation_space = np.zeros(7)
def __init__(self):
super().__init__()
#self.action_space = np.array([0,0])
#self.observation_space = np.array([0,0,0,0,0,0,0,0,0])
self.game = BouncyBalls()
self.step_count = 0
self.cumulative_action = np.zeros(6)
print('created')
def step(self, action):
self.step_count += 1
if action[6] > 0 or self.step_count >= 10:
# let the ball go
obs = action
ball_posi = self.game.run_one_episode(self.cumulative_action)
reward = self.posi_reward(ball_posi)
            # special case for letting the ball go on the first step
if self.step_count == 1:
reward = 0
done = True
# reset
self.step_count = 0
#self.cumulative_action = np.array([0,0,0,0,0,0])
else:
self.cumulative_action = action
obs = action
reward = 0
done = False
#obs = np.array([0,0,0,0,0,0,0,0,0])
#for i in range(self.step_count):
# obs[i*3] = 1
# obs[i*3+1:i*3+3] = action
info = {}
#print(reward)
return obs, reward, done, info
def posi_reward(self, posi):
reward = 0.0
if posi[0] >= 550 and posi[1]> 150:
reward = 1.0
return reward
def reset(self):
obs = self.observation_space
return obs
class ball_env_5(Env):
"""
Ball environment #5
Ten time steps. At each time step, change position or let the ball go. With predicted ball position as observation.
"""
action_space = np.zeros(6+1)# posi for three platforms + let the ball go
observation_space = np.zeros(7+2)# action + predicted ball posi
def __init__(self):
super().__init__()
#self.action_space = np.array([0,0])
#self.observation_space = np.array([0,0,0,0,0,0,0,0,0])
self.game = BouncyBalls()
self.step_count = 0
#self.cumulative_action = np.zeros(6)
self.pred_net = LSTM_Init_To_Many_3()
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
self.pred_net.load_state_dict(torch.load('preTrained/CP_epoch30.pth', map_location=self.device))
self.pred_net.to(device=self.device)
self.pred_net.eval()
print('created')
def step(self, action):
self.step_count += 1
if action[6] > 0 or self.step_count >= 10:
# let the ball go
ball_posi = self.game.run_one_episode(action)
reward = self.posi_reward(ball_posi)
            # special case for letting the ball go on the first step
if self.step_count == 1:
reward = 0
done = True
# reset
self.step_count = 0
#self.cumulative_action = np.array([0,0,0,0,0,0])
else:
#self.cumulative_action[self.step_count*2-2:self.step_count*2] = action
reward = 0
done = False
# prediction
mean = np.array([ 30.893, 270.33, 200.388, 199.573, 350.057, 200.53 ])
std = np.array([14.54288661, 14.70269023, 14.31668453, 14.40488358, 14.85717843, 15.25080654])
normalized_platform_posi = (action[:6]-mean)/std
with torch.no_grad():
pred_input = torch.from_numpy(np.expand_dims(normalized_platform_posi, axis=0)).float().to(self.device)
pred_output = self.pred_net(pred_input).cpu().numpy()
        def get_pred_ball_posi(pred_output, x_min=20, x_max=550, y_min=50, y_max=550):
            mean = np.array([163.29437530326638, 279.7768839198992])
            std = np.array([138.14349185245848, 113.09608505385799])
            last_posi = pred_output[0, -1]
            pred_output_denormalized = pred_output*std + mean
            # keep the last predicted position before the trajectory leaves
            # the valid screen region
            for i in range(pred_output.shape[1]):
                if pred_output_denormalized[0, i, 0] < x_min or pred_output_denormalized[0, i, 0] > x_max or pred_output_denormalized[0, i, 1] < y_min or pred_output_denormalized[0, i, 1] > y_max:
                    last_posi = pred_output[0, i]
                    break
            return last_posi
last_posi = get_pred_ball_posi(pred_output)
obs = np.zeros(9)
obs[:7]=action
obs[7:]=last_posi
info = {}
#print(reward)
return obs, reward, done, info
def posi_reward(self, posi):
reward = 0.0
if posi[0] >= 550 and posi[1]> 150:
reward = 1.0
return reward
def reset(self):
obs = self.observation_space
return obs
class ball_env_6(Env):
"""
Ball environment #6
Ten time steps. At each time step, change position or let the ball go. With ORACLE predicted ball position as observation.
"""
action_space = np.zeros(6+1)# posi for three platforms + let the ball go
    observation_space = np.zeros(7+2)
'''
Based on https://www.mattkeeter.com/projects/contours/
and http://www.iquilezles.org/
'''
import numpy as np
from util import *
import matplotlib.pyplot as plt
import matplotlib.patches as patches
fig1 = plt.figure()
ax1 = fig1.add_subplot(111, aspect='equal')
ax1.set_xlim([0, 1])
ax1.set_ylim([0, 1])
class ImplicitObject:
def __init__(self, implicit_lambda_function):
self.implicit_lambda_function = implicit_lambda_function
def eval_point(self, two_d_point):
assert two_d_point.shape == (2, 1) # not allow vectorize yet
value = self.implicit_lambda_function(two_d_point[0][0], two_d_point[1][0])
return value;
def is_point_inside(self, two_d_point):
assert two_d_point.shape == (2, 1), "two_d_point format incorrect, {}".format(two_d_point)
value = self.eval_point(two_d_point)
if value <= 0:
return True
else:
return False
def union(self, ImplicitObjectInstance):
return ImplicitObject(lambda x, y: min(
self.eval_point(np.array([[x], [y]])),
            ImplicitObjectInstance.eval_point(np.array([[x], [y]]))
        ))
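# Usage sketch (added for illustration, not part of the original file):
# implicit circles written as signed functions, combined with union() above.
def _demo_implicit_union():
    circle_a = ImplicitObject(lambda x, y: (x - 0.3)**2 + (y - 0.5)**2 - 0.2**2)
    circle_b = ImplicitObject(lambda x, y: (x - 0.7)**2 + (y - 0.5)**2 - 0.2**2)
    both = circle_a.union(circle_b)
    print(both.is_point_inside(np.array([[0.3], [0.5]])))  # True: inside circle_a
    print(both.is_point_inside(np.array([[0.5], [0.9]])))  # False: outside both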
import logging
import pickle
import random
from collections import Counter
from itertools import chain, permutations
from typing import Any, Dict, List, NamedTuple, Optional, Set, Tuple, Union
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from snorkel.analysis import Scorer
from snorkel.labeling.analysis import LFAnalysis
from snorkel.labeling.model.graph_utils import get_clique_tree
from snorkel.labeling.model.logger import Logger
from snorkel.types import Config
from snorkel.utils import probs_to_preds
from snorkel.utils.config_utils import merge_config
from snorkel.utils.lr_schedulers import LRSchedulerConfig
from snorkel.utils.optimizers import OptimizerConfig
Metrics = Dict[str, float]
class TrainConfig(Config):
"""Settings for the fit() method of LabelModel.
Parameters
----------
n_epochs
The number of epochs to train (where each epoch is a single optimization step)
lr
Base learning rate (will also be affected by lr_scheduler choice and settings)
l2
Centered L2 regularization strength
optimizer
Which optimizer to use (one of ["sgd", "adam", "adamax"])
optimizer_config
Settings for the optimizer
lr_scheduler
Which lr_scheduler to use (one of ["constant", "linear", "exponential", "step"])
lr_scheduler_config
Settings for the LRScheduler
prec_init
LF precision initializations / priors
seed
A random seed to initialize the random number generator with
log_freq
Report loss every this many epochs (steps)
mu_eps
Restrict the learned conditional probabilities to [mu_eps, 1-mu_eps]
"""
n_epochs: int = 100
lr: float = 0.01
l2: float = 0.0
optimizer: str = "sgd"
optimizer_config: OptimizerConfig = OptimizerConfig() # type: ignore
lr_scheduler: str = "constant"
lr_scheduler_config: LRSchedulerConfig = LRSchedulerConfig() # type: ignore
prec_init: float = 0.7
seed: int = np.random.randint(1e6)
log_freq: int = 10
mu_eps: Optional[float] = None
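# Usage sketch (added for illustration; passing these settings as fit()
# keyword arguments is an assumption based on the docstring above, which
# describes TrainConfig as the settings for the fit() method):
def _demo_train_config_overrides():
    L_train = np.array([[0, 0, -1], [1, 1, -1], [0, 0, -1]])
    label_model = LabelModel(cardinality=2, verbose=False)
    label_model.fit(L_train, n_epochs=200, lr=0.01, seed=123)
    return label_model.predict(L_train)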
class LabelModelConfig(Config):
"""Settings for the LabelModel initialization.
Parameters
----------
verbose
Whether to include print statements
device
What device to place the model on ('cpu' or 'cuda:0', for example)
"""
verbose: bool = True
device: str = "cpu"
class _CliqueData(NamedTuple):
start_index: int
end_index: int
max_cliques: Set[int]
class LabelModel(nn.Module):
r"""A model for learning the LF accuracies and combining their output labels.
This class learns a model of the labeling functions' conditional probabilities
of outputting the true (unobserved) label `Y`, `P(\lf | Y)`, and uses this learned
model to re-weight and combine their output labels.
This class is based on the approach in [Training Complex Models with Multi-Task
Weak Supervision](https://arxiv.org/abs/1810.02840), published in AAAI'19. In this
approach, we compute the inverse generalized covariance matrix of the junction tree
of a given LF dependency graph, and perform a matrix completion-style approach with
respect to these empirical statistics. The result is an estimate of the conditional
LF probabilities, `P(\lf | Y)`, which are then set as the parameters of the label
model used to re-weight and combine the labels output by the LFs.
Currently this class uses a conditionally independent label model, in which the LFs
are assumed to be conditionally independent given `Y`.
Examples
--------
>>> label_model = LabelModel()
>>> label_model = LabelModel(cardinality=3)
>>> label_model = LabelModel(cardinality=3, device='cpu')
>>> label_model = LabelModel(cardinality=3)
Parameters
----------
cardinality
Number of classes, by default 2
**kwargs
Arguments for changing config defaults
Raises
------
ValueError
If config device set to cuda but only cpu is available
Attributes
----------
cardinality
Number of classes, by default 2
config
Training configuration
seed
Random seed
"""
def __init__(self, cardinality: int = 2, **kwargs: Any) -> None:
super().__init__()
self.config: LabelModelConfig = LabelModelConfig(**kwargs)
self.cardinality = cardinality
# Confirm that cuda is available if config is using CUDA
if self.config.device != "cpu" and not torch.cuda.is_available():
raise ValueError("device=cuda but CUDA not available.")
# By default, put model in eval mode; switch to train mode in training
self.eval()
def _create_L_ind(self, L: np.ndarray) -> np.ndarray:
"""Convert a label matrix with labels in 0...k to a one-hot format.
Parameters
----------
L
An [n,m] label matrix with values in {0,1,...,k}
Returns
-------
np.ndarray
An [n,m*k] dense np.ndarray with values in {0,1}
"""
L_ind = np.zeros((self.n, self.m * self.cardinality))
for y in range(1, self.cardinality + 1):
# A[x::y] slices A starting at x at intervals of y
# e.g., np.arange(9)[0::3] == np.array([0,3,6])
L_ind[:, (y - 1) :: self.cardinality] = np.where(L == y, 1, 0)
return L_ind
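    # Example of the conversion above (illustration): with m=2 LFs and
    # cardinality k=2, L = [[1, 2], [2, 0]] maps to
    #   L_ind = [[1, 0, 0, 1],
    #            [0, 1, 0, 0]]
    # where label 0 (abstain) contributes no indicator.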
def _get_augmented_label_matrix(
self, L: np.ndarray, higher_order: bool = False
) -> np.ndarray:
"""Create augmented version of label matrix.
In augmented version, each column is an indicator
for whether a certain source or clique of sources voted in a certain
pattern.
Parameters
----------
L
An [n,m] label matrix with values in {0,1,...,k}
higher_order
Whether to include higher-order correlations (e.g. LF pairs) in matrix
Returns
-------
np.ndarray
An [n,m*k] dense matrix with values in {0,1}
"""
# Create a helper data structure which maps cliques (as tuples of member
# sources) --> {start_index, end_index, maximal_cliques}, where
# the last value is a set of indices in this data structure
self.c_data: Dict[int, _CliqueData] = {}
for i in range(self.m):
self.c_data[i] = _CliqueData(
start_index=i * self.cardinality,
end_index=(i + 1) * self.cardinality,
max_cliques=set(
[
j
for j in self.c_tree.nodes()
if i in self.c_tree.node[j]["members"]
]
),
)
L_ind = self._create_L_ind(L)
# Get the higher-order clique statistics based on the clique tree
# First, iterate over the maximal cliques (nodes of c_tree) and
# separator sets (edges of c_tree)
if higher_order:
L_aug = np.copy(L_ind)
for item in chain(self.c_tree.nodes(), self.c_tree.edges()):
if isinstance(item, int):
C = self.c_tree.node[item]
elif isinstance(item, tuple):
C = self.c_tree[item[0]][item[1]]
else:
raise ValueError(item)
members = list(C["members"])
# With unary maximal clique, just store its existing index
C["start_index"] = members[0] * self.cardinality
C["end_index"] = (members[0] + 1) * self.cardinality
return L_aug
else:
return L_ind
def _build_mask(self) -> None:
"""Build mask applied to O^{-1}, O for the matrix approx constraint."""
self.mask = torch.ones(self.d, self.d).byte()
for ci in self.c_data.values():
si = ci.start_index
ei = ci.end_index
for cj in self.c_data.values():
sj, ej = cj.start_index, cj.end_index
# Check if ci and cj are part of the same maximal clique
# If so, mask out their corresponding blocks in O^{-1}
if len(ci.max_cliques.intersection(cj.max_cliques)) > 0:
self.mask[si:ei, sj:ej] = 0
self.mask[sj:ej, si:ei] = 0
def _generate_O(self, L: np.ndarray, higher_order: bool = False) -> None:
"""Generate overlaps and conflicts matrix from label matrix.
Parameters
----------
L
An [n,m] label matrix with values in {0,1,...,k}
higher_order
Whether to include higher-order correlations (e.g. LF pairs) in matrix
"""
L_aug = self._get_augmented_label_matrix(L, higher_order=higher_order)
self.d = L_aug.shape[1]
self.O = (
torch.from_numpy(L_aug.T @ L_aug / self.n).float().to(self.config.device)
)
def _init_params(self) -> None:
r"""Initialize the learned params.
- \mu is the primary learned parameter, where each row corresponds to
the probability of a clique C emitting a specific combination of labels,
conditioned on different values of Y (for each column); that is:
self.mu[i*self.cardinality + j, y] = P(\lambda_i = j | Y = y)
and similarly for higher-order cliques.
Raises
------
ValueError
If prec_init shape does not match number of LFs
"""
# Initialize mu so as to break basic reflective symmetry
# Note that we are given either a single or per-LF initial precision
# value, prec_i = P(Y=y|\lf=y), and use:
# mu_init = P(\lf=y|Y=y) = P(\lf=y) * prec_i / P(Y=y)
# Handle single values
if isinstance(self.train_config.prec_init, (int, float)):
self._prec_init = self.train_config.prec_init * torch.ones(self.m)
if self._prec_init.shape[0] != self.m:
raise ValueError(f"prec_init must have shape {self.m}.")
# Get the per-value labeling propensities
# Note that self.O must have been computed already!
lps = torch.diag(self.O).cpu().detach().numpy()
# TODO: Update for higher-order cliques!
self.mu_init = torch.zeros(self.d, self.cardinality)
for i in range(self.m):
for y in range(self.cardinality):
idx = i * self.cardinality + y
mu_init = torch.clamp(lps[idx] * self._prec_init[i] / self.p[y], 0, 1)
self.mu_init[idx, y] += mu_init
# Initialize randomly based on self.mu_init
self.mu = nn.Parameter(self.mu_init.clone() * np.random.random()).float()
# Build the mask over O^{-1}
self._build_mask()
def _get_conditional_probs(self, mu: np.ndarray) -> np.ndarray:
r"""Return the estimated conditional probabilities table given parameters mu.
Given a parameter vector mu, return the estimated conditional probabilites
table cprobs, where cprobs is an (m, k+1, k)-dim np.ndarray with:
cprobs[i, j, k] = P(\lf_i = j-1 | Y = k)
where m is the number of LFs, k is the cardinality, and cprobs includes the
conditional abstain probabilities P(\lf_i = -1 | Y = y).
Parameters
----------
mu
An [m * k, k] np.ndarray with entries in [0, 1]
Returns
-------
np.ndarray
An [m, k + 1, k] np.ndarray conditional probabilities table.
"""
cprobs = np.zeros((self.m, self.cardinality + 1, self.cardinality))
for i in range(self.m):
# si = self.c_data[(i,)]['start_index']
# ei = self.c_data[(i,)]['end_index']
# mu_i = mu[si:ei, :]
mu_i = mu[i * self.cardinality : (i + 1) * self.cardinality, :]
cprobs[i, 1:, :] = mu_i
# The 0th row (corresponding to abstains) is the difference between
# the sums of the other rows and one, by law of total probability
cprobs[i, 0, :] = 1 - mu_i.sum(axis=0)
return cprobs
def get_conditional_probs(self) -> np.ndarray:
r"""Return the estimated conditional probabilities table.
Return the estimated conditional probabilites table cprobs, where cprobs is an
(m, k+1, k)-dim np.ndarray with:
cprobs[i, j, k] = P(\lf_i = j-1 | Y = k)
where m is the number of LFs, k is the cardinality, and cprobs includes the
conditional abstain probabilities P(\lf_i = -1 | Y = y).
Returns
-------
np.ndarray
An [m, k + 1, k] np.ndarray conditional probabilities table.
"""
return self._get_conditional_probs(self.mu.cpu().detach().numpy())
def get_weights(self) -> np.ndarray:
"""Return the vector of learned LF weights for combining LFs.
Returns
-------
np.ndarray
[m,1] vector of learned LF weights for combining LFs.
Example
-------
>>> L = np.array([[1, 1, 1], [1, 1, -1], [-1, 0, 0], [0, 0, 0]])
>>> label_model = LabelModel(verbose=False)
>>> label_model.fit(L, seed=123)
>>> np.around(label_model.get_weights(), 2) # doctest: +SKIP
array([0.99, 0.99, 0.99])
"""
accs = np.zeros(self.m)
cprobs = self.get_conditional_probs()
for i in range(self.m):
accs[i] = np.diag(cprobs[i, 1:, :] @ self.P.cpu().detach().numpy()).sum()
return np.clip(accs / self.coverage, 1e-6, 1.0)
def predict_proba(self, L: np.ndarray) -> np.ndarray:
r"""Return label probabilities P(Y | \lambda).
Parameters
----------
L
            An [n,m] matrix with values in {-1,0,1,...,k-1}
Returns
-------
np.ndarray
An [n,k] array of probabilistic labels
Example
-------
>>> L = np.array([[0, 0, 0], [1, 1, 1], [1, 1, 1]])
>>> label_model = LabelModel(verbose=False)
>>> label_model.fit(L, seed=123)
>>> np.around(label_model.predict_proba(L), 1) # doctest: +SKIP
array([[1., 0.],
[0., 1.],
[0., 1.]])
"""
L_shift = L + 1 # convert to {0, 1, ..., k}
self._set_constants(L_shift)
L_aug = self._get_augmented_label_matrix(L_shift)
mu = self.mu.cpu().detach().numpy()
jtm = np.ones(L_aug.shape[1])
# Note: We omit abstains, effectively assuming uniform distribution here
X = np.exp(L_aug @ np.diag(jtm) @ np.log(mu) + np.log(self.p))
Z = np.tile(X.sum(axis=1).reshape(-1, 1), self.cardinality)
return X / Z
def predict(
self,
L: np.ndarray,
return_probs: Optional[bool] = False,
tie_break_policy: str = "abstain",
) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]:
"""Return predicted labels, with ties broken according to policy.
Policies to break ties include:
"abstain": return an abstain vote (-1)
"true-random": randomly choose among the tied options
"random": randomly choose among tied option using deterministic hash
NOTE: if tie_break_policy="true-random", repeated runs may have slightly different
results due to difference in broken ties
Parameters
----------
L
An [n,m] matrix with values in {-1,0,1,...,k-1}
return_probs
Whether to return probs along with preds
tie_break_policy
Policy to break ties when converting probabilistic labels to predictions
Returns
-------
np.ndarray
An [n,1] array of integer labels
(np.ndarray, np.ndarray)
An [n,1] array of integer labels and an [n,k] array of probabilistic labels
Example
-------
>>> L = np.array([[0, 0, -1], [1, 1, -1], [0, 0, -1]])
>>> label_model = LabelModel(verbose=False)
>>> label_model.fit(L)
>>> label_model.predict(L)
array([0, 1, 0])
"""
Y_probs = self.predict_proba(L)
Y_p = probs_to_preds(Y_probs, tie_break_policy)
if return_probs:
return Y_p, Y_probs
return Y_p
def score(
self,
L: np.ndarray,
Y: np.ndarray,
metrics: Optional[List[str]] = ["accuracy"],
tie_break_policy: str = "abstain",
) -> Dict[str, float]:
"""Calculate one or more scores from user-specified and/or user-defined metrics.
Parameters
----------
L
An [n,m] matrix with values in {-1,0,1,...,k-1}
Y
Gold labels associated with data points in L
metrics
A list of metric names
tie_break_policy
Policy to break ties when converting probabilistic labels to predictions
Returns
-------
Dict[str, float]
A dictionary mapping metric names to metric scores
Example
-------
>>> L = np.array([[1, 1, -1], [0, 0, -1], [1, 1, -1]])
>>> label_model = LabelModel(verbose=False)
>>> label_model.fit(L)
>>> label_model.score(L, Y=np.array([1, 1, 1]))
{'accuracy': 0.6666666666666666}
>>> label_model.score(L, Y=np.array([1, 1, 1]), metrics=["f1"])
{'f1': 0.8}
"""
if tie_break_policy == "abstain": # pragma: no cover
logging.warning(
"Metrics calculated over data points with non-abstain labels only"
)
Y_pred, Y_prob = self.predict(
L, return_probs=True, tie_break_policy=tie_break_policy
)
scorer = Scorer(metrics=metrics)
results = scorer.score(Y, Y_pred, Y_prob)
return results
# These loss functions get all their data directly from the LabelModel
# (for better or worse). The unused *args make these compatible with the
    # Classifier._train() method, which expects loss functions to accept an input.
def _loss_l2(self, l2: float = 0) -> torch.Tensor:
r"""L2 loss centered around mu_init, scaled optionally per-source.
In other words, diagonal Tikhonov regularization,
||D(\mu-\mu_{init})||_2^2
where D is diagonal.
Parameters
----------
l2
A float or np.array representing the per-source regularization
strengths to use, by default 0
Returns
-------
torch.Tensor
L2 loss between learned mu and initial mu
"""
if isinstance(l2, (int, float)):
D = l2 * torch.eye(self.d)
else:
D = torch.diag(torch.from_numpy(l2)).type(torch.float32)
D = D.to(self.config.device)
# Note that mu is a matrix and this is the *Frobenius norm*
return torch.norm(D @ (self.mu - self.mu_init)) ** 2
def _loss_mu(self, l2: float = 0) -> torch.Tensor:
r"""Overall mu loss.
Parameters
----------
l2
A float or np.array representing the per-source regularization
strengths to use, by default 0
Returns
-------
torch.Tensor
Overall mu loss between learned mu and initial mu
"""
loss_1 = torch.norm((self.O - self.mu @ self.P @ self.mu.t())[self.mask]) ** 2
loss_2 = torch.norm(torch.sum(self.mu @ self.P, 1) - torch.diag(self.O)) ** 2
return loss_1 + loss_2 + self._loss_l2(l2=l2)
def _set_class_balance(
self, class_balance: Optional[List[float]], Y_dev: np.ndarray
) -> None:
"""Set a prior for the class balance.
In order of preference:
1) Use user-provided class_balance
2) Estimate balance from Y_dev
3) Assume uniform class distribution
"""
if class_balance is not None:
self.p = np.array(class_balance)
if len(self.p) != self.cardinality:
raise ValueError(
f"class_balance has {len(self.p)} entries. Does not match LabelModel cardinality {self.cardinality}."
)
elif Y_dev is not None:
class_counts = Counter(Y_dev)
sorted_counts = np.array([v for k, v in sorted(class_counts.items())])
self.p = sorted_counts / sum(sorted_counts)
if len(self.p) != self.cardinality:
raise ValueError(
f"Y_dev has {len(self.p)} class(es). Does not match LabelModel cardinality {self.cardinality}."
)
else:
            self.p = (1 / self.cardinality) * np.ones(self.cardinality)
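    # Example (illustration): with cardinality 2 and Y_dev = [0, 0, 1], the
    # estimate above gives self.p = [2/3, 1/3]; with neither class_balance
    # nor Y_dev supplied, self.p falls back to the uniform [0.5, 0.5].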
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 26 10:34:34 2019
@author: Toonw
"""
import gaze_data_analyzer as gda
import numpy as np
def analyze(session_folder):
try:
print("Running for " + session_folder)
print("------------------------")
# Setting path and files
session_path = "session_data/" + session_folder + "/"
test_folder = session_path + "test_" + type_of_cal + "/"
config_filename = session_path + "config.csv"
# cal_filename = test_folder + "training_fixation.csv"
cal_filename = test_folder + "training_fixation_2.csv"
# cal_filename = test_folder + "training_pursuit_circle.csv"
# cal_filename = test_folder + "training_pursuit_linear.csv"
# cal_filename = test_folder + "training_pursuit_spiral.csv"
# remove_outliers = True
remove_outliers = False
print("")
print("Computing analyze linear transformation")
print("------------------------")
analyzer = gda.GazeDataAnalyzer()
print("\nSETUP TRANSFORMATION")
# analyzer.cross_validation(config_filename, cal_filename, "dbscan_fixation", k = 2)
# analyzer.cross_validation(config_filename, cal_filename, "dbscan_pursuit", k = 5)
analyzer.setup_affine2(config_filename, cal_filename, "dbscan_fixation")
# analyzer.setup_affine2(config_filename, cal_filename, "dbscan_pursuit")
# print("\nTRAINING DATA")
# analyzer.analyze(cal_filename, "dbscan_fixation")
try:
print("\nTEST DATA - FIXATION")
training_filename = test_folder + "training_fixation.csv"
angle_avg, angle_avg_corrected = analyzer.analyze_affine2(training_filename, "dbscan_fixation", "values", remove_outliers = remove_outliers)
# if len(angle_avg) < (5*90*0.3):
# raise ValueError('Not enough data')
fixation_deg_raw.append(np.mean(angle_avg))
fixation_deg_cor.append(np.mean(angle_avg_corrected))
except:
print("-----------------------------")
print("SKIPPING FIXATION")
print("-----------------------------")
fixation_deg_raw.append(0)
fixation_deg_cor.append(0)
try:
print("\nTEST DATA - FIXATION_2")
training_filename = test_folder + "training_fixation_2.csv"
angle_avg, angle_avg_corrected = analyzer.analyze_affine2(training_filename, "dbscan_fixation", "values", remove_outliers = remove_outliers)
# if len(angle_avg) < (5*90*0.3):
# raise ValueError('Not enough data')
fixation_2_deg_raw.append(np.mean(angle_avg))
fixation_2_deg_cor.append(np.mean(angle_avg_corrected))
except:
print("-----------------------------")
print("SKIPPING FIXATION 2")
print("-----------------------------")
fixation_2_deg_raw.append(0)
fixation_2_deg_cor.append(0)
try:
print("\nTEST DATA - PURSUIT (CIRCLE)")
training_filename = test_folder + "training_pursuit_circle.csv"
angle_avg, angle_avg_corrected = analyzer.analyze_affine2(training_filename, "dbscan_pursuit", "values", remove_outliers = remove_outliers)
# if len(angle_avg) < (5*90*0.3):
# raise ValueError('Not enough data')
pursuit_circle_deg_raw.append(np.mean(angle_avg))
pursuit_circle_deg_cor.append(np.mean(angle_avg_corrected))
except:
print("-----------------------------")
print("SKIPPING PURSUIT CIRCLE")
print("-----------------------------")
pursuit_circle_deg_raw.append(0)
pursuit_circle_deg_cor.append(0)
try:
print("\nTEST DATA - PURSUIT (CIRCLE REVERT)")
training_filename = test_folder + "training_pursuit_circle_revert.csv"
angle_avg, angle_avg_corrected = analyzer.analyze_affine2(training_filename, "dbscan_pursuit", "values", remove_outliers = remove_outliers)
# if len(angle_avg) < (5*90*0.3):
# raise ValueError('Not enough data')
pursuit_circle_revert_deg_raw.append(np.mean(angle_avg))
pursuit_circle_revert_deg_cor.append(np.mean(angle_avg_corrected))
except:
print("-----------------------------")
print("SKIPPING PURSUIT CIRCLE REVERT")
print("-----------------------------")
pursuit_circle_revert_deg_raw.append(0)
pursuit_circle_revert_deg_cor.append(0)
try:
print("\nTEST DATA - PURSUIT (LINEAR)")
training_filename = test_folder + "training_pursuit_linear.csv"
angle_avg, angle_avg_corrected = analyzer.analyze_affine2(training_filename, "dbscan_pursuit", "values", remove_outliers = remove_outliers)
# if len(angle_avg) < (5*90*0.3):
# raise ValueError('Not enough data')
            pursuit_linear_deg_raw.append(np.mean(angle_avg))
import numpy as np
from ..cluster import cluster
import time
class Options():
def __init__(self):
self.max_iter = 0 # maximum number of iterations
self.max_error = -np.inf # maximum allowed error
self.limit_space = True # optional coordinate bound control
self.sparse_program = False # optional regularization for sparsity
self.sparsity_weight = 1.0 # weight to penalize density of program
self.active_callback = True # activates reports to terminal
self.dynamic = True # temporary experimental implementation
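# Usage sketch (added for illustration): Optimizer below instantiates the
# options class itself (self.options = options()), so overrides are applied
# on the instance after construction, e.g.:
#   opt = Optimizer(M=model)
#   opt.options.max_iter = 200
#   opt.options.max_error = 1e-6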
class Optimizer():
def __init__(self, M=None, options=Options):
self.p = None # this is necessary for cluster.map call
self.options = options()
if M is not None:
self.connect_model(M)
self.restart()
def connect_model(self, M):
self.M = M # reference to model instance
def disconnect_model(self):
self.M = None
def restart(self):
self.terminate = False # termination condition state
self.epoch_time = 0.0 # time elapsed during last epoch
self.total_time = 0.0 # total elapsed time during training
self.iter = 0
self.options.max_iter = 0
self.initialize_state()
self.initialize_parameter_array()
self.results = []
@cluster.on_master
def initialize_state(self):
"""This function initializes variables for specific algorithm"""
pass
def initialize_parameter_array(self):
if cluster.global_rank == 0:
shape = self.p.shape
if self.options.dynamic:
self.p_array = np.zeros((shape[0] + 1, shape[1]))
else:
self.p_array = np.zeros(shape)
else:
self.p_array = None
@cluster.on_master
def fill_parameter_array(self):
if self.options.dynamic:
self.p_array[:-1] = self.p # add new population of the method
self.p_array[-1] = self.M.p # add current gbest (loss can be dynamic)
else:
self.p_array = self.p
@cluster.on_master
def resolve_gbest_result(self):
if self.options.dynamic:
self.M.y = self.results[-1][0]
@cluster.on_master
def extract_results(self):
# mutate r into an array so each value type corresponds to a single row
# r[0,:]=losses, r[1,:]=densities, r[2,:]=return codes, r[3,:]=sim. elapsed times
if self.options.dynamic:
r = np.array(self.results[:-1]).transpose()
else:
r = np.array(self.results).transpose()
total = r.shape[1]
# read results by type (rows)
self.y[:] = r[0,:]
if self.options.sparse_program:
self.y += r[1,:]
self.failed = np.count_nonzero(r[2,:])
self.survived = total - self.failed
        self.mean_sim_cpu_time = np.sum(r[3,:])
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Classes for reading/manipulating/writing VASP output files.
"""
import datetime
import glob
import itertools
import json
import logging
import math
import os
import re
import warnings
import xml.etree.ElementTree as ET
from collections import defaultdict
from io import StringIO
from pathlib import Path
from typing import DefaultDict, List, Optional, Tuple, Union
import numpy as np
from monty.dev import deprecated
from monty.io import reverse_readfile, zopen
from monty.json import MSONable, jsanitize
from monty.os.path import zpath
from monty.re import regrep
from scipy.interpolate import RegularGridInterpolator
from pymatgen.core.composition import Composition
from pymatgen.core.lattice import Lattice
from pymatgen.core.periodic_table import Element
from pymatgen.core.structure import Structure
from pymatgen.core.units import unitized
from pymatgen.electronic_structure.bandstructure import (
BandStructure,
BandStructureSymmLine,
get_reconstructed_band_structure,
)
from pymatgen.electronic_structure.core import Magmom, Orbital, OrbitalType, Spin
from pymatgen.electronic_structure.dos import CompleteDos, Dos
from pymatgen.entries.computed_entries import ComputedEntry, ComputedStructureEntry
from pymatgen.io.vasp.inputs import Incar, Kpoints, Poscar, Potcar
from pymatgen.io.wannier90 import Unk
from pymatgen.util.io_utils import clean_lines, micro_pyawk
from pymatgen.util.num import make_symmetric_matrix_from_upper_tri
logger = logging.getLogger(__name__)
def _parse_parameters(val_type, val):
"""
Helper function to convert a Vasprun parameter into the proper type.
Boolean, int and float types are converted.
Args:
val_type: Value type parsed from vasprun.xml.
val: Actual string value parsed for vasprun.xml.
"""
if val_type == "logical":
return val == "T"
if val_type == "int":
return int(val)
if val_type == "string":
return val.strip()
return float(val)
def _parse_v_parameters(val_type, val, filename, param_name):
r"""
Helper function to convert a Vasprun array-type parameter into the proper
type. Boolean, int and float types are converted.
Args:
val_type: Value type parsed from vasprun.xml.
val: Actual string value parsed for vasprun.xml.
filename: Fullpath of vasprun.xml. Used for robust error handling.
E.g., if vasprun.xml contains *** for some Incar parameters,
the code will try to read from an INCAR file present in the same
directory.
param_name: Name of parameter.
Returns:
Parsed value.
"""
if val_type == "logical":
val = [i == "T" for i in val.split()]
elif val_type == "int":
try:
val = [int(i) for i in val.split()]
except ValueError:
# Fix for stupid error in vasprun sometimes which displays
# LDAUL/J as 2****
val = _parse_from_incar(filename, param_name)
if val is None:
raise OSError("Error in parsing vasprun.xml")
elif val_type == "string":
val = val.split()
else:
try:
val = [float(i) for i in val.split()]
except ValueError:
# Fix for stupid error in vasprun sometimes which displays
# MAGMOM as 2****
val = _parse_from_incar(filename, param_name)
if val is None:
raise OSError("Error in parsing vasprun.xml")
return val
def _parse_varray(elem):
if elem.get("type", None) == "logical":
m = [[i == "T" for i in v.text.split()] for v in elem]
else:
m = [[_vasprun_float(i) for i in v.text.split()] for v in elem]
return m
def _parse_from_incar(filename, key):
"""
Helper function to parse a parameter from the INCAR.
"""
dirname = os.path.dirname(filename)
for f in os.listdir(dirname):
if re.search(r"INCAR", f):
warnings.warn("INCAR found. Using " + key + " from INCAR.")
incar = Incar.from_file(os.path.join(dirname, f))
if key in incar:
return incar[key]
return None
return None
def _vasprun_float(f):
"""
Large numbers are often represented as ********* in the vasprun.
This function parses these values as np.nan
"""
try:
return float(f)
except ValueError as e:
f = f.strip()
if f == "*" * len(f):
warnings.warn("Float overflow (*******) encountered in vasprun")
return np.nan
raise e
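# Behavior sketch (illustration): _vasprun_float("1.5") -> 1.5, while
# _vasprun_float("********") emits an overflow warning and returns np.nan;
# any other non-numeric string re-raises the original ValueError.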
class Vasprun(MSONable):
"""
Vastly improved cElementTree-based parser for vasprun.xml files. Uses
iterparse to support incremental parsing of large files.
Speedup over Dom is at least 2x for smallish files (~1Mb) to orders of
magnitude for larger files (~10Mb).
**Vasp results**
.. attribute:: ionic_steps
All ionic steps in the run as a list of
{"structure": structure at end of run,
"electronic_steps": {All electronic step data in vasprun file},
"stresses": stress matrix}
.. attribute:: tdos
Total dos calculated at the end of run.
.. attribute:: idos
Integrated dos calculated at the end of run.
.. attribute:: pdos
List of list of PDos objects. Access as pdos[atomindex][orbitalindex]
.. attribute:: efermi
Fermi energy
.. attribute:: eigenvalues
Available only if parse_eigen=True. Final eigenvalues as a dict of
{(spin, kpoint index):[[eigenvalue, occu]]}.
This representation is based on actual ordering in VASP and is meant as
an intermediate representation to be converted into proper objects. The
kpoint index is 0-based (unlike the 1-based indexing in VASP).
.. attribute:: projected_eigenvalues
Final projected eigenvalues as a dict of {spin: nd-array}. To access
a particular value, you need to do
Vasprun.projected_eigenvalues[spin][kpoint index][band index][atom index][orbital_index]
This representation is based on actual ordering in VASP and is meant as
an intermediate representation to be converted into proper objects. The
kpoint, band and atom indices are 0-based (unlike the 1-based indexing
in VASP).
.. attribute:: projected_magnetisation
Final projected magnetisation as a numpy array with the shape (nkpoints, nbands,
natoms, norbitals, 3). Where the last axis is the contribution in the 3
cartesian directions. This attribute is only set if spin-orbit coupling
(LSORBIT = True) or non-collinear magnetism (LNONCOLLINEAR = True) is turned
on in the INCAR.
.. attribute:: other_dielectric
Dictionary, with the tag comment as key, containing other variants of
the real and imaginary part of the dielectric constant (e.g., computed
by RPA) in function of the energy (frequency). Optical properties (e.g.
absorption coefficient) can be obtained through this.
The data is given as a tuple of 3 values containing each of them
the energy, the real part tensor, and the imaginary part tensor
([energies],[[real_partxx,real_partyy,real_partzz,real_partxy,
real_partyz,real_partxz]],[[imag_partxx,imag_partyy,imag_partzz,
imag_partxy, imag_partyz, imag_partxz]])
.. attribute:: nionic_steps
The total number of ionic steps. This number is always equal
to the total number of steps in the actual run even if
ionic_step_skip is used.
.. attribute:: force_constants
Force constants computed in phonon DFPT run(IBRION = 8).
The data is a 4D numpy array of shape (natoms, natoms, 3, 3).
.. attribute:: normalmode_eigenvals
Normal mode frequencies.
1D numpy array of size 3*natoms.
.. attribute:: normalmode_eigenvecs
Normal mode eigen vectors.
3D numpy array of shape (3*natoms, natoms, 3).
**Vasp inputs**
.. attribute:: incar
Incar object for parameters specified in INCAR file.
.. attribute:: parameters
Incar object with parameters that vasp actually used, including all
defaults.
.. attribute:: kpoints
Kpoints object for KPOINTS specified in run.
.. attribute:: actual_kpoints
List of actual kpoints, e.g.,
[[0.25, 0.125, 0.08333333], [-0.25, 0.125, 0.08333333],
[0.25, 0.375, 0.08333333], ....]
.. attribute:: actual_kpoints_weights
List of kpoint weights, E.g.,
[0.04166667, 0.04166667, 0.04166667, 0.04166667, 0.04166667, ....]
.. attribute:: atomic_symbols
List of atomic symbols, e.g., ["Li", "Fe", "Fe", "P", "P", "P"]
.. attribute:: potcar_symbols
List of POTCAR symbols. e.g.,
["PAW_PBE Li 17Jan2003", "PAW_PBE Fe 06Sep2000", ..]
Author: <NAME>
"""
def __init__(
self,
filename,
ionic_step_skip=None,
ionic_step_offset=0,
parse_dos=True,
parse_eigen=True,
parse_projected_eigen=False,
parse_potcar_file=True,
occu_tol=1e-8,
separate_spins=False,
exception_on_bad_xml=True,
):
"""
Args:
filename (str): Filename to parse
ionic_step_skip (int): If ionic_step_skip is a number > 1,
only every ionic_step_skip ionic steps will be read for
structure and energies. This is very useful if you are parsing
very large vasprun.xml files and you are not interested in every
single ionic step. Note that the final energies may not be the
actual final energy in the vasprun.
ionic_step_offset (int): Used together with ionic_step_skip. If set,
the first ionic step read will be offset by the amount of
ionic_step_offset. For example, if you want to start reading
every 10th structure but only from the 3rd structure onwards,
set ionic_step_skip to 10 and ionic_step_offset to 3. Main use
case is when doing statistical structure analysis with
extremely long time scale multiple VASP calculations of
varying numbers of steps.
parse_dos (bool): Whether to parse the dos. Defaults to True. Set
to False to shave off significant time from the parsing if you
are not interested in getting those data.
parse_eigen (bool): Whether to parse the eigenvalues. Defaults to
True. Set to False to shave off significant time from the
parsing if you are not interested in getting those data.
parse_projected_eigen (bool): Whether to parse the projected
eigenvalues and magnetisation. Defaults to False. Set to True to obtain
projected eigenvalues and magnetisation. **Note that this can take an
extreme amount of time and memory.** So use this wisely.
parse_potcar_file (bool/str): Whether to parse the potcar file to read
the potcar hashes for the potcar_spec attribute. Defaults to True,
where no hashes will be determined and the potcar_spec dictionaries
will read {"symbol": ElSymbol, "hash": None}. By Default, looks in
the same directory as the vasprun.xml, with same extensions as
Vasprun.xml. If a string is provided, looks at that filepath.
occu_tol (float): Sets the minimum tol for the determination of the
vbm and cbm. Usually the default of 1e-8 works well enough,
but there may be pathological cases.
separate_spins (bool): Whether the band gap, CBM, and VBM should be
reported for each individual spin channel. Defaults to False,
which computes the eigenvalue band properties independent of
the spin orientation. If True, the calculation must be spin-polarized.
exception_on_bad_xml (bool): Whether to throw a ParseException if a
malformed XML is detected. Default to True, which ensures only
proper vasprun.xml are parsed. You can set to False if you want
partial results (e.g., if you are monitoring a calculation during a
run), but use the results with care. A warning is issued.
"""
self.filename = filename
self.ionic_step_skip = ionic_step_skip
self.ionic_step_offset = ionic_step_offset
self.occu_tol = occu_tol
self.separate_spins = separate_spins
self.exception_on_bad_xml = exception_on_bad_xml
with zopen(filename, "rt") as f:
if ionic_step_skip or ionic_step_offset:
# remove parts of the xml file and parse the string
run = f.read()
steps = run.split("<calculation>")
# The text before the first <calculation> is the preamble!
preamble = steps.pop(0)
self.nionic_steps = len(steps)
new_steps = steps[ionic_step_offset :: int(ionic_step_skip)]
# add the tailing information in the last step from the run
to_parse = "<calculation>".join(new_steps)
if steps[-1] != new_steps[-1]:
to_parse = "{}<calculation>{}{}".format(preamble, to_parse, steps[-1].split("</calculation>")[-1])
else:
to_parse = f"{preamble}<calculation>{to_parse}"
self._parse(
StringIO(to_parse),
parse_dos=parse_dos,
parse_eigen=parse_eigen,
parse_projected_eigen=parse_projected_eigen,
)
else:
self._parse(
f,
parse_dos=parse_dos,
parse_eigen=parse_eigen,
parse_projected_eigen=parse_projected_eigen,
)
self.nionic_steps = len(self.ionic_steps)
if parse_potcar_file:
self.update_potcar_spec(parse_potcar_file)
self.update_charge_from_potcar(parse_potcar_file)
if self.incar.get("ALGO", "") not in ["CHI", "BSE"] and (not self.converged):
msg = "%s is an unconverged VASP run.\n" % filename
msg += "Electronic convergence reached: %s.\n" % self.converged_electronic
msg += "Ionic convergence reached: %s." % self.converged_ionic
warnings.warn(msg, UnconvergedVASPWarning)
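    # Usage sketch (illustration): parse every 10th ionic step starting from
    # the 3rd, as described in the docstring above:
    #   vr = Vasprun("vasprun.xml", ionic_step_skip=10, ionic_step_offset=3)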
def _parse(self, stream, parse_dos, parse_eigen, parse_projected_eigen):
self.efermi = None
self.eigenvalues = None
self.projected_eigenvalues = None
self.projected_magnetisation = None
self.dielectric_data = {}
self.other_dielectric = {}
ionic_steps = []
parsed_header = False
try:
for event, elem in ET.iterparse(stream):
tag = elem.tag
if not parsed_header:
if tag == "generator":
self.generator = self._parse_params(elem)
elif tag == "incar":
self.incar = self._parse_params(elem)
elif tag == "kpoints":
if not hasattr(self, "kpoints"):
(
self.kpoints,
self.actual_kpoints,
self.actual_kpoints_weights,
) = self._parse_kpoints(elem)
elif tag == "parameters":
self.parameters = self._parse_params(elem)
elif tag == "structure" and elem.attrib.get("name") == "initialpos":
self.initial_structure = self._parse_structure(elem)
elif tag == "atominfo":
self.atomic_symbols, self.potcar_symbols = self._parse_atominfo(elem)
self.potcar_spec = [{"titel": p, "hash": None} for p in self.potcar_symbols]
if tag == "calculation":
parsed_header = True
if not self.parameters.get("LCHIMAG", False):
ionic_steps.append(self._parse_calculation(elem))
else:
ionic_steps.extend(self._parse_chemical_shielding_calculation(elem))
elif parse_dos and tag == "dos":
try:
self.tdos, self.idos, self.pdos = self._parse_dos(elem)
self.efermi = self.tdos.efermi
self.dos_has_errors = False
except Exception:
self.dos_has_errors = True
elif parse_eigen and tag == "eigenvalues":
self.eigenvalues = self._parse_eigen(elem)
elif parse_projected_eigen and tag == "projected":
(
self.projected_eigenvalues,
self.projected_magnetisation,
) = self._parse_projected_eigen(elem)
elif tag == "dielectricfunction":
if (
"comment" not in elem.attrib
or elem.attrib["comment"] == "INVERSE MACROSCOPIC DIELECTRIC TENSOR (including "
"local field effects in RPA (Hartree))"
):
if "density" not in self.dielectric_data:
self.dielectric_data["density"] = self._parse_diel(elem)
elif "velocity" not in self.dielectric_data:
# "velocity-velocity" is also named
# "current-current" in OUTCAR
self.dielectric_data["velocity"] = self._parse_diel(elem)
else:
raise NotImplementedError("This vasprun.xml has >2 unlabelled dielectric functions")
else:
comment = elem.attrib["comment"]
# VASP 6+ has labels for the density and current
# derived dielectric constants
if comment == "density-density":
self.dielectric_data["density"] = self._parse_diel(elem)
elif comment == "current-current":
self.dielectric_data["velocity"] = self._parse_diel(elem)
else:
self.other_dielectric[comment] = self._parse_diel(elem)
elif tag == "varray" and elem.attrib.get("name") == "opticaltransitions":
self.optical_transition = np.array(_parse_varray(elem))
elif tag == "structure" and elem.attrib.get("name") == "finalpos":
self.final_structure = self._parse_structure(elem)
elif tag == "dynmat":
hessian, eigenvalues, eigenvectors = self._parse_dynmat(elem)
natoms = len(self.atomic_symbols)
hessian = np.array(hessian)
                    self.force_constants = np.zeros((natoms, natoms, 3, 3), dtype="double")
from __future__ import division
import numpy as np
from scipy import linalg
from matplotlib import pyplot as plt
from fatiando.gravmag import sphere
from fatiando import mesher, gridder, utils
from fatiando.vis import mpl
import scipy.special
import scipy.interpolate
class GeometricElement(object):
"""
Base class for all geometric elements.
"""
def __init__(self, props):
self.props = {}
if props is not None:
for p in props:
self.props[p] = props[p]
def addprop(self, prop, value):
"""
Add a physical property to this geometric element.
If it already has the property, the given value will overwrite the
existing one.
Parameters:
* prop : str
Name of the physical property.
* value : float
The value of this physical property.
"""
self.props[prop] = value
class Ellipsoid (GeometricElement):
'''
'''
def __init__(self, xp, yp, zp, xc, yc, zc, a, b, c, alfa, delta, gamma, props):
GeometricElement.__init__(self, props)
self.xc = float(xc)
self.yc = float(yc)
self.zc = float(zc)
self.a = float(a)
self.b = float(b)
self.c = float(c)
self.alfa = float(alfa)
self.delta = float(delta)
self.gamma = float(gamma)
self.xp = (xp)
self.yp = (yp)
self.zp = (zp)
self.l1 = self.l1_v()
self.l2 = self.l2_v()
self.l3 = self.l3_v()
self.m1 = self.m1_v()
self.m2 = self.m2_v()
self.m3 = self.m3_v()
self.n1 = self.n1_v()
self.n2 = self.n2_v()
self.n3 = self.n3_v()
self.ln = np.cos(self.props['remanence'][2])*np.cos(self.props['remanence'][1])
self.mn = np.sin(self.props['remanence'][2])*np.cos(self.props['remanence'][1])
self.nn = np.sin(self.props['remanence'][1])
self.mcon = np.array([[self.l1, self.m1, self.n1],[self.l2, self.m2, self.n2],[self.l3, self.m3, self.n3]])
self.mconT = (self.mcon).T
self.k_dec = np.array([[props['k1'][2]],[props['k2'][2]],[props['k3'][2]]])
self.k_int = np.array([[props['k1'][0]],[props['k2'][0]],[props['k3'][0]]])
self.k_inc = np.array([[props['k1'][1]],[props['k2'][1]],[props['k3'][1]]])
if self.k_int[0] == self.k_int[1] and self.k_int[0] == self.k_int[2]:
self.km = self.k_matrix2 ()
else:
self.Lr = self.Lr_v ()
self.Mr = self.Mr_v ()
self.Nr = self.Nr_v ()
self.km = self.k_matrix ()
self.x1 = self.x1_e()
self.x2 = self.x2_e()
self.x3 = self.x3_e()
self.p0 = self.p0_e()
self.p1 = self.p1_e()
self.p2 = self.p2_e()
self.p = self.p_e()
self.q = self.q_e()
self.teta = self.teta_e()
self.lamb = self.lamb_e()
self.F,self.E,self.F2,self.E2,self.k,self.teta_linha = self.parametros_integrais()
self.JN = self.JN_e ()
self.N1,self.N2,self.N3 = self.N_desmag ()
def __str__(self):
"""Return a string representation of the ellipsoids."""
names = [('xc', self.xc), ('yc', self.yc), ('zc', self.zc),
('a', self.a), ('b', self.b), ('c', self.c),
('alfa', self.alfa),('delta', self.delta),('gamma', self.gamma)]
names.extend((p, self.props[p]) for p in sorted(self.props))
return ' | '.join('%s:%g' % (n, v) for n, v in names)
    def l1_v (self):
        '''
        Orientation of the ellipsoid with respect to the x axis.
        input:
        alfa - Azimuth with respect to the major axis. (0<=alfa<=360)
        delta - Inclination with respect to the major axis. (0<=delta<=90)
        output:
        Direction cosine.
        '''
        l1 = (-np.cos(self.alfa)*np.cos(self.delta))
        return l1
    def l2_v (self):
        '''
        Orientation of the ellipsoid with respect to the y axis.
        input:
        alfa - Azimuth with respect to the major axis. (0<=alfa<=360)
        delta - Inclination with respect to the major axis. (0<=delta<=90)
        gamma - Angle between the major axis and the vertical projection of the ellipsoid center onto the plane. (-90<=gamma<=90)
        output:
        Direction cosine.
        '''
        l2 = (np.cos(self.alfa)*np.cos(self.gamma)*np.sin(self.delta)+np.sin(self.alfa)*np.sin(self.gamma))
        return l2
    def l3_v (self):
        '''
        Orientation of the ellipsoid with respect to the z axis.
        input:
        alfa - Azimuth with respect to the major axis. (0<=alfa<=360)
        delta - Inclination with respect to the major axis. (0<=delta<=90)
        gamma - Angle between the major axis and the vertical projection of the ellipsoid center onto the plane. (-90<=gamma<=90)
        output:
        Direction cosine.
        '''
        l3 = (np.sin(self.alfa)*np.cos(self.gamma)-np.cos(self.alfa)*np.sin(self.gamma)*np.sin(self.delta))
        return l3
    def m1_v (self):
        '''
        Orientation of the ellipsoid with respect to the x axis.
        input:
        alfa - Azimuth with respect to the major axis. (0<=alfa<=360)
        delta - Inclination with respect to the major axis. (0<=delta<=90)
        output:
        Direction cosine.
        '''
        m1 = (-np.sin(self.alfa)*np.cos(self.delta))
        return m1
    def m2_v (self):
        '''
        Orientation of the ellipsoid with respect to the y axis.
        input:
        alfa - Azimuth with respect to the major axis. (0<=alfa<=360)
        delta - Inclination with respect to the major axis. (0<=delta<=90)
        gamma - Angle between the major axis and the vertical projection of the ellipsoid center onto the plane. (-90<=gamma<=90)
        output:
        Direction cosine.
        '''
        m2 = (np.sin(self.alfa)*np.cos(self.gamma)*np.sin(self.delta)-np.cos(self.alfa)*np.sin(self.gamma))
        return m2
    def m3_v (self):
        '''
        Orientation of the ellipsoid with respect to the z axis.
        input:
        alfa - Azimuth with respect to the major axis. (0<=alfa<=360)
        delta - Inclination with respect to the major axis. (0<=delta<=90)
        gamma - Angle between the major axis and the vertical projection of the ellipsoid center onto the plane. (-90<=gamma<=90)
        output:
        Direction cosine.
        '''
        m3 = (-np.cos(self.alfa)*np.cos(self.gamma)-np.sin(self.alfa)*np.sin(self.gamma)*np.sin(self.delta))
        return m3
    def n1_v (self):
        '''
        Orientation of the ellipsoid with respect to the x axis.
        input:
        delta - Inclination with respect to the major axis. (0<=delta<=90)
        output:
        Direction cosine.
        '''
        n1 = (-np.sin(self.delta))
        return n1
    def n2_v (self):
        '''
        Orientation of the ellipsoid with respect to the y axis.
        input:
        delta - Inclination with respect to the major axis. (0<=delta<=90)
        gamma - Angle between the major axis and the vertical projection of the ellipsoid center onto the plane. (-90<=gamma<=90)
        output:
        Direction cosine.
        '''
        n2 = (-np.cos(self.gamma)*np.cos(self.delta))
        return n2
    def n3_v (self):
        '''
        Orientation of the ellipsoid with respect to the z axis.
        input:
        delta - Inclination with respect to the major axis. (0<=delta<=90)
        gamma - Angle between the major axis and the vertical projection of the ellipsoid center onto the plane. (-90<=gamma<=90)
        output:
        Direction cosine.
        '''
        n3 = (np.sin(self.gamma)*np.cos(self.delta))
        return n3
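    # Note (illustration): (l1, m1, n1), (l2, m2, n2) and (l3, m3, n3) are the
    # rows of the rotation matrix self.mcon built in __init__, so for any
    # alfa/delta/gamma, self.mcon @ self.mcon.T should be close to np.eye(3).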
    def x1_e (self):
        '''
        Computes the x coordinate in the ellipsoid system.
        input:
        xp,yp - Matrix: geographic coordinates (grid).
        h - Depth of the ellipsoid.
        l1,m1,n1 - Orientation of the ellipsoid (x axis).
        output:
        x1 - x coordinate in the ellipsoid system.
        '''
        x1 = (self.xp-self.xc)*self.l1+(self.yp-self.yc)*self.m1+(self.zp-self.zc)*self.n1
        return x1
    def x2_e (self):
        '''
        Computes the y coordinate in the ellipsoid system.
        input:
        xp,yp - Matrix: geographic coordinates (grid).
        h - Depth of the ellipsoid.
        l2,m2,n2 - Orientation of the ellipsoid (y axis).
        output:
        x2 - y coordinate in the ellipsoid system.
        '''
        x2 = (self.xp-self.xc)*self.l2+(self.yp-self.yc)*self.m2+(self.zp-self.zc)*self.n2
        return x2
    def x3_e (self):
        '''
        Computes the z coordinate in the ellipsoid system.
        input:
        xp,yp - Matrix: geographic coordinates (grid).
        h - Depth of the ellipsoid.
        l3,m3,n3 - Orientation of the ellipsoid (z axis).
        output:
        x3 - z coordinate in the ellipsoid system.
        '''
        x3 = (self.xp-self.xc)*self.l3+(self.yp-self.yc)*self.m3+(self.zp-self.zc)*self.n3
        return x3
def Lr_v (self):
'''
Cossenos diretores dos eixos dos vetores de susceptibilidade magnetica.
input:
k_dec - declinacoes dos vetores de susceptibilidade.
k_inc - inclinacoes dos vetores de susceptibilidade.
'''
Lr = np.zeros(3)
for i in range (3):
Lr[i] = np.cos(self.k_dec[i])*np.cos(self.k_inc[i])
return Lr
def Mr_v (self):
'''
Cossenos diretores dos eixos dos vetores de susceptibilidade magnetica.
input:
k_dec - declinacoes dos vetores de susceptibilidade.
k_inc - inclinacoes dos vetores de susceptibilidade.
'''
Mr = np.zeros(3)
for i in range (3):
Mr[i] = np.sin(self.k_dec[i])*np.cos(self.k_inc[i])
return Mr
def Nr_v (self):
'''
Cossenos diretores dos eixos dos vetores de susceptibilidade magnetica.
input:
k_inc - inclinacoes dos vetores de susceptibilidade.
'''
Nr = np.zeros(3)
for i in range (3):
Nr[i] = np.sin(self.k_inc[i])
return Nr
def JN_e (self):
'''
Transformation of the remanent magnetization vector to coordinates along the ellipsoid axes.
'''
JN = self.props['remanence'][0]*np.ravel(np.array([[(self.ln*self.l1+self.mn*self.m1+self.nn*self.n1)], [(self.ln*self.l2+self.mn*self.m2+self.nn*self.n2)], [(self.ln*self.l3+self.mn*self.m3+self.nn*self.n3)]]))
return JN
def N_desmag (self):
'''
Demagnetizing factors along the three ellipsoid semi-axes (N1, N2, N3).
'''
N1 = ((4.*np.pi*self.a*self.b*self.c)/((self.a**2-self.b**2)*(self.a**2-self.c**2)**0.5)) * (self.F2-self.E2)
N2 = (((4.*np.pi*self.a*self.b*self.c)*(self.a**2-self.c**2)**0.5)/((self.a**2-self.b**2)*(self.b**2-self.c**2))) * (self.E2 - ((self.b**2-self.c**2)/(self.a**2-self.c**2)) * self.F2 - ((self.c*(self.a**2-self.b**2))/(self.a*self.b*(self.a**2-self.c**2)**0.5)))
N3 = ((4.*np.pi*self.a*self.b*self.c)/((self.b**2-self.c**2)*(self.a**2-self.c**2)**0.5)) * (((self.b*(self.a**2-self.c**2)**0.5)/(self.a*self.c)) - self.E2)
return N1, N2, N3
def k_matrix (self):
'''
Susceptibility tensor matrix.
'''
l = np.array([[self.l1],[self.l2],[self.l3]])
m = np.array([[self.m1],[self.m2],[self.m3]])
n = np.array([[self.n1],[self.n2],[self.n3]])
k = np.zeros([3,3])
for i in range (3):
for j in range (3):
for r in range (3):
k[i,j] = k[i,j] + (self.k_int[r]*(self.Lr[r]*l[i] + self.Mr[r]*m[i] + self.Nr[r]*n[i])*(self.Lr[r]*l[j] + self.Mr[r]*m[j] + self.Nr[r]*n[j]))
return k
def k_matrix2 (self):
'''
Susceptibility tensor matrix (susceptibility axes coincident with the ellipsoid axes).
'''
l = np.array([[self.l1],[self.l2],[self.l3]])
m = np.array([[self.m1],[self.m2],[self.m3]])
n = np.array([[self.n1],[self.n2],[self.n3]])
k = np.zeros([3,3])
for i in range (3):
for j in range (3):
for r in range (3):
k[i,j] = k[i,j] + (self.k_int[r]*(l[r]*l[i] + m[r]*m[i] + n[r]*n[i])*(l[r]*l[j] + m[r]*m[j] + n[r]*n[j]))
return k
# Auxiliary computations
def p0_e (self):
'''
Coefficient of the cubic equation: s^3 + p2*s^2 + p1*s + p0 = 0
input:
a,b,c - Ellipsoid semi-axes.
x1,x2,x3 - Coordinates in the ellipsoid frame.
output:
p0 - Coefficient.
'''
p0 = (self.a*self.b*self.c)**2-(self.b*self.c*self.x1)**2-(self.c*self.a*self.x2)**2-(self.a*self.b*self.x3)**2
return p0
def p1_e (self):
'''
Coefficient of the cubic equation: s^3 + p2*s^2 + p1*s + p0 = 0
input:
a,b,c - Ellipsoid semi-axes.
x1,x2,x3 - Coordinates in the ellipsoid frame.
output:
p1 - Coefficient.
'''
p1 = (self.a*self.b)**2+(self.b*self.c)**2+(self.c*self.a)**2-(self.b**2+self.c**2)*self.x1**2-(self.c**2+self.a**2)*self.x2**2-(self.a**2+self.b**2)*self.x3**2
return p1
def p2_e (self):
'''
Coefficient of the cubic equation: s^3 + p2*s^2 + p1*s + p0 = 0
input:
a,b,c - Ellipsoid semi-axes.
x1,x2,x3 - Coordinates in the ellipsoid frame.
output:
p2 - Coefficient.
'''
p2 = self.a**2+self.b**2+self.c**2-self.x1**2-self.x2**2-self.x3**2
return p2
def p_e (self):
'''
Constant of the depressed cubic.
input:
p1,p2 - coefficients of the cubic equation.
output:
p - constant.
'''
p = self.p1-(self.p2**2)/3.
return p
def q_e (self):
'''
Constant of the depressed cubic.
input:
p0,p1,p2 - coefficients of the cubic equation.
output:
q - constant.
'''
q = self.p0-((self.p1*self.p2)/3.)+2*(self.p2/3.)**3
return q
def teta_e (self):
'''
Angular constant (radians).
input:
p - constant of the depressed cubic.
q - constant.
output:
teta - angular constant.
'''
teta = np.arccos(-self.q/(2*np.sqrt((-self.p/3.)**3)))
return teta
def lamb_e (self):
'''
Largest real root of the cubic equation: s^3 + p2*s^2 + p1*s + p0 = 0
input:
p,p2 - coefficients of the cubic equation.
teta - angular constant (radians).
output:
lamb - largest real root.
'''
lamb = 2.*((-self.p/3.)**0.5)*np.cos(self.teta/3.)-(self.p2/3.)
return lamb
def parametros_integrais(self):
'''
a: scalar - major semi-axis
b: scalar - intermediate semi-axis
c: scalar - minor semi-axis
lamb - largest real root of the cubic equation.
'''
k = np.zeros_like(self.lamb)
k1 = ((self.a**2-self.b**2)/(self.a**2-self.c**2))**0.5
k.fill(k1)
k2 = ((self.a**2-self.b**2)/(self.a**2-self.c**2))**0.5
teta_linha = np.arcsin(((self.a**2-self.c**2)/(self.a**2+self.lamb))**0.5)
teta_linha2 = np.arccos(self.c/self.a)
F = scipy.special.ellipkinc(teta_linha, k)
E = scipy.special.ellipeinc(teta_linha, k)
F2 = scipy.special.ellipkinc(teta_linha2, k2)
E2 = scipy.special.ellipeinc(teta_linha2, k2)
return F,E,F2,E2,k,teta_linha
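# Hedged sanity-check sketch: each direction-cosine triplet (l, m, n) built by
# the methods above should form a unit vector, and in this 4*pi formulation the
# demagnetizing factors are expected to satisfy N1 + N2 + N3 ~= 4*pi. `e` is
# assumed to be an instance of the ellipsoid class above with these attributes
# already computed.
def _check_ellipsoid_geometry(e, tol=1e-6):
    axes = [(e.l1, e.m1, e.n1), (e.l2, e.m2, e.n2), (e.l3, e.m3, e.n3)]
    unit_axes = all(abs(l**2 + m**2 + n**2 - 1.0) < tol for (l, m, n) in axes)
    demag_sum = abs(e.N1 + e.N2 + e.N3 - 4.0 * np.pi) < tol
    return unit_axes, demag_sum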
def elipsoide (xp,yp,zp,inten,inc,dec,ellipsoids):
'''
Computes the three components of the magnetic field of an ellipsoid.
a: scalar - major semi-axis
b: scalar - intermediate semi-axis
c: scalar - minor semi-axis
h: scalar - depth
alfa: scalar - azimuth of the ellipsoid with respect to "a"
delta: scalar - inclination of the ellipsoid with respect to "a"
gamma: scalar - angle between the semi-axis "b" and the projection of the ellipsoid center onto the xy plane
xp: array - grid along the x axis
yp: array - grid along the y axis
zp: array - grid along the z axis
xc: scalar - x position of the ellipsoid center
yc: scalar - y position of the ellipsoid center
J: vector - magnetization of the body
'''
# Compute the resultant magnetization vector
lt = ln_v (dec, inc)
mt = mn_v (dec, inc)
nt = nn_v (inc)
Ft = F_e (inten,lt,mt,nt,ellipsoids.l1,ellipsoids.l2,ellipsoids.l3,ellipsoids.m1,ellipsoids.m2,ellipsoids.m3,ellipsoids.n1,ellipsoids.n2,ellipsoids.n3)
JR = JR_e (ellipsoids.km,ellipsoids.JN,Ft)
JRD = JRD_e (ellipsoids.km,ellipsoids.N1,ellipsoids.N2,ellipsoids.N3,JR)
JRD_carte = (ellipsoids.mconT).dot(JRD)
JRD_ang = utils.vec2ang(JRD_carte)
#print JRD_ang
# Derivatives of lambda with respect to the positions
dlambx1 = dlambx1_e (ellipsoids.a,ellipsoids.b,ellipsoids.c,ellipsoids.x1,ellipsoids.x2,ellipsoids.x3,ellipsoids.lamb)
dlambx2 = dlambx2_e (ellipsoids.a,ellipsoids.b,ellipsoids.c,ellipsoids.x1,ellipsoids.x2,ellipsoids.x3,ellipsoids.lamb)
dlambx3 = dlambx3_e (ellipsoids.a,ellipsoids.b,ellipsoids.c,ellipsoids.x1,ellipsoids.x2,ellipsoids.x3,ellipsoids.lamb)
#print dlambx1,dlambx2,dlambx3
# Compute the elliptic integrals
A, B, C = integrais_elipticas(ellipsoids.a,ellipsoids.b,ellipsoids.c,ellipsoids.k,ellipsoids.teta_linha,ellipsoids.F,ellipsoids.E)
# Geometry for computing B (ellipsoid axes)
cte = cte_m (ellipsoids.a,ellipsoids.b,ellipsoids.c,ellipsoids.lamb)
V1, V2, V3 = v_e (ellipsoids.a,ellipsoids.b,ellipsoids.c,ellipsoids.x1,ellipsoids.x2,ellipsoids.x3,ellipsoids.lamb)
# Geometry matrix for B1
m11 = (cte*dlambx1*V1) - A
m12 = cte*dlambx1*V2
m13 = cte*dlambx1*V3
# Geometry matrix for B2
m21 = cte*dlambx2*V1
m22 = (cte*dlambx2*V2) - B
m23 = cte*dlambx2*V3
# Geometry matrix for B3
m31 = cte*dlambx3*V1
m32 = cte*dlambx3*V2
m33 = (cte*dlambx3*V3) - C
# Forward problem (compute the external field in ellipsoid coordinates)
B1 = B1_e (m11,m12,m13,JRD,ellipsoids.a,ellipsoids.b,ellipsoids.c)
B2 = B2_e (m21,m22,m23,JRD,ellipsoids.a,ellipsoids.b,ellipsoids.c)
B3 = B3_e (m31,m32,m33,JRD,ellipsoids.a,ellipsoids.b,ellipsoids.c)
# Forward problem (compute the external field in geographic coordinates)
Bx = Bx_c (B1,B2,B3,ellipsoids.l1,ellipsoids.l2,ellipsoids.l3)
By = By_c (B1,B2,B3,ellipsoids.m1,ellipsoids.m2,ellipsoids.m3)
Bz = Bz_c (B1,B2,B3,ellipsoids.n1,ellipsoids.n2,ellipsoids.n3)
return Bx,By,Bz
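# Hedged sketch: the Cartesian components returned by `elipsoide` can be
# projected onto the main-field direction to approximate the total-field
# anomaly. This assumes the module-level helpers ln_v/mn_v/nn_v used above
# return the direction cosines of a (declination, inclination) pair.
def _total_field_anomaly(bx, by, bz, inc, dec):
    """Approximate total-field anomaly by projecting (bx, by, bz) onto the main field."""
    return bx * ln_v(dec, inc) + by * mn_v(dec, inc) + bz * nn_v(dec, inc)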
# Forward problem (compute the external field and anomaly in geographic coordinates, SI units)
def bx_c(xp,yp,zp,inten,inc,dec,ellipsoids):
if not (xp.shape == yp.shape == zp.shape):
    raise ValueError("Input arrays xp, yp, and zp must have the same shape!")
size = len(xp)
res = np.zeros(size, dtype=float)
ctemag = 1
for i in range(len(ellipsoids)):
bx,by,bz = elipsoide (xp,yp,zp,inten,inc,dec,ellipsoids[i])
res += bx
res = res*ctemag
return res
def by_c(xp,yp,zp,inten,inc,dec,ellipsoids):
if not (xp.shape == yp.shape == zp.shape):
    raise ValueError("Input arrays xp, yp, and zp must have the same shape!")
size = len(xp)
res = np.zeros(size, dtype=float)
ctemag = 1
for i in range(len(ellipsoids)):
bx,by,bz = elipsoide (xp,yp,zp,inten,inc,dec,ellipsoids[i])
res += by
res = res*ctemag
return res
def bz_c(xp,yp,zp,inten,inc,dec,ellipsoids):
if not (xp.shape == yp.shape == zp.shape):
    raise ValueError("Input arrays xp, yp, and zp must have the same shape!")
size = len(xp)
res = np.zeros(size, dtype=float)
ctemag = 1
for i in range(len(ellipsoids)):
    bx,by,bz = elipsoide (xp,yp,zp,inten,inc,dec,ellipsoids[i])
    res += bz
res = res*ctemag
return res
import numpy as np
class ExpansionProcedure:
"""Function which takes a node::PartitionNode and returns a set of node obtained by partitioning the cell associated to the node.
"""
def _get_side_to_split(self, node):
side_lengths = np.abs(node.partition[:, 1] - node.partition[:, 0])
max_len = np.max(side_lengths)
return np.argmax(side_lengths)
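# Hedged usage sketch: `PartitionNode` is assumed to expose a `partition`
# attribute of shape (n_dims, 2) holding [lower, upper] bounds per dimension;
# a minimal stand-in is used here purely for illustration.
class _FakeNode:
    def __init__(self, partition):
        self.partition = np.asarray(partition)

# The second side spans [0, 3] and is the longest, so index 1 is returned.
assert ExpansionProcedure()._get_side_to_split(_FakeNode([[0.0, 1.0], [0.0, 3.0]])) == 1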
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 1 10:26:50 2019
@author: joaor
"""
import numpy as np
import pandas as pd
n_instances = 400
n_time_points = 5
def generate_binomial_1(n_instances,n_time_points):
n_features=2
data = np.zeros([n_instances, n_features*n_time_points])
data[:,0] = np.random.binomial(1, 0.5, n_instances)
labels = np.zeros([n_instances, 1])
for i in range(0,n_instances):
labels[i] = np.random.binomial(1, 0.5, 1)
#LABEL 0
if labels[i] == 0:
if data[i,0] == 0:
data[i,1] = np.random.binomial(1, 0.1, 1)
else:
data[i,1] = np.random.binomial(1, 0.9, 1)
for t in range(n_time_points-1):
if data[i,t*n_features] == 0 and data[i,t*n_features+1] == 0:
data[i,t*n_features+2] = np.random.binomial(1, 0.1, 1)
data[i,t*n_features+3] = np.random.binomial(1, 0.1, 1)
elif data[i,t*n_features] == 1 and data[i,t*n_features+1] == 1:
data[i,t*n_features+2] = np.random.binomial(1, 0.9, 1)
data[i,t*n_features+3] = np.random.binomial(1, 0.9, 1)
else:
data[i,t*n_features+2] = np.random.binomial(1, 0.5, 1)
data[i,t*n_features+3] = np.random.binomial(1, 0.5, 1)
#LABEL 1
elif labels[i] == 1:
if data[i,0] == 0:
data[i,1] = np.random.binomial(1, 0.1, 1)
else:
data[i,1] = np.random.binomial(1, 0.9, 1)
for t in range(n_time_points-1):
if data[i,t*n_features] == 0 and data[i,t*n_features+1] == 0:
data[i,t*n_features+2] = np.random.binomial(1, 0.9, 1)
data[i,t*n_features+3] = np.random.binomial(1, 0.9, 1)
elif data[i,t*n_features] == 1 and data[i,t*n_features+1] == 1:
data[i,t*n_features+2] = np.random.binomial(1, 0.1, 1)
data[i,t*n_features+3] = np.random.binomial(1, 0.1, 1)
else:
data[i,t*n_features+2] = np.random.binomial(1, 0.5, 1)
data[i,t*n_features+3] = np.random.binomial(1, 0.5, 1)
col = []
for t in range(n_time_points):
for f in range(n_features):
col.append("X"+str(f)+"__"+str(t))
df = pd.DataFrame(data=data, # values
index=list(range(n_instances)), # 1st column as index
columns=col)
df.index.name = 'subject_id'
labels_df = pd.DataFrame(data=labels, # values
index=list(range(n_instances)), # 1st column as index
columns=['label'])
labels_df.index.name = 'subject_id'
df.to_csv('binomial_1_'+str(n_time_points)+'_parsed.csv',quoting=1)
labels_df.to_csv('binomial_1_'+str(n_time_points)+'_target.csv',quoting=1)
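# Minimal usage sketch for the generator above; the file name matches the
# to_csv call, and the shape check assumes 2 features per time point.
def _demo_binomial_1():
    generate_binomial_1(n_instances, n_time_points)
    df_check = pd.read_csv('binomial_1_' + str(n_time_points) + '_parsed.csv',
                           index_col='subject_id')
    assert df_check.shape == (n_instances, 2 * n_time_points)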
def generate_binomial_2(n_instances,n_time_points):
n_features=5
data = np.zeros([n_instances, n_features*n_time_points])
data[:,0] = np.random.binomial(1, 0.5, n_instances)
data[:,1] = np.random.binomial(1, 0.5, n_instances)
labels = np.zeros([n_instances, 1])
for i in range(0,n_instances):
labels[i] = np.random.binomial(1, 0.5, 1)
#LABEL 0
if labels[i] == 0:
if data[i,1] == 0:
data[i,2] = np.random.binomial(1, 0.9, 1)
data[i,3] = np.random.binomial(1, 0.1, 1)
else:
data[i,2] = np.random.binomial(1, 0.1, 1)
data[i,3] = np.random.binomial(1, 0.9, 1)
if data[i,2] == 0 and data[i,3] == 1:
data[i,4] = np.random.binomial(1, 0.1, 1)
elif data[i,2] == 1 and data[i,3] == 0:
data[i,4] = np.random.binomial(1, 0.9, 1)
else:
data[i,4] = np.random.binomial(1, 0.5, 1)
for t in range(n_time_points-1):
if data[i,t*n_features] == 0:
data[i,t*n_features+5] = np.random.binomial(1, 0.7, 1)
else:
data[i,t*n_features+5] = np.random.binomial(1, 0.3, 1)
if data[i,t*n_features+5] == 0:
data[i,t*n_features+6] = np.random.binomial(1, 0.1, 1)
else:
data[i,t*n_features+6] = np.random.binomial(1, 0.9, 1)
if data[i,t*n_features+6] == 0:
data[i,t*n_features+7] = np.random.binomial(1, 0.9, 1)
data[i,t*n_features+8] = np.random.binomial(1, 0.1, 1)
else:
data[i,t*n_features+7] = np.random.binomial(1, 0.1, 1)
data[i,t*n_features+8] = np.random.binomial(1, 0.9, 1)
if data[i,t*n_features+7] == 0 and data[i,t*n_features+8] == 1:
data[i,t*n_features+9] = np.random.binomial(1, 0.1, 1)
elif data[i,t*n_features+7] == 1 and data[i,t*n_features+8] == 0:
data[i,t*n_features+9] = np.random.binomial(1, 0.9, 1)
else:
data[i,t*n_features+9] = np.random.binomial(1, 0.5, 1)
#LABEL 1
elif labels[i] == 1:
if data[i,1] == 0:
data[i,2] = np.random.binomial(1, 0.1, 1)
data[i,4] = np.random.binomial(1, 0.9, 1)
else:
data[i,2] = np.random.binomial(1, 0.9, 1)
data[i,4] = np.random.binomial(1, 0.1, 1)
if data[i,2] == 1 and data[i,4] == 0:
data[i,3] = np.random.binomial(1, 0.1, 1)
elif data[i,2] == 0 and data[i,4] == 1:
data[i,3] = np.random.binomial(1, 0.9, 1)
else:
data[i,3] = np.random.binomial(1, 0.5, 1)
for t in range(n_time_points-1):
if data[i,t*n_features] == 0:
data[i,t*n_features+5] = np.random.binomial(1, 0.3, 1)
else:
data[i,t*n_features+5] = np.random.binomial(1, 0.7, 1)
if data[i,t*n_features+5] == 0:
data[i,t*n_features+6] = np.random.binomial(1, 0.1, 1)
else:
data[i,t*n_features+6] = np.random.binomial(1, 0.9, 1)
if data[i,t*n_features+6] == 0:
data[i,t*n_features+7] = np.random.binomial(1, 0.1, 1)
data[i,t*n_features+9] = np.random.binomial(1, 0.9, 1)
else:
data[i,t*n_features+7] = np.random.binomial(1, 0.9, 1)
data[i,t*n_features+9] = np.random.binomial(1, 0.1, 1)
if data[i,t*n_features+7] == 1 and data[i,t*n_features+9] == 0:
data[i,t*n_features+8] = np.random.binomial(1, 0.1, 1)
elif data[i,t*n_features+7] == 0 and data[i,t*n_features+9] == 1:
data[i,t*n_features+8] = np.random.binomial(1, 0.9, 1)
else:
data[i,t*n_features+8] = np.random.binomial(1, 0.5, 1)
col = []
for t in range(n_time_points):
for f in range(n_features):
col.append("X"+str(f)+"__"+str(t))
df = pd.DataFrame(data=data, # values
index=list(range(n_instances)), # 1st column as index
columns=col)
df.index.name = 'subject_id'
for t in range(n_time_points):
df.drop(columns=["X0__"+str(t)], inplace=True)
labels_df = pd.DataFrame(data=labels, # values
index=list(range(n_instances)), # 1st column as index
columns=['label'])
labels_df.index.name = 'subject_id'
df.to_csv('binomial_2_'+str(n_time_points)+'_parsed.csv',quoting=1)
labels_df.to_csv('binomial_2_'+str(n_time_points)+'_target.csv',quoting=1)
def generate_binomial_3(n_instances,n_time_points):
n_features=5
data = np.zeros([n_instances, n_features*n_time_points])
data[:,0] = np.random.binomial(1, 0.5, n_instances)
data[:,1] = np.random.binomial(1, 0.5, n_instances)
labels = np.zeros([n_instances, 1])
for i in range(0,n_instances):
labels[i] = np.random.binomial(1, 0.5, 1)
#LABEL 0
if labels[i] == 0:
if data[i,0] == 0:
data[i,2] = np.random.binomial(1, 0.9, 1)
data[i,3] = np.random.binomial(1, 0.7, 1)
else:
data[i,2] = np.random.binomial(1, 0.1, 1)
data[i,3] = np.random.binomial(1, 0.3, 1)
if data[i,1] == 0:
data[i,4] = np.random.binomial(1, 0.9, 1)
else:
data[i,4] = np.random.binomial(1, 0.1, 1)
import os
import numpy as np
import seaborn as sns
import json
from scipy import stats
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score
batches = [1,2,4,8,16,32,64]
model_names = [
"t5-small-lm-adapt",
"t5-base-lm-adapt",
"t5-large-lm-adapt",
"t5-xl-lm-adapt",
]
# batches = [1,2,4,8,16]
model_name = "t5-xl-lm-adapt"
model_keys = [
"S",
"M",
"L",
"XL"
]
id = "XL"
true_latency = []
pred_latency = []
def reject_outliers(data, m = 2.):
    data = np.array(data)
    # Keep only points within m standard deviations of the mean.
    return data[abs(data - np.mean(data)) < m * np.std(data)]
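# Quick sketch of the outlier filter: with m=1, the point 50 lies more than
# one standard deviation from the mean and is dropped.
# >>> reject_outliers([1., 1.1, 0.9, 50.], m=1.)
# array([1. , 1.1, 0.9])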
from typing import Dict, Tuple
import numpy
import math
from HyQPy import HyQObservation
from gym_robo.utils import quaternion_to_euler
from gym_robo.robots import HyQSim
from gym_robo.tasks import HyQState
from gym.spaces import Box
class HyQTask63:
def __init__(self, robot: HyQSim, max_time_step: int = 1000,
accepted_dist_to_bounds=0.01, reach_bounds_penalty=0.0, fall_penalty=0.0,
boundary_radius: float = 0.5, out_of_boundary_penalty: float =0.0):
self.robot = robot
self._max_time_step = max_time_step
self.accepted_dist_to_bounds = accepted_dist_to_bounds
self.reach_bounds_penalty = reach_bounds_penalty
self.fall_penalty = fall_penalty
self.boundary_radius = boundary_radius
self.out_of_boundary_penalty = out_of_boundary_penalty
print(f'-------------------------------Setting task parameters-------------------------------')
print('max_time_step: %8d # Maximum time step before stopping the episode' % self._max_time_step)
print('accepted_dist_to_bounds: %8.7f # Allowable distance to joint limits (radians)' % self.accepted_dist_to_bounds)
print('reach_bounds_penalty: %8.7f # Reward penalty when reaching joint limit' % self.reach_bounds_penalty)
print('fall_penalty: %8.7f # Reward penalty for falling' % self.fall_penalty)
print(f'boundary_radius: {self.boundary_radius} # Radius of boundary that the robot is required to stay in')
print(f'out_of_boundary_penalty: {self.out_of_boundary_penalty} # Penalty given when the robot centre moved outside of the specified boundary')
print(f'-------------------------------------------------------------------------------------')
assert self.accepted_dist_to_bounds >= 0.0, 'Allowable distance to joint limits should be positive'
assert self.fall_penalty >= 0.0, 'Fall penalty should be positive'
assert self.reach_bounds_penalty >= 0.0, 'Reach bounds penalty should be positive'
assert self.out_of_boundary_penalty >= 0.0, 'Out of boundary penalty should be positive'
assert self.boundary_radius >= 0.01, 'Boundary radius too small, expected to be at least greater than 0.01'
self._max_time_step = max_time_step
obs = self.robot.get_observations()
self.initial_coords = numpy.array([obs.pose.position.x, obs.pose.position.y, obs.pose.position.z])
self.previous_coords = numpy.array([obs.pose.position.x, obs.pose.position.y, obs.pose.position.z])
self.previous_action = numpy.zeros((12,))
self.sum_action_deviation: float = 0.0
self.__reset_count: int = 0
self.__reach_count: int = 0
self.__ep_step_num: int = 0
self.__cycle_len: int = 0
self.__gait_step: int = 0
self.__gait_stepC: int = 0
self.__total_step_num: int = 0
self.c: int = 0
self.a_real=0
self.a_noise=0
self.__robot_obs_space = self.robot.get_observation_space()
def is_done(self, obs: HyQObservation) -> Tuple[bool, Dict]:
failed, state = self.__is_failed(obs)
info_dict = {'state': state}
if state != HyQState.Undefined: # Undefined is basically not reaching any of the failure conditions
return failed, info_dict
info_dict['state'] = HyQState.InProgress
return False, info_dict
def compute_reward(self, obs: HyQObservation, state: HyQState, *args) -> Tuple[float, Dict]:
current_coords = numpy.array([obs.pose.position.x, obs.pose.position.y, obs.pose.position.z])
assert state != HyQState.Undefined, f'State cannot be undefined, please check logic'
reward = 0.0
reward_info = {'current_coords': current_coords}
x = abs(current_coords[0] - self.previous_coords[0])
y = abs(current_coords[1] - self.previous_coords[1])
z = abs(current_coords[2] - self.previous_coords[2])
reward = 1.2 - 300*x - 300*y - 300*z
self.previous_coords = current_coords
# Scaling reward penalties
total_penalty_factor = self.__calc_rew_penalty_scale(obs.pose, reward_info)
if reward > 0.0:
reward *= total_penalty_factor
reward_info['standing_reward'] = reward
reward_info["a_real"] = self.a_real
reward_info["a_noise"] = self.a_noise
# Checking Gait
self.__gait_step = (self.__ep_step_num - 1) % self.__cycle_len  # gait step within the current cycle; the timing exactly at 100% of the cycle can be slightly off
self.__gait_stepC = (self.__ep_step_num - 1) % (self.__cycle_len * 2)
self.__gait_stepC = (self.__gait_stepC / (self.__cycle_len * 2)) * 100
num_contact = int(obs.lf_foot_contact) + int(obs.lh_foot_contact) + int(obs.rf_foot_contact) + int(obs.rh_foot_contact)
reward_for_correct_contact = 0.20
if 0 <= self.__gait_stepC <= 10 or 50 < self.__gait_stepC <= 60:
reward += ((reward_for_correct_contact * num_contact) - (0.005*sum(obs.applied_joint_energies)))
elif 10 < self.__gait_stepC <= 50:
if obs.lf_foot_contact: reward += (reward_for_correct_contact - 0.02*sum(obs.applied_joint_energies[0:3])) # else: reward -=1
if obs.rh_foot_contact: reward += (reward_for_correct_contact - 0.02*sum(obs.applied_joint_energies[9:])) # else: reward -=1
if not obs.lh_foot_contact: reward += reward_for_correct_contact # else: reward -=1
if not obs.rf_foot_contact: reward += reward_for_correct_contact # else: reward -=1
elif 60 < self.__gait_stepC <= 100:
if obs.lh_foot_contact: reward += (reward_for_correct_contact - 0.02*sum(obs.applied_joint_energies[3:6])) # else: reward -=1
if obs.rf_foot_contact: reward += (reward_for_correct_contact - 0.02*sum(obs.applied_joint_energies[6:9])) # else: reward -=1
if not obs.lf_foot_contact: reward += reward_for_correct_contact # else: reward -=1
if not obs.rh_foot_contact: reward += reward_for_correct_contact # else: reward -=1
# Check if it has approached any joint limits
if state == HyQState.ApproachJointLimits:
reward -= self.reach_bounds_penalty
# Check if it went out of bounds
if state == HyQState.OutOfBounds:
reward -= self.out_of_boundary_penalty
# Check for fall
if state == HyQState.Fallen:
reward -= self.fall_penalty
act_dev = 0.4 * self.sum_action_deviation
reward_info["act_dev"] = act_dev
reward -= act_dev
ang_vel = 0.5 * (abs(obs.angular_velocity.x) + abs(obs.angular_velocity.y) + abs(obs.angular_velocity.z))
reward_info["ang_vel"] = ang_vel
reward -= ang_vel
reward_info["reward"] = reward
return reward, reward_info
def get_obs_noise_action(self, a_real, a_noise):
action_deviation = abs(numpy.subtract(a_real, a_noise))
self.a_real=a_real
self.a_noise=a_noise
self.sum_action_deviation = sum(action_deviation)
def set_action(self, action: numpy.ndarray) -> None:
b = 5
true_action = numpy.zeros((12,))
Ttrue_action = numpy.zeros((12,))
true_action[0] = -0.14 + action[0]
#true_action[0] = (0.10 * math.sin((self.__ep_step_num - (b * 0.5 * math.pi)) / b) - 0.10) + action[0]
Ttrue_action[1] = 0.81611
Ttrue_action[2] = -1.43081
true_action[3] = -0.14 + action[3]
#true_action[3] = (0.10 * math.sin((self.__ep_step_num - (b * 0.5 * math.pi)) / b) - 0.10) + action[3]
Ttrue_action[4] = (-0.10 * math.sin((self.__ep_step_num - (b * 0.5 * math.pi)) / b) - 0.90)
Ttrue_action[5] = (0.4 * math.sin((self.__ep_step_num - (b * 0.5 * math.pi)) / b) + 1.8)
true_action[6] = -0.14 + action[6]
#true_action[6] = (0.10 * math.sin((self.__ep_step_num - (b * 0.5 * math.pi)) / b) - 0.10) + action[6]
Ttrue_action[7] = (0.10 * math.sin((self.__ep_step_num - (b * 0.5 * math.pi)) / b) + 0.90)
Ttrue_action[8] = (-0.4 * math.sin((self.__ep_step_num - (b * 0.5 * math.pi)) / b) - 1.8)
true_action[9] = -0.14 + action[9]
#true_action[9] = (0.10 * math.sin((self.__ep_step_num - (b * 0.5 * math.pi)) / b) - 0.10) + action[9]
Ttrue_action[10] = -0.82278
Ttrue_action[11] = 1.453150
if self.c % 2 == 0:
true_action[1] = Ttrue_action[1] + action[1]
true_action[2] = Ttrue_action[2] + action[2]
true_action[4] = Ttrue_action[4] + action[4]
true_action[5] = Ttrue_action[5] + action[5]
true_action[7] = Ttrue_action[7] + action[7]
true_action[8] = Ttrue_action[8] + action[8]
true_action[10] = Ttrue_action[10] + action[10]
true_action[11] = Ttrue_action[11] + action[11]
else:
true_action[1] = Ttrue_action[7] + action[1]
true_action[2] = Ttrue_action[8] + action[2]
true_action[4] = Ttrue_action[10] + action[4]
true_action[5] = Ttrue_action[11] + action[5]
true_action[7] = Ttrue_action[1] + action[7]
true_action[8] = Ttrue_action[2] + action[8]
true_action[10] = Ttrue_action[4] + action[10]
true_action[11] = Ttrue_action[5] + action[11]
#print (self.__gait_step1)
if self.__gait_step == 0:
self.c += 1
self.__ep_step_num += 1
self.__cycle_len = int(b * 2 * math.pi)
self.robot.set_action(true_action)
return {}
def reset(self):
obs = self.robot.get_observations()
self.initial_coords = numpy.array([obs.pose.position.x, obs.pose.position.y, obs.pose.position.z])
self.previous_coords = numpy.array([obs.pose.position.x, obs.pose.position.y, obs.pose.position.z])
self.sum_action_deviation = 0.0
self.__reset_count += 1
self.__total_step_num += self.__ep_step_num
self.__ep_step_num = 0
self.__gait_step = 0
self.__gait_stepC = 0
self.c = 0
def get_observations(self, obs_data_struct: HyQObservation):
np_obs = HyQSim.convert_obs_to_numpy(obs_data_struct)
add_observation = numpy.append(self.previous_action, self.__gait_stepC)
return numpy.append(np_obs, add_observation), {}
def get_observation_space(self):
robot_obs_space: Box = self.robot.get_observation_space()
add_low = [-0.2]*12 + [0.0]
add_high = [0.2]*12 + [100.0]
new_low = numpy.append(robot_obs_space.low, add_low)
new_high = numpy.append(robot_obs_space.high, add_high)
return Box(new_low, new_high)
def get_action_space(self):
return Box(-0.2, 0.2, (12,))
def __is_failed(self, obs: HyQObservation) -> Tuple[bool, HyQState]:
info_dict = {'state': HyQState.Undefined}
# Check if time step exceeds limits, i.e. timed out
# Time step starts from 1, that means if we only want to run 2 steps time_step will be 1,2
if self.__ep_step_num >= self._max_time_step:
return True, HyQState.Timeout
if obs.trunk_contact:
return True, HyQState.Fallen
# Check for out of bounds
current_x = obs.pose.position.x
current_y = obs.pose.position.y
current_coords_2d = numpy.array([current_x, current_y])
#initial_coords_2d = numpy.array([0.0, 0.0])
initial_coords_2d = numpy.array([self.initial_coords[0], self.initial_coords[1]])
dist_from_origin = numpy.linalg.norm(current_coords_2d - initial_coords_2d)
if dist_from_origin > self.boundary_radius:
return True, HyQState.OutOfBounds
# Check that joint values are not approaching limits
joint_angles = numpy.array(obs.joint_positions)
import numpy as np
### from https://github.com/rflamary/POT/blob/master/ot/bregman.py ###
def sinkhorn_knopp(a, b, M, reg, numItermax=1000,
stopThr=1e-9, verbose=False, log=False, **kwargs):
r"""
Solve the entropic regularization optimal transport problem and return the OT matrix
The function solves the following optimization problem:
.. math::
\gamma = arg\min_\gamma <\gamma,M>_F + reg\cdot\Omega(\gamma)
s.t. \gamma 1 = a
\gamma^T 1= b
\gamma\geq 0
where :
- M is the (dim_a, dim_b) metric cost matrix
- :math:`\Omega` is the entropic regularization term :math:`\Omega(\gamma)=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})`
- a and b are source and target weights (histograms, both sum to 1)
The algorithm used for solving the problem is the Sinkhorn-Knopp matrix scaling algorithm as proposed in [2]_
Parameters
----------
a : ndarray, shape (dim_a,)
samples weights in the source domain
b : ndarray, shape (dim_b,) or ndarray, shape (dim_b, n_hists)
samples in the target domain, compute sinkhorn with multiple targets
and fixed M if b is a matrix (return OT loss + dual variables in log)
M : ndarray, shape (dim_a, dim_b)
loss matrix
reg : float
Regularization term >0
numItermax : int, optional
Max number of iterations
stopThr : float, optional
Stop threshol on error (>0)
verbose : bool, optional
Print information along iterations
log : bool, optional
record log if True
Returns
-------
gamma : ndarray, shape (dim_a, dim_b)
Optimal transportation matrix for the given parameters
log : dict
log dictionary return only if log==True in parameters
Examples
--------
>>> import ot
>>> a=[.5, .5]
>>> b=[.5, .5]
>>> M=[[0., 1.], [1., 0.]]
>>> ot.sinkhorn(a, b, M, 1)
array([[0.36552929, 0.13447071],
[0.13447071, 0.36552929]])
References
----------
.. [2] <NAME>, Sinkhorn Distances : Lightspeed Computation of Optimal Transport, Advances in Neural Information Processing Systems (NIPS) 26, 2013
See Also
--------
ot.lp.emd : Unregularized OT
ot.optim.cg : General regularized OT
"""
a = np.asarray(a, dtype=np.float64)
b = np.asarray(b, dtype=np.float64)
M = np.asarray(M, dtype=np.float64)
if len(a) == 0:
a = np.ones((M.shape[0],), dtype=np.float64) / M.shape[0]
if len(b) == 0:
b = np.ones((M.shape[1],), dtype=np.float64) / M.shape[1]
# init data
dim_a = len(a)
dim_b = len(b)
if len(b.shape) > 1:
n_hists = b.shape[1]
else:
n_hists = 0
if log:
log = {'err': []}
# we assume that no distances are null except those of the diagonal of
# distances
if n_hists:
u = np.ones((dim_a, n_hists)) / dim_a
v = np.ones((dim_b, n_hists)) / dim_b
else:
u = np.ones(dim_a) / dim_a
v = np.ones(dim_b) / dim_b
# print(reg)
# Next 3 lines equivalent to K= np.exp(-M/reg), but faster to compute
K = np.empty(M.shape, dtype=M.dtype)
np.divide(M, -reg, out=K)
np.exp(K, out=K)
# print(np.min(K))
tmp2 = np.empty(b.shape, dtype=M.dtype)
Kp = (1 / (a+ 1e-299)).reshape(-1, 1) * K
cpt = 0
err = 1
while (err > stopThr and cpt < numItermax):
uprev = u
vprev = v
KtransposeU = np.dot(K.T, u)
v = np.divide(b, (KtransposeU+ 1e-299))
u = 1. / (np.dot(Kp, v)+ 1e-299)
if (np.any(KtransposeU == 0)
or np.any(np.isnan(u)) or np.any(np.isnan(v))
or np.any(np.isinf(u)) or np.any(np.isinf(v))):
# we have reached the machine precision
# come back to previous solution and quit loop
print('Warning: numerical errors at iteration', cpt)
u = uprev
v = vprev
break
if cpt % 10 == 0:
# we can speed up the process by checking for the error only all
# the 10th iterations
if n_hists:
np.einsum('ik,ij,jk->jk', u, K, v, out=tmp2)
else:
# compute right marginal tmp2= (diag(u)Kdiag(v))^T1
np.einsum('i,ij,j->j', u, K, v, out=tmp2)
            err = np.linalg.norm(tmp2 - b)
            if log:
                log['err'].append(err)
            if verbose:
                if cpt % 200 == 0:
                    print('{:5s}|{:12s}'.format('It.', 'Err') + '\n' + '-' * 19)
                print('{:5d}|{:8e}|'.format(cpt, err))
        cpt = cpt + 1
    if log:
        log['u'] = u
        log['v'] = v
    if n_hists:  # return only loss
        res = np.einsum('ik,ij,jk->k', u, K, v)
        if log:
            return res, log
        return res
    else:  # return OT matrix
        if log:
            return u.reshape((-1, 1)) * K * v.reshape((1, -1)), log
        return u.reshape((-1, 1)) * K * v.reshape((1, -1))
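# Minimal usage sketch for the local sinkhorn_knopp above, mirroring the
# docstring example with uniform marginals over two 2-point histograms.
def _demo_sinkhorn():
    a = np.array([.5, .5])
    b = np.array([.5, .5])
    M = np.array([[0., 1.], [1., 0.]])
    G = sinkhorn_knopp(a, b, M, reg=1.)
    # The transport plan reproduces the requested marginals.
    assert np.allclose(G.sum(axis=1), a, atol=1e-6)
    assert np.allclose(G.sum(axis=0), b, atol=1e-6)
    return G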
# -*- coding: utf-8 -*-
# Copyright 2019 the HERA Project
# Licensed under the MIT License
import pytest
import numpy as np
from copy import deepcopy
import warnings
import os
import sys
import shutil
from hera_sim.antpos import linear_array, hex_array
from hera_sim.vis import sim_red_data
from hera_sim.sigchain import gen_gains
from .. import redcal as om
from .. import io, abscal
from ..utils import split_pol, conj_pol, split_bl
from ..apply_cal import calibrate_in_place
from ..data import DATA_PATH
from ..datacontainer import DataContainer
np.random.seed(0)
class TestMethods(object):
def test_check_polLists_minV(self):
polLists = [['xy']]
assert not om._check_polLists_minV(polLists)
polLists = [['xx', 'xy']]
assert not om._check_polLists_minV(polLists)
polLists = [['xx', 'xy', 'yx']]
assert not om._check_polLists_minV(polLists)
polLists = [['xy', 'yx'], ['xx'], ['yy'], ['xx'], ['yx', 'xy'], ['yy']]
assert om._check_polLists_minV(polLists)
def test_parse_pol_mode(self):
reds = [[(0, 1, 'xx')]]
assert om.parse_pol_mode(reds) == '1pol'
reds = [[(0, 1, 'xx')], [(0, 1, 'yy')]]
assert om.parse_pol_mode(reds) == '2pol'
reds = [[(0, 1, 'xx')], [(0, 1, 'xy')], [(0, 1, 'yx')], [(0, 1, 'yy')]]
assert om.parse_pol_mode(reds) == '4pol'
reds = [[(0, 1, 'xx')], [(0, 1, 'xy'), (0, 1, 'yx')], [(0, 1, 'yy')]]
assert om.parse_pol_mode(reds) == '4pol_minV'
reds = [[(0, 1, 'xx')], [(0, 1, 'xy'), (0, 1, 'yx')], [(0, 1, 'LR')]]
assert om.parse_pol_mode(reds) == 'unrecognized_pol_mode'
reds = [[(0, 1, 'xx')], [(0, 1, 'xy')]]
assert om.parse_pol_mode(reds) == 'unrecognized_pol_mode'
reds = [[(0, 1, 'xy')]]
assert om.parse_pol_mode(reds) == 'unrecognized_pol_mode'
reds = [[(0, 1, 'xx')], [(0, 1, 'xy'), (0, 1, 'yy')], [(0, 1, 'yx')]]
assert om.parse_pol_mode(reds) == 'unrecognized_pol_mode'
def test_get_pos_red(self):
pos = hex_array(3, sep=14.6, split_core=False, outriggers=0)
assert len(om.get_pos_reds(pos)) == 30
pos = hex_array(7, sep=14.6, split_core=False, outriggers=0)
assert len(om.get_pos_reds(pos)) == 234
for ant, r in pos.items():
pos[ant] += [0, 0, 1 * r[0] - .5 * r[1]]
assert len(om.get_pos_reds(pos)) == 234
pos = hex_array(7, sep=1, split_core=False, outriggers=0)
assert len(om.get_pos_reds(pos)) < 234
assert len(om.get_pos_reds(pos, bl_error_tol=.1)) == 234
pos = hex_array(7, sep=14.6, split_core=False, outriggers=0)
blerror = 1.0 - 1e-12
error = blerror / 4
for key, val in pos.items():
th = np.random.choice([0, np.pi / 2, np.pi])
phi = np.random.choice([0, np.pi / 2, np.pi, 3 * np.pi / 2])
pos[key] = val + error * np.array([np.sin(th) * np.cos(phi), np.sin(th) * np.sin(phi), np.cos(th)])
assert len(om.get_pos_reds(pos, bl_error_tol=1.0)) == 234
assert len(om.get_pos_reds(pos, bl_error_tol=.99)) > 234
pos = {0: np.array([0, 0, 0]), 1: np.array([20, 0, 0]), 2: np.array([10, 0, 0])}
assert om.get_pos_reds(pos) == [[(0, 2), (2, 1)], [(0, 1)]]
# test branch cut
pos = {0: np.array([-.03, 1., 0.]),
1: np.array([1., 1., 0.]),
2: np.array([0.03, 0.0, 0.]),
3: np.array([1., 0., 0.])}
assert len(om.get_pos_reds(pos, bl_error_tol=.1)) == 4
def test_filter_reds(self):
antpos = linear_array(7)
reds = om.get_reds(antpos, pols=['xx'], pol_mode='1pol')
# exclude ants
red = om.filter_reds(reds, ex_ants=[0, 4])
assert red == [[(1, 2, 'xx'), (2, 3, 'xx'), (5, 6, 'xx')], [(1, 3, 'xx'), (3, 5, 'xx')], [(2, 5, 'xx'), (3, 6, 'xx')],
[(1, 5, 'xx'), (2, 6, 'xx')], [(1, 6, 'xx')]]
# include ants
red = om.filter_reds(reds, ants=[0, 1, 4, 5, 6])
assert red == [[(0, 1, 'xx'), (4, 5, 'xx'), (5, 6, 'xx')], [(4, 6, 'xx')], [(1, 4, 'xx')], [(0, 4, 'xx'), (1, 5, 'xx')],
[(0, 5, 'xx'), (1, 6, 'xx')], [(0, 6, 'xx')]]
# exclued bls
red = om.filter_reds(reds, ex_bls=[(0, 2), (1, 2), (0, 6)])
assert red == [[(0, 1, 'xx'), (2, 3, 'xx'), (3, 4, 'xx'), (4, 5, 'xx'), (5, 6, 'xx')],
[(1, 3, 'xx'), (2, 4, 'xx'), (3, 5, 'xx'), (4, 6, 'xx')], [(0, 3, 'xx'), (1, 4, 'xx'), (2, 5, 'xx'), (3, 6, 'xx')],
[(0, 4, 'xx'), (1, 5, 'xx'), (2, 6, 'xx')], [(0, 5, 'xx'), (1, 6, 'xx')]]
# include bls
red = om.filter_reds(reds, bls=[(0, 1), (1, 2)])
assert red == [[(0, 1, 'xx'), (1, 2, 'xx')]]
# include ubls
red = om.filter_reds(reds, ubls=[(0, 2), (1, 4)])
assert red == [[(0, 2, 'xx'), (1, 3, 'xx'), (2, 4, 'xx'), (3, 5, 'xx'), (4, 6, 'xx')],
[(0, 3, 'xx'), (1, 4, 'xx'), (2, 5, 'xx'), (3, 6, 'xx')]]
# exclude ubls
red = om.filter_reds(reds, ex_ubls=[(0, 2), (1, 4), (4, 5), (0, 5), (2, 3), (0, 6)])
assert red == [[(0, 4, 'xx'), (1, 5, 'xx'), (2, 6, 'xx')]]
# exclude crosspols
# reds = omni.filter_reds(self.info.get_reds(), ex_crosspols=()
def test_filter_reds_2pol(self):
antpos = linear_array(4)
reds = om.get_reds(antpos, pols=['xx', 'yy'], pol_mode='1pol')
# include pols
red = om.filter_reds(reds, pols=['xx'])
assert red == [[(0, 1, 'xx'), (1, 2, 'xx'), (2, 3, 'xx')], [(0, 2, 'xx'), (1, 3, 'xx')], [(0, 3, 'xx')]]
# exclude pols
red = om.filter_reds(reds, ex_pols=['yy'])
assert red == [[(0, 1, 'xx'), (1, 2, 'xx'), (2, 3, 'xx')], [(0, 2, 'xx'), (1, 3, 'xx')], [(0, 3, 'xx')]]
# exclude ants
red = om.filter_reds(reds, ex_ants=[0])
assert red == [[(1, 2, 'xx'), (2, 3, 'xx')], [(1, 3, 'xx')], [(1, 2, 'yy'), (2, 3, 'yy')], [(1, 3, 'yy')]]
# include ants
red = om.filter_reds(reds, ants=[1, 2, 3])
red = om.filter_reds(reds, ex_ants=[0])
# exclued bls
red = om.filter_reds(reds, ex_bls=[(1, 2), (0, 3)])
assert red == [[(0, 1, 'xx'), (2, 3, 'xx')], [(0, 2, 'xx'), (1, 3, 'xx')], [(0, 1, 'yy'), (2, 3, 'yy')], [(0, 2, 'yy'), (1, 3, 'yy')]]
# include bls
red = om.filter_reds(reds, bls=[(0, 1), (1, 2)])
assert red == [[(0, 1, 'xx'), (1, 2, 'xx')], [(0, 1, 'yy'), (1, 2, 'yy')]]
# include ubls
red = om.filter_reds(reds, ubls=[(0, 2)])
assert red == [[(0, 2, 'xx'), (1, 3, 'xx')], [(0, 2, 'yy'), (1, 3, 'yy')]]
# exclude ubls
red = om.filter_reds(reds, ex_ubls=[(2, 3), (0, 3)])
assert red == [[(0, 2, 'xx'), (1, 3, 'xx')], [(0, 2, 'yy'), (1, 3, 'yy')]]
# test baseline length min and max cutoffs
antpos = hex_array(4, sep=14.6, split_core=False, outriggers=0)
reds = om.get_reds(antpos, pols=['xx'], pol_mode='1pol')
assert om.filter_reds(reds, antpos=antpos, min_bl_cut=85) == reds[-3:]
assert om.filter_reds(reds, antpos=antpos, max_bl_cut=15) == reds[:3]
def test_filter_reds_max_dim(self):
# build hex array with 4 on a side and 7 total rows
antpos = hex_array(4, split_core=False, outriggers=0)
antpos[37] = np.array([np.pi, np.pi, 0]) # add one off-grid antenna
reds = om.get_reds(antpos)
# remove third, fourth, fifth, and sixth rows
reds = om.filter_reds(reds, ex_ants=list(range(9, 33)))
# Max 1 dimension means largest 1D array
new_reds = om.filter_reds(reds, max_dims=1)
ant_inds = set([ant[0] for red in new_reds for bl in red for ant in split_bl(bl)])
assert ant_inds == set(range(4, 9))
# Max 2 dimensions means only rows 1 and 2
new_reds = om.filter_reds(reds, max_dims=2)
ant_inds = set([ant[0] for red in new_reds for bl in red for ant in split_bl(bl)])
assert ant_inds == set(range(0, 9))
# Max 3 dimensions means all 3 good rows, but keeps out the off-grid antenna
new_reds = om.filter_reds(reds, max_dims=3)
ant_inds = set([ant[0] for red in new_reds for bl in red for ant in split_bl(bl)])
assert ant_inds == (set(range(0, 9)) | set(range(33, 37)))
def test_add_pol_reds(self):
reds = [[(1, 2)]]
polReds = om.add_pol_reds(reds, pols=['xx'], pol_mode='1pol')
assert polReds == [[(1, 2, 'xx')]]
polReds = om.add_pol_reds(reds, pols=['xx', 'yy'], pol_mode='2pol')
assert polReds == [[(1, 2, 'xx')], [(1, 2, 'yy')]]
polReds = om.add_pol_reds(reds, pols=['xx', 'xy', 'yx', 'yy'], pol_mode='4pol')
assert polReds == [[(1, 2, 'xx')], [(1, 2, 'xy')], [(1, 2, 'yx')], [(1, 2, 'yy')]]
polReds = om.add_pol_reds(reds, pols=['xx', 'xy', 'yx', 'yy'], pol_mode='4pol_minV')
assert polReds == [[(1, 2, 'xx')], [(1, 2, 'xy'), (1, 2, 'yx')], [(1, 2, 'yy')]]
def test_reds_to_antpos(self):
# Test 1D
true_antpos = linear_array(10)
reds = om.get_reds(true_antpos, pols=['xx', 'yy'], pol_mode='2pol', bl_error_tol=1e-10)
inferred_antpos = om.reds_to_antpos(reds,)
for pos in inferred_antpos.values():
assert len(pos) == 1
new_reds = om.get_reds(inferred_antpos, pols=['xx', 'yy'], pol_mode='2pol', bl_error_tol=1e-10)
for nred in new_reds:
for red in reds:
if nred[0] in red:
found_match = True
assert len(set(nred).difference(set(red))) == 0
assert found_match
found_match = False
# Test 2D
true_antpos = hex_array(5, split_core=False, outriggers=0)
reds = om.get_reds(true_antpos, pols=['xx'], pol_mode='1pol', bl_error_tol=1e-10)
inferred_antpos = om.reds_to_antpos(reds)
for pos in inferred_antpos.values():
assert len(pos) == 2
new_reds = om.get_reds(inferred_antpos, pols=['xx'], pol_mode='1pol', bl_error_tol=1e-10)
for nred in new_reds:
for red in reds:
if nred[0] in red:
found_match = True
assert len(set(nred).difference(set(red))) == 0
assert found_match
found_match = False
# Test 2D with split
true_antpos = hex_array(5, split_core=True, outriggers=0)
reds = om.get_pos_reds(true_antpos, bl_error_tol=1e-10)
inferred_antpos = om.reds_to_antpos(reds)
for pos in inferred_antpos.values():
assert len(pos) == 2
new_reds = om.get_pos_reds(inferred_antpos, bl_error_tol=1e-10)
for nred in new_reds:
for red in reds:
if nred[0] in red:
found_match = True
assert len(set(nred).difference(set(red))) == 0
assert found_match
found_match = False
# Test 2D with additional degeneracy
true_antpos = {0: [0, 0], 1: [1, 0], 2: [0, 1], 3: [1, 1],
4: [100, 100], 5: [101, 100], 6: [100, 101], 7: [101, 101]}
reds = om.get_pos_reds(true_antpos, bl_error_tol=1e-10)
inferred_antpos = om.reds_to_antpos(reds)
for pos in inferred_antpos.values():
assert len(pos) == 3
new_reds = om.get_pos_reds(inferred_antpos, bl_error_tol=1e-10)
for nred in new_reds:
for red in reds:
if nred[0] in red:
found_match = True
assert len(set(nred).difference(set(red))) == 0
assert found_match
found_match = False
def test_find_polarity_flipped_ants(self):
# test normal operation
antpos = hex_array(3, split_core=False, outriggers=0)
reds = om.get_reds(antpos, pols=['ee'], pol_mode='1pol')
rc = om.RedundantCalibrator(reds)
freqs = np.linspace(.1, .2, 100)
ants = [(ant, 'Jee') for ant in antpos]
gains = gen_gains(freqs, ants)
for ant in [3, 10, 11]:
gains[ant, 'Jee'] *= -1
_, true_vis, data = sim_red_data(reds, gains=gains, shape=(2, len(freqs)))
meta, g_fc = rc.firstcal(data, freqs)
for ant in antpos:
if ant in [3, 10, 11]:
assert np.all(meta['polarity_flips'][ant, 'Jee'])
else:
assert not np.any(meta['polarity_flips'][ant, 'Jee'])
# test operation where no good answer is possible, so we expect it to fail
data[(0, 1, 'ee')] *= -1
meta, g_fc = rc.firstcal(data, freqs)
for ant in meta['polarity_flips']:
assert np.all([m is None for m in meta['polarity_flips'][ant]])
# test errors
with pytest.raises(ValueError):
om._build_polarity_baseline_groups(data, reds, edge_cut=100)
with pytest.raises(ValueError):
om._build_polarity_baseline_groups(data, reds, max_rel_angle=np.pi)
class TestRedundantCalibrator(object):
def test_init(self):
# test a very small array
pos = hex_array(3, split_core=False, outriggers=0)
pos = {ant: pos[ant] for ant in range(4)}
reds = om.get_reds(pos)
rc = om.RedundantCalibrator(reds)
with pytest.raises(ValueError):
rc = om.RedundantCalibrator(reds, check_redundancy=True)
# test disconnected redundant array
pos = hex_array(5, split_core=False, outriggers=0)
pos = {ant: pos[ant] for ant in pos if ant in [0, 1, 5, 6, 54, 55, 59, 60]}
reds = om.get_reds(pos)
try:
rc = om.RedundantCalibrator(reds, check_redundancy=True)
except ValueError:
assert False, 'This array is actually redundant, so check_redundancy should not raise a ValueError.'
def test_build_eq(self):
antpos = linear_array(3)
reds = om.get_reds(antpos, pols=['xx'], pol_mode='1pol')
gains, true_vis, data = sim_red_data(reds)
info = om.RedundantCalibrator(reds)
eqs = info.build_eqs(data)
assert len(eqs) == 3
assert eqs['g_0_Jxx * g_1_Jxx_ * u_0_xx'] == (0, 1, 'xx')
assert eqs['g_1_Jxx * g_2_Jxx_ * u_0_xx'] == (1, 2, 'xx')
assert eqs['g_0_Jxx * g_2_Jxx_ * u_1_xx'] == (0, 2, 'xx')
reds = om.get_reds(antpos, pols=['xx', 'yy', 'xy', 'yx'], pol_mode='4pol')
gains, true_vis, data = sim_red_data(reds)
info = om.RedundantCalibrator(reds)
eqs = info.build_eqs(data)
assert len(eqs) == 3 * 4
assert eqs['g_0_Jxx * g_1_Jyy_ * u_4_xy'] == (0, 1, 'xy')
assert eqs['g_1_Jxx * g_2_Jyy_ * u_4_xy'] == (1, 2, 'xy')
assert eqs['g_0_Jxx * g_2_Jyy_ * u_5_xy'] == (0, 2, 'xy')
assert eqs['g_0_Jyy * g_1_Jxx_ * u_6_yx'] == (0, 1, 'yx')
assert eqs['g_1_Jyy * g_2_Jxx_ * u_6_yx'] == (1, 2, 'yx')
assert eqs['g_0_Jyy * g_2_Jxx_ * u_7_yx'] == (0, 2, 'yx')
reds = om.get_reds(antpos, pols=['xx', 'yy', 'xy', 'yx'], pol_mode='4pol_minV')
gains, true_vis, data = sim_red_data(reds)
info = om.RedundantCalibrator(reds)
eqs = info.build_eqs(data)
assert len(eqs) == 3 * 4
assert eqs['g_0_Jxx * g_1_Jyy_ * u_4_xy'] == (0, 1, 'xy')
assert eqs['g_1_Jxx * g_2_Jyy_ * u_4_xy'] == (1, 2, 'xy')
assert eqs['g_0_Jxx * g_2_Jyy_ * u_5_xy'] == (0, 2, 'xy')
assert eqs['g_0_Jyy * g_1_Jxx_ * u_4_xy'] == (0, 1, 'yx')
assert eqs['g_1_Jyy * g_2_Jxx_ * u_4_xy'] == (1, 2, 'yx')
assert eqs['g_0_Jyy * g_2_Jxx_ * u_5_xy'] == (0, 2, 'yx')
with pytest.raises(KeyError):
info.build_eqs({})
def test_solver(self):
antpos = linear_array(3)
reds = om.get_reds(antpos, pols=['xx'], pol_mode='1pol')
info = om.RedundantCalibrator(reds)
gains, true_vis, d = sim_red_data(reds)
w = {}
w = dict([(k, 1.) for k in d.keys()])
def solver(data, wgts, **kwargs):
np.testing.assert_equal(data['g_0_Jxx * g_1_Jxx_ * u_0_xx'], d[0, 1, 'xx'])
np.testing.assert_equal(data['g_1_Jxx * g_2_Jxx_ * u_0_xx'], d[1, 2, 'xx'])
np.testing.assert_equal(data['g_0_Jxx * g_2_Jxx_ * u_1_xx'], d[0, 2, 'xx'])
if len(wgts) == 0:
return
np.testing.assert_equal(wgts['g_0_Jxx * g_1_Jxx_ * u_0_xx'], w[0, 1, 'xx'])
np.testing.assert_equal(wgts['g_1_Jxx * g_2_Jxx_ * u_0_xx'], w[1, 2, 'xx'])
np.testing.assert_equal(wgts['g_0_Jxx * g_2_Jxx_ * u_1_xx'], w[0, 2, 'xx'])
return
info._solver(solver, d)
info._solver(solver, d, w)
def test_firstcal_iteration(self):
NANTS = 18
NFREQ = 64
antpos = linear_array(NANTS)
reds = om.get_reds(antpos, pols=['xx'], pol_mode='1pol')
info = om.RedundantCalibrator(reds)
fqs = np.linspace(.1, .2, NFREQ)
g, true_vis, d = sim_red_data(reds, shape=(1, NFREQ), gain_scatter=0)
delays = {k: np.random.randn() * 30 for k in g.keys()} # in ns
fc_gains = {k: np.exp(2j * np.pi * v * fqs) for k, v in delays.items()}
delays = {k: np.array([[v]]) for k, v in delays.items()}
fc_gains = {i: v.reshape(1, NFREQ) for i, v in fc_gains.items()}
gains = {k: v * fc_gains[k] for k, v in g.items()}
gains = {k: v.astype(np.complex64) for k, v in gains.items()}
calibrate_in_place(d, gains, old_gains=g, gain_convention='multiply')
d = {k: v.astype(np.complex64) for k, v in d.items()}
dly_sol, off_sol = info._firstcal_iteration(d, df=fqs[1] - fqs[0], f0=fqs[0], medfilt=False)
sol_degen = info.remove_degen_gains(dly_sol, degen_gains=delays, mode='phase')
for i in range(NANTS):
assert dly_sol[(i, 'Jxx')].dtype == np.float64
assert dly_sol[(i, 'Jxx')].shape == (1, 1)
assert np.allclose(np.round(sol_degen[(i, 'Jxx')] - delays[(i, 'Jxx')], 0), 0)
def test_firstcal(self):
np.random.seed(21)
antpos = hex_array(2, split_core=False, outriggers=0)
reds = om.get_reds(antpos, pols=['xx'], pol_mode='1pol')
rc = om.RedundantCalibrator(reds)
freqs = np.linspace(1e8, 2e8, 1024)
# test firstcal where the degeneracies of the phases and delays have already been removed so no abscal is necessary
gains, true_vis, d = sim_red_data(reds, gain_scatter=0, shape=(2, len(freqs)))
fc_delays = {ant: [[100e-9 * np.random.randn()]] for ant in gains.keys()} # in s
fc_delays = rc.remove_degen_gains(fc_delays)
fc_offsets = {ant: [[.49 * np.pi * (np.random.rand() > .90)]] for ant in gains.keys()} # the .49 removes the possibly of phase wraps that need abscal
fc_offsets = rc.remove_degen_gains(fc_offsets)
fc_gains = {ant: np.reshape(np.exp(-2.0j * np.pi * freqs * delay - 1.0j * fc_offsets[ant]), (1, len(freqs)))
for ant, delay in fc_delays.items()}
for ant1, ant2, pol in d.keys():
d[(ant1, ant2, pol)] *= fc_gains[(ant1, split_pol(pol)[0])] * np.conj(fc_gains[(ant2, split_pol(pol)[1])])
for ant in gains.keys():
gains[ant] *= fc_gains[ant]
meta, g_fc = rc.firstcal(d, freqs, conv_crit=0)
np.testing.assert_array_almost_equal(np.linalg.norm([g_fc[ant] - gains[ant] for ant in g_fc]), 0, decimal=3)
# test firstcal with only phases (no delays)
gains, true_vis, d = sim_red_data(reds, gain_scatter=0, shape=(2, len(freqs)))
fc_delays = {ant: [[0 * np.random.randn()]] for ant in gains.keys()} # in s
fc_offsets = {ant: [[.49 * np.pi * (np.random.rand() > .90)]] for ant in gains.keys()} # the .49 removes the possibly of phase wraps that need abscal
fc_offsets = rc.remove_degen_gains(fc_offsets)
fc_gains = {ant: np.reshape(np.exp(-2.0j * np.pi * freqs * delay - 1.0j * fc_offsets[ant]), (1, len(freqs)))
for ant, delay in fc_delays.items()}
for ant1, ant2, pol in d.keys():
d[(ant1, ant2, pol)] *= fc_gains[(ant1, split_pol(pol)[0])] * np.conj(fc_gains[(ant2, split_pol(pol)[1])])
for ant in gains.keys():
gains[ant] *= fc_gains[ant]
meta, g_fc = rc.firstcal(d, freqs, conv_crit=0)
np.testing.assert_array_almost_equal(np.linalg.norm([g_fc[ant] - gains[ant] for ant in g_fc]), 0, decimal=10) # much higher precision
def test_logcal(self):
NANTS = 18
antpos = linear_array(NANTS)
reds = om.get_reds(antpos, pols=['xx'], pol_mode='1pol')
info = om.RedundantCalibrator(reds)
gains, true_vis, d = sim_red_data(reds, gain_scatter=.05)
w = dict([(k, 1.) for k in d.keys()])
meta, sol = info.logcal(d)
for i in range(NANTS):
assert sol[(i, 'Jxx')].shape == (10, 10)
for bls in reds:
ubl = sol[bls[0]]
assert ubl.shape == (10, 10)
for bl in bls:
d_bl = d[bl]
mdl = sol[(bl[0], 'Jxx')] * sol[(bl[1], 'Jxx')].conj() * ubl
np.testing.assert_almost_equal(np.abs(d_bl), np.abs(mdl), decimal=10)
np.testing.assert_almost_equal(np.angle(d_bl * mdl.conj()), 0, decimal=10)
for k in d.keys():
d[k] = np.zeros_like(d[k])
with warnings.catch_warnings():
warnings.simplefilter("ignore")
meta, sol = info.logcal(d)
om.make_sol_finite(sol)
for red in reds:
np.testing.assert_array_equal(sol[red[0]], 0.0)
for ant in gains.keys():
np.testing.assert_array_equal(sol[ant], 1.0)
def test_omnical(self):
NANTS = 18
antpos = linear_array(NANTS)
reds = om.get_reds(antpos, pols=['xx'], pol_mode='1pol')
info = om.RedundantCalibrator(reds)
gains, true_vis, d = sim_red_data(reds, gain_scatter=.0099999)
w = dict([(k, 1.) for k in d.keys()])
sol0 = dict([(k, np.ones_like(v)) for k, v in gains.items()])
sol0.update(info.compute_ubls(d, sol0))
meta, sol = info.omnical(d, sol0, conv_crit=1e-12, gain=.5, maxiter=500, check_after=30, check_every=6)
for i in range(NANTS):
assert sol[(i, 'Jxx')].shape == (10, 10)
for bls in reds:
ubl = sol[bls[0]]
assert ubl.shape == (10, 10)
for bl in bls:
d_bl = d[bl]
mdl = sol[(bl[0], 'Jxx')] * sol[(bl[1], 'Jxx')].conj() * ubl
np.testing.assert_almost_equal(np.abs(d_bl), np.abs(mdl), decimal=10)
np.testing.assert_almost_equal(np.angle(d_bl * mdl.conj()), 0, decimal=10)
def test_omnical64(self):
NANTS = 18
antpos = linear_array(NANTS)
reds = om.get_reds(antpos, pols=['xx'], pol_mode='1pol')
info = om.RedundantCalibrator(reds)
gains, true_vis, d = sim_red_data(reds, shape=(2, 1), gain_scatter=.0099999)
w = dict([(k, 1.) for k in d.keys()])
sol0 = dict([(k, np.ones_like(v)) for k, v in gains.items()])
sol0.update(info.compute_ubls(d, sol0))
d = {k: v.astype(np.complex64) for k, v in d.items()}
sol0 = {k: v.astype(np.complex64) for k, v in sol0.items()}
meta, sol = info.omnical(d, sol0, gain=.5, maxiter=500, check_after=30, check_every=6)
for bls in reds:
ubl = sol[bls[0]]
assert ubl.dtype == np.complex64
for bl in bls:
d_bl = d[bl]
mdl = sol[(bl[0], 'Jxx')] * sol[(bl[1], 'Jxx')].conj() * ubl
np.testing.assert_almost_equal(np.abs(d_bl), np.abs(mdl), decimal=6)
np.testing.assert_almost_equal(np.angle(d_bl * mdl.conj()), 0, decimal=6)
def test_omnical128(self):
NANTS = 18
antpos = linear_array(NANTS)
reds = om.get_reds(antpos, pols=['xx'], pol_mode='1pol')
info = om.RedundantCalibrator(reds)
gains, true_vis, d = sim_red_data(reds, shape=(2, 1), gain_scatter=.0099999)
w = dict([(k, 1.) for k in d.keys()])
sol0 = dict([(k, np.ones_like(v)) for k, v in gains.items()])
sol0.update(info.compute_ubls(d, sol0))
d = {k: v.astype(np.complex128) for k, v in d.items()}
sol0 = {k: v.astype(np.complex128) for k, v in sol0.items()}
meta, sol = info.omnical(d, sol0, conv_crit=1e-12, gain=.5, maxiter=500, check_after=30, check_every=6)
for bls in reds:
ubl = sol[bls[0]]
assert ubl.dtype == np.complex128
for bl in bls:
d_bl = d[bl]
mdl = sol[(bl[0], 'Jxx')] * sol[(bl[1], 'Jxx')].conj() * ubl
np.testing.assert_almost_equal(np.abs(d_bl), np.abs(mdl), decimal=10)
np.testing.assert_almost_equal(np.angle(d_bl * mdl.conj()), 0, decimal=10)
def test_lincal(self):
NANTS = 18
antpos = linear_array(NANTS)
reds = om.get_reds(antpos, pols=['xx'], pol_mode='1pol')
info = om.RedundantCalibrator(reds)
gains, true_vis, d = sim_red_data(reds, gain_scatter=.0099999)
w = dict([(k, 1.) for k in d.keys()])
sol0 = dict([(k, np.ones_like(v)) for k, v in gains.items()])
sol0.update(info.compute_ubls(d, sol0))
meta, sol = info.lincal(d, sol0)
for i in range(NANTS):
assert sol[(i, 'Jxx')].shape == (10, 10)
for bls in reds:
ubl = sol[bls[0]]
assert ubl.shape == (10, 10)
for bl in bls:
d_bl = d[bl]
mdl = sol[(bl[0], 'Jxx')] * sol[(bl[1], 'Jxx')].conj() * ubl
assert np.allclose(np.abs(d_bl), np.abs(mdl), atol=1e-10)
assert np.allclose(np.angle(d_bl * mdl.conj()), 0, atol=1e-10)
def test_lincal64(self):
NANTS = 18
antpos = linear_array(NANTS)
reds = om.get_reds(antpos, pols=['xx'], pol_mode='1pol')
info = om.RedundantCalibrator(reds)
gains, true_vis, d = sim_red_data(reds, shape=(2, 1), gain_scatter=.0099999)
w = dict([(k, 1.) for k in d.keys()])
sol0 = dict([(k, np.ones_like(v)) for k, v in gains.items()])
sol0.update(info.compute_ubls(d, sol0))
d = {k: v.astype(np.complex64) for k, v in d.items()}
sol0 = {k: v.astype(np.complex64) for k, v in sol0.items()}
meta, sol = info.lincal(d, sol0, maxiter=12, conv_crit=1e-6)
for bls in reds:
ubl = sol[bls[0]]
assert ubl.dtype == np.complex64
for bl in bls:
d_bl = d[bl]
mdl = sol[(bl[0], 'Jxx')] * sol[(bl[1], 'Jxx')].conj() * ubl
np.testing.assert_almost_equal(np.abs(d_bl), np.abs(mdl), decimal=6)
np.testing.assert_almost_equal(np.angle(d_bl * mdl.conj()), 0, decimal=6)
def test_lincal128(self):
NANTS = 18
antpos = linear_array(NANTS)
reds = om.get_reds(antpos, pols=['xx'], pol_mode='1pol')
info = om.RedundantCalibrator(reds)
gains, true_vis, d = sim_red_data(reds, shape=(2, 1), gain_scatter=.0099999)
w = dict([(k, 1.) for k in d.keys()])
sol0 = dict([(k, np.ones_like(v)) for k, v in gains.items()])
sol0.update(info.compute_ubls(d, sol0))
d = {k: v.astype(np.complex128) for k, v in d.items()}
sol0 = {k: v.astype(np.complex128) for k, v in sol0.items()}
meta, sol = info.lincal(d, sol0, maxiter=12)
for bls in reds:
ubl = sol[bls[0]]
assert ubl.dtype == np.complex128
for bl in bls:
d_bl = d[bl]
mdl = sol[(bl[0], 'Jxx')] * sol[(bl[1], 'Jxx')].conj() * ubl
np.testing.assert_almost_equal(np.abs(d_bl), np.abs(mdl), decimal=10)
np.testing.assert_almost_equal(np.angle(d_bl * mdl.conj()), 0, decimal=10)
def test_svd_convergence(self):
for hexnum in (2, 3, 4):
for dtype in (np.complex64, np.complex128):
antpos = hex_array(hexnum, split_core=False, outriggers=0)
reds = om.get_reds(antpos, pols=['xx'], pol_mode='1pol')
rc = om.RedundantCalibrator(reds)
gains, _, d = sim_red_data(reds, shape=(2, 1), gain_scatter=.01)
d = {k: dk.astype(dtype) for k, dk in d.items()}
w = {k: 1. for k in d.keys()}
gains = {k: gk.astype(dtype) for k, gk in gains.items()}
sol0 = {k: np.ones_like(gk) for k, gk in gains.items()}
sol0.update(rc.compute_ubls(d, sol0))
meta, sol = rc.lincal(d, sol0) # should not raise 'np.linalg.linalg.LinAlgError: SVD did not converge'
def test_remove_degen_firstcal_1D(self):
pol = 'xx'
xhat = np.array([1., 0, 0])
dtau_dx = 10.
antpos = linear_array(10)
reds = om.get_reds(antpos, pols=[pol], pol_mode='1pol')
rc = om.RedundantCalibrator(reds)
# put in a linear slope in delays, see that it is taken out
true_dlys = {(i, split_pol(pol)[0]): np.array([[np.dot(xhat, antpos[i]) * dtau_dx]]) for i in range(len(antpos))}
dlys = rc.remove_degen_gains(true_dlys, mode='phase')
for k in dlys:
np.testing.assert_almost_equal(dlys[k], 0, decimal=10)
dlys = rc.remove_degen_gains(true_dlys, degen_gains=true_dlys, mode='phase')
for k in dlys:
np.testing.assert_almost_equal(dlys[k], true_dlys[k], decimal=10)
def test_remove_degen_firstcal_2D(self):
pol = 'xx'
xhat = np.array([1., 0, 0])
yhat = np.array([0., 1, 0])
dtau_dx = 10.
dtau_dy = -5.
antpos = hex_array(5, split_core=False, outriggers=0)
reds = om.get_reds(antpos, pols=[pol], pol_mode='1pol')
rc = om.RedundantCalibrator(reds)
# put in a linear slope in delays, see that it is taken out
true_dlys = {(i, split_pol(pol)[0]):
np.array([[np.dot(xhat, antpos[i]) * dtau_dx + np.dot(yhat, antpos[i]) * dtau_dy]])
for i in range(len(antpos))}
dlys = rc.remove_degen_gains(true_dlys, mode='phase')
for k in dlys:
np.testing.assert_almost_equal(dlys[k], 0, decimal=10)
"""
@Author: <NAME>, <NAME>, University of Colorado Boulder
The vector and contour map plots are implemented for initial investigation.
"""
import os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from matplotlib.colors import TwoSlopeNorm, Normalize, SymLogNorm, NoNorm, LogNorm #, CenteredNorm
from .config import CONFIG
from itertools import chain
from matplotlib.path import Path
from pyspedas.secs import read_data_files
from datetime import datetime, timedelta
from pyspedas.utilities.dailynames import dailynames
from mpl_toolkits.basemap.solar import daynight_terminator
import logging
#os.environ['PROJ_LIB'] = '/Users/cao/anaconda3/envs/secs/share/proj'
#if an error about "dedent" occurs, downgrade the matplotlib version to 3.2.0 by using "pip install -U matplotlib==3.2"
try:
from mpl_toolkits.basemap import Basemap
except ImportError:
logging.info('Error importing Basemap; installation instructions can be found at: https://matplotlib.org/basemap/users/installing.html or https://anaconda.org/anaconda/basemap')
class CenteredNorm(Normalize):
"""
This function is directly contributed from the latest matplotlib version,
but not included in the 3.2.0 version, so is duplicated here.
"""
def __init__(self, vcenter=0, halfrange=None, clip=False):
"""
Normalize symmetrical data around a center (0 by default).
Unlike `TwoSlopeNorm`, `CenteredNorm` applies an equal rate of change
around the center.
Useful when mapping symmetrical data around a conceptual center
e.g., data that range from -2 to 4, with 0 as the midpoint, and
with equal rates of change around that midpoint.
Parameters
----------
vcenter : float, default: 0
The data value that defines ``0.5`` in the normalization.
halfrange : float, optional
The range of data values that defines a range of ``0.5`` in the
normalization, so that *vcenter* - *halfrange* is ``0.0`` and
*vcenter* + *halfrange* is ``1.0`` in the normalization.
Defaults to the largest absolute difference to *vcenter* for
the values in the dataset.
Examples
--------
This maps data values -2 to 0.25, 0 to 0.5, and 4 to 1.0
(assuming equal rates of change above and below 0.0):
>>> import matplotlib.colors as mcolors
>>> norm = mcolors.CenteredNorm(halfrange=4.0)
>>> data = [-2., 0., 4.]
>>> norm(data)
array([0.25, 0.5 , 1. ])
"""
super().__init__(vmin=None, vmax=None, clip=clip)
self._vcenter = vcenter
# calling the halfrange setter to set vmin and vmax
self.halfrange = halfrange
def _set_vmin_vmax(self):
"""
Set *vmin* and *vmax* based on *vcenter* and *halfrange*.
"""
self.vmax = self._vcenter + self._halfrange
self.vmin = self._vcenter - self._halfrange
def autoscale(self, A):
"""
Set *halfrange* to ``max(abs(A-vcenter))``, then set *vmin* and *vmax*.
"""
A = np.asanyarray(A)
self._halfrange = max(self._vcenter-A.min(),
A.max()-self._vcenter)
self._set_vmin_vmax()
def autoscale_None(self, A):
"""Set *vmin* and *vmax*."""
A = np.asanyarray(A)
if self._halfrange is None and A.size:
self.autoscale(A)
@property
def vcenter(self):
return self._vcenter
@vcenter.setter
def vcenter(self, vcenter):
if vcenter != self._vcenter:
self._vcenter = vcenter
self._changed()
if self.vmax is not None:
# recompute halfrange assuming vmin and vmax represent
# min and max of data
self._halfrange = max(self._vcenter-self.vmin,
self.vmax-self._vcenter)
self._set_vmin_vmax()
@property
def halfrange(self):
return self._halfrange
@halfrange.setter
def halfrange(self, halfrange):
if halfrange is None:
self._halfrange = None
self.vmin = None
self.vmax = None
else:
self._halfrange = abs(halfrange)
def __call__(self, value, clip=None):
if self._halfrange is not None:
# enforce symmetry, reset vmin and vmax
self._set_vmin_vmax()
return super().__call__(value, clip=clip)
def draw_map(m, scale=0.2):
"""
This function is for data visualization with geographic map.
Parameter m: basemap object
"""
# draw a shaded-relief image
m.shadedrelief(scale=scale)
# lats and longs are returned as a dictionary
lats = m.drawparallels(np.linspace(-90, 90, 13))
lons = m.drawmeridians(np.linspace(-180, 180, 13))
# keys contain the plt.Line2D instances
lat_lines = chain(*(tup[1][0] for tup in lats.items()))
lon_lines = chain(*(tup[1][0] for tup in lons.items()))
all_lines = chain(lat_lines, lon_lines)
# cycle through these lines and set the desired style
for line in all_lines:
line.set(linestyle='-', alpha=0.3, color='w')
return
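# A minimal usage sketch (assumes the Basemap import above succeeded):
#
#   fig = plt.figure(figsize=(8, 6))
#   m = Basemap(projection='cyl', resolution='l')
#   draw_map(m, scale=0.2)
#   plt.show()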
def noon_midnight_meridian(dtime=None, delta=0.25):
"""
    This function calculates the longitudes and latitudes of the noon-midnight meridian
    based on a given UTC time.
    :param dtime: UTC time as a string in the format 'year-month-day/hour:minute:second'
    :param delta: float or int, in degrees, the spacing between neighboring points on the meridian.
    :return: dictionary with keys: 'lons_noonmidnight', 'lats_noonmidnight', 'lons_noon', 'lats_noon',
             'lons_midnight', 'lats_midnight',
             whose values are numpy arrays
"""
# method2:
n_interval = 360 / delta + 1
ni_half = int(np.floor(n_interval / 2))
ni_otherhalf = int(n_interval - ni_half)
time_current_UTC = datetime.strptime(dtime, '%Y-%m-%d/%H:%M:%S')
dtime_noon = dtime[0:11] + '12:00:00'
# print('dtime_noon: ', dtime_noon)
time_noon = datetime.strptime(dtime_noon, '%Y-%m-%d/%H:%M:%S')
time_diff = time_noon - time_current_UTC
diff_in_hours = time_diff.total_seconds() / 3600 # within [-12,12] hours due to the same day.
if diff_in_hours == 0:
lons_latmax = 0 # current UTC time is just at noon
lons_latmin = 180 # midnight longitude
elif diff_in_hours > 0:
lons_latmax = 0 + 15 * diff_in_hours # longitude for noon line
lons_latmin = lons_latmax - 180 # longitude for midnight line
elif diff_in_hours < 0:
lons_latmax = 0 - 15 * diff_in_hours # longitude for noon line
lons_latmin = lons_latmax + 180 # longitude for midnight line
#
lons_max_arr = np.full((1, ni_half), lons_latmax) # for noon line
lats_max_arr = np.linspace(-90, 90, ni_half) # for noon line
lons_min_arr = np.full((1, ni_otherhalf), lons_latmin) # for midnight line
lats_min_arr = np.linspace(90, -90, ni_otherhalf) # for midnight line
lons_arr = np.concatenate((lons_max_arr, lons_min_arr), axis=None)
lats_arr = np.concatenate((lats_max_arr, lats_min_arr), axis=None)
lons_nm, lats_nm = lons_arr, lats_arr # the whole noon-midnight circle
lons_n, lats_n = lons_max_arr[0], lats_max_arr # the noon semi-circle
lons_m, lats_m = lons_min_arr[0], lats_min_arr # the midnight semi-circle
noon_midnight = {'lons_noonmidnight': lons_nm, 'lats_noonmidnight': lats_nm,
'lons_noon': lons_n, 'lats_noon': lats_n,
'lons_midnight': lons_m, 'lats_midnight': lats_m}
return noon_midnight
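# A minimal usage sketch (the timestamp below is hypothetical):
#
#   nm = noon_midnight_meridian(dtime='2017-03-17/07:53:24', delta=0.25)
#   # nm['lons_noon'], nm['lats_noon'] trace the noon semi-circle;
#   # nm['lons_midnight'], nm['lats_midnight'] trace the midnight semi-circle.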
def _make_EICS_plots(dtime=None, vplot_sized=False, contour_den=8, s_loc=False, quiver_scale=30, pred = False, pred_var = None, plot_title_addon = None, colormaplimit=None, EICS_component='Jy'):
"""
@Parameter: dtime input as a string
    @Parameter: s_loc input as a bool, which toggles plotting the locations of the virtual stations.
"""
dtype = 'EICS'
if not os.path.exists(CONFIG['plots_dir']):
os.makedirs(CONFIG['plots_dir'])
dtime_range = [dtime, dtime]
pathformat_prefix = dtype + '/%Y/%m/'
pathformat_unzipped = pathformat_prefix + '%d/' + dtype + '%Y%m%d_%H%M%S.dat'
filename_unzipped = dailynames(file_format=pathformat_unzipped, trange=dtime_range, res=10)
out_files_unzipped = [CONFIG['local_data_dir'] + rf_res for rf_res in filename_unzipped]
Data_Days_time = read_data_files(out_files=out_files_unzipped, dtype=dtype, out_type='df')
if pred == True: # XC
obs_var = Data_Days_time['Jy']
print(np.size(pred_var), np.size(Data_Days_time['Jy']), np.shape(pred_var), np.shape(Data_Days_time['Jy']))
        if np.size(pred_var) != np.size(Data_Days_time['Jy']):
import pandas as pd
import numpy as np
import math
import os
from scipy.interpolate import interp1d
import time
from sklearn.ensemble import RandomForestRegressor
import xgboost as xgb
from lightgbm import LGBMRegressor
from catboost import CatBoostRegressor
from information_measures import *
from joblib import Parallel, delayed
#from arch import arch_model
def rmspe(y_true, y_pred):
return (np.sqrt(np.mean(np.square((y_true - y_pred) / y_true))))
def log_return(list_stock_prices): # Stock prices are estimated through wap values
return np.log(list_stock_prices).diff()
def realized_volatility(series_log_return):
return np.sqrt(np.sum(series_log_return**2))
def compute_wap(book_pd):
wap = (book_pd['bid_price1'] * book_pd['ask_size1'] + book_pd['ask_price1'] * book_pd['bid_size1']) / (book_pd['bid_size1']+ book_pd['ask_size1'])
return wap
def realized_volatility_from_book_pd(book_stock_time):
wap = compute_wap(book_stock_time)
returns = log_return(wap)
volatility = realized_volatility(returns)
return volatility
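# A toy illustration of the chain above (made-up quotes, not real data):
#
#   book = pd.DataFrame({'bid_price1': [0.999, 1.000, 1.001],
#                        'ask_price1': [1.001, 1.002, 1.003],
#                        'bid_size1': [100, 80, 90],
#                        'ask_size1': [120, 90, 100]})
#   rv = realized_volatility_from_book_pd(book)  # sqrt of summed squared log returns of the WAP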
def realized_volatility_per_time_id(file_path, prediction_column_name):
df_book_data = pd.read_parquet(file_path)
# Estimate stock price per time point
df_book_data['wap'] = compute_wap(df_book_data)
# Compute log return from wap values per time_id
df_book_data['log_return'] = df_book_data.groupby(['time_id'])['wap'].apply(log_return)
df_book_data = df_book_data[~df_book_data['log_return'].isnull()]
# Compute the square root of the sum of log return squared to get realized volatility
df_realized_vol_per_stock = pd.DataFrame(df_book_data.groupby(['time_id'])['log_return'].agg(realized_volatility)).reset_index()
# Formatting
df_realized_vol_per_stock = df_realized_vol_per_stock.rename(columns = {'log_return':prediction_column_name})
stock_id = file_path.split('=')[1]
df_realized_vol_per_stock['row_id'] = df_realized_vol_per_stock['time_id'].apply(lambda x:f'{stock_id}-{x}')
return df_realized_vol_per_stock[['row_id',prediction_column_name]]
def past_realized_volatility_per_stock(list_file,prediction_column_name):
df_past_realized = pd.DataFrame()
for file in list_file:
df_past_realized = pd.concat([df_past_realized,
realized_volatility_per_time_id(file,prediction_column_name)])
return df_past_realized
def stupidForestPrediction(book_path_train,prediction_column_name,train_targets_pd,book_path_test):
naive_predictions_train = past_realized_volatility_per_stock(list_file=book_path_train,prediction_column_name=prediction_column_name)
df_joined_train = train_targets_pd.merge(naive_predictions_train[['row_id','pred']], on = ['row_id'], how = 'left')
X = np.array(df_joined_train['pred']).reshape(-1,1)
y = np.array(df_joined_train['target']).reshape(-1,)
regr = RandomForestRegressor(random_state=0)
regr.fit(X, y)
naive_predictions_test = past_realized_volatility_per_stock(list_file=book_path_test,prediction_column_name='target')
yhat = regr.predict(np.array(naive_predictions_test['target']).reshape(-1,1))
updated_predictions = naive_predictions_test.copy()
updated_predictions['target'] = yhat
return updated_predictions
def garch_fit_predict_volatility(returns_series, N=10000):
model = arch_model(returns_series * N, p=1, q=1)
model_fit = model.fit(update_freq=0, disp='off')
yhat = model_fit.forecast(horizon=600, reindex=False)
pred_volatility = np.sqrt(np.sum(yhat.variance.values)) / N
return pred_volatility
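# Note: arch_model comes from the `arch` package (its import is commented out at
# the top of this file). Returns are scaled by N for numerical stability, a
# GARCH(1, 1) model is fit, and the predicted volatility of the 600-second
# bucket is the square root of the summed per-second variance forecasts,
# divided by N to undo the scaling.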
def garch_volatility_per_time_id(file_path, prediction_column_name):
# read the data
df_book_data = pd.read_parquet(file_path)
# calculate the midprice (not the WAP)
df_book_data['midprice'] =(df_book_data['bid_price1'] + df_book_data['ask_price1'])/2
# leave only WAP for now
df_book_data = df_book_data[['time_id', 'seconds_in_bucket', 'midprice']]
df_book_data = df_book_data.sort_values('seconds_in_bucket')
# make the book updates evenly spaced
df_book_data_evenly = pd.DataFrame({'time_id':np.repeat(df_book_data['time_id'].unique(), 600),
'second':np.tile(range(0,600), df_book_data['time_id'].nunique())})
df_book_data_evenly['second'] = df_book_data_evenly['second'].astype(np.int16)
df_book_data_evenly = df_book_data_evenly.sort_values('second')
df_book_data_evenly = pd.merge_asof(df_book_data_evenly,
df_book_data,
left_on='second',right_on='seconds_in_bucket',
by = 'time_id')
# Ordering for easier use
df_book_data_evenly = df_book_data_evenly[['time_id', 'second', 'midprice']]
df_book_data_evenly = df_book_data_evenly.sort_values(['time_id','second']).reset_index(drop=True)
# calculate log returns
df_book_data_evenly['log_return'] = df_book_data_evenly.groupby(['time_id'])['midprice'].apply(log_return)
df_book_data_evenly = df_book_data_evenly[~df_book_data_evenly['log_return'].isnull()]
# fit GARCH(1, 1) and predict the volatility of returns
df_garch_vol_per_stock = \
pd.DataFrame(df_book_data_evenly.groupby(['time_id'])['log_return'].agg(garch_fit_predict_volatility)).reset_index()
df_garch_vol_per_stock = df_garch_vol_per_stock.rename(columns = {'log_return':prediction_column_name})
# add row_id column to the data
stock_id = file_path.split('=')[1]
df_garch_vol_per_stock['row_id'] = df_garch_vol_per_stock['time_id'].apply(lambda x:f'{stock_id}-{x}')
# return the result
return df_garch_vol_per_stock[['row_id', prediction_column_name]]
def garch_volatility_per_stock(list_file, prediction_column_name):
df_garch_predicted = pd.DataFrame()
for file in list_file:
df_garch_predicted = pd.concat([df_garch_predicted,
garch_volatility_per_time_id(file, prediction_column_name)])
return df_garch_predicted
def entropy_from_book(book_stock_time,last_min):
if last_min < 10:
book_stock_time = book_stock_time[book_stock_time['seconds_in_bucket'] >= (600-last_min*60)]
    if book_stock_time.empty or book_stock_time.shape[0] < 3:
return 0
wap = compute_wap(book_stock_time)
t_init = book_stock_time['seconds_in_bucket']
t_new = np.arange(np.min(t_init),np.max(t_init))
# Closest neighbour interpolation (no changes in wap between lines)
nearest = interp1d(t_init, wap, kind='nearest')
resampled_wap = nearest(t_new)
# Compute sample entropy
# sampleEntropy = nolds.sampen(resampled_wap)
sampleEntropy = sampen(resampled_wap)
return sampleEntropy
def entropy_from_wap(wap,seconds,last_seconds):
if last_seconds < 600:
idx = np.where(seconds >= last_seconds)[0]
if len(idx) < 3:
return 0
else:
wap = wap[idx]
seconds = seconds[idx]
# Closest neighbour interpolation (no changes in wap between lines)
t_new = np.arange(np.min(seconds),np.max(seconds))
nearest = interp1d(seconds, wap, kind='nearest')
resampled_wap = nearest(t_new)
# Compute sample entropy
# sampleEntropy = nolds.sampen(resampled_wap)
sampleEntropy = sampen(resampled_wap)
# sampleEntropy = ApEn_new(resampled_wap,3,0.001)
return sampleEntropy
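# Design note for the entropy helpers above: the WAP series is irregularly
# sampled in time, so it is first resampled onto a regular 1-second grid with
# nearest-neighbour interpolation (a step function that invents no new values)
# before the sample entropy is computed.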
def linearFit(book_stock_time, last_min):
if last_min < 10:
book_stock_time = book_stock_time[book_stock_time['seconds_in_bucket'] >= (600-last_min*60)]
    if book_stock_time.empty or book_stock_time.shape[0] < 2:
return 0
wap = np.array(compute_wap(book_stock_time))
t_init = book_stock_time['seconds_in_bucket']
return (wap[-1] - wap[0])/(np.max(t_init) - np.min(t_init))
def wapStat(book_stock_time, last_min):
if last_min < 10:
book_stock_time = book_stock_time[book_stock_time['seconds_in_bucket'] >= (600-last_min*60)]
    if book_stock_time.empty or book_stock_time.shape[0] < 2:
return 0
wap = compute_wap(book_stock_time)
t_init = book_stock_time['seconds_in_bucket']
t_new = np.arange(np.min(t_init),np.max(t_init))
# Closest neighbour interpolation (no changes in wap between lines)
nearest = interp1d(t_init, wap, kind='nearest')
resampled_wap = nearest(t_new)
return np.std(resampled_wap)
def entropy_Prediction(book_path_train,prediction_column_name,train_targets_pd,book_path_test,all_stocks_ids,test_file):
# Compute features
book_features_encoded_test = computeFeatures_1(book_path_test,'test',test_file,all_stocks_ids)
book_features_encoded_train = computeFeatures_1(book_path_train,'train',train_targets_pd,all_stocks_ids)
X = book_features_encoded_train.drop(['row_id','target','stock_id'],axis=1)
y = book_features_encoded_train['target']
# Modeling
catboost_default = CatBoostRegressor(verbose=0)
catboost_default.fit(X,y)
# Predict
X_test = book_features_encoded_test.drop(['row_id','stock_id'],axis=1)
yhat = catboost_default.predict(X_test)
# Formatting
yhat_pd = pd.DataFrame(yhat,columns=['target'])
predictions = pd.concat([test_file,yhat_pd],axis=1)
return predictions
def computeFeatures_1(book_path,prediction_column_name,train_targets_pd,all_stocks_ids):
book_all_features = pd.DataFrame()
encoder = np.eye(len(all_stocks_ids))
stocks_id_list, row_id_list = [], []
volatility_list, entropy2_list = [], []
linearFit_list, linearFit5_list, linearFit2_list = [], [], []
wap_std_list, wap_std5_list, wap_std2_list = [], [], []
for file in book_path:
start = time.time()
book_stock = pd.read_parquet(file)
stock_id = file.split('=')[1]
print('stock id computing = ' + str(stock_id))
stock_time_ids = book_stock['time_id'].unique()
for time_id in stock_time_ids:
# Access book data at this time + stock
book_stock_time = book_stock[book_stock['time_id'] == time_id]
# Create feature matrix
stocks_id_list.append(stock_id)
row_id_list.append(str(f'{stock_id}-{time_id}'))
volatility_list.append(realized_volatility_from_book_pd(book_stock_time=book_stock_time))
entropy2_list.append(entropy_from_book(book_stock_time=book_stock_time,last_min=2))
linearFit_list.append(linearFit(book_stock_time=book_stock_time,last_min=10))
linearFit5_list.append(linearFit(book_stock_time=book_stock_time,last_min=5))
linearFit2_list.append(linearFit(book_stock_time=book_stock_time,last_min=2))
wap_std_list.append(wapStat(book_stock_time=book_stock_time,last_min=10))
wap_std5_list.append(wapStat(book_stock_time=book_stock_time,last_min=5))
wap_std2_list.append(wapStat(book_stock_time=book_stock_time,last_min=2))
print('Computing one stock entropy took', time.time() - start, 'seconds for stock ', stock_id)
# Merge targets
stocks_id_pd = pd.DataFrame(stocks_id_list,columns=['stock_id'])
row_id_pd = pd.DataFrame(row_id_list,columns=['row_id'])
volatility_pd = pd.DataFrame(volatility_list,columns=['volatility'])
entropy2_pd = pd.DataFrame(entropy2_list,columns=['entropy2'])
linearFit_pd = pd.DataFrame(linearFit_list,columns=['linearFit_coef'])
linearFit5_pd = pd.DataFrame(linearFit5_list,columns=['linearFit_coef5'])
linearFit2_pd = pd.DataFrame(linearFit2_list,columns=['linearFit_coef2'])
wap_std_pd = pd.DataFrame(wap_std_list,columns=['wap_std'])
wap_std5_pd = pd.DataFrame(wap_std5_list,columns=['wap_std5'])
wap_std2_pd = pd.DataFrame(wap_std2_list,columns=['wap_std2'])
book_all_features = pd.concat([stocks_id_pd,row_id_pd,volatility_pd,entropy2_pd,linearFit_pd,linearFit5_pd,linearFit2_pd,
wap_std_pd,wap_std5_pd,wap_std2_pd],axis=1)
# This line makes sure the predictions are aligned with the row_id in the submission file
book_all_features = train_targets_pd.merge(book_all_features, on = ['row_id'])
# Add encoded stock
encoded = list()
for i in range(book_all_features.shape[0]):
stock_id = book_all_features['stock_id'][i]
encoded_stock = encoder[np.where(all_stocks_ids == int(stock_id))[0],:]
encoded.append(encoded_stock)
encoded_pd = pd.DataFrame(np.array(encoded).reshape(book_all_features.shape[0],np.array(all_stocks_ids).shape[0]))
book_all_features_encoded = pd.concat([book_all_features, encoded_pd],axis=1)
return book_all_features_encoded
def calc_wap(df):
return (df['bid_price1'] * df['ask_size1'] + df['ask_price1'] * df['bid_size1']) / (df['bid_size1'] + df['ask_size1'])
def calc_wap2(df):
return (df['bid_price2'] * df['ask_size2'] + df['ask_price2'] * df['bid_size2']) / (df['bid_size2'] + df['ask_size2'])
def calc_wap3(df):
return (df['bid_price2'] * df['bid_size2'] + df['ask_price2'] * df['ask_size2']) / (df['bid_size2'] + df['ask_size2'])
def calc_wap4(df):
return (df['bid_price1'] * df['bid_size1'] + df['ask_price1'] * df['ask_size1']) / (df['bid_size1'] + df['ask_size1'])
def mid_price(df):
return df['bid_price1'] /2 + df['ask_price1'] / 2
def calc_rv_from_wap_numba(values, index):
log_return = np.diff(np.log(values))
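    # np.diff over the log prices yields the per-step log returns; the first
    # return is then skipped below via log_return[1:].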
realized_vol = np.sqrt(np.sum(np.square(log_return[1:])))
return realized_vol
def load_book_data_by_id(stock_id,datapath,train_test):
file_to_read = os.path.join(datapath,'book_' + str(train_test) + str('.parquet'),'stock_id=' + str(stock_id))
df = pd.read_parquet(file_to_read)
return df
def load_trades_data_by_id(stock_id,datapath,train_test):
file_to_read = os.path.join(datapath,'trade_' + str(train_test) + str('.parquet'),'stock_id=' + str(stock_id))
df = pd.read_parquet(file_to_read)
return df
def entropy_from_df(df):
if df.shape[0] < 3:
return 0
t_init = df['seconds_in_bucket']
t_new = np.arange(np.min(t_init),np.max(t_init))
# Closest neighbour interpolation (no changes in wap between lines)
nearest = interp1d(t_init, df['wap'], kind='nearest')
resampled_wap = nearest(t_new)
# Compute sample entropy
# sampleEntropy = nolds.sampen(resampled_wap)
sampleEntropy = sampen(resampled_wap)
return sampleEntropy
def entropy_from_df2(df):
if df.shape[0] < 3:
return 0
t_init = df['seconds_in_bucket']
t_new = np.arange(np.min(t_init),np.max(t_init))
# Closest neighbour interpolation (no changes in wap between lines)
nearest = interp1d(t_init, df['wap2'], kind='nearest')
resampled_wap = nearest(t_new)
# Compute sample entropy
# sampleEntropy = nolds.sampen(resampled_wap)
sampleEntropy = sampen(resampled_wap)
return sampleEntropy
def entropy_from_df3(df):
if df.shape[0] < 3:
return 0
t_init = df['seconds_in_bucket']
t_new = np.arange(np.min(t_init),np.max(t_init))
# Closest neighbour interpolation (no changes in wap between lines)
nearest = interp1d(t_init, df['wap3'], kind='nearest')
resampled_wap = nearest(t_new)
# Compute sample entropy
sampleEntropy = sampen(resampled_wap)
return sampleEntropy
def financial_metrics(df):
wap_imbalance = np.mean(df['wap'] - df['wap2'])
price_spread = np.mean((df['ask_price1'] - df['bid_price1']) / ((df['ask_price1'] + df['bid_price1'])/2))
bid_spread = np.mean(df['bid_price1'] - df['bid_price2'])
ask_spread = np.mean(df['ask_price1'] - df['ask_price2']) # Abs to take
total_volume = np.mean((df['ask_size1'] + df['ask_size2']) + (df['bid_size1'] + df['bid_size2']))
volume_imbalance = np.mean(abs((df['ask_size1'] + df['ask_size2']) - (df['bid_size1'] + df['bid_size2'])))
return [wap_imbalance,price_spread,bid_spread,ask_spread,total_volume,volume_imbalance]
def financial_metrics_2(df):
wap_imbalance = df['wap'] - df['wap2']
price_spread = (df['ask_price1'] - df['bid_price1']) / ((df['ask_price1'] + df['bid_price1'])/2)
bid_spread = df['bid_price1'] - df['bid_price2']
ask_spread = df['ask_price1'] - df['ask_price2'] # Abs to take
total_volume = (df['ask_size1'] + df['ask_size2']) + (df['bid_size1'] + df['bid_size2'])
volume_imbalance = abs((df['ask_size1'] + df['ask_size2']) - (df['bid_size1'] + df['bid_size2']))
# New features here
wap_imbalance_mean = np.mean(wap_imbalance)
wap_imbalance_sum = np.sum(wap_imbalance)
wap_imbalance_std = np.std(wap_imbalance)
wap_imbalance_max = np.max(wap_imbalance)
wap_imbalance_min = np.min(wap_imbalance)
price_spread_mean = np.mean(price_spread)
price_spread_sum = np.sum(price_spread)
price_spread_std = np.std(price_spread)
price_spread_max = np.max(price_spread)
price_spread_min = np.min(price_spread)
bid_spread_mean = np.mean(bid_spread)
bid_spread_sum = np.sum(bid_spread)
bid_spread_std = np.std(bid_spread)
bid_spread_max = np.max(bid_spread)
bid_spread_min = np.min(bid_spread)
ask_spread_mean = np.mean(ask_spread)
ask_spread_sum = np.sum(ask_spread)
ask_spread_std = np.std(ask_spread)
ask_spread_max = np.max(ask_spread)
ask_spread_min = np.min(ask_spread)
total_volume_mean = np.mean(total_volume)
total_volume_sum = np.sum(total_volume)
total_volume_std = np.std(total_volume)
total_volume_max = np.max(total_volume)
total_volume_min = np.min(total_volume)
volume_imbalance_mean = np.mean(volume_imbalance)
volume_imbalance_sum = np.sum(volume_imbalance)
volume_imbalance_std = np.std(volume_imbalance)
volume_imbalance_max = np.max(volume_imbalance)
volume_imbalance_min = np.min(volume_imbalance)
return [wap_imbalance_mean,price_spread_mean,bid_spread_mean,ask_spread_mean,total_volume_mean,volume_imbalance_mean, wap_imbalance_sum,price_spread_sum,bid_spread_sum,ask_spread_sum,total_volume_sum,volume_imbalance_sum, wap_imbalance_std,price_spread_std,bid_spread_std,ask_spread_std,total_volume_std,volume_imbalance_std, wap_imbalance_max,price_spread_max,bid_spread_max,ask_spread_max,total_volume_max,volume_imbalance_max, wap_imbalance_min,price_spread_min,bid_spread_min,ask_spread_min,total_volume_min,volume_imbalance_min]
def other_metrics(df):
if df.shape[0] < 2:
linearFit = 0
linearFit2 = 0
linearFit3 = 0
std_1 = 0
std_2 = 0
std_3 = 0
else:
linearFit = (df['wap'].iloc[-1] - df['wap'].iloc[0]) / ((np.max(df['seconds_in_bucket']) - np.min(df['seconds_in_bucket'])))
        linearFit2 = (df['wap2'].iloc[-1] - df['wap2'].iloc[0]) / ((np.max(df['seconds_in_bucket']) - np.min(df['seconds_in_bucket'])))
from keras.models import Model
from keras.layers import Dense, Input
# from keras.layers import Dropout
from keras import regularizers
# from keras.layers import BatchNormalization
from keras.callbacks import CSVLogger
# from keras import initializers
import tensorflow as tf
from keras.backend import tensorflow_backend as K
from preprocess import unsw, nslkdd
from netlearner.utils import permutate_dataset, min_max_scale
import numpy as np
import logging
def multicore_session():
config = tf.ConfigProto(intra_op_parallelism_threads=32,
inter_op_parallelism_threads=32,
allow_soft_placement=True,
log_device_placement=False,
device_count={'CPU': 64})
session = tf.Session(config=config)
K.set_session(session)
def process_unsw(root='/home/naruto/NetLearner'):
unsw.generate_dataset(False)
raw_X_train = np.load('%s/UNSW/train_dataset.npy' % root)
y_train = np.load('%s/UNSW/train_labels.npy' % root)
raw_X_test = np.load('%s/UNSW/test_dataset.npy' % root)
y_test = np.load('%s/UNSW/test_labels.npy' % root)
[X_train, _, X_test] = min_max_scale(raw_X_train, None, raw_X_test)
permutate_dataset(X_train, y_train)
permutate_dataset(X_test, y_test)
print('Training set', X_train.shape, y_train.shape)
print('Test set', X_test.shape, y_test.shape)
return {'X': X_train, 'y': y_train, 'X_test': X_test, 'y_test': y_test}
def process_nsl(root='/home/naruto/NetLearner'):
nslkdd.generate_datasets(binary_label=True, one_hot_encoding=False)
    raw_X_train = np.load('%s/NSLKDD/train_dataset.npy' % root)
# Copyright 2021-2022 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Optional
import numpy as np
from .. import legion
if TYPE_CHECKING:
import numpy.typing as npt
from . import Point
class Transform:
trans: npt.NDArray[np.int64]
def __init__(self, M: int, N: int, eye: bool = True):
"""
A Transform wraps an `legion_transform_{m}x{n}_t` in the Legion C API.
A transform is simply an MxN matrix that can be used to convert Point
objects from one coordinate space to another.
"""
self.M = M
self.N = N
if eye:
self.trans = np.eye(M, N, dtype=np.int64)
else:
self.trans = np.zeros((M, N), dtype=np.int64)
self.handle: Optional[Any] = None
def apply(self, point: Point) -> tuple[float, ...]:
"""
Convert an N-D Point into an M-D point using this transform
"""
if len(point) != self.N:
raise ValueError("Dimension mismatch")
result: list[float] = []
for m in range(self.M):
value = 0
for n in range(self.N):
value += self.trans[m, n] * point[n]
result.append(value)
return tuple(result)
def compose(self, outer: Transform) -> Transform:
"""
Construct a composed transform of this transform with another transform
"""
if outer.N != self.M:
raise ValueError("Dimension mismatch")
result = Transform(outer.M, self.N, eye=False)
np.matmul(outer.trans, self.trans, out=result.trans)
return result
def raw(self) -> Any:
if self.handle is None:
self.handle = legion.legion_domain_transform_identity(
self.M, self.N
)
self.handle.m = self.M
self.handle.n = self.N
for m in range(self.M):
for n in range(self.N):
self.handle.matrix[m * self.N + n] = self.trans[m, n]
return self.handle
def __eq__(self, other: object) -> bool:
if not isinstance(other, Transform):
return NotImplemented
return (
self.M == other.M
and self.N == other.N
and np.array_equal(self.trans, other.trans)
)
def __hash__(self) -> int:
return hash(self.trans.tobytes())
def __str__(self) -> str:
return np.array_repr(self.trans).replace("\n", "").replace(" ", "")
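# A minimal sketch of using Transform (illustrative values only):
#
#   t = Transform(2, 2)      # starts as the 2x2 identity
#   t.trans[0, 1] = 3        # shear: (x, y) -> (x + 3*y, y)
#   assert t.apply((1, 2)) == (7, 2)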
# An Affine Transform for points in one space to points in another
class AffineTransform:
transform: npt.NDArray[np.int64]
def __init__(self, M: int, N: int, eye: bool = True):
"""
An AffineTransform wraps a `legion_affine_transform_{m}x{n}_t` in the
Legion C API. The AffineTransform class represents an affine transform
as a MxN affine transform as an (M+1)x(N+1) matrix and can used to
transform N-D Point objects into M-D Point objects. AffineTransform
objects can also be naturally composed to construct new
AffineTransforms.
"""
self.M = M
self.N = N
if eye:
            self.transform = np.eye(M + 1, N + 1, dtype=np.int64)
from datashape import dshape
import pandas as pd
import numpy as np
import pytest
from datashader.glyphs import (Point, _build_draw_line, _build_map_onto_pixel,
_build_extend_line, _build_draw_triangle,
_build_extend_triangles)
from datashader.utils import ngjit
def test_point_bounds_check():
df = pd.DataFrame({'x': [1, 2, 3], 'y': [5, 6, 7]})
p = Point('x', 'y')
assert p._compute_x_bounds(df['x'].values) == (1, 3)
assert p._compute_y_bounds(df['y'].values) == (5, 7)
def test_point_validate():
p = Point('x', 'y')
p.validate(dshape("{x: int32, y: float32}"))
with pytest.raises(ValueError):
p.validate(dshape("{x: string, y: float32}"))
@ngjit
def append(i, x, y, agg):
agg[y, x] += 1
@ngjit
def tri_append(x, y, agg, n):
agg[y, x] += n
def new_agg():
return np.zeros((5, 5), dtype='i4')
mapper = ngjit(lambda x: x)
map_onto_pixel = _build_map_onto_pixel(mapper, mapper)
# Line rasterization
draw_line = _build_draw_line(append)
extend_line = _build_extend_line(draw_line, map_onto_pixel)
# Triangles rasterization
draw_triangle, draw_triangle_interp = _build_draw_triangle(tri_append)
extend_triangles = _build_extend_triangles(draw_triangle, draw_triangle_interp, map_onto_pixel)
bounds = (-3, 1, -3, 1)
vt = (1., 3., 1., 3.)
def test_draw_line():
x0, y0 = (0, 0)
x1, y1 = (3, 3)
out = np.array([[1, 0, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 0]])
agg = new_agg()
draw_line(x0, y0, x1, y1, 0, True, False, agg)
np.testing.assert_equal(agg, out)
agg = new_agg()
draw_line(x1, y1, x0, y0, 0, True, False, agg)
np.testing.assert_equal(agg, out)
# plot_start = False
agg = new_agg()
draw_line(x0, y0, x1, y1, 0, False, False, agg)
out[0, 0] = 0
np.testing.assert_equal(agg, out)
agg = new_agg()
draw_line(x1, y1, x0, y0, 0, False, False, agg)
out[0, 0] = 1
out[3, 3] = 0
np.testing.assert_equal(agg, out)
# Flip coords
x0, y0 = (0, 4)
x1, y1 = (3, 1)
out = np.array([[0, 0, 0, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 1, 0, 0],
[0, 1, 0, 0, 0],
[1, 0, 0, 0, 0]])
agg = new_agg()
draw_line(x0, y0, x1, y1, 0, True, False, agg)
np.testing.assert_equal(agg, out)
agg = new_agg()
draw_line(x1, y1, x0, y0, 0, True, False, agg)
np.testing.assert_equal(agg, out)
# plot_start = False
agg = new_agg()
draw_line(x0, y0, x1, y1, 0, False, False, agg)
out[4, 0] = 0
np.testing.assert_equal(agg, out)
agg = new_agg()
draw_line(x1, y1, x0, y0, 0, False, False, agg)
out[4, 0] = 1
    out[1, 3] = 0
    np.testing.assert_equal(agg, out)
def test_draw_line_same_point():
x0, y0 = (3, 3)
x1, y1 = (3, 3)
agg = new_agg()
draw_line(x0, y0, x1, y1, 0, True, False, agg)
assert agg.sum() == 2
assert agg[3, 3] == 2
agg = new_agg()
draw_line(x0, y0, x1, y1, 0, False, False, agg)
assert agg.sum() == 1
assert agg[3, 3] == 1
agg = new_agg()
draw_line(x0, y0, x1, y1, 0, True, True, agg)
assert agg.sum() == 1
assert agg[3, 3] == 1
def test_draw_line_vertical_horizontal():
# Vertical
x0, y0 = (3, 3)
x1, y1 = (3, 0)
agg = new_agg()
draw_line(x0, y0, x1, y1, 0, True, False, agg)
out = new_agg()
out[:4, 3] = 1
np.testing.assert_equal(agg, out)
# Horizontal
agg = new_agg()
draw_line(y0, x0, y1, x1, 0, True, False, agg)
out = new_agg()
out[3, :4] = 1
np.testing.assert_equal(agg, out)
def test_extend_lines():
    xs = np.array([0, -2, -2, 0, 0])
"""
N-feature logistic regression for binary classification
"""
import tensorflow as tf
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import utils
# Preset plotting parameters
plt.rcParams['font.sans-serif'] = "Arial Unicode MS"
plt.rcParams['axes.unicode_minus'] = False  # display minus signs correctly
plt.figure(figsize=(8, 4))
cm_pt = mpl.colors.ListedColormap(['royalblue', 'orangered'])
cm_bg = mpl.colors.ListedColormap(['cyan', 'gold'])
iris_train = utils.get_arr_from_csv('iris_training.csv')
iris_test = utils.get_arr_from_csv('iris_test.csv')
# Preprocess the dataset
def pre_precess(dataset):
    x = dataset[:, :4]  # extract the first 4 feature columns
    y = dataset[:, 4]  # extract the labels
    # keep only two classes: filter out samples with label == 2
x = x[y < 2]
y = y[y < 2]
    # mean-center the features
x = x - np.mean(x, axis=0)
return x, y
train_x, train_y = pre_precess(iris_train)
test_x, test_y = pre_precess(iris_test)
n1 = len(train_x)
n2 = len(test_x)
print(n1, n2)
lr = 0.01
iters = 500
step = 50
train_X = tf.cast(tf.concat((np.ones(n1, ).reshape(-1, 1), train_x), axis=1), tf.float32)
test_X = tf.cast(tf.concat((np.ones(n2, ).reshape(-1, 1), test_x), axis=1), tf.float32)
train_Y = tf.cast(train_y.reshape(-1, 1), tf.float32)
test_Y = tf.cast(test_y.reshape(-1, 1), tf.float32)
print(train_X.shape, train_Y.shape)
W = tf.Variable(np.random.randn(5, 1))
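# The 5 weights correspond to the 4 iris features plus the bias column of ones
# prepended to train_X and test_X above.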
import numpy as np
import tensorflow as tf
import cv2
from scipy.ndimage.filters import gaussian_filter
from scipy.ndimage.interpolation import map_coordinates
import random
import BasicImagePairAugmentation
class ElasticTransformer(BasicImagePairAugmentation.BasicImagePairAugmentation):
""" computes an elastic deformation simultaneous for a pair of image and mask
Based on https://gist.github.com/erniejunior/601cdf56d2b424757de5
and https://www.kaggle.com/bguberfain/elastic-transform-for-data-augmentation
TODO: replace ndimage.gaussian_filter(image, sigma) by blur_size = int(4*sigma) | 1; cv2.GaussianBlur(image, ksize=(blur_size, blur_size), sigmaX=sigma)
TODO: generalize for image.shape = shape1,shape2,numchannels (colorized image)
"""
def __init__(self,alpha=1.5, sigma=0.07, alpha_affine=0.03, random_state=None, parameters=None):
super().__init__(str(type(self).__name__))
        if random_state is None:
            self.random_state = np.random.RandomState(None)
        else:
            self.random_state = random_state
if (parameters is None):
self.parameters['alpha'] = alpha
self.parameters['sigma'] = sigma
self.parameters['alpha_affine'] = alpha_affine
else:
self.parameters = parameters
def modifyImagePair(self, inputImg, outputImg):
mergedImage = np.concatenate((inputImg[..., None], outputImg[..., None]), axis=2)
shapeImg = mergedImage.shape
shape_size = shapeImg[:2]
alpha = shapeImg[1]*self.parameters['alpha']
sigma = shapeImg[1] * self.parameters['sigma']
alpha_affine = shapeImg[1] * self.parameters['alpha_affine']
center_square = np.float32(shape_size) // 2
square_size = min(shape_size) // 3
pts1 = np.float32([center_square + square_size, [center_square[0]+square_size,
center_square[1]-square_size], center_square - square_size])
pts2 = pts1 + self.random_state.uniform(-alpha_affine,
alpha_affine, size=pts1.shape).astype(np.float32)
M = cv2.getAffineTransform(pts1, pts2)
mergedImage = cv2.warpAffine(mergedImage, M, shape_size[::-1], borderMode=cv2.BORDER_REFLECT_101)
dx = gaussian_filter((self.random_state.rand(*shapeImg) * 2 - 1), sigma) * alpha
dy = gaussian_filter((self.random_state.rand(*shapeImg) * 2 - 1), sigma) * alpha
        dz = np.zeros_like(dx)
import numpy as np
def energy(X,W_fixed):
"""
Input X is a vector (N by 1), and the shape of W_fixed is N by N.\n
Return a 2D array E with shape (1,1)
"""
return (-1/2) * np.dot(np.dot(np.transpose(X),W_fixed),X)
def f(W,X):
"""
W : W_fixed after storaging phase with shape (N,N) \n
X : testing pattern X with shape (N,1) \n
Return X_new with shape (N,1)
"""
i = 0
v = np.dot(W,X)
for value in v :
if value > 0 :
v[i,0] = 1
elif value < 0 :
v[i,0] = -1
else :
v[i,0] = X[i,0]
i += 1
return v
X0 = np.array([ 1,-1, 1,-1, 1,-1])
X2 = np.array([-1, 1,-1, 1,-1, 1])
X4 = np.array([ 1, 1, 1, 1, 1, 1])
X6 = np.array([-1,-1,-1,-1,-1,-1])
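# A minimal recall sketch (the Hebbian storage rule here is an assumption, not
# part of the original file):
#
#   patterns = np.column_stack([X0, X2, X4, X6]).astype(float)
#   W_fixed = patterns @ patterns.T / 6.0
#   np.fill_diagonal(W_fixed, 0)            # no self-connections
#   x = X0.reshape(-1, 1).astype(float)
#   x_rec = f(W_fixed, x)                   # recovers X0; energy(x_rec, W_fixed) <= energy(x, W_fixed)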
""" Data generators for training/inference with siamese Keras model.
"""
import warnings
from typing import List, Iterator, NamedTuple
import numpy as np
import pandas as pd
from tensorflow.keras.utils import Sequence
from .typing import BinnedSpectrumType
class SpectrumPair(NamedTuple):
"""
Represents a pair of binned spectrums
"""
spectrum1: BinnedSpectrumType
spectrum2: BinnedSpectrumType
class DataGeneratorBase(Sequence):
def __init__(self, binned_spectrums: List[BinnedSpectrumType],
reference_scores_df: pd.DataFrame, dim: int, **settings):
"""Base for data generator generating data for a siamese model.
Parameters
----------
binned_spectrums
List of BinnedSpectrum objects with the binned peak positions and intensities.
reference_scores_df
Pandas DataFrame with reference similarity scores (=labels) for compounds identified
by inchikeys (first 14 characters). Columns and index should be inchikeys, the value
in a row x column depicting the similarity score for that pair. Must be symmetric
(reference_scores_df[i,j] == reference_scores_df[j,i]) and column names should be
identical to the index.
dim
Input vector dimension.
As part of **settings, defaults for the following parameters can be set:
batch_size
Number of pairs per batch. Default=32.
num_turns
Number of pairs for each InChiKey14 during each epoch. Default=1.
shuffle
Set to True to shuffle IDs every epoch. Default=True
ignore_equal_pairs
Set to True to ignore pairs of two identical spectra. Default=True
same_prob_bins
List of tuples that define ranges of the true label to be trained with
equal frequencies. Default is set to [(0, 0.5), (0.5, 1)], which means
that pairs with scores <=0.5 will be picked as often as pairs with scores
> 0.5.
augment_removal_max
            Maximum fraction of peaks (with intensity below augment_removal_intensity)
            to be removed randomly. Default is set to 0.3, which means that between
            0 and 30% of all peaks with intensities < augment_removal_intensity
will be removed.
augment_removal_intensity
Specifying that only peaks with intensities < max_intensity will be removed.
augment_intensity
Change peak intensities by a random number between 0 and augment_intensity.
            Default=0.4, which means that intensities are multiplied by 1 +- a random
            number within [0, 0.4].
augment_noise_max
            Maximum number of 'new' noise peaks to add to the spectrum; between 0 and
            `augment_noise_max` peaks are added.
augment_noise_intensity
Intensity of the 'new' noise peaks to add to the spectrum
use_fixed_set
Toggles using a fixed dataset, if set to True the same dataset will be generated each
epoch. Default is False.
"""
self._validate_labels(reference_scores_df)
# Set all other settings to input (or otherwise to defaults):
self._set_generator_parameters(**settings)
self.binned_spectrums = binned_spectrums
self.reference_scores_df = self._exclude_nans_from_labels(reference_scores_df)
self.reference_scores_df = self._transform_to_inchikey14(self.reference_scores_df)
self._collect_and_validate_inchikeys()
self.dim = dim
self.fixed_set = dict()
def _collect_and_validate_inchikeys(self):
"""Collect all inchikeys14 (first 14 characters) of all binned_spectrums
and check if all are present in the reference scores as well.
"""
self.spectrum_inchikeys = np.array([s.get("inchikey")[:14] for s in self.binned_spectrums])
for inchikey in np.unique(self.spectrum_inchikeys):
assert inchikey in self.reference_scores_df.index, \
"InChIKey in given spectrum not found in reference scores"
@staticmethod
def _validate_labels(reference_scores_df: pd.DataFrame):
if set(reference_scores_df.index) != set(reference_scores_df.columns):
raise ValueError("index and columns of reference_scores_df are not identical")
@staticmethod
def _transform_to_inchikey14(reference_scores_df: pd.DataFrame):
"""Transform index and column names from potential full InChIKeys to InChIKey14"""
reference_scores_df.index = [x[:14] for x in reference_scores_df.index]
reference_scores_df.columns = [x[:14] for x in reference_scores_df.columns]
return reference_scores_df
@staticmethod
def _exclude_nans_from_labels(reference_scores_df: pd.DataFrame):
"""Exclude nans in reference_scores_df, exclude columns and rows if there is any NaN
value"""
clean_df = reference_scores_df.dropna(axis='rows') # drop rows with any NaN
clean_df = clean_df[clean_df.index] # drop corresponding columns
n_dropped = len(reference_scores_df) - len(clean_df)
if n_dropped > 0:
print(f"{n_dropped} nans among {len(reference_scores_df)} labels will be excluded.")
return clean_df
def _set_generator_parameters(self, **settings):
"""Set parameter for data generator. Use below listed defaults unless other
input is provided.
Parameters
----------
batch_size
Number of pairs per batch. Default=32.
num_turns
Number of pairs for each InChiKey14 during each epoch. Default=1
shuffle
Set to True to shuffle IDs every epoch. Default=True
ignore_equal_pairs
Set to True to ignore pairs of two identical spectra. Default=True
same_prob_bins
List of tuples that define ranges of the true label to be trained with
equal frequencies. Default is set to [(0, 0.5), (0.5, 1)], which means
that pairs with scores <=0.5 will be picked as often as pairs with scores
> 0.5.
augment_removal_max
            Maximum fraction of peaks (with intensity below augment_removal_intensity)
            to be removed randomly. Default is set to 0.3, which means that between
            0 and 30% of all peaks with intensities < augment_removal_intensity
will be removed.
augment_removal_intensity
Specifying that only peaks with intensities < max_intensity will be removed.
augment_intensity
Change peak intensities by a random number between 0 and augment_intensity.
            Default=0.4, which means that intensities are multiplied by 1 +- a random
            number within [0, 0.4].
augment_noise_max
            Maximum number of 'new' noise peaks to add to the spectrum; between 0 and
            `augment_noise_max` peaks are added.
augment_noise_intensity
Intensity of the 'new' noise peaks to add to the spectrum
use_fixed_set
Toggles using a fixed dataset, if set to True the same dataset will be generated each
epoch. Default is False.
"""
defaults = dict(
batch_size=32,
num_turns=1,
ignore_equal_pairs=True,
shuffle=True,
same_prob_bins=[(0, 0.5), (0.5, 1)],
augment_removal_max= 0.3,
augment_removal_intensity=0.2,
augment_intensity=0.4,
augment_noise_max=10,
augment_noise_intensity=0.01,
use_fixed_set=False
)
# Set default parameters or replace by **settings input
for key in defaults:
if key in settings:
print("The value for {} is set from {} (default) to {}".format(key, defaults[key],
settings[key]))
else:
settings[key] = defaults[key]
assert 0.0 <= settings["augment_removal_max"] <= 1.0, "Expected value within [0,1]"
assert 0.0 <= settings["augment_removal_intensity"] <= 1.0, "Expected value within [0,1]"
if settings["use_fixed_set"] and settings["shuffle"]:
warnings.warn('When using a fixed set, data will not be shuffled')
if settings["use_fixed_set"]:
np.random.seed(42)
self.settings = settings
def _find_match_in_range(self, inchikey1, target_score_range):
"""Randomly pick ID for a pair with inchikey_id1 that has a score in
target_score_range. When no such score exists, iteratively widen the range
in steps of 0.1.
Parameters
----------
inchikey1
Inchikey (first 14 characters) to be paired up with another compound within
target_score_range.
target_score_range
lower and upper bound of label (score) to find an ID of.
"""
# Part 1 - find match within range (or expand range iteratively)
extend_range = 0
low, high = target_score_range
inchikey2 = None
while inchikey2 is None:
matching_inchikeys = self.reference_scores_df.index[
(self.reference_scores_df[inchikey1] > low - extend_range)
& (self.reference_scores_df[inchikey1] <= high + extend_range)]
if self.settings["ignore_equal_pairs"]:
matching_inchikeys = matching_inchikeys[matching_inchikeys != inchikey1]
if len(matching_inchikeys) > 0:
inchikey2 = np.random.choice(matching_inchikeys)
extend_range += 0.1
return inchikey2
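    # Example of the widening behaviour above: with target_score_range=(0.5, 1.0)
    # and no partner in that bin, the search retries with (0.4, 1.1), (0.3, 1.2),
    # and so on, until at least one matching inchikey exists.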
def __getitem__(self, batch_index: int):
"""Generate one batch of data.
If use_fixed_set=True we try retrieving the batch from self.fixed_set (or store it if
this is the first epoch). This ensures a fixed set of data is generated each epoch.
"""
if self.settings['use_fixed_set'] and batch_index in self.fixed_set:
return self.fixed_set[batch_index]
if self.settings['use_fixed_set'] and batch_index == 0:
np.random.seed(42)
spectrum_pairs = self._spectrum_pair_generator(batch_index)
X, y = self.__data_generation(spectrum_pairs)
if self.settings['use_fixed_set']:
self.fixed_set[batch_index] = (X, y)
return X, y
def _data_augmentation(self, spectrum_binned):
"""Data augmentation.
Parameters
----------
spectrum_binned
Dictionary with the binned peak positions and intensities.
"""
idx = np.array([int(x) for x in spectrum_binned.keys()])
values = np.array(list(spectrum_binned.values()))
# Augmentation 1: peak removal (peaks < augment_removal_max)
if self.settings["augment_removal_max"] or self.settings["augment_removal_intensity"]:
# TODO: Factor out function with documentation + example?
indices_select = np.where(values < self.settings["augment_removal_max"])[0]
removal_part = np.random.random(1) * self.settings["augment_removal_max"]
indices_select = np.random.choice(indices_select,
int(np.ceil((1 - removal_part)*len(indices_select))))
            indices = np.concatenate((indices_select,
                                      np.where(values >= self.settings["augment_removal_intensity"])[0]))
#! /usr/bin/env python
#
# Author: <NAME>
# Date: April 17, 2008
#
# Copyright (C) 2008 <NAME>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import division, print_function, absolute_import
import os.path
from scipy._lib.six import xrange, u
import numpy as np
from numpy.linalg import norm
from numpy.testing import (verbose, TestCase, run_module_suite, assert_,
assert_raises, assert_array_equal, assert_equal, assert_almost_equal,
assert_allclose)
from scipy.spatial.distance import (squareform, pdist, cdist, matching,
jaccard, dice, sokalsneath, rogerstanimoto, russellrao, yule,
num_obs_y, num_obs_dm, is_valid_dm, is_valid_y, minkowski, wminkowski,
euclidean, sqeuclidean, cosine, correlation, hamming, mahalanobis,
canberra, braycurtis, sokalmichener, _validate_vector)
_filenames = ["iris.txt",
"cdist-X1.txt",
"cdist-X2.txt",
"pdist-hamming-ml.txt",
"pdist-boolean-inp.txt",
"pdist-jaccard-ml.txt",
"pdist-cityblock-ml-iris.txt",
"pdist-minkowski-3.2-ml-iris.txt",
"pdist-cityblock-ml.txt",
"pdist-correlation-ml-iris.txt",
"pdist-minkowski-5.8-ml-iris.txt",
"pdist-correlation-ml.txt",
"pdist-minkowski-3.2-ml.txt",
"pdist-cosine-ml-iris.txt",
"pdist-seuclidean-ml-iris.txt",
"pdist-cosine-ml.txt",
"pdist-seuclidean-ml.txt",
"pdist-double-inp.txt",
"pdist-spearman-ml.txt",
"pdist-euclidean-ml.txt",
"pdist-euclidean-ml-iris.txt",
"pdist-chebychev-ml.txt",
"pdist-chebychev-ml-iris.txt",
"random-bool-data.txt"]
_tdist = np.array([[0, 662, 877, 255, 412, 996],
[662, 0, 295, 468, 268, 400],
[877, 295, 0, 754, 564, 138],
[255, 468, 754, 0, 219, 869],
[412, 268, 564, 219, 0, 669],
[996, 400, 138, 869, 669, 0]], dtype='double')
_ytdist = squareform(_tdist)
# A hashmap of expected output arrays for the tests. These arrays
# come from a list of text files, which are read prior to testing.
# Each test loads inputs and outputs from this dictionary.
eo = {}
def load_testing_files():
for fn in _filenames:
name = fn.replace(".txt", "").replace("-ml", "")
fqfn = os.path.join(os.path.dirname(__file__), 'data', fn)
fp = open(fqfn)
eo[name] = np.loadtxt(fp)
fp.close()
eo['pdist-boolean-inp'] = np.bool_(eo['pdist-boolean-inp'])
load_testing_files()
class TestCdist(TestCase):
def test_cdist_euclidean_random(self):
eps = 1e-07
# Get the data: the input matrix and the right output.
X1 = eo['cdist-X1']
X2 = eo['cdist-X2']
Y1 = cdist(X1, X2, 'euclidean')
Y2 = cdist(X1, X2, 'test_euclidean')
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_euclidean_random_unicode(self):
eps = 1e-07
X1 = eo['cdist-X1']
X2 = eo['cdist-X2']
Y1 = cdist(X1, X2, u('euclidean'))
Y2 = cdist(X1, X2, u('test_euclidean'))
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_sqeuclidean_random(self):
eps = 1e-07
X1 = eo['cdist-X1']
X2 = eo['cdist-X2']
Y1 = cdist(X1, X2, 'sqeuclidean')
Y2 = cdist(X1, X2, 'test_sqeuclidean')
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_cityblock_random(self):
eps = 1e-07
X1 = eo['cdist-X1']
X2 = eo['cdist-X2']
Y1 = cdist(X1, X2, 'cityblock')
Y2 = cdist(X1, X2, 'test_cityblock')
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_hamming_double_random(self):
eps = 1e-07
X1 = eo['cdist-X1']
X2 = eo['cdist-X2']
Y1 = cdist(X1, X2, 'hamming')
Y2 = cdist(X1, X2, 'test_hamming')
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_hamming_bool_random(self):
eps = 1e-07
X1 = eo['cdist-X1'] < 0.5
X2 = eo['cdist-X2'] < 0.5
Y1 = cdist(X1, X2, 'hamming')
Y2 = cdist(X1, X2, 'test_hamming')
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_jaccard_double_random(self):
eps = 1e-07
X1 = eo['cdist-X1']
X2 = eo['cdist-X2']
Y1 = cdist(X1, X2, 'jaccard')
Y2 = cdist(X1, X2, 'test_jaccard')
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_jaccard_bool_random(self):
eps = 1e-07
X1 = eo['cdist-X1'] < 0.5
X2 = eo['cdist-X2'] < 0.5
Y1 = cdist(X1, X2, 'jaccard')
Y2 = cdist(X1, X2, 'test_jaccard')
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_chebychev_random(self):
eps = 1e-07
X1 = eo['cdist-X1']
X2 = eo['cdist-X2']
Y1 = cdist(X1, X2, 'chebychev')
Y2 = cdist(X1, X2, 'test_chebychev')
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_minkowski_random_p3d8(self):
eps = 1e-07
X1 = eo['cdist-X1']
X2 = eo['cdist-X2']
Y1 = cdist(X1, X2, 'minkowski', p=3.8)
Y2 = cdist(X1, X2, 'test_minkowski', p=3.8)
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_minkowski_random_p4d6(self):
eps = 1e-07
X1 = eo['cdist-X1']
X2 = eo['cdist-X2']
Y1 = cdist(X1, X2, 'minkowski', p=4.6)
Y2 = cdist(X1, X2, 'test_minkowski', p=4.6)
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_minkowski_random_p1d23(self):
eps = 1e-07
X1 = eo['cdist-X1']
X2 = eo['cdist-X2']
Y1 = cdist(X1, X2, 'minkowski', p=1.23)
Y2 = cdist(X1, X2, 'test_minkowski', p=1.23)
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_wminkowski_random_p3d8(self):
eps = 1e-07
X1 = eo['cdist-X1']
X2 = eo['cdist-X2']
w = 1.0 / X1.std(axis=0)
Y1 = cdist(X1, X2, 'wminkowski', p=3.8, w=w)
Y2 = cdist(X1, X2, 'test_wminkowski', p=3.8, w=w)
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_wminkowski_int_weights(self):
# regression test when using integer weights
eps = 1e-07
X1 = eo['cdist-X1']
X2 = eo['cdist-X2']
w = np.arange(X1.shape[1])
Y1 = cdist(X1, X2, 'wminkowski', p=3.8, w=w)
Y2 = cdist(X1, X2, 'test_wminkowski', p=3.8, w=w)
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_wminkowski_random_p4d6(self):
eps = 1e-07
X1 = eo['cdist-X1']
X2 = eo['cdist-X2']
w = 1.0 / X1.std(axis=0)
Y1 = cdist(X1, X2, 'wminkowski', p=4.6, w=w)
Y2 = cdist(X1, X2, 'test_wminkowski', p=4.6, w=w)
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_wminkowski_random_p1d23(self):
eps = 1e-07
X1 = eo['cdist-X1']
X2 = eo['cdist-X2']
w = 1.0 / X1.std(axis=0)
Y1 = cdist(X1, X2, 'wminkowski', p=1.23, w=w)
Y2 = cdist(X1, X2, 'test_wminkowski', p=1.23, w=w)
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_seuclidean_random(self):
eps = 1e-07
X1 = eo['cdist-X1']
X2 = eo['cdist-X2']
Y1 = cdist(X1, X2, 'seuclidean')
Y2 = cdist(X1, X2, 'test_seuclidean')
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_cosine_random(self):
eps = 1e-07
X1 = eo['cdist-X1']
X2 = eo['cdist-X2']
Y1 = cdist(X1, X2, 'cosine')
# Naive implementation
def norms(X):
# NumPy 1.7: np.linalg.norm(X, axis=1).reshape(-1, 1)
return np.asarray([np.linalg.norm(row)
for row in X]).reshape(-1, 1)
Y2 = 1 - np.dot((X1 / norms(X1)), (X2 / norms(X2)).T)
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_correlation_random(self):
eps = 1e-07
X1 = eo['cdist-X1']
X2 = eo['cdist-X2']
Y1 = cdist(X1, X2, 'correlation')
Y2 = cdist(X1, X2, 'test_correlation')
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_mahalanobis_random(self):
eps = 1e-07
X1 = eo['cdist-X1']
X2 = eo['cdist-X2']
Y1 = cdist(X1, X2, 'mahalanobis')
Y2 = cdist(X1, X2, 'test_mahalanobis')
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_mahalanobis(self):
# 1-dimensional observations
x1 = np.array([[2], [3]])
x2 = np.array([[2], [5]])
dist = cdist(x1, x2, metric='mahalanobis')
assert_allclose(dist, [[0.0, np.sqrt(4.5)], [np.sqrt(0.5), np.sqrt(2)]])
# 2-dimensional observations
x1 = np.array([[0, 0], [-1, 0]])
x2 = np.array([[0, 2], [1, 0], [0, -2]])
dist = cdist(x1, x2, metric='mahalanobis')
rt2 = np.sqrt(2)
assert_allclose(dist, [[rt2, rt2, rt2], [2, 2*rt2, 2]])
# Too few observations
assert_raises(ValueError,
cdist, [[0, 1]], [[2, 3]], metric='mahalanobis')
def test_cdist_canberra_random(self):
eps = 1e-07
X1 = eo['cdist-X1'] < 0.5
X2 = eo['cdist-X2'] < 0.5
Y1 = cdist(X1, X2, 'canberra')
Y2 = cdist(X1, X2, 'test_canberra')
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_braycurtis_random(self):
eps = 1e-07
X1 = eo['cdist-X1'] < 0.5
X2 = eo['cdist-X2'] < 0.5
Y1 = cdist(X1, X2, 'braycurtis')
Y2 = cdist(X1, X2, 'test_braycurtis')
if verbose > 2:
print(Y1, Y2)
print((Y1-Y2).max())
_assert_within_tol(Y1, Y2, eps)
def test_cdist_yule_random(self):
eps = 1e-07
X1 = eo['cdist-X1'] < 0.5
X2 = eo['cdist-X2'] < 0.5
Y1 = cdist(X1, X2, 'yule')
Y2 = cdist(X1, X2, 'test_yule')
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_matching_random(self):
eps = 1e-07
X1 = eo['cdist-X1'] < 0.5
X2 = eo['cdist-X2'] < 0.5
Y1 = cdist(X1, X2, 'matching')
Y2 = cdist(X1, X2, 'test_matching')
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_kulsinski_random(self):
eps = 1e-07
X1 = eo['cdist-X1'] < 0.5
X2 = eo['cdist-X2'] < 0.5
Y1 = cdist(X1, X2, 'kulsinski')
Y2 = cdist(X1, X2, 'test_kulsinski')
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_dice_random(self):
eps = 1e-07
X1 = eo['cdist-X1'] < 0.5
X2 = eo['cdist-X2'] < 0.5
Y1 = cdist(X1, X2, 'dice')
Y2 = cdist(X1, X2, 'test_dice')
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_rogerstanimoto_random(self):
eps = 1e-07
X1 = eo['cdist-X1'] < 0.5
X2 = eo['cdist-X2'] < 0.5
Y1 = cdist(X1, X2, 'rogerstanimoto')
Y2 = cdist(X1, X2, 'test_rogerstanimoto')
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_russellrao_random(self):
eps = 1e-07
X1 = eo['cdist-X1'] < 0.5
X2 = eo['cdist-X2'] < 0.5
Y1 = cdist(X1, X2, 'russellrao')
Y2 = cdist(X1, X2, 'test_russellrao')
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_sokalmichener_random(self):
eps = 1e-07
X1 = eo['cdist-X1'] < 0.5
X2 = eo['cdist-X2'] < 0.5
Y1 = cdist(X1, X2, 'sokalmichener')
Y2 = cdist(X1, X2, 'test_sokalmichener')
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_sokalsneath_random(self):
eps = 1e-07
X1 = eo['cdist-X1'] < 0.5
X2 = eo['cdist-X2'] < 0.5
Y1 = cdist(X1, X2, 'sokalsneath')
Y2 = cdist(X1, X2, 'test_sokalsneath')
_assert_within_tol(Y1, Y2, eps, verbose > 2)
class TestPdist(TestCase):
def test_pdist_euclidean_random(self):
eps = 1e-07
X = eo['pdist-double-inp']
Y_right = eo['pdist-euclidean']
Y_test1 = pdist(X, 'euclidean')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_euclidean_random_u(self):
eps = 1e-07
X = eo['pdist-double-inp']
Y_right = eo['pdist-euclidean']
Y_test1 = pdist(X, u('euclidean'))
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_euclidean_random_float32(self):
eps = 1e-07
X = np.float32(eo['pdist-double-inp'])
Y_right = eo['pdist-euclidean']
Y_test1 = pdist(X, 'euclidean')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_euclidean_random_nonC(self):
eps = 1e-07
X = eo['pdist-double-inp']
Y_right = eo['pdist-euclidean']
Y_test2 = pdist(X, 'test_euclidean')
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_euclidean_iris_double(self):
eps = 1e-07
X = eo['iris']
Y_right = eo['pdist-euclidean-iris']
Y_test1 = pdist(X, 'euclidean')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_euclidean_iris_float32(self):
eps = 1e-06
X = np.float32(eo['iris'])
Y_right = eo['pdist-euclidean-iris']
Y_test1 = pdist(X, 'euclidean')
_assert_within_tol(Y_test1, Y_right, eps, verbose > 2)
def test_pdist_euclidean_iris_nonC(self):
# Test pdist(X, 'test_euclidean') [the non-C implementation] on the
# Iris data set.
eps = 1e-07
X = eo['iris']
Y_right = eo['pdist-euclidean-iris']
Y_test2 = pdist(X, 'test_euclidean')
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_seuclidean_random(self):
eps = 1e-05
X = eo['pdist-double-inp']
Y_right = eo['pdist-seuclidean']
Y_test1 = pdist(X, 'seuclidean')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_seuclidean_random_float32(self):
eps = 1e-05
X = np.float32(eo['pdist-double-inp'])
Y_right = eo['pdist-seuclidean']
Y_test1 = pdist(X, 'seuclidean')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_seuclidean_random_nonC(self):
# Test pdist(X, 'test_sqeuclidean') [the non-C implementation]
eps = 1e-05
X = eo['pdist-double-inp']
Y_right = eo['pdist-seuclidean']
Y_test2 = pdist(X, 'test_sqeuclidean')
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_seuclidean_iris(self):
eps = 1e-05
X = eo['iris']
Y_right = eo['pdist-seuclidean-iris']
Y_test1 = pdist(X, 'seuclidean')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_seuclidean_iris_float32(self):
# Tests pdist(X, 'seuclidean') on the Iris data set (float32).
eps = 1e-05
X = np.float32(eo['iris'])
Y_right = eo['pdist-seuclidean-iris']
Y_test1 = pdist(X, 'seuclidean')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_seuclidean_iris_nonC(self):
# Test pdist(X, 'test_seuclidean') [the non-C implementation] on the
# Iris data set.
eps = 1e-05
X = eo['iris']
Y_right = eo['pdist-seuclidean-iris']
        Y_test2 = pdist(X, 'test_seuclidean')
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_cosine_random(self):
eps = 1e-08
X = eo['pdist-double-inp']
Y_right = eo['pdist-cosine']
Y_test1 = pdist(X, 'cosine')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_cosine_random_float32(self):
eps = 1e-08
X = np.float32(eo['pdist-double-inp'])
Y_right = eo['pdist-cosine']
Y_test1 = pdist(X, 'cosine')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_cosine_random_nonC(self):
# Test pdist(X, 'test_cosine') [the non-C implementation]
eps = 1e-08
X = eo['pdist-double-inp']
Y_right = eo['pdist-cosine']
Y_test2 = pdist(X, 'test_cosine')
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_cosine_iris(self):
eps = 1e-08
X = eo['iris']
Y_right = eo['pdist-cosine-iris']
Y_test1 = pdist(X, 'cosine')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_cosine_iris_float32(self):
eps = 1e-07
X = np.float32(eo['iris'])
Y_right = eo['pdist-cosine-iris']
Y_test1 = pdist(X, 'cosine')
_assert_within_tol(Y_test1, Y_right, eps, verbose > 2)
def test_pdist_cosine_iris_nonC(self):
eps = 1e-08
X = eo['iris']
Y_right = eo['pdist-cosine-iris']
Y_test2 = pdist(X, 'test_cosine')
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_cityblock_random(self):
eps = 1e-06
X = eo['pdist-double-inp']
Y_right = eo['pdist-cityblock']
Y_test1 = pdist(X, 'cityblock')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_cityblock_random_float32(self):
eps = 1e-06
X = np.float32(eo['pdist-double-inp'])
Y_right = eo['pdist-cityblock']
Y_test1 = pdist(X, 'cityblock')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_cityblock_random_nonC(self):
eps = 1e-06
X = eo['pdist-double-inp']
Y_right = eo['pdist-cityblock']
Y_test2 = pdist(X, 'test_cityblock')
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_cityblock_iris(self):
eps = 1e-14
X = eo['iris']
Y_right = eo['pdist-cityblock-iris']
Y_test1 = pdist(X, 'cityblock')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_cityblock_iris_float32(self):
eps = 1e-06
X = np.float32(eo['iris'])
Y_right = eo['pdist-cityblock-iris']
Y_test1 = pdist(X, 'cityblock')
_assert_within_tol(Y_test1, Y_right, eps, verbose > 2)
def test_pdist_cityblock_iris_nonC(self):
# Test pdist(X, 'test_cityblock') [the non-C implementation] on the
# Iris data set.
eps = 1e-14
X = eo['iris']
Y_right = eo['pdist-cityblock-iris']
Y_test2 = pdist(X, 'test_cityblock')
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_correlation_random(self):
eps = 1e-07
X = eo['pdist-double-inp']
Y_right = eo['pdist-correlation']
Y_test1 = pdist(X, 'correlation')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_correlation_random_float32(self):
eps = 1e-07
X = np.float32(eo['pdist-double-inp'])
Y_right = eo['pdist-correlation']
Y_test1 = pdist(X, 'correlation')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_correlation_random_nonC(self):
eps = 1e-07
X = eo['pdist-double-inp']
Y_right = eo['pdist-correlation']
Y_test2 = pdist(X, 'test_correlation')
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_correlation_iris(self):
eps = 1e-08
X = eo['iris']
Y_right = eo['pdist-correlation-iris']
Y_test1 = pdist(X, 'correlation')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_correlation_iris_float32(self):
eps = 1e-07
        X = np.float32(eo['iris'])
        Y_right = eo['pdist-correlation-iris']
Y_test1 = pdist(X, 'correlation')
_assert_within_tol(Y_test1, Y_right, eps, verbose > 2)
def test_pdist_correlation_iris_nonC(self):
eps = 1e-08
X = eo['iris']
Y_right = eo['pdist-correlation-iris']
Y_test2 = pdist(X, 'test_correlation')
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_minkowski_random(self):
eps = 1e-05
X = eo['pdist-double-inp']
Y_right = eo['pdist-minkowski-3.2']
Y_test1 = pdist(X, 'minkowski', 3.2)
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_minkowski_random_float32(self):
eps = 1e-05
X = np.float32(eo['pdist-double-inp'])
Y_right = eo['pdist-minkowski-3.2']
Y_test1 = pdist(X, 'minkowski', 3.2)
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_minkowski_random_nonC(self):
eps = 1e-05
X = eo['pdist-double-inp']
Y_right = eo['pdist-minkowski-3.2']
Y_test2 = pdist(X, 'test_minkowski', 3.2)
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_minkowski_3_2_iris(self):
eps = 1e-07
X = eo['iris']
Y_right = eo['pdist-minkowski-3.2-iris']
Y_test1 = pdist(X, 'minkowski', 3.2)
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_minkowski_3_2_iris_float32(self):
eps = 1e-06
X = np.float32(eo['iris'])
Y_right = eo['pdist-minkowski-3.2-iris']
Y_test1 = pdist(X, 'minkowski', 3.2)
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_minkowski_3_2_iris_nonC(self):
eps = 1e-07
X = eo['iris']
Y_right = eo['pdist-minkowski-3.2-iris']
Y_test2 = pdist(X, 'test_minkowski', 3.2)
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_minkowski_5_8_iris(self):
eps = 1e-07
X = eo['iris']
Y_right = eo['pdist-minkowski-5.8-iris']
Y_test1 = pdist(X, 'minkowski', 5.8)
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_minkowski_5_8_iris_float32(self):
eps = 1e-06
X = np.float32(eo['iris'])
Y_right = eo['pdist-minkowski-5.8-iris']
Y_test1 = pdist(X, 'minkowski', 5.8)
_assert_within_tol(Y_test1, Y_right, eps, verbose > 2)
def test_pdist_minkowski_5_8_iris_nonC(self):
eps = 1e-07
X = eo['iris']
Y_right = eo['pdist-minkowski-5.8-iris']
Y_test2 = pdist(X, 'test_minkowski', 5.8)
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_wminkowski(self):
x = np.array([[0.0, 0.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[1.0, 1.0, 1.0]])
p2_expected = [1.0, 1.0, np.sqrt(3),
np.sqrt(2), np.sqrt(2),
np.sqrt(2)]
p1_expected = [0.5, 1.0, 3.5,
1.5, 3.0,
2.5]
dist = pdist(x, metric=wminkowski, w=[1.0, 1.0, 1.0])
assert_allclose(dist, p2_expected, rtol=1e-14)
dist = pdist(x, metric=wminkowski, w=[0.5, 1.0, 2.0], p=1)
assert_allclose(dist, p1_expected, rtol=1e-14)
dist = pdist(x, metric='wminkowski', w=[1.0, 1.0, 1.0])
assert_allclose(dist, p2_expected, rtol=1e-14)
dist = pdist(x, metric='wminkowski', w=[0.5, 1.0, 2.0], p=1)
assert_allclose(dist, p1_expected, rtol=1e-14)
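    # Worked check for the weighted p=1 case above (added note): wminkowski
    # with w=[0.5, 1.0, 2.0] and p=1 computes sum_i w_i * |u_i - v_i|, so for
    # the row pairs of x, in pdist order:
    #   d(x0, x1) = 0.5*1           = 0.5
    #   d(x0, x2) = 1.0*1           = 1.0
    #   d(x0, x3) = 0.5 + 1.0 + 2.0 = 3.5
    #   d(x1, x2) = 0.5 + 1.0       = 1.5
    #   d(x1, x3) = 1.0 + 2.0       = 3.0
    #   d(x2, x3) = 0.5 + 2.0       = 2.5
    # which is exactly p1_expected.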
def test_pdist_wminkowski_int_weights(self):
# regression test for int weights
x = np.array([[0.0, 0.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[1.0, 1.0, 1.0]])
dist1 = pdist(x, metric='wminkowski', w=np.arange(3), p=1)
dist2 = pdist(x, metric='wminkowski', w=[0., 1., 2.], p=1)
assert_allclose(dist1, dist2, rtol=1e-14)
def test_pdist_mahalanobis(self):
# 1-dimensional observations
x = np.array([2.0, 2.0, 3.0, 5.0]).reshape(-1, 1)
dist = pdist(x, metric='mahalanobis')
assert_allclose(dist, [0.0, np.sqrt(0.5), np.sqrt(4.5),
np.sqrt(0.5), np.sqrt(4.5), np.sqrt(2.0)])
# 2-dimensional observations
x = np.array([[0, 0], [-1, 0], [0, 2], [1, 0], [0, -2]])
dist = pdist(x, metric='mahalanobis')
rt2 = np.sqrt(2)
assert_allclose(dist, [rt2, rt2, rt2, rt2, 2, 2*rt2, 2, 2, 2*rt2, 2])
# Too few observations
assert_raises(ValueError,
pdist, [[0, 1], [2, 3]], metric='mahalanobis')
def test_pdist_hamming_random(self):
eps = 1e-07
X = eo['pdist-boolean-inp']
Y_right = eo['pdist-hamming']
Y_test1 = pdist(X, 'hamming')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_hamming_random_float32(self):
eps = 1e-07
X = np.float32(eo['pdist-boolean-inp'])
Y_right = eo['pdist-hamming']
Y_test1 = pdist(X, 'hamming')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_hamming_random_nonC(self):
eps = 1e-07
X = eo['pdist-boolean-inp']
Y_right = eo['pdist-hamming']
Y_test2 = pdist(X, 'test_hamming')
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_dhamming_random(self):
eps = 1e-07
X = np.float64(eo['pdist-boolean-inp'])
Y_right = eo['pdist-hamming']
Y_test1 = pdist(X, 'hamming')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_dhamming_random_float32(self):
eps = 1e-07
X = np.float32(eo['pdist-boolean-inp'])
Y_right = eo['pdist-hamming']
Y_test1 = pdist(X, 'hamming')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_dhamming_random_nonC(self):
eps = 1e-07
X = np.float64(eo['pdist-boolean-inp'])
Y_right = eo['pdist-hamming']
Y_test2 = pdist(X, 'test_hamming')
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_jaccard_random(self):
eps = 1e-08
X = eo['pdist-boolean-inp']
Y_right = eo['pdist-jaccard']
Y_test1 = pdist(X, 'jaccard')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_jaccard_random_float32(self):
eps = 1e-08
X = np.float32(eo['pdist-boolean-inp'])
Y_right = eo['pdist-jaccard']
Y_test1 = pdist(X, 'jaccard')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_jaccard_random_nonC(self):
eps = 1e-08
X = eo['pdist-boolean-inp']
Y_right = eo['pdist-jaccard']
Y_test2 = pdist(X, 'test_jaccard')
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_djaccard_random(self):
eps = 1e-08
X = np.float64(eo['pdist-boolean-inp'])
Y_right = eo['pdist-jaccard']
Y_test1 = pdist(X, 'jaccard')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_djaccard_random_float32(self):
eps = 1e-08
X = np.float32(eo['pdist-boolean-inp'])
Y_right = eo['pdist-jaccard']
Y_test1 = pdist(X, 'jaccard')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_djaccard_random_nonC(self):
eps = 1e-08
X = np.float64(eo['pdist-boolean-inp'])
Y_right = eo['pdist-jaccard']
Y_test2 = pdist(X, 'test_jaccard')
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_chebychev_random(self):
eps = 1e-08
X = eo['pdist-double-inp']
Y_right = eo['pdist-chebychev']
Y_test1 = pdist(X, 'chebychev')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_chebychev_random_float32(self):
eps = 1e-07
X = np.float32(eo['pdist-double-inp'])
Y_right = eo['pdist-chebychev']
Y_test1 = pdist(X, 'chebychev')
_assert_within_tol(Y_test1, Y_right, eps, verbose > 2)
def test_pdist_chebychev_random_nonC(self):
eps = 1e-08
X = eo['pdist-double-inp']
Y_right = eo['pdist-chebychev']
Y_test2 = pdist(X, 'test_chebychev')
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_chebychev_iris(self):
eps = 1e-15
X = eo['iris']
Y_right = eo['pdist-chebychev-iris']
Y_test1 = pdist(X, 'chebychev')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_chebychev_iris_float32(self):
eps = 1e-06
X = np.float32(eo['iris'])
Y_right = eo['pdist-chebychev-iris']
Y_test1 = pdist(X, 'chebychev')
_assert_within_tol(Y_test1, Y_right, eps, verbose > 2)
def test_pdist_chebychev_iris_nonC(self):
eps = 1e-15
X = eo['iris']
Y_right = eo['pdist-chebychev-iris']
Y_test2 = pdist(X, 'test_chebychev')
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_matching_mtica1(self):
# Test matching(*,*) with mtica example #1 (nums).
m = matching(np.array([1, 0, 1, 1, 0]),
np.array([1, 1, 0, 1, 1]))
m2 = matching(np.array([1, 0, 1, 1, 0], dtype=np.bool),
np.array([1, 1, 0, 1, 1], dtype=np.bool))
assert_allclose(m, 0.6, rtol=0, atol=1e-10)
assert_allclose(m2, 0.6, rtol=0, atol=1e-10)
def test_pdist_matching_mtica2(self):
# Test matching(*,*) with mtica example #2.
m = matching(np.array([1, 0, 1]),
np.array([1, 1, 0]))
m2 = matching(np.array([1, 0, 1], dtype=np.bool),
np.array([1, 1, 0], dtype=np.bool))
assert_allclose(m, 2/3, rtol=0, atol=1e-10)
assert_allclose(m2, 2/3, rtol=0, atol=1e-10)
def test_pdist_matching_match(self):
# Test pdist(X, 'matching') to see if the two implementations match on
# random boolean input data.
D = eo['random-bool-data']
B = np.bool_(D)
if verbose > 2:
print(B.shape, B.dtype)
eps = 1e-10
y1 = pdist(B, "matching")
y2 = pdist(B, "test_matching")
y3 = pdist(D, "test_matching")
if verbose > 2:
print(np.abs(y1-y2).max())
print(np.abs(y1-y3).max())
_assert_within_tol(y1, y2, eps)
_assert_within_tol(y2, y3, eps)
def test_pdist_jaccard_mtica1(self):
m = jaccard(np.array([1, 0, 1, 1, 0]),
np.array([1, 1, 0, 1, 1]))
m2 = jaccard(np.array([1, 0, 1, 1, 0], dtype=np.bool),
np.array([1, 1, 0, 1, 1], dtype=np.bool))
assert_allclose(m, 0.6, rtol=0, atol=1e-10)
assert_allclose(m2, 0.6, rtol=0, atol=1e-10)
def test_pdist_jaccard_mtica2(self):
m = jaccard(np.array([1, 0, 1]),
np.array([1, 1, 0]))
m2 = jaccard(np.array([1, 0, 1], dtype=np.bool),
np.array([1, 1, 0], dtype=np.bool))
assert_allclose(m, 2/3, rtol=0, atol=1e-10)
assert_allclose(m2, 2/3, rtol=0, atol=1e-10)
def test_pdist_jaccard_match(self):
# Test pdist(X, 'jaccard') to see if the two implementations match on
# random double input data.
D = eo['random-bool-data']
if verbose > 2:
print(D.shape, D.dtype)
eps = 1e-10
y1 = pdist(D, "jaccard")
y2 = pdist(D, "test_jaccard")
y3 = pdist(np.bool_(D), "test_jaccard")
if verbose > 2:
print(np.abs(y1-y2).max())
print(np.abs(y2-y3).max())
_assert_within_tol(y1, y2, eps)
_assert_within_tol(y2, y3, eps)
def test_pdist_yule_mtica1(self):
m = yule(np.array([1, 0, 1, 1, 0]),
np.array([1, 1, 0, 1, 1]))
m2 = yule(np.array([1, 0, 1, 1, 0], dtype=np.bool),
np.array([1, 1, 0, 1, 1], dtype=np.bool))
if verbose > 2:
print(m)
assert_allclose(m, 2, rtol=0, atol=1e-10)
assert_allclose(m2, 2, rtol=0, atol=1e-10)
def test_pdist_yule_mtica2(self):
m = yule(np.array([1, 0, 1]),
np.array([1, 1, 0]))
m2 = yule(np.array([1, 0, 1], dtype=np.bool),
np.array([1, 1, 0], dtype=np.bool))
if verbose > 2:
print(m)
assert_allclose(m, 2, rtol=0, atol=1e-10)
assert_allclose(m2, 2, rtol=0, atol=1e-10)
def test_pdist_yule_match(self):
D = eo['random-bool-data']
if verbose > 2:
print(D.shape, D.dtype)
eps = 1e-10
y1 = pdist(D, "yule")
y2 = pdist(D, "test_yule")
y3 = pdist(np.bool_(D), "test_yule")
if verbose > 2:
print(np.abs(y1-y2).max())
print(np.abs(y2-y3).max())
_assert_within_tol(y1, y2, eps)
_assert_within_tol(y2, y3, eps)
def test_pdist_dice_mtica1(self):
m = dice(np.array([1, 0, 1, 1, 0]),
np.array([1, 1, 0, 1, 1]))
m2 = dice(np.array([1, 0, 1, 1, 0], dtype=np.bool),
np.array([1, 1, 0, 1, 1], dtype=np.bool))
if verbose > 2:
print(m)
assert_allclose(m, 3/7, rtol=0, atol=1e-10)
assert_allclose(m2, 3/7, rtol=0, atol=1e-10)
def test_pdist_dice_mtica2(self):
m = dice(np.array([1, 0, 1]),
np.array([1, 1, 0]))
m2 = dice(np.array([1, 0, 1], dtype=np.bool),
np.array([1, 1, 0], dtype=np.bool))
if verbose > 2:
print(m)
assert_allclose(m, 0.5, rtol=0, atol=1e-10)
assert_allclose(m2, 0.5, rtol=0, atol=1e-10)
def test_pdist_dice_match(self):
D = eo['random-bool-data']
if verbose > 2:
print(D.shape, D.dtype)
eps = 1e-10
y1 = pdist(D, "dice")
y2 = pdist(D, "test_dice")
y3 = pdist(D, "test_dice")
if verbose > 2:
print(np.abs(y1-y2).max())
print(np.abs(y2-y3).max())
_assert_within_tol(y1, y2, eps)
_assert_within_tol(y2, y3, eps)
def test_pdist_sokalsneath_mtica1(self):
m = sokalsneath(np.array([1, 0, 1, 1, 0]),
np.array([1, 1, 0, 1, 1]))
m2 = sokalsneath(np.array([1, 0, 1, 1, 0], dtype=np.bool),
np.array([1, 1, 0, 1, 1], dtype=np.bool))
if verbose > 2:
print(m)
assert_allclose(m, 3/4, rtol=0, atol=1e-10)
assert_allclose(m2, 3/4, rtol=0, atol=1e-10)
def test_pdist_sokalsneath_mtica2(self):
m = sokalsneath(np.array([1, 0, 1]),
np.array([1, 1, 0]))
m2 = sokalsneath(np.array([1, 0, 1], dtype=np.bool),
np.array([1, 1, 0], dtype=np.bool))
if verbose > 2:
print(m)
assert_allclose(m, 4/5, rtol=0, atol=1e-10)
assert_allclose(m2, 4/5, rtol=0, atol=1e-10)
def test_pdist_sokalsneath_match(self):
D = eo['random-bool-data']
if verbose > 2:
print(D.shape, D.dtype)
eps = 1e-10
y1 = pdist(D, "sokalsneath")
y2 = pdist(D, "test_sokalsneath")
y3 = pdist(np.bool_(D), "test_sokalsneath")
if verbose > 2:
print(np.abs(y1-y2).max())
print(np.abs(y2-y3).max())
_assert_within_tol(y1, y2, eps)
_assert_within_tol(y2, y3, eps)
def test_pdist_rogerstanimoto_mtica1(self):
m = rogerstanimoto(np.array([1, 0, 1, 1, 0]),
np.array([1, 1, 0, 1, 1]))
m2 = rogerstanimoto(np.array([1, 0, 1, 1, 0], dtype=np.bool),
np.array([1, 1, 0, 1, 1], dtype=np.bool))
if verbose > 2:
print(m)
assert_allclose(m, 3/4, rtol=0, atol=1e-10)
assert_allclose(m2, 3/4, rtol=0, atol=1e-10)
def test_pdist_rogerstanimoto_mtica2(self):
m = rogerstanimoto(np.array([1, 0, 1]),
np.array([1, 1, 0]))
m2 = rogerstanimoto(np.array([1, 0, 1], dtype=np.bool),
np.array([1, 1, 0], dtype=np.bool))
if verbose > 2:
print(m)
assert_allclose(m, 4/5, rtol=0, atol=1e-10)
assert_allclose(m2, 4/5, rtol=0, atol=1e-10)
def test_pdist_rogerstanimoto_match(self):
D = eo['random-bool-data']
if verbose > 2:
print(D.shape, D.dtype)
eps = 1e-10
y1 = pdist(D, "rogerstanimoto")
y2 = pdist(D, "test_rogerstanimoto")
y3 = pdist(np.bool_(D), "test_rogerstanimoto")
if verbose > 2:
print(np.abs(y1-y2).max())
print(np.abs(y2-y3).max())
_assert_within_tol(y1, y2, eps)
_assert_within_tol(y2, y3, eps)
def test_pdist_russellrao_mtica1(self):
m = russellrao(np.array([1, 0, 1, 1, 0]),
np.array([1, 1, 0, 1, 1]))
m2 = russellrao(np.array([1, 0, 1, 1, 0], dtype=np.bool),
np.array([1, 1, 0, 1, 1], dtype=np.bool))
if verbose > 2:
print(m)
assert_allclose(m, 3/5, rtol=0, atol=1e-10)
assert_allclose(m2, 3/5, rtol=0, atol=1e-10)
def test_pdist_russellrao_mtica2(self):
m = russellrao(np.array([1, 0, 1]),
np.array([1, 1, 0]))
m2 = russellrao(np.array([1, 0, 1], dtype=np.bool),
np.array([1, 1, 0], dtype=np.bool))
if verbose > 2:
print(m)
assert_allclose(m, 2/3, rtol=0, atol=1e-10)
assert_allclose(m2, 2/3, rtol=0, atol=1e-10)
def test_pdist_russellrao_match(self):
D = eo['random-bool-data']
if verbose > 2:
print(D.shape, D.dtype)
eps = 1e-10
y1 = pdist(D, "russellrao")
y2 = pdist(D, "test_russellrao")
y3 = pdist(np.bool_(D), "test_russellrao")
if verbose > 2:
print(np.abs(y1-y2).max())
print(np.abs(y2-y3).max())
_assert_within_tol(y1, y2, eps)
_assert_within_tol(y2, y3, eps)
def test_pdist_sokalmichener_match(self):
D = eo['random-bool-data']
if verbose > 2:
print(D.shape, D.dtype)
eps = 1e-10
y1 = pdist(D, "sokalmichener")
y2 = pdist(D, "test_sokalmichener")
y3 = pdist(np.bool_(D), "test_sokalmichener")
if verbose > 2:
print(np.abs(y1-y2).max())
print(np.abs(y2-y3).max())
_assert_within_tol(y1, y2, eps)
_assert_within_tol(y2, y3, eps)
def test_pdist_kulsinski_match(self):
D = eo['random-bool-data']
if verbose > 2:
print(D.shape, D.dtype)
eps = 1e-10
y1 = pdist(D, "kulsinski")
y2 = pdist(D, "test_kulsinski")
y3 = pdist(np.bool_(D), "test_kulsinski")
_assert_within_tol(y1, y2, eps, verbose > 2)
_assert_within_tol(y2, y3, eps)
def test_pdist_canberra_match(self):
D = eo['iris']
if verbose > 2:
print(D.shape, D.dtype)
eps = 1e-10
y1 = pdist(D, "canberra")
y2 = pdist(D, "test_canberra")
_assert_within_tol(y1, y2, eps, verbose > 2)
def test_pdist_canberra_ticket_711(self):
# Test pdist(X, 'canberra') to see if Canberra gives the right result
# as reported on gh-1238.
eps = 1e-8
pdist_y = pdist(([3.3], [3.4]), "canberra")
right_y = 0.01492537
_assert_within_tol(pdist_y, right_y, eps, verbose > 2)
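# Worked arithmetic for the Canberra regression value above (added note):
# canberra(u, v) = sum_i |u_i - v_i| / (|u_i| + |v_i|), so for ([3.3], [3.4])
# the distance is 0.1 / 6.7 = 0.01492537..., matching right_y.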
def within_tol(a, b, tol):
return np.abs(a - b).max() < tol
def _assert_within_tol(a, b, atol, verbose_=False):
if verbose_:
print(np.abs(a-b).max())
assert_allclose(a, b, rtol=0, atol=atol)
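# Illustrative usage of the tolerance helpers above (added sketch; not
# executed by the suite):
#
#     a = np.array([1.0, 2.0])
#     b = np.array([1.0 + 5e-9, 2.0])
#     within_tol(a, b, 1e-8)          # -> True, since max |a-b| = 5e-9
#     _assert_within_tol(a, b, 1e-8)  # passes; prints max |a-b| if verbose_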
class TestSomeDistanceFunctions(TestCase):
def setUp(self):
# 1D arrays
x = np.array([1.0, 2.0, 3.0])
y = np.array([1.0, 1.0, 5.0])
# 3x1 arrays
x31 = x[:,np.newaxis]
y31 = y[:,np.newaxis]
# 1x3 arrays
x13 = x31.T
y13 = y31.T
self.cases = [(x,y), (x31, y31), (x13, y13)]
def test_minkowski(self):
for x, y in self.cases:
dist1 = minkowski(x, y, p=1)
assert_almost_equal(dist1, 3.0)
dist1p5 = minkowski(x, y, p=1.5)
assert_almost_equal(dist1p5, (1.0+2.0**1.5)**(2./3))
dist2 = minkowski(x, y, p=2)
assert_almost_equal(dist2, np.sqrt(5))
def test_wminkowski(self):
w = np.array([1.0, 2.0, 0.5])
for x, y in self.cases:
dist1 = wminkowski(x, y, p=1, w=w)
assert_almost_equal(dist1, 3.0)
dist1p5 = wminkowski(x, y, p=1.5, w=w)
assert_almost_equal(dist1p5, (2.0**1.5+1.0)**(2./3))
dist2 = wminkowski(x, y, p=2, w=w)
assert_almost_equal(dist2, np.sqrt(5))
def test_euclidean(self):
for x, y in self.cases:
dist = euclidean(x, y)
assert_almost_equal(dist, np.sqrt(5))
def test_sqeuclidean(self):
for x, y in self.cases:
dist = sqeuclidean(x, y)
assert_almost_equal(dist, 5.0)
def test_cosine(self):
for x, y in self.cases:
dist = cosine(x, y)
assert_almost_equal(dist, 1.0 - 18.0/(np.sqrt(14)*np.sqrt(27)))
def test_correlation(self):
xm = np.array([-1.0, 0, 1.0])
ym = np.array([-4.0/3, -4.0/3, 5.0-7.0/3])
for x, y in self.cases:
dist = correlation(x, y)
assert_almost_equal(dist, 1.0 - np.dot(xm, ym)/(norm(xm)*norm(ym)))
def test_mahalanobis(self):
x = np.array([1.0, 2.0, 3.0])
y = np.array([1.0, 1.0, 5.0])
vi = np.array([[2.0, 1.0, 0.0],[1.0, 2.0, 1.0], [0.0, 1.0, 2.0]])
for x, y in self.cases:
dist = mahalanobis(x, y, vi)
assert_almost_equal(dist, np.sqrt(6.0))
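    # Worked check for the expected value above (added note): with
    # diff = x - y = [0, 1, -2] and the given inverse covariance vi,
    # vi @ diff = [1, 0, -3], so
    # mahalanobis(x, y, vi) = sqrt(diff @ vi @ diff)
    #                       = sqrt(0*1 + 1*0 + (-2)*(-3)) = sqrt(6).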
class TestSquareForm(TestCase):
def test_squareform_empty_matrix(self):
A = np.zeros((0,0))
rA = squareform(np.array(A, dtype='double'))
assert_equal(rA.shape, (0,))
def test_squareform_empty_vector(self):
v = np.zeros((0,))
rv = squareform(np.array(v, dtype='double'))
assert_equal(rv.shape, (1,1))
assert_equal(rv[0, 0], 0)
def test_squareform_1by1_matrix(self):
A = np.zeros((1,1))
rA = squareform(np.array(A, dtype='double'))
assert_equal(rA.shape, (0,))
def test_squareform_one_vector(self):
v = np.ones((1,)) * 8.3
rv = squareform(np.array(v, dtype='double'))
assert_equal(rv.shape, (2,2))
assert_equal(rv[0,1], 8.3)
assert_equal(rv[1,0], 8.3)
def test_squareform_one_binary_vector(self):
# Tests squareform on a 1x1 binary matrix; conversion to double was
# causing problems (see pull request 73).
v = np.ones((1,), dtype=np.bool)
rv = squareform(v)
assert_equal(rv.shape, (2,2))
assert_(rv[0,1])
def test_squareform_2by2_matrix(self):
A = np.zeros((2,2))
A[0,1] = 0.8
A[1,0] = 0.8
rA = squareform(np.array(A, dtype='double'))
assert_equal(rA.shape, (1,))
assert_equal(rA[0], 0.8)
def test_squareform_multi_matrix(self):
for n in xrange(2, 5):
yield self.check_squareform_multi_matrix(n)
def check_squareform_multi_matrix(self, n):
X = np.random.rand(n, 4)
Y = pdist(X)
assert_equal(len(Y.shape), 1)
A = squareform(Y)
Yr = squareform(A)
s = A.shape
k = 0
if verbose >= 3:
print(A.shape, Y.shape, Yr.shape)
assert_equal(len(s), 2)
assert_equal(len(Yr.shape), 1)
assert_equal(s[0], s[1])
for i in xrange(0, s[0]):
for j in xrange(i+1, s[1]):
if i != j:
assert_equal(A[i, j], Y[k])
k += 1
else:
assert_equal(A[i, j], 0)
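    # The loop above walks the condensed vector in row-major order; for a
    # square matrix of side n, the entry (i, j) with i < j maps to condensed
    # index k = n*i - i*(i+1)/2 + (j - i - 1), which is why a single counter
    # k can be incremented in step with the (i, j) traversal.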
class TestNumObsY(TestCase):
def test_num_obs_y_multi_matrix(self):
for n in xrange(2, 10):
X = np.random.rand(n, 4)
Y = pdist(X)
assert_equal(num_obs_y(Y), n)
def test_num_obs_y_1(self):
# Tests num_obs_y(y) on a condensed distance matrix over 1
# observations. Expecting exception.
assert_raises(ValueError, self.check_y, 1)
def test_num_obs_y_2(self):
# Tests num_obs_y(y) on a condensed distance matrix over 2
# observations.
assert_(self.check_y(2))
def test_num_obs_y_3(self):
assert_(self.check_y(3))
def test_num_obs_y_4(self):
assert_(self.check_y(4))
def test_num_obs_y_5_10(self):
for i in xrange(5, 16):
self.minit(i)
def test_num_obs_y_2_100(self):
# Tests num_obs_y(y) on 100 improper condensed distance matrices.
# Expecting exception.
a = set([])
for n in xrange(2, 16):
a.add(n*(n-1)/2)
for i in xrange(5, 105):
if i not in a:
assert_raises(ValueError, self.bad_y, i)
def minit(self, n):
assert_(self.check_y(n))
def bad_y(self, n):
y = np.random.rand(n)
return num_obs_y(y)
def check_y(self, n):
return num_obs_y(self.make_y(n)) == n
def make_y(self, n):
return np.random.rand((n * (n - 1)) // 2)
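    # num_obs_y inverts m = n*(n - 1)/2; a condensed vector of length m is
    # valid only when n = (1 + sqrt(1 + 8*m)) / 2 is an integer, which is
    # why test_num_obs_y_2_100 expects ValueError for lengths outside the
    # set {n*(n-1)/2}.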
class TestNumObsDM(TestCase):
def test_num_obs_dm_multi_matrix(self):
for n in xrange(1, 10):
X = np.random.rand(n, 4)
Y = pdist(X)
A = squareform(Y)
if verbose >= 3:
print(A.shape, Y.shape)
assert_equal(num_obs_dm(A), n)
def test_num_obs_dm_0(self):
# Tests num_obs_dm(D) on a 0x0 distance matrix. Expecting exception.
assert_(self.check_D(0))
def test_num_obs_dm_1(self):
# Tests num_obs_dm(D) on a 1x1 distance matrix.
assert_(self.check_D(1))
def test_num_obs_dm_2(self):
assert_(self.check_D(2))
    def test_num_obs_dm_3(self):
        assert_(self.check_D(3))
def test_num_obs_dm_4(self):
assert_(self.check_D(4))
def check_D(self, n):
return num_obs_dm(self.make_D(n)) == n
def make_D(self, n):
return np.random.rand(n, n)
def is_valid_dm_throw(D):
return is_valid_dm(D, throw=True)
class TestIsValidDM(TestCase):
def test_is_valid_dm_int16_array_E(self):
# Tests is_valid_dm(*) on an int16 array. Exception expected.
D = np.zeros((5, 5), dtype='i')
assert_raises(TypeError, is_valid_dm_throw, (D))
def test_is_valid_dm_int16_array_F(self):
D = np.zeros((5, 5), dtype='i')
assert_equal(is_valid_dm(D), False)
def test_is_valid_dm_improper_shape_1D_E(self):
D = np.zeros((5,), dtype=np.double)
assert_raises(ValueError, is_valid_dm_throw, (D))
def test_is_valid_dm_improper_shape_1D_F(self):
D = np.zeros((5,), dtype=np.double)
assert_equal(is_valid_dm(D), False)
def test_is_valid_dm_improper_shape_3D_E(self):
D = np.zeros((3,3,3), dtype=np.double)
assert_raises(ValueError, is_valid_dm_throw, (D))
def test_is_valid_dm_improper_shape_3D_F(self):
D = np.zeros((3,3,3), dtype=np.double)
assert_equal(is_valid_dm(D), False)
def test_is_valid_dm_nonzero_diagonal_E(self):
y = np.random.rand(10)
D = squareform(y)
for i in xrange(0, 5):
D[i, i] = 2.0
assert_raises(ValueError, is_valid_dm_throw, (D))
def test_is_valid_dm_nonzero_diagonal_F(self):
y = np.random.rand(10)
D = squareform(y)
for i in xrange(0, 5):
D[i, i] = 2.0
assert_equal(is_valid_dm(D), False)
def test_is_valid_dm_asymmetric_E(self):
y = np.random.rand(10)
D = squareform(y)
D[1,3] = D[3,1] + 1
assert_raises(ValueError, is_valid_dm_throw, (D))
def test_is_valid_dm_asymmetric_F(self):
y = np.random.rand(10)
D = squareform(y)
D[1,3] = D[3,1] + 1
assert_equal(is_valid_dm(D), False)
def test_is_valid_dm_correct_1_by_1(self):
D = np.zeros((1,1), dtype=np.double)
assert_equal(is_valid_dm(D), True)
def test_is_valid_dm_correct_2_by_2(self):
y = np.random.rand(1)
D = squareform(y)
assert_equal(is_valid_dm(D), True)
def test_is_valid_dm_correct_3_by_3(self):
y = np.random.rand(3)
D = squareform(y)
assert_equal(is_valid_dm(D), True)
def test_is_valid_dm_correct_4_by_4(self):
y = np.random.rand(6)
D = squareform(y)
assert_equal(is_valid_dm(D), True)
def test_is_valid_dm_correct_5_by_5(self):
y = np.random.rand(10)
D = squareform(y)
assert_equal(is_valid_dm(D), True)
def is_valid_y_throw(y):
return is_valid_y(y, throw=True)
class TestIsValidY(TestCase):
# If test case name ends on "_E" then an exception is expected for the
# given input, if it ends in "_F" then False is expected for the is_valid_y
# check. Otherwise the input is expected to be valid.
def test_is_valid_y_int16_array_E(self):
y = np.zeros((10,), dtype='i')
assert_raises(TypeError, is_valid_y_throw, (y))
def test_is_valid_y_int16_array_F(self):
y = np.zeros((10,), dtype='i')
assert_equal(is_valid_y(y), False)
def test_is_valid_y_improper_shape_2D_E(self):
y = np.zeros((3,3,), dtype=np.double)
assert_raises(ValueError, is_valid_y_throw, (y))
def test_is_valid_y_improper_shape_2D_F(self):
y = np.zeros((3,3,), dtype=np.double)
assert_equal(is_valid_y(y), False)
def test_is_valid_y_improper_shape_3D_E(self):
y = np.zeros((3,3,3), dtype=np.double)
assert_raises(ValueError, is_valid_y_throw, (y))
def test_is_valid_y_improper_shape_3D_F(self):
y = np.zeros((3,3,3), dtype=np.double)
assert_equal(is_valid_y(y), False)
def test_is_valid_y_correct_2_by_2(self):
y = self.correct_n_by_n(2)
assert_equal(is_valid_y(y), True)
def test_is_valid_y_correct_3_by_3(self):
y = self.correct_n_by_n(3)
assert_equal(is_valid_y(y), True)
def test_is_valid_y_correct_4_by_4(self):
y = self.correct_n_by_n(4)
assert_equal(is_valid_y(y), True)
def test_is_valid_y_correct_5_by_5(self):
y = self.correct_n_by_n(5)
assert_equal(is_valid_y(y), True)
def test_is_valid_y_2_100(self):
a = set([])
for n in xrange(2, 16):
a.add(n*(n-1)/2)
for i in xrange(5, 105):
if i not in a:
assert_raises(ValueError, self.bad_y, i)
def bad_y(self, n):
y = np.random.rand(n)
return is_valid_y(y, throw=True)
def correct_n_by_n(self, n):
y = np.random.rand((n * (n - 1)) // 2)
return y
def test_bad_p():
# Raise ValueError if p < 1.
p = 0.5
assert_raises(ValueError, minkowski, [1, 2], [3, 4], p)
assert_raises(ValueError, wminkowski, [1, 2], [3, 4], p, [1, 1])
def test_sokalsneath_all_false():
# Regression test for ticket #876
assert_raises(ValueError, sokalsneath, [False, False, False], [False, False, False])
def test_canberra():
# Regression test for ticket #1430.
assert_equal(canberra([1,2,3], [2,4,6]), 1)
assert_equal(canberra([1,1,0,0], [1,0,1,0]), 2)
def test_braycurtis():
# Regression test for ticket #1430.
assert_almost_equal(braycurtis([1,2,3], [2,4,6]), 1./3, decimal=15)
assert_almost_equal(braycurtis([1,1,0,0], [1,0,1,0]), 0.5, decimal=15)
def test_euclideans():
# Regression test for ticket #1328.
x1 = np.array([1, 1, 1])
x2 = np.array([0, 0, 0])
# Basic test of the calculation.
assert_almost_equal(sqeuclidean(x1, x2), 3.0, decimal=14)
assert_almost_equal(euclidean(x1, x2), np.sqrt(3), decimal=14)
# Check flattening for (1, N) or (N, 1) inputs
assert_almost_equal(euclidean(x1[np.newaxis, :], x2[np.newaxis, :]),
np.sqrt(3), decimal=14)
assert_almost_equal(sqeuclidean(x1[np.newaxis, :], x2[np.newaxis, :]),
3.0, decimal=14)
assert_almost_equal(sqeuclidean(x1[:, np.newaxis], x2[:, np.newaxis]),
3.0, decimal=14)
# Distance metrics only defined for vectors (= 1-D)
x = np.arange(4).reshape(2, 2)
assert_raises(ValueError, euclidean, x, x)
assert_raises(ValueError, sqeuclidean, x, x)
# Another check, with random data.
    rs = np.random.RandomState(1234567890)
import argparse
import json
import logging
import numpy as np
import os
import sklearn.compose
import sklearn.discriminant_analysis
import sklearn.ensemble
import sklearn.feature_selection
import sklearn.impute
import sklearn.linear_model
import sklearn.model_selection
import sklearn.naive_bayes
import sklearn.neighbors
import sklearn.neural_network
import sklearn.pipeline
import sklearn.preprocessing
import sklearn.svm
import sklearn.tree
import lccv
import openml
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--output_directory', type=str, help='directory to store output',
default=os.path.expanduser('~') + '/experiments/lccv/')
parser.add_argument('--job_idx', type=int, default=None)
    parser.add_argument('--verbose', action='store_true')
    parser.add_argument('--study_id', type=int, default=271)
return parser.parse_args()
learners = [
sklearn.svm.LinearSVC(),
sklearn.tree.DecisionTreeClassifier(),
sklearn.tree.ExtraTreeClassifier(),
sklearn.linear_model.LogisticRegression(),
sklearn.linear_model.PassiveAggressiveClassifier(),
sklearn.linear_model.Perceptron(),
sklearn.linear_model.RidgeClassifier(),
sklearn.linear_model.SGDClassifier(),
sklearn.neural_network.MLPClassifier(),
sklearn.discriminant_analysis.LinearDiscriminantAnalysis(),
sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis(),
sklearn.naive_bayes.BernoulliNB(),
sklearn.naive_bayes.MultinomialNB(),
sklearn.neighbors.KNeighborsClassifier(),
sklearn.ensemble.ExtraTreesClassifier(),
sklearn.ensemble.RandomForestClassifier(),
sklearn.ensemble.GradientBoostingClassifier(),
]
def highest_2power_below(n) -> int:
p = int(np.log2(n * .9))
return int(pow(2, p) / 9 * 10)
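# Note (added): despite its name, highest_2power_below(n) does not return a
# power of two; it returns a size m chosen so that roughly 90% of m equals
# the largest power of two below 0.9*n. E.g. n=1000 gives p=9 and
# m = int(2**9 * 10 / 9) = 568, with 0.9*568 ~= 512, so the 90% training
# folds of a 10-fold CV get (approximately) power-of-two size.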
def clf_as_pipeline(clf, numeric_indices, nominal_indices):
numeric_transformer = sklearn.pipeline.make_pipeline(
sklearn.impute.SimpleImputer(),
sklearn.preprocessing.StandardScaler())
# note that the dataset is encoded numerically, hence we can only impute
# numeric values, even for the categorical columns.
categorical_transformer = sklearn.pipeline.make_pipeline(
sklearn.impute.SimpleImputer(strategy='constant', fill_value=-1),
sklearn.preprocessing.OneHotEncoder(handle_unknown='ignore'))
transformer = sklearn.compose.ColumnTransformer(
transformers=[
('numeric', numeric_transformer, numeric_indices),
('nominal', categorical_transformer, nominal_indices)],
remainder='passthrough')
pipeline = sklearn.pipeline.make_pipeline(transformer,
sklearn.feature_selection.VarianceThreshold(),
clf)
return pipeline
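# Illustrative usage (added sketch; the column indices and the X_train /
# y_train names are placeholder assumptions, any numerically encoded data
# with those column types would do):
#
#     pipeline = clf_as_pipeline(sklearn.tree.DecisionTreeClassifier(),
#                                numeric_indices=[0, 1],
#                                nominal_indices=[2])
#     pipeline.fit(X_train, y_train)
#     y_pred = pipeline.predict(X_test)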
def run_classifier_on_task(
learner_idx: int,
task: openml.tasks.OpenMLSupervisedTask,
output_directory: str, verbose: bool):
nominal_indices = task.get_dataset().get_features_by_type('nominal', [task.target_name])
numeric_indices = task.get_dataset().get_features_by_type('numeric', [task.target_name])
clf = clf_as_pipeline(learners[learner_idx], numeric_indices, nominal_indices)
x, y = task.get_X_and_y(dataset_format='array')
unique, counts = np.unique(y, return_counts=True)
logging.info('class dist (all): %s' % dict(zip(unique, counts)))
size_big = highest_2power_below(len(x))
indices_big = np.random.permutation(np.arange(len(x)))[:size_big]
indices_small = indices_big[:int(size_big/2)]
x_big, y_big = x[indices_big], y[indices_big]
unique_big, counts_big = np.unique(y_big, return_counts=True)
logging.info('class dist (big): %s' % dict(zip(unique_big, counts_big)))
x_small, y_small = x[indices_small], y[indices_small]
unique_small, counts_small = np.unique(y_small, return_counts=True)
logging.info('class dist (small): %s' % dict(zip(unique_small, counts_small)))
output_dir = os.path.join(output_directory, str(task.task_id))
os.makedirs(output_dir, exist_ok=True)
filename = 'result_%s.json' % str(learners[learner_idx]) # do not use full pipeline name
if os.path.isfile(os.path.join(output_dir, filename)):
logging.info('clf %s on dataset %s already exists' % (str(learners[learner_idx]), task.get_dataset().name))
return
logging.info('dataset: %s, shape: %s > %s > %s' % (task.get_dataset().name,
x.shape, x_big.shape,
x_small.shape))
results_lccv = lccv.lccv(clf, x_small, y_small,
enforce_all_anchor_evaluations=True, verbose=verbose)
prediction = results_lccv[3].get_ipl_estimate_at_target(size_big)
cv_big = sklearn.model_selection.cross_val_score(
clf, x_big, y_big, cv=10, scoring='accuracy')
cv_small = sklearn.model_selection.cross_val_score(
clf, x_small, y_small, cv=10, scoring='accuracy')
all_results = {
'sizes': [int(size_big/2), size_big],
'lccv': results_lccv[2],
'cv': {
len(x_small): {
'n': 10,
'mean': np.mean(1 - cv_small),
'std': np.std(1 - cv_small)
},
len(x_big): {
'n': 10,
                'mean': np.mean(1 - cv_big),
                'std': np.std(1 - cv_big)
            }
        },
    }
import unittest
import numpy as np
import time
import torch
from potts_deepflow import Potts_MAP1d,Potts_MAP2d,Potts_MAP3d
from potts_deepflow import Potts_Mean1d,Potts_Mean2d,Potts_Mean3d
b=1
c=3
x=2**12
epsilon = 0.01
def get_size_info(d):
    x_used = int(x**(1/d) + 0.001)
    full_shape = tuple([b, c] + [x_used for i in range(d)])
    reduced_shape = tuple([1, c] + [x_used for i in range(d)])
    spatial_axes = tuple([i + 2 for i in range(d)])
    return full_shape, reduced_shape, spatial_axes
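# get_size_info(d) keeps the total number of spatial elements near x = 2**12
# regardless of dimensionality: d=1 yields (1, 3, 4096), d=2 yields
# (1, 3, 64, 64), and d=3 yields (1, 3, 16, 16, 16), along with the
# channel-reduced shape and the tuple of spatial axis indices.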
def test_no_smoothness(d,device,asserter):
print("Testing (no smoothness) \t Dim: " +str(d)+ " \t Dev: " + device)
    size_info, size_red_info, axes = get_size_info(d)
data_t = np.random.normal(0,1,size=size_info).astype(np.float32)
data_w = np.random.normal(0,1,size=size_info).astype(np.float32)
data_rx = np.zeros(shape=size_info).astype(np.float32)
if d > 1:
data_ry = np.zeros(shape=size_info).astype(np.float32)
if d > 2:
data_rz = np.zeros(shape=size_info).astype(np.float32)
t = torch.tensor(data_t, device=torch.device(device))
t.requires_grad = True
w = torch.tensor(data_w, device=torch.device(device))
rx = torch.tensor(data_rx, device=torch.device(device))
rx.requires_grad = True
if d > 1:
ry = torch.tensor(data_ry, device=torch.device(device))
ry.requires_grad = True
if d > 2:
rz = torch.tensor(data_rz, device=torch.device(device))
rz.requires_grad = True
if d == 1:
oa = torch.exp(Potts_MAP1d.apply(t,rx))
om = Potts_Mean1d.apply(t,rx)
elif d == 2:
oa = torch.exp(Potts_MAP2d.apply(t,rx,ry))
om = Potts_Mean2d.apply(t,rx,ry)
elif d == 3:
oa = torch.exp(Potts_MAP3d.apply(t,rx,ry,rz))
om = Potts_Mean3d.apply(t,rx,ry,rz)
loss = torch.sum(w*om)
loss.backward()
oa_np = oa.detach().cpu().numpy()
om_np = om.detach().cpu().numpy()
ot_np = t.grad.detach().cpu().numpy()
#make sure not nan
asserter.assertFalse(np.any(np.isnan(oa_np)))
asserter.assertFalse(np.any(np.isnan(om_np)))
asserter.assertFalse(np.any(np.isnan(ot_np)))
#resize into more usable form
dt_np_l = [data_t[0,i,...].flatten() for i in range(c)]
dw_np_l = [data_w[0,i,...].flatten() for i in range(c)]
oa_np_l = [oa_np[0,i,...].flatten() for i in range(c)]
om_np_l = [om_np[0,i,...].flatten() for i in range(c)]
ot_np_l = [ot_np[0,i,...].flatten() for i in range(c)]
x_space = len(dt_np_l[0])
#ensure MAP assigns 1 to highest term and 0 to everything else
for i in range(x_space):
highest = max([o[i] for o in dt_np_l])
for ic in range(c):
if(dt_np_l[ic][i] == highest and oa_np_l[ic][i] < 0.5):
raise Exception(str(dt_np_l[ic][i])+"\t"+str([o[i] for o in dt_np_l])+"\t"+str(highest)+"\t"+str(oa_np_l[ic][i])+"\t"+str([o[i] for o in oa_np_l]))
if(dt_np_l[ic][i] < highest - epsilon and oa_np_l[ic][i] > 0.5):
raise Exception(str(dt_np_l[ic][i])+"\t"+str([o[i] for o in dt_np_l])+"\t"+str(highest)+"\t"+str(oa_np_l[ic][i])+"\t"+str([o[i] for o in oa_np_l]))
#ensure mean pass is equivalent to the data terms only
for i in range(c):
for val_df, val_d in zip(om_np_l[i],dt_np_l[i]):
if(abs(val_df-val_d) > epsilon):
raise Exception(str(val_df) + "\t" + str(val_d))
#ensure gradient wrt data terms are passed immediately through
for i in range(c):
for val_df, val_d in zip(ot_np_l[i],dw_np_l[i]):
if(abs(val_df-val_d) > epsilon):
raise Exception(str(val_df) + "\t" + str(val_d))
def test_smoothness_dom(d,device,asserter):
print("Testing (smoothness dom.) \t Dim: " +str(d)+ " \t Dev: " + device)
    size_info, size_red_info, axes = get_size_info(d)
winner = int(np.random.uniform()*c)
data_t = 1*np.random.uniform(0,1,size=size_info).astype(np.float32)
data_t[:,winner,...] = 0.75
data_r = 100+0*np.random.uniform(size=size_info).astype(np.float32)
t = torch.tensor(data_t, device=torch.device(device))
r = torch.tensor(data_r, device=torch.device(device))
if d == 1:
oa = torch.exp(Potts_MAP1d.apply(t,r))
om = Potts_Mean1d.apply(t,r)
elif d == 2:
oa = torch.exp(Potts_MAP2d.apply(t,r,r))
om = Potts_Mean2d.apply(t,r,r)
elif d == 3:
oa = torch.exp(Potts_MAP3d.apply(t,r,r,r))
om = Potts_Mean3d.apply(t,r,r,r)
oa_np = oa.detach().cpu().numpy()
om_np = om.detach().cpu().numpy()
#make sure not nan
asserter.assertFalse(np.any(np.isnan(oa_np)))
asserter.assertFalse(np.any(np.isnan(om_np)))
#resize into more usable form
dt_np_l = [data_t[0,i,...].flatten() for i in range(c)]
oa_np_l = [oa_np[0,i,...].flatten() for i in range(c)]
om_np_l = [om_np[0,i,...].flatten() for i in range(c)]
x_space = len(dt_np_l[0])
#ensure MAP assigns 1 to highest terms
sums = [np.sum(o) for o in dt_np_l]
highest = max(sums)
for i in range(c):
for val_df in oa_np_l[i]:
if(sums[i] == highest and val_df < 0.5):
raise Exception(str(sums[i])+ "\t" + str(sums) + "\t" + str(val_df))
if(sums[i] < highest and val_df > 0.5):
raise Exception(str(sums[i])+ "\t" + str(sums) + "\t" + str(val_df))
def test_device_equivalence(d,device_list,asserter):
print("Testing (dev equiv.) \t Dim: " +str(d)+ " \t Dev:",device_list)
    size_info, size_red_info, axes = get_size_info(d)
data_t = np.random.normal(0,1,size=size_info).astype(np.float32)
data_w = np.random.normal(0,1,size=size_info).astype(np.float32)
    data_rx = (0.25*np.random.uniform(size=size_info)).astype(np.float32)
from collections import OrderedDict
import numpy as np
from matplotlib.patches import Rectangle
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.preprocessing import normalize
from tensorflow.python import keras
from tensorflow.python.keras import backend as K
from PIL import Image, ImageFilter
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.offsetbox import OffsetImage, AnnotationBbox
import seaborn as sns
import sys
import pandas as pd
class PerturbationScores:
col_prediction = "prediction"
col_prediction_t = "prediction_t"
col_a_PIR = "a_PIR"
col_b_PIR = "b_PIR"
col_PIR = "PIR"
col_nPIR = "nPIR"
col_a_PIRP = "a_PIRP"
col_b_PIRP = "b_PIRP"
col_PIRP = "PIRP"
col_nPIRP = "nPIRP"
def __init__(self, P_o, P_t, coi):
self.coi = coi
self.P_o = P_o
self.p_o = self.P_o[self.coi]
if P_t is not None:
self.P_t = P_t
self.p_t = self.P_t[self.coi]
else:
self.P_t = None
self.p_t = float('NaN')
self.PIR = float('NaN')
self.PIRP = float('NaN')
self.nPIR = float('NaN')
self.nPIRP = float('NaN')
self.a_pir, self.b_pir = float('NaN'), float('NaN')
self.classes_npir = float('NaN')
self.w_c_npir = float('NaN')
self.pirp_coi = float('NaN')
self.pirp_no_coi = float('NaN')
self.a_pirp, self.b_pirp = float('NaN'), float('NaN')
@staticmethod
def softsign_norm(x):
x_n = x / (1 + abs(x))
return x_n
@staticmethod
def relu(x):
if x >= 0:
return x
else:
return 0.0
@staticmethod
def _get_a_b(p_o, p_t):
a = (1 - p_o / p_t)
if a == float('inf'):
a = sys.float_info.max
elif a == float('-inf'):
a = -sys.float_info.max
b = (1 - p_t / p_o)
if b == float('inf'):
b = sys.float_info.max
elif b == float('-inf'):
b = -sys.float_info.max
return a, b
@staticmethod
def compute_influence_relation(p_o, p_t):
a, b = PerturbationScores._get_a_b(p_o, p_t)
return (p_t * b) - (p_o * a)
@staticmethod
def compute_perturbation_influence_relation(p_o, p_t):
return PerturbationScores.compute_influence_relation(p_o, p_t)
@staticmethod
def compute_perturbation_influence_relation_normalized(p_o, p_t):
PIR = PerturbationScores.compute_perturbation_influence_relation(p_o, p_t)
return PerturbationScores.softsign_norm(PIR)
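    # Worked example (added note): with p_o = 0.8 and p_t = 0.2,
    # a = 1 - 0.8/0.2 = -3 and b = 1 - 0.2/0.8 = 0.75, so
    # PIR = 0.2*0.75 - 0.8*(-3) = 2.55 and nPIR = 2.55/3.55 ~= 0.72:
    # the perturbation lowered the class-of-interest probability, i.e.
    # the feature has a positive influence on that class.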
@staticmethod
def compute_npir_for_all_classes(P_o, P_t):
classes_npir = [PerturbationScores.compute_perturbation_influence_relation_normalized(p_o, p_t) for p_o, p_t in zip(P_o, P_t)]
return classes_npir
@staticmethod
def weighted_classes_npir(classes_npir, weights):
return classes_npir * weights
@staticmethod
def pirp_coi(w_c_npir, coi):
pirp_coi = abs(w_c_npir[coi])
return pirp_coi
@staticmethod
def pirp_no_coi(w_c_npir, coi):
w_c_npir_no_coi = w_c_npir.copy()
w_c_npir_no_coi[coi] = 0.0
w_c_npir_no_coi = [PerturbationScores.relu(wir) for wir in w_c_npir_no_coi]
pirp_no_coi = sum(w_c_npir_no_coi)
return pirp_no_coi
@staticmethod
def compute_perturbation_influence_relation_precision(P_o, P_t, coi):
classes_npir = PerturbationScores.compute_npir_for_all_classes(P_o, P_t)
w_c_npir = PerturbationScores.weighted_classes_npir(classes_npir, P_o)
pirp_coi = PerturbationScores.pirp_coi(w_c_npir, coi)
pirp_no_coi = PerturbationScores.pirp_no_coi(w_c_npir, coi)
return PerturbationScores.compute_influence_relation(pirp_coi, pirp_no_coi)
@staticmethod
def compute_perturbation_influence_relation_precision_normalized(P_o, P_t, coi):
"""
se new_irp_simm > 0 -> la feature è precisa nella la classe in esame \n
se new_irp_simm = 0 -> la feature non è precisa nella la classe in esame ma impatta anche altre classi \n
se new_irp_simm < 0 -> la feature non è precisa nella la classe in esame e impatta maggiormente altre classi \n\n
:param P_o:
:param P_t:
:param coi:
:return:
"""
pirp = PerturbationScores.compute_perturbation_influence_relation_precision(P_o, P_t, coi)
return PerturbationScores.softsign_norm(pirp)
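    # Interpretation note (added): nPIRP compares the weighted influence on
    # the class of interest (pirp_coi) against the summed positive influence
    # leaked onto all other classes (pirp_no_coi), then squashes the result
    # into (-1, 1) with the same softsign normalization used for nPIR.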
def compute_scores(self):
if self.P_t is None:
return self
self.PIR = PerturbationScores.compute_perturbation_influence_relation(self.p_o, self.p_t)
self.nPIR = PerturbationScores.compute_perturbation_influence_relation_normalized(self.p_o, self.p_t)
self.PIRP = PerturbationScores.compute_perturbation_influence_relation_precision(self.P_o, self.P_t, self.coi)
self.nPIRP = PerturbationScores.compute_perturbation_influence_relation_precision_normalized(self.P_o, self.P_t, self.coi)
self.a_pir, self.b_pir = PerturbationScores._get_a_b(self.p_o, self.p_t)
self.classes_npir = PerturbationScores.compute_npir_for_all_classes(self.P_o, self.P_t)
self.w_c_npir = PerturbationScores.weighted_classes_npir(self.classes_npir, self.P_o)
self.pirp_coi = PerturbationScores.pirp_coi(self.w_c_npir, self.coi)
self.pirp_no_coi = PerturbationScores.pirp_no_coi(self.w_c_npir, self.coi)
self.a_pirp, self.b_pirp = PerturbationScores._get_a_b(self.pirp_coi, self.pirp_no_coi)
return self
def get_scores_dict(self):
scores_dict = OrderedDict()
scores_dict[self.col_prediction] = float(self.p_o)
scores_dict[self.col_prediction_t] = float(self.p_t)
# PIR - Perturbation Influence Relation
scores_dict[self.col_a_PIR] = float(self.a_pir)
scores_dict[self.col_b_PIR] = float(self.b_pir)
scores_dict[self.col_PIR] = float(self.PIR)
scores_dict[self.col_nPIR] = float(self.nPIR)
# PIRP - Perturbation Influence Relation Precision
scores_dict[self.col_a_PIRP] = float(self.a_pirp)
scores_dict[self.col_b_PIRP] = float(self.b_pirp)
scores_dict[self.col_PIRP] = float(self.PIRP)
scores_dict[self.col_nPIRP] = float(self.nPIRP)
return scores_dict
def __str__(self):
return str(self.get_scores_dict())
class LocalExplanation:
def __init__(self, input_image, class_of_interest, features_map, model, preprocess_func):
self.input_image = input_image
self.class_of_interest = class_of_interest
self.features_map = features_map
self.model=model
self.preprocess_func = preprocess_func
self.cmap = sns.diverging_palette(20, 130, l=60, as_cmap=True) # 20 -> red, 130 -> green
self.original_predictions = self.predict_with_model(input_image)
self.feature_ids = np.unique(self.features_map)
self.perturbations = {}
self.numerical_explanation = {}
self.visual_explanation = None
self.informativeness = None
def predict_with_model(self, img):
if self.preprocess_func:
input_image_arr = self.preprocess_func(img)
else:
raise Exception("Preprocessing function not provided. You should always provide the same preprocess function used on the input image.")
p = self.model.predict(input_image_arr)
return p[0]
@staticmethod
def _find_centroid(init_x, X, n_iter=10):
new_x = init_x
f_X = X
for i in range(n_iter):
dist = np.linalg.norm(f_X - new_x, axis=1)
            if len(f_X[dist < np.mean(dist)]) > 0:
                f_X = f_X[dist < np.mean(dist)]
else:
break
new_x = np.percentile(f_X, 50, interpolation="nearest", axis=0).astype(np.int64)
return new_x
@staticmethod
def _color_palette():
return (np.array(sns.color_palette("Set3")) * 255).astype(np.uint8)
@staticmethod
def _color_rgba(i, alpha=255):
r, g, b = LocalExplanation._color_palette()[i, :]
return (r, g, b, alpha)
def show_features_map(self):
colored_feature_map = np.zeros((self.features_map.shape[0], self.features_map.shape[0], 4), np.int64)
coordinates = []
for f_idx in self.feature_ids:
feature_map_mask = np.zeros((self.features_map.shape[0], self.features_map.shape[0], 4), np.int64) # init empty mask
c = self._color_rgba(f_idx - 1) # color
f = self._get_feature_mask(f_idx)
x_coors, y_coors = np.where(f == 255)
y_coor, x_coor = np.percentile(x_coors, 50).astype(int), np.percentile(y_coors, 50).astype(int)
coors = np.array([x_coors, y_coors]).transpose(1, 0)
coor = np.array([(x_coor, y_coor)])
coor = self._find_centroid(coor, coors)
y_coor, x_coor = coor[0], coor[1]
# set_color
feature_map_mask[x_coors, y_coors] = c
colored_feature_map += feature_map_mask
coordinates.append((f_idx, x_coor, y_coor, c))
fig, ax = plt.subplots(figsize=(5, 5), dpi=90)
fig.tight_layout()
ax.imshow(colored_feature_map)
for fid, x_coor, y_coor, c in coordinates:
ax.text(x_coor, y_coor, f"{fid}", color="black", ha='center', va='center', fontsize=26)
ax.grid(False)
ax.set_xticks([])
ax.set_yticks([])
ax.set_title(f"Features map")
return ax
def show_visual_explanation(self, color_bar=True):
if self.visual_explanation:
fig, ax = plt.subplots(figsize=(5, 5))
fig.tight_layout()
ax.imshow(self.visual_explanation)
ax.grid(False)
ax.set_xticks([])
ax.set_yticks([])
if color_bar:
cb_ax = fig.add_axes([1, 0.12, 0.05, 0.8])
cb = matplotlib.colorbar.ColorbarBase(cb_ax, cmap=self.cmap,
norm=matplotlib.colors.Normalize(vmin=-1, vmax=1),
orientation='vertical')
cb.set_label(PerturbationScores.col_nPIR)
return ax
else:
raise Exception("Fit explanation first.")
def show_numerical_explanation(self):
if self.numerical_explanation:
_df = pd.DataFrame(self.numerical_explanation).T.sort_index()
fig, ax = plt.subplots(figsize=(5, 5), dpi=90)
fig.tight_layout()
_df[["nPIR", "nPIRP"]].plot(kind="bar", ax=ax, rot=0)
ax.set_xlabel("Feature id")
ax.set_title("Class of interest: " + str(self.class_of_interest))
ax.set_ylim(-1, 1)
ax.grid(True)
return ax
else:
raise Exception("Fit explanation first.")
def get_perturbed_input(self, feature_mask):
im = self.input_image.copy()
blurred_image = im.filter(ImageFilter.GaussianBlur(10))
im.paste(blurred_image, mask=feature_mask)
return im
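    # The perturbation replaces only the masked feature region with a
    # Gaussian-blurred copy of the input image (radius 10), leaving all
    # other pixels untouched, so each feature can be suppressed in
    # isolation before re-querying the model.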
def _get_feature_mask(self, f_idx):
f = self.features_map.reshape(-1).copy()
f = (f == f_idx).astype(np.uint8) * 255
f = f.reshape(self.features_map.shape)
return f
def get_feature_mask_image(self, f_idx):
f = self._get_feature_mask(f_idx)
f_map_img = Image.fromarray(f, mode="L")
return f_map_img
def _compute_perturbations(self):
for f_idx in self.feature_ids:
feature_mask = self.get_feature_mask_image(f_idx)
perturbed_image = self.get_perturbed_input(feature_mask)
self.perturbations[f_idx] = perturbed_image
return self
@staticmethod
def _get_visual_explanation_mask(heatmap, cmap):
norm = matplotlib.colors.Normalize(vmin=-1.0, vmax=1.0)
colors = plt.cm.ScalarMappable(norm=norm, cmap=cmap).to_rgba(heatmap)
mask = Image.fromarray((colors*255).astype(np.uint8), mode="RGBA")
return mask
@staticmethod
def _apply_mask(image, heatmap):
res = Image.blend(image, heatmap, alpha=.85)
return res
def _explain(self):
# numerical explanation
for f_idx, img_p in self.perturbations.items():
predictions_p = self.predict_with_model(img_p)
perturbation_scores = PerturbationScores(self.original_predictions, predictions_p, self.class_of_interest)
perturbation_scores.compute_scores()
self.numerical_explanation[f_idx] = perturbation_scores.get_scores_dict()
# visual explanation
        heatmap_nPIR = np.zeros(self.features_map.shape, np.float32)
import numpy
import pytest
from chainer_chemistry.saliency.visualizer.visualizer_utils import abs_max_scaler # NOQA
from chainer_chemistry.saliency.visualizer.visualizer_utils import min_max_scaler # NOQA
from chainer_chemistry.saliency.visualizer.visualizer_utils import normalize_scaler # NOQA
from chainer_chemistry.saliency.visualizer.visualizer_utils import red_blue_cmap # NOQA
def test_abs_max_scaler():
saliency = numpy.array([1., 2., 3.])
result = abs_max_scaler(saliency)
expected = numpy.array([1. / 3, 2. / 3., 1.])
assert numpy.allclose(result, expected)
# test with 0 arrays
saliency = numpy.array([0, 0, 0])
result = abs_max_scaler(saliency)
expected = numpy.array([0, 0, 0])
assert numpy.allclose(result, expected)
def test_min_max_scaler():
    saliency = numpy.array([1., -3., 3.])
    result = min_max_scaler(saliency)
    expected = numpy.array([2. / 3, 0., 1.])
    assert numpy.allclose(result, expected)
"""Utilities for plotting."""
import numpy as np
import warnings
try:
import matplotlib.pyplot as plt
from matplotlib import artist
from matplotlib.patches import FancyArrowPatch
from mpl_toolkits.mplot3d import proj3d
from mpl_toolkits.mplot3d.art3d import Line3D, Text3D, Poly3DCollection, Line3DCollection
from .transformations import transform
from .rotations import unitx, unitz, perpendicular_to_vectors, norm_vector
class Frame(artist.Artist):
"""A Matplotlib artist that displays a frame represented by its basis.
Parameters
----------
A2B : array-like, shape (4, 4)
Transform from frame A to frame B
label : str, optional (default: None)
Name of the frame
s : float, optional (default: 1)
Length of basis vectors
Other arguments except 'c' and 'color' are passed on to Line3D.
"""
def __init__(self, A2B, label=None, s=1.0, **kwargs):
super(Frame, self).__init__()
if "c" in kwargs:
kwargs.pop("c")
if "color" in kwargs:
kwargs.pop("color")
self.s = s
self.x_axis = Line3D([], [], [], color="r", **kwargs)
self.y_axis = Line3D([], [], [], color="g", **kwargs)
self.z_axis = Line3D([], [], [], color="b", **kwargs)
self.draw_label = label is not None
self.label = label
if self.draw_label:
self.label_indicator = Line3D([], [], [], color="k", **kwargs)
self.label_text = Text3D(0, 0, 0, text="", zdir="x")
self.set_data(A2B, label)
def set_data(self, A2B, label=None):
"""Set the transformation data.
Parameters
----------
A2B : array-like, shape (4, 4)
Transform from frame A to frame B
label : str, optional (default: None)
Name of the frame
"""
R = A2B[:3, :3]
p = A2B[:3, 3]
for d, b in enumerate([self.x_axis, self.y_axis, self.z_axis]):
b.set_data(np.array([p[0], p[0] + self.s * R[0, d]]),
np.array([p[1], p[1] + self.s * R[1, d]]))
b.set_3d_properties(np.array([p[2], p[2] + self.s * R[2, d]]))
if self.draw_label:
if label is None:
label = self.label
label_pos = p + 0.5 * self.s * (R[:, 0] + R[:, 1] + R[:, 2])
self.label_indicator.set_data(
np.array([p[0], label_pos[0]]),
np.array([p[1], label_pos[1]]))
self.label_indicator.set_3d_properties(
np.array([p[2], label_pos[2]]))
self.label_text.set_text(label)
self.label_text.set_position([label_pos[0], label_pos[1]])
self.label_text.set_3d_properties(label_pos[2], zdir="x")
@artist.allow_rasterization
def draw(self, renderer, *args, **kwargs):
"""Draw the artist."""
for b in [self.x_axis, self.y_axis, self.z_axis]:
b.draw(renderer, *args, **kwargs)
if self.draw_label:
self.label_indicator.draw(renderer, *args, **kwargs)
self.label_text.draw(renderer, *args, **kwargs)
super(Frame, self).draw(renderer, *args, **kwargs)
def add_frame(self, axis):
"""Add the frame to a 3D axis."""
for b in [self.x_axis, self.y_axis, self.z_axis]:
axis.add_line(b)
if self.draw_label:
axis.add_line(self.label_indicator)
axis._add_text(self.label_text)
class LabeledFrame(Frame):
"""Displays a frame represented by its basis with axis labels.
Parameters
----------
A2B : array-like, shape (4, 4)
Transform from frame A to frame B
label : str, optional (default: None)
Name of the frame
s : float, optional (default: 1)
Length of basis vectors
Other arguments except 'c' and 'color' are passed on to Line3D.
"""
def __init__(self, A2B, label=None, s=1.0, **kwargs):
self.x_label = Text3D(0, 0, 0, text="", zdir="x")
self.y_label = Text3D(0, 0, 0, text="", zdir="x")
self.z_label = Text3D(0, 0, 0, text="", zdir="x")
        super(LabeledFrame, self).__init__(A2B, label=label, s=s, **kwargs)
def set_data(self, A2B, label=None):
"""Set the transformation data.
Parameters
----------
A2B : array-like, shape (4, 4)
Transform from frame A to frame B
label : str, optional (default: None)
Name of the frame
"""
super(LabeledFrame, self).set_data(A2B, label)
R = A2B[:3, :3]
p = A2B[:3, 3]
x_label_location = p + 1.1 * self.s * R[:, 0]
y_label_location = p + 1.1 * self.s * R[:, 1]
z_label_location = p + 1.1 * self.s * R[:, 2]
self.x_label.set_text("x")
self.x_label.set_position(x_label_location[:2])
self.x_label.set_3d_properties(x_label_location[2], zdir="x")
self.y_label.set_text("y")
self.y_label.set_position(y_label_location[:2])
self.y_label.set_3d_properties(y_label_location[2], zdir="x")
self.z_label.set_text("z")
self.z_label.set_position(z_label_location[:2])
self.z_label.set_3d_properties(z_label_location[2], zdir="x")
@artist.allow_rasterization
def draw(self, renderer, *args, **kwargs):
"""Draw the artist."""
self.x_label.draw(renderer, *args, **kwargs)
self.y_label.draw(renderer, *args, **kwargs)
self.z_label.draw(renderer, *args, **kwargs)
super(LabeledFrame, self).draw(renderer, *args, **kwargs)
def add_frame(self, axis):
"""Add the frame to a 3D axis."""
super(LabeledFrame, self).add_frame(axis)
axis._add_text(self.x_label)
axis._add_text(self.y_label)
axis._add_text(self.z_label)
class Trajectory(artist.Artist):
"""A Matplotlib artist that displays a trajectory.
Parameters
----------
H : array-like, shape (n_steps, 4, 4)
Sequence of poses represented by homogeneous matrices
show_direction : bool, optional (default: True)
Plot an arrow to indicate the direction of the trajectory
n_frames : int, optional (default: 10)
Number of frames that should be plotted to indicate the rotation
s : float, optional (default: 1)
Scaling of the frames that will be drawn
Other arguments are passed onto Line3D.
"""
def __init__(self, H, show_direction=True, n_frames=10, s=1.0, **kwargs):
super(Trajectory, self).__init__()
self.show_direction = show_direction
self.trajectory = Line3D([], [], [], **kwargs)
self.key_frames = [Frame(np.eye(4), s=s, **kwargs)
for _ in range(n_frames)]
if self.show_direction:
self.direction_arrow = Arrow3D(
[0, 0], [0, 0], [0, 0],
mutation_scale=20, lw=1, arrowstyle="-|>", color="k")
self.set_data(H)
def set_data(self, H):
"""Set the trajectory data.
Parameters
----------
H : array-like, shape (n_steps, 4, 4)
Sequence of poses represented by homogeneous matrices
"""
positions = H[:, :3, 3]
self.trajectory.set_data(positions[:, 0], positions[:, 1])
self.trajectory.set_3d_properties(positions[:, 2])
        key_frames_indices = np.linspace(
            0, len(H) - 1, len(self.key_frames), dtype=int)
for i, key_frame_idx in enumerate(key_frames_indices):
self.key_frames[i].set_data(H[key_frame_idx])
if self.show_direction:
start = 0.8 * positions[0] + 0.2 * positions[-1]
end = 0.2 * positions[0] + 0.8 * positions[-1]
self.direction_arrow.set_data(
[start[0], end[0]], [start[1], end[1]], [start[2], end[2]])
@artist.allow_rasterization
def draw(self, renderer, *args, **kwargs):
"""Draw the artist."""
self.trajectory.draw(renderer, *args, **kwargs)
for key_frame in self.key_frames:
key_frame.draw(renderer, *args, **kwargs)
if self.show_direction:
self.direction_arrow.draw(renderer)
super(Trajectory, self).draw(renderer, *args, **kwargs)
def add_trajectory(self, axis):
"""Add the trajectory to a 3D axis."""
axis.add_line(self.trajectory)
for key_frame in self.key_frames:
key_frame.add_frame(axis)
if self.show_direction:
axis.add_artist(self.direction_arrow)
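# Usage sketch: a straight-line trajectory of 50 poses translated along x:
#     H = np.stack([np.eye(4)] * 50)
#     H[:, 0, 3] = np.linspace(0.0, 1.0, 50)
#     traj = Trajectory(H, n_frames=5, s=0.1)
#     traj.add_trajectory(make_3d_axis(1.0))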
class Arrow3D(FancyArrowPatch): # http://stackoverflow.com/a/11156353/915743
"""A Matplotlib patch that represents an arrow in 3D."""
def __init__(self, xs, ys, zs, *args, **kwargs):
super(Arrow3D, self).__init__((0, 0), (0, 0), *args, **kwargs)
self._verts3d = xs, ys, zs
def set_data(self, xs, ys, zs):
"""Set the arrow data.
Parameters
----------
xs : iterable
List of x positions
ys : iterable
List of y positions
zs : iterable
List of z positions
"""
self._verts3d = xs, ys, zs
def draw(self, renderer):
"""Draw the patch."""
xs3d, ys3d, zs3d = self._verts3d
xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
self.set_positions((xs[0], ys[0]), (xs[1], ys[1]))
super(Arrow3D, self).draw(renderer)
def make_3d_axis(ax_s, pos=111):
"""Generate new 3D axis.
Parameters
----------
    ax_s : float
Scaling of the new matplotlib 3d axis
pos : int, optional (default: 111)
Position indicator (nrows, ncols, plot_number)
Returns
-------
ax : Matplotlib 3d axis
New axis
"""
try:
ax = plt.subplot(pos, projection="3d", aspect="equal")
except NotImplementedError:
        # HACK: workaround for a bug in newer matplotlib versions (ca. 3.0.2):
# "It is not currently possible to manually set the aspect"
ax = plt.subplot(pos, projection="3d")
plt.setp(ax, xlim=(-ax_s, ax_s), ylim=(-ax_s, ax_s), zlim=(-ax_s, ax_s),
xlabel="X", ylabel="Y", zlabel="Z")
return ax
def plot_vector(ax=None, start=np.zeros(3), direction=np.array([1, 0, 0]), s=1.0, arrowstyle="simple", ax_s=1, **kwargs):
"""Plot Vector.
Draws an arrow from start to start + s * direction.
Parameters
----------
ax : Matplotlib 3d axis, optional (default: None)
If the axis is None, a new 3d axis will be created
start : array-like, shape (3,), optional (default: [0, 0, 0])
Start of the vector
    direction : array-like, shape (3,), optional (default: [1, 0, 0])
Direction of the vector
s : float, optional (default: 1)
Scaling of the vector that will be drawn
arrowstyle : str, or ArrowStyle, optional (default: 'simple')
See matplotlib's documentation of arrowstyle in
matplotlib.patches.FancyArrowPatch for more options
ax_s : float, optional (default: 1)
Scaling of the new matplotlib 3d axis
kwargs : dict, optional (default: {})
Additional arguments for the plotting functions, e.g. alpha
Returns
-------
ax : Matplotlib 3d axis
New or old axis
"""
if ax is None:
ax = make_3d_axis(ax_s)
axis_arrow = Arrow3D(
[start[0], start[0] + s * direction[0]],
[start[1], start[1] + s * direction[1]],
[start[2], start[2] + s * direction[2]],
mutation_scale=20, arrowstyle=arrowstyle, **kwargs)
ax.add_artist(axis_arrow)
return ax
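# Usage sketch: a red unit arrow along x on a fresh axis:
#     ax = plot_vector(direction=np.array([1.0, 0.0, 0.0]), color="r")
#     plt.show()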
def plot_length_variable(ax=None, start=np.zeros(3), end=np.ones(3), name="l", above=False, ax_s=1, color="k", **kwargs):
"""Plot length with text at its center.
Parameters
----------
ax : Matplotlib 3d axis, optional (default: None)
If the axis is None, a new 3d axis will be created
start : array-like, shape (3,), optional (default: [0, 0, 0])
Start point
end : array-like, shape (3,), optional (default: [1, 1, 1])
End point
name : str, optional (default: 'l')
Text in the middle
above : bool, optional (default: False)
Plot name above line
ax_s : float, optional (default: 1)
Scaling of the new matplotlib 3d axis
    color : str, optional (default: black)
        Color of the line and its annotation marks
kwargs : dict, optional (default: {})
Additional arguments for the text, e.g. fontsize
"""
if ax is None:
ax = make_3d_axis(ax_s)
direction = end - start
length = np.linalg.norm(direction)
if above:
ax.plot([start[0], end[0]], [start[1], end[1]], [start[2], end[2]], color=color)
else:
mid1 = start + 0.4 * direction
mid2 = start + 0.6 * direction
ax.plot([start[0], mid1[0]], [start[1], mid1[1]], [start[2], mid1[2]], color=color)
ax.plot([end[0], mid2[0]], [end[1], mid2[1]], [end[2], mid2[2]], color=color)
if np.linalg.norm(direction / length - unitz) < np.finfo(float).eps:
axis = unitx
else:
axis = unitz
mark = norm_vector(perpendicular_to_vectors(direction, axis)) * 0.03 * length
mark_start1 = start + mark
mark_start2 = start - mark
mark_end1 = end + mark
mark_end2 = end - mark
ax.plot([mark_start1[0], mark_start2[0]],
[mark_start1[1], mark_start2[1]],
[mark_start1[2], mark_start2[2]],
color=color)
ax.plot([mark_end1[0], mark_end2[0]],
[mark_end1[1], mark_end2[1]],
[mark_end1[2], mark_end2[2]],
color=color)
text_location = start + 0.45 * direction
if above:
text_location[2] += 0.3 * length
ax.text(text_location[0], text_location[1], text_location[2], name, zdir="x", **kwargs)
return ax
def plot_box(ax=None, size=np.ones(3), A2B=np.eye(4), ax_s=1, wireframe=True, color="k", alpha=1.0):
"""Plot box.
Parameters
----------
ax : Matplotlib 3d axis, optional (default: None)
If the axis is None, a new 3d axis will be created
size : array-like, shape (3,), optional (default: [1, 1, 1])
Size of the box per dimension
A2B : array-like, shape (4, 4)
Center of the box
ax_s : float, optional (default: 1)
Scaling of the new matplotlib 3d axis
    wireframe : bool, optional (default: True)
        Plot wireframe of box and surface otherwise
    color : str, optional (default: black)
        Color in which the box should be plotted
alpha : float, optional (default: 1)
Alpha value of the mesh that will be plotted
Returns
-------
ax : Matplotlib 3d axis
New or old axis
"""
if ax is None:
ax = make_3d_axis(ax_s)
corners = np.array([
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1]
])
corners = (corners - 0.5) * size
corners = transform(
A2B, np.hstack((corners, np.ones((len(corners), 1)))))[:, :3]
if wireframe:
for i, j in [(0, 1), (0, 2), (1, 3), (2, 3),
(4, 5), (4, 6), (5, 7), (6, 7),
(0, 4), (1, 5), (2, 6), (3, 7)]:
ax.plot([corners[i, 0], corners[j, 0]],
[corners[i, 1], corners[j, 1]],
[corners[i, 2], corners[j, 2]],
c=color, alpha=alpha)
else:
p3c = Poly3DCollection(np.array([
[corners[0], corners[1], corners[2]],
[corners[1], corners[2], corners[3]],
[corners[4], corners[5], corners[6]],
[corners[5], corners[6], corners[7]],
[corners[0], corners[1], corners[4]],
[corners[1], corners[4], corners[5]],
[corners[2], corners[6], corners[7]],
[corners[2], corners[3], corners[7]],
[corners[0], corners[4], corners[6]],
[corners[0], corners[2], corners[6]],
[corners[1], corners[5], corners[7]],
[corners[1], corners[3], corners[7]],
]))
p3c.set_alpha(alpha)
p3c.set_facecolor(color)
ax.add_collection3d(p3c)
return ax
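# Usage sketch: a translucent solid box of size 1 x 2 x 3 at the origin:
#     ax = plot_box(size=np.array([1.0, 2.0, 3.0]), wireframe=False,
#                   color="b", alpha=0.3, ax_s=2)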
def plot_sphere(ax=None, radius=1.0, p=np.zeros(3), ax_s=1, wireframe=True, n_steps=100, color="k", alpha=1.0):
"""Plot cylinder.
Parameters
----------
ax : Matplotlib 3d axis, optional (default: None)
If the axis is None, a new 3d axis will be created
radius : float, optional (default: 1)
Radius of the sphere
p : array-like, shape (3,), optional (default: [0, 0, 0])
Center of the sphere
ax_s : float, optional (default: 1)
Scaling of the new matplotlib 3d axis
    wireframe : bool, optional (default: True)
        Plot wireframe of sphere and surface otherwise
    n_steps : int, optional (default: 100)
        Number of discrete steps plotted in each dimension
    color : str, optional (default: black)
        Color in which the sphere should be plotted
alpha : float, optional (default: 1)
Alpha value of the mesh that will be plotted
Returns
-------
ax : Matplotlib 3d axis
New or old axis
"""
if ax is None:
ax = make_3d_axis(ax_s)
phi, theta = np.mgrid[0.0:np.pi:n_steps * 1j, 0.0:2.0 * np.pi:n_steps * 1j]
x = p[0] + radius * np.sin(phi) * np.cos(theta)
y = p[1] + radius * np.sin(phi) * np.sin(theta)
z = p[2] + radius * np.cos(phi)
if wireframe:
ax.plot_wireframe(x, y, z, rstride=10, cstride=10, color=color, alpha=alpha)
else:
ax.plot_surface(x, y, z, color=color, alpha=alpha, linewidth=0)
return ax
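# Usage sketch: a translucent sphere of radius 0.5 at the origin:
#     ax = plot_sphere(radius=0.5, wireframe=False, color="y", alpha=0.4)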
def plot_cylinder(ax=None, length=1.0, radius=1.0, thickness=0.0, A2B=np.eye(4), ax_s=1, wireframe=True, n_steps=100, alpha=1.0, color="k"):
"""Plot cylinder.
Parameters
----------
ax : Matplotlib 3d axis, optional (default: None)
If the axis is None, a new 3d axis will be created
length : float, optional (default: 1)
Length of the cylinder
radius : float, optional (default: 1)
Radius of the cylinder
thickness : float, optional (default: 0)
Thickness of a cylindrical shell. It will be subtracted from the
outer radius to obtain the inner radius. The difference must be
greater than 0.
A2B : array-like, shape (4, 4)
Center of the cylinder
ax_s : float, optional (default: 1)
Scaling of the new matplotlib 3d axis
wireframe : bool, optional (default: True)
Plot wireframe of cylinder and surface otherwise
n_steps : int, optional (default: 100)
Number of discrete steps plotted in each dimension
alpha : float, optional (default: 1)
Alpha value of the mesh that will be plotted
color : str, optional (default: black)
Color in which the cylinder should be plotted
Returns
-------
ax : Matplotlib 3d axis
New or old axis
"""
if ax is None:
ax = make_3d_axis(ax_s)
inner_radius = radius - thickness
if inner_radius <= 0.0:
raise ValueError("Thickness of cylindrical shell results in "
"invalid inner radius: %g" % inner_radius)
axis_start = A2B.dot(np.array([0, 0, -0.5 * length, 1]))[:3]
axis_end = A2B.dot(np.array([0, 0, 0.5 * length, 1]))[:3]
axis = axis_end - axis_start
axis /= length
not_axis = np.array([1, 0, 0])
if (axis == not_axis).all():
not_axis = np.array([0, 1, 0])
n1 = np.cross(axis, not_axis)
n1 /= np.linalg.norm(n1)
n2 = np.cross(axis, n1)
if wireframe:
        t = np.linspace(0, length, n_steps)
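        # Plausible completion of the truncated wireframe branch (assumption):
        # sample the outer surface parametrically from the axis frame
        # (axis, n1, n2) built above and draw it. The shell thickness is not
        # rendered in this sketch.
        theta = np.linspace(0, 2 * np.pi, n_steps)
        t, theta = np.meshgrid(t, theta)
        X, Y, Z = [axis_start[i] + axis[i] * t
                   + radius * (np.sin(theta) * n1[i] + np.cos(theta) * n2[i])
                   for i in range(3)]
        ax.plot_wireframe(X, Y, Z, rstride=10, cstride=10, alpha=alpha,
                          color=color)
    return ax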
# This script contains the Brianmodel class
# Calling makeneuron_ca() on a Brianmodel object will create a biophysical neuron
# Multiple other functions allow for plotting, animating, ...
from __future__ import division
#folder with parameters, equations and morphology
import os, sys
mod_path = os.path.abspath(os.path.join('..','Model'))
sys.path.append(mod_path)
from copy import deepcopy
import itertools as itools
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import animation
plt.rcParams['animation.ffmpeg_path'] = '/usr/local/bin/ffmpeg'
import matplotlib.colors as colorz
import matplotlib.cm as clrm
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
import brian2 as br2
from brian2 import uF, cm, um, ohm, ms, siemens, mV, nA, us, psiemens, pA, meter
# This is the 3D plotting toolkit
from mpl_toolkits.mplot3d import Axes3D
#import parameters and equations for neuron
from oo_Parameters import *
from oo_equations_AMPAplast import *
from MorphologyData import *
from Visualisation_functions import *
from oo_initScripts import set_init_nrn
br2.start_scope()
br2.defaultclock.dt = defaultclock.dt
class BRIANModel(object):
"""
Neuron object in brian2
"""
def __init__(self, swc_model):
"""
Parameters
----------
swc_model: a char
path of the file containing the neuron model in .swc format
"""
# Brian morphology
self.morpho = br2.Morphology.from_file(swc_model)
morpho = self.morpho
# Store compartment numbers
self.segment,self.segment_swc,self.compStart,self.compEnd = get_swc(swc_model)
        # Initialise a dictionary for distances to the soma per compartment
        self.distances = {}
        # Initialise a dictionary for lines to plot the neuron
self.lines = {}
# Add the first section as soma
self.sections = {morpho.type: [self.morpho[0], 0, 0]}
# Set a name and distances for the soma
self.sections['soma'][0].name = 'soma'
self.sections['soma'][0].f_x = self.morpho[0].x/meter
self.sections['soma'][0].f_y = self.morpho[0].y/meter
self.sections['soma'][0].f_z = self.morpho[0].z/meter
self.sections['soma'][0].dist = 0
self.distances['soma'] = [0.]
        # Initialize the dendrite numbering
dend_b = 0
# Register soma's children in a sections dictionary
for sec in morpho.children:
# Create an attribut "name" for all children of the soma
if str(sec.type) == "dend":
sec.name = sec.type[:4]+"_"+str(dend_b)
dend_b += 1
else:
sec.name = sec.type
# store final coordinates of the parent (=soma) segment
sec.f_x = self.morpho[0].x[0]/meter
sec.f_y = self.morpho[0].y[0]/meter
sec.f_z = self.morpho[0].z[0]/meter
sec.dist = self.distances['soma'][0]
# add distances to the parent
self.distances = calc_dist(self.distances, sec)
# get the coordinates for all compartments in this section
xn = sec.x/meter
yn = sec.y/meter
zn = sec.z/meter
# get first coordinates (and make integer)
a=(int(round(xn[0]*1e9)),int(round(yn[0]*1e9)),int(round(zn[0]*1e9)))
# id for the section (they correspond to lnum in .swc)
line_num = self.segment[a]
# add id and section to the 'sections' dictionary
self.sections[sec.name] = [sec,line_num,line_num]
# Initialize the level value
level = [sec for sec in morpho.children]
while level != []:
for i, sec in enumerate(level):
for j, child in enumerate(sec.children):
# Create an attribut "name" for all children of sec
name = sec.name + str(j)
child.name = name
# Store parent final coordinates
child.f_x = sec.x[-1]/meter
child.f_y = sec.y[-1]/meter
child.f_z = sec.z[-1]/meter
# Store distances to the soma
child.dist = self.distances[sec.name][-1]
self.distances = calc_dist(self.distances, child)
# Get the coordinates for all compartments in this section
xn = child.x/meter
yn = child.y/meter
zn = child.z/meter
# get first coordinates (and make integer)
a=(int(round(xn[0]*1e9)),int(round(yn[0]*1e9)),int(round(zn[0]*1e9)))
# id for the section (corresponds to lnum in .swc)
line_num = self.segment[a]
# add id and section to the 'sections' dictionary
self.sections[name] = [child, line_num,line_num]
level = [sec.children for sec in level]
# Flatten the list at this level
level = [sublist for sl in level for sublist in sl]
################################################################################
# THE FUNCTION BELOW CAN BE CALLED TO CREATE A BIOPHYSICAL NEURON
################################################################################
def makeNeuron_Ca(self,morphodata):
"""return spatial neuron"""
# Set Biophysics
neuron = self.biophysics(morphodata)
return neuron
def biophysics(self,morpho_data):
"""Inserting biophysics"""
neuron = br2.SpatialNeuron(morphology=self.morpho, model=eqs, \
Cm=Capacit, Ri=R_axial, threshold = "v/mV>0", refractory = "v/mV > -10",
threshold_location = 0, reset = 's_trace += x_reset*(taux/ms)',method='heun') #
# define the different parts of the neuron
N_soma = neuron[morpho_data['soma'][0]:morpho_data['soma'][-1]+1]
N_axon = neuron[morpho_data['axon'][0]:morpho_data['axon'][-1]+1]
N_basal = neuron[morpho_data['basal'][0]:morpho_data['basal'][-1]+1]
N_apical = neuron[morpho_data['apical'][0]:morpho_data['apical'][-1]+1]
Theta_low = morpho_data['thetalow']*mV
# insert leak conductance
neuron.gLeak = g_leak
# noise
neuron.noise_sigma = 0*pA # initial value membrane voltage
neuron.noise_avg = 0*pA # initial value membrane voltage
N_soma.noise_sigma = noise_std # initial value membrane voltage
N_soma.noise_avg = noise_mean # initial value membrane voltage
####################
# ACTIVE CHANNELS
####################
# Na channels soma, axon, apical dendrites
N_soma.gNav = somaNa
N_axon.gNav = axonNa
N_apical.gNav = apicalNa
neuron.thi1 = thi1_all
N_axon.thi1 = thi1_axn
neuron.thi2 = thi2_all
N_axon.thi2 = thi2_axn
#Kv channels
N_soma.gKv = somagKv
N_basal.gKv = dendgKv
N_apical.gKv = dendgKv
N_axon.gKv = axongKv
#Ca channels sina
N_soma.gCav = ratio_ca*somaCa
N_soma.gIt = (1-ratio_ca)*somaCa
#Ka channels soma
N_soma.gKa_prox = somaKap
#Ka channels dendrites, Na channels basal dendrites, Ca channels dendrites, axon initial segment
for sec in self.sections:
secNr = self.sections[sec][2]
seclen = len(self.sections[sec][0].x)
#BASAL
if secNr in morpho_data['basal']:
# decreasing Na channels
gNa_diff = 0.5*np.array(self.distances[sec][:])*psiemens/um**2
neuron[secNr:secNr+seclen].gNav = np.multiply(basalNa - gNa_diff,basalNa - gNa_diff>0 )
# increasing Ka channels
gKa_diff = 0.7*np.array(self.distances[sec][:])*psiemens/um**2
ratio_A = np.multiply(1. - (1./300.)*np.array(self.distances[sec][:]),1. - (1./300.)*np.array(self.distances[sec][:])>0)
neuron[secNr:secNr+seclen].gKa_prox = ratio_A*np.multiply(basalKa + gKa_diff,basalKa + gKa_diff>0 )
neuron[secNr:secNr+seclen].gKa_dist = (1.-ratio_A)*np.multiply(basalKa + gKa_diff,basalKa + gKa_diff>0 )
# Ca channels
neuron[secNr:secNr+seclen].gCav = dendCa*ratio_ca*(np.array(self.distances[sec][:])>30) + somaCa*ratio_ca*(np.array(self.distances[sec][:])<=30)
neuron[secNr:secNr+seclen].gIt = dendCa*(1.-ratio_ca)*(np.array(self.distances[sec][:])>30) + somaCa*(1.-ratio_ca)*(np.array(self.distances[sec][:])<=30)
#spines
addSpines = np.array(self.distances[sec][:]) > spinedist
noSpines = np.array(self.distances[sec][:]) <= spinedist
neuron[secNr:secNr+seclen].gLeak = noSpines*g_leak + addSpines*g_leak_dend
neuron[secNr:secNr+seclen].Cm = noSpines*Capacit + addSpines*Capacit_dend
#APICAL
if secNr in morpho_data['apical']:
#ratio of Ka channels
ratio_A = np.multiply(1. - (1./300.)*np.array(self.distances[sec][:]),1. - (1./300.)*np.array(self.distances[sec][:])>0)
neuron[secNr:secNr+seclen].gKa_prox = ratio_A*apicalKa
neuron[secNr:secNr+seclen].gKa_dist = (1.-ratio_A)*apicalKa
# Ca channels
                neuron[secNr:secNr+seclen].gCav = dendCa*ratio_ca*(np.array(self.distances[sec][:])>30) + somaCa*ratio_ca*(np.array(self.distances[sec][:])<=30)
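                # Plausible continuation (assumption), mirroring the basal
                # branch above: the remaining Ca conductance goes to gIt.
                neuron[secNr:secNr+seclen].gIt = dendCa*(1.-ratio_ca)*(np.array(self.distances[sec][:])>30) + somaCa*(1.-ratio_ca)*(np.array(self.distances[sec][:])<=30)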
import numpy as np
# Row vector
v = np.array([[0, 1, 2]])
print(f"row vector v1=\n{v}", v.shape)
# Column vector
v = np.array([[0], [1], [2]])
print(f"column vector v2=\n{v}", v.shape)
v = np.array([1, 2, 3])
print(f"true 1-D vector v={v}", v.shape)
print(f"true 1-D vector v={v.T}", v.shape)  # transposing a 1-D array is a no-op
# Create a matrix
matrix = np.mat([[1, 2], [3, 4]])
print(f"matrix=\n{matrix}")
print(f"transpose of matrix=\n{matrix.T}")
f = np.vectorize(lambda i: i * i)
print(f"elementwise matrix function:\n{f(matrix)}")
# Rank of the matrix
print(f"rank of matrix\n{np.linalg.matrix_rank(matrix)}")
# Determinant
print(f"determinant of matrix\n{np.linalg.det(matrix)}")
# Trace
print(f"trace of matrix\n{matrix.trace()}")
# Eigenvalues and eigenvectors
eigenvalues, eigenvectors = np.linalg.eig(matrix)
print(f"eigenvalues and eigenvectors\n {eigenvalues}\n{eigenvectors}")
# Matrix inverse
m = np.mat([[1, 2], [3, 4]])
result = np.linalg.inv(m)
print(f"inverse of matrix\n {result}")
# Matrix multiplication (order matters)
m1 = np.mat([[1, 2], [3, 4]])
m2 = np.mat([[0, 1], [1, 2]])
print(f"np.dot(m1, m2)=\n{np.dot(m1, m2)}")
print(f"matrix products\n {m1 @ m2}\n{m2 @ m1}")
print(f"'*' on np.mat is also matrix multiplication, not elementwise\n {m1 * m2}\n {m2 * m1}")
# Vector products
v1 = np.array([1, 0])
v2 = np.array([0, 1])
d1 = np.dot(v1, v2)    # dot product
d2 = np.vdot(v1, v2)   # dot product over flattened inputs
d3 = np.inner(v1, v2)  # inner product
d4 = np.outer(v1, v2)  # outer product
d5 = np.cross(v1, v2)  # cross product (scalar z-component for 2-D vectors)
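# For these orthogonal unit vectors the dot/inner products are 0 and the
# cross product's z-component is 1:
print(f"dot={d1}, inner={d3}, cross={d5}")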
import os
import cv2
import glob
import json
from tqdm import tqdm
import numpy as np
from pathlib import Path
from PIL import Image
import torch
from torch.utils.data import Dataset
from lib.utils.general import xyxy2xywh, xywh2xyxy
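# NOTE: get_hash and load_image are used below but are not imported in this
# file. These are minimal stand-ins modeled on the YOLOv5 helpers (an
# assumption -- swap in your project's own versions if they exist).
def get_hash(files):
    # Cheap change detection: total size of all existing files in the list
    return sum(os.path.getsize(f) for f in files if os.path.isfile(f))

def load_image(self, index):
    # Load one image and resize its longest side to self.img_size.
    # Returns the image plus its original and resized (height, width).
    img = cv2.imread(self.img_files[index])  # BGR
    assert img is not None, 'Image Not Found ' + self.img_files[index]
    h0, w0 = img.shape[:2]
    r = self.img_size / max(h0, w0)  # resize ratio
    if r != 1:
        interp = cv2.INTER_AREA if r < 1 else cv2.INTER_LINEAR
        img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp)
    return img, (h0, w0), img.shape[:2]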
def create_dataloader(image_path, imgsz, batch_size, hyp=None, augment=False, workers=8):
    dataset = LoadImagesAndLabels(image_path, imgsz, augment=augment, hyp=hyp)
batch_size = min(batch_size, len(dataset))
nw = min([batch_size if batch_size > 1 else 0, workers]) # number of workers
dataloader = torch.utils.data.DataLoader(dataset,
batch_size=batch_size,
num_workers=nw,
shuffle=True,
pin_memory=True,
collate_fn=LoadImagesAndLabels.collate_fn)
return dataloader, dataset
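# Usage sketch (hypothetical path; assumes a sibling 'labels' directory with
# one .txt file per image):
#     dataloader, dataset = create_dataloader('data/images/train', imgsz=640,
#                                             batch_size=16)
#     for batch in dataloader:
#         ...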
class LoadImagesAndLabels(Dataset): # for training/testing
def __init__(self, path, img_size=640, augment=False, hyp=None, cache_images=False):
if os.path.isdir(path):
self.img_files = sorted(glob.glob(path + os.sep + '*.*'))
else:
            raise Exception('%s does not exist' % path)
n = len(self.img_files)
assert n > 0, 'No images found in %s' % path
self.n = n # number of images
self.img_size = img_size
self.augment = augment
self.hyp = hyp
self.mosaic = self.augment # load 4 images at a time into a mosaic (only during training)
# Define labels
self.label_files = [x.replace('images', 'labels').replace(os.path.splitext(x)[-1], '.txt') for x in
self.img_files]
# Check cache
cache_path = str(Path(self.label_files[0]).parent) + '.cache' # cached labels
if os.path.isfile(cache_path):
cache = torch.load(cache_path) # load
if cache['hash'] != get_hash(self.label_files + self.img_files): # dataset changed
cache = self.cache_labels(cache_path) # re-cache
else:
cache = self.cache_labels(cache_path) # cache
# Get labels
labels, shapes = zip(*[cache[x] for x in self.img_files])
self.shapes = np.array(shapes, dtype=np.float64)
self.labels = list(labels)
# Cache labels
nm, nf, ne, ns, nd = 0, 0, 0, 0, 0 # number missing, found, empty, datasubset, duplicate
pbar = tqdm(enumerate(self.label_files))
for i, file in pbar:
l = self.labels[i] # label
if l is not None and l.shape[0]:
assert l.shape[1] == 5, '> 5 label columns: %s' % file
assert (l >= 0).all(), 'negative labels: %s' % file
# assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels: %s' % file
if np.unique(l, axis=0).shape[0] < l.shape[0]: # duplicate rows
nd += 1 # print('WARNING: duplicate rows in %s' % self.label_files[i]) # duplicate rows
self.labels[i] = l
nf += 1 # file found
else:
ne += 1 # print('empty labels for image %s' % self.img_files[i]) # file empty
# os.system("rm '%s' '%s'" % (self.img_files[i], self.label_files[i])) # remove
pbar.desc = 'Scanning labels %s (%g found, %g missing, %g empty, %g duplicate, for %g images)' % (
cache_path, nf, nm, ne, nd, n)
if nf == 0:
s = 'WARNING: No labels found in %s' % (os.path.dirname(file) + os.sep)
print(s)
assert not augment, '%s. Can not train without labels.' % s
# Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
self.imgs = [None] * n
if cache_images:
gb = 0 # Gigabytes of cached images
pbar = tqdm(range(len(self.img_files)), desc='Caching images')
self.img_hw0, self.img_hw = [None] * n, [None] * n
for i in pbar: # max 10k images
self.imgs[i], self.img_hw0[i], self.img_hw[i] = load_image(self, i) # img, hw_original, hw_resized
gb += self.imgs[i].nbytes
pbar.desc = 'Caching images (%.1fGB)' % (gb / 1E9)
def cache_labels(self, path='labels.cache'):
# Cache dataset labels, check images and read shapes
x = {} # dict
pbar = tqdm(zip(self.img_files, self.label_files), desc='Scanning images', total=len(self.img_files))
for (img, label) in pbar:
try:
l = []
image = Image.open(img)
image.verify() # PIL verify
# _ = io.imread(img) # skimage verify (from skimage import io)
shape = image.size # image size (width, height)
assert (shape[0] > 9) & (shape[1] > 9), 'image size <10 pixels'
if os.path.isfile(label):
with open(label, 'r') as f:
l = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32) # labels
# content = json.load(f)
# for sample in content['shapes']:
# x1, y1 = sample['points'][0]
# x2, y2 = sample['points'][1]
# if sample['group_id'] is None:
# cls = 0
# else:
# cls = int(sample['group_id'])
# l.append([cls, x1, y1, x2, y2])
# l = np.array(l, dtype=np.float32) # labels
if len(l) == 0:
                    l = np.zeros((0, 5), dtype=np.float32)
"""
Define MagneticStatics class to calculate common static magnetic fields
as first raised in issue #100.
"""
import abc
import numbers
import numpy as np
import scipy.special
from astropy import constants
from astropy import units as u
from plasmapy.utils.decorators import validate_quantities
class MagnetoStatics(abc.ABC):
"""Abstract class for all kinds of magnetic static fields"""
@abc.abstractmethod
def magnetic_field(self, p: u.m) -> u.T:
"""
Calculate magnetic field generated by this wire at position `p`
Parameters
----------
p : `astropy.units.Quantity`
three-dimensional position vector
Returns
-------
B : `astropy.units.Quantity`
            magnetic field at the specified position
"""
class MagneticDipole(MagnetoStatics):
"""
    Simple magnetic dipole - a point magnetic moment (the limit of two nearby opposite magnetic poles).
Parameters
----------
moment: `astropy.units.Quantity`
Magnetic moment vector, in units of A * m^2
p0: `astropy.units.Quantity`
Position of the dipole
"""
@validate_quantities
def __init__(self, moment: u.A * u.m ** 2, p0: u.m):
self.moment = moment.value
self._moment_u = moment.unit
self.p0 = p0.value
self._p0_u = p0.unit
def __repr__(self):
return "{name}(moment={moment}{moment_u}, p0={p0}{p0_u})".format(
name=self.__class__.__name__,
moment=self.moment,
p0=self.p0,
moment_u=self._moment_u,
p0_u=self._p0_u,
)
def magnetic_field(self, p: u.m) -> u.T:
r"""
Calculate magnetic field generated by this wire at position `p`
Parameters
----------
p : `astropy.units.Quantity`
three-dimensional position vector
Returns
-------
B : `astropy.units.Quantity`
            magnetic field at the specified position
"""
r = p - self.p0
m = self.moment
B = (
constants.mu0.value
/ 4
/ np.pi
* (
3 * r * np.dot(m, r) / np.linalg.norm(r) ** 5
- m / np.linalg.norm(r) ** 3
)
)
return B * u.T
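# Usage sketch: a 1 A*m^2 dipole along z at the origin, field sampled 1 m
# away on the z axis (illustrative values):
#     dipole = MagneticDipole(moment=np.array([0, 0, 1.0]) * u.A * u.m ** 2,
#                             p0=np.zeros(3) * u.m)
#     B = dipole.magnetic_field(np.array([0, 0, 1.0]))  # ~2e-7 T along z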
class Wire(MagnetoStatics):
"""Abstract wire class for concrete wires to be inherited from."""
class GeneralWire(Wire):
r"""
General wire class described by its parametric vector equation
Parameters
----------
parametric_eq: Callable
A vector-valued (with units of position) function of a single real
parameter.
t1: float
lower bound of the parameter, smaller than t2
t2: float
upper bound of the parameter, larger than t1
current: `astropy.units.Quantity`
electric current
"""
@validate_quantities
def __init__(self, parametric_eq, t1, t2, current: u.A):
if callable(parametric_eq):
self.parametric_eq = parametric_eq
else:
raise ValueError("Argument parametric_eq should be a callable")
if t1 < t2:
self.t1 = t1
self.t2 = t2
else:
raise ValueError(f"t1={t1} is not smaller than t2={t2}")
self.current = current.value
self._current_u = current.unit
def __repr__(self):
return (
"{name}(parametric_eq={parametric_eq}, t1={t1}, t2={t2}, "
"current={current}{current_u})".format(
name=self.__class__.__name__,
parametric_eq=self.parametric_eq.__name__,
t1=self.t1,
t2=self.t2,
current=self.current,
current_u=self._current_u,
)
)
def magnetic_field(self, p: u.m, n: numbers.Integral = 1000) -> u.T:
r"""
Calculate magnetic field generated by this wire at position `p`
Parameters
----------
p : `astropy.units.Quantity`
three-dimensional position vector
n : int, optional
Number of segments for Wire calculation
(defaults to 1000)
Returns
-------
B : `astropy.units.Quantity`
            magnetic field at the specified position
Notes
-----
For simplicity, we segment the wire into n equal pieces,
and assume each segment is straight. Default n is 1000.
.. math::
\vec B
\approx \frac{\mu_0 I}{4\pi} \sum_{i=1}^{n}
\frac{[\vec l(t_{i}) - \vec l(t_{i-1})] \times
\left[\vec p - \frac{\vec l(t_{i}) + \vec l(t_{i-1})}{2}\right]}
{\left|\vec p - \frac{\vec l(t_{i}) + \vec l(t_{i-1})}{2}\right|^3},
\quad \text{where}\, t_i = t_{\min}+i/n*(t_{\max}-t_{\min})
"""
p1 = self.parametric_eq(self.t1)
step = (self.t2 - self.t1) / n
t = self.t1
B = 0
for i in range(n):
t = t + step
            p2 = self.parametric_eq(t)
            dl = p2 - p1
            R = p - (p1 + p2) / 2  # midpoint of the segment, per the docstring
            p1 = p2
B += np.cross(dl, R) / np.linalg.norm(R) ** 3
B = B * constants.mu0.value / 4 / np.pi * self.current
return B * u.T
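# Usage sketch: a finite straight segment along z, parameterized by t in
# [0, 1] and carrying 1 A:
#     wire = GeneralWire(lambda t: np.array([0.0, 0.0, t]), 0, 1, 1 * u.A)
#     B = wire.magnetic_field(np.array([1.0, 0.0, 0.5]))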
class FiniteStraightWire(Wire):
"""
Finite length straight wire class.
    The direction from p1 to p2 is the positive current direction.
Parameters
----------
p1: `astropy.units.Quantity`
three-dimensional Cartesian coordinate of one end of the straight wire
p2: `astropy.units.Quantity`
three-dimensional Cartesian coordinate of another end of the straight wire
current: `astropy.units.Quantity`
electric current
"""
@validate_quantities
def __init__(self, p1: u.m, p2: u.m, current: u.A):
self.p1 = p1.value
self.p2 = p2.value
self._p1_u = p1.unit
self._p2_u = p2.unit
if np.all(p1 == p2):
raise ValueError("p1, p2 should not be the same point.")
self.current = current.value
self._current_u = current.unit
def __repr__(self):
return "{name}(p1={p1}{p1_u}, p2={p2}{p2_u}, current={current}{current_u})".format(
name=self.__class__.__name__,
p1=self.p1,
p2=self.p2,
current=self.current,
p1_u=self._p1_u,
p2_u=self._p2_u,
current_u=self._current_u,
)
def magnetic_field(self, p) -> u.T:
r"""
Calculate magnetic field generated by this wire at position `p`
Parameters
----------
p : `astropy.units.Quantity`
three-dimensional position vector
Returns
-------
B : `astropy.units.Quantity`
            magnetic field at the specified position
Notes
-----
Let :math:`P_f` be the foot of perpendicular, :math:`\theta_1`(:math:`\theta_2`) be the
angles between :math:`\overrightarrow{PP_1}`(:math:`\overrightarrow{PP_2}`)
and :math:`\overrightarrow{P_2P_1}`.
.. math:
\vec B = \frac{(\overrightarrow{P_2P_1}\times\overrightarrow{PP_f})^0}
{|\overrightarrow{PP_f}|}
\frac{\mu_0 I}{4\pi} (\cos\theta_1 - \cos\theta_2)
"""
# foot of perpendicular
p1, p2 = self.p1, self.p2
p2_p1 = p2 - p1
ratio = np.dot(p - p1, p2_p1) / np.dot(p2_p1, p2_p1)
pf = p1 + p2_p1 * ratio
# angles: theta_1 = <p - p1, p2 - p1>, theta_2 = <p - p2, p2 - p1>
cos_theta_1 = (
np.dot(p - p1, p2_p1) / np.linalg.norm(p - p1) / np.linalg.norm(p2_p1)
)
cos_theta_2 = (
np.dot(p - p2, p2_p1) / np.linalg.norm(p - p2) / np.linalg.norm(p2_p1)
)
B_unit = np.cross(p2_p1, p - pf)
B_unit = B_unit / np.linalg.norm(B_unit)
B = (
B_unit
/ np.linalg.norm(p - pf)
* (cos_theta_1 - cos_theta_2)
* constants.mu0.value
/ 4
/ np.pi
* self.current
)
return B * u.T
def to_GeneralWire(self):
"""Convert this `Wire` into a `GeneralWire`."""
p1, p2 = self.p1, self.p2
return GeneralWire(lambda t: p1 + (p2 - p1) * t, 0, 1, self.current * u.A)
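# Consistency sketch: the analytic field and the numerical GeneralWire
# conversion should agree to quadrature accuracy:
#     fw = FiniteStraightWire(np.array([0, 0, -1.0]) * u.m,
#                             np.array([0, 0, 1.0]) * u.m, 1 * u.A)
#     gw = fw.to_GeneralWire()
#     p = np.array([1.0, 0.0, 0.0])
#     np.allclose(fw.magnetic_field(p).value, gw.magnetic_field(p).value,
#                 atol=1e-9)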
class InfiniteStraightWire(Wire):
"""
Infinite straight wire class.
Parameters
----------
direction:
three-dimensional direction vector of the wire, also the positive current direction
p0: `astropy.units.Quantity`
one point on the wire
current: `astropy.units.Quantity`
electric current
"""
@validate_quantities
def __init__(self, direction, p0: u.m, current: u.A):
self.direction = direction / np.linalg.norm(direction)
self.p0 = p0.value
self._p0_u = p0.unit
self.current = current.value
self._current_u = current.unit
def __repr__(self):
return "{name}(direction={direction}, p0={p0}{p0_u}, current={current}{current_u})".format(
name=self.__class__.__name__,
direction=self.direction,
p0=self.p0,
current=self.current,
p0_u=self._p0_u,
current_u=self._current_u,
)
def magnetic_field(self, p) -> u.T:
r"""
Calculate magnetic field generated by this wire at position `p`
Parameters
----------
p : `astropy.units.Quantity`
three-dimensional position vector
Returns
-------
B : `astropy.units.Quantity`
            magnetic field at the specified position
Notes
-----
.. math:
            \vec B = \frac{\mu_0 I}{2\pi r}(\vec l^0\times \vec{P_0P})^0,
            \text{where}\ \vec l^0\ \text{is the unit vector of the current direction and}
            r\ \text{is the perpendicular distance between}\ P\ \text{and the infinite wire}
"""
r = np.cross(self.direction, p - self.p0)
B_unit = r / np.linalg.norm(r)
r = np.linalg.norm(r)
return B_unit / r * constants.mu0.value / 2 / np.pi * self.current * u.T
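# Sanity sketch: at r = 1 m from a 1 A wire, |B| = mu0 / (2 pi) ~ 2e-7 T:
#     wire = InfiniteStraightWire(np.array([0, 0, 1.0]), np.zeros(3) * u.m,
#                                 1 * u.A)
#     np.linalg.norm(wire.magnetic_field(np.array([1.0, 0, 0])).value)  # ~2e-7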
class CircularWire(Wire):
"""
Circular wire(coil) class
Parameters
----------
normal:
three-dimensional normal vector of the circular coil
center: `astropy.units.Quantity`
three-dimensional position vector of the circular coil's center
radius: `astropy.units.Quantity`
radius of the circular coil
current: `astropy.units.Quantity`
electric current
"""
def __repr__(self):
return (
"{name}(normal={normal}, center={center}{center_u}, "
"radius={radius}{radius_u}, current={current}{current_u})".format(
name=self.__class__.__name__,
normal=self.normal,
center=self.center,
radius=self.radius,
current=self.current,
center_u=self._center_u,
radius_u=self._radius_u,
current_u=self._current_u,
)
)
@validate_quantities
def __init__(self, normal, center: u.m, radius: u.m, current: u.A, n=300):
self.normal = normal / np.linalg.norm(normal)
self.center = center.value
self._center_u = center.unit
if radius > 0:
self.radius = radius.value
self._radius_u = radius.unit
else:
raise ValueError("Radius should bu larger than 0")
self.current = current.value
self._current_u = current.unit
# parametric equation
        # find two other axes in the disc plane
z = np.array([0, 0, 1])
axis_x = np.cross(z, self.normal)
axis_y = np.cross(self.normal, axis_x)
if np.linalg.norm(axis_x) == 0:
axis_x = np.array([1, 0, 0])
axis_y = np.array([0, 1, 0])
else:
axis_x = axis_x / np.linalg.norm(axis_x)
axis_y = axis_y / np.linalg.norm(axis_y)
self.axis_x = axis_x
self.axis_y = axis_y
def curve(t):
if isinstance(t, np.ndarray):
t = np.expand_dims(t, 0)
axis_x_mat = np.expand_dims(axis_x, 1)
axis_y_mat = np.expand_dims(axis_y, 1)
return self.radius * (
np.matmul(axis_x_mat, np.cos(t)) + np.matmul(axis_y_mat, np.sin(t))
) + np.expand_dims(self.center, 1)
else:
return (
self.radius * (np.cos(t) * axis_x + np.sin(t) * axis_y)
+ self.center
)
self.curve = curve
self.roots_legendre = scipy.special.roots_legendre(n)
self.n = n
def magnetic_field(self, p) -> u.T:
r"""
Calculate magnetic field generated by this wire at position `p`
Parameters
----------
p : `astropy.units.Quantity`
three-dimensional position vector
Returns
-------
B : `astropy.units.Quantity`
            magnetic field at the specified position
Notes
-----
.. math:
\vec B
= \frac{\mu_0 I}{4\pi}
\int \frac{d\vec l\times(\vec p - \vec l(t))}{|\vec p - \vec l(t)|^3}\\
= \frac{\mu_0 I}{4\pi} \int_{-\pi}^{\pi} {(-r\sin\theta \hat x + r\cos\theta \hat y)}
\times \frac{\vec p - \vec l(t)}{|\vec p - \vec l(t)|^3} d\theta
We use n points Gauss-Legendre quadrature to compute the integral. The default n is 300.
"""
x, w = self.roots_legendre
t = x * np.pi
pt = self.curve(t)
dl = self.radius * (
-np.matmul(np.expand_dims(self.axis_x, 1), np.expand_dims(np.sin(t), 0))
+ np.matmul(np.expand_dims(self.axis_y, 1), np.expand_dims(np.cos(t), 0))
) # (3, n)
r = np.expand_dims(p, 1) - pt # (3, n)
        r_norm_3 = np.linalg.norm(r, axis=0) ** 3
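        # Plausible completion (assumption): finish the Gauss-Legendre
        # quadrature of the Biot-Savart integrand dl x r / |r|^3; with
        # theta = pi * x, the change of variables contributes a factor of pi.
        ft = np.cross(dl, r, axisa=0, axisb=0) / np.expand_dims(r_norm_3, 1)  # (n, 3)
        integral = np.pi * np.einsum("i,ij->j", w, ft)
        B = constants.mu0.value / 4 / np.pi * self.current * integral
        return B * u.T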
import numpy as np
import joblib
from .base import Model
from pathlib import Path
BANDNAMES = ["B02", "B03", "B04", "B05", "B06", "B07", "B08", "B8A", "B11", "B12"]
def cosine(X, Y):
"""
Cosine distance between `X` and `Y` calculated over axis=2.
"""
nX = 1 / np.sqrt(np.sum(np.square(X), axis=2))
nY = 1 / np.sqrt(np.sum(np.square(Y), axis=2))
XX = np.einsum("ij,ijk->ijk", nX, X)
    YY = np.einsum("ij,ijk->ijk", nY, Y)
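    # Plausible completion (assumption): cosine distance is one minus the
    # normalized inner product along the last axis.
    return 1.0 - np.sum(XX * YY, axis=2)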
from nltk.stem import WordNetLemmatizer
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.models import load_model
from src.logic.data.data_intents import assistant_intents
import random
import pickle
import numpy
import ast
import nltk
import src.logic.voice.speech as speech
from src.logic.lang.language import translator
justStarted = True
class GenericAssistant:
def __init__(self, intent_methods={}, model_name="assistant_model"):
self.intents = assistant_intents
self.intent_methods = intent_methods
self.model_name = model_name
self.lemmatizer = WordNetLemmatizer()
def train_model(self):
self.words = []
self.classes = []
documents = []
ignore_letters = ['!', '?', ',', '.']
for intent in self.intents['intents']:
for pattern in intent['patterns']:
word = nltk.word_tokenize(pattern)
self.words.extend(word)
documents.append((word, intent['tag']))
if intent['tag'] not in self.classes:
self.classes.append(intent['tag'])
self.words = [self.lemmatizer.lemmatize(w.lower()) for w in self.words if w not in ignore_letters]
self.words = sorted(list(set(self.words)))
self.classes = sorted(list(set(self.classes)))
training = []
output_empty = [0] * len(self.classes)
for doc in documents:
bag = []
word_patterns = doc[0]
word_patterns = [self.lemmatizer.lemmatize(word.lower()) for word in word_patterns]
for word in self.words:
                bag.append(1 if word in word_patterns else 0)
output_row = list(output_empty)
output_row[self.classes.index(doc[1])] = 1
training.append([bag, output_row])
random.shuffle(training)
        training = numpy.array(training)
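        # Plausible continuation (assumption), following the standard Keras
        # bag-of-words chatbot recipe this class appears to be based on:
        train_x = list(training[:, 0])
        train_y = list(training[:, 1])
        self.model = Sequential()
        self.model.add(Dense(128, input_shape=(len(train_x[0]),), activation='relu'))
        self.model.add(Dropout(0.5))
        self.model.add(Dense(64, activation='relu'))
        self.model.add(Dropout(0.5))
        self.model.add(Dense(len(train_y[0]), activation='softmax'))
        sgd = SGD(learning_rate=0.01, momentum=0.9, nesterov=True)
        self.model.compile(loss='categorical_crossentropy', optimizer=sgd,
                           metrics=['accuracy'])
        self.model.fit(numpy.array(train_x), numpy.array(train_y),
                       epochs=200, batch_size=5, verbose=1)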
import cv2
import numpy as np
import dlib
import os
import ctypes
import sys
#import pandas as pd
from eye import Eye
from calibration import Calibration
from gaze_tracking import GazeTracking
from face_gaze_estimator import estimateGaze
# used https://github.com/MCodez/PUPIL-Detection-using-OpenCV
#face predictor
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>.
#300 faces In-the-wild challenge: Database and results.
#Image and Vision Computing (IMAVIS), Special Issue on Facial Landmark Localisation "In-The-Wild". 2016.
#https://www.cs.ru.ac.za/research/g09W0474/Thesis.pdf
# l = left (-x) and r = right (+x)
filename = './src/assets/video.mp4'
filename = ''
if filename == '':
file = 0
else:
    scriptDir = os.path.dirname(__file__)
    assets_folder = os.path.join(scriptDir, '../assets/', filename)
    file = os.path.abspath(assets_folder)
print(file)
cap = cv2.VideoCapture(file)
cap.set(cv2.CAP_PROP_FPS,60)
if (cap.isOpened()== False):
print("Error opening video stream or file")
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor('./src/assets/shape_predictor_68_face_landmarks.dat')
detector_params = cv2.SimpleBlobDetector_Params()
detector_params.minThreshold = 0
detector_params.filterByArea = True
detector_params.maxArea = 1500
detector_params.filterByConvexity = False
detector_params.minConvexity = 0.1
#blob_detector = cv2.SimpleBlobDetector_create(detector_params)
#left = [36, 37, 38, 39, 40, 41] # keypoint indices for left eye
#right = [42, 43, 44, 45, 46, 47] # keypoint indices for right eye
'''
def midpoint(p1, p2):
return int((p1.x + p2.x)/2), int((p1.y + p2.y)/2)
def calc_pupil(landmarks,side):
x_sum =0
y_sum =0
for i in side:
x_sum += landmarks.part(i).x
y_sum += landmarks.part(i).y
coord = (int(x_sum/len(side)),int(y_sum/len(side)))
return coord
def cut_eyebrows(img):
height, width = img.shape[:2]
eyebrow_h = int(height / 4)
return img[eyebrow_h:height, 0:width] # cut eyebrows out (15 px)return img
'''
def clamp(num, min, max):
if num > max:
return max
elif num < min:
return min
else:
return num
def subtract_tuples(tuple1,tuple2):
return (tuple1[0]-tuple2[0],tuple1[1]-tuple2[1])
def add_tuples(tuple1,tuple2):
return (tuple1[0]+tuple2[0],tuple1[1]+tuple2[1])
def abs_tuples(tuple):
return (abs(tuple[0]),abs(tuple[1]))
def multi_tuples(tuple1,tuple2):
return (tuple1[0]*tuple2[0],tuple1[1]*tuple2[1])
def div_tuples(tuple1,tuple2):
return (tuple1[0]/tuple2[0],tuple1[1]/tuple2[1])
def clamp_tuple(num,min_num,max_num):
return clamp(num[0],min_num[0],max_num[0]),clamp(num[1],min_num[1],max_num[1])
class pupil_tracker_old:
left = [36, 37, 38, 39, 40, 41] # keypoint indices for left eye
right = [42, 43, 44, 45, 46, 47] # keypoint indices for right eye
start_pos = 0
#global declerations
face_detector = dlib.get_frontal_face_detector()
predictor
params = cv2.SimpleBlobDetector_Params()
params.minThreshold = 0
blob_detector = cv2.SimpleBlobDetector_create(params)
#get the middle of two points in 2d space
def midpoint(p1, p2):
return int((p1.x + p2.x)/2), int((p1.y + p2.y)/2)
#cut the top 4th of a image
def cut_eyebrows(img):
height, width = img.shape[:2]
eyebrow_h = int(height / 4)
        return img[eyebrow_h:height, 0:width]  # cut the top quarter (eyebrows) out
#calculate the midle of the landmarks this aproximates in most case the place of the pupil
#is not accurate when the eye is recorded from extreem angles
def calc_pupil(landmarks,side):
x_sum =0
y_sum =0
for i in side:
x_sum += landmarks.part(i).x
y_sum += landmarks.part(i).y
coord = (int(x_sum/len(side)),int(y_sum/len(side)))
return coord
def image_blob_detection(self,image):
blur = cv2.GaussianBlur(image,(9,9),0)
ret,thresh1 = cv2.threshold(blur,12,255,cv2.THRESH_BINARY)
kernel = np.ones((2,2),np.uint8)
erosion = cv2.erode(thresh1,kernel,iterations = 1)
cv2.imshow("test",erosion)
return self.blob_detector.detect(erosion)
#this wil find and return the most probable place of the pupil
def detect_pupil(self,eye,backup): # backup is a point that is returned by a calc pupil func
#return backup
# edit the images so it the blob detector wil work better
#inverted = np.invert(eye)
blur = cv2.GaussianBlur(eye,(9,9),0)
ret,thresh1 = cv2.threshold(blur,20,255,cv2.THRESH_BINARY)
#kernel = np.ones((2,2),np.uint8)
#erosion = cv2.erode(thresh1,kernel,iterations = 1)
#closing = cv2.morphologyEx(erosion, cv2.MORPH_CLOSE, kernel)
#pupil = cv2.HoughCircles(erosion,cv2.HOUGH_GRADIENT,1,20,param1=300,param2=0.8,minRadius=0,maxRadius=0)
#print(pupil)
#contours, hierarchy = cv2.findContours(erosion, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
#cv2.drawContours(erosion, contours, -1, (0,255,0), 3)
#cv2.imshow("test",erosion)
#print(hierarchy)
#im_tresh = cut_eyebrows(img_l_eye)
#blob detection
pupil = self.image_blob_detection(self,eye)
#im_with_keypoints = cv2.drawKeypoints(erosion, pupil, np.array([]), (0,0,255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
#pupil = np.uint16(np.around(pupil))
#print(pupil)
#if no blobs are found we return the backup
if len(pupil) ==0:
return backup
#if we find keypoints we select the closed one to the
selected_keypoint = (0,0)
        selected_keypoint_len_cubed = float('inf')  # start larger than any real squared distance
for keyPoint in pupil:
x = keyPoint.pt[0]
y = keyPoint.pt[1]
len_cubed = (backup[0]-x)**2 + (backup[1]-y)**2 # no square root needed because every distance is calculated with pythagoras and only the diference is important
if len_cubed<selected_keypoint_len_cubed:
selected_keypoint = (x,y)
selected_keypoint_len_cubed = len_cubed
#--------transform to full screen------------
#x += landmarks.part(36).x
#y += center_top[1]
#s = keyPoint.size
#cv2.circle(frame,(int(x),int(y)),10,(0,255,0),3)
#print(pupil)
#cv2.imshow("Frame",erosion )
return selected_keypoint
def __init__(self, shape_predictor_file = './src/assets/shape_predictor_68_face_landmarks.dat'):
scriptDir = os.path.dirname(__file__)
assets_folder = os.path.join(scriptDir, '../assets/', shape_predictor_file)
file = os.path.abspath(assets_folder)
#print(assets_folder)
self.predictor = dlib.shape_predictor(file)
def detect_in_frame(self,frame):
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = detector(gray)
if len(faces) ==0:
return ((0,0),(0,0))
if len(faces)> 1:
print('Please avoid multiple faces.')
sys.exit()
#for face in faces:
#x, y = face.left(), face.top()
#x1, y1 = face.right(), face.bottom()
#cv2.rectangle(frame, (x, y), (x1, y1), (0, 255, 0), 2)
#pass
landmarks = predictor(gray, faces[0])
#left_point = (landmarks.part(36).x, landmarks.part(36).y)
#right_point = (landmarks.part(39).x, landmarks.part(39).y)
center_top_l = self.midpoint(landmarks.part(37), landmarks.part(38))
center_bottom_l = self.midpoint(landmarks.part(41), landmarks.part(40))
center_top_r = self.midpoint(landmarks.part(43), landmarks.part(44))
center_bottom_r = self.midpoint(landmarks.part(47), landmarks.part(46))
img_l_eye = gray[center_top_l[1]:center_bottom_l[1],landmarks.part(36).x:landmarks.part(39).x]
img_r_eye = gray[center_top_r[1]:center_bottom_r[1],landmarks.part(42).x:landmarks.part(45).x]
backup_l_global = self.calc_pupil(landmarks,self.left)
backup_l_global = div_tuples(add_tuples(center_top_l,center_bottom_l),(2,2))
backup_r_global = self.calc_pupil(landmarks,self.right)
backup_r_global = div_tuples(add_tuples(center_top_r,center_bottom_r),(2,2))
#--------transform to eye space------------
#backup_l = (abs(backup_l[0]-landmarks.part(36).x),abs(backup_l[1]-center_top_l[1]))
#backup_r = (abs(backup_r[0]-landmarks.part(42).x),abs(backup_r[1]-center_top_r[1]))
backup_l = abs_tuples(subtract_tuples(backup_l_global,(landmarks.part(36).x,center_top_l[1])))
backup_r = abs_tuples(subtract_tuples(backup_r_global,(landmarks.part(42).x,center_top_r[1])))
pupil_l = self.detect_pupil(self,img_l_eye,backup_l)
pupil_r = self.detect_pupil(self,img_r_eye,backup_r)
#--------transform to screen space------------
pupil_l = add_tuples(pupil_l,(landmarks.part(36).x,center_top_l[1]))
pupil_r = add_tuples(pupil_r,(landmarks.part(42).x,center_top_r[1]))
#print(backup_l_global,backup_r_global)
#print(pupil_l,pupil_r)
middle = self.midpoint(landmarks.part(40),landmarks.part(43))#div_tuples(add_tuples((landmarks.part(40).x,landmarks.part(40).y),(landmarks.part(43).x,landmarks.part(43).y)),(2,2))
if self.start_pos == 0:
self.start_pos = middle
else:
difference = subtract_tuples(self.start_pos,middle)
pupil_l = add_tuples(difference,pupil_l)
pupil_r = add_tuples(difference,pupil_r)
return pupil_l,pupil_r
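# Usage sketch: this class is used as a namespace elsewhere in this file, so
# the class itself is passed in place of an instance:
#     pupils = pupil_tracker_old.detect_in_frame(pupil_tracker_old, frame)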
class Plane(object):
def __init__(self,translation,normal):
self.normal = normal
self.d = -translation
class analytical_tracker(object):
"""
This class tracks the user's gaze.
It provides useful information like the position of the eyes
and pupils and allows to know if the eyes are open or closed
"""
face_gaze_reconstruction_points = [33,8,36,45,48,54]
def __init__(self):
self.frame = None
self.eye_left = None
self.eye_right = None
self.calibration = Calibration()
self.translation = None
self.normal = None
# _face_detector is used to detect faces
self._face_detector = dlib.get_frontal_face_detector()
# _predictor is used to get facial landmarks of a given face
cwd = os.path.abspath(os.path.dirname(__file__))
model_path = os.path.abspath(os.path.join(cwd, "../assets/shape_predictor_68_face_landmarks.dat"))
self._predictor = dlib.shape_predictor(model_path)
@property
def pupils_located(self):
"""Check that the pupils have been located"""
try:
int(self.eye_left.pupil.x)
int(self.eye_left.pupil.y)
int(self.eye_right.pupil.x)
int(self.eye_right.pupil.y)
#print(self.eye_left.screen_cord[0])
int(self.eye_left.screen_cord[0])
int(self.eye_right.screen_cord[0])
return True
except Exception:
return False
def _analyze(self):
"""Detects the face and initialize Eye objects"""
frame = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
faces = self._face_detector(frame)
try:
landmarks = self._predictor(frame, faces[0])
facialLandmarkFeatures = np.empty([6, 2], dtype = "double")
for i,point in enumerate(self.face_gaze_reconstruction_points):
facialLandmarkFeatures[i][0] = landmarks.part(point).x
facialLandmarkFeatures[i][1] = landmarks.part(point).x
self.translation,self.normal = estimateGaze(frame,facialLandmarkFeatures)
#print(self.translation,self.normal)
self.eye_left = Eye(frame, landmarks, 0, self.calibration,self.translation,self.normal)
self.eye_right = Eye(frame, landmarks, 1, self.calibration,self.translation,self.normal)
except IndexError:
self.eye_left = None
self.eye_right = None
def refresh(self, frame):
"""Refreshes the frame and analyzes it.
Arguments:
frame (numpy.ndarray): The frame to analyze
"""
#print("hi")
if self.calibration.camera_matrix is None:
self.calibration.set_camera_matrix(frame)
self.frame = frame
self._analyze()
def pupil_left_coords(self):
"""Returns the coordinates of the left pupil"""
if self.pupils_located:
x = self.eye_left.origin[0] + self.eye_left.pupil.x
y = self.eye_left.origin[1] + self.eye_left.pupil.y
return (x, y)
def pupil_right_coords(self):
"""Returns the coordinates of the right pupil"""
if self.pupils_located:
x = self.eye_right.origin[0] + self.eye_right.pupil.x
y = self.eye_right.origin[1] + self.eye_right.pupil.y
return (x, y)
def pupil_left_screen_coords(self):
"""Returns the coordinates of the left pupil"""
if self.pupils_located:
x = self.eye_left.screen_cord[0][0]
y = self.eye_left.screen_cord[1][0]
return (x, y)
def pupil_right_screen_coords(self):
"""Returns the coordinates of the right pupil"""
if self.pupils_located:
x = self.eye_right.screen_cord[0][0]
y = self.eye_right.screen_cord[1][0]
return (x, y)
def annotated_frame(self):
"""Returns the main frame with pupils highlighted"""
frame = self.frame.copy()
if self.pupils_located:
color = (0, 255, 0)
x_left, y_left = self.pupil_left_coords()
x_right, y_right = self.pupil_right_coords()
cv2.line(frame, (x_left - 5, y_left), (x_left + 5, y_left), color)
cv2.line(frame, (x_left, y_left - 5), (x_left, y_left + 5), color)
cv2.line(frame, (x_right - 5, y_right), (x_right + 5, y_right), color)
cv2.line(frame, (x_right, y_right - 5), (x_right, y_right + 5), color)
cv2.circle(frame,(int(self.eye_left.origin_3d_projected[0]),int(self.eye_left.origin_3d_projected[1])),10,color,3)
cv2.line(frame, (x_left, y_left ), (int(self.eye_left.origin_3d_projected[0]),int(self.eye_left.origin_3d_projected[1])), color)
cv2.circle(frame,(int(self.eye_right.origin_3d_projected[0]),int(self.eye_right.origin_3d_projected[1])),10,color,3)
cv2.line(frame, (x_right, y_right ), (int(self.eye_right.origin_3d_projected[0]),int(self.eye_right.origin_3d_projected[1])), color)
return frame
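# Usage sketch: feed webcam frames and display the annotated gaze overlay:
#     tracker = analytical_tracker()
#     ret, frame = cap.read()
#     tracker.refresh(frame)
#     if tracker.pupils_located:
#         cv2.imshow("gaze", tracker.annotated_frame())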
class gaze_tracker:
pupilTracker = pupil_tracker_old
tr = GazeTracking()
width = 1366#1860
height = 768#1020
offset = (40, 40)
user32 = ctypes.windll.user32
size_screen = user32.GetSystemMetrics(1), user32.GetSystemMetrics(0)
calibration_page = (np.zeros((int(size_screen[0]), int(size_screen[1]), 3)) + 255).astype('uint8')
#x_cut_min_l, x_cut_max_l, y_cut_min_l, y_cut_max_l
offset_calibrated_cut_left = []
#x_cut_min_r, x_cut_max_r, y_cut_min_r, y_cut_max_r
offset_calibrated_cut_right = []
cut_frame_l = 0
cut_frame_r = 0
scale_l = 0
scale_r = 0
# Calibration
corners = [
(offset), # Point 1
(width - offset[0], height - offset[1]), # Point 2
(width - offset[0], offset[1]), # Point 3
(offset[0], height - offset[1]) # Point 4
]
def save_calibration(foldername, offset_calibrated_cut_left,offset_calibrated_cut_right):
filename = "offset.txt"
scriptDir = os.path.dirname(__file__)
currentdir_folder = os.path.join(scriptDir, '../data/', foldername, filename)
file = os.path.abspath(currentdir_folder)
document = open(file, "a+") # Will open & create if file is not found
document.write(str(offset_calibrated_cut_left))
print("Succesfully writen to file")
def find_cut_limits(calibration_cut):
x_cut_max = np.transpose(np.array(calibration_cut))[0].max()
x_cut_min = np.transpose(np.array(calibration_cut))[0].min()
y_cut_max = np.transpose(np.array(calibration_cut))[1].max()
y_cut_min = np.transpose(np.array(calibration_cut))[1].min()
return x_cut_min, x_cut_max, y_cut_min, y_cut_max
def find_cut_limits_offset(calibration_cut):
x_cut_max = (np.transpose(np.array(calibration_cut))[0].max())
x_cut_min = (np.transpose(np.array(calibration_cut))[0].min())
y_cut_max = (np.transpose(np.array(calibration_cut))[1].max())
y_cut_min = (np.transpose(np.array(calibration_cut))[1].min())
offset = x_cut_min,y_cut_min
return x_cut_min, x_cut_max, y_cut_min, y_cut_max,offset
def offset_calibrated_cut(calibration_cut):
x_cut_min = np.transpose(np.array(calibration_cut))[0].min()
y_cut_min = np.transpose(np.array(calibration_cut))[1].min()
return x_cut_min,y_cut_min
    '''
    def calc_scale(from_s, to_s):
        return (to_s[0] / from_s[0], to_s[1] / from_s[1])
    '''
def show_window(title_window, window):
cv2.namedWindow(title_window, cv2.WND_PROP_FULLSCREEN)
cv2.setWindowProperty(title_window, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
cv2.imshow(title_window,window)
def calibration(self,final_folder_path, foldername,camera):
corner = 0
calibration_cut_left = []
calibration_cut_right = []
started = False
while (corner<len(self.corners)): # calibration of 4 corners
# draw circle
cv2.circle(self.calibration_page, self.corners[corner], 40, (0, 255, 0), -1)
ret, frame = camera.read() # Capture frame
            frame = cv2.flip(frame, 1)  # flip: the camera sees things mirrored
#gray_scale_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # gray-scale to work with
            self.tr.refresh(frame)
            pupils = self.tr.pupil_left_coords(), self.tr.pupil_right_coords()
#pupils = self.pupilTracker.detect_in_frame(self.pupilTracker,frame)
print(pupils)
'''
# detect faces in frame
faces = detector(gray_scale_frame)
if len(faces)> 1:
print('Please avoid multiple faces.')
sys.exit()
for face in faces:
landmarks = predictor(gray_scale_frame, face) # find points in face
# get position of right eye and display lines
#right_eye_coordinates = get_eye_coordinates(landmarks, [42, 43, 44, 45, 46, 47])
right_eye_coordinates = get_eye_coordinates(landmarks, self.pupilTracker.right)
display_eye_lines(frame, right_eye_coordinates, 'green')
# define the coordinates of the pupil from the centroid of the right eye
pupil_coordinates = np.mean([right_eye_coordinates[2], right_eye_coordinates[3]], axis = 0).astype('int')
'''
if cv2.waitKey(33) == ord('a') and pupils[0] is not None and pupils[1] is not None:
calibration_cut_left.append(pupils[0])
calibration_cut_right.append(pupils[1])
# visualize message
cv2.putText(self.calibration_page, 'ok',tuple(np.array(self.corners[corner])-5), cv2.FONT_HERSHEY_SIMPLEX, 2,(0, 0, 0), 5)
corner += 1
started = False
# Display results
# print(calibration_cut, ' len: ', len(calibration_cut))
self.show_window('projection', self.calibration_page)
# show_window('frame', cv2.resize(frame, (640, 360)))
if cv2.waitKey(113) == ord('q'):
break
print(calibration_cut_left)
# Process calibration
#x_cut_min_l, x_cut_max_l, y_cut_min_l, y_cut_max_l = self.find_cut_limits(calibration_cut)
#offset_calibrated_cut = [ x_cut_min, y_cut_min ]
x_cut_min_l, x_cut_max_l, y_cut_min_l, y_cut_max_l,self.offset_calibrated_cut_left = self.find_cut_limits_offset(calibration_cut_left)
x_cut_min_r, x_cut_max_r, y_cut_min_r, y_cut_max_r,self.offset_calibrated_cut_right = self.find_cut_limits_offset(calibration_cut_right)
self.cut_frame_l = np.copy(self.calibration_page[y_cut_min_l:y_cut_max_l, x_cut_min_l:x_cut_max_l, :])
self.cut_frame_r = np.copy(self.calibration_page[y_cut_min_r:y_cut_max_r, x_cut_min_r:x_cut_max_r, :])
#self.offset_calibrated_cut_left = self.offset_calibrated_cut(calibration_cut_left)
#self.offset_calibrated_cut_right = self.offset_calibrated_cut(calibration_cut_right)
#self.scale_l = (np.array(self.calibration_page.shape) / np.array(self.cut_frame_l.shape))
#self.scale_r = (np.array(self.calibration_page.shape) / np.array(self.cut_frame_r.shape))
self.scale_l = div_tuples(self.calibration_page.shape,(y_cut_max_l-y_cut_min_l,x_cut_max_l-x_cut_min_l))#self.cut_frame_l.shape)
self.scale_r = div_tuples(self.calibration_page.shape,self.cut_frame_r.shape)
#self.scale_l = self.calc_scale(tuple(x_cut_max_l-x_cut_min_l,y_cut_max_l-y_cut_min_l),tuple(x_cut_max_r-x_cut_min_r,y_cut_max_r-y_cut_min_r))
#self.save_calibration(foldername, offset_calibrated_cut)
#camera.release()
print('Calibration Finished')
cv2.destroyAllWindows()
#start_message(final_folder_path)
def calibrate(self,final_folder_path, foldername,camera):
corner = 0
calibration_cut_left = []
calibration_cut_right = []
started = False
while (corner<len(self.corners)): # calibration of 4 corners
# draw circle
cv2.circle(self.calibration_page, self.corners[corner], 40, (0, 255, 0), -1)
ret, frame = camera.read() # Capture frame
            frame = cv2.flip(frame, 1)  # flip: the camera sees things mirrored
#gray_scale_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # gray-scale to work with
            self.tr.refresh(frame)
            #pupils = self.tr.pupil_left_coords(), self.tr.pupil_right_coords()
            pupils = self.tr.vertical_ratio(), self.tr.horizontal_ratio()
#pupils = self.pupilTracker.detect_in_frame(self.pupilTracker,frame)
print(pupils)
'''
# detect faces in frame
faces = detector(gray_scale_frame)
if len(faces)> 1:
print('Please avoid multiple faces.')
sys.exit()
for face in faces:
landmarks = predictor(gray_scale_frame, face) # find points in face
# get position of right eye and display lines
#right_eye_coordinates = get_eye_coordinates(landmarks, [42, 43, 44, 45, 46, 47])
right_eye_coordinates = get_eye_coordinates(landmarks, self.pupilTracker.right)
display_eye_lines(frame, right_eye_coordinates, 'green')
# define the coordinates of the pupil from the centroid of the right eye
pupil_coordinates = np.mean([right_eye_coordinates[2], right_eye_coordinates[3]], axis = 0).astype('int')
'''
key = cv2.waitKey(1)
if key == 32 and pupils[0] is not None and pupils[1] is not None:
calibration_cut_left.append(pupils)
#calibration_cut_right.append(pupils[1])
# visualize message
                cv2.putText(self.calibration_page, 'ok', tuple(np.array(self.corners[corner]) - 5), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 0), 5)
from config import Config
import cv2
import numpy as np
import keyboard
import time
from utils.custom_mouse import mouse
from template_finder import TemplateFinder
from ui_manager import detect_screen_object, ScreenObjects, center_mouse
from utils.misc import wait, trim_black, color_filter, cut_roi
from inventory import consumables, personal
from ui import view
from screen import grab
from dataclasses import dataclass
from logger import Logger
from ocr import Ocr
@dataclass
class BoxInfo:
img: np.ndarray = None
pos: tuple = None
column: int = None
row: int = None
need_id: bool = False
sell: bool = False
keep: bool = False
def __getitem__(self, key):
return super().__getattribute__(key)
def __setitem__(self, key, value):
setattr(self, key, value)
def get_slot_pos_and_img(img: np.ndarray, column: int, row: int) -> tuple[tuple[int, int], np.ndarray]:
"""
Get the pos and img of a specific slot position in Inventory. Inventory must be open in the image.
:param config: The config which should be used
:param img: Image from screen.grab() not cut
:param column: Column in the Inventory
:param row: Row in the Inventory
:return: Returns position and image of the cut area as such: [[x, y], img]
"""
top_left_slot = (Config().ui_pos["inventory_top_left_slot_x"], Config().ui_pos["inventory_top_left_slot_y"])
slot_width = Config().ui_pos["slot_width"]
slot_height = Config().ui_pos["slot_height"]
slot = (top_left_slot[0] + slot_width * column, top_left_slot[1] + slot_height * row)
# decrease size to make sure not to have any borders of the slot in the image
offset_w = int(slot_width * 0.12)
offset_h = int(slot_height * 0.12)
min_x = slot[0] + offset_w
max_x = slot[0] + slot_width - offset_w
min_y = slot[1] + offset_h
max_y = slot[1] + slot_height - offset_h
slot_img = img[min_y:max_y, min_x:max_x]
center_pos = (int(slot[0] + (slot_width // 2)), int(slot[1] + (slot_height // 2)))
return center_pos, slot_img
def slot_has_item(slot_img: np.ndarray) -> bool:
"""
Check if a specific slot in the inventory has an item or not based on color
:param slot_img: Image of the slot
:return: Bool if there is an item or not
"""
slot_img = cv2.cvtColor(slot_img, cv2.COLOR_BGR2HSV)
avg_brightness = np.average(slot_img[:, :, 2])
return avg_brightness > 16.0
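
# A minimal usage sketch of the two helpers above (hypothetical demo, not
# part of the bot): assumes the game client is running, the inventory is
# open and the resolution matches Config().
def _demo_slot_check(column: int = 0, row: int = 0):
    screenshot = grab()
    pos, slot_img = get_slot_pos_and_img(screenshot, column, row)
    print(f"slot center: {pos}, occupied: {slot_has_item(slot_img)}")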
def close(img: np.ndarray = None) -> np.ndarray:
img = grab() if img is None else img
if detect_screen_object(ScreenObjects.RightPanel, img).valid or detect_screen_object(ScreenObjects.LeftPanel, img).valid:
keyboard.send("esc")
wait(0.1, 0.2)
if detect_screen_object(ScreenObjects.RightPanel, img).valid or detect_screen_object(ScreenObjects.LeftPanel, img).valid:
success = view.return_to_play()
if not success:
return None
return img
def calc_item_roi(img_pre, img_post):
try:
diff = cv2.absdiff(img_pre, img_post)
gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
diff_thresh = cv2.threshold(gray, 1, 255, cv2.THRESH_BINARY)[1]
blue_mask, _ = color_filter(img_pre, Config().colors["blue_slot"])
red_mask, _ = color_filter(img_pre, Config().colors["red_slot"])
green_mask, _ = color_filter(img_post, Config().colors["green_slot"])
blue_red_mask = np.bitwise_or(blue_mask, red_mask)
final = np.bitwise_and.reduce([blue_red_mask, green_mask, diff_thresh])
_, roi = trim_black(final)
return roi
except BaseException as err:
Logger.error(f"_calc_item_roi: Unexpected {err=}, {type(err)=}")
return None
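
# Self-contained sketch of the diff-and-threshold idea from calc_item_roi
# on synthetic data (hypothetical demo; the color_filter / trim_black
# helpers are left out and replaced with plain numpy):
def _demo_diff_roi():
    pre = np.zeros((4, 4, 3), dtype=np.uint8)
    post = pre.copy()
    post[1:3, 1:3] = (0, 0, 255)  # an "item" appears in the middle
    diff = cv2.absdiff(pre, post)
    gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
    mask = cv2.threshold(gray, 1, 255, cv2.THRESH_BINARY)[1]
    ys, xs = np.nonzero(mask)
    # roi as (x, y, w, h), analogous to the trimmed result above
    return int(xs.min()), int(ys.min()), int(xs.max() - xs.min() + 1), int(ys.max() - ys.min() + 1)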
def tome_state(img: np.ndarray = None, tome_type: str = "tp", roi: list = None):
img = img if img is not None else grab()
if (tome_found := TemplateFinder().search([f"{tome_type.upper()}_TOME", f"{tome_type.upper()}_TOME_RED"], img, roi = roi, threshold = 0.8, best_match = True, normalize_monitor = True)).valid:
if tome_found.name == f"{tome_type.upper()}_TOME":
state = "ok"
else:
state = "empty"
position = tome_found.center
else:
state = position = None
return state, position
def id_item_with_tome(item_location: list, id_tome_location: list):
mouse.move(id_tome_location[0], id_tome_location[1], randomize=4, delay_factor=[0.4, 0.8])
wait(0.2, 0.4)
mouse.click(button="right")
wait(0.1)
mouse.move(item_location[0], item_location[1], randomize=4, delay_factor=[0.4, 0.8])
wait(0.1)
mouse.click(button="left")
consumables.increment_need("id", 1)
wait(0.2, 0.4)
def transfer_items(items: list, action: str = "drop") -> list:
# requires open inventory / stash / vendor
img = grab()
filtered = []
left_panel_open = detect_screen_object(ScreenObjects.LeftPanel, img).valid
if action == "drop":
filtered = [ item for item in items if item.keep == False and item.sell == False ]
elif action == "sell":
filtered = [ item for item in items if item.keep == False and item.sell == True ]
if not left_panel_open:
Logger.error(f"transfer_items: Can't perform, vendor is not open")
elif action == "stash":
if detect_screen_object(ScreenObjects.GoldBtnStash, img).valid:
filtered = [ item for item in items if item.keep == True ]
else:
Logger.error(f"transfer_items: Can't perform, stash is not open")
else:
Logger.error(f"transfer_items: incorrect action param={action}")
if filtered:
# if dropping, control+click to drop unless left panel is open, then drag to middle
# if stashing, control+click to stash
# if selling, control+click to sell
# TODO: if purchasing, right-click to buy
# TODO: if purchasing stack, shift+right-click to buy
if (action == "drop" and not left_panel_open) or action in ["sell", "stash"]:
keyboard.send('ctrl', do_release=False)
wait(0.2, 0.4)
for item in filtered:
attempts = 0
prev_gold_img = cut_roi(grab(), roi=Config().ui_roi["inventory_gold_digits"])
while attempts < 2:
# move to item position and left click
mouse.move(*item.pos, randomize=4, delay_factor=[0.2, 0.4])
wait(0.2, 0.4)
mouse.press(button="left")
wait(0.2, 0.4)
mouse.release(button="left")
wait(0.2, 0.4)
# if dropping, drag item to middle if vendor/stash is open
if action == "drop" and left_panel_open:
center_mouse()
wait(0.2, 0.3)
mouse.press(button="left")
wait(0.2, 0.3)
mouse.release(button="left")
wait(0.8, 1)
# check if item is still there
img = grab()
slot_img = get_slot_pos_and_img(img, item.column, item.row)[1]
if not slot_has_item(slot_img):
# item successfully transferred, delete from list
for cnt, o_item in enumerate(items):
if o_item.pos == item.pos:
items.pop(cnt)
break
# check and see if inventory gold count changed
new_gold_img = cut_roi(img, roi=Config().ui_roi["inventory_gold_digits"])
if prev_gold_img.shape == new_gold_img.shape and not(np.bitwise_xor(prev_gold_img, new_gold_img).any()):
Logger.info("Inventory gold is full, force stash")
personal.set_inventory_gold_full(True)
else:
personal.set_inventory_gold_full(False)
break
else:
# item is still there, try again
attempts += 1
if attempts > 1:
Logger.error(f"transfer_items: could not stash in position {item.pos}")
if (action == "drop" and not left_panel_open) or action in ["sell", "stash"]:
keyboard.send('ctrl', do_press=False)
wait(0.1)
return items
# use with caution--unreliable
def read_gold(img: np.ndarray = None, type: str = "inventory"):
if type not in ["vendor", "inventory", "stash"]:
Logger.error(f"read_gold: type {type} not supported")
return False
img = img if img is not None else grab()
img = cut_roi(img, Config().ui_roi[f"{type}_gold_digits"])
# _, img = color_filter(img, Config().colors["gold_numbers"])
img = np.pad(img, pad_width=[(8, 8),(8, 8),(0, 0)], mode='constant')
ocr_result = Ocr().image_to_text(
images = img,
model = "engd2r_inv_th_fast",
psm = 13,
scale = 1.2,
crop_pad = False,
erode = False,
invert = False,
threshold = 76,
digits_only = True,
fix_regexps = False,
check_known_errors = False,
check_wordlist = False,
)[0]
number = int(ocr_result.text.strip())
Logger.debug(f"{type.upper()} gold: {number}")
return number
def wait_for_left_inventory():
start = time.time()
while time.time() - start < 5:
if left_inventory_ready(grab()):
Logger.debug("Vendor/stash inventory fully loaded")
return True
wait(0.1)
Logger.error("wait_for_left_inventory: Vendor/stash inventory not detected")
return False
def left_inventory_ready(img: np.ndarray):
# on laggy PCs or when playing online, the vendor may take longer to have all of its inventory ready
if detect_screen_object(ScreenObjects.LeftPanel, img).valid:
# check for tab text
text, _ = color_filter(img, Config().colors["tab_text"])
text = cut_roi(text, Config().ui_roi["left_inventory_tabs"])
# check for red slots in inventory space
red, _ = color_filter(img, Config().colors["red_slot"])
red = cut_roi(red, Config().ui_roi["left_inventory"])
# check for blue slots in inventory space
blue, _ = color_filter(img, Config().colors["blue_slot"])
blue = cut_roi(blue, Config().ui_roi["left_inventory"])
# if none of the above are true, then inventory is empty and there are no tabs (not loaded yet)
return any(np.sum(i) > 0 for i in (text, red, blue))  # assumed continuation; the source snippet is truncated here
from os.path import join as pjoin
import os
import pydicom as dicom
from patient import Patient
import numpy as np
from pydicom.pixel_data_handlers.util import apply_modality_lut as m_lut
import pickle
class Pathology:
def __init__(self, _inputpath, _outputpath):
self.inputpath = _inputpath
self.outputpath = _outputpath
vec_2ch = [0.6585, 6.7523, -0.0170]
vec_2ch_inv = [-0.7194, -0.6941, -0.0234]
vec_4ch = [0.1056, -0.6553, 0.7479]
vec_4ch_inv = [-0.0952, 0.7712, -0.6294]
vec_lvot = [-0.7625, -0.1435, -0.6307]
vec_lvot_inv = [0.6704, 0.2410, 0.7017]
patients2CH = []
patients4CH = []
patientsLVOT = []
@staticmethod
def dicom_info(file, path):
if file.find('.dcm') != -1:
try:
temp = dicom.dcmread(pjoin(path, file))
return temp[0x0010, 0x0040].value, temp[0x0010, 0x1030].value, temp[0x0028, 0x0030].value
except Exception as e:
print(e)
return "NA", 0, [0, 0]
@staticmethod
def dicom_time(file, path):
if file.find('.dcm') != -1:
try:
temp = dicom.dcmread(pjoin(path, file))
return temp[0x0008, 0x0013].value
except Exception as e:
print(e)
return 0
def dump(self):
    for patients, view in ((self.patients2CH, "2CH"),
                           (self.patients4CH, "4CH"),
                           (self.patientsLVOT, "LVOT")):
        for p in patients:
            with open(pjoin(self.outputpath, p.id, view), "wb") as pic:
                pickle.dump(p.images, pic)
                pickle.dump(p.result, pic)
                pickle.dump(p.id, pic)
                pickle.dump(p.gender, pic)
                pickle.dump(p.weight, pic)
                pickle.dump(p.spacing, pic)
pos = {
0: "2CH",
1: "2CH",
2: "4CH",
3: "4CH",
4: "LVOT",
5: "LVOT"
}
@staticmethod
def calculateangle(a, b, c, vec):
c1 = a * vec[0] + b * vec[1] + c * vec[2]
c2 = np.sqrt(pow(a, 2) + pow(b, 2) + pow(c, 2))
c3 = np.sqrt(pow(vec[0], 2) + pow(vec[1], 2) + pow(vec[2], 2))
angle = np.arccos(c1 / (c2 * c3))
return angle
def la_type(self, file):
xx = file[0x0020, 0x0037].value[0]
xy = file[0x0020, 0x0037].value[1]
xz = file[0x0020, 0x0037].value[2]
yx = file[0x0020, 0x0037].value[3]
yy = file[0x0020, 0x0037].value[4]
yz = file[0x0020, 0x0037].value[5]
cross = np.cross([xx, xy, xz], [yx, yy, yz])
angles = [self.calculateangle(cross[0], cross[1], cross[2], self.vec_2ch),
self.calculateangle(cross[0], cross[1], cross[2], self.vec_2ch_inv),
self.calculateangle(cross[0], cross[1], cross[2], self.vec_4ch),
self.calculateangle(cross[0], cross[1], cross[2], self.vec_4ch_inv),
self.calculateangle(cross[0], cross[1], cross[2], self.vec_lvot),
self.calculateangle(cross[0], cross[1], cross[2], self.vec_lvot_inv)]
mini = angles.index(min(angles))
return self.pos[mini]
@staticmethod
def dicom_reader(temp):
arr = temp.pixel_array
hu = m_lut(arr, temp)
bottom_centile = np.percentile(hu, 1)
top_centile = np.percentile(hu, 99)
bottom_filtered = np.where(hu < bottom_centile, bottom_centile, hu)
filtered = np.where(bottom_filtered > top_centile, top_centile, bottom_filtered)
_min = np.amin(filtered)
_max = np.amax(filtered)
filtered = (filtered - _min) * (255 / (_max - _min))  # rescale to 0..255
uint = np.uint8(filtered)
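
# Standalone sketch of the orientation-classification math used by
# la_type() above: the image-plane normal (cross product of the DICOM row
# and column direction cosines) is compared with each reference vector and
# the smallest angle wins. The normal below is a made-up illustration
# value; the reference vectors are the 2CH pair from the class.
def _demo_classify_normal():
    refs = {"2CH": [0.6585, 6.7523, -0.0170],
            "2CH_inv": [-0.7194, -0.6941, -0.0234]}
    normal = np.cross([1.0, 0.0, 0.0], [0.0, 0.7, 0.7])  # hypothetical
    angles = {name: np.arccos(np.dot(normal, v) /
                              (np.linalg.norm(normal) * np.linalg.norm(v)))
              for name, v in refs.items()}
    return min(angles, key=angles.get)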
# SPDX-FileCopyrightText: Copyright 2021, <NAME> <<EMAIL>>
# SPDX-License-Identifier: BSD-3-Clause
# SPDX-FileType: SOURCE
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the license found in the LICENSE.txt file in the root directory
# of this source tree.
# =======
# Imports
# =======
import numpy
import scipy.optimize
from functools import partial
from .._utilities.plot_utilities import * # noqa: F401, F403
from .._utilities.plot_utilities import load_plot_settings, save_plot, plt
# =================
# Direct Likelihood
# =================
class DirectLikelihood(object):
# ==============
# log likelihood
# ==============
@staticmethod
def log_likelihood(z, X, K_mixed, sign_switch, hyperparam):
"""
Here we use the direct parameters, sigma and sigma0.
sign_switch changes the sign of the output from lp to -lp. When True,
this is used to minimize the negative log-likelihood (instead of
maximizing the log-likelihood).
"""
# hyperparameters
sigma = hyperparam[0]
sigma0 = hyperparam[1]
n, m = X.shape
# S is the (sigma**2) * K + (sigma0**2) * I, but we don't construct it.
# Instead, we construct Kn = K + eta I, where eta = (sigma0 / sigma)**2
tol = 1e-8
if numpy.abs(sigma) < tol:
# Ignore (sigma**2 * K) compared to (sigma0**2 * I) term.
logdet_S = n * numpy.log(sigma0**2)
Y = X / sigma0**2
else:
eta = (sigma0 / sigma)**2
logdet_Kn = K_mixed.logdet(eta)
logdet_S = n * numpy.log(sigma**2) + logdet_Kn
Y = K_mixed.solve(eta, X) / sigma**2
# Compute log det (X.T*Sinv*X)
XtSinvX = numpy.matmul(X.T, Y)
logdet_XtSinvX = numpy.log(numpy.linalg.det(XtSinvX))
# Compute zMz
B = numpy.matmul(X.T, Y)
Binv = numpy.linalg.inv(B)
Mz = DirectLikelihood.M_dot(K_mixed, Binv, Y, sigma, sigma0, z)
zMz = numpy.dot(z, Mz)
# Log likelihood
lp = -0.5*(n-m)*numpy.log(2.0*numpy.pi) - 0.5*logdet_S \
- 0.5*logdet_XtSinvX - 0.5*zMz
# If lp is used in scipy.optimize.minimize, change the sign to obtain
# the minimum of -lp
if sign_switch:
lp = -lp
return lp
# =======================
# log likelihood jacobian
# =======================
@staticmethod
def log_likelihood_jacobian(z, X, K_mixed, sign_switch, hyperparam):
"""
When both :math:`\\sigma` and :math:`\\sigma_0` are zero, jacobian is
undefined.
"""
# hyperparameters
sigma = hyperparam[0]
sigma0 = hyperparam[1]
n, m = X.shape
# S is the (sigma**2) * K + (sigma0**2) * I, but we don't construct it
# Instead, we construct Kn = K + eta I, where eta = (sigma0 / sigma)**2
# Computing Y=Sinv*X and w=Sinv*z
tol = 1e-8
if numpy.abs(sigma) < tol:
# Ignore (sigma**2 * K) compared to (sigma0**2 * I) term.
Y = X / sigma0**2
else:
eta = (sigma0 / sigma)**2
Y = K_mixed.solve(eta, X) / sigma**2
# B is Xt * Y
B = numpy.matmul(X.T, Y)
Binv = numpy.linalg.inv(B)
# Compute Mz
Mz = DirectLikelihood.M_dot(K_mixed, Binv, Y, sigma, sigma0, z)
# Compute KMz
KMz = K_mixed.dot(0, Mz)
# Compute zMMz and zMKMz
zMMz = numpy.dot(Mz, Mz)
zMKMz = numpy.dot(Mz, KMz)
# Compute trace of M
if numpy.abs(sigma) < tol:
trace_M = (n - m) / sigma0**2
else:
trace_Sinv = K_mixed.traceinv(eta) / sigma**2
YtY = numpy.matmul(Y.T, Y)
trace_BinvYtY = numpy.trace(numpy.matmul(Binv, YtY))
trace_M = trace_Sinv - trace_BinvYtY
# Compute trace of KM which is (n-m)/sigma**2 - eta* trace(M)
if numpy.abs(sigma) < tol:
YtKY = numpy.matmul(Y.T, K_mixed.dot(0, Y))
BinvYtKY = numpy.matmul(Binv, YtKY)
trace_BinvYtKY = numpy.trace(BinvYtKY)
trace_KM = K_mixed.trace(0)/sigma0**2 - trace_BinvYtKY
else:
trace_KM = (n - m)/sigma**2 - eta*trace_M
# Derivative of lp wrt to sigma
der1_sigma = -0.5*trace_KM + 0.5*zMKMz
der1_sigma0 = -0.5*trace_M + 0.5*zMMz
jacobian = numpy.array([der1_sigma, der1_sigma0], dtype=float)
if sign_switch:
jacobian = -jacobian
return jacobian
# ======================
# log likelihood hessian
# ======================
@staticmethod
def log_likelihood_hessian(z, X, K_mixed, sign_switch, hyperparam):
"""
"""
# hyperparameters
sigma = hyperparam[0]
sigma0 = hyperparam[1]
n, m = X.shape
# S is the (sigma**2) * K + (sigma0**2) * I, but we don't construct it
# Instead, we construct Kn = K + eta I, where eta = (sigma0 / sigma)**2
# Computing Y=Sinv*X, V = Sinv*Y, and w=Sinv*z
# tol = 1e-8
tol = 1e-16
if numpy.abs(sigma) < tol:
# Ignore (sigma**2 * K) compared to (sigma0**2 * I) term.
Y = X / sigma0**2
V = Y / sigma0**2
else:
eta = (sigma0 / sigma)**2
Y = K_mixed.solve(eta, X) / sigma**2
V = K_mixed.solve(eta, Y) / sigma**2
# B is Xt * Y
B = numpy.matmul(X.T, Y)
Binv = numpy.linalg.inv(B)
YtY = numpy.matmul(Y.T, Y)
A = numpy.matmul(Binv, YtY)
# Compute Mz, MMz
Mz = DirectLikelihood.M_dot(K_mixed, Binv, Y, sigma, sigma0, z)
MMz = DirectLikelihood.M_dot(K_mixed, Binv, Y, sigma, sigma0, Mz)
# Compute KMz, zMMMz
KMz = K_mixed.dot(0, Mz)
zMMMz = numpy.dot(Mz, MMz)
# Compute MKMz
MKMz = DirectLikelihood.M_dot(K_mixed, Binv, Y, sigma, sigma0, KMz)
# Compute zMKMKMz
zMMKMz = numpy.dot(MMz, KMz)
zMKMKMz = numpy.dot(KMz, MKMz)
# Trace of M
if numpy.abs(sigma) < tol:
trace_M = (n - m) / sigma0**2
else:
trace_Sinv = K_mixed.traceinv(eta) / sigma**2
trace_A = numpy.trace(A)
trace_M = trace_Sinv - trace_A
# Trace of Sinv**2
if numpy.abs(sigma) < tol:
trace_S2inv = n / sigma0**4
else:
trace_S2inv = K_mixed.traceinv(eta, exponent=2) / sigma**4
# Trace of M**2
YtV = numpy.matmul(Y.T, V)
C = numpy.matmul(Binv, YtV)
trace_C = numpy.trace(C)
AA = numpy.matmul(A, A)
trace_AA = numpy.trace(AA)
trace_M2 = trace_S2inv - 2.0*trace_C + trace_AA
# Trace of (KM)**2
if numpy.abs(sigma) < tol:
trace_K2 = K_mixed.trace(0, exponent=2)
D = numpy.matmul(X.T, X)
K2X = K_mixed.dot(0, X, exponent=2)
E = numpy.matmul(K2X, D)
E2 = numpy.matmul(E, E)
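
# A hedged sketch of how the pieces above are typically wired together:
# maximize the likelihood by minimizing -lp (sign_switch=True) with
# scipy.optimize.minimize and the analytic jacobian. `z`, `X` and a
# `K_mixed` object exposing the solve/logdet/traceinv/dot interface used
# above are assumed to be given; none of them is constructed here.
def maximize_likelihood(z, X, K_mixed, x0=(1.0, 0.1)):
    fun = partial(DirectLikelihood.log_likelihood, z, X, K_mixed, True)
    jac = partial(DirectLikelihood.log_likelihood_jacobian, z, X, K_mixed, True)
    result = scipy.optimize.minimize(fun, numpy.asarray(x0, dtype=float),
                                     jac=jac, method='BFGS')
    return result.x  # optimal [sigma, sigma0]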
import unittest
import qteasy as qt
import pandas as pd
from pandas import Timestamp
import numpy as np
from numpy import int64
import itertools
import datetime
from qteasy.utilfuncs import list_to_str_format, regulate_date_format, time_str_format, str_to_list
from qteasy.utilfuncs import maybe_trade_day, is_market_trade_day, prev_trade_day, next_trade_day, prev_market_trade_day
from qteasy.utilfuncs import next_market_trade_day
from qteasy.space import Space, Axis, space_around_centre, ResultPool
from qteasy.core import apply_loop
from qteasy.built_in import SelectingFinanceIndicator
from qteasy.history import stack_dataframes
from qteasy.tsfuncs import income, indicators, name_change, get_bar
from qteasy.tsfuncs import stock_basic, trade_calendar, new_share, get_index
from qteasy.tsfuncs import balance, cashflow, top_list, index_indicators, composite
from qteasy.tsfuncs import future_basic, future_daily, options_basic, options_daily
from qteasy.tsfuncs import fund_basic, fund_net_value, index_basic
from qteasy.evaluate import eval_alpha, eval_benchmark, eval_beta, eval_fv
from qteasy.evaluate import eval_info_ratio, eval_max_drawdown, eval_sharp
from qteasy.evaluate import eval_volatility
from qteasy.tafuncs import bbands, dema, ema, ht, kama, ma, mama, mavp, mid_point
from qteasy.tafuncs import mid_price, sar, sarext, sma, t3, tema, trima, wma, adx, adxr
from qteasy.tafuncs import apo, bop, cci, cmo, dx, macd, macdext, aroon, aroonosc
from qteasy.tafuncs import macdfix, mfi, minus_di, minus_dm, mom, plus_di, plus_dm
from qteasy.tafuncs import ppo, roc, rocp, rocr, rocr100, rsi, stoch, stochf, stochrsi
from qteasy.tafuncs import trix, ultosc, willr, ad, adosc, obv, atr, natr, trange
from qteasy.tafuncs import avgprice, medprice, typprice, wclprice, ht_dcperiod
from qteasy.tafuncs import ht_dcphase, ht_phasor, ht_sine, ht_trendmode, cdl2crows
from qteasy.tafuncs import cdl3blackcrows, cdl3inside, cdl3linestrike, cdl3outside
from qteasy.tafuncs import cdl3starsinsouth, cdl3whitesoldiers, cdlabandonedbaby
from qteasy.tafuncs import cdladvanceblock, cdlbelthold, cdlbreakaway, cdlclosingmarubozu
from qteasy.tafuncs import cdlconcealbabyswall, cdlcounterattack, cdldarkcloudcover
from qteasy.tafuncs import cdldoji, cdldojistar, cdldragonflydoji, cdlengulfing
from qteasy.tafuncs import cdleveningdojistar, cdleveningstar, cdlgapsidesidewhite
from qteasy.tafuncs import cdlgravestonedoji, cdlhammer, cdlhangingman, cdlharami
from qteasy.tafuncs import cdlharamicross, cdlhighwave, cdlhikkake, cdlhikkakemod
from qteasy.tafuncs import cdlhomingpigeon, cdlidentical3crows, cdlinneck
from qteasy.tafuncs import cdlinvertedhammer, cdlkicking, cdlkickingbylength
from qteasy.tafuncs import cdlladderbottom, cdllongleggeddoji, cdllongline, cdlmarubozu
from qteasy.tafuncs import cdlmatchinglow, cdlmathold, cdlmorningdojistar, cdlmorningstar
from qteasy.tafuncs import cdlonneck, cdlpiercing, cdlrickshawman, cdlrisefall3methods
from qteasy.tafuncs import cdlseparatinglines, cdlshootingstar, cdlshortline, cdlspinningtop
from qteasy.tafuncs import cdlstalledpattern, cdlsticksandwich, cdltakuri, cdltasukigap
from qteasy.tafuncs import cdlthrusting, cdltristar, cdlunique3river, cdlupsidegap2crows
from qteasy.tafuncs import cdlxsidegap3methods, beta, correl, linearreg, linearreg_angle
from qteasy.tafuncs import linearreg_intercept, linearreg_slope, stddev, tsf, var, acos
from qteasy.tafuncs import asin, atan, ceil, cos, cosh, exp, floor, ln, log10, sin, sinh
from qteasy.tafuncs import sqrt, tan, tanh, add, div, max, maxindex, min, minindex, minmax
from qteasy.tafuncs import minmaxindex, mult, sub, sum
from qteasy.history import get_financial_report_type_raw_data, get_price_type_raw_data
from qteasy.database import DataSource
from qteasy._arg_validators import _parse_string_kwargs, _valid_qt_kwargs
class TestCost(unittest.TestCase):
def setUp(self):
self.amounts = np.array([10000, 20000, 10000])
self.op = np.array([0, 1, -0.33333333])
self.prices = np.array([10, 20, 10])
self.r = qt.Cost()
def test_rate_creation(self):
print('testing rates objects\n')
self.assertIsInstance(self.r, qt.Cost, 'Type should be Rate')
def test_rate_operations(self):
self.assertEqual(self.r['buy_fix'], 0.0, 'item retrieved is incorrect')
self.assertEqual(self.r['sell_fix'], 0.0, 'item retrieved is incorrect')
self.assertEqual(self.r['buy_rate'], 0.003, 'item retrieved is incorrect')
self.assertEqual(self.r['sell_rate'], 0.001, 'item retrieved is incorrect')
self.assertEqual(self.r['buy_min'], 5., 'item retrieved is incorrect')
self.assertEqual(self.r['sell_min'], 0.0, 'item retrieved is incorrect')
self.assertEqual(self.r['slipage'], 0.0, 'item retrieved is incorrect')
self.assertEqual(np.allclose(self.r(self.amounts), [0.003, 0.003, 0.003]), True, 'fee calculation wrong')
def test_rate_fee(self):
self.r.buy_rate = 0.003
self.r.sell_rate = 0.001
self.r.buy_fix = 0
self.r.sell_fix = 0
self.r.buy_min = 0
self.r.sell_min = 0
self.r.slipage = 0
print('\nSell result with fixed rate = 0.001 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 33299.999667, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33.333332999999996, msg='result incorrect')
print('\nSell result with fixed rate = 0.001 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 1))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 1)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 33296.67, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33.33, msg='result incorrect')
print('\nSell result with fixed rate = 0.001 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 100))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 32967.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 0))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 0)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 997.00897308, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 59.82053838484547, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 1:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 1))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 1)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 997., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -19999.82, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 59.82, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 100))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -18054., msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 54.0, msg='result incorrect')
def test_min_fee(self):
self.r.buy_rate = 0.
self.r.sell_rate = 0.
self.r.buy_fix = 0.
self.r.sell_fix = 0.
self.r.buy_min = 300
self.r.sell_min = 300
self.r.slipage = 0.
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 0))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 0)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 985, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 10:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 10))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 10)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 980, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -19900.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 100))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 900, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -18300.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\nselling result with fixed cost rate with min fee = 300 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts))
test_min_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 33033.333)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
print('\nselling result with fixed cost rate with min fee = 300 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 1))
test_min_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 1)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 33030)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
print('\nselling result with fixed cost rate with min fee = 300 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 100))
test_min_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 32700)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
def test_rate_with_min(self):
"""Test transaction cost calculated by rate with min_fee"""
self.r.buy_rate = 0.0153
self.r.sell_rate = 0.01
self.r.buy_fix = 0.
self.r.sell_fix = 0.
self.r.buy_min = 300
self.r.sell_min = 333
self.r.slipage = 0.
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 0))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 0)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 984.9305624, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 301.3887520929774, msg='result incorrect')
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 10:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 10))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 10)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 980, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -19900.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 100))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 900, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -18300.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 300.0, msg='result incorrect')
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.op, self.amounts)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32999.99967)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.33333)
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 1))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 1)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32996.7)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.3)
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 100))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32667.0)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.0)
def test_fixed_fee(self):
self.r.buy_rate = 0.
self.r.sell_rate = 0.
self.r.buy_fix = 200
self.r.sell_fix = 150
self.r.buy_min = 0
self.r.sell_min = 0
self.r.slipage = 0
print('\nselling result of fixed cost with fixed fee = 150 and moq=0:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 0))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], 33183.333, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 150.0, msg='result incorrect')
print('\nselling result of fixed cost with fixed fee = 150 and moq=100:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 100))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3300.]), True,
f'result incorrect, {test_fixed_fee_result[0]} does not equal [0, 0, -3300]')
self.assertAlmostEqual(test_fixed_fee_result[1], 32850., msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 150., msg='result incorrect')
print('\npurchase result of fixed cost with fixed fee = 200:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 0))
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 0)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 990., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 200.0, msg='result incorrect')
print('\npurchase result of fixed cost with fixed fee = 200:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 100))
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -18200.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 200.0, msg='result incorrect')
def test_slipage(self):
self.r.buy_fix = 0
self.r.sell_fix = 0
self.r.buy_min = 0
self.r.sell_min = 0
self.r.buy_rate = 0.003
self.r.sell_rate = 0.001
self.r.slipage = 1E-9
print('\npurchase result of fixed rate = 0.003 and slipage = 1E-10 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 0))
print('\npurchase result of fixed rate = 0.003 and slipage = 1E-10 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 100))
print('\nselling result with fixed rate = 0.001 and slipage = 1E-10:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3333.3333]), True,
f'{test_fixed_fee_result[0]} does not equal [0, 0, -3333.3333]')
self.assertAlmostEqual(test_fixed_fee_result[1], 33298.88855591,
msg=f'{test_fixed_fee_result[1]} does not equal 33298.88855591.')
self.assertAlmostEqual(test_fixed_fee_result[2], 34.44444409,
msg=f'{test_fixed_fee_result[2]} does not equal 34.44444409.')
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 0)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 996.98909294, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 60.21814121353513, msg='result incorrect')
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -18054.36, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 54.36, msg='result incorrect')
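
# The rate-with-min figures asserted above follow from simple arithmetic:
# when buying with budget C at price p and rate r (moq = 0), the fee is
# charged on the purchased value, so shares = C / (p * (1 + r)) and
# fee = shares * p * r, floored at buy_min. A standalone check of the
# 301.39 figure from test_rate_with_min (a sketch, not qt.Cost itself):
def _rate_with_min_example(cash=20000.0, price=20.0, rate=0.0153, min_fee=300.0):
    shares = cash / (price * (1 + rate))     # ~984.93
    fee = shares * price * rate              # ~301.39
    if fee < min_fee:
        fee = min_fee
    return shares, fee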
class TestSpace(unittest.TestCase):
def test_creation(self):
"""
test if creation of space object is fine
"""
# first group of inputs, output Space with two discr axes from [0,10]
print('testing space objects\n')
# pars_list = [[(0, 10), (0, 10)],
# [[0, 10], [0, 10]]]
#
# types_list = ['discr',
# ['discr', 'discr']]
#
# input_pars = itertools.product(pars_list, types_list)
# for p in input_pars:
# # print(p)
# s = qt.Space(*p)
# b = s.boes
# t = s.types
# # print(s, t)
# self.assertIsInstance(s, qt.Space)
# self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
# self.assertEqual(t, ['discr', 'discr'], 'types incorrect')
#
pars_list = [[(0, 10), (0, 10)],
[[0, 10], [0, 10]]]
types_list = ['foo, bar',
['foo', 'bar']]
input_pars = itertools.product(pars_list, types_list)
for p in input_pars:
# print(p)
s = Space(*p)
b = s.boes
t = s.types
# print(s, t)
self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
self.assertEqual(t, ['enum', 'enum'], 'types incorrect')
pars_list = [[(0, 10), (0, 10)],
[[0, 10], [0, 10]]]
types_list = [['discr', 'foobar']]
input_pars = itertools.product(pars_list, types_list)
for p in input_pars:
# print(p)
s = Space(*p)
b = s.boes
t = s.types
# print(s, t)
self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
self.assertEqual(t, ['discr', 'enum'], 'types incorrect')
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
self.assertEqual(s.types, ['conti', 'discr'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10.0, 11))
self.assertEqual(s.shape, (np.inf, 11))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types='conti, enum')
self.assertEqual(s.types, ['conti', 'enum'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10.0, 2))
self.assertEqual(s.shape, (np.inf, 2))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
pars_list = [(1, 2), (2, 3), (3, 4)]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['discr', 'discr', 'discr'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (2, 2, 2))
self.assertEqual(s.shape, (2, 2, 2))
self.assertEqual(s.count, 8)
self.assertEqual(s.boes, [(1, 2), (2, 3), (3, 4)])
pars_list = [(1, 2, 3), (2, 3, 4), (3, 4, 5)]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum', 'enum', 'enum'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (3, 3, 3))
self.assertEqual(s.shape, (3, 3, 3))
self.assertEqual(s.count, 27)
self.assertEqual(s.boes, [(1, 2, 3), (2, 3, 4), (3, 4, 5)])
pars_list = [((1, 2, 3), (2, 3, 4), (3, 4, 5))]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum'])
self.assertEqual(s.dim, 1)
self.assertEqual(s.size, (3,))
self.assertEqual(s.shape, (3,))
self.assertEqual(s.count, 3)
pars_list = ((1, 2, 3), (2, 3, 4), (3, 4, 5))
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum', 'enum', 'enum'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (3, 3, 3))
self.assertEqual(s.shape, (3, 3, 3))
self.assertEqual(s.count, 27)
self.assertEqual(s.boes, [(1, 2, 3), (2, 3, 4), (3, 4, 5)])
def test_extract(self):
"""
:return:
"""
pars_list = [(0, 10), (0, 10)]
types_list = ['discr', 'discr']
s = Space(pars=pars_list, par_types=types_list)
extracted_int, count = s.extract(3, 'interval')
extracted_int_list = list(extracted_int)
print('extracted int\n', extracted_int_list)
self.assertEqual(count, 16, 'extraction count wrong!')
self.assertEqual(extracted_int_list, [(0, 0), (0, 3), (0, 6), (0, 9), (3, 0), (3, 3),
(3, 6), (3, 9), (6, 0), (6, 3), (6, 6), (6, 9),
(9, 0), (9, 3), (9, 6), (9, 9)],
'space extraction wrong!')
extracted_rand, count = s.extract(10, 'rand')
extracted_rand_list = list(extracted_rand)
self.assertEqual(count, 10, 'extraction count wrong!')
print('extracted rand\n', extracted_rand_list)
for point in list(extracted_rand_list):
self.assertEqual(len(point), 2)
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertLessEqual(point[1], 10)
self.assertGreaterEqual(point[1], 0)
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
extracted_int2, count = s.extract(3, 'interval')
self.assertEqual(count, 16, 'extraction count wrong!')
extracted_int_list2 = list(extracted_int2)
self.assertEqual(extracted_int_list2, [(0, 0), (0, 3), (0, 6), (0, 9), (3, 0), (3, 3),
(3, 6), (3, 9), (6, 0), (6, 3), (6, 6), (6, 9),
(9, 0), (9, 3), (9, 6), (9, 9)],
'space extraction wrong!')
print('extracted int list 2\n', extracted_int_list2)
self.assertIsInstance(extracted_int_list2[0][0], float)
self.assertIsInstance(extracted_int_list2[0][1], (int, int64))
extracted_rand2, count = s.extract(10, 'rand')
self.assertEqual(count, 10, 'extraction count wrong!')
extracted_rand_list2 = list(extracted_rand2)
print('extracted rand list 2:\n', extracted_rand_list2)
for point in extracted_rand_list2:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], float)
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertIsInstance(point[1], (int, int64))
self.assertLessEqual(point[1], 10)
self.assertGreaterEqual(point[1], 0)
pars_list = [(0., 10), ('a', 'b')]
s = Space(pars=pars_list, par_types='enum, enum')
extracted_int3, count = s.extract(1, 'interval')
self.assertEqual(count, 4, 'extraction count wrong!')
extracted_int_list3 = list(extracted_int3)
self.assertEqual(extracted_int_list3, [(0., 'a'), (0., 'b'), (10, 'a'), (10, 'b')],
'space extraction wrong!')
print('extracted int list 3\n', extracted_int_list3)
self.assertIsInstance(extracted_int_list3[0][0], float)
self.assertIsInstance(extracted_int_list3[0][1], str)
extracted_rand3, count = s.extract(3, 'rand')
self.assertEqual(count, 3, 'extraction count wrong!')
extracted_rand_list3 = list(extracted_rand3)
print('extracted rand list 3:\n', extracted_rand_list3)
for point in extracted_rand_list3:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], (float, int))
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertIsInstance(point[1], str)
self.assertIn(point[1], ['a', 'b'])
pars_list = [((0, 10), (1, 'c'), ('a', 'b'), (1, 14))]
s = Space(pars=pars_list, par_types='enum')
extracted_int4, count = s.extract(1, 'interval')
self.assertEqual(count, 4, 'extraction count wrong!')
extracted_int_list4 = list(extracted_int4)
it = zip(extracted_int_list4, [(0, 10), (1, 'c'), (0, 'b'), (1, 14)])
for item, item2 in it:
print(item, item2)
self.assertTrue(all([tuple(ext_item) == item for ext_item, item in it]))
print('extracted int list 4\n', extracted_int_list4)
self.assertIsInstance(extracted_int_list4[0], tuple)
extracted_rand4, count = s.extract(3, 'rand')
self.assertEqual(count, 3, 'extraction count wrong!')
extracted_rand_list4 = list(extracted_rand4)
print('extracted rand list 4:\n', extracted_rand_list4)
for point in extracted_rand_list4:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], (int, str))
self.assertIn(point[0], [0, 1, 'a'])
self.assertIsInstance(point[1], (int, str))
self.assertIn(point[1], [10, 14, 'b', 'c'])
self.assertIn(point, [(0., 10), (1, 'c'), ('a', 'b'), (1, 14)])
pars_list = [((0, 10), (1, 'c'), ('a', 'b'), (1, 14)), (1, 4)]
s = Space(pars=pars_list, par_types='enum, discr')
extracted_int5, count = s.extract(1, 'interval')
self.assertEqual(count, 16, 'extraction count wrong!')
extracted_int_list5 = list(extracted_int5)
for item, item2 in extracted_int_list5:
print(item, item2)
self.assertTrue(all([tuple(ext_item) == item for ext_item, item in it]))
print('extracted int list 5\n', extracted_int_list5)
self.assertIsInstance(extracted_int_list5[0], tuple)
extracted_rand5, count = s.extract(5, 'rand')
self.assertEqual(count, 5, 'extraction count wrong!')
extracted_rand_list5 = list(extracted_rand5)
print('extracted rand list 5:\n', extracted_rand_list5)
for point in extracted_rand_list5:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], tuple)
print(f'type of point[1] is {type(point[1])}')
self.assertIsInstance(point[1], (int, np.int64))
self.assertIn(point[0], [(0., 10), (1, 'c'), ('a', 'b'), (1, 14)])
print(f'test incremental extraction')
pars_list = [(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)]
s = Space(pars_list)
ext, count = s.extract(64, 'interval')
self.assertEqual(count, 4096)
points = list(ext)
# all points have been extracted; build ten subspaces around 10 of them
# check that each subspace is a Space and lies within s; generate a point set with 32 and verify the count
for point in points[1000:1010]:
subspace = s.from_point(point, 64)
self.assertIsInstance(subspace, Space)
self.assertTrue(subspace in s)
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
ext, count = subspace.extract(32)
points = list(ext)
self.assertGreaterEqual(count, 512)
self.assertLessEqual(count, 4096)
print(f'\n---------------------------------'
f'\nthe space created around point <{point}> is'
f'\n{subspace.boes}'
f'\nand extracted {count} points, the first 5 are:'
f'\n{points[:5]}')
def test_axis_extract(self):
# test axis object with conti type
axis = Axis((0., 5))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'conti')
self.assertEqual(axis.axis_boe, (0., 5.))
self.assertEqual(axis.count, np.inf)
self.assertEqual(axis.size, 5.0)
self.assertTrue(np.allclose(axis.extract(1, 'int'), [0., 1., 2., 3., 4.]))
self.assertTrue(np.allclose(axis.extract(0.5, 'int'), [0., 0.5, 1., 1.5, 2., 2.5, 3., 3.5, 4., 4.5]))
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(0 <= item <= 5) for item in extracted]))
# test axis object with discrete type
axis = Axis((1, 5))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'discr')
self.assertEqual(axis.axis_boe, (1, 5))
self.assertEqual(axis.count, 5)
self.assertEqual(axis.size, 5)
self.assertTrue(np.allclose(axis.extract(1, 'int'), [1, 2, 3, 4, 5]))
self.assertRaises(ValueError, axis.extract, 0.5, 'int')
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(item in [1, 2, 3, 4, 5]) for item in extracted]))
# test axis object with enumerate type
axis = Axis((1, 5, 7, 10, 'A', 'F'))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'enum')
self.assertEqual(axis.axis_boe, (1, 5, 7, 10, 'A', 'F'))
self.assertEqual(axis.count, 6)
self.assertEqual(axis.size, 6)
self.assertEqual(axis.extract(1, 'int'), [1, 5, 7, 10, 'A', 'F'])
self.assertRaises(ValueError, axis.extract, 0.5, 'int')
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(item in [1, 5, 7, 10, 'A', 'F']) for item in extracted]))
def test_from_point(self):
"""测试从一个点生成一个space"""
# 生成一个space,指定space中的一个点以及distance,生成一个sub-space
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
self.assertEqual(s.types, ['conti', 'discr'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10., 11))
self.assertEqual(s.shape, (np.inf, 11))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
print('create subspace from a point in space')
p = (3, 3)
distance = 2
subspace = s.from_point(p, distance)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'discr'])
self.assertEqual(subspace.dim, 2)
self.assertEqual(subspace.size, (4.0, 5))
self.assertEqual(subspace.shape, (np.inf, 5))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(1, 5), (1, 5)])
print('create subspace from a 6 dimensional discrete space')
s = Space(pars=[(10, 250), (10, 250), (10, 250), (10, 250), (10, 250), (10, 250)])
p = (15, 200, 150, 150, 150, 150)
d = 10
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['discr', 'discr', 'discr', 'discr', 'discr', 'discr'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 65345616)
self.assertEqual(subspace.size, (16, 21, 21, 21, 21, 21))
self.assertEqual(subspace.shape, (16, 21, 21, 21, 21, 21))
self.assertEqual(subspace.count, 65345616)
self.assertEqual(subspace.boes, [(10, 25), (190, 210), (140, 160), (140, 160), (140, 160), (140, 160)])
print('create subspace from a 6 dimensional continuous space')
s = Space(pars=[(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)])
p = (15, 200, 150, 150, 150, 150)
d = 10
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 48000000)
self.assertEqual(subspace.size, (15.0, 20.0, 20.0, 20.0, 20.0, 20.0))
self.assertEqual(subspace.shape, (np.inf, np.inf, np.inf, np.inf, np.inf, np.inf))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(10, 25), (190, 210), (140, 160), (140, 160), (140, 160), (140, 160)])
print('create subspace with different distances on each dimension')
s = Space(pars=[(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)])
p = (15, 200, 150, 150, 150, 150)
d = [10, 5, 5, 10, 10, 5]
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 6000000)
self.assertEqual(subspace.size, (15.0, 10.0, 10.0, 20.0, 20.0, 10.0))
self.assertEqual(subspace.shape, (np.inf, np.inf, np.inf, np.inf, np.inf, np.inf))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(10, 25), (195, 205), (145, 155), (140, 160), (140, 160), (145, 155)])
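
# The subspace bounds asserted above come from clamping point ± distance
# to the parent bounds on each axis; a standalone sketch of that rule
# (assumed behavior, not qteasy's actual implementation):
def _clamped_bounds(point, distance, bounds):
    clamped = []
    for p, (lo, hi) in zip(point, bounds):
        low = p - distance if p - distance > lo else lo
        high = p + distance if p + distance < hi else hi
        clamped.append((low, high))
    return clamped
# e.g. _clamped_bounds((3, 3), 2, [(0, 10), (0, 10)]) -> [(1, 5), (1, 5)]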
class TestCashPlan(unittest.TestCase):
def setUp(self):
self.cp1 = qt.CashPlan(['2012-01-01', '2010-01-01'], [10000, 20000], 0.1)
self.cp1.info()
self.cp2 = qt.CashPlan(['20100501'], 10000)
self.cp2.info()
self.cp3 = qt.CashPlan(pd.date_range(start='2019-01-01',
freq='Y',
periods=12),
[i * 1000 + 10000 for i in range(12)],
0.035)
self.cp3.info()
def test_creation(self):
self.assertIsInstance(self.cp1, qt.CashPlan, 'CashPlan object creation wrong')
self.assertIsInstance(self.cp2, qt.CashPlan, 'CashPlan object creation wrong')
self.assertIsInstance(self.cp3, qt.CashPlan, 'CashPlan object creation wrong')
# test __repr__()
print(self.cp1)
print(self.cp2)
print(self.cp3)
# test __str__()
self.cp1.info()
self.cp2.info()
self.cp3.info()
# test assertion errors
self.assertRaises(AssertionError, qt.CashPlan, '2016-01-01', [10000, 10000])
self.assertRaises(KeyError, qt.CashPlan, '2020-20-20', 10000)
def test_properties(self):
self.assertEqual(self.cp1.amounts, [20000, 10000], 'property wrong')
self.assertEqual(self.cp1.first_day, Timestamp('2010-01-01'))
self.assertEqual(self.cp1.last_day, Timestamp('2012-01-01'))
self.assertEqual(self.cp1.investment_count, 2)
self.assertEqual(self.cp1.period, 730)
self.assertEqual(self.cp1.dates, [Timestamp('2010-01-01'), Timestamp('2012-01-01')])
self.assertEqual(self.cp1.ir, 0.1)
self.assertAlmostEqual(self.cp1.closing_value, 34200)
self.assertAlmostEqual(self.cp2.closing_value, 10000)
self.assertAlmostEqual(self.cp3.closing_value, 220385.3483685)
self.assertIsInstance(self.cp1.plan, pd.DataFrame)
self.assertIsInstance(self.cp2.plan, pd.DataFrame)
self.assertIsInstance(self.cp3.plan, pd.DataFrame)
def test_operation(self):
cp_self_add = self.cp1 + self.cp1
cp_add = self.cp1 + self.cp2
cp_add_int = self.cp1 + 10000
cp_mul_int = self.cp1 * 2
cp_mul_float = self.cp2 * 1.5
cp_mul_time = 3 * self.cp2
cp_mul_time2 = 2 * self.cp1
cp_mul_time3 = 2 * self.cp3
cp_mul_float2 = 2. * self.cp3
self.assertIsInstance(cp_self_add, qt.CashPlan)
self.assertEqual(cp_self_add.amounts, [40000, 20000])
self.assertEqual(cp_add.amounts, [20000, 10000, 10000])
self.assertEqual(cp_add_int.amounts, [30000, 20000])
self.assertEqual(cp_mul_int.amounts, [40000, 20000])
self.assertEqual(cp_mul_float.amounts, [15000])
self.assertEqual(cp_mul_float.dates, [Timestamp('2010-05-01')])
self.assertEqual(cp_mul_time.amounts, [10000, 10000, 10000])
self.assertEqual(cp_mul_time.dates, [Timestamp('2010-05-01'),
Timestamp('2011-05-01'),
Timestamp('2012-04-30')])
self.assertEqual(cp_mul_time2.amounts, [20000, 10000, 20000, 10000])
self.assertEqual(cp_mul_time2.dates, [Timestamp('2010-01-01'),
Timestamp('2012-01-01'),
Timestamp('2014-01-01'),
Timestamp('2016-01-01')])
self.assertEqual(cp_mul_time3.dates, [Timestamp('2019-12-31'),
Timestamp('2020-12-31'),
Timestamp('2021-12-31'),
Timestamp('2022-12-31'),
Timestamp('2023-12-31'),
Timestamp('2024-12-31'),
Timestamp('2025-12-31'),
Timestamp('2026-12-31'),
Timestamp('2027-12-31'),
Timestamp('2028-12-31'),
Timestamp('2029-12-31'),
Timestamp('2030-12-31'),
Timestamp('2031-12-29'),
Timestamp('2032-12-29'),
Timestamp('2033-12-29'),
Timestamp('2034-12-29'),
Timestamp('2035-12-29'),
Timestamp('2036-12-29'),
Timestamp('2037-12-29'),
Timestamp('2038-12-29'),
Timestamp('2039-12-29'),
Timestamp('2040-12-29'),
Timestamp('2041-12-29'),
Timestamp('2042-12-29')])
self.assertEqual(cp_mul_float2.dates, [Timestamp('2019-12-31'),
Timestamp('2020-12-31'),
Timestamp('2021-12-31'),
Timestamp('2022-12-31'),
Timestamp('2023-12-31'),
Timestamp('2024-12-31'),
Timestamp('2025-12-31'),
Timestamp('2026-12-31'),
Timestamp('2027-12-31'),
Timestamp('2028-12-31'),
Timestamp('2029-12-31'),
Timestamp('2030-12-31')])
self.assertEqual(cp_mul_float2.amounts, [20000.0,
22000.0,
24000.0,
26000.0,
28000.0,
30000.0,
32000.0,
34000.0,
36000.0,
38000.0,
40000.0,
42000.0])
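
# The closing_value figures asserted in test_properties reduce to simple
# compounding: each investment grows at rate ir from its date to the last
# day, e.g. for cp1: 20000 * 1.1 ** 2 + 10000 * 1.1 ** 0 == 34200. A
# standalone sketch at year granularity (the real calculation is date
# based):
def _closing_value(amounts, years_to_end, ir):
    total = 0.0
    for amount, years in zip(amounts, years_to_end):
        total += amount * (1 + ir) ** years
    return total
# _closing_value([20000, 10000], [2, 0], 0.1) -> 34200.0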
class TestPool(unittest.TestCase):
def setUp(self):
self.p = ResultPool(5)
self.items = ['first', 'second', (1, 2, 3), 'this', 24]
self.perfs = [1, 2, 3, 4, 5]
self.additional_result1 = ('abc', 12)
self.additional_result2 = ([1, 2], -1)
self.additional_result3 = (12, 5)
def test_create(self):
self.assertIsInstance(self.p, ResultPool)
def test_operation(self):
self.p.in_pool(self.additional_result1[0], self.additional_result1[1])
self.p.cut()
self.assertEqual(self.p.item_count, 1)
self.assertEqual(self.p.items, ['abc'])
for item, perf in zip(self.items, self.perfs):
self.p.in_pool(item, perf)
self.assertEqual(self.p.item_count, 6)
self.assertEqual(self.p.items, ['abc', 'first', 'second', (1, 2, 3), 'this', 24])
self.p.cut()
self.assertEqual(self.p.items, ['second', (1, 2, 3), 'this', 24, 'abc'])
self.assertEqual(self.p.perfs, [2, 3, 4, 5, 12])
self.p.in_pool(self.additional_result2[0], self.additional_result2[1])
self.p.in_pool(self.additional_result3[0], self.additional_result3[1])
self.assertEqual(self.p.item_count, 7)
self.p.cut(keep_largest=False)
self.assertEqual(self.p.items, [[1, 2], 'second', (1, 2, 3), 'this', 24])
self.assertEqual(self.p.perfs, [-1, 2, 3, 4, 5])
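
# The pool behavior exercised above is a "keep the capacity best" rule:
# in_pool() appends (item, perf) pairs and cut() keeps the capacity-many
# largest (or smallest) performers, ordered by perf. A minimal standalone
# sketch of that rule (not qteasy's ResultPool implementation):
def _pool_cut(items, perfs, capacity, keep_largest=True):
    pairs = sorted(zip(perfs, items), key=lambda pair: pair[0])
    kept = pairs[-capacity:] if keep_largest else pairs[:capacity]
    return [i for _, i in kept], [p for p, _ in kept]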
class TestCoreSubFuncs(unittest.TestCase):
"""Test all functions in core.py"""
def setUp(self):
pass
def test_input_to_list(self):
print('Testing input_to_list() function')
input_str = 'first'
self.assertEqual(qt.utilfuncs.input_to_list(input_str, 3), ['first', 'first', 'first'])
self.assertEqual(qt.utilfuncs.input_to_list(input_str, 4), ['first', 'first', 'first', 'first'])
self.assertEqual(qt.utilfuncs.input_to_list(input_str, 2, None), ['first', 'first'])
input_list = ['first', 'second']
self.assertEqual(qt.utilfuncs.input_to_list(input_list, 3), ['first', 'second', None])
self.assertEqual(qt.utilfuncs.input_to_list(input_list, 4, 'padder'), ['first', 'second', 'padder', 'padder'])
self.assertEqual(qt.utilfuncs.input_to_list(input_list, 1), ['first', 'second'])
self.assertEqual(qt.utilfuncs.input_to_list(input_list, -5), ['first', 'second'])
def test_point_in_space(self):
sp = Space([(0., 10.), (0., 10.), (0., 10.)])
p1 = (5.5, 3.2, 7)
p2 = (-1, 3, 10)
self.assertTrue(p1 in sp)
print(f'point {p1} is in space {sp}')
self.assertFalse(p2 in sp)
print(f'point {p2} is not in space {sp}')
sp = Space([(0., 10.), (0., 10.), range(40, 3, -2)], 'conti, conti, enum')
p1 = (5.5, 3.2, 8)
self.assertTrue(p1 in sp)
print(f'point {p1} is in space {sp}')
def test_space_in_space(self):
print('test if a space is in another space')
sp = Space([(0., 10.), (0., 10.), (0., 10.)])
sp2 = Space([(0., 10.), (0., 10.), (0., 10.)])
self.assertTrue(sp2 in sp)
self.assertTrue(sp in sp2)
print(f'space {sp2} is in space {sp}\n'
f'and space {sp} is in space {sp2}\n'
f'they are equal to each other\n')
sp2 = Space([(0, 5.), (2, 7.), (3., 9.)])
self.assertTrue(sp2 in sp)
self.assertFalse(sp in sp2)
print(f'space {sp2} is in space {sp}\n'
f'and space {sp} is not in space {sp2}\n'
f'{sp2} is a sub space of {sp}\n')
sp2 = Space([(0, 5), (2, 7), (3., 9)])
self.assertFalse(sp2 in sp)
self.assertFalse(sp in sp2)
print(f'space {sp2} is not in space {sp}\n'
f'and space {sp} is not in space {sp2}\n'
f'they have different types of axes\n')
sp = Space([(0., 10.), (0., 10.), range(40, 3, -2)])
self.assertFalse(sp in sp2)
self.assertFalse(sp2 in sp)
print(f'space {sp2} is not in space {sp}\n'
f'and space {sp} is not in space {sp2}\n'
f'they have different types of axes\n')
def test_space_around_centre(self):
sp = Space([(0., 10.), (0., 10.), (0., 10.)])
p1 = (5.5, 3.2, 7)
ssp = space_around_centre(space=sp, centre=p1, radius=1.2)
print(ssp.boes)
print('\ntest multiple diameters:')
self.assertEqual(ssp.boes, [(4.3, 6.7), (2.0, 4.4), (5.8, 8.2)])
ssp = space_around_centre(space=sp, centre=p1, radius=[1, 2, 1])
print(ssp.boes)
self.assertEqual(ssp.boes, [(4.5, 6.5), (1.2000000000000002, 5.2), (6.0, 8.0)])
print('\ntest points on edge:')
p2 = (5.5, 3.2, 10)
ssp = space_around_centre(space=sp, centre=p1, radius=3.9)
print(ssp.boes)
self.assertEqual(ssp.boes, [(1.6, 9.4), (0.0, 7.1), (3.1, 10.0)])
print('\ntest enum spaces')
sp = Space([(0, 100), range(40, 3, -2)], 'discr, enum')
p1 = [34, 12]
ssp = space_around_centre(space=sp, centre=p1, radius=5, ignore_enums=False)
self.assertEqual(ssp.boes, [(29, 39), (22, 20, 18, 16, 14, 12, 10, 8, 6, 4)])
print(ssp.boes)
print('\ntest enum space and ignore enum axis')
ssp = space_around_centre(space=sp, centre=p1, radius=5)
self.assertEqual(ssp.boes, [(29, 39),
(40, 38, 36, 34, 32, 30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4)])
print(sp.boes)
def test_time_string_format(self):
        print('Testing time_str_format() function:')
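        # time_str_format() renders a duration given in seconds: the default
        # form spells out the units ('3s 140.0ms'), short_form compresses them
        # ('3"140'), and estimation=True keeps only the most significant unit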
t = 3.14
self.assertEqual(time_str_format(t), '3s 140.0ms')
self.assertEqual(time_str_format(t, estimation=True), '3s ')
self.assertEqual(time_str_format(t, short_form=True), '3"140')
self.assertEqual(time_str_format(t, estimation=True, short_form=True), '3"')
t = 300.14
self.assertEqual(time_str_format(t), '5min 140.0ms')
self.assertEqual(time_str_format(t, estimation=True), '5min ')
self.assertEqual(time_str_format(t, short_form=True), "5'140")
self.assertEqual(time_str_format(t, estimation=True, short_form=True), "5'")
t = 7435.0014
self.assertEqual(time_str_format(t), '2hrs 3min 55s 1.4ms')
self.assertEqual(time_str_format(t, estimation=True), '2hrs ')
self.assertEqual(time_str_format(t, short_form=True), "2H3'55\"001")
self.assertEqual(time_str_format(t, estimation=True, short_form=True), "2H")
t = 88425.0509
self.assertEqual(time_str_format(t), '1days 33min 45s 50.9ms')
self.assertEqual(time_str_format(t, estimation=True), '1days ')
self.assertEqual(time_str_format(t, short_form=True), "1D33'45\"051")
self.assertEqual(time_str_format(t, estimation=True, short_form=True), "1D")
def test_get_stock_pool(self):
        print(f'start testing the stock pool building function\n')
share_basics = stock_basic(fields='ts_code,symbol,name,area,industry,market,list_date,exchange')
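        # the full listing table is fetched once up front so that every
        # filtered stock pool below can be cross-checked against it field by field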
print(f'\nselect all stocks by area')
stock_pool = qt.get_stock_pool(area='上海')
print(f'{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock areas are "上海"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].eq('上海').all())
print(f'\nselect all stocks by multiple areas')
stock_pool = qt.get_stock_pool(area='贵州,北京,天津')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock areas are in list of ["贵州", "北京", "天津"]\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].isin(['贵州',
'北京',
'天津']).all())
print(f'\nselect all stocks by area and industry')
stock_pool = qt.get_stock_pool(area='四川', industry='银行, 金融')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock areas are "四川", and industry in ["银行", "金融"]\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['industry'].isin(['银行', '金融']).all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].isin(['四川']).all())
print(f'\nselect all stocks by industry')
stock_pool = qt.get_stock_pool(industry='银行, 金融')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
              f'check if all stock industries are in ["银行", "金融"]\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['industry'].isin(['银行', '金融']).all())
print(f'\nselect all stocks by market')
stock_pool = qt.get_stock_pool(market='主板')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
              f'check if all stock markets are "主板"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['market'].isin(['主板']).all())
print(f'\nselect all stocks by market and list date')
stock_pool = qt.get_stock_pool(date='2000-01-01', market='主板')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
              f'check if all stock markets are "主板", and list dates are on or before "2000-01-01"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['market'].isin(['主板']).all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['list_date'].le('2000-01-01').all())
print(f'\nselect all stocks by list date')
stock_pool = qt.get_stock_pool(date='1997-01-01')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
              f'check if all list dates are on or before "1997-01-01"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['list_date'].le('1997-01-01').all())
print(f'\nselect all stocks by exchange')
stock_pool = qt.get_stock_pool(exchange='SSE')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all exchanges are "SSE"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['exchange'].eq('SSE').all())
print(f'\nselect all stocks by industry, area and list date')
industry_list = ['银行', '全国地产', '互联网', '环境保护', '区域地产',
'酒店餐饮', '运输设备', '综合类', '建筑工程', '玻璃',
'家用电器', '文教休闲', '其他商业', '元器件', 'IT设备',
'其他建材', '汽车服务', '火力发电', '医药商业', '汽车配件',
'广告包装', '轻工机械', '新型电力', '多元金融', '饲料']
area_list = ['深圳', '北京', '吉林', '江苏', '辽宁', '广东',
'安徽', '四川', '浙江', '湖南', '河北', '新疆',
'山东', '河南', '山西', '江西', '青海', '湖北',
'内蒙', '海南', '重庆', '陕西', '福建', '广西',
'上海']
stock_pool = qt.get_stock_pool(date='19980101',
industry=industry_list,
area=area_list)
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
              f'check if industry, area and list date all match the given filters\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['list_date'].le('1998-01-01').all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['industry'].isin(industry_list).all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].isin(area_list).all())
self.assertRaises(KeyError, qt.get_stock_pool, industry=25)
self.assertRaises(KeyError, qt.get_stock_pool, share_name='000300.SH')
self.assertRaises(KeyError, qt.get_stock_pool, markets='SSE')
class TestEvaluations(unittest.TestCase):
"""Test all evaluation functions in core.py"""
    # the manually calculated expected results below are kept in an Excel file
def setUp(self):
"""用np.random生成测试用数据,使用cumsum()模拟股票走势"""
self.test_data1 = pd.DataFrame([5.34892759, 5.65768696, 5.79227076, 5.56266871, 5.88189632,
6.24795001, 5.92755558, 6.38748165, 6.31331899, 5.86001665,
5.61048472, 5.30696736, 5.40406792, 5.03180571, 5.37886353,
5.78608307, 6.26540339, 6.59348026, 6.90943801, 6.70911677,
6.33015954, 6.06697417, 5.9752499, 6.45786408, 6.95273763,
6.7691991, 6.70355481, 6.28048969, 6.61344541, 6.24620003,
6.47409983, 6.4522311, 6.8773094, 6.99727832, 6.59262674,
6.59014938, 6.63758237, 6.38331869, 6.09902105, 6.35390109,
6.51993567, 6.87244592, 6.83963485, 7.08797815, 6.88003144,
6.83657323, 6.97819483, 7.01600276, 7.12554256, 7.58941523,
7.61014457, 7.21224091, 7.48174399, 7.66490854, 7.51371968,
7.11586198, 6.97147399, 6.67453301, 6.2042138, 6.33967015,
6.22187938, 5.98426993, 6.37096079, 6.55897161, 6.26422645,
6.69363762, 7.12668015, 6.83232926, 7.30524081, 7.4262041,
7.54031383, 7.17545919, 7.20659257, 7.44886016, 7.37094393,
6.88011022, 7.08142491, 6.74992833, 6.5967097, 6.21336693,
6.35565105, 6.82347596, 6.44773408, 6.84538053, 6.47966466,
6.09699528, 5.63927014, 6.01081024, 6.20585303, 6.60528206,
7.01594726, 7.03684251, 6.76574977, 7.08740846, 6.65336462,
7.07126686, 6.80058956, 6.79241977, 6.47843472, 6.39245474],
columns=['value'])
self.test_data2 = pd.DataFrame([5.09276527, 4.83828592, 4.6000911, 4.63170487, 4.63566451,
4.50546921, 4.96390044, 4.64557907, 4.25787855, 3.76585551,
3.38826334, 3.76243422, 4.06365426, 3.87084726, 3.91400935,
4.13438822, 4.27064542, 4.56776104, 5.03800296, 5.31070529,
5.39902276, 5.21186286, 5.05683114, 4.68842046, 5.11895168,
5.27151571, 5.72294993, 6.09961056, 6.26569635, 6.48806151,
6.16058885, 6.2582459, 6.38934791, 6.57831057, 6.19508831,
5.70155153, 5.20435735, 5.36538825, 5.40450056, 5.2227697,
5.37828693, 5.53058991, 6.02996797, 5.76802181, 5.66166713,
6.07988994, 5.61794367, 5.63218151, 6.10728013, 6.0324168,
6.27164431, 6.27551239, 6.52329665, 7.00470007, 7.34163113,
7.33699083, 7.67661334, 8.09395749, 7.68086668, 7.58341161,
7.46219819, 7.58671899, 7.19348298, 7.40088323, 7.47562005,
7.93342043, 8.2286081, 8.3521632, 8.43590025, 8.34977395,
8.57563095, 8.81586328, 9.08738649, 9.01542031, 8.8653815,
9.21763111, 9.04233017, 8.59533999, 8.47590075, 8.70857222,
8.78890756, 8.92697606, 9.35743773, 9.68280866, 10.15622021,
10.55908549, 10.6337894, 10.55197128, 10.65435176, 10.54611045,
10.19432562, 10.48320884, 10.36176768, 10.03186854, 10.23656092,
10.0062843, 10.13669686, 10.30758958, 9.87904176, 10.05126375],
columns=['value'])
self.test_data3 = pd.DataFrame([5.02851874, 5.20700348, 5.02410709, 5.49836387, 5.06834371,
5.10956737, 5.15314979, 5.02256472, 5.09746382, 5.23909247,
4.93410336, 4.96316186, 5.40026682, 5.7353255, 5.53438319,
5.79092139, 5.67528173, 5.89840855, 5.75379463, 6.10855386,
5.77322365, 5.84538021, 5.6103973, 5.7518655, 5.49729695,
5.13610628, 5.30524121, 5.68093462, 5.73251319, 6.04420783,
6.26929843, 6.59610234, 6.09872345, 6.25475121, 6.72927396,
6.91395783, 7.00693283, 7.36217783, 7.71516676, 7.67580263,
7.62477511, 7.73600568, 7.53457914, 7.46170277, 7.83658014,
8.11481319, 8.03705544, 7.64948845, 7.52043731, 7.67247943,
7.46511982, 7.43541798, 7.58856517, 7.9392717, 8.25406287,
7.77031632, 8.03223447, 7.86799055, 7.57630999, 7.33230519,
7.22378732, 6.85972264, 7.17548456, 7.5387846, 7.2392632,
6.8455644, 6.59557185, 6.6496796, 6.73685623, 7.18598015,
7.13619128, 6.88060157, 7.1399681, 7.30308077, 6.94942434,
7.0247815, 7.37567798, 7.50080197, 7.59719284, 7.14520561,
7.29913484, 7.79551341, 8.15497781, 8.40456095, 8.86516528,
8.53042688, 8.94268762, 8.52048006, 8.80036284, 8.91602364,
9.19953385, 8.70828953, 8.24613093, 8.18770453, 7.79548389,
7.68627967, 7.23205036, 6.98302636, 7.06515819, 6.95068113],
columns=['value'])
self.test_data4 = pd.DataFrame([4.97926539, 5.44016005, 5.45122915, 5.74485615, 5.45600553,
5.44858945, 5.2435413, 5.47315161, 5.58464303, 5.36179749,
5.38236326, 5.29614981, 5.76523508, 5.75102892, 6.15316618,
6.03852528, 6.01442228, 5.70510182, 5.22748133, 5.46762379,
5.78926267, 5.8221362, 5.61236849, 5.30615725, 5.24200611,
5.41042642, 5.59940342, 5.28306781, 4.99451932, 5.08799266,
5.38865647, 5.58229139, 5.33492845, 5.48206276, 5.09721379,
5.39190493, 5.29965087, 5.0374415, 5.50798022, 5.43107577,
5.22759507, 4.991809, 5.43153084, 5.39966868, 5.59916352,
5.66412137, 6.00611838, 5.63564902, 5.66723484, 5.29863863,
4.91115153, 5.3749929, 5.75082334, 6.08308148, 6.58091182,
6.77848803, 7.19588758, 7.64862286, 7.99818347, 7.91824794,
8.30341071, 8.45984973, 7.98700002, 8.18924931, 8.60755649,
8.66233396, 8.91018407, 9.0782739, 9.33515448, 8.95870245,
8.98426422, 8.50340317, 8.64916085, 8.93592407, 8.63145745,
8.65322862, 8.39543204, 8.37969997, 8.23394504, 8.04062872,
7.91259763, 7.57252171, 7.72670114, 7.74486117, 8.06908188,
7.99166889, 7.92155906, 8.39956136, 8.80181323, 8.47464091,
8.06557064, 7.87145573, 8.0237959, 8.39481998, 8.68525692,
8.81185461, 8.98632237, 9.0989835, 8.89787405, 8.86508591],
columns=['value'])
self.test_data5 = pd.DataFrame([4.50258923, 4.35142568, 4.07459514, 3.87791297, 3.73715985,
3.98455684, 4.07587908, 4.00042472, 4.28276612, 4.01362051,
4.13713565, 4.49312372, 4.48633159, 4.4641207, 4.13444605,
3.79107217, 4.22941629, 4.56548511, 4.92472163, 5.27723158,
5.67409193, 6.00176917, 5.88889928, 5.55256103, 5.39308314,
5.2610492, 5.30738908, 5.22222408, 4.90332238, 4.57499908,
4.96097146, 4.81531011, 4.39115442, 4.63200662, 5.04588813,
4.67866025, 5.01705123, 4.83562258, 4.60381702, 4.66187576,
4.41292828, 4.86604507, 4.42280124, 4.07517294, 4.16317319,
4.10316596, 4.42913598, 4.06609666, 3.96725913, 4.15965746,
4.12379564, 4.04054068, 3.84342851, 3.45902867, 3.17649855,
3.09773586, 3.5502119, 3.66396995, 3.66306483, 3.29131401,
2.79558533, 2.88319542, 3.03671098, 3.44645857, 3.88167161,
3.57961874, 3.60180276, 3.96702102, 4.05429995, 4.40056979,
4.05653231, 3.59600456, 3.60792477, 4.09989922, 3.73503663,
4.01892626, 3.94597242, 3.81466605, 3.71417992, 3.93767156,
4.42806557, 4.06988106, 4.03713636, 4.34408673, 4.79810156,
5.18115011, 4.89798406, 5.3960077, 5.72504875, 5.61894017,
5.1958197, 4.85275896, 5.17550207, 4.71548987, 4.62408567,
4.55488535, 4.36532649, 4.26031979, 4.25225607, 4.58627048],
columns=['value'])
self.test_data6 = pd.DataFrame([5.08639513, 5.05761083, 4.76160923, 4.62166504, 4.62923183,
4.25070173, 4.13447513, 3.90890013, 3.76687608, 3.43342482,
3.67648224, 3.6274775, 3.9385404, 4.39771627, 4.03199346,
3.93265288, 3.50059789, 3.3851961, 3.29743973, 3.2544872,
2.93692949, 2.70893003, 2.55461976, 2.20922332, 2.29054475,
2.2144714, 2.03726827, 2.39007617, 2.29866155, 2.40607111,
2.40440444, 2.79374649, 2.66541922, 2.27018079, 2.08505127,
2.55478864, 2.22415625, 2.58517923, 2.58802256, 2.94870959,
2.69301739, 2.19991535, 2.69473146, 2.64704637, 2.62753542,
2.14240825, 2.38565154, 1.94592117, 2.32243877, 2.69337246,
2.51283854, 2.62484451, 2.15559054, 2.35410875, 2.31219177,
1.96018265, 2.34711266, 2.58083322, 2.40290041, 2.20439791,
2.31472425, 2.16228248, 2.16439749, 2.20080737, 1.73293206,
1.9264407, 2.25089861, 2.69269101, 2.59296687, 2.1420998,
1.67819153, 1.98419023, 2.14479494, 1.89055376, 1.96720648,
1.9916694, 2.37227761, 2.14446036, 2.34573903, 1.86162546,
2.1410721, 2.39204939, 2.52529064, 2.47079939, 2.9299031,
3.09452923, 2.93276708, 3.21731309, 3.06248964, 2.90413406,
2.67844632, 2.45621213, 2.41463398, 2.7373913, 3.14917045,
3.4033949, 3.82283446, 4.02285451, 3.7619638, 4.10346795],
columns=['value'])
self.test_data7 = pd.DataFrame([4.75233583, 4.47668283, 4.55894263, 4.61765848, 4.622892,
4.58941116, 4.32535872, 3.88112797, 3.47237806, 3.50898953,
3.82530406, 3.6718017, 3.78918195, 4.1800752, 4.01818557,
4.40822582, 4.65474654, 4.89287256, 4.40879274, 4.65505126,
4.36876403, 4.58418934, 4.75687172, 4.3689799, 4.16126498,
4.0203982, 3.77148242, 3.38198096, 3.07261764, 2.9014741,
2.5049543, 2.756105, 2.28779058, 2.16986991, 1.8415962,
1.83319008, 2.20898291, 2.00128981, 1.75747025, 1.26676663,
1.40316876, 1.11126484, 1.60376367, 1.22523829, 1.58816681,
1.49705679, 1.80244138, 1.55128293, 1.35339409, 1.50985759,
1.0808451, 1.05892796, 1.43414812, 1.43039101, 1.73631655,
1.43940867, 1.82864425, 1.71088265, 2.12015154, 2.45417128,
2.84777618, 2.7925612, 2.90975121, 3.25920745, 3.13801182,
3.52733677, 3.65468491, 3.69395211, 3.49862035, 3.24786017,
3.64463138, 4.00331929, 3.62509565, 3.78013949, 3.4174012,
3.76312271, 3.62054004, 3.67206716, 3.60596058, 3.38636199,
3.42580676, 3.32921095, 3.02976759, 3.28258676, 3.45760838,
3.24917528, 2.94618304, 2.86980011, 2.63191259, 2.39566759,
2.53159917, 2.96273967, 3.25626185, 2.97425402, 3.16412191,
3.58280763, 3.23257727, 3.62353556, 3.12806399, 2.92532313],
columns=['value'])
        # build a test series of 500 data points, to test the evaluation process when there are more than 250 data points
self.long_data = pd.DataFrame([ 9.879, 9.916, 10.109, 10.214, 10.361, 10.768, 10.594, 10.288,
10.082, 9.994, 10.125, 10.126, 10.384, 10.734, 10.4 , 10.87 ,
11.338, 11.061, 11.415, 11.724, 12.077, 12.196, 12.064, 12.423,
12.19 , 11.729, 11.677, 11.448, 11.485, 10.989, 11.242, 11.239,
11.113, 11.075, 11.471, 11.745, 11.754, 11.782, 12.079, 11.97 ,
12.178, 11.95 , 12.438, 12.612, 12.804, 12.952, 12.612, 12.867,
12.832, 12.832, 13.015, 13.315, 13.249, 12.904, 12.776, 12.64 ,
12.543, 12.287, 12.225, 11.844, 11.985, 11.945, 11.542, 11.871,
12.245, 12.228, 12.362, 11.899, 11.962, 12.374, 12.816, 12.649,
12.252, 12.579, 12.3 , 11.988, 12.177, 12.312, 12.744, 12.599,
12.524, 12.82 , 12.67 , 12.876, 12.986, 13.271, 13.606, 13.82 ,
14.161, 13.833, 13.831, 14.137, 13.705, 13.414, 13.037, 12.759,
12.642, 12.948, 13.297, 13.483, 13.836, 14.179, 13.709, 13.655,
13.198, 13.508, 13.953, 14.387, 14.043, 13.987, 13.561, 13.391,
12.923, 12.555, 12.503, 12.292, 11.877, 12.34 , 12.141, 11.687,
11.992, 12.458, 12.131, 11.75 , 11.739, 11.263, 11.762, 11.976,
11.578, 11.854, 12.136, 12.422, 12.311, 12.56 , 12.879, 12.861,
12.973, 13.235, 13.53 , 13.531, 13.137, 13.166, 13.31 , 13.103,
13.007, 12.643, 12.69 , 12.216, 12.385, 12.046, 12.321, 11.9 ,
11.772, 11.816, 11.871, 11.59 , 11.518, 11.94 , 11.803, 11.924,
12.183, 12.136, 12.361, 12.406, 11.932, 11.684, 11.292, 11.388,
11.874, 12.184, 12.002, 12.16 , 11.741, 11.26 , 11.123, 11.534,
11.777, 11.407, 11.275, 11.679, 11.62 , 11.218, 11.235, 11.352,
11.366, 11.061, 10.661, 10.582, 10.899, 11.352, 11.792, 11.475,
11.263, 11.538, 11.183, 10.936, 11.399, 11.171, 11.214, 10.89 ,
10.728, 11.191, 11.646, 11.62 , 11.195, 11.178, 11.18 , 10.956,
11.205, 10.87 , 11.098, 10.639, 10.487, 10.507, 10.92 , 10.558,
10.119, 9.882, 9.573, 9.515, 9.845, 9.852, 9.495, 9.726,
10.116, 10.452, 10.77 , 11.225, 10.92 , 10.824, 11.096, 11.542,
11.06 , 10.568, 10.585, 10.884, 10.401, 10.068, 9.964, 10.285,
10.239, 10.036, 10.417, 10.132, 9.839, 9.556, 9.084, 9.239,
9.304, 9.067, 8.587, 8.471, 8.007, 8.321, 8.55 , 9.008,
9.138, 9.088, 9.434, 9.156, 9.65 , 9.431, 9.654, 10.079,
10.411, 10.865, 10.51 , 10.205, 10.519, 10.367, 10.855, 10.642,
10.298, 10.622, 10.173, 9.792, 9.995, 9.904, 9.771, 9.597,
9.506, 9.212, 9.688, 10.032, 9.723, 9.839, 9.918, 10.332,
10.236, 9.989, 10.192, 10.685, 10.908, 11.275, 11.72 , 12.158,
12.045, 12.244, 12.333, 12.246, 12.552, 12.958, 13.11 , 13.53 ,
13.123, 13.138, 13.57 , 13.389, 13.511, 13.759, 13.698, 13.744,
13.467, 13.795, 13.665, 13.377, 13.423, 13.772, 13.295, 13.073,
12.718, 12.388, 12.399, 12.185, 11.941, 11.818, 11.465, 11.811,
12.163, 11.86 , 11.935, 11.809, 12.145, 12.624, 12.768, 12.321,
12.277, 11.889, 12.11 , 12.606, 12.943, 12.945, 13.112, 13.199,
13.664, 14.051, 14.189, 14.339, 14.611, 14.656, 15.112, 15.086,
15.263, 15.021, 15.346, 15.572, 15.607, 15.983, 16.151, 16.215,
16.096, 16.089, 16.32 , 16.59 , 16.657, 16.752, 16.583, 16.743,
16.373, 16.662, 16.243, 16.163, 16.491, 16.958, 16.977, 17.225,
17.637, 17.344, 17.684, 17.892, 18.036, 18.182, 17.803, 17.588,
17.101, 17.538, 17.124, 16.787, 17.167, 17.138, 16.955, 17.148,
17.135, 17.635, 17.718, 17.675, 17.622, 17.358, 17.754, 17.729,
17.576, 17.772, 18.239, 18.441, 18.729, 18.319, 18.608, 18.493,
18.069, 18.122, 18.314, 18.423, 18.709, 18.548, 18.384, 18.391,
17.988, 17.986, 17.653, 17.249, 17.298, 17.06 , 17.36 , 17.108,
17.348, 17.596, 17.46 , 17.635, 17.275, 17.291, 16.933, 17.337,
17.231, 17.146, 17.148, 16.751, 16.891, 17.038, 16.735, 16.64 ,
16.231, 15.957, 15.977, 16.077, 16.054, 15.797, 15.67 , 15.911,
16.077, 16.17 , 15.722, 15.258, 14.877, 15.138, 15. , 14.811,
14.698, 14.407, 14.583, 14.704, 15.153, 15.436, 15.634, 15.453,
15.877, 15.696, 15.563, 15.927, 16.255, 16.696, 16.266, 16.698,
16.365, 16.493, 16.973, 16.71 , 16.327, 16.605, 16.486, 16.846,
16.935, 17.21 , 17.389, 17.546, 17.773, 17.641, 17.485, 17.794,
17.354, 16.904, 16.675, 16.43 , 16.898, 16.819, 16.921, 17.201,
17.617, 17.368, 17.864, 17.484],
columns=['value'])
self.long_bench = pd.DataFrame([ 9.7 , 10.179, 10.321, 9.855, 9.936, 10.096, 10.331, 10.662,
10.59 , 11.031, 11.154, 10.945, 10.625, 10.233, 10.284, 10.252,
10.221, 10.352, 10.444, 10.773, 10.904, 11.104, 10.797, 10.55 ,
10.943, 11.352, 11.641, 11.983, 11.696, 12.138, 12.365, 12.379,
11.969, 12.454, 12.947, 13.119, 13.013, 12.763, 12.632, 13.034,
12.681, 12.561, 12.938, 12.867, 13.202, 13.132, 13.539, 13.91 ,
13.456, 13.692, 13.771, 13.904, 14.069, 13.728, 13.97 , 14.228,
13.84 , 14.041, 13.963, 13.689, 13.543, 13.858, 14.118, 13.987,
13.611, 14.028, 14.229, 14.41 , 14.74 , 15.03 , 14.915, 15.207,
15.354, 15.665, 15.877, 15.682, 15.625, 15.175, 15.105, 14.893,
14.86 , 15.097, 15.178, 15.293, 15.238, 15. , 15.283, 14.994,
14.907, 14.664, 14.888, 15.297, 15.313, 15.368, 14.956, 14.802,
14.506, 14.257, 14.619, 15.019, 15.049, 14.625, 14.894, 14.978,
15.434, 15.578, 16.038, 16.107, 16.277, 16.365, 16.204, 16.465,
16.401, 16.895, 17.057, 16.621, 16.225, 16.075, 15.863, 16.292,
16.551, 16.724, 16.817, 16.81 , 17.192, 16.86 , 16.745, 16.707,
16.552, 16.133, 16.301, 16.08 , 15.81 , 15.75 , 15.909, 16.127,
16.457, 16.204, 16.329, 16.748, 16.624, 17.011, 16.548, 16.831,
16.653, 16.791, 16.57 , 16.778, 16.928, 16.932, 17.22 , 16.876,
17.301, 17.422, 17.689, 17.316, 17.547, 17.534, 17.409, 17.669,
17.416, 17.859, 17.477, 17.307, 17.245, 17.352, 17.851, 17.412,
17.144, 17.138, 17.085, 16.926, 16.674, 16.854, 17.064, 16.95 ,
16.609, 16.957, 16.498, 16.552, 16.175, 15.858, 15.697, 15.781,
15.583, 15.36 , 15.558, 16.046, 15.968, 15.905, 16.358, 16.783,
17.048, 16.762, 17.224, 17.363, 17.246, 16.79 , 16.608, 16.423,
15.991, 15.527, 15.147, 14.759, 14.792, 15.206, 15.148, 15.046,
15.429, 14.999, 15.407, 15.124, 14.72 , 14.713, 15.022, 15.092,
14.982, 15.001, 14.734, 14.713, 14.841, 14.562, 15.005, 15.483,
15.472, 15.277, 15.503, 15.116, 15.12 , 15.442, 15.476, 15.789,
15.36 , 15.764, 16.218, 16.493, 16.642, 17.088, 16.816, 16.645,
16.336, 16.511, 16.2 , 15.994, 15.86 , 15.929, 16.316, 16.416,
16.746, 17.173, 17.531, 17.627, 17.407, 17.49 , 17.768, 17.509,
17.795, 18.147, 18.63 , 18.945, 19.021, 19.518, 19.6 , 19.744,
19.63 , 19.32 , 18.933, 19.297, 19.598, 19.446, 19.236, 19.198,
19.144, 19.159, 19.065, 19.032, 18.586, 18.272, 18.119, 18.3 ,
17.894, 17.744, 17.5 , 17.083, 17.092, 16.864, 16.453, 16.31 ,
16.681, 16.342, 16.447, 16.715, 17.068, 17.067, 16.822, 16.673,
16.675, 16.592, 16.686, 16.397, 15.902, 15.597, 15.357, 15.162,
15.348, 15.603, 15.283, 15.257, 15.082, 14.621, 14.366, 14.039,
13.957, 14.141, 13.854, 14.243, 14.414, 14.033, 13.93 , 14.104,
14.461, 14.249, 14.053, 14.165, 14.035, 14.408, 14.501, 14.019,
14.265, 14.67 , 14.797, 14.42 , 14.681, 15.16 , 14.715, 14.292,
14.411, 14.656, 15.094, 15.366, 15.055, 15.198, 14.762, 14.294,
13.854, 13.811, 13.549, 13.927, 13.897, 13.421, 13.037, 13.32 ,
13.721, 13.511, 13.999, 13.529, 13.418, 13.881, 14.326, 14.362,
13.987, 14.015, 13.599, 13.343, 13.307, 13.689, 13.851, 13.404,
13.577, 13.395, 13.619, 13.195, 12.904, 12.553, 12.294, 12.649,
12.425, 11.967, 12.062, 11.71 , 11.645, 12.058, 12.136, 11.749,
11.953, 12.401, 12.044, 11.901, 11.631, 11.396, 11.036, 11.244,
10.864, 11.207, 11.135, 11.39 , 11.723, 12.084, 11.8 , 11.471,
11.33 , 11.504, 11.295, 11.3 , 10.901, 10.494, 10.825, 11.054,
10.866, 10.713, 10.875, 10.846, 10.947, 11.422, 11.158, 10.94 ,
10.521, 10.36 , 10.411, 10.792, 10.472, 10.305, 10.525, 10.853,
10.556, 10.72 , 10.54 , 10.583, 10.299, 10.061, 10.004, 9.903,
9.796, 9.472, 9.246, 9.54 , 9.456, 9.177, 9.484, 9.557,
9.493, 9.968, 9.536, 9.39 , 8.922, 8.423, 8.518, 8.686,
8.771, 9.098, 9.281, 8.858, 9.027, 8.553, 8.784, 8.996,
9.379, 9.846, 9.855, 9.502, 9.608, 9.761, 9.409, 9.4 ,
9.332, 9.34 , 9.284, 8.844, 8.722, 8.376, 8.775, 8.293,
8.144, 8.63 , 8.831, 8.957, 9.18 , 9.601, 9.695, 10.018,
9.841, 9.743, 9.292, 8.85 , 9.316, 9.288, 9.519, 9.738,
9.289, 9.785, 9.804, 10.06 , 10.188, 10.095, 9.739, 9.881,
9.7 , 9.991, 10.391, 10.002],
columns=['value'])
def test_performance_stats(self):
"""test the function performance_statistics()
"""
pass
def test_fv(self):
print(f'test with test data and empty DataFrame')
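        # eval_fv() is expected to return the final value of the 'value'
        # column: each expected number below equals the last element of the
        # corresponding test series, and an empty DataFrame yields -np.inf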
self.assertAlmostEqual(eval_fv(self.test_data1), 6.39245474)
self.assertAlmostEqual(eval_fv(self.test_data2), 10.05126375)
self.assertAlmostEqual(eval_fv(self.test_data3), 6.95068113)
self.assertAlmostEqual(eval_fv(self.test_data4), 8.86508591)
self.assertAlmostEqual(eval_fv(self.test_data5), 4.58627048)
self.assertAlmostEqual(eval_fv(self.test_data6), 4.10346795)
self.assertAlmostEqual(eval_fv(self.test_data7), 2.92532313)
self.assertAlmostEqual(eval_fv(pd.DataFrame()), -np.inf)
        print(f'testing error cases')
self.assertRaises(AssertionError, eval_fv, 15)
self.assertRaises(KeyError,
eval_fv,
pd.DataFrame([1, 2, 3], columns=['non_value']))
def test_max_drawdown(self):
print(f'test with test data and empty DataFrame')
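        # eval_max_drawdown() returns a 4-tuple: (drawdown ratio, peak index,
        # trough index, recovery index); the recovery index is NaN when the
        # series never climbs back to its previous peak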
self.assertAlmostEqual(eval_max_drawdown(self.test_data1)[0], 0.264274308)
self.assertEqual(eval_max_drawdown(self.test_data1)[1], 53)
self.assertEqual(eval_max_drawdown(self.test_data1)[2], 86)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data1)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data2)[0], 0.334690849)
self.assertEqual(eval_max_drawdown(self.test_data2)[1], 0)
self.assertEqual(eval_max_drawdown(self.test_data2)[2], 10)
self.assertEqual(eval_max_drawdown(self.test_data2)[3], 19)
self.assertAlmostEqual(eval_max_drawdown(self.test_data3)[0], 0.244452899)
self.assertEqual(eval_max_drawdown(self.test_data3)[1], 90)
self.assertEqual(eval_max_drawdown(self.test_data3)[2], 99)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data3)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data4)[0], 0.201849684)
self.assertEqual(eval_max_drawdown(self.test_data4)[1], 14)
self.assertEqual(eval_max_drawdown(self.test_data4)[2], 50)
self.assertEqual(eval_max_drawdown(self.test_data4)[3], 54)
self.assertAlmostEqual(eval_max_drawdown(self.test_data5)[0], 0.534206456)
self.assertEqual(eval_max_drawdown(self.test_data5)[1], 21)
self.assertEqual(eval_max_drawdown(self.test_data5)[2], 60)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data5)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data6)[0], 0.670062689)
self.assertEqual(eval_max_drawdown(self.test_data6)[1], 0)
self.assertEqual(eval_max_drawdown(self.test_data6)[2], 70)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data6)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data7)[0], 0.783577449)
self.assertEqual(eval_max_drawdown(self.test_data7)[1], 17)
self.assertEqual(eval_max_drawdown(self.test_data7)[2], 51)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data7)[3]))
self.assertEqual(eval_max_drawdown(pd.DataFrame()), -np.inf)
        print(f'testing error cases')
        self.assertRaises(AssertionError, eval_max_drawdown, 15)
        self.assertRaises(KeyError,
                          eval_max_drawdown,
                          pd.DataFrame([1, 2, 3], columns=['non_value']))
        # test the case where the series crosses zero (max drawdown ratio exceeds 1):
        # TODO: investigate how a division by zero is handled when the peak value is 0
self.assertAlmostEqual(eval_max_drawdown(self.test_data4 - 5)[0], 1.0770474121951792)
self.assertEqual(eval_max_drawdown(self.test_data4 - 5)[1], 14)
self.assertEqual(eval_max_drawdown(self.test_data4 - 5)[2], 50)
def test_info_ratio(self):
reference = self.test_data1
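        # the information ratio is conventionally the mean active return over
        # the reference series divided by the tracking error of those returns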
self.assertAlmostEqual(eval_info_ratio(self.test_data2, reference, 'value'), 0.075553316)
self.assertAlmostEqual(eval_info_ratio(self.test_data3, reference, 'value'), 0.018949457)
self.assertAlmostEqual(eval_info_ratio(self.test_data4, reference, 'value'), 0.056328143)
self.assertAlmostEqual(eval_info_ratio(self.test_data5, reference, 'value'), -0.004270068)
self.assertAlmostEqual(eval_info_ratio(self.test_data6, reference, 'value'), 0.009198027)
self.assertAlmostEqual(eval_info_ratio(self.test_data7, reference, 'value'), -0.000890283)
def test_volatility(self):
self.assertAlmostEqual(eval_volatility(self.test_data1), 0.748646166)
self.assertAlmostEqual(eval_volatility(self.test_data2), 0.75527442)
self.assertAlmostEqual(eval_volatility(self.test_data3), 0.654188853)
self.assertAlmostEqual(eval_volatility(self.test_data4), 0.688375814)
self.assertAlmostEqual(eval_volatility(self.test_data5), 1.089989522)
self.assertAlmostEqual(eval_volatility(self.test_data6), 1.775419308)
self.assertAlmostEqual(eval_volatility(self.test_data7), 1.962758406)
self.assertAlmostEqual(eval_volatility(self.test_data1, logarithm=False), 0.750993311)
self.assertAlmostEqual(eval_volatility(self.test_data2, logarithm=False), 0.75571473)
self.assertAlmostEqual(eval_volatility(self.test_data3, logarithm=False), 0.655331424)
self.assertAlmostEqual(eval_volatility(self.test_data4, logarithm=False), 0.692683021)
self.assertAlmostEqual(eval_volatility(self.test_data5, logarithm=False), 1.09602969)
self.assertAlmostEqual(eval_volatility(self.test_data6, logarithm=False), 1.774789504)
self.assertAlmostEqual(eval_volatility(self.test_data7, logarithm=False), 2.003329156)
self.assertEqual(eval_volatility(pd.DataFrame()), -np.inf)
self.assertRaises(AssertionError, eval_volatility, [1, 2, 3])
        # test volatility calculation on a long data series
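        # the first 250 entries of the expected series are NaN, i.e. the
        # rolling window is 250 observations; eval_volatility() should return
        # the mean of the rolling values and also write the per-period series
        # back into the input frame's 'volatility' column (both checked below)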
expected_volatility = np.array([ np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
0.39955371, 0.39974258, 0.40309866, 0.40486593, 0.4055514 ,
0.40710639, 0.40708157, 0.40609006, 0.4073625 , 0.40835305,
0.41155304, 0.41218193, 0.41207489, 0.41300276, 0.41308415,
0.41292392, 0.41207645, 0.41238397, 0.41229291, 0.41164056,
0.41316317, 0.41348842, 0.41462249, 0.41474574, 0.41652625,
0.41649176, 0.41701556, 0.4166593 , 0.41684221, 0.41491689,
0.41435209, 0.41549087, 0.41849338, 0.41998049, 0.41959106,
0.41907311, 0.41916103, 0.42120773, 0.42052391, 0.42111225,
0.42124589, 0.42356445, 0.42214672, 0.42324022, 0.42476639,
0.42621689, 0.42549439, 0.42533678, 0.42539414, 0.42545038,
0.42593637, 0.42652095, 0.42665489, 0.42699563, 0.42798159,
0.42784512, 0.42898006, 0.42868781, 0.42874188, 0.42789631,
0.4277768 , 0.42776827, 0.42685216, 0.42660989, 0.42563155,
0.42618281, 0.42606281, 0.42505222, 0.42653242, 0.42555378,
0.42500842, 0.42561939, 0.42442059, 0.42395414, 0.42384356,
0.42319135, 0.42397497, 0.42488579, 0.42449729, 0.42508766,
0.42509878, 0.42456616, 0.42535577, 0.42681884, 0.42688552,
0.42779918, 0.42706058, 0.42792887, 0.42762114, 0.42894045,
0.42977398, 0.42919859, 0.42829041, 0.42780946, 0.42825318,
0.42858952, 0.42858315, 0.42805601, 0.42764751, 0.42744107,
0.42775518, 0.42707283, 0.4258592 , 0.42615335, 0.42526286,
0.4248906 , 0.42368986, 0.4232565 , 0.42265079, 0.42263954,
0.42153046, 0.42132051, 0.41995353, 0.41916605, 0.41914271,
0.41876945, 0.41740175, 0.41583884, 0.41614026, 0.41457908,
0.41472411, 0.41310876, 0.41261041, 0.41212369, 0.41211677,
0.4100645 , 0.40852504, 0.40860297, 0.40745338, 0.40698661,
0.40644546, 0.40591375, 0.40640744, 0.40620663, 0.40656649,
0.40727154, 0.40797605, 0.40807137, 0.40808913, 0.40809676,
0.40711767, 0.40724628, 0.40713077, 0.40772698, 0.40765157,
0.40658297, 0.4065991 , 0.405011 , 0.40537645, 0.40432626,
0.40390177, 0.40237701, 0.40291623, 0.40301797, 0.40324145,
0.40312864, 0.40328316, 0.40190955, 0.40246506, 0.40237663,
0.40198407, 0.401969 , 0.40185623, 0.40198313, 0.40005643,
0.39940743, 0.39850438, 0.39845398, 0.39695093, 0.39697295,
0.39663201, 0.39675444, 0.39538699, 0.39331959, 0.39326074,
0.39193287, 0.39157266, 0.39021327, 0.39062591, 0.38917591,
0.38976991, 0.38864187, 0.38872158, 0.38868096, 0.38868377,
0.38842057, 0.38654784, 0.38649517, 0.38600464, 0.38408115,
0.38323049, 0.38260215, 0.38207663, 0.38142669, 0.38003262,
0.37969367, 0.37768092, 0.37732108, 0.37741991, 0.37617779,
0.37698504, 0.37606784, 0.37499276, 0.37533731, 0.37350437,
0.37375172, 0.37385382, 0.37384003, 0.37338938, 0.37212288,
0.37273075, 0.370559 , 0.37038506, 0.37062153, 0.36964661,
0.36818564, 0.3656634 , 0.36539259, 0.36428672, 0.36502487,
0.3647148 , 0.36551435, 0.36409919, 0.36348181, 0.36254383,
0.36166601, 0.36142665, 0.35954942, 0.35846915, 0.35886759,
0.35813867, 0.35642888, 0.35375231, 0.35061783, 0.35078463,
0.34995508, 0.34688918, 0.34548257, 0.34633158, 0.34622833,
0.34652111, 0.34622774, 0.34540951, 0.34418809, 0.34276593,
0.34160916, 0.33811193, 0.33822709, 0.3391685 , 0.33883381])
test_volatility = eval_volatility(self.long_data)
test_volatility_roll = self.long_data['volatility'].values
self.assertAlmostEqual(test_volatility, np.nanmean(expected_volatility))
self.assertTrue(np.allclose(expected_volatility, test_volatility_roll, equal_nan=True))
def test_sharp(self):
self.assertAlmostEqual(eval_sharp(self.test_data1, 5, 0), 0.06135557)
self.assertAlmostEqual(eval_sharp(self.test_data2, 5, 0), 0.167858667)
self.assertAlmostEqual(eval_sharp(self.test_data3, 5, 0), 0.09950547)
self.assertAlmostEqual(eval_sharp(self.test_data4, 5, 0), 0.154928241)
self.assertAlmostEqual(eval_sharp(self.test_data5, 5, 0.002), 0.007868673)
self.assertAlmostEqual(eval_sharp(self.test_data6, 5, 0.002), 0.018306537)
self.assertAlmostEqual(eval_sharp(self.test_data7, 5, 0.002), 0.006259971)
        # test sharp ratio calculation on a long data series
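        # as with volatility, the first 250 rolling values are NaN and the
        # per-period series is expected back in the input frame's 'sharp' column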
expected_sharp = np.array([ np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
-0.02346815, -0.02618783, -0.03763912, -0.03296276, -0.03085698,
-0.02851101, -0.02375842, -0.02016746, -0.01107885, -0.01426613,
-0.00787204, -0.01135784, -0.01164232, -0.01003481, -0.00022512,
-0.00046792, -0.01209378, -0.01278892, -0.01298135, -0.01938214,
-0.01671044, -0.02120509, -0.0244281 , -0.02416067, -0.02763238,
-0.027579 , -0.02372774, -0.02215294, -0.02467094, -0.02091266,
-0.02590194, -0.03049876, -0.02077131, -0.01483653, -0.02488144,
-0.02671638, -0.02561547, -0.01957986, -0.02479803, -0.02703162,
-0.02658087, -0.01641755, -0.01946472, -0.01647757, -0.01280889,
-0.00893643, -0.00643275, -0.00698457, -0.00549962, -0.00654677,
-0.00494757, -0.0035633 , -0.00109037, 0.00750654, 0.00451208,
0.00625502, 0.01221367, 0.01326454, 0.01535037, 0.02269538,
0.02028715, 0.02127712, 0.02333264, 0.02273159, 0.01670643,
0.01376513, 0.01265342, 0.02211647, 0.01612449, 0.00856706,
-0.00077147, -0.00268848, 0.00210993, -0.00443934, -0.00411912,
-0.0018756 , -0.00867461, -0.00581601, -0.00660835, -0.00861137,
-0.00678614, -0.01188408, -0.00589617, -0.00244323, -0.00201891,
-0.01042846, -0.01471016, -0.02167034, -0.02258554, -0.01306809,
-0.00909086, -0.01233746, -0.00595166, -0.00184208, 0.00750497,
0.01481886, 0.01761972, 0.01562886, 0.01446414, 0.01285826,
0.01357719, 0.00967613, 0.01636272, 0.01458437, 0.02280183,
0.02151903, 0.01700276, 0.01597368, 0.02114336, 0.02233297,
0.02585631, 0.02768459, 0.03519235, 0.04204535, 0.04328161,
0.04672855, 0.05046191, 0.04619848, 0.04525853, 0.05381529,
0.04598861, 0.03947394, 0.04665006, 0.05586077, 0.05617728,
0.06495018, 0.06205172, 0.05665466, 0.06500615, 0.0632062 ,
0.06084328, 0.05851466, 0.05659229, 0.05159347, 0.0432977 ,
0.0474047 , 0.04231723, 0.03613176, 0.03618391, 0.03591012,
0.03885674, 0.0402686 , 0.03846423, 0.04534014, 0.04721458,
0.05130912, 0.05026281, 0.05394312, 0.05529349, 0.05949243,
0.05463304, 0.06195165, 0.06767606, 0.06880985, 0.07048996,
0.07078815, 0.07420767, 0.06773439, 0.0658441 , 0.06470875,
0.06302349, 0.06456876, 0.06411282, 0.06216669, 0.067094 ,
0.07055075, 0.07254976, 0.07119253, 0.06173308, 0.05393352,
0.05681246, 0.05250643, 0.06099845, 0.0655544 , 0.06977334,
0.06636514, 0.06177949, 0.06869908, 0.06719767, 0.06178738,
0.05915714, 0.06882277, 0.06756821, 0.06507994, 0.06489791,
0.06553941, 0.073123 , 0.07576757, 0.06805446, 0.06063571,
0.05033801, 0.05206971, 0.05540306, 0.05249118, 0.05755587,
0.0586174 , 0.05051288, 0.0564852 , 0.05757284, 0.06358355,
0.06130082, 0.04925482, 0.03834472, 0.04163981, 0.04648316,
0.04457858, 0.04324626, 0.04328791, 0.04156207, 0.04818652,
0.04972634, 0.06024123, 0.06489556, 0.06255485, 0.06069815,
0.06466389, 0.07081163, 0.07895358, 0.0881782 , 0.09374151,
0.08336506, 0.08764795, 0.09080174, 0.08808926, 0.08641158,
0.07811943, 0.06885318, 0.06479503, 0.06851185, 0.07382819,
0.07047903, 0.06658251, 0.07638379, 0.08667974, 0.08867918,
0.08245323, 0.08961866, 0.09905298, 0.0961908 , 0.08562706,
0.0839014 , 0.0849072 , 0.08338395, 0.08783487, 0.09463609,
0.10332336, 0.11806497, 0.11220297, 0.11589097, 0.11678405])
test_sharp = eval_sharp(self.long_data, 5, 0.00035)
self.assertAlmostEqual(np.nanmean(expected_sharp), test_sharp)
self.assertTrue(np.allclose(self.long_data['sharp'].values, expected_sharp, equal_nan=True))
def test_beta(self):
reference = self.test_data1
self.assertAlmostEqual(eval_beta(self.test_data2, reference, 'value'), -0.017148939)
self.assertAlmostEqual(eval_beta(self.test_data3, reference, 'value'), -0.042204233)
self.assertAlmostEqual(eval_beta(self.test_data4, reference, 'value'), -0.15652986)
self.assertAlmostEqual(eval_beta(self.test_data5, reference, 'value'), -0.049195532)
self.assertAlmostEqual(eval_beta(self.test_data6, reference, 'value'), -0.026995082)
self.assertAlmostEqual(eval_beta(self.test_data7, reference, 'value'), -0.01147809)
self.assertRaises(TypeError, eval_beta, [1, 2, 3], reference, 'value')
self.assertRaises(TypeError, eval_beta, self.test_data3, [1, 2, 3], 'value')
self.assertRaises(KeyError, eval_beta, self.test_data3, reference, 'not_found_value')
        # test beta calculation on a long data series
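        # rolling beta against self.long_bench: NaN for the first 250 points,
        # with the per-period series expected back in the 'beta' column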
expected_beta = np.array([ np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
-0.04988841, -0.05127618, -0.04692104, -0.04272652, -0.04080598,
-0.0493347 , -0.0460858 , -0.0416761 , -0.03691527, -0.03724924,
-0.03678865, -0.03987324, -0.03488321, -0.02567672, -0.02690303,
-0.03010128, -0.02437967, -0.02571932, -0.02455681, -0.02839811,
-0.03358653, -0.03396697, -0.03466321, -0.03050966, -0.0247583 ,
-0.01629325, -0.01880895, -0.01480403, -0.01348783, -0.00544294,
-0.00648176, -0.00467036, -0.01135331, -0.0156841 , -0.02340763,
-0.02615705, -0.02730771, -0.02906174, -0.02860664, -0.02412914,
-0.02066416, -0.01744816, -0.02185133, -0.02145285, -0.02681765,
-0.02827694, -0.02394581, -0.02744096, -0.02778825, -0.02703065,
-0.03160023, -0.03615371, -0.03681072, -0.04265126, -0.04344738,
-0.04232421, -0.04705272, -0.04533344, -0.04605934, -0.05272737,
-0.05156463, -0.05134196, -0.04730733, -0.04425352, -0.03869831,
-0.04159571, -0.04223998, -0.04346747, -0.04229844, -0.04740093,
-0.04992507, -0.04621232, -0.04477644, -0.0486915 , -0.04598224,
-0.04943463, -0.05006391, -0.05362256, -0.04994067, -0.05464769,
-0.05443275, -0.05513493, -0.05173594, -0.04500994, -0.04662891,
-0.03903505, -0.0419592 , -0.04307773, -0.03925718, -0.03711574,
-0.03992631, -0.0433058 , -0.04533641, -0.0461183 , -0.05600344,
-0.05758377, -0.05959874, -0.05605942, -0.06002859, -0.06253002,
-0.06747014, -0.06427915, -0.05931947, -0.05769974, -0.04791515,
-0.05175088, -0.05748039, -0.05385232, -0.05072975, -0.05052637,
-0.05125567, -0.05005785, -0.05325104, -0.04977727, -0.04947867,
-0.05148544, -0.05739156, -0.05742069, -0.06047279, -0.0558414 ,
-0.06086126, -0.06265151, -0.06411129, -0.06828052, -0.06781762,
-0.07083409, -0.07211207, -0.06799162, -0.06913295, -0.06775162,
-0.0696265 , -0.06678248, -0.06867502, -0.06581961, -0.07055823,
-0.06448184, -0.06097973, -0.05795587, -0.0618383 , -0.06130145,
-0.06050652, -0.05936661, -0.05749424, -0.0499 , -0.05050495,
-0.04962687, -0.05033439, -0.05070116, -0.05422009, -0.05369759,
-0.05548943, -0.05907353, -0.05933035, -0.05927918, -0.06227663,
-0.06011455, -0.05650432, -0.05828134, -0.05620949, -0.05715323,
-0.05482478, -0.05387113, -0.05095559, -0.05377999, -0.05334267,
-0.05220438, -0.04001521, -0.03892434, -0.03660782, -0.04282708,
-0.04324623, -0.04127048, -0.04227559, -0.04275226, -0.04347049,
-0.04125853, -0.03806295, -0.0330632 , -0.03155531, -0.03277152,
-0.03304518, -0.03878731, -0.03830672, -0.03727434, -0.0370571 ,
-0.04509224, -0.04207632, -0.04116198, -0.04545179, -0.04584584,
-0.05287341, -0.05417433, -0.05175836, -0.05005509, -0.04268674,
-0.03442321, -0.03457309, -0.03613426, -0.03524391, -0.03629479,
-0.04361312, -0.02626705, -0.02406115, -0.03046384, -0.03181044,
-0.03375164, -0.03661673, -0.04520779, -0.04926951, -0.05726738,
-0.0584486 , -0.06220608, -0.06800563, -0.06797431, -0.07562211,
-0.07481996, -0.07731229, -0.08413381, -0.09031826, -0.09691925,
-0.11018071, -0.11952675, -0.10826026, -0.11173895, -0.10756359,
-0.10775916, -0.11664559, -0.10505051, -0.10606547, -0.09855355,
-0.10004159, -0.10857084, -0.12209301, -0.11605758, -0.11105113,
-0.1155195 , -0.11569505, -0.10513348, -0.09611072, -0.10719791,
-0.10843965, -0.11025856, -0.10247839, -0.10554044, -0.10927647,
-0.10645088, -0.09982498, -0.10542734, -0.09631372, -0.08229695])
test_beta_mean = eval_beta(self.long_data, self.long_bench, 'value')
test_beta_roll = self.long_data['beta'].values
self.assertAlmostEqual(test_beta_mean, np.nanmean(expected_beta))
self.assertTrue(np.allclose(test_beta_roll, expected_beta, equal_nan=True))
def test_alpha(self):
reference = self.test_data1
self.assertAlmostEqual(eval_alpha(self.test_data2, 5, reference, 'value', 0.5), 11.63072977)
self.assertAlmostEqual(eval_alpha(self.test_data3, 5, reference, 'value', 0.5), 1.886590071)
self.assertAlmostEqual(eval_alpha(self.test_data4, 5, reference, 'value', 0.5), 6.827021872)
self.assertAlmostEqual(eval_alpha(self.test_data5, 5, reference, 'value', 0.92), -1.192265168)
self.assertAlmostEqual(eval_alpha(self.test_data6, 5, reference, 'value', 0.92), -1.437142359)
self.assertAlmostEqual(eval_alpha(self.test_data7, 5, reference, 'value', 0.92), -1.781311545)
        # test alpha calculation on a long data series
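        # rolling alpha against self.long_bench: NaN for the first 250 points,
        # with the per-period series expected back in the 'alpha' column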
expected_alpha = np.array([ np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
-0.09418119, -0.11188463, -0.17938358, -0.15588172, -0.1462678 ,
-0.13089586, -0.10780125, -0.09102891, -0.03987585, -0.06075686,
-0.02459503, -0.04104284, -0.0444565 , -0.04074585, 0.02191275,
0.02255955, -0.05583375, -0.05875539, -0.06055551, -0.09648245,
-0.07913737, -0.10627829, -0.12320965, -0.12368335, -0.1506743 ,
-0.15768033, -0.13638829, -0.13065298, -0.14537834, -0.127428 ,
-0.15504529, -0.18184636, -0.12652146, -0.09190138, -0.14847221,
-0.15840648, -0.1525789 , -0.11859418, -0.14700954, -0.16295761,
-0.16051645, -0.10364859, -0.11961134, -0.10258267, -0.08090148,
-0.05727746, -0.0429945 , -0.04672356, -0.03581408, -0.0439215 ,
-0.03429495, -0.0260362 , -0.01075022, 0.04931808, 0.02779388,
0.03984083, 0.08311951, 0.08995566, 0.10522428, 0.16159058,
0.14238174, 0.14759783, 0.16257712, 0.158908 , 0.11302115,
0.0909566 , 0.08272888, 0.15261884, 0.10546376, 0.04990313,
-0.01284111, -0.02720704, 0.00454725, -0.03965491, -0.03818265,
-0.02186992, -0.06574751, -0.04846454, -0.05204211, -0.06316498,
-0.05095099, -0.08502656, -0.04681162, -0.02362027, -0.02205091,
-0.07706374, -0.10371841, -0.14434688, -0.14797935, -0.09055402,
-0.06739549, -0.08824959, -0.04855888, -0.02291244, 0.04027138,
0.09370505, 0.11472939, 0.10243593, 0.0921445 , 0.07662648,
0.07946651, 0.05450718, 0.10497677, 0.09068334, 0.15462924,
0.14231034, 0.10544952, 0.09980256, 0.14035223, 0.14942974,
0.17624102, 0.19035477, 0.2500807 , 0.30724652, 0.31768915,
0.35007521, 0.38412975, 0.34356521, 0.33614463, 0.41206165,
0.33999177, 0.28045963, 0.34076789, 0.42220356, 0.42314636,
0.50790423, 0.47713348, 0.42520169, 0.50488411, 0.48705211,
0.46252601, 0.44325578, 0.42640573, 0.37986783, 0.30652822,
0.34503393, 0.2999069 , 0.24928617, 0.24730218, 0.24326897,
0.26657905, 0.27861168, 0.26392824, 0.32552649, 0.34177792,
0.37837011, 0.37025267, 0.4030612 , 0.41339361, 0.45076809,
0.40383354, 0.47093422, 0.52505036, 0.53614256, 0.5500943 ,
0.55319293, 0.59021451, 0.52358459, 0.50605947, 0.49359168,
0.47895956, 0.49320243, 0.4908336 , 0.47310767, 0.51821564,
0.55105932, 0.57291504, 0.5599809 , 0.46868842, 0.39620087,
0.42086934, 0.38317217, 0.45934108, 0.50048866, 0.53941991,
0.50676751, 0.46500915, 0.52993663, 0.51668366, 0.46405428,
0.44100603, 0.52726147, 0.51565458, 0.49186248, 0.49001081,
0.49367648, 0.56422294, 0.58882785, 0.51334664, 0.44386256,
0.35056709, 0.36490029, 0.39205071, 0.3677061 , 0.41134736,
0.42315067, 0.35356394, 0.40324562, 0.41340007, 0.46503322,
0.44355762, 0.34854314, 0.26412842, 0.28633753, 0.32335224,
0.30761141, 0.29709569, 0.29570487, 0.28000063, 0.32802547,
0.33967726, 0.42511212, 0.46252357, 0.44244974, 0.42152907,
0.45436727, 0.50482359, 0.57339198, 0.6573356 , 0.70912003,
0.60328917, 0.6395092 , 0.67015805, 0.64241557, 0.62779142,
0.55028063, 0.46448736, 0.43709245, 0.46777983, 0.51789439,
0.48594916, 0.4456216 , 0.52008189, 0.60548684, 0.62792473,
0.56645031, 0.62766439, 0.71829315, 0.69481356, 0.59550329,
0.58133754, 0.59014148, 0.58026655, 0.61719273, 0.67373203,
0.75573056, 0.89501633, 0.8347253 , 0.87964685, 0.89015835])
test_alpha_mean = eval_alpha(self.long_data, 100, self.long_bench, 'value')
test_alpha_roll = self.long_data['alpha'].values
self.assertAlmostEqual(test_alpha_mean, np.nanmean(expected_alpha))
self.assertTrue(np.allclose(test_alpha_roll, expected_alpha, equal_nan=True))
def test_calmar(self):
"""test evaluate function eval_calmar()"""
pass
def test_benchmark(self):
reference = self.test_data1
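        # eval_benchmark() evaluates the reference series itself, so every
        # call below returns the same (total return, yearly return) pair no
        # matter which test series is passed as the first argument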
tr, yr = eval_benchmark(self.test_data2, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data3, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data4, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data5, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data6, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data7, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
def test_evaluate(self):
pass
class TestLoop(unittest.TestCase):
"""通过一个假设但精心设计的例子来测试loop_step以及loop方法的正确性"""
def setUp(self):
self.shares = ['share1', 'share2', 'share3', 'share4', 'share5', 'share6', 'share7']
self.dates = ['2016/07/01', '2016/07/04', '2016/07/05', '2016/07/06', '2016/07/07',
'2016/07/08', '2016/07/11', '2016/07/12', '2016/07/13', '2016/07/14',
'2016/07/15', '2016/07/18', '2016/07/19', '2016/07/20', '2016/07/21',
'2016/07/22', '2016/07/25', '2016/07/26', '2016/07/27', '2016/07/28',
'2016/07/29', '2016/08/01', '2016/08/02', '2016/08/03', '2016/08/04',
'2016/08/05', '2016/08/08', '2016/08/09', '2016/08/10', '2016/08/11',
'2016/08/12', '2016/08/15', '2016/08/16', '2016/08/17', '2016/08/18',
'2016/08/19', '2016/08/22', '2016/08/23', '2016/08/24', '2016/08/25',
'2016/08/26', '2016/08/29', '2016/08/30', '2016/08/31', '2016/09/01',
'2016/09/02', '2016/09/05', '2016/09/06', '2016/09/07', '2016/09/08',
'2016/09/09', '2016/09/12', '2016/09/13', '2016/09/14', '2016/09/15',
'2016/09/16', '2016/09/19', '2016/09/20', '2016/09/21', '2016/09/22',
'2016/09/23', '2016/09/26', '2016/09/27', '2016/09/28', '2016/09/29',
'2016/09/30', '2016/10/10', '2016/10/11', '2016/10/12', '2016/10/13',
'2016/10/14', '2016/10/17', '2016/10/18', '2016/10/19', '2016/10/20',
'2016/10/21', '2016/10/23', '2016/10/24', '2016/10/25', '2016/10/26',
'2016/10/27', '2016/10/29', '2016/10/30', '2016/10/31', '2016/11/01',
'2016/11/02', '2016/11/05', '2016/11/06', '2016/11/07', '2016/11/08',
'2016/11/09', '2016/11/12', '2016/11/13', '2016/11/14', '2016/11/15',
'2016/11/16', '2016/11/19', '2016/11/20', '2016/11/21', '2016/11/22']
self.dates = [pd.Timestamp(date_text) for date_text in self.dates]
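        # price matrix: 100 trading days (rows, matching self.dates) by
        # 7 shares (columns, matching self.shares)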
self.prices = np.array([[5.35, 5.09, 5.03, 4.98, 4.50, 5.09, 4.75],
[5.66, 4.84, 5.21, 5.44, 4.35, 5.06, 4.48],
[5.79, 4.60, 5.02, 5.45, 4.07, 4.76, 4.56],
[5.56, 4.63, 5.50, 5.74, 3.88, 4.62, 4.62],
[5.88, 4.64, 5.07, 5.46, 3.74, 4.63, 4.62],
[6.25, 4.51, 5.11, 5.45, 3.98, 4.25, 4.59],
[5.93, 4.96, 5.15, 5.24, 4.08, 4.13, 4.33],
[6.39, 4.65, 5.02, 5.47, 4.00, 3.91, 3.88],
[6.31, 4.26, 5.10, 5.58, 4.28, 3.77, 3.47],
[5.86, 3.77, 5.24, 5.36, 4.01, 3.43, 3.51],
[5.61, 3.39, 4.93, 5.38, 4.14, 3.68, 3.83],
[5.31, 3.76, 4.96, 5.30, 4.49, 3.63, 3.67],
[5.40, 4.06, 5.40, 5.77, 4.49, 3.94, 3.79],
[5.03, 3.87, 5.74, 5.75, 4.46, 4.40, 4.18],
[5.38, 3.91, 5.53, 6.15, 4.13, 4.03, 4.02],
[5.79, 4.13, 5.79, 6.04, 3.79, 3.93, 4.41],
[6.27, 4.27, 5.68, 6.01, 4.23, 3.50, 4.65],
[6.59, 4.57, 5.90, 5.71, 4.57, 3.39, 4.89],
[6.91, 5.04, 5.75, 5.23, 4.92, 3.30, 4.41],
[6.71, 5.31, 6.11, 5.47, 5.28, 3.25, 4.66],
[6.33, 5.40, 5.77, 5.79, 5.67, 2.94, 4.37],
[6.07, 5.21, 5.85, 5.82, 6.00, 2.71, 4.58],
[5.98, 5.06, 5.61, 5.61, 5.89, 2.55, 4.76],
[6.46, 4.69, 5.75, 5.31, 5.55, 2.21, 4.37],
[6.95, 5.12, 5.50, 5.24, 5.39, 2.29, 4.16],
[6.77, 5.27, 5.14, 5.41, 5.26, 2.21, 4.02],
[6.70, 5.72, 5.31, 5.60, 5.31, 2.04, 3.77],
[6.28, 6.10, 5.68, 5.28, 5.22, 2.39, 3.38],
[6.61, 6.27, 5.73, 4.99, 4.90, 2.30, 3.07],
[6.25, 6.49, 6.04, 5.09, 4.57, 2.41, 2.90],
[6.47, 6.16, 6.27, 5.39, 4.96, 2.40, 2.50],
[6.45, 6.26, 6.60, 5.58, 4.82, 2.79, 2.76],
[6.88, 6.39, 6.10, 5.33, 4.39, 2.67, 2.29],
[7.00, 6.58, 6.25, 5.48, 4.63, 2.27, 2.17],
[6.59, 6.20, 6.73, 5.10, 5.05, 2.09, 1.84],
[6.59, 5.70, 6.91, 5.39, 4.68, 2.55, 1.83],
[6.64, 5.20, 7.01, 5.30, 5.02, 2.22, 2.21],
[6.38, 5.37, 7.36, 5.04, 4.84, 2.59, 2.00],
[6.10, 5.40, 7.72, 5.51, 4.60, 2.59, 1.76],
[6.35, 5.22, 7.68, 5.43, 4.66, 2.95, 1.27],
[6.52, 5.38, 7.62, 5.23, 4.41, 2.69, 1.40],
[6.87, 5.53, 7.74, 4.99, 4.87, 2.20, 1.11],
[6.84, 6.03, 7.53, 5.43, 4.42, 2.69, 1.60],
[7.09, 5.77, 7.46, 5.40, 4.08, 2.65, 1.23],
[6.88, 5.66, 7.84, 5.60, 4.16, 2.63, 1.59],
[6.84, 6.08, 8.11, 5.66, 4.10, 2.14, 1.50],
[6.98, 5.62, 8.04, 6.01, 4.43, 2.39, 1.80],
[7.02, 5.63, 7.65, 5.64, 4.07, 1.95, 1.55],
[7.13, 6.11, 7.52, 5.67, 3.97, 2.32, 1.35],
[7.59, 6.03, 7.67, 5.30, 4.16, 2.69, 1.51],
[7.61, 6.27, 7.47, 4.91, 4.12, 2.51, 1.08],
[7.21, 6.28, 7.44, 5.37, 4.04, 2.62, 1.06],
[7.48, 6.52, 7.59, 5.75, 3.84, 2.16, 1.43],
[7.66, 7.00, 7.94, 6.08, 3.46, 2.35, 1.43],
[7.51, 7.34, 8.25, 6.58, 3.18, 2.31, 1.74],
[7.12, 7.34, 7.77, 6.78, 3.10, 1.96, 1.44],
[6.97, 7.68, 8.03, 7.20, 3.55, 2.35, 1.83],
[6.67, 8.09, 7.87, 7.65, 3.66, 2.58, 1.71],
[6.20, 7.68, 7.58, 8.00, 3.66, 2.40, 2.12],
[6.34, 7.58, 7.33, 7.92, 3.29, 2.20, 2.45],
[6.22, 7.46, 7.22, 8.30, 2.80, 2.31, 2.85],
[5.98, 7.59, 6.86, 8.46, 2.88, 2.16, 2.79],
[6.37, 7.19, 7.18, 7.99, 3.04, 2.16, 2.91],
[6.56, 7.40, 7.54, 8.19, 3.45, 2.20, 3.26],
[6.26, 7.48, 7.24, 8.61, 3.88, 1.73, 3.14],
[6.69, 7.93, 6.85, 8.66, 3.58, 1.93, 3.53],
[7.13, 8.23, 6.60, 8.91, 3.60, 2.25, 3.65],
[6.83, 8.35, 6.65, 9.08, 3.97, 2.69, 3.69],
[7.31, 8.44, 6.74, 9.34, 4.05, 2.59, 3.50],
[7.43, 8.35, 7.19, 8.96, 4.40, 2.14, 3.25],
[7.54, 8.58, 7.14, 8.98, 4.06, 1.68, 3.64],
[7.18, 8.82, 6.88, 8.50, 3.60, 1.98, 4.00],
[7.21, 9.09, 7.14, 8.65, 3.61, 2.14, 3.63],
[7.45, 9.02, 7.30, 8.94, 4.10, 1.89, 3.78],
[7.37, 8.87, 6.95, 8.63, 3.74, 1.97, 3.42],
[6.88, 9.22, 7.02, 8.65, 4.02, 1.99, 3.76],
[7.08, 9.04, 7.38, 8.40, 3.95, 2.37, 3.62],
[6.75, 8.60, 7.50, 8.38, 3.81, 2.14, 3.67],
[6.60, 8.48, 7.60, 8.23, 3.71, 2.35, 3.61],
[6.21, 8.71, 7.15, 8.04, 3.94, 1.86, 3.39],
[6.36, 8.79, 7.30, 7.91, 4.43, 2.14, 3.43],
[6.82, 8.93, 7.80, 7.57, 4.07, 2.39, 3.33],
[6.45, 9.36, 8.15, 7.73, 4.04, 2.53, 3.03],
[6.85, 9.68, 8.40, 7.74, 4.34, 2.47, 3.28],
[6.48, 10.16, 8.87, 8.07, 4.80, 2.93, 3.46],
[6.10, 10.56, 8.53, 7.99, 5.18, 3.09, 3.25],
[5.64, 10.63, 8.94, 7.92, 4.90, 2.93, 2.95],
[6.01, 10.55, 8.52, 8.40, 5.40, 3.22, 2.87],
[6.21, 10.65, 8.80, 8.80, 5.73, 3.06, 2.63],
[6.61, 10.55, 8.92, 8.47, 5.62, 2.90, 2.40],
[7.02, 10.19, 9.20, 8.07, 5.20, 2.68, 2.53],
[7.04, 10.48, 8.71, 7.87, 4.85, 2.46, 2.96],
[6.77, 10.36, 8.25, 8.02, 5.18, 2.41, 3.26],
[7.09, 10.03, 8.19, 8.39, 4.72, 2.74, 2.97],
[6.65, 10.24, 7.80, 8.69, 4.62, 3.15, 3.16],
[7.07, 10.01, 7.69, 8.81, 4.55, 3.40, 3.58],
[6.80, 10.14, 7.23, 8.99, 4.37, 3.82, 3.23],
[6.79, 10.31, 6.98, 9.10, 4.26, 4.02, 3.62],
[6.48, 9.88, 7.07, 8.90, 4.25, 3.76, 3.13],
[6.39, 10.05, 6.95, 8.87, 4.59, 4.10, 2.93]])
self.op_signals = np.array([[0, 0, 0, 0, 0.25, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0.1, 0.15],
[0.2, 0.2, 0, 0, 0, 0, 0],
[0, 0, 0.1, 0, 0, 0, 0],
[0, 0, 0, 0, -0.75, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[-0.333, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, -0.5, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, -1],
[0, 0, 0, 0, 0.2, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[-0.5, 0, 0, 0.15, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0.2, 0, -1, 0.2, 0],
[0.5, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0.2, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, -0.5, 0.2],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0.2, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0.15, 0, 0],
[-1, 0, 0.25, 0.25, 0, 0.25, 0],
[0, 0, 0, 0, 0, 0, 0],
[0.25, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0.2, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, -1, 0, 0.2],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, -1, 0, 0, 0, 0, 0],
[-1, 0, 0.15, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]])
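        # Signal convention (inferred from the expected results in self.res below):
        # a positive signal buys with that fraction of the available cash, e.g. 0.25
        # on day 1 spends 2500 of the 10000 cash at price 4.50 and yields 555.556
        # shares; a negative signal sells that fraction of the current holding,
        # e.g. -0.75 reduces the 555.556 shares to 138.889.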
self.cash = qt.CashPlan(['2016/07/01', '2016/08/12', '2016/09/23'], [10000, 10000, 10000])
self.rate = qt.Cost(buy_fix=0,
sell_fix=0,
buy_rate=0,
sell_rate=0,
buy_min=0,
sell_min=0,
slipage=0)
self.rate2 = qt.Cost(buy_fix=0,
sell_fix=0,
buy_rate=0,
sell_rate=0,
buy_min=10,
sell_min=5,
slipage=0)
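        # self.rate is a frictionless cost model (all fees and slippage zero), while
        # self.rate2 adds minimum transaction fees (buy_min=10, sell_min=5) and is
        # used in the moq test of test_loop below.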
self.op_signal_df = pd.DataFrame(self.op_signals, index=self.dates, columns=self.shares)
self.history_list = pd.DataFrame(self.prices, index=self.dates, columns=self.shares)
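        # Expected daily results of the full loop, one row per trading day. Judging
        # from the day-1 values checked in test_loop_step, the first seven columns
        # are the share holdings, followed by remaining cash, fee and total value.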
self.res = np.array([[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 10000.000],
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 9916.667],
[0.000, 0.000, 0.000, 0.000, 555.556, 205.065, 321.089, 5059.722, 0.000, 9761.111],
[346.982, 416.679, 0.000, 0.000, 555.556, 205.065, 321.089, 1201.278, 0.000, 9646.112],
[346.982, 416.679, 191.037, 0.000, 555.556, 205.065, 321.089, 232.719, 0.000, 9685.586],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9813.218],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9803.129],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9608.020],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9311.573],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 8883.625],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 8751.390],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 8794.181],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9136.570],
[231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9209.359],
[231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9093.829],
[231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9387.554],
[231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9585.959],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 9928.777],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 10060.381],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 10281.002],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 10095.561],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 0.000, 4506.393, 0.000, 10029.957],
[231.437, 416.679, 95.519, 0.000, 474.224, 205.065, 0.000, 2531.270, 0.000, 9875.613],
[231.437, 416.679, 95.519, 0.000, 474.224, 205.065, 0.000, 2531.270, 0.000, 9614.946],
[231.437, 416.679, 95.519, 0.000, 474.224, 205.065, 0.000, 2531.270, 0.000, 9824.172],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9732.574],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9968.339],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 10056.158],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9921.492],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9894.162],
[115.719, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 6179.774, 0.000, 20067.937],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21133.508],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20988.848],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20596.743],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 19910.773],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20776.707],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20051.797],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20725.388],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20828.880],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21647.181],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21310.169],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20852.099],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21912.395],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21937.828],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21962.458],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21389.402],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22027.453],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 20939.999],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21250.064],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22282.781],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21407.066],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21160.237],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21826.768],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22744.940],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 23466.118],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22017.882],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 23191.466],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 23099.082],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22684.767],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22842.135],
[1073.823, 416.679, 735.644, 269.850, 1785.205, 938.697, 1339.207, 5001.425, 0, 33323.836],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 32820.290],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 33174.614],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 35179.466],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 34465.195],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 34712.354],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 35755.550],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 37895.223],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 37854.284],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 37198.374],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 35916.711],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 35806.937],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 36317.592],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 37103.973],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 35457.883],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 36717.685],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 37641.463],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 36794.298],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 37073.817],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 35244.299],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 37062.382],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 37420.067],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 38089.058],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 39260.542],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 42609.684],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 43109.309],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 42283.408],
[0.000, 416.679, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 915.621, 0.000, 43622.444],
[0.000, 416.679, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 915.621, 0.000, 42830.254],
[0.000, 416.679, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 915.621, 0.000, 41266.463],
[0.000, 416.679, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 915.621, 0.000, 41164.839],
[0.000, 416.679, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 915.621, 0.000, 41797.937],
[0.000, 416.679, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 915.621, 0.000, 42440.861],
[0.000, 416.679, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 915.621, 0.000, 42113.839],
[0.000, 416.679, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 915.621, 0.000, 43853.588],
[0.000, 416.679, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 915.621, 0.000, 46216.760],
[0.000, 0.000, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 5140.743, 0.000, 45408.737],
[0.000, 0.000, 2027.188, 719.924, 0.000, 2701.488, 4379.099, 0.000, 0.000, 47413.401],
[0.000, 0.000, 2027.188, 719.924, 0.000, 2701.488, 4379.099, 0.000, 0.000, 44603.718],
[0.000, 0.000, 2027.188, 719.924, 0.000, 2701.488, 4379.099, 0.000, 0.000, 44381.544]])
def test_loop_step(self):
cash, amounts, fee, value = qt.core._loop_step(pre_cash=10000,
pre_amounts=np.zeros(7, dtype='float'),
op=self.op_signals[0],
prices=self.prices[0],
rate=self.rate,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 1 result in complete looping: \n'
f'cash: {cash}\n'
f'amounts: {np.round(amounts, 2)}\n'
f'value: {value}')
self.assertAlmostEqual(cash, 7500)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 555.5555556, 0, 0])))
self.assertAlmostEqual(value, 10000.00)
cash, amounts, fee, value = qt.core._loop_step(pre_cash=5059.722222,
pre_amounts=np.array([0, 0, 0, 0, 555.5555556,
205.0653595, 321.0891813]),
op=self.op_signals[3],
prices=self.prices[3],
rate=self.rate,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 4 result in complete looping: \n'
f'cash: {cash}\n'
f'amounts: {np.round(amounts, 2)}\n'
f'value: {value}')
self.assertAlmostEqual(cash, 1201.2775195, 5)
self.assertTrue(np.allclose(amounts, np.array([346.9824373, 416.6786936, 0, 0,
555.5555556, 205.0653595, 321.0891813])))
self.assertAlmostEqual(value, 9646.111756, 5)
cash, amounts, fee, value = qt.core._loop_step(pre_cash=6179.77423,
pre_amounts=np.array([115.7186428, 416.6786936, 735.6441811,
269.8495646, 0, 1877.393446, 0]),
op=self.op_signals[31],
prices=self.prices[31],
rate=self.rate,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 32 result in complete looping: \n'
f'cash: {cash}\n'
f'amounts: {np.round(amounts, 2)}\n'
f'value: {value}')
self.assertAlmostEqual(cash, 0, 5)
self.assertTrue(np.allclose(amounts, np.array([1073.823175, 416.6786936, 735.6441811,
269.8495646, 0, 1877.393446, 0])))
self.assertAlmostEqual(value, 21133.50798, 5)
cash, amounts, fee, value = qt.core._loop_step(pre_cash=10000,
pre_amounts=np.array([1073.823175, 416.6786936, 735.6441811,
269.8495646, 0, 938.6967231, 1339.207325]),
op=self.op_signals[60],
prices=self.prices[60],
rate=self.rate,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 61 result in complete looping: \n'
f'cash: {cash}\n'
f'amounts: {np.round(amounts, 2)}\n'
f'value: {value}')
self.assertAlmostEqual(cash, 5001.424618, 5)
self.assertTrue(np.allclose(amounts, np.array([1073.823175, 416.6786936, 735.6441811, 269.8495646,
1785.205494, 938.6967231, 1339.207325])))
self.assertAlmostEqual(value, 33323.83588, 5)
cash, amounts, fee, value = qt.core._loop_step(pre_cash=cash,
pre_amounts=amounts,
op=self.op_signals[61],
prices=self.prices[61],
rate=self.rate,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 62 result in complete looping: \n'
f'cash: {cash}\n'
f'amounts: {np.round(amounts, 2)}\n'
f'value: {value}')
self.assertAlmostEqual(cash, 0, 5)
self.assertTrue(np.allclose(amounts, np.array([0, 416.6786936, 1290.69215, 719.9239224,
1785.205494, 2701.487958, 1339.207325])))
self.assertAlmostEqual(value, 32820.29007, 5)
cash, amounts, fee, value = qt.core._loop_step(pre_cash=915.6208259,
pre_amounts=np.array([0, 416.6786936, 1290.69215, 719.9239224,
0, 2701.487958, 4379.098907]),
op=self.op_signals[96],
prices=self.prices[96],
rate=self.rate,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 97 result in complete looping: \n'
f'cash: {cash}\n'
f'amounts: {np.round(amounts, 2)}\n'
f'value: {value}')
self.assertAlmostEqual(cash, 5140.742779, 5)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 1290.69215, 719.9239224, 0, 2701.487958, 4379.098907])))
self.assertAlmostEqual(value, 45408.73655, 4)
cash, amounts, fee, value = qt.core._loop_step(pre_cash=cash,
pre_amounts=amounts,
op=self.op_signals[97],
prices=self.prices[97],
rate=self.rate,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 98 result in complete looping: \n'
f'cash: {cash}\n'
f'amounts: {np.round(amounts, 2)}\n'
f'value: {value}')
self.assertAlmostEqual(cash, 0, 5)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 2027.18825, 719.9239224, 0, 2701.487958, 4379.098907])))
self.assertAlmostEqual(value, 47413.40131, 4)
def test_loop(self):
res = apply_loop(op_list=self.op_signal_df,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
inflation_rate=0,
print_log=True)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
self.assertTrue(np.allclose(res.values, self.res, 5))
        print('test assertion errors in apply_loop: detect incompatible moq values')
self.assertRaises(AssertionError,
apply_loop,
self.op_signal_df,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
self.op_signal_df,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
        print('test loop results with moq_buy=100 and moq_sell=1')
res = apply_loop(op_list=self.op_signal_df,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
inflation_rate=0,
print_log=True)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
class TestOperatorSubFuncs(unittest.TestCase):
def setUp(self):
mask_list = [[0.0, 0.0, 0.0, 0.0],
[0.5, 0.0, 0.0, 1.0],
[0.5, 0.0, 0.3, 1.0],
[0.5, 0.0, 0.3, 0.5],
[0.5, 0.5, 0.3, 0.5],
[0.5, 0.5, 0.3, 1.0],
[0.3, 0.5, 0.0, 1.0],
[0.3, 1.0, 0.0, 1.0]]
signal_list = [[0.0, 0.0, 0.0, 0.0],
[0.5, 0.0, 0.0, 1.0],
[0.0, 0.0, 0.3, 0.0],
[0.0, 0.0, 0.0, -0.5],
[0.0, 0.5, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.5],
[-0.4, 0.0, -1.0, 0.0],
[0.0, 0.5, 0.0, 0.0]]
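        # signal_list holds the expected output of qt.mask_to_signal() for mask_list:
        # an increase of a mask entry appears as a buy signal equal to the increase
        # (0.5 -> 1.0 gives +0.5), while a decrease appears as a sell signal equal to
        # the relative reduction of the holding (0.5 -> 0.3 gives (0.3 - 0.5) / 0.5 = -0.4).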
mask_multi = [[[0, 0, 1, 1, 0],
[0, 1, 1, 1, 0],
[1, 1, 1, 1, 0],
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[1, 1, 0, 1, 1],
[0, 1, 0, 0, 1],
[1, 1, 0, 0, 1],
[1, 1, 0, 0, 1],
[1, 0, 0, 0, 1]],
[[0, 0, 1, 0, 1],
[0, 1, 1, 1, 1],
[1, 1, 0, 1, 1],
[1, 1, 1, 0, 0],
[1, 1, 0, 0, 0],
[1, 0, 0, 0, 0],
[1, 0, 0, 0, 0],
[1, 1, 0, 0, 0],
[1, 1, 0, 1, 0],
[0, 1, 0, 1, 0]],
[[0, 0, 0., 0, 1],
[0, 0, 1., 0, 1],
[0, 0, 1., 0, 1],
[1, 0, 1., 0, 1],
[1, 1, .5, 1, 1],
[1, 0, .5, 1, 0],
[1, 1, .5, 1, 0],
[0, 1, 0., 0, 0],
[1, 0, 0., 0, 0],
[0, 1, 0., 0, 0]]]
signal_multi = [[[0., 0., 1., 1., 0.],
[0., 1., 0., 0., 0.],
[1., 0., 0., 0., 0.],
[0., 0., 0., 0., 1.],
[0., 0., 0., 0., 0.],
[0., 0., -1., 0., 0.],
[-1., 0., 0., -1., 0.],
[1., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., -1., 0., 0., 0.]],
[[0., 0., 1., 0., 1.],
[0., 1., 0., 1., 0.],
[1., 0., -1., 0., 0.],
[0., 0., 1., -1., -1.],
[0., 0., -1., 0., 0.],
[0., -1., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 1., 0., 0., 0.],
[0., 0., 0., 1., 0.],
[-1., 0., 0., 0., 0.]],
[[0., 0., 0., 0., 1.],
[0., 0., 1., 0., 0.],
[0., 0., 0., 0., 0.],
[1., 0., 0., 0., 0.],
[0., 1., -0.5, 1., 0.],
[0., -1., 0., 0., -1.],
[0., 1., 0., 0., 0.],
[-1., 0., -1., -1., 0.],
[1., -1., 0., 0., 0.],
[-1., 1., 0., 0., 0.]]]
self.mask = np.array(mask_list)
self.multi_mask = np.array(mask_multi)
self.correct_signal = np.array(signal_list)
self.correct_multi_signal = np.array(signal_multi)
self.op = qt.Operator()
def test_ls_blend(self):
"""测试多空蒙板的混合器,三种混合方式均需要测试"""
ls_mask1 = [[0.0, 0.0, 0.0, -0.0],
[1.0, 0.0, 0.0, -1.0],
[1.0, 0.0, 1.0, -1.0],
[1.0, 0.0, 1.0, -1.0],
[1.0, 1.0, 1.0, -1.0],
[1.0, 1.0, 1.0, -1.0],
[0.0, 1.0, 0.0, -1.0],
[0.0, 1.0, 0.0, -1.0]]
ls_mask2 = [[0.0, 0.0, 0.5, -0.5],
[0.0, 0.0, 0.5, -0.3],
[0.0, 0.5, 0.5, -0.0],
[0.5, 0.5, 0.3, -0.0],
[0.5, 0.5, 0.3, -0.3],
[0.5, 0.5, 0.0, -0.5],
[0.3, 0.5, 0.0, -1.0],
[0.3, 1.0, 0.0, -1.0]]
ls_mask3 = [[0.5, 0.0, 1.0, -0.4],
[0.4, 0.0, 1.0, -0.3],
[0.3, 0.0, 0.8, -0.2],
[0.2, 0.0, 0.6, -0.1],
[0.1, 0.2, 0.4, -0.2],
[0.1, 0.3, 0.2, -0.5],
[0.1, 0.4, 0.0, -0.5],
[0.1, 0.5, 0.0, -1.0]]
# result with blender 'avg'
ls_blnd_avg = [[0.16666667, 0.00000000, 0.50000000, -0.3],
[0.46666667, 0.00000000, 0.50000000, -0.53333333],
[0.43333333, 0.16666667, 0.76666667, -0.4],
[0.56666667, 0.16666667, 0.63333333, -0.36666667],
[0.53333333, 0.56666667, 0.56666667, -0.5],
[0.53333333, 0.60000000, 0.40000000, -0.66666667],
[0.13333333, 0.63333333, 0.00000000, -0.83333333],
[0.13333333, 0.83333333, 0.00000000, -1.]]
# result with blender 'str-1.5'
ls_blnd_str_15 = [[0, 0, 1, 0],
[0, 0, 1, -1],
[0, 0, 1, 0],
[1, 0, 1, 0],
[1, 1, 1, -1],
[1, 1, 0, -1],
[0, 1, 0, -1],
[0, 1, 0, -1]]
# result with blender 'pos-2' == 'pos-2-0'
ls_blnd_pos_2 = [[0, 0, 1, -1],
[1, 0, 1, -1],
[1, 0, 1, -1],
[1, 0, 1, -1],
[1, 1, 1, -1],
[1, 1, 1, -1],
[1, 1, 0, -1],
[1, 1, 0, -1]]
# result with blender 'pos-2-0.25'
ls_blnd_pos_2_25 = [[0, 0, 1, -1],
[1, 0, 1, -1],
[1, 0, 1, 0],
[1, 0, 1, 0],
[1, 1, 1, -1],
[1, 1, 0, -1],
[0, 1, 0, -1],
[0, 1, 0, -1]]
        # result with blender 'avg_pos-2' == 'avg_pos-2-0'
ls_blnd_avg_pos_2 = [[0.00000000, 0.00000000, 0.50000000, -0.3],
[0.46666667, 0.00000000, 0.50000000, -0.53333333],
[0.43333333, 0.00000000, 0.76666667, -0.4],
[0.56666667, 0.00000000, 0.63333333, -0.36666667],
[0.53333333, 0.56666667, 0.56666667, -0.5],
[0.53333333, 0.60000000, 0.40000000, -0.66666667],
[0.13333333, 0.63333333, 0.00000000, -0.83333333],
[0.13333333, 0.83333333, 0.00000000, -1.]]
# result with blender 'avg_pos-2-0.25'
ls_blnd_avg_pos_2_25 = [[0.00000000, 0.00000000, 0.50000000, -0.3],
[0.46666667, 0.00000000, 0.50000000, -0.53333333],
[0.43333333, 0.00000000, 0.76666667, 0.00000000],
[0.56666667, 0.00000000, 0.63333333, 0.00000000],
[0.53333333, 0.56666667, 0.56666667, -0.5],
[0.53333333, 0.60000000, 0.00000000, -0.66666667],
[0.00000000, 0.63333333, 0.00000000, -0.83333333],
[0.00000000, 0.83333333, 0.00000000, -1.]]
# result with blender 'combo'
ls_blnd_combo = [[0.5, 0., 1.5, -0.9],
[1.4, 0., 1.5, -1.6],
[1.3, 0.5, 2.3, -1.2],
[1.7, 0.5, 1.9, -1.1],
[1.6, 1.7, 1.7, -1.5],
[1.6, 1.8, 1.2, -2.],
[0.4, 1.9, 0., -2.5],
[0.4, 2.5, 0., -3.]]
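        # Worked example for the expected blends above, taking row 3, column 3 of the
        # three masks (values 1.0, 0.5 and 0.8); the rules are inferred from the
        # expected arrays, see qt.Operator._ls_blend for the implementation:
        #   'avg'        element-wise mean:   (1.0 + 0.5 + 0.8) / 3 = 0.76667
        #   'combo'      element-wise sum:     1.0 + 0.5 + 0.8      = 2.3
        #   'str-1.5'    sum vs. threshold:    2.3 >= 1.5           -> long (1)
        #   'pos-2'      long when at least 2 of the 3 masks hold a position
        #   'pos-2-0.25' like 'pos-2', but only positions above 0.25 count as votes
        #   'avg_pos'    like 'pos', but keeps the averaged weight where positions hold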
ls_masks = np.array([np.array(ls_mask1), np.array(ls_mask2), np.array(ls_mask3)])
# test A: the ls_blender 'str-T'
self.op.set_blender('ls', 'str-1.5')
res = qt.Operator._ls_blend(self.op, ls_masks)
print(f'test A: result of ls_blender: str-1.5: \n{res}')
self.assertTrue(np.allclose(res, ls_blnd_str_15))
# test B: the ls_blender 'pos-N-T'
self.op.set_blender('ls', 'pos-2')
res = qt.Operator._ls_blend(self.op, ls_masks)
print(f'Test B-1: result of ls_blender: pos-2: \n{res}')
self.assertTrue(np.allclose(res, ls_blnd_pos_2))
self.op.set_blender('ls', 'pos-2-0.25')
res = qt.Operator._ls_blend(self.op, ls_masks)
print(f'Test B-2: result of ls_blender: pos-2-0.25: \n{res}')
self.assertTrue(np.allclose(res, ls_blnd_pos_2_25))
# test C: the ls_blender 'avg_pos-N-T'
self.op.set_blender('ls', 'avg_pos-2')
res = qt.Operator._ls_blend(self.op, ls_masks)
print(f'Test C-1: result of ls_blender: avg_pos-2: \n{res}')
        self.assertTrue(np.allclose(res, ls_blnd_avg_pos_2, 5))
self.op.set_blender('ls', 'avg_pos-2-0.25')
res = qt.Operator._ls_blend(self.op, ls_masks)
print(f'Test C-2: result of ls_blender: avg_pos-2-0.25: \n{res}')
        self.assertTrue(np.allclose(res, ls_blnd_avg_pos_2_25, 5))
# test D: the ls_blender 'avg'
self.op.set_blender('ls', 'avg')
res = qt.Operator._ls_blend(self.op, ls_masks)
print(f'Test D: result of ls_blender: avg: \n{res}')
self.assertTrue(np.allclose(res, ls_blnd_avg))
# test E: the ls_blender 'combo'
self.op.set_blender('ls', 'combo')
res = qt.Operator._ls_blend(self.op, ls_masks)
print(f'Test E: result of ls_blender: combo: \n{res}')
self.assertTrue(np.allclose(res, ls_blnd_combo))
def test_sel_blend(self):
"""测试选股蒙板的混合器,包括所有的混合模式"""
# step2, test blending of sel masks
pass
def test_bs_blend(self):
"""测试买卖信号混合模式"""
# step3, test blending of op signals
pass
def test_unify(self):
print('Testing Unify functions\n')
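        # qt.unify() normalizes each row of the input so that the row sums to 1,
        # i.e. row / row.sum(): [3, 2, 5] -> [0.3, 0.2, 0.5]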
l1 = np.array([[3, 2, 5], [5, 3, 2]])
res = qt.unify(l1)
target = np.array([[0.3, 0.2, 0.5], [0.5, 0.3, 0.2]])
        self.assertIs(np.allclose(res, target), True, 'each row should sum to 1')
l1 = np.array([[1, 1, 1, 1, 1], [2, 2, 2, 2, 2]])
res = qt.unify(l1)
target = np.array([[0.2, 0.2, 0.2, 0.2, 0.2], [0.2, 0.2, 0.2, 0.2, 0.2]])
        self.assertIs(np.allclose(res, target), True, 'each row should sum to 1')
def test_mask_to_signal(self):
signal = qt.mask_to_signal(self.mask)
print(f'Test A: single mask to signal, result: \n{signal}')
self.assertTrue(np.allclose(signal, self.correct_signal))
signal = qt.mask_to_signal(self.multi_mask)
        print(f'Test B: multiple masks to signal, result: \n{signal}')
self.assertTrue(np.allclose(signal, self.correct_multi_signal))
class TestLSStrategy(qt.RollingTiming):
"""用于test测试的简单多空蒙板生成策略。基于RollingTiming滚动择时方法生成
该策略有两个参数,N与Price
N用于计算OHLC价格平均值的N日简单移动平均,判断,当移动平均值大于等于Price时,状态为看多,否则为看空
"""
def __init__(self):
super().__init__(stg_name='test_LS',
stg_text='test long/short strategy',
par_count=2,
par_types='discr, conti',
par_bounds_or_enums=([1, 5], [2, 10]),
data_types='close, open, high, low',
data_freq='d',
window_length=5)
pass
    def _realize(self, hist_data: np.ndarray, params: tuple):
        n, price = params
        h = hist_data.T
        # OHLC mean price (data_types order: close, open, high, low)
        avg = (h[0] + h[1] + h[2] + h[3]) / 4
        ma = sma(avg, n)
        # long (1) when the n-day SMA reaches the price threshold, otherwise short (0)
        if ma[-1] < price:
            return 0
        else:
            return 1
class TestSelStrategy(qt.SimpleSelecting):
"""用于Test测试的简单选股策略,基于Selecting策略生成
策略没有参数,选股周期为5D
在每个选股周期内,从股票池的三只股票中选出今日变化率 = (今收-昨收)/平均股价(OHLC平均股价)最高的两支,放入中选池,否则落选。
选股比例为平均分配
"""
def __init__(self):
super().__init__(stg_name='test_SEL',
stg_text='test portfolio selection strategy',
par_count=0,
par_types='',
par_bounds_or_enums=(),
data_types='high, low, close',
data_freq='d',
sample_freq='10d',
window_length=5)
pass
    def _realize(self, hist_data: np.ndarray, params: tuple):
        # mean price of each share over all days and price types in the window
        avg = np.nanmean(hist_data, axis=(1, 2))
        # day-over-day difference of the close price (type index 2)
        dif = (hist_data[:, :, 2] - np.roll(hist_data[:, :, 2], 1, 1))
        # last non-nan difference for each share
        dif_no_nan = np.array([arr[~np.isnan(arr)][-1] for arr in dif])
        difper = dif_no_nan / avg
        # argsort is ascending, so dropping the first entry keeps the top two of three
        large2 = difper.argsort()[1:]
        chosen = np.zeros_like(avg)
        chosen[large2] = 0.5
        return chosen
class TestSelStrategyDiffTime(qt.SimpleSelecting):
"""用于Test测试的简单选股策略,基于Selecting策略生成
策略没有参数,选股周期为5D
在每个选股周期内,从股票池的三只股票中选出今日变化率 = (今收-昨收)/平均股价(OHLC平均股价)最高的两支,放入中选池,否则落选。
选股比例为平均分配
"""
def __init__(self):
super().__init__(stg_name='test_SEL',
stg_text='test portfolio selection strategy',
par_count=0,
par_types='',
par_bounds_or_enums=(),
data_types='close, low, open',
data_freq='d',
sample_freq='w',
window_length=2)
pass
    def _realize(self, hist_data: np.ndarray, params: tuple):
        avg = hist_data.mean(axis=1).squeeze()
        difper = (hist_data[:, :, 0] - np.roll(hist_data[:, :, 0], 1))[:, -1] / avg
        # note: argsort()[0:2] keeps the two smallest values, unlike TestSelStrategy above
        large2 = difper.argsort()[0:2]
        chosen = np.zeros_like(avg)
        chosen[large2] = 0.5
        return chosen
class TestSigStrategy(qt.SimpleTiming):
"""用于Test测试的简单信号生成策略,基于SimpleTiming策略生成
策略有三个参数,第一个参数为ratio,另外两个参数为price1以及price2
ratio是k线形状比例的阈值,定义为abs((C-O)/(H-L))。当这个比值小于ratio阈值时,判断该K线为十字交叉(其实还有丁字等多种情形,但这里做了
简化处理。
信号生成的规则如下:
1,当某个K线出现十字交叉,且昨收与今收之差大于price1时,买入信号
2,当某个K线出现十字交叉,且昨收与今收之差小于price2时,卖出信号
"""
def __init__(self):
super().__init__(stg_name='test_SIG',
stg_text='test signal creation strategy',
par_count=3,
par_types='conti, conti, conti',
par_bounds_or_enums=([2, 10], [0, 3], [0, 3]),
data_types='close, open, high, low',
window_length=2)
pass
    def _realize(self, hist_data: np.ndarray, params: tuple):
        r, price1, price2 = params
        h = hist_data.T
        # candle-shape ratio abs((C - O) / (H - L)); small values indicate a doji
        ratio = np.abs((h[0] - h[1]) / (h[3] - h[2]))
        # day-over-day change of the close price
        diff = h[0] - np.roll(h[0], 1)
        sig = np.where((ratio < r) & (diff > price1),
                       1,
                       np.where((ratio < r) & (diff < price2), -1, 0))
        return sig
class TestOperator(unittest.TestCase):
"""全面测试Operator对象的所有功能。包括:
1, Strategy 参数的设置
2, 历史数据的获取与分配提取
3, 策略优化参数的批量设置和优化空间的获取
4, 策略输出值的正确性验证
5, 策略结果的混合结果确认
"""
def setUp(self):
"""prepare data for Operator test"""
print('start testing HistoryPanel object\n')
        # build up test data: a 4-type, 3-share, 50-day price matrix that contains
        # nan values on some days for some of the shares in the pool
# for share1:
data_rows = 50
share1_close = [10.04, 10, 10, 9.99, 9.97, 9.99, 10.03, 10.03, 10.06, 10.06, 10.11,
10.09, 10.07, 10.06, 10.09, 10.03, 10.03, 10.06, 10.08, 10, 9.99,
10.03, 10.03, 10.06, 10.03, 9.97, 9.94, 9.83, 9.77, 9.84, 9.91, 9.93,
9.96, 9.91, 9.91, 9.88, 9.91, 9.64, 9.56, 9.57, 9.55, 9.57, 9.61, 9.61,
9.55, 9.57, 9.63, 9.64, 9.65, 9.62]
share1_open = [10.02, 10, 9.98, 9.97, 9.99, 10.01, 10.04, 10.06, 10.06, 10.11,
10.11, 10.07, 10.06, 10.09, 10.03, 10.02, 10.06, 10.08, 9.99, 10,
10.03, 10.02, 10.06, 10.03, 9.97, 9.94, 9.83, 9.78, 9.77, 9.91, 9.92,
9.97, 9.91, 9.9, 9.88, 9.91, 9.63, 9.64, 9.57, 9.55, 9.58, 9.61, 9.62,
9.55, 9.57, 9.61, 9.63, 9.64, 9.61, 9.56]
share1_high = [10.07, 10, 10, 10, 10.03, 10.03, 10.04, 10.09, 10.1, 10.14, 10.11, 10.1,
10.09, 10.09, 10.1, 10.05, 10.07, 10.09, 10.1, 10, 10.04, 10.04, 10.06,
10.09, 10.05, 9.97, 9.96, 9.86, 9.77, 9.92, 9.94, 9.97, 9.97, 9.92, 9.92,
9.92, 9.93, 9.64, 9.58, 9.6, 9.58, 9.62, 9.62, 9.64, 9.59, 9.62, 9.63,
9.7, 9.66, 9.64]
share1_low = [9.99, 10, 9.97, 9.97, 9.97, 9.98, 9.99, 10.03, 10.03, 10.04, 10.11, 10.07,
10.05, 10.03, 10.03, 10.01, 9.99, 10.03, 9.95, 10, 9.95, 10, 10.01, 9.99,
9.96, 9.89, 9.83, 9.77, 9.77, 9.8, 9.9, 9.91, 9.89, 9.89, 9.87, 9.85, 9.6,
9.64, 9.53, 9.55, 9.54, 9.55, 9.58, 9.54, 9.53, 9.53, 9.63, 9.64, 9.59, 9.56]
# for share2:
share2_close = [9.68, 9.87, 9.86, 9.87, 9.79, 9.82, 9.8, 9.66, 9.62, 9.58, 9.69, 9.78, 9.75,
9.96, 9.9, 10.04, 10.06, 10.08, 10.24, 10.24, 10.24, 9.86, 10.13, 10.12,
10.1, 10.25, 10.24, 10.22, 10.75, 10.64, 10.56, 10.6, 10.42, 10.25, 10.24,
10.49, 10.57, 10.63, 10.48, 10.37, 10.96, 11.02, np.nan, np.nan, 10.88, 10.87, 11.01,
11.01, 11.58, 11.8]
share2_open = [9.88, 9.88, 9.89, 9.75, 9.74, 9.8, 9.62, 9.65, 9.58, 9.67, 9.81, 9.8, 10,
9.95, 10.1, 10.06, 10.14, 9.9, 10.2, 10.29, 9.86, 9.48, 10.01, 10.24, 10.26,
10.24, 10.12, 10.65, 10.64, 10.56, 10.42, 10.43, 10.29, 10.3, 10.44, 10.6,
10.67, 10.46, 10.39, 10.9, 11.01, 11.01, np.nan, np.nan, 10.82, 11.02, 10.96,
11.55, 11.74, 11.8]
share2_high = [9.91, 10.04, 9.93, 10.04, 9.84, 9.88, 9.99, 9.7, 9.67, 9.71, 9.85, 9.9, 10,
10.2, 10.11, 10.18, 10.21, 10.26, 10.38, 10.47, 10.42, 10.07, 10.24, 10.27,
10.38, 10.43, 10.39, 10.65, 10.84, 10.65, 10.73, 10.63, 10.51, 10.35, 10.46,
10.63, 10.74, 10.76, 10.54, 11.02, 11.12, 11.17, np.nan, np.nan, 10.92, 11.15,
11.11, 11.55, 11.95, 11.93]
share2_low = [9.63, 9.84, 9.81, 9.74, 9.67, 9.72, 9.57, 9.54, 9.51, 9.47, 9.68, 9.63, 9.75,
9.65, 9.9, 9.93, 10.03, 9.8, 10.14, 10.09, 9.78, 9.21, 9.11, 9.68, 10.05,
10.12, 9.89, 9.89, 10.59, 10.43, 10.34, 10.32, 10.21, 10.2, 10.18, 10.36,
10.51, 10.41, 10.32, 10.37, 10.87, 10.95, np.nan, np.nan, 10.65, 10.71, 10.75,
10.91, 11.31, 11.58]
# for share3:
share3_close = [6.64, 7.26, 7.03, 6.87, np.nan, 6.64, 6.85, 6.7, 6.39, 6.22, 5.92, 5.91, 6.11,
5.91, 6.23, 6.28, 6.28, 6.27, np.nan, 5.56, 5.67, 5.16, 5.69, 6.32, 6.14, 6.25,
5.79, 5.26, 5.05, 5.45, 6.06, 6.21, 5.69, 5.46, 6.02, 6.69, 7.43, 7.72, 8.16,
7.83, 8.7, 8.71, 8.88, 8.54, 8.87, 8.87, 8.18, 7.8, 7.97, 8.25]
share3_open = [7.26, 7, 6.88, 6.91, np.nan, 6.81, 6.63, 6.45, 6.16, 6.24, 5.96, 5.97, 5.96,
6.2, 6.35, 6.11, 6.37, 5.58, np.nan, 5.65, 5.19, 5.42, 6.3, 6.15, 6.05, 5.89,
5.22, 5.2, 5.07, 6.04, 6.12, 5.85, 5.67, 6.02, 6.04, 7.07, 7.64, 7.99, 7.59,
8.73, 8.72, 8.97, 8.58, 8.71, 8.77, 8.4, 7.95, 7.76, 8.25, 7.51]
share3_high = [7.41, 7.31, 7.14, 7, np.nan, 6.82, 6.96, 6.85, 6.5, 6.34, 6.04, 6.02, 6.12, 6.38,
6.43, 6.46, 6.43, 6.27, np.nan, 6.01, 5.67, 5.67, 6.35, 6.32, 6.43, 6.36, 5.79,
5.47, 5.65, 6.04, 6.14, 6.23, 5.83, 6.25, 6.27, 7.12, 7.82, 8.14, 8.27, 8.92,
8.76, 9.15, 8.9, 9.01, 9.16, 9, 8.27, 7.99, 8.33, 8.25]
share3_low = [6.53, 6.87, 6.83, 6.7, np.nan, 6.63, 6.57, 6.41, 6.15, 6.07, 5.89, 5.82, 5.73, 5.81,
6.1, 6.06, 6.16, 5.57, np.nan, 5.51, 5.19, 5.12, 5.69, 6.01, 5.97, 5.86, 5.18, 5.19,
4.96, 5.45, 5.84, 5.85, 5.28, 5.42, 6.02, 6.69, 7.28, 7.64, 7.25, 7.83, 8.41, 8.66,
8.53, 8.54, 8.73, 8.27, 7.95, 7.67, 7.8, 7.51]
# for sel_finance test
shares_eps = np.array([[np.nan, np.nan, np.nan],
[0.1, np.nan, np.nan],
[np.nan, 0.2, np.nan],
[np.nan, np.nan, 0.3],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 0.2],
[0.1, np.nan, np.nan],
[np.nan, 0.3, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[0.3, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, 0.3, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 0.3],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, 0, 0.2],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[0.1, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 0.2],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[0.15, np.nan, np.nan],
[np.nan, 0.1, np.nan],
[np.nan, np.nan, np.nan],
[0.1, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 0.3],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[0.2, np.nan, np.nan],
[np.nan, 0.5, np.nan],
[0.4, np.nan, 0.3],
[np.nan, np.nan, np.nan],
[np.nan, 0.3, np.nan],
[0.9, np.nan, np.nan],
[np.nan, np.nan, 0.1]])
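        # shares_eps is a sparse eps series: values appear only on report days and
        # are NaN elsewhere; it feeds the financial-selection tests via self.hp2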
self.date_indices = ['2016-07-01', '2016-07-04', '2016-07-05', '2016-07-06',
'2016-07-07', '2016-07-08', '2016-07-11', '2016-07-12',
'2016-07-13', '2016-07-14', '2016-07-15', '2016-07-18',
'2016-07-19', '2016-07-20', '2016-07-21', '2016-07-22',
'2016-07-25', '2016-07-26', '2016-07-27', '2016-07-28',
'2016-07-29', '2016-08-01', '2016-08-02', '2016-08-03',
'2016-08-04', '2016-08-05', '2016-08-08', '2016-08-09',
'2016-08-10', '2016-08-11', '2016-08-12', '2016-08-15',
'2016-08-16', '2016-08-17', '2016-08-18', '2016-08-19',
'2016-08-22', '2016-08-23', '2016-08-24', '2016-08-25',
'2016-08-26', '2016-08-29', '2016-08-30', '2016-08-31',
'2016-09-01', '2016-09-02', '2016-09-05', '2016-09-06',
'2016-09-07', '2016-09-08']
self.shares = ['000010', '000030', '000039']
self.types = ['close', 'open', 'high', 'low']
self.sel_finance_tyeps = ['eps']
self.test_data_3D = np.zeros((3, data_rows, 4))
self.test_data_2D = np.zeros((data_rows, 3))
self.test_data_2D2 = np.zeros((data_rows, 4))
self.test_data_sel_finance = np.empty((3, data_rows, 1))
# Build up 3D data
self.test_data_3D[0, :, 0] = share1_close
self.test_data_3D[0, :, 1] = share1_open
self.test_data_3D[0, :, 2] = share1_high
self.test_data_3D[0, :, 3] = share1_low
self.test_data_3D[1, :, 0] = share2_close
self.test_data_3D[1, :, 1] = share2_open
self.test_data_3D[1, :, 2] = share2_high
self.test_data_3D[1, :, 3] = share2_low
self.test_data_3D[2, :, 0] = share3_close
self.test_data_3D[2, :, 1] = share3_open
self.test_data_3D[2, :, 2] = share3_high
self.test_data_3D[2, :, 3] = share3_low
self.test_data_sel_finance[:, :, 0] = shares_eps.T
self.hp1 = qt.HistoryPanel(values=self.test_data_3D,
levels=self.shares,
columns=self.types,
rows=self.date_indices)
print(f'in test Operator, history panel is created for timing test')
self.hp1.info()
self.hp2 = qt.HistoryPanel(values=self.test_data_sel_finance,
levels=self.shares,
columns=self.sel_finance_tyeps,
rows=self.date_indices)
print(f'in test_Operator, history panel is created for selection finance test:')
self.hp2.info()
self.op = qt.Operator(selecting_types=['all'], timing_types='dma', ricon_types='urgent')
def test_info(self):
"""Test information output of Operator"""
print(f'test printing information of operator object')
# self.op.info()
def test_operator_ready(self):
"""test the method ready of Operator"""
pass
# print(f'operator is ready? "{self.op.ready}"')
def test_operator_add_strategy(self):
"""test adding strategies to Operator"""
pass
# self.assertIsInstance(self.op, qt.Operator)
# self.assertIsInstance(self.op.timing[0], qt.TimingDMA)
# self.assertIsInstance(self.op.selecting[0], qt.SelectingAll)
# self.assertIsInstance(self.op.ricon[0], qt.RiconUrgent)
# self.assertEqual(self.op.selecting_count, 1)
# self.assertEqual(self.op.strategy_count, 3)
# self.assertEqual(self.op.ricon_count, 1)
# self.assertEqual(self.op.timing_count, 1)
# self.assertEqual(self.op.ls_blender, 'pos-1')
# print(f'test adding strategies into existing op')
# print('test adding strategy by string')
# self.op.add_strategy('macd', 'timing')
# self.assertIsInstance(self.op.timing[0], qt.TimingDMA)
# self.assertIsInstance(self.op.timing[1], qt.TimingMACD)
# self.assertEqual(self.op.selecting_count, 1)
# self.assertEqual(self.op.strategy_count, 4)
# self.assertEqual(self.op.ricon_count, 1)
# self.assertEqual(self.op.timing_count, 2)
# self.assertEqual(self.op.ls_blender, 'pos-1')
# self.op.add_strategy('random', 'selecting')
# self.assertIsInstance(self.op.selecting[0], qt.TimingDMA)
# self.assertIsInstance(self.op.selecting[1], qt.TimingMACD)
# self.assertEqual(self.op.selecting_count, 2)
# self.assertEqual(self.op.strategy_count, 5)
# self.assertEqual(self.op.ricon_count, 1)
# self.assertEqual(self.op.timing_count, 2)
# self.assertEqual(self.op.selecting_blender, '0 or 1')
# self.op.add_strategy('none', 'ricon')
# self.assertIsInstance(self.op.ricon[0], qt.TimingDMA)
# self.assertIsInstance(self.op.ricon[1], qt.TimingMACD)
# self.assertEqual(self.op.selecting_count, 2)
# self.assertEqual(self.op.strategy_count, 6)
# self.assertEqual(self.op.ricon_count, 2)
# self.assertEqual(self.op.timing_count, 2)
# print('test adding strategy by list')
# self.op.add_strategy(['dma', 'macd'], 'timing')
# print('test adding strategy by object')
# test_ls = TestLSStrategy()
# self.op.add_strategy(test_ls, 'timing')
def test_operator_remove_strategy(self):
"""test removing strategies from Operator"""
pass
# self.op.remove_strategy(stg='macd')
def test_property_get(self):
self.assertIsInstance(self.op, qt.Operator)
self.assertIsInstance(self.op.timing[0], qt.TimingDMA)
self.assertIsInstance(self.op.selecting[0], qt.SelectingAll)
self.assertIsInstance(self.op.ricon[0], qt.RiconUrgent)
self.assertEqual(self.op.selecting_count, 1)
self.assertEqual(self.op.strategy_count, 3)
self.assertEqual(self.op.ricon_count, 1)
self.assertEqual(self.op.timing_count, 1)
print(self.op.strategies, '\n', [qt.TimingDMA, qt.SelectingAll, qt.RiconUrgent])
print(f'info of Timing strategy: \n{self.op.strategies[0].info()}')
self.assertEqual(len(self.op.strategies), 3)
self.assertIsInstance(self.op.strategies[0], qt.TimingDMA)
self.assertIsInstance(self.op.strategies[1], qt.SelectingAll)
self.assertIsInstance(self.op.strategies[2], qt.RiconUrgent)
self.assertEqual(self.op.strategy_count, 3)
self.assertEqual(self.op.op_data_freq, 'd')
self.assertEqual(self.op.op_data_types, ['close'])
self.assertEqual(self.op.opt_space_par, ([], []))
self.assertEqual(self.op.max_window_length, 270)
self.assertEqual(self.op.ls_blender, 'pos-1')
self.assertEqual(self.op.selecting_blender, '0')
self.assertEqual(self.op.ricon_blender, 'add')
self.assertEqual(self.op.opt_types, [0, 0, 0])
def test_prepare_data(self):
test_ls = TestLSStrategy()
test_sel = TestSelStrategy()
test_sig = TestSigStrategy()
self.op = qt.Operator(timing_types=[test_ls],
selecting_types=[test_sel],
ricon_types=[test_sig])
too_early_cash = qt.CashPlan(dates='2016-01-01', amounts=10000)
early_cash = qt.CashPlan(dates='2016-07-01', amounts=10000)
on_spot_cash = qt.CashPlan(dates='2016-07-08', amounts=10000)
no_trade_cash = qt.CashPlan(dates='2016-07-08, 2016-07-30, 2016-08-11, 2016-09-03',
amounts=[10000, 10000, 10000, 10000])
late_cash = qt.CashPlan(dates='2016-12-31', amounts=10000)
multi_cash = qt.CashPlan(dates='2016-07-08, 2016-08-08', amounts=[10000, 10000])
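        # cash plans probing prepare_data's input validation: first dates before the
        # usable history window (too_early_cash, early_cash), last dates after it
        # (late_cash), or dates falling on non-trading days (no_trade_cash) should
        # all raise in the assertions below, while on_spot_cash is accepted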
self.op.set_parameter(stg_id='t-0',
pars={'000300': (5, 10.),
'000400': (5, 10.),
'000500': (5, 6.)})
self.op.set_parameter(stg_id='s-0',
pars=())
self.op.set_parameter(stg_id='r-0',
pars=(0.2, 0.02, -0.02))
self.op.prepare_data(hist_data=self.hp1,
cash_plan=on_spot_cash)
self.assertIsInstance(self.op._selecting_history_data, list)
self.assertIsInstance(self.op._timing_history_data, list)
self.assertIsInstance(self.op._ricon_history_data, list)
self.assertEqual(len(self.op._selecting_history_data), 1)
self.assertEqual(len(self.op._timing_history_data), 1)
self.assertEqual(len(self.op._ricon_history_data), 1)
sel_hist_data = self.op._selecting_history_data[0]
tim_hist_data = self.op._timing_history_data[0]
ric_hist_data = self.op._ricon_history_data[0]
print(f'in test_prepare_data in TestOperator:')
print('selecting history data:\n', sel_hist_data)
        print('originally passed data in correct sequence:\n', self.test_data_3D[:, :, [2, 3, 0]])
print('difference is \n', sel_hist_data - self.test_data_3D[:, :, [2, 3, 0]])
self.assertTrue(np.allclose(sel_hist_data, self.test_data_3D[:, :, [2, 3, 0]], equal_nan=True))
self.assertTrue(np.allclose(tim_hist_data, self.test_data_3D, equal_nan=True))
self.assertTrue(np.allclose(ric_hist_data, self.test_data_3D[:, 3:, :], equal_nan=True))
        # build history panels used to trigger errors in prepare_data below
empty_hp = qt.HistoryPanel()
correct_hp = qt.HistoryPanel(values=np.random.randint(10, size=(3, 50, 4)),
columns=self.types,
levels=self.shares,
rows=self.date_indices)
too_many_shares = qt.HistoryPanel(values=np.random.randint(10, size=(5, 50, 4)))
too_many_types = qt.HistoryPanel(values=np.random.randint(10, size=(3, 50, 5)))
# raises Error when history panel is empty
self.assertRaises(ValueError,
self.op.prepare_data,
empty_hp,
on_spot_cash)
# raises Error when first investment date is too early
self.assertRaises(AssertionError,
self.op.prepare_data,
correct_hp,
early_cash)
# raises Error when last investment date is too late
self.assertRaises(AssertionError,
self.op.prepare_data,
correct_hp,
late_cash)
# raises Error when some of the investment dates are on no-trade-days
self.assertRaises(ValueError,
self.op.prepare_data,
correct_hp,
no_trade_cash)
# raises Error when number of shares in history data does not fit
self.assertRaises(AssertionError,
self.op.prepare_data,
too_many_shares,
on_spot_cash)
# raises Error when too early cash investment date
self.assertRaises(AssertionError,
self.op.prepare_data,
correct_hp,
too_early_cash)
# raises Error when number of d_types in history data does not fit
self.assertRaises(AssertionError,
self.op.prepare_data,
too_many_types,
on_spot_cash)
# test the effect of data type sequence in strategy definition
def test_operator_generate(self):
"""
:return:
"""
test_ls = TestLSStrategy()
test_sel = TestSelStrategy()
test_sig = TestSigStrategy()
self.op = qt.Operator(timing_types=[test_ls],
selecting_types=[test_sel],
ricon_types=[test_sig])
self.assertIsInstance(self.op, qt.Operator, 'Operator Creation Error')
self.op.set_parameter(stg_id='t-0',
pars={'000300': (5, 10.),
'000400': (5, 10.),
'000500': (5, 6.)})
self.op.set_parameter(stg_id='s-0',
pars=())
        # calling prepare_data before all strategy parameters are set raises an AssertionError
self.assertRaises(AssertionError,
self.op.prepare_data,
hist_data=self.hp1,
cash_plan=qt.CashPlan(dates='2016-07-08', amounts=10000))
self.op.set_parameter(stg_id='r-0',
pars=(0.2, 0.02, -0.02))
self.op.prepare_data(hist_data=self.hp1,
cash_plan=qt.CashPlan(dates='2016-07-08', amounts=10000))
self.op.info()
op_list = self.op.create_signal(hist_data=self.hp1)
print(f'operation list is created: as following:\n {op_list}')
self.assertTrue(isinstance(op_list, pd.DataFrame))
self.assertEqual(op_list.shape, (26, 3))
        # after removing the duplicate-signal filtering code, the signal count grows from 23 to 26,
        # three of them duplicates; filtering duplicates could also drop signals that should be kept,
        # see the comment near line 836 of create_signal() in operator.py
target_op_dates = ['2016/07/08', '2016/07/12', '2016/07/13', '2016/07/14',
'2016/07/18', '2016/07/20', '2016/07/22', '2016/07/26',
'2016/07/27', '2016/07/28', '2016/08/02', '2016/08/03',
'2016/08/04', '2016/08/05', '2016/08/08', '2016/08/10',
'2016/08/16', '2016/08/18', '2016/08/24', '2016/08/26',
'2016/08/29', '2016/08/30', '2016/08/31', '2016/09/05',
'2016/09/06', '2016/09/08']
target_op_values = np.array([[0.0, 1.0, 0.0],
[0.5, -1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.5, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 1.0, 0.0],
[0.0, -1.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 1.0, 0.0],
[-1.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 1.0],
[-1.0, 0.0, 0.0],
[0.0, 1.0, 1.0],
[0.0, 1.0, 0.5],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 1.0, 0.0]])
target_op = pd.DataFrame(data=target_op_values, index=target_op_dates, columns=['000010', '000030', '000039'])
target_op = target_op.rename(index=pd.Timestamp)
print(f'target operation list is as following:\n {target_op}')
dates_pairs = [[date1, date2, date1 == date2]
for date1, date2
in zip(target_op.index.strftime('%m-%d'), op_list.index.strftime('%m-%d'))]
signal_pairs = [[list(sig1), list(sig2), all(sig1 == sig2)]
for sig1, sig2
in zip(list(target_op.values), list(op_list.values))]
print(f'dates side by side:\n '
f'{dates_pairs}')
print(f'signals side by side:\n'
f'{signal_pairs}')
print([item[2] for item in dates_pairs])
print([item[2] for item in signal_pairs])
self.assertTrue(np.allclose(target_op.values, op_list.values, equal_nan=True))
self.assertTrue(all([date1 == date2
for date1, date2
in zip(target_op.index.strftime('%m-%d'), op_list.index.strftime('%m-%d'))]))
def test_operator_parameter_setting(self):
"""
:return:
"""
new_op = qt.Operator(selecting_types=['all'], timing_types='dma', ricon_types='urgent')
print(new_op.strategies, '\n', [qt.TimingDMA, qt.SelectingAll, qt.RiconUrgent])
print(f'info of Timing strategy in new op: \n{new_op.strategies[0].info()}')
self.op.set_parameter('t-0',
pars=(5, 10, 5),
opt_tag=1,
par_boes=((5, 10), (5, 15), (10, 15)),
window_length=10,
data_types=['close', 'open', 'high'])
self.op.set_parameter(stg_id='s-0',
pars=None,
opt_tag=1,
sample_freq='10d',
window_length=10,
data_types='close, open')
self.op.set_parameter(stg_id='r-0',
pars=None,
opt_tag=0,
sample_freq='d',
window_length=20,
data_types='close, open')
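        # the operator-level properties asserted below are aggregates over all
        # strategies: op_data_types is the sorted union of their data_types, and
        # max_window_length is the largest window among them (20, set on ricon)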
self.assertEqual(self.op.timing[0].pars, (5, 10, 5))
self.assertEqual(self.op.timing[0].par_boes, ((5, 10), (5, 15), (10, 15)))
self.assertEqual(self.op.op_data_freq, 'd')
self.assertEqual(self.op.op_data_types, ['close', 'high', 'open'])
self.assertEqual(self.op.opt_space_par,
([(5, 10), (5, 15), (10, 15), (0, 1)], ['discr', 'discr', 'discr', 'conti']))
self.assertEqual(self.op.max_window_length, 20)
self.assertRaises(AssertionError, self.op.set_parameter, stg_id='t-1', pars=(1, 2))
self.assertRaises(AssertionError, self.op.set_parameter, stg_id='t1', pars=(1, 2))
self.assertRaises(AssertionError, self.op.set_parameter, stg_id=32, pars=(1, 2))
self.op.set_blender('selecting', '0 and 1 or 2')
self.op.set_blender('ls', 'str-1.2')
self.assertEqual(self.op.ls_blender, 'str-1.2')
self.assertEqual(self.op.selecting_blender, '0 and 1 or 2')
self.assertEqual(self.op.selecting_blender_expr, ['or', 'and', '0', '1', '2'])
self.assertEqual(self.op.ricon_blender, 'add')
self.assertRaises(ValueError, self.op.set_blender, 'select', '0and1')
self.assertRaises(TypeError, self.op.set_blender, 35, '0 and 1')
self.assertEqual(self.op.opt_space_par,
([(5, 10), (5, 15), (10, 15), (0, 1)], ['discr', 'discr', 'discr', 'conti']))
self.assertEqual(self.op.opt_types, [1, 1, 0])
def test_exp_to_blender(self):
self.op.set_blender('selecting', '0 and 1 or 2')
self.assertEqual(self.op.selecting_blender_expr, ['or', 'and', '0', '1', '2'])
self.op.set_blender('selecting', '0 and ( 1 or 2 )')
self.assertEqual(self.op.selecting_blender_expr, ['and', '0', 'or', '1', '2'])
self.assertRaises(ValueError, self.op.set_blender, 'selecting', '0 and (1 or 2)')
def test_set_opt_par(self):
self.op.set_parameter('t-0',
pars=(5, 10, 5),
opt_tag=1,
par_boes=((5, 10), (5, 15), (10, 15)),
window_length=10,
data_types=['close', 'open', 'high'])
self.op.set_parameter(stg_id='s-0',
pars=(0.5,),
opt_tag=0,
sample_freq='10d',
window_length=10,
data_types='close, open')
self.op.set_parameter(stg_id='r-0',
pars=(9, -0.23),
opt_tag=1,
sample_freq='d',
window_length=20,
data_types='close, open')
self.assertEqual(self.op.timing[0].pars, (5, 10, 5))
self.assertEqual(self.op.selecting[0].pars, (0.5,))
self.assertEqual(self.op.ricon[0].pars, (9, -0.23))
self.assertEqual(self.op.opt_types, [1, 0, 1])
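        # set_opt_par() distributes a flat parameter tuple over the strategies whose
        # opt_tag is 1, in order: (5, 12, 9) fills the timing strategy's three pars,
        # the selecting strategy (opt_tag=0) is skipped, and (8, -0.1) fills ricon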
self.op.set_opt_par((5, 12, 9, 8, -0.1))
self.assertEqual(self.op.timing[0].pars, (5, 12, 9))
self.assertEqual(self.op.selecting[0].pars, (0.5,))
self.assertEqual(self.op.ricon[0].pars, (8, -0.1))
# test set_opt_par when opt_tag is set to be 2 (enumerate type of parameters)
self.assertRaises(ValueError, self.op.set_opt_par, (5, 12, 9, 8))
def test_stg_attribute_get_and_set(self):
self.stg = qt.TimingCrossline()
self.stg_type = 'TIMING'
self.stg_name = "CROSSLINE STRATEGY"
self.stg_text = 'Moving average crossline strategy, determine long/short position according to the cross ' \
'point' \
' of long and short term moving average prices '
self.pars = (35, 120, 10, 'buy')
self.par_boes = [(10, 250), (10, 250), (1, 100), ('buy', 'sell', 'none')]
self.par_count = 4
self.par_types = ['discr', 'discr', 'conti', 'enum']
self.opt_tag = 0
self.data_types = ['close']
self.data_freq = 'd'
self.sample_freq = 'd'
self.window_length = 270
self.assertEqual(self.stg.stg_type, self.stg_type)
self.assertEqual(self.stg.stg_name, self.stg_name)
self.assertEqual(self.stg.stg_text, self.stg_text)
self.assertEqual(self.stg.pars, self.pars)
self.assertEqual(self.stg.par_types, self.par_types)
self.assertEqual(self.stg.par_boes, self.par_boes)
self.assertEqual(self.stg.par_count, self.par_count)
self.assertEqual(self.stg.opt_tag, self.opt_tag)
self.assertEqual(self.stg.data_freq, self.data_freq)
self.assertEqual(self.stg.sample_freq, self.sample_freq)
self.assertEqual(self.stg.data_types, self.data_types)
self.assertEqual(self.stg.window_length, self.window_length)
self.stg.stg_name = 'NEW NAME'
self.stg.stg_text = 'NEW TEXT'
self.assertEqual(self.stg.stg_name, 'NEW NAME')
self.assertEqual(self.stg.stg_text, 'NEW TEXT')
self.stg.pars = (1, 2, 3, 4)
self.assertEqual(self.stg.pars, (1, 2, 3, 4))
self.stg.par_count = 3
self.assertEqual(self.stg.par_count, 3)
self.stg.par_boes = [(1, 10), (1, 10), (1, 10), (1, 10)]
self.assertEqual(self.stg.par_boes, [(1, 10), (1, 10), (1, 10), (1, 10)])
self.stg.par_types = ['conti', 'conti', 'discr', 'enum']
self.assertEqual(self.stg.par_types, ['conti', 'conti', 'discr', 'enum'])
self.stg.par_types = 'conti, conti, discr, conti'
self.assertEqual(self.stg.par_types, ['conti', 'conti', 'discr', 'conti'])
self.stg.data_types = 'close, open'
self.assertEqual(self.stg.data_types, ['close', 'open'])
self.stg.data_types = ['close', 'high', 'low']
self.assertEqual(self.stg.data_types, ['close', 'high', 'low'])
self.stg.data_freq = 'w'
self.assertEqual(self.stg.data_freq, 'w')
self.stg.window_length = 300
self.assertEqual(self.stg.window_length, 300)
def test_rolling_timing(self):
stg = TestLSStrategy()
stg_pars = {'000100': (5, 10),
'000200': (5, 10),
'000300': (5, 6)}
stg.set_pars(stg_pars)
history_data = self.hp1.values
output = stg.generate(hist_data=history_data)
self.assertIsInstance(output, np.ndarray)
self.assertEqual(output.shape, (45, 3))
lsmask = np.array([[0., 0., 1.],
[0., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.],
[1., 1., 0.],
[1., 1., 0.],
[1., 1., 0.],
[1., 0., 0.],
[1., 0., 0.],
[1., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.]])
        # TODO: issue to be solved: np.nan values are converted to 0 in lsmask, which may have
        # TODO: unintended consequences; the nan handling needs to be resolved
self.assertEqual(output.shape, lsmask.shape)
self.assertTrue(np.allclose(output, lsmask, equal_nan=True))
def test_sel_timing(self):
stg = TestSelStrategy()
stg_pars = ()
stg.set_pars(stg_pars)
history_data = self.hp1['high, low, close', :, :]
seg_pos, seg_length, seg_count = stg._seg_periods(dates=self.hp1.hdates, freq=stg.sample_freq)
self.assertEqual(list(seg_pos), [0, 5, 11, 19, 26, 33, 41, 47, 49])
self.assertEqual(list(seg_length), [5, 6, 8, 7, 7, 8, 6, 2])
self.assertEqual(seg_count, 8)
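        # _seg_periods() splits the 50 trading dates into sampling segments at the
        # given sample_freq ('10d', roughly 10 calendar days or 5 to 8 trading days
        # each), returning the segment start positions, lengths and count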
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
self.assertIsInstance(output, np.ndarray)
self.assertEqual(output.shape, (45, 3))
selmask = np.array([[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask))
def test_simple_timing(self):
stg = TestSigStrategy()
stg_pars = (0.2, 0.02, -0.02)
stg.set_pars(stg_pars)
history_data = self.hp1['close, open, high, low', :, 3:50]
output = stg.generate(hist_data=history_data, shares=self.shares, dates=self.date_indices)
self.assertIsInstance(output, np.ndarray)
self.assertEqual(output.shape, (45, 3))
sigmatrix = np.array([[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, -1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[-1.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 1.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0]])
side_by_side_array = np.array([[i, out_line, sig_line]
for
i, out_line, sig_line
in zip(range(len(output)), output, sigmatrix)])
print(f'output and signal matrix lined up side by side is \n'
f'{side_by_side_array}')
self.assertEqual(sigmatrix.shape, output.shape)
self.assertTrue(np.allclose(output, sigmatrix))
def test_sel_finance(self):
"""Test selecting_finance strategy, test all built-in strategy parameters"""
stg = SelectingFinanceIndicator()
stg_pars = (False, 'even', 'greater', 0, 0, 0.67)
stg.set_pars(stg_pars)
stg.window_length = 5
stg.data_freq = 'd'
stg.sample_freq = '10d'
stg.sort_ascending = False
stg.condition = 'greater'
stg.lbound = 0
stg.ubound = 0
stg._poq = 0.67
history_data = self.hp2.values
print(f'Start to test financial selection parameter {stg_pars}')
seg_pos, seg_length, seg_count = stg._seg_periods(dates=self.hp1.hdates, freq=stg.sample_freq)
self.assertEqual(list(seg_pos), [0, 5, 11, 19, 26, 33, 41, 47, 49])
self.assertEqual(list(seg_length), [5, 6, 8, 7, 7, 8, 6, 2])
self.assertEqual(seg_count, 8)
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
self.assertIsInstance(output, np.ndarray)
self.assertEqual(output.shape, (45, 3))
selmask = np.array([[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask))
        # test single factor, get minimum factor
stg_pars = (True, 'even', 'less', 1, 1, 0.67)
stg.sort_ascending = True
stg.condition = 'less'
stg.lbound = 1
stg.ubound = 1
stg.set_pars(stg_pars)
print(f'Start to test financial selection parameter {stg_pars}')
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
selmask = np.array([[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask))
# test single factor, get max factor in linear weight
stg_pars = (False, 'linear', 'greater', 0, 0, 0.67)
stg.sort_ascending = False
stg.weighting = 'linear'
stg.condition = 'greater'
stg.lbound = 0
stg.ubound = 0
stg.set_pars(stg_pars)
print(f'Start to test financial selection parameter {stg_pars}')
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
selmask = np.array([[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.66667, 0.00000],
[0.33333, 0.66667, 0.00000],
[0.33333, 0.66667, 0.00000]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask))
        # test single factor, get max factor in proportion weight
stg_pars = (False, 'proportion', 'greater', 0, 0, 0.67)
stg.sort_ascending = False
stg.weighting = 'proportion'
stg.condition = 'greater'
stg.lbound = 0
stg.ubound = 0
stg.set_pars(stg_pars)
print(f'Start to test financial selection parameter {stg_pars}')
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
selmask = np.array([[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.91667, 0.00000],
[0.08333, 0.91667, 0.00000],
[0.08333, 0.91667, 0.00000]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask, 0.001))
        # test single factor, get max factor in even weight, threshold 0.2
stg_pars = (False, 'even', 'greater', 0.2, 0.2, 0.67)
stg.sort_ascending = False
stg.weighting = 'even'
stg.condition = 'greater'
stg.lbound = 0.2
stg.ubound = 0.2
stg.set_pars(stg_pars)
print(f'Start to test financial selection parameter {stg_pars}')
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
selmask = np.array([[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask, 0.001))
class TestLog(unittest.TestCase):
def test_init(self):
pass
class TestConfig(unittest.TestCase):
"""测试Config对象以及QT_CONFIG变量的设置和获取值"""
def test_init(self):
pass
def test_invest(self):
pass
def test_pars_string_to_type(self):
_parse_string_kwargs('000300', 'asset_pool', _valid_qt_kwargs())
class TestHistoryPanel(unittest.TestCase):
def setUp(self):
print('start testing HistoryPanel object\n')
self.data = np.random.randint(10, size=(5, 10, 4))
self.index = pd.date_range(start='20200101', freq='d', periods=10)
self.index2 = ['2016-07-01', '2016-07-04', '2016-07-05', '2016-07-06',
'2016-07-07', '2016-07-08', '2016-07-11', '2016-07-12',
'2016-07-13', '2016-07-14']
self.index3 = '2016-07-01, 2016-07-04, 2016-07-05, 2016-07-06, 2016-07-07, ' \
'2016-07-08, 2016-07-11, 2016-07-12, 2016-07-13, 2016-07-14'
self.shares = '000100,000101,000102,000103,000104'
self.htypes = 'close,open,high,low'
self.data2 = np.random.randint(10, size=(10, 5))
self.data3 = np.random.randint(10, size=(10, 4))
self.data4 = np.random.randint(10, size=(10))
self.hp = qt.HistoryPanel(values=self.data, levels=self.shares, columns=self.htypes, rows=self.index)
self.hp2 = qt.HistoryPanel(values=self.data2, levels=self.shares, columns='close', rows=self.index)
self.hp3 = qt.HistoryPanel(values=self.data3, levels='000100', columns=self.htypes, rows=self.index2)
self.hp4 = qt.HistoryPanel(values=self.data4, levels='000100', columns='close', rows=self.index3)
self.hp5 = qt.HistoryPanel(values=self.data)
self.hp6 = qt.HistoryPanel(values=self.data, levels=self.shares, rows=self.index3)
def test_properties(self):
""" test all properties of HistoryPanel
"""
self.assertFalse(self.hp.is_empty)
self.assertEqual(self.hp.row_count, 10)
self.assertEqual(self.hp.column_count, 4)
self.assertEqual(self.hp.level_count, 5)
self.assertEqual(self.hp.shape, (5, 10, 4))
self.assertSequenceEqual(self.hp.htypes, ['close', 'open', 'high', 'low'])
self.assertSequenceEqual(self.hp.shares, ['000100', '000101', '000102', '000103', '000104'])
self.assertSequenceEqual(list(self.hp.hdates), list(self.index))
self.assertDictEqual(self.hp.columns, {'close': 0, 'open': 1, 'high': 2, 'low': 3})
self.assertDictEqual(self.hp.levels, {'000100': 0, '000101': 1, '000102': 2, '000103': 3, '000104': 4})
row_dict = {Timestamp('2020-01-01 00:00:00', freq='D'): 0,
Timestamp('2020-01-02 00:00:00', freq='D'): 1,
Timestamp('2020-01-03 00:00:00', freq='D'): 2,
Timestamp('2020-01-04 00:00:00', freq='D'): 3,
Timestamp('2020-01-05 00:00:00', freq='D'): 4,
Timestamp('2020-01-06 00:00:00', freq='D'): 5,
Timestamp('2020-01-07 00:00:00', freq='D'): 6,
Timestamp('2020-01-08 00:00:00', freq='D'): 7,
Timestamp('2020-01-09 00:00:00', freq='D'): 8,
Timestamp('2020-01-10 00:00:00', freq='D'): 9}
self.assertDictEqual(self.hp.rows, row_dict)
def test_len(self):
""" test the function len(HistoryPanel)
:return:
"""
self.assertEqual(len(self.hp), 10)
def test_empty_history_panel(self):
"""测试空HP或者特殊HP如维度标签为纯数字的HP"""
test_hp = qt.HistoryPanel(self.data)
self.assertFalse(test_hp.is_empty)
self.assertIsInstance(test_hp, qt.HistoryPanel)
self.assertEqual(test_hp.shape[0], 5)
self.assertEqual(test_hp.shape[1], 10)
self.assertEqual(test_hp.shape[2], 4)
self.assertEqual(test_hp.level_count, 5)
self.assertEqual(test_hp.row_count, 10)
self.assertEqual(test_hp.column_count, 4)
self.assertEqual(test_hp.shares, list(range(5)))
self.assertEqual(test_hp.hdates, list(pd.date_range(start='20200730', periods=10, freq='d')))
self.assertEqual(test_hp.htypes, list(range(4)))
self.assertTrue(np.allclose(test_hp.values, self.data))
print(f'shares: {test_hp.shares}\nhtypes: {test_hp.htypes}')
print(test_hp)
empty_hp = qt.HistoryPanel()
self.assertTrue(empty_hp.is_empty)
self.assertIsInstance(empty_hp, qt.HistoryPanel)
self.assertEqual(empty_hp.shape[0], 0)
self.assertEqual(empty_hp.shape[1], 0)
self.assertEqual(empty_hp.shape[2], 0)
self.assertEqual(empty_hp.level_count, 0)
self.assertEqual(empty_hp.row_count, 0)
self.assertEqual(empty_hp.column_count, 0)
def test_create_history_panel(self):
""" test the creation of a HistoryPanel object by passing all data explicitly
"""
self.assertIsInstance(self.hp, qt.HistoryPanel)
self.assertEqual(self.hp.shape[0], 5)
self.assertEqual(self.hp.shape[1], 10)
self.assertEqual(self.hp.shape[2], 4)
self.assertEqual(self.hp.level_count, 5)
self.assertEqual(self.hp.row_count, 10)
self.assertEqual(self.hp.column_count, 4)
self.assertEqual(list(self.hp.levels.keys()), self.shares.split(','))
self.assertEqual(list(self.hp.columns.keys()), self.htypes.split(','))
self.assertEqual(list(self.hp.rows.keys())[0], pd.Timestamp('20200101'))
self.assertIsInstance(self.hp2, qt.HistoryPanel)
self.assertEqual(self.hp2.shape[0], 5)
self.assertEqual(self.hp2.shape[1], 10)
self.assertEqual(self.hp2.shape[2], 1)
self.assertEqual(self.hp2.level_count, 5)
self.assertEqual(self.hp2.row_count, 10)
self.assertEqual(self.hp2.column_count, 1)
self.assertEqual(list(self.hp2.levels.keys()), self.shares.split(','))
self.assertEqual(list(self.hp2.columns.keys()), ['close'])
self.assertEqual(list(self.hp2.rows.keys())[0], pd.Timestamp('20200101'))
self.assertIsInstance(self.hp3, qt.HistoryPanel)
self.assertEqual(self.hp3.shape[0], 1)
self.assertEqual(self.hp3.shape[1], 10)
self.assertEqual(self.hp3.shape[2], 4)
self.assertEqual(self.hp3.level_count, 1)
self.assertEqual(self.hp3.row_count, 10)
self.assertEqual(self.hp3.column_count, 4)
self.assertEqual(list(self.hp3.levels.keys()), ['000100'])
self.assertEqual(list(self.hp3.columns.keys()), self.htypes.split(','))
self.assertEqual(list(self.hp3.rows.keys())[0], pd.Timestamp('2016-07-01'))
self.assertIsInstance(self.hp4, qt.HistoryPanel)
self.assertEqual(self.hp4.shape[0], 1)
self.assertEqual(self.hp4.shape[1], 10)
self.assertEqual(self.hp4.shape[2], 1)
self.assertEqual(self.hp4.level_count, 1)
self.assertEqual(self.hp4.row_count, 10)
self.assertEqual(self.hp4.column_count, 1)
self.assertEqual(list(self.hp4.levels.keys()), ['000100'])
self.assertEqual(list(self.hp4.columns.keys()), ['close'])
self.assertEqual(list(self.hp4.rows.keys())[0], pd.Timestamp('2016-07-01'))
self.hp5.info()
self.assertIsInstance(self.hp5, qt.HistoryPanel)
self.assertTrue(np.allclose(self.hp5.values, self.data))
self.assertEqual(self.hp5.shape[0], 5)
self.assertEqual(self.hp5.shape[1], 10)
self.assertEqual(self.hp5.shape[2], 4)
self.assertEqual(self.hp5.level_count, 5)
self.assertEqual(self.hp5.row_count, 10)
self.assertEqual(self.hp5.column_count, 4)
self.assertEqual(list(self.hp5.levels.keys()), [0, 1, 2, 3, 4])
self.assertEqual(list(self.hp5.columns.keys()), [0, 1, 2, 3])
self.assertEqual(list(self.hp5.rows.keys())[0], pd.Timestamp('2020-07-30'))
self.hp6.info()
self.assertIsInstance(self.hp6, qt.HistoryPanel)
self.assertTrue(np.allclose(self.hp6.values, self.data))
self.assertEqual(self.hp6.shape[0], 5)
self.assertEqual(self.hp6.shape[1], 10)
self.assertEqual(self.hp6.shape[2], 4)
self.assertEqual(self.hp6.level_count, 5)
self.assertEqual(self.hp6.row_count, 10)
self.assertEqual(self.hp6.column_count, 4)
self.assertEqual(list(self.hp6.levels.keys()), ['000100', '000101', '000102', '000103', '000104'])
self.assertEqual(list(self.hp6.columns.keys()), [0, 1, 2, 3])
self.assertEqual(list(self.hp6.rows.keys())[0], pd.Timestamp('2016-07-01'))
        # Error testing during HistoryPanel creation
# shape does not match
self.assertRaises(AssertionError,
qt.HistoryPanel,
self.data,
levels=self.shares, columns='close', rows=self.index)
        # values is not np.ndarray
self.assertRaises(AssertionError,
qt.HistoryPanel,
list(self.data))
# dimension/shape does not match
self.assertRaises(AssertionError,
qt.HistoryPanel,
self.data2,
levels='000100', columns=self.htypes, rows=self.index)
# value dimension over 3
self.assertRaises(AssertionError,
qt.HistoryPanel,
np.random.randint(10, size=(5, 10, 4, 2)))
        # label value not valid
self.assertRaises(ValueError,
qt.HistoryPanel,
self.data2,
levels=self.shares, columns='close',
rows='a,b,c,d,e,f,g,h,i,j')
def test_history_panel_slicing(self):
"""测试HistoryPanel的各种切片方法
包括通过标签名称切片,通过数字切片,通过逗号分隔的标签名称切片,通过冒号分隔的标签名称切片等切片方式"""
self.assertTrue(np.allclose(self.hp['close'], self.data[:, :, 0:1]))
self.assertTrue(np.allclose(self.hp['close,open'], self.data[:, :, 0:2]))
self.assertTrue(np.allclose(self.hp[['close', 'open']], self.data[:, :, 0:2]))
self.assertTrue(np.allclose(self.hp['close:high'], self.data[:, :, 0:3]))
self.assertTrue(np.allclose(self.hp['close,high'], self.data[:, :, [0, 2]]))
self.assertTrue(np.allclose(self.hp[:, '000100'], self.data[0:1, :, ]))
self.assertTrue(np.allclose(self.hp[:, '000100,000101'], self.data[0:2, :]))
self.assertTrue(np.allclose(self.hp[:, ['000100', '000101']], self.data[0:2, :]))
self.assertTrue(np.allclose(self.hp[:, '000100:000102'], self.data[0:3, :]))
self.assertTrue(np.allclose(self.hp[:, '000100,000102'], self.data[[0, 2], :]))
self.assertTrue(np.allclose(self.hp['close,open', '000100,000102'], self.data[[0, 2], :, 0:2]))
print('start testing HistoryPanel')
data = np.random.randint(10, size=(10, 5))
# index = pd.date_range(start='20200101', freq='d', periods=10)
shares = '000100,000101,000102,000103,000104'
dtypes = 'close'
df = pd.DataFrame(data)
print('=========================\nTesting HistoryPanel creation from DataFrame')
hp = qt.dataframe_to_hp(df=df, shares=shares, htypes=dtypes)
hp.info()
hp = qt.dataframe_to_hp(df=df, shares='000100', htypes='close, open, high, low, middle', column_type='htypes')
hp.info()
print('=========================\nTesting HistoryPanel creation from initialization')
data = np.random.randint(10, size=(5, 10, 4)).astype('float')
index = pd.date_range(start='20200101', freq='d', periods=10)
dtypes = 'close, open, high,low'
data[0, [5, 6, 9], [0, 1, 3]] = np.nan
data[1:4, [4, 7, 6, 2], [1, 1, 3, 0]] = np.nan
data[4:5, [2, 9, 1, 2], [0, 3, 2, 1]] = np.nan
hp = qt.HistoryPanel(data, levels=shares, columns=dtypes, rows=index)
hp.info()
        print('==========================\nOutput all historical data of htype close\n')
        self.assertTrue(np.allclose(hp['close', :, :], data[:, :, 0:1], equal_nan=True))
        print(f'==========================\nOutput all historical data of htypes close and open\n')
        self.assertTrue(np.allclose(hp[[0, 1], :, :], data[:, :, 0:2], equal_nan=True))
        print(f'==========================\nOutput historical data of all htypes for the first share\n')
        self.assertTrue(np.allclose(hp[:, [0], :], data[0:1, :, :], equal_nan=True))
        print('==========================\nOutput all historical data of all shares for htypes 0, 1 and 2\n')
        self.assertTrue(np.allclose(hp[[0, 1, 2]], data[:, :, 0:3], equal_nan=True))
        print('==========================\nOutput all historical data of htypes close and high\n')
        self.assertTrue(np.allclose(hp[['close', 'high']], data[:, :, [0, 2]], equal_nan=True))
        print('==========================\nOutput all historical data of htypes 0 and 1\n')
        self.assertTrue(np.allclose(hp[[0, 1]], data[:, :, 0:2], equal_nan=True))
        print('==========================\nOutput all historical data of htypes close and high\n')
        self.assertTrue(np.allclose(hp['close,high'], data[:, :, [0, 2]], equal_nan=True))
        print('==========================\nOutput all historical data of the three htypes from close through high\n')
        self.assertTrue(np.allclose(hp['close:high'], data[:, :, 0:3], equal_nan=True))
        print('==========================\nOutput all historical data of shares 0, 1 and 3\n')
        self.assertTrue(np.allclose(hp[:, [0, 1, 3]], data[[0, 1, 3], :, :], equal_nan=True))
        print('==========================\nOutput all historical data of shares 000100 and 000102\n')
        self.assertTrue(np.allclose(hp[:, ['000100', '000102']], data[[0, 2], :, :], equal_nan=True))
        print('==========================\nOutput historical data of shares 0, 1 and 2\n', hp[:, 0: 3])
        self.assertTrue(np.allclose(hp[:, 0: 3], data[0:3, :, :], equal_nan=True))
        print('==========================\nOutput all historical data of shares 000100 and 000102\n')
        self.assertTrue(np.allclose(hp[:, '000100, 000102'], data[[0, 2], :, :], equal_nan=True))
        print('==========================\nOutput days 0-7 of historical data for all shares\n')
        self.assertTrue(np.allclose(hp[:, :, 0:8], data[:, 0:8, :], equal_nan=True))
import numpy as np
from scipy.optimize import fmin
def rotation_xy(alpha, beta):
Rx = [[1, 0, 0],
          [0, np.cos(alpha), -np.sin(alpha)],
          [0, np.sin(alpha), np.cos(alpha)]]
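    # The original snippet is truncated here; the following completion is an
    # assumed reconstruction (not from the source), applying the standard y-axis
    # rotation after the x-axis one:
    Ry = [[np.cos(beta), 0, np.sin(beta)],
          [0, 1, 0],
          [-np.sin(beta), 0, np.cos(beta)]]
    return np.dot(Ry, Rx)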
from os import path, remove
import numpy as np
import rectlang
import colors
import tools
radiuses = [25, 50, 75, 100, 125, 150, 175, 200]
angles = [2.5656, 3.0144, 3.4632, 3.912, 4.3608, 4.8096, 5.2583, 5.7072]
letters = 'ABCDEF<KEY>'
scale_factor = 16
figure_width = 7.2 #inches
rectlang_space = rectlang.Space((8,8), solutions_file='../data/8x8_solutions.json')
def draw_language(language, offset_x, offset_y, chain, generation, show_stimuli=False, rect_compress=True):
if rect_compress:
return draw_language_rects(language, offset_x, offset_y, chain, generation, show_stimuli)
return draw_language_cells(language, offset_x, offset_y, chain, generation, show_stimuli)
def draw_language_cells(language, offset_x, offset_y, chain, generation, show_stimuli=True):
language_id = letters[chain] + str(generation)
svg = ' <g id="language-%s">\n' % language_id
for x in range(8):
rad = radiuses[x] / scale_factor
loc_x = (offset_x + (x * 500) + 250) / scale_factor
box_x = (offset_x + (x * 500)) / scale_factor
for y in range(8):
stim_i = x*8 + y
svg += ' <g id="stim-%s">\n' % stim_i
ang = angles[y]
loc_y = (offset_y + ((y+1) * 500) - 250) / scale_factor
box_y = (offset_y + (y * 500)) / scale_factor
line_x = rad * np.cos(ang) + loc_x
line_y = rad * np.sin(ang) + loc_y
color = colors.categories[language[y,x]]
svg += " <polygon points='%s,%s %s,%s %s,%s %s,%s' style='stroke: %s; stroke-width:1; fill:%s;' />\n" % (str(box_x), str(box_y), str(box_x+(500//scale_factor)), str(box_y), str(box_x+(500//scale_factor)), str(box_y+(500//scale_factor)), str(box_x), str(box_y+(500//scale_factor)), color, color)
if show_stimuli:
svg += " <circle cx='%s' cy='%s' r='%s' style='stroke:black; stroke-width:1; fill:none;' />\n" % (str(loc_x), str(loc_y), str(rad))
svg += " <line x1='%s' y1='%s' x2='%s' y2='%s' style='stroke: black; stroke-width:1;' />\n" % (loc_x, loc_y, line_x, line_y)
svg += ' </g>\n'
svg += ' </g>\n\n'
return svg
def draw_language_rects(language, offset_x, offset_y, chain, generation, show_stimuli=True):
language_id = letters[chain] + str(generation)
svg = '\t\t<g id="language-%s">\n' % language_id
for cat_i in np.unique(language):
color = colors.categories[cat_i]
cat_rects = rectlang_space.compress_concept(language==cat_i)[1]
for (y, x), _, _, _, (h, w) in cat_rects:
box_x = (offset_x + (x * 500)) / scale_factor
box_y = (offset_y + (y * 500)) / scale_factor
box_w = w * 500 / scale_factor
box_h = h * 500 / scale_factor
svg += "\t\t\t<rect x='%s' y='%s' width='%s' height='%s' style='fill:%s; stroke-width:0.1; stroke:%s' />\n" % (str(box_x), str(box_y), str(box_w), str(box_h), color, color)
svg += '\t\t</g>\n\n'
return svg
def draw_letter(letter_i, offset_x, offset_y):
letter = letters[letter_i]
loc_x = (offset_x + (4000 / 2)) / scale_factor
loc_y = (offset_y + (4000 / 2)) / scale_factor
svg = ' <g id="chain-letter-%s">\n' % letter
svg += ' <text text-anchor="middle" dominant-baseline="central" x="%s" y="%s" fill="black" style="font-size: %ipx; font-family:Helvetica">%s</text>\n' % (loc_x, loc_y, 2000//scale_factor, letter)
svg += ' </g>\n\n'
return svg
def draw_all_chains(chain_data, n_columns=10, show_stimuli=False, method='productions', rect_compress=True, verbose=False):
arr = []
svg = ''
offset_x = 4400
offset_y = 400
for chain_i in range(len(chain_data)):
svg += ' <g id="chain-%i">\n\n' % chain_i
chain = chain_data[chain_i]
n_generations = len(chain['generations'])
n_full_rows = n_generations // n_columns # number of full rows that will be required
final_row_length = n_generations % n_columns # number of gens in the final row
if final_row_length == 0:
n_rows = n_full_rows
else:
n_rows = n_full_rows + 1
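        # e.g. with n_generations = 25 and n_columns = 10: n_full_rows = 2,
        # final_row_length = 5, so n_rows = 3 (two full rows plus one partial row)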
for row_i in range(n_rows):
arr.append([])
offset_x = -1200
for col_i in range(n_columns+1):
if row_i == 0 and col_i == 0:
# insert the chain letter
arr[-1].append(str(chain['chain_id']))
svg += draw_letter(chain['chain_id'], offset_x, offset_y)
elif row_i > 0 and col_i == 0:
# blank
arr[-1].append('-')
elif row_i >= 0 and row_i < n_rows-1 and col_i == n_columns+1:
# insert ...
arr[-1].append('.')
elif row_i < n_rows-1 and col_i == n_columns+2:
arr[-1].append('--')
else:
generation = (row_i * n_columns) + (col_i - 1)
if generation < n_generations:
str_gen = str(generation)
if len(str_gen) == 1:
str_gen = '0' + str_gen
arr[-1].append(str_gen)
                        language = np.array(chain['generations'][generation][method], dtype=int)
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 01 10:52:23 2018
@author: <NAME>
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import matplotlib.lines as mlines
import PolynomialOrderStar as POS
#Plot #1: Trapezium Rule Order Star
def p(z):
return (1.0 + 0.5 * z) / (1.0 - 0.5 * z)
POS.polyOrderStar(p, -3, 3, -2, 2)
#Plot #2: Index Example
def p(z):
return 1.0 + z + z ** 2 /2.0 + z ** 3 / 6.0 + z ** 4 / 24.0
#Plot #3
x = np.linspace(0, np.pi, 1000)
c1 = 1 + np.exp(x * 1j) / 2.0
c2 = np.sqrt(2) / 2.0 + (np.sqrt(2) * 1j) / 2.0 + np.exp(x * 1j) / 2.0
c3 = 1j + np.exp(x * 1j) / 2.0
c4 = - np.sqrt(2) / 2.0 + (np.sqrt(2) * 1j) / 2.0 + np.exp(x * 1j) / 2.0
c5 = -1 + np.exp(x * 1j) / 2.0
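#c1..c5 trace half-circles of radius 1/2 centred at the unit-circle points
#exp(1j*k*pi/4) for k = 0..4, i.e. at 1, (1+1j)/sqrt(2), 1j, (-1+1j)/sqrt(2) and -1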
#Initialize a Figure
fig = plt.figure()
#Add Axes to Figure
ax = fig.add_subplot(111)
#plot first chain
ax.plot(np.real(c1), np.imag(c1), color = 'C2')
ax.plot(np.real(c1), - np.imag(c1), color = 'C2')
ax.fill_between(np.real(c1), np.imag(c1), -np.imag(c1), color = 'C2',
alpha = 0.1)
ax.plot(np.real(c2), np.imag(c2), color = 'C0')
ax.plot(np.real(c2), 2 * np.imag(c2[0]) - np.imag(c2), color = 'C0')
ax.fill_between(np.real(c2), np.imag(c2), 2 * np.imag(c2[0]) - np.imag(c2), color = 'C0',
                alpha = 0.1)
# -*- coding: utf-8 -*-
"""
Created on Thu May 28 08:12:54 2020
@author: juru
"""
import numpy as np
import matplotlib.pyplot as plt
from ed_win.intersection_checker import intersection_checker
def capacitated_spanning_tree(X=[], Y=[], option=3, UL=100, Inters_const=True, max_it=20000):
"""
Calculate a minimum spanning tree distance for a layout.
Capacitated minimum spanning tree heuristics algorithm for Topfarm.
Parameters
----------
*X, Y: list([n_wt_oss]) or array type as well
X,Y positions of the wind turbines and oss
*option: Heuristic type. option=1 is Prim. option=2 is Kruskal. option=3 is Esau-Williams
    *max_it: Maximum number of iterations for the heuristics
    *UL: Upper limit for the max number of wind turbines connectable by the biggest available cable
    *Inters_const=Bool. True if cable crossings are not allowed. False if they are allowed.
    :return: T: Array. First column is first node, second column is second node, third column is the distance between nodes
             The OSS is always the node number 1. The WTs go from 2 to number of WTs plus one
             feasible: Bool. True if the solution is feasible. False if not.
"""
# %% Initializing arrays, lists, variables (until line 46 .m file)
n_wt_oss = len(X) # Defining number of wind turbines with OSS
half = int(n_wt_oss * (n_wt_oss - 1) / 2)
edges_tot = np.zeros((2 * half, 5)) # Defining the matrix with Edges information
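    # edges_tot columns: [first node, second node, edge length, weight of first node,
    # trade-off = length - weight]; rows half..2*half-1 mirror the first half with
    # the node order reversed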
cont_edges = 0
for i in range(n_wt_oss):
for j in range(i + 1, n_wt_oss):
edges_tot[cont_edges, 0] = i + 1 # First element is first node (Element =1 is the OSS. and from 2 to Nwt the WTs)
edges_tot[cont_edges, 1] = j + 1 # Second element is second node
edges_tot[cont_edges, 2] = np.sqrt((X[j] - X[i])**2 + (Y[j] - Y[i])**2) # Third element is the length of the edge
cont_edges += 1
CP = [x for x in range(n_wt_oss)] # Initializing component position list for each node. A component goes from 0 until n_wt_oss-1. Fixed length.
address_nodes = [-1 for x in range(n_wt_oss)] # Initializing address list for each node. It indicates the root node for each node in the tree and in subtrees from OSS. Fixed length.
address_nodes[0] = 0
address_nodes = np.array(address_nodes, dtype=int)
C = [[x + 1] for x in range(n_wt_oss)] # Initializing component list (nodes belonging to each comonent). A component goes from 0 until n_wt_oss-1, and its length decreases until 1 (component 0). Variable length.
S = [1 for x in range(n_wt_oss)] # Initializing size of components list (how many nodes are in each component). A component goes from 0 until n_wt_oss-1, and its length decreases until 1 (component 0 with n_wt_oss-1 elements). Variable length.
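    # e.g. with n_wt_oss = 4 the initial state is CP = [0, 1, 2, 3], C = [[1], [2], [3], [4]]
    # and S = [1, 1, 1, 1]: every node starts as its own single-node component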
go, it, node1, node2, weight = True, 0, 0, 0, np.zeros((n_wt_oss, 1)) # Initializing variables for iterations
if option == 1: # Initializing weight of nodes. Each index represents a node, such as Node=Index+1
weight[0], weight[1:n_wt_oss] = 0, -10**50
elif option == 2:
weight
elif option == 3:
weight[0], weight[1:n_wt_oss] = 0, edges_tot[0:n_wt_oss - 1, 2].reshape(n_wt_oss - 1, 1)
else:
raise Exception('option should be either 1, 2 or 3 The value of x was: {}'.format(option))
for i in range(2 * half): # Forming the big matrix with all edges and corresponding trade-off values (fixed size).
if i <= half - 1:
edges_tot[i, 3] = weight[edges_tot[i, 0].astype(int) - 1, 0]
edges_tot[i, 4] = edges_tot[i, 2] - edges_tot[i, 3]
else:
edges_tot[i, 0] = edges_tot[i - half, 1]
edges_tot[i, 1] = edges_tot[i - half, 0]
edges_tot[i, 2] = edges_tot[i - half, 2]
edges_tot[i, 3] = weight[edges_tot[i, 0].astype(int) - 1, 0]
edges_tot[i, 4] = edges_tot[i, 2] - edges_tot[i, 3]
mst_edges = np.zeros(2 * half, dtype=bool) # Array containing the activation variables of selected edges
feasible = False
# %% Main (until line 609 .m file)
while go:
flag1, flag2, flag3, flag4 = True, True, True, True
it += 1
value_potential_edge, pos_potential_edge = np.min(edges_tot[:, 4]), np.argmin(edges_tot[:, 4])
if (value_potential_edge > 10**49) or (it == max_it): # Condition to stop if a C-MST cannot be found
# print(it)
# print(value_potential_edge)
break
node1, node2 = edges_tot[pos_potential_edge, 0].astype(int), edges_tot[pos_potential_edge, 1].astype(int)
if (CP[node1 - 1] == CP[node2 - 1]) and (flag1) and (flag2) and (flag3) and (flag4): # Condition for avoiding the creation of loops
flag1 = False # Boolean for loops creation
if pos_potential_edge <= half - 1: # Eliminiating edges which connect the same component
edges_tot[pos_potential_edge, 4] = edges_tot[pos_potential_edge, 2] + 10**50
edges_tot[pos_potential_edge + half, 4] = edges_tot[pos_potential_edge + half, 2] + 10**50
else:
edges_tot[pos_potential_edge, 4] = edges_tot[pos_potential_edge, 2] + 10**50
edges_tot[pos_potential_edge - half, 4] = edges_tot[pos_potential_edge - half, 2] + 10**50
# %% Next code is when the potential edge is connected directly to the OSS (node==1) and it does not create loops
if ((node1 == 1) or (node2 == 1)) and (flag1) and (flag2) and (flag3) and (flag4): # Evaluation of the number of nodes in a subtree rooted at 1
flag2 = False
if node1 == 1: # If the selected edge has a node 1 the OSS
if (S[CP[node2 - 1]] > UL): # Evaluation of the capacity constraint: If true, proceeding to eliminate edges
if pos_potential_edge <= half - 1:
edges_tot[pos_potential_edge, 4] = edges_tot[pos_potential_edge, 2] + 10**50
edges_tot[pos_potential_edge + half, 4] = edges_tot[pos_potential_edge + half, 2] + 10**50
else:
edges_tot[pos_potential_edge, 4] = edges_tot[pos_potential_edge, 2] + 10**50
edges_tot[pos_potential_edge - half, 4] = edges_tot[pos_potential_edge - half, 2] + 10**50
else: # If capacity constraint not violated, then evaluate no-crossing cables constraint
if (not(intersection_checker(pos_potential_edge, edges_tot, mst_edges, X, Y, Inters_const))): # If no cables crossing, add the edge to the tree
mst_edges[pos_potential_edge] = True # Add it to the tree. line 88 .m file
# Update node address
address_nodes[node2 - 1] = 1
C_sliced_n2 = C[CP[node2 - 1]]
for j in range(len(C_sliced_n2)): # This could be replaced without for loop as address_nodes is now an array (before was a list)
if C_sliced_n2[j] == node2:
pass
else:
address_nodes[C_sliced_n2[j] - 1] = node2
# Update weights and cost functions
if option == 1:
weight[node2 - 1] = 0
edges_tot[np.where(edges_tot[:, 0] == node2)[0], 3] = weight[node2 - 1]
edges_tot[np.where(edges_tot[:, 0] == node2)[0], 4] = edges_tot[np.where(edges_tot[:, 0] == node2)[0], 2] -\
edges_tot[np.where(edges_tot[:, 0] == node2)[0], 3]
elif option == 2:
pass
elif option == 3:
C_sliced_n1 = C[CP[node1 - 1]]
for j in range(len(C_sliced_n1)):
weight[C_sliced_n1[j] - 1] = weight[node2 - 1]
edges_tot[np.where(edges_tot[:, 0] == C_sliced_n1[j])[0], 3] = weight[node2 - 1]
edges_tot[np.where(edges_tot[:, 0] == C_sliced_n1[j])[0], 4] = edges_tot[np.where(edges_tot[:, 0] == C_sliced_n1[j])[0], 2] -\
edges_tot[np.where(edges_tot[:, 0] == C_sliced_n1[j])[0], 3]
else:
raise Exception('option should be either 1, 2 or 3 The value of x was: {}'.format(option)) # Weight and cost function updated. line 126 .m file
# Eliminating selected edge from edges potential list
if pos_potential_edge <= half - 1:
edges_tot[pos_potential_edge, 4] = edges_tot[pos_potential_edge, 2] + 10**50
edges_tot[pos_potential_edge + half, 4] = edges_tot[pos_potential_edge + half, 2] + 10**50
else:
edges_tot[pos_potential_edge, 4] = edges_tot[pos_potential_edge, 2] + 10**50
edges_tot[pos_potential_edge - half, 4] = edges_tot[pos_potential_edge - half, 2] + 10**50
# Updating auxiliary matrix CP, C, S
u, v = min(node1, node2), max(node1, node2)
C_sliced_u, C_sliced_v = C[CP[u - 1]], C[CP[v - 1]]
S[CP[u - 1]] = len(C_sliced_u) + len(C_sliced_v) # Updating size of components
C[CP[u - 1]] += C[CP[v - 1]] # Merging two lists due to component's merge
old_pos = CP[v - 1]
for j in range(len(C_sliced_v)): # Updating components position for each merged node
CP[C_sliced_v[j] - 1] = CP[u - 1]
for j in range(len(CP)):
if CP[j] > old_pos:
CP[j] -= 1
del C[old_pos] # Deleting old component
del S[old_pos] # Deleting old component size (line 167 .m file)
else: # If a cable crossing is detected
if pos_potential_edge <= half - 1:
edges_tot[pos_potential_edge, 4] = edges_tot[pos_potential_edge, 2] + 10**50
edges_tot[pos_potential_edge + half, 4] = edges_tot[pos_potential_edge + half, 2] + 10**50
else:
edges_tot[pos_potential_edge, 4] = edges_tot[pos_potential_edge, 2] + 10**50
edges_tot[pos_potential_edge - half, 4] = edges_tot[pos_potential_edge - half, 2] + 10**50
if node2 == 1: # If the selected edge has a node 2 the OSS
if (S[CP[node1 - 1]] > UL): # Evaluation of the capacity constraint: If true, proceeding to eliminate edges
if pos_potential_edge <= half - 1:
edges_tot[pos_potential_edge, 4] = edges_tot[pos_potential_edge, 2] + 10**50
edges_tot[pos_potential_edge + half, 4] = edges_tot[pos_potential_edge + half, 2] + 10**50
else:
edges_tot[pos_potential_edge, 4] = edges_tot[pos_potential_edge, 2] + 10**50
edges_tot[pos_potential_edge - half, 4] = edges_tot[pos_potential_edge - half, 2] + 10**50
else:
if (not(intersection_checker(pos_potential_edge, edges_tot, mst_edges, X, Y, Inters_const))): # If no cables crossing, add the edge to the tree
mst_edges[pos_potential_edge] = True # Add it to the tree. line 190 .m file
# Update node address
address_nodes[node1 - 1] = 1
C_sliced_n1 = C[CP[node1 - 1]]
for j in range(len(C_sliced_n1)):
if C_sliced_n1[j] == node1:
pass
else:
address_nodes[C_sliced_n1[j] - 1] = node1
# Update weights and cost functions
if option == 1:
weight[node2 - 1] = 0
edges_tot[np.where(edges_tot[:, 0] == node2)[0], 3] = weight[node2 - 1]
edges_tot[np.where(edges_tot[:, 0] == node2)[0], 4] = edges_tot[np.where(edges_tot[:, 0] == node2)[0], 2] -\
edges_tot[np.where(edges_tot[:, 0] == node2)[0], 3]
elif option == 2:
pass
elif option == 3:
# C_sliced_n1=C[CP[node1-1]]
for j in range(len(C_sliced_n1)):
weight[C_sliced_n1[j] - 1] = weight[node2 - 1]
edges_tot[np.where(edges_tot[:, 0] == C_sliced_n1[j])[0], 3] = weight[node2 - 1]
edges_tot[np.where(edges_tot[:, 0] == C_sliced_n1[j])[0], 4] = edges_tot[np.where(edges_tot[:, 0] == C_sliced_n1[j])[0], 2] -\
edges_tot[np.where(edges_tot[:, 0] == C_sliced_n1[j])[0], 3]
else:
raise Exception('option should be either 1, 2 or 3 The value of x was: {}'.format(option)) # Weight and cost function updated. line 226 .m file
# Eliminating selected edge from edges potential list
if pos_potential_edge <= half - 1:
edges_tot[pos_potential_edge, 4] = edges_tot[pos_potential_edge, 2] + 10**50
edges_tot[pos_potential_edge + half, 4] = edges_tot[pos_potential_edge + half, 2] + 10**50
else:
edges_tot[pos_potential_edge, 4] = edges_tot[pos_potential_edge, 2] + 10**50
edges_tot[pos_potential_edge - half, 4] = edges_tot[pos_potential_edge - half, 2] + 10**50 # line 234 .m file
# Updating auxiliary matrix CP, C, S
u, v = min(node1, node2), max(node1, node2)
C_sliced_u, C_sliced_v = C[CP[u - 1]], C[CP[v - 1]]
S[CP[u - 1]] = len(C_sliced_u) + len(C_sliced_v) # Updating size of components
C[CP[u - 1]] += C[CP[v - 1]] # Merging two lists due to component's merge
old_pos = CP[v - 1]
for j in range(len(C_sliced_v)): # Updating components position for each merged node
CP[C_sliced_v[j] - 1] = CP[u - 1]
for j in range(len(CP)):
if CP[j] > old_pos:
CP[j] -= 1
del C[old_pos] # Deleting old component
del S[old_pos] # Deleting old component size (line 267 .m file)
else: # If a cable crossing is detected
if pos_potential_edge <= half - 1:
edges_tot[pos_potential_edge, 4] = edges_tot[pos_potential_edge, 2] + 10**50
edges_tot[pos_potential_edge + half, 4] = edges_tot[pos_potential_edge + half, 2] + 10**50
else:
edges_tot[pos_potential_edge, 4] = edges_tot[pos_potential_edge, 2] + 10**50
edges_tot[pos_potential_edge - half, 4] = edges_tot[pos_potential_edge - half, 2] + 10**50
# %% Next code is when the potential edge is not connected directly to the OSS (node==1) and it does not create loops. Two cases: One of the components has as element node=1 or none of them.
if (flag1) and (flag2) and (flag3) and (flag4):
if (1 in C[CP[node1 - 1]]) or (1 in C[CP[node2 - 1]]): # One of the components has an element '1' (OSS)
flag3 = False # line 284 .m file
if (1 in C[CP[node1 - 1]]): # The component of node1 includes the root 1
if address_nodes[node1 - 1] == 1: # The node 1 is connected directly to the OSS (element '1')
tot_nodes = np.where(address_nodes == node1)[0].size + S[CP[node2 - 1]] + 1
else: # The node 1 is not connected directly to the OSS (element '1')
tot_nodes = np.where(address_nodes == address_nodes[node1 - 1])[0].size + S[CP[node2 - 1]] + 1
if tot_nodes > UL: # Evaluation of the capacity constraint: If true, proceeding to eliminate edges
if pos_potential_edge <= half - 1:
edges_tot[pos_potential_edge, 4] = edges_tot[pos_potential_edge, 2] + 10**50
edges_tot[pos_potential_edge + half, 4] = edges_tot[pos_potential_edge + half, 2] + 10**50
else:
edges_tot[pos_potential_edge, 4] = edges_tot[pos_potential_edge, 2] + 10**50
edges_tot[pos_potential_edge - half, 4] = edges_tot[pos_potential_edge - half, 2] + 10**50
else: # No violation of capacity constraint
if (not(intersection_checker(pos_potential_edge, edges_tot, mst_edges, X, Y, Inters_const))): # If no cables crossing, add the edge to the tree
mst_edges[pos_potential_edge] = True # Add it to the tree. line 301 .m file
# Update node address
if address_nodes[node1 - 1] == 1:
C_sliced_n2 = C[CP[node2 - 1]]
for j in range(len(C_sliced_n2)):
address_nodes[C_sliced_n2[j] - 1] = node1
else:
C_sliced_n2 = C[CP[node2 - 1]]
for j in range(len(C_sliced_n2)):
address_nodes[C_sliced_n2[j] - 1] = address_nodes[node1 - 1] # line 318 .m file
# Update weights and cost functions
if option == 1:
weight[node2 - 1] = 0
edges_tot[np.where(edges_tot[:, 0] == node2)[0], 3] = weight[node2 - 1]
edges_tot[np.where(edges_tot[:, 0] == node2)[0], 4] = edges_tot[np.where(edges_tot[:, 0] == node2)[0], 2] -\
edges_tot[np.where(edges_tot[:, 0] == node2)[0], 3]
elif option == 2:
pass
elif option == 3:
C_sliced_n1 = C[CP[node1 - 1]]
for j in range(len(C_sliced_n1)):
weight[C_sliced_n1[j] - 1] = weight[node2 - 1]
edges_tot[np.where(edges_tot[:, 0] == C_sliced_n1[j])[0], 3] = weight[node2 - 1]
edges_tot[np.where(edges_tot[:, 0] == C_sliced_n1[j])[0], 4] = edges_tot[np.where(edges_tot[:, 0] == C_sliced_n1[j])[0], 2] -\
edges_tot[np.where(edges_tot[:, 0] == C_sliced_n1[j])[0], 3]
else:
raise Exception('option should be either 1, 2 or 3 The value of x was: {}'.format(option)) # Weight and cost function updated. line 344 .m file
# Eliminating selected edge from edges potential list
if pos_potential_edge <= half - 1:
edges_tot[pos_potential_edge, 4] = edges_tot[pos_potential_edge, 2] + 10**50
edges_tot[pos_potential_edge + half, 4] = edges_tot[pos_potential_edge + half, 2] + 10**50
else:
edges_tot[pos_potential_edge, 4] = edges_tot[pos_potential_edge, 2] + 10**50
edges_tot[pos_potential_edge - half, 4] = edges_tot[pos_potential_edge - half, 2] + 10**50 # line 352 .m file
# Updating auxiliary matrix CP, C, S
u, v = min(node1, node2), max(node1, node2)
C_sliced_u, C_sliced_v = C[CP[u - 1]], C[CP[v - 1]]
S[CP[u - 1]] = len(C_sliced_u) + len(C_sliced_v) # Updating size of components
C[CP[u - 1]] += C[CP[v - 1]] # Merging two lists due to component's merge
old_pos = CP[v - 1]
for j in range(len(C_sliced_v)): # Updating components position for each merged node
CP[C_sliced_v[j] - 1] = CP[u - 1]
for j in range(len(CP)):
if CP[j] > old_pos:
CP[j] -= 1
del C[old_pos] # Deleting old component
del S[old_pos] # Deleting old component size (line 385 .m file)
else: # If a cable crossing is detected
if pos_potential_edge <= half - 1:
edges_tot[pos_potential_edge, 4] = edges_tot[pos_potential_edge, 2] + 10**50
edges_tot[pos_potential_edge + half, 4] = edges_tot[pos_potential_edge + half, 2] + 10**50
else:
edges_tot[pos_potential_edge, 4] = edges_tot[pos_potential_edge, 2] + 10**50
edges_tot[pos_potential_edge - half, 4] = edges_tot[pos_potential_edge - half, 2] + 10**50 # (line 396 .m file)
else: # The component of node2 includes the root 1
if address_nodes[node2 - 1] == 1: # The node 2 is connected directly to the OSS (element '1')
tot_nodes = np.where(address_nodes == node2)[0].size + S[CP[node1 - 1]] + 1
else: # The node 2 is not connected directly to the OSS (element '1')
tot_nodes = np.where(address_nodes == address_nodes[node2 - 1])[0].size + S[CP[node1 - 1]] + 1
if tot_nodes > UL: # Evaluation of the capacity constraint: If true, proceeding to eliminate edges
if pos_potential_edge <= half - 1:
edges_tot[pos_potential_edge, 4] = edges_tot[pos_potential_edge, 2] + 10**50
edges_tot[pos_potential_edge + half, 4] = edges_tot[pos_potential_edge + half, 2] + 10**50
else:
edges_tot[pos_potential_edge, 4] = edges_tot[pos_potential_edge, 2] + 10**50
edges_tot[pos_potential_edge - half, 4] = edges_tot[pos_potential_edge - half, 2] + 10**50
else: # No violation of capacity constraint
if (not(intersection_checker(pos_potential_edge, edges_tot, mst_edges, X, Y, Inters_const))): # If no cables crossing, add the edge to the tree
mst_edges[pos_potential_edge] = True # Add it to the tree. line 413 .m file
# Update node address
if address_nodes[node2 - 1] == 1:
C_sliced_n1 = C[CP[node1 - 1]]
for j in range(len(C_sliced_n1)):
address_nodes[C_sliced_n1[j] - 1] = node2
else:
C_sliced_n1 = C[CP[node1 - 1]]
for j in range(len(C_sliced_n1)):
address_nodes[C_sliced_n1[j] - 1] = address_nodes[node2 - 1] # line 430 .m file
# Update weights and cost functions
if option == 1:
weight[node2 - 1] = 0
edges_tot[np.where(edges_tot[:, 0] == node2)[0], 3] = weight[node2 - 1]
edges_tot[np.where(edges_tot[:, 0] == node2)[0], 4] = edges_tot[np.where(edges_tot[:, 0] == node2)[0], 2] -\
edges_tot[np.where(edges_tot[:, 0] == node2)[0], 3]
elif option == 2:
pass
elif option == 3:
C_sliced_n1 = C[CP[node1 - 1]]
for j in range(len(C_sliced_n1)):
weight[C_sliced_n1[j] - 1] = weight[node2 - 1]
edges_tot[np.where(edges_tot[:, 0] == C_sliced_n1[j])[0], 3] = weight[node2 - 1]
edges_tot[np.where(edges_tot[:, 0] == C_sliced_n1[j])[0], 4] = edges_tot[np.where(edges_tot[:, 0] == C_sliced_n1[j])[0], 2] -\
edges_tot[np.where(edges_tot[:, 0] == C_sliced_n1[j])[0], 3]
else:
raise Exception('option should be either 1, 2 or 3 The value of x was: {}'.format(option)) # Weight and cost function updated. line 456 .m file
# Eliminating selected edge from edges potential list
if pos_potential_edge <= half - 1:
edges_tot[pos_potential_edge, 4] = edges_tot[pos_potential_edge, 2] + 10**50
edges_tot[pos_potential_edge + half, 4] = edges_tot[pos_potential_edge + half, 2] + 10**50
else:
edges_tot[pos_potential_edge, 4] = edges_tot[pos_potential_edge, 2] + 10**50
edges_tot[pos_potential_edge - half, 4] = edges_tot[pos_potential_edge - half, 2] + 10**50 # line 464 .m file
# Updating auxiliary matrix CP, C, S
u, v = min(node1, node2), max(node1, node2)
C_sliced_u, C_sliced_v = C[CP[u - 1]], C[CP[v - 1]]
S[CP[u - 1]] = len(C_sliced_u) + len(C_sliced_v) # Updating size of components
C[CP[u - 1]] += C[CP[v - 1]] # Merging two lists due to component's merge
old_pos = CP[v - 1]
for j in range(len(C_sliced_v)): # Updating components position for each merged node
CP[C_sliced_v[j] - 1] = CP[u - 1]
for j in range(len(CP)):
if CP[j] > old_pos:
CP[j] -= 1
del C[old_pos] # Deleting old component
del S[old_pos] # Deleting old component size (line 497 .m file)
else: # If a cable crossing is detected
if pos_potential_edge <= half - 1:
edges_tot[pos_potential_edge, 4] = edges_tot[pos_potential_edge, 2] + 10**50
edges_tot[pos_potential_edge + half, 4] = edges_tot[pos_potential_edge + half, 2] + 10**50
else:
edges_tot[pos_potential_edge, 4] = edges_tot[pos_potential_edge, 2] + 10**50
edges_tot[pos_potential_edge - half, 4] = edges_tot[pos_potential_edge - half, 2] + 10**50 # (line 507 .m file)
else: # Node of the components has as element '1' (OSS)
flag4 = False
if (S[CP[node1 - 1]] + S[CP[node2 - 1]] > UL): # Evaluation of the capacity constraint: If true, proceeding to eliminate edges
if pos_potential_edge <= half - 1:
edges_tot[pos_potential_edge, 4] = edges_tot[pos_potential_edge, 2] + 10**50
edges_tot[pos_potential_edge + half, 4] = edges_tot[pos_potential_edge + half, 2] + 10**50
else:
edges_tot[pos_potential_edge, 4] = edges_tot[pos_potential_edge, 2] + 10**50
edges_tot[pos_potential_edge - half, 4] = edges_tot[pos_potential_edge - half, 2] + 10**50
else: # If no violation of the capacity constraint
if (not(intersection_checker(pos_potential_edge, edges_tot, mst_edges, X, Y, Inters_const))): # If no cables crossing, add the edge to the tree
mst_edges[pos_potential_edge] = True # Add it to the tree. line 522 .m file
# Update weights and cost functions
if option == 1:
weight[node2 - 1] = 0
edges_tot[np.where(edges_tot[:, 0] == node2)[0], 3] = weight[node2 - 1]
edges_tot[np.where(edges_tot[:, 0] == node2)[0], 4] = edges_tot[np.where(edges_tot[:, 0] == node2)[0], 2] -\
edges_tot[np.where(edges_tot[:, 0] == node2)[0], 3]
elif option == 2:
pass
elif option == 3:
C_sliced_n1 = C[CP[node1 - 1]]
for j in range(len(C_sliced_n1)):
weight[C_sliced_n1[j] - 1] = weight[node2 - 1]
edges_tot[np.where(edges_tot[:, 0] == C_sliced_n1[j])[0], 3] = weight[node2 - 1]
                                edges_tot[np.where(edges_tot[:, 0] == C_sliced_n1[j])[0], 4] = edges_tot[np.where(edges_tot[:, 0] == C_sliced_n1[j])[0], 2] -\
                                    edges_tot[np.where(edges_tot[:, 0] == C_sliced_n1[j])[0], 3]
import sys
import time
# pyStatReduce specific imports
import unittest
import numpy as np
import chaospy as cp
import copy
from pystatreduce.new_stochastic_collocation import StochasticCollocation2
from pystatreduce.stochastic_collocation import StochasticCollocation
from pystatreduce.monte_carlo import MonteCarlo
from pystatreduce.quantity_of_interest import QuantityOfInterest
from pystatreduce.dimension_reduction import DimensionReduction
from pystatreduce.stochastic_arnoldi.arnoldi_sample import ArnoldiSampling
from pystatreduce.examples.oas_scaneagle_proto import OASScanEagleWrapper, Fuelburn, StressConstraint, LiftConstraint, MomentConstraint
import pystatreduce.utils as utils
np.set_printoptions(precision=8)
np.set_printoptions(linewidth=150, suppress=True)
#pyoptsparse sepecific imports
from scipy import sparse
import argparse
import pyoptsparse # from pyoptsparse import Optimization, OPT, SNOPT
# Import the OpenMDAo shenanigans
from openmdao.api import IndepVarComp, Problem, Group, NewtonSolver, \
ScipyIterativeSolver, LinearBlockGS, NonlinearBlockGS, \
DirectSolver, LinearBlockGS, PetscKSP, SqliteRecorder, ScipyOptimizeDriver
from openaerostruct.geometry.utils import generate_mesh
from openaerostruct.geometry.geometry_group import Geometry
from openaerostruct.aerodynamics.aero_groups import AeroPoint
# Default mean values
mean_Ma = 0.071
mean_TSFC = 9.80665 * 8.6e-6 * 3600
mean_W0 = 10.0
mean_E = 85.e9
mean_G = 25.e9
mean_mrho = 1600
mean_R = 1800
mean_load_factor = 1.0
mean_altitude = 4.57
# Default standard values
std_dev_Ma = 0.005
std_dev_TSFC = 0.00607/3600
std_dev_W0 = 0.2
std_dev_mrho = 50
std_dev_R = 500
std_dev_load_factor = 0.1
std_dev_E = 5.e9
std_dev_G = 1.e9
std_dev_altitude = 0.5
num_y = 21 # Medium fidelity model
num_x = 3 #
mesh_dict = {'num_y' : num_y,
'num_x' : num_x,
'wing_type' : 'rect',
'symmetry' : True,
'span_cos_spacing' : 0.5,
'span' : 3.11,
'root_chord' : 0.3,
}
rv_dict = { 'Mach_number' : {'mean' : mean_Ma,
'std_dev' : std_dev_Ma},
'CT' : {'mean' : mean_TSFC,
'std_dev' : std_dev_TSFC},
'W0' : {'mean' : mean_W0,
'std_dev' : std_dev_W0},
'R' : {'mean' : mean_R,
'std_dev' : std_dev_R},
'load_factor' : {'mean' : mean_load_factor,
'std_dev' : std_dev_load_factor},
'E' : {'mean' : mean_E,
'std_dev' : std_dev_E},
'G' : {'mean' : mean_G,
'std_dev' : std_dev_G},
'mrho' : {'mean' : mean_mrho,
'std_dev' : std_dev_mrho},
}
input_dict = {'n_twist_cp' : 3,
'n_thickness_cp' : 3,
'n_CM' : 3,
'n_thickness_intersects' : 10,
'n_constraints' : 1 + 10 + 1 + 3 + 3,
'ndv' : 3 + 3 + 2,
'mesh_dict' : mesh_dict,
'rv_dict' : rv_dict
}
uq_systemsize = len(rv_dict)
mu_orig, std_dev = utils.get_scaneagle_input_rv_statistics(rv_dict)
jdist = cp.MvNormal(mu_orig, std_dev)
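# joint multivariate normal distribution over the 8 random variables declared in rv_dict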
# Create the base openaerostruct problem wrapper that will be used by the
# different quantity of interests
oas_obj = OASScanEagleWrapper(uq_systemsize, input_dict)
# Create the QoI objects
obj_QoI = Fuelburn(uq_systemsize, oas_obj)
failure_QoI = StressConstraint(uq_systemsize, oas_obj)
lift_con_QoI = LiftConstraint(uq_systemsize, oas_obj)
moment_con_QoI = MomentConstraint(uq_systemsize, oas_obj)
class OASScanEagleProtoTest(unittest.TestCase):
def test_OASScanEagleWrapper_functions(self):
mu_new = mu_orig + np.diagonal(std_dev)
oas_obj.update_rv(mu_new)
self.assertEqual(mu_new[0], obj_QoI.p['Mach_number'])
self.assertEqual(mu_new[1], obj_QoI.p['CT'])
self.assertEqual(mu_new[2], obj_QoI.p['W0'])
self.assertEqual(mu_new[3], obj_QoI.p['R'])
self.assertEqual(mu_new[4], obj_QoI.p['load_factor'])
self.assertEqual(mu_new[5], obj_QoI.p['E'])
self.assertEqual(mu_new[6], obj_QoI.p['G'])
self.assertEqual(mu_new[7], obj_QoI.p['mrho'])
# Revert to the original values
oas_obj.update_rv(mu_orig)
def test_Fuelburn_class(self):
# Check variables are being updated correctly
mu_new = mu_orig + np.diagonal(std_dev)
obj_QoI.update_rv(mu_new)
self.assertEqual(mu_new[0], obj_QoI.p['Mach_number'])
self.assertEqual(mu_new[1], obj_QoI.p['CT'])
self.assertEqual(mu_new[2], obj_QoI.p['W0'])
self.assertEqual(mu_new[3], obj_QoI.p['R'])
self.assertEqual(mu_new[4], obj_QoI.p['load_factor'])
self.assertEqual(mu_new[5], obj_QoI.p['E'])
self.assertEqual(mu_new[6], obj_QoI.p['G'])
self.assertEqual(mu_new[7], obj_QoI.p['mrho'])
# Check QoI value
fval = obj_QoI.eval_QoI(mu_orig, np.zeros(uq_systemsize))
true_val = 5.2059024220429615 # 5.229858093218218
err = abs(fval - true_val)
self.assertTrue(err < 1.e-6)
# Check the gradients w.r.t the random variables
dJdrv = obj_QoI.eval_QoIGradient(mu_orig, np.zeros(uq_systemsize))
true_val = np.array( [-86.161737972785,
20.460428652057,
0.439735298404,
0.003451150117,
6.014451860042,
-0.000000000001,
-0.,
0.879771028302])
err = abs(dJdrv - true_val) / true_val
# print('err = ', err)
self.assertTrue((err < 1.e-6).all())
def test_StressConstraint_class(self):
# Check variables are being updated correctly
        mu_new = mu_orig + np.diagonal(std_dev)
import sys, os
import numpy as np
from numpy.linalg import norm
from pyqtgraph.Qt import QtCore, QtGui
import pyqtgraph as pg
import pyqtgraph.opengl as gl
import networkx as nx
from networkx.drawing.nx_agraph import write_dot, graphviz_layout
import logging
import traceback
import timeit
import time
import math
from ast import literal_eval as make_tuple
import platform
import matplotlib as mpl
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D, proj3d
import glob
import pickle
import myFunctions as mf
import copy
from operator import itemgetter
from os.path import join
import inspect
from scipy.optimize import fsolve, fmin_tnc, least_squares, differential_evolution, minimize, fmin_l_bfgs_b, basinhopping
import myFunctions as mf
from scipy import stats
class FluidNetwork(object):
"""
Unified framework for doing the fluid simulation. At this stage, the graph used has already been reduced, i.e., each
edge represens a segment in `segmentList` and each node represents a bifurcation. Previously, each segment may be
consisted of one or more edges. To reduce the previous graph, use the function `reduceGraph`. Also, for the sake of
consistency, the original `segmentInfoDict` has been renamed to `edgeInfoDict`, `segmentList` to `edgeList`, but
`nodeInfoDict` remains the same. Besides, the nodes and edges are now indexed by integers starting from zero for
simplicity. Use the function `convertGraph` to do the conversion.
So the `fundemental staff` that you need to have are: `edgeList`, `edgeInfoDict`, `nodeInfoDict`, `G`. These are
necessary to all of the subsquent analysis. To perform a blood flow simulation, you need to do the following steps:
1. Get the graph and the `fundemental staff` by either creating one or loading an existing one.
2. Set c and k (used in H-W equation) for each edge by `setNetwork`.
3. Set the terminating pressures by `setTerminatingPressure`.
4. Generate H-W equations for each edge and flow conservation equations for each node by `setupFluidEquations`.
5. Solve the equations by optimization and use `computerNetworkDetail` as objective function.
The fluid simulation tries to solve the network by finding a set of pressures for each node and a set of flow rates
for each edges such that H-W equations and flow conservation equations are satisfied with the given set of
terminating pressures. For a binary tree structure without merges, a solution is guaranteed to exist no matter what
the terminating pressures look like. However, for the network with merges (e.g., the GBM network with CoW), it is
possible that a solution does not exist for the given set of terminating pressures. Therefore, for these cases, we
need to check the optimization result and check whether the error in each equations are within a acceptable range.
Note that not all the functions in this class should be used. Some are just for experimental purposes!
"""
def __init__(self):
self.directory = os.path.abspath(os.path.dirname(__file__))
self.edgeList = []
self.edgeIndexList = []
self.G = nx.Graph()
self.rootNode = 0
self.edgeInfoDict = {}
self.nodeInfoDict = {}
self.nodeIndex = 0
self.edgeIndex = 0
self.spacing = 0.00040 # meter/voxel
self.eqnInfoDictList = []
self.velocityPressure = []
self.velocityPressureGroundTruth = []
self.distributeFlowEqnDict = {}
self.nodeInfoDictBefore = {}
self.nodeInfoDictAfter = {}
self.edgeInfoDictBefore = {}
self.edgeInfoDictAfter = {}
def generateNetwork(self, maxDepth=10, allowMerge=False):
"""
Generate a binary tree with random edge and node properties.
Parameters
----------
maxDepth : int, optional
            Maximum depth of the graph (depth starts from zero).
        allowMerge : bool, optional
            If True, there will be a 30% chance that two edges at the same depth will merge together.
"""
G = nx.Graph()
nodeDepth, edgeDepth = 0, 0
G.add_node(0, depth=nodeDepth, depthLevel=nodeDepth, nodeIndex=self.nodeIndex, isEntryNode=True) # first node
self.nodeIndex += 1
while nodeDepth <= maxDepth - 1:
nodesAtCurrentDepth = [node for node in G.nodes() if G.node[node]['depth'] == nodeDepth]
if len(nodesAtCurrentDepth) > 2:
# Determine if merge would happen
if allowMerge:
mergeAtCurrentDepth = (np.random.rand() <= 0.3) # 30% probability TODO: this should be controlled by function arguments
else:
mergeAtCurrentDepth = False
# Merge nodes if allowed
if mergeAtCurrentDepth:
numOfMerges = 1 # TODO: this should be controlled by function arguments
nodesToMerge = np.random.choice(nodesAtCurrentDepth, 2, replace=False)
newNode = self.nodeIndex
newEdgeIndex1, newEdgeIndex2 = self.edgeIndex, self.edgeIndex + 1 # TODO: allow >2 edge merge?
G.add_edge(nodesToMerge[0], newNode, depth=edgeDepth, segmentLevel=edgeDepth, edgeIndex=self.edgeIndex, segmentIndex=self.edgeIndex)
G.add_edge(nodesToMerge[1], newNode, depth=edgeDepth, segmentLevel=edgeDepth, edgeIndex=self.edgeIndex + 1, segmentIndex=self.edgeIndex + 1)
G.node[newNode]['depth'] = nodeDepth + 1
G.node[newNode]['depthLevel'] = nodeDepth + 1
G.node[newNode]['nodeIndex'] = self.nodeIndex
G.node[newNode]['isEntryNode'] = False
self.nodeIndex += 1
self.edgeIndex += 2
for currentNode in nodesAtCurrentDepth:
numOfChildEdges = len([node for node in G[currentNode].keys() if G.node[node]['depth'] > nodeDepth])
numOfNewEdges = 2 - numOfChildEdges # TODO: allow for more child edges?
for ii in range(numOfNewEdges):
newNode = self.nodeIndex
G.add_edge(currentNode, newNode, depth=edgeDepth, segmentLevel=edgeDepth, edgeIndex=self.edgeIndex, segmentIndex=self.edgeIndex)
G.node[newNode]['depth'] = nodeDepth + 1
G.node[newNode]['depthLevel'] = nodeDepth + 1
G.node[newNode]['nodeIndex'] = self.nodeIndex
G.node[newNode]['isEntryNode'] = False
self.nodeIndex += 1
self.edgeIndex += 1
nodeDepth += 1
edgeDepth += 1
# Gather data
edgeList = [0] * self.edgeIndex
for edge in G.edges():
edgeIndex = G[edge[0]][edge[1]]['edgeIndex']
edgeList[edgeIndex] = edge
nodeIndexList = [G.node[node]['nodeIndex'] for node in G.nodes()]
edgeIndexList = [G[edge[0]][edge[1]]['edgeIndex'] for edge in edgeList]
nodeInfoDict, edgeInfoDict = {}, {}
for node in G.nodes():
nodeInfoDict[node] = G.node[node]
nodeInfoDict[node]['simulationData'] = {'pressure': None, 'flow': None} # placeholders, None means unset
nodeInfoDict[node]['coord'] = []
for edge in G.edges():
edgeIndex = G[edge[0]][edge[1]]['edgeIndex']
edgeInfoDict[edgeIndex] = G[edge[0]][edge[1]]
edgeInfoDict[edgeIndex]['simulationData'] = {'velocity': None, 'flow': None} # placeholders, None means unset
# Save
self.G = G
self.edgeList = edgeList
self.nodeIndexList = nodeIndexList
self.edgeIndexList = edgeIndexList
self.nodeInfoDict = nodeInfoDict
self.edgeInfoDict = edgeInfoDict
def loadNetwork(self, version=2, year=2013):
"""
        Load old versions of the data files (which need to be converted before use).
"""
directory = self.directory
if version == 1:
filename = 'basicFilesForStructureWithCoW(year={}).pkl'.format(year)
elif version == 2:
filename = 'basicFilesForStructureWithCoW2(year={}).pkl'.format(year)
elif version == 3:
filename = 'basicFilesForStructureWithCoW3(year={}).pkl'.format(year)
        elif version == 4:
            filename = 'basicFilesForStructureWithCoW4(year={}).pkl'.format(year)
        else:
            raise ValueError('Unsupported version: {}'.format(version)) # guard against an undefined filename below
with open(join(directory, filename), 'rb') as f:
resultDict = pickle.load(f)
with open(join(directory, 'partitionInfo.pkl'), 'rb') as f:
partitionInfo = pickle.load(f)
with open(join(directory, 'chosenVoxelsForPartition.pkl'), 'rb') as f:
chosenVoxels = pickle.load(f)
ADANFolder = os.path.abspath(join(directory, '../../../../'))
with open(join(ADANFolder, 'ADAN-Web/resultADANDict.pkl'), 'rb') as f:
resultADANDict = pickle.load(f)
resultDict['resultADANDict'] = resultADANDict
resultDict['partitionInfo'] = partitionInfo
resultDict['chosenVoxels'] = chosenVoxels
self.loadedNetwork = resultDict
def reduceGraph(self, G, segmentList, segmentIndexList):
"""
        Reduce the graph such that each node is either a terminating or a bifurcating point.
Parameters
----------
G : NetworkX graph
The graph representation of the network.
segmentList : list
A list of segments in which each segment is a simple branch.
segmentIndexList : list
            A list of segment indices referring to the segments in `segmentList` that are actually used.
Returns
-------
DG : NetworkX graph
The reduced graph (each edge refers to a segment).
"""
DG = nx.DiGraph()
for segmentIndex in segmentIndexList:
segment = segmentList[segmentIndex]
head, tail, secondNode = segment[0], segment[-1], segment[1]
headLevel, tailLevel = G.node[head]['depthLevel'], G.node[tail]['depthLevel']
if headLevel > tailLevel:
head, tail, secondNode = tail, head, segment[-2]
headLevel, tailLevel = tailLevel, headLevel
DG.add_path([head, tail])
for key, value in G[head][secondNode].items():
DG[head][tail][key] = value
for key, value in G.node[head].items():
DG.node[head][key] = value
for key, value in G.node[tail].items():
DG.node[tail][key] = value
return DG
def convertNetowrk(self):
"""
Convert the old version of data files into the new version used here.
"""
resultDict = self.loadedNetwork
GOld, segmentList, partitionInfo, chosenVoxels, segmentInfoDictOld, nodeInfoDictOld, resultADANDict = itemgetter('G', 'segmentList', 'partitionInfo', 'chosenVoxels', 'segmentInfoDict', 'nodeInfoDict', 'resultADANDict')(resultDict)
segmentIndexList = list(segmentInfoDictOld.keys())
heartLoc = (255, 251, 26) # change as needed
DG = self.reduceGraph(GOld, segmentList, segmentIndexList)
G = nx.Graph()
nodeInfoDict, edgeInfoDict = {}, {}
nodeIndex, edgeIndex = 0, 0
maxNodeDepth = np.max([DG.node[node]['depthLevel'] for node in DG.nodes()])
for currentDepth in range(maxNodeDepth + 1):
nodesAtCurrentDepth = [node for node in DG.nodes() if DG.node[node]['depthLevel'] == currentDepth]
for node in nodesAtCurrentDepth:
G.add_node(nodeIndex, depth=DG.node[node]['depthLevel'], nodeIndex=nodeIndex, coord=node)
DG.node[node]['nodeIndexHere'] = nodeIndex
if node == heartLoc:
G.node[nodeIndex]['isEntryNode'] = True
rootNode = nodeIndex
else:
G.node[nodeIndex]['isEntryNode'] = False
nodeIndex += 1
for edge in DG.edges():
depth = np.min([DG.node[edge[0]]['depthLevel'], DG.node[edge[1]]['depthLevel']])
DG[edge[0]][edge[1]]['depth'] = depth
maxEdgeDepth = np.max([DG[edge[0]][edge[1]]['depth'] for edge in DG.edges()])
for currentDepth in range(maxEdgeDepth + 1):
edgesAtCurrentDepth = [edge for edge in DG.edges() if DG[edge[0]][edge[1]]['depth'] == currentDepth]
for edge in edgesAtCurrentDepth:
G.add_edge(DG.node[edge[0]]['nodeIndexHere'], DG.node[edge[1]]['nodeIndexHere'], depth=currentDepth, edgeIndex=edgeIndex)
edgeIndex += 1
currentNodeIndex = nodeIndex
currentEdgeIndex = edgeIndex
edgeList = [[]] * edgeIndex
for edge in G.edges():
edgeIndex = G[edge[0]][edge[1]]['edgeIndex']
edgeList[edgeIndex] = edge
nodeIndexList = [G.node[node]['nodeIndex'] for node in G.nodes()]
edgeIndexList = [G[edge[0]][edge[1]]['edgeIndex'] for edge in edgeList]
for node in DG.nodes():
nodeIndexHere = DG.node[node]['nodeIndexHere']
nodeInfoDict[nodeIndexHere] = DG.node[node]
nodeInfoDict[nodeIndexHere]['simulationData'] = {'pressure': None, 'flow': None} # placeholders, None means unset
nodeInfoDict[nodeIndexHere]['coord'] = []
for edge in DG.edges():
edgeIndex = G[DG.node[edge[0]]['nodeIndexHere']][DG.node[edge[1]]['nodeIndexHere']]['edgeIndex']
segmentIndex = DG[edge[0]][edge[1]]['segmentIndex']
edgeInfoDict[edgeIndex] = DG[edge[0]][edge[1]]
edgeInfoDict[edgeIndex]['length'] = DG[edge[0]][edge[1]]['pathLength'] # backward compatibility
edgeInfoDict[edgeIndex]['simulationData'] = {'velocity': None, 'flow': None} # placeholders, None means unset
# Sync between G and nodeInfoDict
for node in G.nodes():
for key, value in G.node[node].items():
nodeInfoDict[node][key] = value
# Save
self.G = G
self.edgeIndex = currentEdgeIndex
self.nodeIndex = currentNodeIndex
self.edgeList = edgeList
self.nodeIndexList = nodeIndexList
self.edgeIndexList = edgeIndexList
self.nodeInfoDict = nodeInfoDict
self.edgeInfoDict = edgeInfoDict
self.rootNode = rootNode
def adjustNetwork(self):
"""
If the network changes, recheck the correspondence between branch name and edgeIndex!
"""
G = self.G
edgeList = self.edgeList
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
edgeIndexList = self.edgeIndexList
spacing = self.spacing
# LICA(Pre)
edgeInfoDict[0]['meanRadius'] = 3.3 / (spacing * 1000) # mm->voxel
edgeInfoDict[0]['length'] = 1.5 / (spacing * 1000) # mm->voxel
# LICA(Post)
edgeInfoDict[3]['meanRadius'] = 3.3 / (spacing * 1000) # mm->voxel
edgeInfoDict[3]['length'] = 1.5 / (spacing * 1000) # mm->voxel
# RICA(Pre)
edgeInfoDict[2]['meanRadius'] = 3.3 / (spacing * 1000) # mm->voxel
edgeInfoDict[2]['length'] = 1.5 / (spacing * 1000) # mm->voxel
# RICA(Post)
edgeInfoDict[7]['meanRadius'] = 3.3 / (spacing * 1000) # mm->voxel
edgeInfoDict[7]['length'] = 1.5 / (spacing * 1000) # mm->voxel
# VA
# edgeInfoDict[1]['meanRadius'] = 2.0 / (spacing * 1000) # mm->voxel
edgeInfoDict[1]['length'] = 28 / (spacing * 1000) # mm->voxel
# RPCAComm
edgeInfoDict[4]['length'] = 16 / (spacing * 1000) # mm->voxel
# RMCA(first segment)
# edgeInfoDict[12]['length'] = 8 / (spacing * 1000) # mm->voxel
# Save
self.nodeInfoDict = nodeInfoDict
self.edgeInfoDict = edgeInfoDict
def setNetwork(self, option=1, extraInfo=None):
"""
Set c and k (and possibly radius and length) for each branch
"""
directory = self.directory
G = self.G
edgeList = self.edgeList
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
edgeIndexList = self.edgeIndexList
spacing = self.spacing
# Use BraVa data to set the radius and ADAN result to set the c and k
if option == 1:
minSetLength, maxSetLength = 1, 70 # mm
# Params used to fit radius to edgeLevel using the BraVa data. radius (mm) = a * np.exp(-b * edgeLevel) + c
fitResultDict = {'LMCA': {'param': [0.5569, 0.4199, 0.469]}, 'RMCA': {'param': [0.6636, 0.3115, 0.3666]}, 'LPCA': {'param': [0.6571, 0.3252, 0.2949]}, 'RPCA': {'param': [0.7103, 0.5587, 0.3815]}, 'ACA': {'param': [0.3604, 1.0538, 0.4714]}} # new names
# fitResultDict = {'LCA': {'param': [0.5569, 0.4199, 0.469]}, 'RCA': {'param': [0.6636, 0.3115, 0.3666]}, 'LACA': {'param': [0.6571, 0.3252, 0.2949]}, 'RACA': {'param': [0.7103, 0.5587, 0.3815]}, 'PCA': {'param': [0.3604, 1.0538, 0.4714]}} # old names
a, b, c = fitResultDict['LMCA']['param']
for edgeIndex in edgeIndexList:
edgeLevel = edgeInfoDict[edgeIndex]['depth']
radius = (a * np.exp(-b * edgeLevel) + c) / (spacing * 1000) # voxel
edgeInfoDict[edgeIndex]['meanRadius'] = radius # voxel
length = (np.random.rand() * (maxSetLength - minSetLength) + minSetLength) / (spacing * 1000) # voxel
edgeInfoDict[edgeIndex]['pathLength'] = length # for backward compatibility
edgeInfoDict[edgeIndex]['length'] = length # voxel
ADANFolder = os.path.abspath(join(directory, '../../../../'))
with open(join(ADANFolder, 'ADAN-Web/resultADANDict.pkl'), 'rb') as f:
resultADANDict = pickle.load(f)
print('resultADANDict.pkl loaded from {}'.format(ADANFolder))
slopeCRadius, interceptCRadius = resultADANDict['slopeCRadius'], resultADANDict['interceptCRadius']
radiusThresholds, CKCandidates, numOfCCategory = resultADANDict['radiusThresholds'], resultADANDict['CKCandidates'], resultADANDict['numOfCCategory']
minRadius, maxRadius = np.min(radiusThresholds), np.max(radiusThresholds) # meter
slopePressureRadius, interceptPressureRadius = resultADANDict['slopePressureRadius'], resultADANDict['interceptPressureRadius']
for edgeIndex in edgeIndexList:
edge = edgeList[edgeIndex]
radius = edgeInfoDict[edgeIndex]['meanRadius'] * spacing
if radius > minRadius and radius < maxRadius:
binIndex = np.digitize([radius], radiusThresholds)[0] - 1
c, k = CKCandidates[binIndex], CKCandidates[-1] # assuming c is different for each branch and k is the same
edgeInfoDict[edgeIndex]['c'], edgeInfoDict[edgeIndex]['k'] = c, k
else:
c = np.poly1d([slopeCRadius, interceptCRadius])(radius) # extrapolate
k = CKCandidates[-1] # assuming c is different for each branch and k is the same
c = c if c > 0 else 0.1
edgeInfoDict[edgeIndex]['c'], edgeInfoDict[edgeIndex]['k'] = c, k
# Only set c and k using ADAN result
elif option == 2:
ADANFolder = os.path.abspath(join(directory, '../../../../'))
with open(join(ADANFolder, 'ADAN-Web/resultADANDict.pkl'), 'rb') as f:
resultADANDict = pickle.load(f)
print('resultADANDict.pkl loaded from {}'.format(ADANFolder))
if extraInfo is not None:
excludedEdgeIndex = itemgetter('excludedEdgeIndex')(extraInfo)
slopeCRadius, interceptCRadius = resultADANDict['slopeCRadius'], resultADANDict['interceptCRadius']
# print('slopeCRadius={}, interceptCRadius={}'.format(slopeCRadius, interceptCRadius))
radiusThresholds, CKCandidates, numOfCCategory = resultADANDict['radiusThresholds'], resultADANDict['CKCandidates'], resultADANDict['numOfCCategory']
minRadius, maxRadius = np.min(radiusThresholds), np.max(radiusThresholds) # meter
slopePressureRadius, interceptPressureRadius = resultADANDict['slopePressureRadius'], resultADANDict['interceptPressureRadius']
# if extraInfo is not None:
# edgeIndexListToUse = [edgeIndex for edgeIndex in edgeIndexList if edgeIndex not in excludedEdgeIndex]
# else:
# edgeIndexListToUse = edgeIndexList
edgeIndexListToUse = edgeIndexList
for edgeIndex in edgeIndexListToUse:
edge = edgeList[edgeIndex]
radius = edgeInfoDict[edgeIndex]['meanRadius'] * spacing
if radius > minRadius and radius < maxRadius:
binIndex = np.digitize([radius], radiusThresholds)[0] - 1
c, k = CKCandidates[binIndex], CKCandidates[-1] # assuming c is different for each branch and k is the same
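                    # note: the binned value of c above is immediately overridden by the linear radius fit below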
c = np.poly1d([slopeCRadius, interceptCRadius])(radius) # extrapolate
edgeInfoDict[edgeIndex]['c'], edgeInfoDict[edgeIndex]['k'] = c, k
else:
c = np.poly1d([slopeCRadius, interceptCRadius])(radius) # extrapolate
k = CKCandidates[-1] # assuming c is different for each branch and k is the same
# c = c if c > 0 else 0.1
if radius * 1000 >= 1.5 and radius * 1000 <= 2.5:
c = 1
else:
if c < 0:
c = 0.1
edgeInfoDict[edgeIndex]['c'], edgeInfoDict[edgeIndex]['k'] = c, k
# Save
self.nodeInfoDict = nodeInfoDict
self.edgeInfoDict = edgeInfoDict
def showFlowInfo(self):
"""
Print out flow rates for selected edges and pressure for selected nodes.
"""
G = self.G
edgeList = self.edgeList
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
spacing = self.spacing
for edgeIndex in range(16):
flow = edgeInfoDict[edgeIndex]['simulationData']['flow']
radius, length, c, k = itemgetter('meanRadius', 'length', 'c', 'k')(edgeInfoDict[edgeIndex])
if flow is not None:
flow *= 10**6 # convert to cm^3/s
else:
                flow = -1 # -1 indicates that the value is unset
radius *= (spacing * 100) # convert to cm
length *= (spacing * 100) # convert to cm
print('Edge {}: flow={:.3f} cm^3/s, radius={:.4f} cm, length={:.4f} cm, c={:.4f}, k={:.4f}'.format(edgeIndex, flow, radius, length, c, k))
print('\n')
for node in range(16):
flow, pressure = itemgetter('flow', 'pressure')(nodeInfoDict[node]['simulationData'])
if flow is not None:
flow *= 10**6 # convert to cm^3/s
else:
flow = -1
if pressure is not None:
pressure /= (13560*9.8/1000) # convert to mmHg
else:
pressure = -1
print('Node {}: flow={:.3f} cm^3/s, pressure={:.3f} mmHg'.format(node, flow, pressure))
def getFlowInfoFromDeltaPressure(self, edgeIndex, deltaPressure):
"""
Calculate the required flow/velocity in order to achieve the given pressure drop for the specific edge.
Parameters
----------
edgeIndex : int
The index of the edge.
deltaPressure : float
The desired pressure drop with a unit of Pascal.
Returns
-------
flow : float
            The required flow rate to achieve the desired pressure drop, with a unit of m^3/s.
velocity : float
The velocity in that edge corresponding to the required flow rate.
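        Notes
        -----
        This inverts the Hazen-Williams-style relation used throughout this class,
        ``deltaPressure = 10.67 * flow**k * length / c**k / (2*radius)**4.8704``,
        and solves for the flow. A minimal usage sketch (hedged; assumes ``network`` is an
        instance of this class whose edge 0 already has 'meanRadius', 'length', 'c' and 'k' set)::

            flow, velocity = network.getFlowInfoFromDeltaPressure(0, 100.0) # 100 Pa drop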
"""
edgeInfoDict = self.edgeInfoDict
spacing = self.spacing
radius = edgeInfoDict[edgeIndex]['meanRadius'] * spacing # meter
length = edgeInfoDict[edgeIndex]['length'] * spacing # meter
c, k = itemgetter('c', 'k')(edgeInfoDict[edgeIndex])
flow = np.power(deltaPressure * c**k * (2*radius)**4.8704 / 10.67 / length, 1/k) # m^3/s
velocity = flow / (np.pi * radius**2) # m/s
return flow, velocity
def getDeltaPressureFromFlow(self, edgeIndex, flow):
"""
Calculate the required pressure drop in order to achieve the given flow for the specific edge.
Parameters
----------
edgeIndex : int
The index of the edge.
flow : float
            The desired flow rate of the edge, with a unit of m^3/s.
Returns
-------
deltaPressure : float
The required pressure drop in the edge to achieve the desired flow rate with a unit of Pascal.
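        Notes
        -----
        This is the forward Hazen-Williams-style relation (the inverse of `getFlowInfoFromDeltaPressure`).
        A minimal usage sketch (hedged; assumes ``network`` is an instance of this class with edge 0 fully populated)::

            deltaPressure = network.getDeltaPressureFromFlow(0, 3e-6) # 3 cm^3/s expressed in m^3/s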
"""
edgeInfoDict = self.edgeInfoDict
spacing = self.spacing
radius = edgeInfoDict[edgeIndex]['meanRadius'] * spacing # meter
length = edgeInfoDict[edgeIndex]['length'] * spacing # meter
c, k = itemgetter('c', 'k')(edgeInfoDict[edgeIndex])
deltaPressure = 10.67 * flow**k * length / c**k / (2*radius)**4.8704
return deltaPressure
def createGroundTruth(self, seed=None, option=1):
"""
        Manually set the velocity and pressure for all edges/nodes in order to check whether the solver is correct.
        Option 1: each child branch randomly takes ~1/N (with some random fluctuation) of the parent flow.
        Option 2: the flow is split proportionally to the cross-sectional area of the child branches.
        Returns True on success; the resulting ground-truth solution is also stored in `self.velocityPressureGroundTruth`.
"""
directory = self.directory
G = self.G
edgeList = self.edgeList
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
edgeIndexList = self.edgeIndexList
spacing = self.spacing
success = False
# Set argsIndex (index of pressure/velocity unknowns in the fluid simulation)
argsIndex = 0
for edgeIndex in edgeIndexList:
edgeInfoDict[edgeIndex]['argsIndex'] = argsIndex
argsIndex += 1
for node in G.nodes():
nodeInfoDict[node]['isBifurcatingNode'] = False
nodeList = [node for node in G.nodes() if node != 0 and G.degree(node) != 1]
for node in nodeList:
nodeInfoDict[node]['argsIndex'] = argsIndex
nodeInfoDict[node]['isBifurcatingNode'] = True
argsIndex += 1
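        # At this point argsIndex runs over all edges first (velocities) and then over the internal nodes (pressures),
        # which fixes the [v0, v1, ..., p0, p1, ...] layout later assumed by getVelocityPressure()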
minSetVelocity, maxSetVelocity = 0.01, 3 # m/s
        inletPressure = 13560 * 9.8 * 0.12 # Pascal (120 mmHg under the rho*g*h convention used throughout this file)
        inletVelocity = 1.5 # m/s
        inletFlow = 754/60/10**6 # m^3/s (i.e., 754 cm^3/min)
        minSplitAmount, maxSplitAmount = 0.4, 0.6
maxDepth = np.max([info['depth'] for node, info in nodeInfoDict.items()])
for currentDepth in range(maxDepth):
## first deal with the nodes whose child edge merges
nodesAtNextDepth = [node for node in G.nodes() if nodeInfoDict[node]['depth'] == currentDepth + 1]
for nodeAtNextDepth in nodesAtNextDepth:
parentNodes = [node for node in G[nodeAtNextDepth].keys() if nodeInfoDict[node]['depth'] == currentDepth]
# parentNodes = [node for node in G[nodeAtNextDepth].keys() if nodeInfoDict[node]['depth'] < nodeInfoDict[nodeAtNextDepth]['depth']]
if len(parentNodes) > 1:
# print('Node {} merge into {}'.format(parentNodes, nodeAtNextDepth))
flowAtParentNodes = [nodeInfoDict[node]['simulationData']['flow'] for node in parentNodes] # m^3/s
degreeAtParentNodes = [G.degree(node) for node in parentNodes]
pressureAtParentNodes = [nodeInfoDict[node]['simulationData']['pressure'] for node in parentNodes]
parentEdgeIndexList = [G[nodeAtNextDepth][node]['edgeIndex'] for node in parentNodes]
parentEdgeDeltaPressureList = [self.getDeltaPressureFromFlow(edgeIndex, flow) for edgeIndex, flow in zip(parentEdgeIndexList, flowAtParentNodes)]
nodeMinPressureList = [headPressure - deltaPressure for headPressure, deltaPressure in zip(pressureAtParentNodes, parentEdgeDeltaPressureList)]
if degreeAtParentNodes[0] == 2 and degreeAtParentNodes[1] > 2:
loc1, loc2 = 0, 1
isEdge1StraightPipe, isEdge2StraightPipe = True, False
elif degreeAtParentNodes[0] > 2 and degreeAtParentNodes[1] == 2:
loc1, loc2 = 1, 0
isEdge1StraightPipe, isEdge2StraightPipe = True, False
elif degreeAtParentNodes[0] == 2 and degreeAtParentNodes[1] == 2:
loc1, loc2 = 0, 1
isEdge1StraightPipe, isEdge2StraightPipe = True, True
if nodeMinPressureList[0] != nodeMinPressureList[1]:
success = False
print('Error! Two straight edges cannot achieve the same end pressure')
return success
print('Warning! Two straight edges merge into one node')
else:
if nodeMinPressureList[0] > nodeMinPressureList[1]:
loc1, loc2 = 0, 1
else:
loc1, loc2 = 1, 0
isEdge1StraightPipe, isEdge2StraightPipe = False, False
edgeIndex1, edgeIndex2 = parentEdgeIndexList[loc1], parentEdgeIndexList[loc2]
parentNode1, parentNode2 = parentNodes[loc1], parentNodes[loc2]
parentPressure1, parentPressure2 = pressureAtParentNodes[loc1], pressureAtParentNodes[loc2]
parentFlow1, parentFlow2 = flowAtParentNodes[loc1], flowAtParentNodes[loc2]
radius1, radius2 = edgeInfoDict[edgeIndex1]['meanRadius'] * spacing, edgeInfoDict[edgeIndex2]['meanRadius'] * spacing
length1, length2 = edgeInfoDict[edgeIndex1]['length'] * spacing, edgeInfoDict[edgeIndex2]['length'] * spacing
c1, c2 = edgeInfoDict[edgeIndex1]['c'], edgeInfoDict[edgeIndex2]['c']
k1, k2 = edgeInfoDict[edgeIndex1]['k'], edgeInfoDict[edgeIndex2]['k']
flowCounter = 0
# for the first edge
maxPossibleFlow = parentFlow1
minDeltaPressure = np.max([0, pressureAtParentNodes[loc1] - pressureAtParentNodes[loc2]])
minPossibleFlow, _ = self.getFlowInfoFromDeltaPressure(parentEdgeIndexList[loc1], minDeltaPressure)
if minPossibleFlow > maxPossibleFlow:
success = False
print('Error while merging node {} to node {}, minPossibleFlow ({}) is larger than maxPossibleFlow ({})'.format(parentNodes, nodeAtNextDepth, minPossibleFlow, maxPossibleFlow))
return success
if isEdge1StraightPipe:
flow1 = parentFlow1
if flow1 >= minPossibleFlow and flow1 <= maxPossibleFlow:
pass
else:
print('Edge {} wants to use all flow={} from node {}, but possible range is [{}, {}]'.format(edgeIndex1, flow1, parentNode1, minPossibleFlow, maxPossibleFlow))
else:
# flow1 = np.random.rand() * (maxPossibleFlow - minPossibleFlow) + minPossibleFlow
flow1 = (maxPossibleFlow + minPossibleFlow) / 2
## Manual manipulation !!! ##
if nodeAtNextDepth == 10:
if edgeIndex1 == 9:
flow1 = maxPossibleFlow * 0.15 # used to be 0.3
print('Edge {} gets flow={} cm^3/s'.format(edgeIndex1, flow1*10**6))
elif edgeIndex1 == 11:
flow1 = maxPossibleFlow * 0.15 # used to be 0.3
print('Edge {} gets flow={} cm^3/s'.format(edgeIndex1, flow1*10**6))
# radius8, radius9 = edgeInfoDict[8]['meanRadius'], edgeInfoDict[9]['meanRadius']
# flow9 = maxPossibleFlow * radius9**2 / (radius8**2 + radius9**2)
# print('Edge {} get flow={}'.format(edgeIndex1, flow1))
velocity1 = flow1 / (np.pi * radius1**2) # m/s
edgeInfoDict[edgeIndex1]['simulationData']['velocity'] = velocity1
edgeInfoDict[edgeIndex1]['simulationData']['flow'] = flow1
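                        # same H-W pressure-drop relation as getDeltaPressureFromFlow(), written out inline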
deltaPressure1 = 10.67 * flow1**k1 * length1 / c1**k1 / (2*radius1)**4.8704
tailPressure = parentPressure1 - deltaPressure1 # pressure at the merging node
nodeInfoDict[nodeAtNextDepth]['simulationData']['pressure'] = tailPressure
flowCounter += flow1
# the other edge
deltaPressure2 = parentPressure2 - tailPressure
flow2 = np.power(deltaPressure2 / 10.67 / length2 * c2**k2 * (2*radius2)**4.8704, 1/k2)
velocity2 = flow2 / (np.pi * radius2**2) # m/s
edgeInfoDict[edgeIndex2]['simulationData']['velocity'] = velocity2
edgeInfoDict[edgeIndex2]['simulationData']['flow'] = flow2
flowCounter += flow2
nodeInfoDict[nodeAtNextDepth]['simulationData']['flow'] = flowCounter
if flow2 > parentFlow2:
print('Node {}: the flow ({}) in other edge is larger than provided ({})'.format(nodeAtNextDepth, flow2, parentFlow2))
print('edgeIndex1={}, edgeIndex2={}, flow1={}, flow2={}'.format(edgeIndex1, edgeIndex2, flow1, flow2))
print(nodeInfoDict[1]['simulationData']['pressure']/13560/9.8*1000, nodeInfoDict[3]['simulationData']['pressure']/13560/9.8*1000, nodeInfoDict[2]['simulationData']['pressure']/13560/9.8*1000)
## Now deal with remaining nodes
nodesAtCurrentDepth = [node for node in G.nodes() if nodeInfoDict[node]['depth'] == currentDepth]
for currentNode in nodesAtCurrentDepth:
if currentDepth == 0:
nodeInfoDict[currentNode]['simulationData']['pressure'] = inletPressure
nodeInfoDict[currentNode]['simulationData']['flow'] = inletFlow
flowIn = inletFlow
pressureIn = inletPressure
# print('inletPressure={} mmHg, inletFlow={} cm^3/s, currentDepth={}'.format(inletPressure/13560/9.8*1000, inletFlow*10**6, currentDepth))
else:
flowIn = nodeInfoDict[currentNode]['simulationData']['flow']
if flowIn is None:
print('Node {} has flow=None, nodesAtCurrentDepth={}'.format(currentNode, nodesAtCurrentDepth))
pressureIn = nodeInfoDict[currentNode]['simulationData']['pressure']
edgeIndexAtNextDepth = [G[currentNode][neighborNode]['edgeIndex'] for neighborNode in G[currentNode].keys() if nodeInfoDict[neighborNode]['depth'] > currentDepth]
edgeIndexToProcess = [edgeIndex for edgeIndex in edgeIndexAtNextDepth if edgeInfoDict[edgeIndex]['simulationData']['flow'] is None]
edgeIndexCompleted = [edgeIndex for edgeIndex in edgeIndexAtNextDepth if edgeInfoDict[edgeIndex]['simulationData']['flow'] is not None]
edgeCounter = len(edgeIndexToProcess)
flowAvailable = nodeInfoDict[currentNode]['simulationData']['flow']
for edgeIndex in edgeIndexCompleted:
flowAvailable -= edgeInfoDict[edgeIndex]['simulationData']['flow']
if flowAvailable < 0 - np.finfo(float).eps:
flowIn = nodeInfoDict[currentNode]['simulationData']['flow']
flowUsed = ['Edge {}: {}'.format(edgeIndex, edgeInfoDict[edgeIndex]['simulationData']['flow']) for edgeIndex in edgeIndexCompleted]
print('Error! Node {}: flowIn={}, flowUsed={}, flowAvailable={}'.format(currentNode, flowIn, flowUsed, flowAvailable))
flowAmount = []
                # Randomly split the flow (within a range)
if option == 1:
while edgeCounter >= 1:
if edgeCounter > 1:
basePercentage = 100 / edgeCounter
fluctuationPercentage = basePercentage / 3.0
actualPercentage = basePercentage - fluctuationPercentage/2 + np.random.rand() * fluctuationPercentage
# actualPercentage = (np.random.rand() * 0.8 + 0.1) * 100
flow = flowAvailable * actualPercentage / 100
if flow < 0:
print('Node {}: flow < 0, actualPercentage={}, flowAvailable={}'.format(currentNode, actualPercentage, flowAvailable))
flowAmount.append(flow)
flowAvailable -= flow
if flowAvailable < 0:
print('Node {}: flowAvailable < 0, actualPercentage={}'.format(currentNode, actualPercentage))
else:
flowAmount.append(flowAvailable)
edgeCounter -= 1
elif option == 2:
radiusList = [edgeInfoDict[edgeIndex]['meanRadius'] for edgeIndex in edgeIndexToProcess]
radiusSqList = [radius**2 for radius in radiusList]
sumOfRadiusSq = np.sum(radiusSqList)
flowAmount = [flowAvailable * radiusSq / sumOfRadiusSq for radiusSq in radiusSqList]
## Manual manipulation !!! ###
if currentNode == 0 and G.degree(currentNode) == 3:
edgeIndexToProcess = [0, 2, 1] # LICA/RICA/VA
inletFlow = nodeInfoDict[currentNode]['simulationData']['flow']
flowAmount = [inletFlow*0.4, inletFlow*0.4, inletFlow*0.2]
# elif currentNode == 8:
# edgeIndexToProcess = [16, 17] #
# inletFlow = nodeInfoDict[currentNode]['simulationData']['flow']
# flowAmount = [inletFlow*0.7, inletFlow*0.3]
# elif currentNode == 9:
# edgeIndexToProcess = [18, 19] #
# inletFlow = nodeInfoDict[currentNode]['simulationData']['flow']
# flowAmount = [inletFlow*0.7, inletFlow*0.3]
for edgeIndex, flow in zip(edgeIndexToProcess, flowAmount):
edge = edgeList[edgeIndex]
radius = edgeInfoDict[edgeIndex]['meanRadius'] * spacing # meter
velocity = flow / (np.pi * radius**2) # m/s
edgeHead, edgeTail = edge[0], edge[1]
if nodeInfoDict[edgeHead]['depth'] > nodeInfoDict[edgeTail]['depth']:
edgeHead, edgeTail = edgeTail, edgeHead
pressureHead = nodeInfoDict[edgeHead]['simulationData']['pressure']
c, k = edgeInfoDict[edgeIndex]['c'], edgeInfoDict[edgeIndex]['k']
length = edgeInfoDict[edgeIndex]['length'] * spacing # meter
deltaPressure = 10.67 * (velocity * np.pi * radius**2)**k * length / c**k / (2 * radius)**4.8704 # Pascal
if np.isnan(deltaPressure):
print('velocity={}, flow={}'.format(velocity, flow))
pressureTail = pressureHead - deltaPressure # Pascal
nodeInfoDict[edgeTail]['simulationData']['pressure'] = pressureTail
nodeInfoDict[edgeTail]['simulationData']['flow'] = flow
# print('Node {} (head={}, edgeIndex={}), flow={}'.format(edgeTail, edgeHead, edgeIndex, flow))
edgeInfoDict[edgeIndex]['simulationData']['velocity'] = velocity
edgeInfoDict[edgeIndex]['simulationData']['flow'] = flow
# print('Pressure at {} = {} mmHg, currentDepth={}'.format(edgeTail, pressureTail/13560/9.8*1000, currentDepth))
# if edgeIndex ==5 or edgeIndex == 6:
# print('Node {}, edgeIndex={}, flow={} cm^3/s, deltaPressure={} mmHg'.format(currentNode, edgeIndex, flow*10**6, deltaPressure/13560/9.8*1000))
velocityPressure = [0] * argsIndex
for node in G.nodes():
if 'argsIndex' in nodeInfoDict[node]:
argsIndex = nodeInfoDict[node]['argsIndex']
pressure = nodeInfoDict[node]['simulationData']['pressure']
velocityPressure[argsIndex] = pressure
for edgeIndex in edgeIndexList:
if 'argsIndex' in edgeInfoDict[edgeIndex]:
argsIndex = edgeInfoDict[edgeIndex]['argsIndex']
velocity = edgeInfoDict[edgeIndex]['simulationData']['velocity']
velocityPressure[argsIndex] = velocity
# Save
self.nodeInfoDict = nodeInfoDict
self.edgeInfoDict = edgeInfoDict
self.velocityPressure = velocityPressure # Ground truth solution
self.velocityPressureGroundTruth = velocityPressure # Ground truth solution
success = True
return success
def getVelocityPressure(self):
"""
Extract velocity and pressure from edgeInfoDict and nodeInfoDict.
Returns
-------
velocityPressure : list
A list of velocities and pressures in the form of [v0, v1,..., vN, p0, p1,..., pN].
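        Notes
        -----
        A minimal usage sketch (hedged; assumes ``network`` already holds a solved or ground-truth state)::

            velocityPressure = network.getVelocityPressure()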
"""
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
edgeIndexList = self.edgeIndexList
numOfNodes = len([node for node in nodeInfoDict if 'argsIndex' in nodeInfoDict[node]])
numOfEdges = len([edgeIndex for edgeIndex in edgeIndexList if 'argsIndex' in edgeInfoDict[edgeIndex]])
        velocityPressure = np.zeros(numOfEdges + numOfNodes) # velocities first, then pressures; dtype defaults to float
for node, info in nodeInfoDict.items():
if 'argsIndex' in info:
argsIndex = info['argsIndex']
pressure = info['simulationData']['pressure']
velocityPressure[argsIndex] = pressure
for edgeIndex, info in edgeInfoDict.items():
if 'argsIndex' in info:
argsIndex = info['argsIndex']
velocity = info['simulationData']['velocity']
velocityPressure[argsIndex] = velocity
return velocityPressure
def getVolumePerPartition(self):
"""
Calculate the total volume of each compartment.
        Returns
        -------
        volumePerPartition : dict
            A dictionary with compartment names as keys and volumes (with a unit of mm^3) as corresponding values.
"""
partitionInfo = {'LMCA': {'startNodes': [4], 'boundaryNodes': [10]}, 'RMCA': {'startNodes': [5], 'boundaryNodes': [10]},
'LPCA': {'startNodes': [6], 'boundaryNodes': []}, 'RPCA': {'startNodes': [7], 'boundaryNodes': []}, 'ACA': {'startNodes': [10], 'boundaryNodes': []}}
edgeInfoDict = self.edgeInfoDict
spacing = self.spacing
volumePerPartition = {'LMCA': [], 'RMCA': [], 'LPCA': [], 'RPCA': [], 'ACA': []}
for partitionName, info in partitionInfo.items():
startNodes, boundaryNodes = itemgetter('startNodes', 'boundaryNodes')(info)
resultDict = self.BFS(startNodes, boundaryNodes)
visitedNodes, visitedEdges = itemgetter('visitedNodes', 'visitedEdges')(resultDict)
totalVolume = 0
for edgeIndex in visitedEdges:
radius, length= itemgetter('meanRadius', 'length')(edgeInfoDict[edgeIndex])
radius = radius * spacing * 1000 # mm
length = length * spacing * 1000 # mm
edgeVolume = np.pi * radius**2 * length # mm^3
totalVolume += edgeVolume
volumePerPartition[partitionName] = totalVolume
return volumePerPartition
def showTerminatingPressureAndPathLength(self):
"""
Check terminating pressure vs path length relationship.
"""
directory = self.directory
G = self.G
edgeList = self.edgeList
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
edgeIndexList = self.edgeIndexList
spacing = self.spacing
maxDepth = np.max([info['depth'] for node, info in nodeInfoDict.items()])
terminatingNodes = [node for node in G.nodes() if nodeInfoDict[node]['depth'] == maxDepth]
terminatingPressure = [nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000 for node in terminatingNodes] # mmHg
        terminatingPathLength = []
for node in terminatingNodes:
path = nx.shortest_path(G, self.rootNode, node)
pathEdgeIndex = [G[path[ii]][path[ii+1]]['edgeIndex'] for ii in range(len(path) - 1)]
pathLength = np.sum([edgeInfoDict[edgeIndex]['length'] * spacing for edgeIndex in pathEdgeIndex]) # meter
            terminatingPathLength.append(pathLength)
fig = plt.figure(1, figsize=(15, 8))
plt.subplots_adjust(left=0.06, right=0.94, top=0.94, bottom=0.06, wspace=0.3, hspace=0.3)
        plt.plot(terminatingPathLength, terminatingPressure, 'bo')
plt.xlabel('Path length (m)')
plt.ylabel('Terminating pressure (mmHg)')
plt.show()
def setupFluidEquations(self, boundaryCondition=None):
"""
        Programmatically store the information needed to generate the conservation equations used for the fluid simulation (each dict represents one equation).
        There are three kinds of equations: a Hazen-Williams (H-W) equation for each edge, a flow conservation equation for each bifurcating node and, optionally,
        boundary conditions (inlet or outlet velocities). For the H-W equation, the
        information is stored in a dictionary as:
{'type': 'pressure', 'radius': radius, 'length': length, 'velocityIndex': velocityIndex, 'c': c, 'k': k, 'edgeIndex': edgeIndex}
For the flow conservation equation, the information is stored as:
{'type': 'flow', 'velocityInIndexList': velocityInIndexList, 'radiusInList': radiusInList,
'velocityOutIndexList': velocityOutIndexList, 'radiusOutList': radiusOutList, 'coord': nodeInfoDict[node]['coord'],
'nodeIndex': nodeInfoDict[node]['nodeIndex'], 'neighborsInEdgeIndex': neighborsIndexIn, 'neighborsOutEdgeIndex': neighborsIndexOut}
For the boundary conditions (inlet or outlet velocity), the information is stored as:
{'type': 'boundary', 'velocityIndex': velocityIndex, 'velocityIn': velocityIn}
        All of the units are SI units. The dictionaries that hold these equations are then stored in `eqnInfoDictList`.
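
        A minimal usage sketch (hedged; the node/edge indices and velocity below are illustrative only)::

            boundaryCondition = {0: {'edgeIndex': 0, 'velocityIn': 1.5}} # prescribe 1.5 m/s through edge 0
            network.setupFluidEquations(boundaryCondition=boundaryCondition)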
"""
directory = self.directory
G = self.G
edgeList = self.edgeList
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
edgeIndexList = self.edgeIndexList
spacing = self.spacing
eqnInfoDictList = []
numOfFlowEqns, numOfPressureEqns, numOfBoundaryConditionEqns = 0, 0, 0
for node in G.nodes():
if nodeInfoDict[node]['isBifurcatingNode']:
neighborsIndexIn = [G[node][neighborIn]['edgeIndex'] for neighborIn in G.neighbors(node) if 'depth' in G.node[neighborIn] and G.node[neighborIn]['depth'] < G.node[node]['depth']]
neighborsIndexOut = [G[node][neighborOut]['edgeIndex'] for neighborOut in G.neighbors(node) if 'depth' in G.node[neighborOut] and G.node[neighborOut]['depth'] > G.node[node]['depth']]
radiusInList = [edgeInfoDict[neighborIndexIn]['meanRadius'] * spacing for neighborIndexIn in neighborsIndexIn]
radiusOutList = [edgeInfoDict[neighborIndexOut]['meanRadius'] * spacing for neighborIndexOut in neighborsIndexOut]
velocityInIndexList = [edgeInfoDict[neighborIndexIn]['argsIndex'] for neighborIndexIn in neighborsIndexIn]
velocityOutIndexList = [edgeInfoDict[neighborIndexOut]['argsIndex'] for neighborIndexOut in neighborsIndexOut]
if len(radiusInList) != 0 and len(radiusOutList) != 0: # Exclude the nodes at inlet and outlet
eqnInfoDict = {'type': 'flow', 'velocityInIndexList': velocityInIndexList, 'radiusInList': radiusInList,
'velocityOutIndexList': velocityOutIndexList, 'radiusOutList': radiusOutList, 'coord': nodeInfoDict[node]['coord'],
'nodeIndex': nodeInfoDict[node]['nodeIndex'], 'neighborsInEdgeIndex': neighborsIndexIn, 'neighborsOutEdgeIndex': neighborsIndexOut}
eqnInfoDictList.append(eqnInfoDict)
numOfFlowEqns += 1
else:
print('node={}, len(radiusInList)={}, len(radiusOutList)={}'.format(node, len(radiusInList), len(radiusOutList)))
for edgeIndex in edgeIndexList:
edge = edgeList[edgeIndex]
radius = edgeInfoDict[edgeIndex]['meanRadius'] * spacing
length = edgeInfoDict[edgeIndex]['length'] * spacing
velocityIndex = edgeInfoDict[edgeIndex]['argsIndex']
c, k = edgeInfoDict[edgeIndex]['c'], edgeInfoDict[edgeIndex]['k']
eqnInfoDict = {'type': 'pressure', 'radius': radius, 'length': length, 'velocityIndex': velocityIndex, 'c': c, 'k': k, 'edgeIndex': edgeIndex}
if nodeInfoDict[edge[0]]['depth'] < nodeInfoDict[edge[-1]]['depth']:
headNode, tailNode = edge[0], edge[-1]
else:
headNode, tailNode = edge[-1], edge[0]
# head pressure
if nodeInfoDict[headNode]['isEntryNode'] is True or G.degree(headNode) == 1:
headPressure = nodeInfoDict[headNode]['simulationData']['pressure']
eqnInfoDict['headPressureInfo'] = {'pressure': headPressure}
else:
headPressureIndex = nodeInfoDict[headNode]['argsIndex']
headNodeIndex = nodeInfoDict[headNode]['nodeIndex']
eqnInfoDict['headPressureInfo'] = {'pressureIndex': headPressureIndex, 'nodeIndex': headNodeIndex}
# tail pressure
if nodeInfoDict[tailNode]['isEntryNode'] is True or G.degree(tailNode) == 1:
tailPressure = nodeInfoDict[tailNode]['simulationData']['pressure']
eqnInfoDict['tailPressureInfo'] = {'pressure': tailPressure}
# print('Tail node {} has pressure={} mmHg'.format(tailNode, tailPressure/13560/9.8*1000))
else:
tailPressureIndex = nodeInfoDict[tailNode]['argsIndex']
tailNodeIndex = nodeInfoDict[tailNode]['nodeIndex']
eqnInfoDict['tailPressureInfo'] = {'pressureIndex': tailPressureIndex, 'nodeIndex': tailNodeIndex}
eqnInfoDictList.append(eqnInfoDict)
numOfPressureEqns += 1
if boundaryCondition is not None and len(boundaryCondition) != 0 and 'pressureIn' not in boundaryCondition:
for boundaryNode, info in boundaryCondition.items():
edgeIndex = info['edgeIndex']
velocityIn = info['velocityIn']
edge = edgeList[edgeIndex]
velocityIndex = edgeInfoDict[edgeIndex]['argsIndex']
eqnInfoDict = {'type': 'boundary', 'velocityIndex': velocityIndex, 'velocityIn': velocityIn}
eqnInfoDictList.append(eqnInfoDict)
numOfBoundaryConditionEqns += 1
print('There are {} flow eqns, {} pressure eqns and {} boundary condition eqns'.format(numOfFlowEqns, numOfPressureEqns, numOfBoundaryConditionEqns))
self.eqnInfoDictList = eqnInfoDictList
def setupFluidEquationsMatLab(self, boundaryCondition=None):
"""
        Programmatically store the information needed to generate the conservation equations used for the fluid simulation (each dict represents one equation).
        Note that the Python-MATLAB bridge only accepts native Python types, and thus all numpy types need to be converted before being passed across.
"""
directory = self.directory
G = self.G
edgeList = self.edgeList
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
edgeIndexList = self.edgeIndexList
spacing = self.spacing
eqnInfoDictList = []
numOfFlowEqns, numOfPressureEqns, numOfBoundaryConditionEqns = 0, 0, 0
for node in G.nodes():
if nodeInfoDict[node]['isBifurcatingNode']:
neighborsIndexIn = [G[node][neighborIn]['edgeIndex'] for neighborIn in G.neighbors(node) if 'depth' in G.node[neighborIn] and G.node[neighborIn]['depth'] < G.node[node]['depth']]
neighborsIndexOut = [G[node][neighborOut]['edgeIndex'] for neighborOut in G.neighbors(node) if 'depth' in G.node[neighborOut] and G.node[neighborOut]['depth'] > G.node[node]['depth']]
radiusInList = [float(edgeInfoDict[neighborIndexIn]['meanRadius'] * spacing) for neighborIndexIn in neighborsIndexIn]
radiusOutList = [float(edgeInfoDict[neighborIndexOut]['meanRadius'] * spacing) for neighborIndexOut in neighborsIndexOut]
velocityInIndexList = [int(edgeInfoDict[neighborIndexIn]['argsIndex']) for neighborIndexIn in neighborsIndexIn]
velocityOutIndexList = [int(edgeInfoDict[neighborIndexOut]['argsIndex']) for neighborIndexOut in neighborsIndexOut]
if len(radiusInList) != 0 and len(radiusOutList) != 0: # Exclude the nodes at inlet and outlet
eqnInfoDict = {'type': 'flow', 'velocityInIndexList': velocityInIndexList, 'radiusInList': radiusInList,
'velocityOutIndexList': velocityOutIndexList, 'radiusOutList': radiusOutList, 'coord': nodeInfoDict[node]['coord'],
'nodeIndex': int(nodeInfoDict[node]['nodeIndex']), 'neighborsInEdgeIndex': neighborsIndexIn, 'neighborsOutEdgeIndex': neighborsIndexOut}
eqnInfoDictList.append(eqnInfoDict)
numOfFlowEqns += 1
else:
print('node={}, len(radiusInList)={}, len(radiusOutList)={}'.format(node, len(radiusInList), len(radiusOutList)))
for edgeIndex in edgeIndexList:
edge = edgeList[edgeIndex]
radius = float(edgeInfoDict[edgeIndex]['meanRadius'] * spacing)
length = float(edgeInfoDict[edgeIndex]['length'] * spacing)
velocityIndex = int(edgeInfoDict[edgeIndex]['argsIndex'])
c, k = float(edgeInfoDict[edgeIndex]['c']), float(edgeInfoDict[edgeIndex]['k'])
eqnInfoDict = {'type': 'pressure', 'radius': radius, 'length': length, 'velocityIndex': velocityIndex, 'c': c, 'k': k, 'edgeIndex': int(edgeIndex)}
if nodeInfoDict[edge[0]]['depth'] < nodeInfoDict[edge[-1]]['depth']:
headNode, tailNode = edge[0], edge[-1]
else:
headNode, tailNode = edge[-1], edge[0]
# head pressure
if nodeInfoDict[headNode]['isEntryNode'] is True or G.degree(headNode) == 1:
headPressure = float(nodeInfoDict[headNode]['simulationData']['pressure'])
eqnInfoDict['headPressureInfo'] = {'pressure': headPressure}
else:
headPressureIndex = int(nodeInfoDict[headNode]['argsIndex'])
headNodeIndex = int(nodeInfoDict[headNode]['nodeIndex'])
eqnInfoDict['headPressureInfo'] = {'pressureIndex': headPressureIndex, 'nodeIndex': headNodeIndex}
# tail pressure
if nodeInfoDict[tailNode]['isEntryNode'] is True or G.degree(tailNode) == 1:
tailPressure = float(nodeInfoDict[tailNode]['simulationData']['pressure'])
eqnInfoDict['tailPressureInfo'] = {'pressure': tailPressure}
else:
tailPressureIndex = int(nodeInfoDict[tailNode]['argsIndex'])
tailNodeIndex = int(nodeInfoDict[tailNode]['nodeIndex'])
eqnInfoDict['tailPressureInfo'] = {'pressureIndex': tailPressureIndex, 'nodeIndex': tailNodeIndex}
eqnInfoDictList.append(eqnInfoDict)
numOfPressureEqns += 1
if boundaryCondition is not None and len(boundaryCondition) != 0 and 'pressureIn' not in boundaryCondition:
for boundaryNode, info in boundaryCondition.items():
edgeIndex = int(info['edgeIndex'])
velocityIn = float(info['velocityIn'])
edge = edgeList[edgeIndex]
velocityIndex = int(edgeInfoDict[edgeIndex]['argsIndex'])
eqnInfoDict = {'type': 'boundary', 'velocityIndex': velocityIndex, 'velocityIn': velocityIn}
eqnInfoDictList.append(eqnInfoDict)
numOfBoundaryConditionEqns += 1
print('There are {} flow eqns, {} pressure eqns and {} boundary condition eqns'.format(numOfFlowEqns, numOfPressureEqns, numOfBoundaryConditionEqns))
self.eqnInfoDictList = eqnInfoDictList
def setupEquationsForDistributeFlow(self):
"""
        Set up the equations for distributeFlowTest(). This function is unfinished. TODO
        The result is stored in `distributeFlowEqnDict`, which contains three fields:
-- 'connectInfoDictList' --
It is a list of dicts and each dict represents an edge and it contains:
-- 'connection' -- In the form of [headNode, edgeIndex, tailNode]
-- 'edgeInfo' -- Contains subfields 'c'/'k'/'radius'/'length'
-- 'mergeInfoDict' --
Each merging node is a key and the corresponding value is empty (for now)
-- 'desiredTerminatingPressures' --
Each terminating node is a key and the corresponding value is the desired terminating pressure for that node
"""
G = self.G
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
edgeIndexList = self.edgeIndexList
edgeList = self.edgeList
spacing = self.spacing
distributeFlowEqnDict = {'connectInfoDictList': [], 'mergeInfoDict': {}, 'desiredTerminatingPressures': {}}
edgeDepthArray = np.array([edgeInfoDict[edgeIndex]['depth'] for edgeIndex in edgeIndexList])
edgeIndexListSorted = np.array(edgeIndexList)[edgeDepthArray.argsort()].tolist()
for edgeIndex in edgeIndexListSorted:
edge = edgeList[edgeIndex]
headNode, tailNode = edge
if nodeInfoDict[headNode]['depth'] > nodeInfoDict[tailNode]['depth']:
headNode, tailNode = tailNode, headNode
radius = edgeInfoDict[edgeIndex]['meanRadius'] * spacing
length = edgeInfoDict[edgeIndex]['length'] * spacing
c, k = edgeInfoDict[edgeIndex]['c'], edgeInfoDict[edgeIndex]['k']
distributeFlowEqnDict['connectInfoDictList'].append({'connection': [headNode, edgeIndex, tailNode], 'edgeInfo': {'radius': radius, 'length': length, 'c': c, 'k': k}})
for currentNode in G.nodes():
parentNodes = [node for node in G[currentNode].keys() if nodeInfoDict[node]['depth'] < nodeInfoDict[currentNode]['depth']]
if len(parentNodes) > 1:
distributeFlowEqnDict['mergeInfoDict'][currentNode] = {}
for node in G.nodes():
if G.degree(node) == 1 and nodeInfoDict[node]['depth'] != 0:
distributeFlowEqnDict['desiredTerminatingPressures'][node] = 13560*9.8*0.12 # Pascal
print(edgeIndexListSorted)
print(distributeFlowEqnDict['mergeInfoDict'])
# Save #
self.distributeFlowEqnDict = distributeFlowEqnDict
def validateFluidEquations(self, velocityPressure=None, boundaryCondition=None):
"""
        Validate whether all of the equations generated by `setupFluidEquations` are satisfied. This function prints the error
        for each equation together with the corresponding details. Note that the error for each equation is amplified in the same
        way as in the function `computeNetworkDetail`.
Parameters
----------
velocityPressure : list
A list of velocities and pressures in the form of [v0, v1,..., vN, p0, p1,..., pN].
"""
directory = self.directory
G = self.G
edgeList = self.edgeList
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
edgeIndexList = self.edgeIndexList
spacing = self.spacing
eqnInfoDictList = self.eqnInfoDictList
if velocityPressure is None:
velocityPressure = self.velocityPressure
counter = 0
pressureErrorList, flowErrorList = [], []
pressureErrorTrueList, flowErrorTrueList = [], []
for eqnInfoDict in eqnInfoDictList:
eqnType = eqnInfoDict['type']
if eqnType == 'pressure':
radius, length, velocityIndex, edgeIndex = itemgetter('radius', 'length', 'velocityIndex', 'edgeIndex')(eqnInfoDict)
velocity = np.abs(velocityPressure[velocityIndex])
c, k = eqnInfoDict['c'], eqnInfoDict['k']
if 'pressure' in eqnInfoDict['headPressureInfo']:
headPressure = eqnInfoDict['headPressureInfo']['pressure']
elif 'pressureIndex' in eqnInfoDict['headPressureInfo']:
pressureIndex = eqnInfoDict['headPressureInfo']['pressureIndex']
headPressure = velocityPressure[pressureIndex]
headPressureInmmHg = headPressure / 13560 / 9.8 * 1000
if 'pressure' in eqnInfoDict['tailPressureInfo']:
tailPressure = eqnInfoDict['tailPressureInfo']['pressure']
elif 'pressureIndex' in eqnInfoDict['tailPressureInfo']:
pressureIndex = eqnInfoDict['tailPressureInfo']['pressureIndex']
tailPressure = velocityPressure[pressureIndex]
tailPressureInmmHg = tailPressure / 13560 / 9.8 * 1000
deltaPressureByNode = np.abs(headPressure - tailPressure)
deltaPressureByHW = 10.67 * (velocity * np.pi * radius**2)**k * length / c**k / (2 * radius)**4.8704
error = np.abs(deltaPressureByNode - deltaPressureByHW)
deltaPressureByHWInmmHg = deltaPressureByHW / 13560 / 9.8 * 1000
errorInmmHg = error / 13560 / 9.8 * 1000
pressureErrorList.append(errorInmmHg * 500)
pressureErrorTrueList.append(errorInmmHg)
print('error={:.4f} mmHg, headP={:.2f} mmHg, tailP={:.2f} mmHg, headP>tailP={}, deltaPByHW={:.2f} mmHg, velocity={:.3f} cm/s, radius={:.4f} cm, length={:.4f} cm, edgeIndex={}'.format(errorInmmHg,
headPressureInmmHg, tailPressureInmmHg, headPressure>tailPressure, deltaPressureByHWInmmHg, velocity*100, radius*100, length*100, edgeIndex))
if headPressure <= tailPressure:
counter += 1
elif eqnType == 'flow':
velocityInIndexList, radiusInList = eqnInfoDict['velocityInIndexList'], eqnInfoDict['radiusInList']
velocityOutIndexList, radiusOutList = eqnInfoDict['velocityOutIndexList'], eqnInfoDict['radiusOutList']
neighborsInEdgeIndex, neighborsOutEdgeIndex = itemgetter('neighborsInEdgeIndex', 'neighborsOutEdgeIndex')(eqnInfoDict)
velocityInList = [np.abs(velocityPressure[velocityIndex]) for velocityIndex in velocityInIndexList]
velocityOutList = [np.abs(velocityPressure[velocityIndex]) for velocityIndex in velocityOutIndexList]
flowIn = np.sum([velocity * np.pi * radius**2 for velocity, radius in zip(velocityInList, radiusInList)])
flowOut = np.sum([velocity * np.pi * radius**2 for velocity, radius in zip(velocityOutList, radiusOutList)])
                error = np.abs(flowIn - flowOut)
inVel = [np.round(100*vel, 4) for vel in velocityInList]
inR = [np.round(100*r, 4) for r in radiusInList]
inFlow = np.round(flowIn*10**6, 4)
outVel = [np.round(100*vel, 4) for vel in velocityOutList]
outR = [np.round(100*r, 4) for r in radiusOutList]
outFlow = np.round(flowOut*10**6, 4)
errorT = np.round(error*10**6, 4)
coord = eqnInfoDict['coord']
flowErrorList.append(error * 10**6 * 20000)
flowErrorTrueList.append(error * 10**6)
print('error={} cm^3/s, inVel={} cm/s, inR={} cm, inFlow={} cm^3/s, outVel={} cm/s, outR={} cm, outFlow={} cm^3/s, coord={}'.format(errorT, inVel, inR, inFlow, outVel, outR, outFlow, coord))
elif eqnType == 'boundary':
velocityIndex, velocityIn = eqnInfoDict['velocityIndex'], eqnInfoDict['velocityIn']
velocityActual = np.abs(velocityPressure[velocityIndex])
error = np.abs(velocityActual - velocityIn)
print('error={}, desired inlet velocity={} cm/s, actual velocity={} cm/s'.format(error, velocityIn*100, velocityActual*100))
totalErrorList = pressureErrorList + flowErrorList
totalError = norm(totalErrorList)
        print('There are {} pressure eqns where headPressure<=tailPressure'.format(counter))
print('Pressure error: mean+-std={}+-{} mmHg, min={} mmHg, max={} mmHg'.format(np.mean(pressureErrorTrueList), np.std(pressureErrorTrueList), np.amin(pressureErrorTrueList), np.max(pressureErrorTrueList)))
print('Flow error: mean+-std={}+-{} cm^3/s, min={} cm^3/s, max={} cm^3/s'.format(np.mean(flowErrorTrueList), np.std(flowErrorTrueList), np.amin(flowErrorTrueList), np.max(flowErrorTrueList)))
print('Combined error (magnified): {}'.format(totalError))
def BFS(self, startNodes, boundaryNodes):
"""
Start from given node(s), visit other nodes at larger depth in a BFS fashion.
Parameters
----------
startNodes : list
A list of nodes to start with.
boundaryNodes : list
A list of nodes used as the boundary.
Returns
-------
resultDict : dict
A dictionary containing the indices of visited edges and nodes.
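        Notes
        -----
        A minimal usage sketch (hedged; mirrors how this method is called elsewhere in this file,
        e.g., to collect the LMCA compartment)::

            resultDict = network.BFS(startNodes=[4], boundaryNodes=[10])
            visitedNodes, visitedEdges = resultDict['visitedNodes'], resultDict['visitedEdges']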
"""
G = self.G
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
visitedNodes, visitedEdges = [], []
for startNode in startNodes:
nodesAtCurrentDepth = [startNode]
while len(nodesAtCurrentDepth) != 0:
nodesAtNextDepth = []
for currentNode in nodesAtCurrentDepth:
visitedNodes.append(currentNode)
newNodes = [node for node in G[currentNode].keys() if nodeInfoDict[currentNode]['depth'] < nodeInfoDict[node]['depth'] and node not in boundaryNodes and node not in visitedNodes]
newEdges = [G[currentNode][newNode]['edgeIndex'] for newNode in newNodes]
nodesAtNextDepth += newNodes
visitedEdges += newEdges
nodesAtCurrentDepth = nodesAtNextDepth
resultDict = {'visitedNodes': visitedNodes, 'visitedEdges': visitedEdges}
return resultDict
def calculateVariableBounds(self):
"""
Calculate the pressure bound for each node and velocity bound for each branch (because pressure at child nodes
cannot be higher than that of the parent node).
"""
G = self.G
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
maxDepth = np.max([info['depth'] for node, info in nodeInfoDict.items()])
for currentDepth in range(maxDepth-1, 0, -1):
nodesAtCurrentDepth = [node for node in G.nodes() if nodeInfoDict[node]['depth'] == currentDepth and G.degree(node) != 1]
for nodeAtCurrentDepth in nodesAtCurrentDepth:
childNodes = [node for node in G[nodeAtCurrentDepth].keys() if nodeInfoDict[node]['depth'] > currentDepth]
minPressureAtChildNodes = [nodeInfoDict[node]['simulationData']['minPressure'] if 'argsIndex' in nodeInfoDict[node] else nodeInfoDict[node]['simulationData']['pressure'] for node in childNodes]
nodeInfoDict[nodeAtCurrentDepth]['simulationData']['minPressure'] = np.amax(minPressureAtChildNodes)
# print('minPressure for node {} is set'.format(nodeAtCurrentDepth))
# Save #
self.nodeInfoDict = nodeInfoDict
def perturbNetwork(self, option=1, extraInfo=None):
"""
Perturb the network in various ways
Option=1: randomly choose {numOfEdgesToPerturb} branches and decrease the radius by {reducePercentage}
Option=2: use the radius from year={perturbedYear}
        Option=3: the radii of the edges in {partitionToPerturb} are decreased by {reducePercentage}
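
        A minimal usage sketch (hedged; the values mirror the defaults and partition names used in this file)::

            network.perturbNetwork(option=1, extraInfo={'numOfEdgesToPerturb': 5, 'reducePercentage': 30})
            network.perturbNetwork(option=3, extraInfo={'partitionToPerturb': ['LMCA'], 'reducePercentage': 30})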
"""
G = self.G
edgeList = self.edgeList
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
edgeIndexList = self.edgeIndexList
spacing = self.spacing
if option == 1:
if extraInfo is None:
numOfEdgesToPerturb = 5
reducePercentage = 30
else:
numOfEdgesToPerturb, reducePercentage = itemgetter('numOfEdgesToPerturb', 'reducePercentage')(extraInfo)
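            # note: np.random.choice samples with replacement here, so the same edge may be perturbed more than once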
edgeIndexToPerturb = np.random.choice(edgeIndexList, numOfEdgesToPerturb)
for edgeIndex in edgeIndexToPerturb:
edgeInfoDict[edgeIndex]['meanRadius'] *= (1 - reducePercentage / 100)
elif option == 2:
perturbedYear, excludedEdgeIndex = itemgetter('perturbedYear', 'excludedEdgeIndex')(extraInfo)
self.loadNetwork(version=4, year=perturbedYear)
resultDict = self.loadedNetwork
GOld, segmentList, partitionInfo, chosenVoxels, segmentInfoDictOld, nodeInfoDictOld, resultADANDict = itemgetter('G', 'segmentList', 'partitionInfo', 'chosenVoxels', 'segmentInfoDict', 'nodeInfoDict', 'resultADANDict')(resultDict)
for edgeIndex in edgeIndexList:
if edgeIndex not in excludedEdgeIndex:
segmentIndex = edgeInfoDict[edgeIndex]['segmentIndex'] # segmentIndex is the index of the edges in the old files
perturbedRadius = segmentInfoDictOld[segmentIndex]['meanRadius']
edgeInfoDict[edgeIndex]['meanRadius'] = perturbedRadius
elif option == 3:
partitionInfo = {'LMCA': {'startNodes': [4], 'boundaryNodes': [10]}, 'RMCA': {'startNodes': [5], 'boundaryNodes': [10]},
'LPCA': {'startNodes': [6], 'boundaryNodes': []}, 'RPCA': {'startNodes': [7], 'boundaryNodes': []},
'ACA': {'startNodes': [10], 'boundaryNodes': []}}
partitionToPerturb, reducePercentage = itemgetter('partitionToPerturb', 'reducePercentage')(extraInfo)
for partitionName, info in partitionInfo.items():
if partitionName in partitionToPerturb:
startNodes, boundaryNodes = itemgetter('startNodes', 'boundaryNodes')(info)
resultDict = self.BFS(startNodes, boundaryNodes)
visitedNodes, visitedEdges = itemgetter('visitedNodes', 'visitedEdges')(resultDict)
for edgeIndex in visitedEdges:
edgeInfoDict[edgeIndex]['meanRadius'] *= (1 - reducePercentage / 100)
# Save
self.nodeInfoDict = nodeInfoDict
self.edgeInfoDict = edgeInfoDict
def perturbTerminatingPressure(self, option=1, extraInfo=None):
"""
Perturb the terminating pressure in various ways
Option=1: pressureDecreasePerPartition = {'LMCA': 0.3, 'RMCA': -0.01, 'ACA': 0.05, 'LPCA': -0.02, 'RPCA': 0.02}
Option=2: No change
Option=3: All left compartments -30%, no change to all other compartments
        Option=4: pressureDropChangePerPartition = {'LMCA': 0.14, 'RMCA': -0.45, 'ACA': -0.26, 'LPCA': 0.095, 'RPCA': 0.44}
Option=5: pressureDropChangePerPartition obtained from extraInfo
"""
G = self.G
edgeList = self.edgeList
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
edgeIndexList = self.edgeIndexList
spacing = self.spacing
partitionInfo = {'LMCA': {'startNodes': [4], 'boundaryNodes': [10]}, 'RMCA': {'startNodes': [5], 'boundaryNodes': [10]}, 'ACA': {'startNodes': [10], 'boundaryNodes': []},
'LPCA': {'startNodes': [6], 'boundaryNodes': []}, 'RPCA': {'startNodes': [7], 'boundaryNodes': []}}
if option == 1:
pressureDecreasePerPartition = {'LMCA': 0.3, 'RMCA': -0.01, 'ACA': 0.05, 'LPCA': -0.02, 'RPCA': 0.02}
elif option == 2:
pressureDecreasePerPartition = {'LMCA': 0, 'RMCA': 0, 'ACA': 0, 'LPCA': 0, 'RPCA': 0}
elif option == 3:
pressureDecreasePerPartition = {'LMCA': -0.3, 'RMCA': 0, 'ACA': 0, 'LPCA': -0.3, 'RPCA': 0}
elif option == 4:
pressureDropChangePerPartition = {'LMCA': 0.14, 'RMCA': -0.45, 'ACA': -0.26, 'LPCA': 0.095, 'RPCA': 0.44}
elif option == 5:
pressureDropChangePerPartition = extraInfo['pressureDropChangePerPartition']
rootPressure = 13560*9.8*0.12 # Pa
for partitionName, info in partitionInfo.items():
startNodes, boundaryNodes = itemgetter('startNodes', 'boundaryNodes')(info)
resultDict = self.BFS(startNodes, boundaryNodes)
visitedNodes, visitedEdges = itemgetter('visitedNodes', 'visitedEdges')(resultDict)
terminatingNodesInThisPartition = [node for node in visitedNodes if G.degree(node) == 1 and nodeInfoDict[node]['depth'] != 0]
# terminatingPressuresInThisPartition = []
for terminatingNode in terminatingNodesInThisPartition:
if option in [1,2,3]:
decreaseAmount = pressureDecreasePerPartition[partitionName]
nodeInfoDict[terminatingNode]['simulationData']['pressure'] *= (1-decreaseAmount)
elif option in [4, 5]:
changeAmount = pressureDropChangePerPartition[partitionName]
oldPressure = nodeInfoDict[terminatingNode]['simulationData']['pressure']
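                    # scale the pressure drop from the root down to this terminating node by (1 + changeAmount)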
newPressure = rootPressure - (rootPressure - oldPressure) * (1+changeAmount)
nodeInfoDict[terminatingNode]['simulationData']['pressure'] = newPressure
# terminatingPressuresInThisPartition.append(np.round(nodeInfoDict[terminatingNode]['simulationData']['pressure']/13560/9.8*1000, 2)) # mmHg
# terminatingPressuresInThisPartition = list(sorted(terminatingPressuresInThisPartition))
# print('Terminating pressures in {} are {} mmHg'.format(partitionName, terminatingPressuresInThisPartition))
self.nodeInfoDict = nodeInfoDict
def printTerminatingPressurePerPartition(self, partitionInfo=None):
"""
Print out terminating pressures in each compartment.
"""
G = self.G
edgeList = self.edgeList
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
edgeIndexList = self.edgeIndexList
spacing = self.spacing
if partitionInfo is None:
partitionInfo = {'LMCA': {'startNodes': [4], 'boundaryNodes': [10]}, 'RMCA': {'startNodes': [5], 'boundaryNodes': [10]}, 'ACA': {'startNodes': [10], 'boundaryNodes': []},
'LPCA': {'startNodes': [6], 'boundaryNodes': []}, 'RPCA': {'startNodes': [7], 'boundaryNodes': []}}
for partitionName, info in partitionInfo.items():
startNodes, boundaryNodes = itemgetter('startNodes', 'boundaryNodes')(info)
resultDict = self.BFS(startNodes, boundaryNodes)
visitedNodes, visitedEdges = itemgetter('visitedNodes', 'visitedEdges')(resultDict)
terminatingNodesInThisPartition = [node for node in visitedNodes if G.degree(node) == 1 and nodeInfoDict[node]['depth'] != 0]
terminatingPressuresInThisPartition = []
for terminatingNode in terminatingNodesInThisPartition:
terminatingPressuresInThisPartition.append(np.round(nodeInfoDict[terminatingNode]['simulationData']['pressure']/13560/9.8*1000, 2)) # mmHg
terminatingPressuresInThisPartition = list(sorted(terminatingPressuresInThisPartition))
print('Terminating pressures in {} are {} mmHg'.format(partitionName, terminatingPressuresInThisPartition))
def setTerminatingPressure(self, option=1, extraInfo=None):
"""
Set the terminating pressure based on the terminating pressure vs path length relationship found in ADAN.
Note: make sure to use the right slope!!!
Option=1: all partitions use the slope from ADAN dataset
Option=2: use custom slope for each partition
"""
G = self.G
edgeList = self.edgeList
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
edgeIndexList = self.edgeIndexList
spacing = self.spacing
directory = self.directory
ADANFolder = os.path.abspath(join(directory, '../../../../'))
with open(join(ADANFolder, 'ADAN-Web/resultADANDict.pkl'), 'rb') as f:
resultADANDict = pickle.load(f)
print('resultADANDict.pkl loaded from {}'.format(ADANFolder))
partitionInfo = {'LMCA': {'startNodes': [4], 'boundaryNodes': [10], 'pressureIn': 13560*9.8*0.115}, 'RMCA': {'startNodes': [5], 'boundaryNodes': [10], 'pressureIn': 13560*9.8*0.115},
'LPCA': {'startNodes': [6], 'boundaryNodes': [], 'pressureIn': 13560*9.8*0.115}, 'RPCA': {'startNodes': [7], 'boundaryNodes': [], 'pressureIn': 13560*9.8*0.115},
'ACA': {'startNodes': [10], 'boundaryNodes': [], 'pressureIn': 13560*9.8*0.115}}
# Use the slope and intercept from the ADAN dataset
if option == 1:
slopePressurePathLength, interceptPressurePathLength = itemgetter('slopePressurePathLength', 'interceptPressurePathLength')(resultADANDict)
print('slope={}, intercept={}'.format(slopePressurePathLength, interceptPressurePathLength))
fitResultPerPartition = {'LMCA': [slopePressurePathLength, interceptPressurePathLength], 'RMCA': [slopePressurePathLength, interceptPressurePathLength],
'LPCA': [slopePressurePathLength, interceptPressurePathLength], 'RPCA': [slopePressurePathLength, interceptPressurePathLength],
'ACA': [slopePressurePathLength, interceptPressurePathLength]}
# Use the slope and intercept fitted from a ground truth solution
elif option == 2:
fitResultPerPartition = extraInfo['fitResultPerPartition']
elif option == 3:
pass # placeholder: fitResultPerPartition is left undefined and the loop below would raise a NameError
for partitionName, info in partitionInfo.items():
startNodes, boundaryNodes, pressureIn = itemgetter('startNodes', 'boundaryNodes', 'pressureIn')(info)
resultDict = self.BFS(startNodes, boundaryNodes)
visitedNodes, visitedEdges = itemgetter('visitedNodes', 'visitedEdges')(resultDict)
terminatingNodesInThisPartition = [node for node in visitedNodes if G.degree(node) == 1 and nodeInfoDict[node]['depth'] != 0]
terminatingPressuresInThisPartition = []
slopePressurePathLength, interceptPressurePathLength = fitResultPerPartition[partitionName]
for terminatingNode in terminatingNodesInThisPartition:
path = nx.shortest_path(G, startNodes[0], terminatingNode)
pathEdgeIndexList = [G[path[ii]][path[ii + 1]]['edgeIndex'] for ii in range(len(path) - 1)]
uniquePathEdgeIndexList = np.unique(pathEdgeIndexList)
assert len(uniquePathEdgeIndexList) != 0
pathLength = np.sum([edgeInfoDict[edgeIndex]['length'] * spacing for edgeIndex in uniquePathEdgeIndexList]) # meter
pressure = pressureIn + pathLength * slopePressurePathLength * 0.8 # note: the fitted slope is scaled by 0.8 here (see the slope note in the docstring)
nodeInfoDict[terminatingNode]['simulationData']['pressure'] = pressure
terminatingPressuresInThisPartition.append(np.round(pressure/13560/9.8*1000, 2)) # mmHg
terminatingPressuresInThisPartition = list(sorted(terminatingPressuresInThisPartition))
print('Terminating pressures in {} are {} mmHg'.format(partitionName, terminatingPressuresInThisPartition))
self.nodeInfoDict = nodeInfoDict
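# A minimal sketch of the linear pressure-vs-path-length model applied above, with
# hypothetical numbers (the real slope comes from resultADANDict or extraInfo):
# pressureIn = 13560*9.8*0.115          # Pa (115 mmHg inlet)
# slopePressurePathLength = -20000.0    # Pa/m, assumed for illustration
# pathLength = 0.1                      # m
# pressure = pressureIn + pathLength * slopePressurePathLength * 0.8
# # -> 15282.1 - 1600 = 13682.1 Pa, i.e. about 103 mmHg at the terminating node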
def fitTerminatingPressureToPathLength(self, showFittingResult=False, figIndex=1, isLastFigure=False):
"""
Extract the terminating pressures from the existing fluid solution and fit them to path length per compartment.
Check the manual correction for LMCA!
"""
G = self.G
edgeList = self.edgeList
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
edgeIndexList = self.edgeIndexList
spacing = self.spacing
directory = self.directory
partitionInfo = {'LMCA': {'startNodes': [4], 'boundaryNodes': [10], 'color': 'r'}, 'RMCA': {'startNodes': [5], 'boundaryNodes': [10], 'color': 'g'},
'LPCA': {'startNodes': [6], 'boundaryNodes': [], 'color': 'b'}, 'RPCA': {'startNodes': [7], 'boundaryNodes': [], 'color': 'y'},
'ACA': {'startNodes': [10], 'boundaryNodes': [], 'color': 'c'}}
fitResultPerPartition = {'LMCA': [], 'RMCA': [], 'LPCA': [], 'RPCA': [], 'ACA': []}
terminatingPressurePerPartition = {'LMCA': [], 'RMCA': [], 'LPCA': [], 'RPCA': [], 'ACA': []}
pathLengthPerPartition = {'LMCA': [], 'RMCA': [], 'LPCA': [], 'RPCA': [], 'ACA': []}
for partitionName, info in partitionInfo.items():
startNodes, boundaryNodes = itemgetter('startNodes', 'boundaryNodes')(info)
resultDict = self.BFS(startNodes, boundaryNodes)
visitedNodes, visitedEdges = itemgetter('visitedNodes', 'visitedEdges')(resultDict)
terminatingNodesInThisPartition = [node for node in visitedNodes if G.degree(node) == 1 and nodeInfoDict[node]['depth'] != 0]
terminatingPressuresInThisPartition = [nodeInfoDict[node]['simulationData']['pressure'] for node in terminatingNodesInThisPartition] # Pascal
pathLengthInThisPartition = []
for terminatingNode in terminatingNodesInThisPartition:
path = nx.shortest_path(G, startNodes[0], terminatingNode)
pathEdgeIndexList = [G[path[ii]][path[ii + 1]]['edgeIndex'] for ii in range(len(path) - 1)]
uniquePathEdgeIndexList = np.unique(pathEdgeIndexList)
assert len(uniquePathEdgeIndexList) != 0
pathLength = np.sum([edgeInfoDict[edgeIndex]['length'] * spacing for edgeIndex in uniquePathEdgeIndexList]) # meter
pathLengthInThisPartition.append(pathLength)
# Check this manual correction!
# Note: the pressures and path lengths must be filtered together, otherwise the two lists fall out of alignment.
# if partitionName == 'LMCA':
#     keptPairs = [(length, pressure) for length, pressure in zip(pathLengthInThisPartition, terminatingPressuresInThisPartition) if pressure <= 13560*9.8*0.1]
#     pathLengthInThisPartition = [length for length, pressure in keptPairs]
#     terminatingPressuresInThisPartition = [pressure for length, pressure in keptPairs]
terminatingPressurePerPartition[partitionName] = terminatingPressuresInThisPartition
pathLengthPerPartition[partitionName] = pathLengthInThisPartition
# slopeTerminatingPressureVSPathLength, interceptTerminatingPressureVSPathLength = np.polyfit(pathLengthInThisPartition, terminatingPressuresInThisPartition, 1)
slopePressurePathLength, interceptPressurePathLength, rPressurePathLength, pPressurePathLength, stdErrorPressurePathLength = stats.linregress(pathLengthInThisPartition, terminatingPressuresInThisPartition) # linregress returns the correlation coefficient r, not r^2
print('{}: slopePressurePathLength={} Pa/m, interceptPressurePathLength={} Pa, rSquared={}, pValue={}'.format(partitionName, slopePressurePathLength, interceptPressurePathLength, rPressurePathLength**2, pPressurePathLength))
fitResultPerPartition[partitionName] = [slopePressurePathLength, interceptPressurePathLength]
if showFittingResult:
fig = plt.figure(figIndex, figsize=(15, 3))
plt.subplots_adjust(left=0.05, right=0.96, top=0.90, bottom=0.15, wspace=0.3, hspace=0.4)
ax = fig.add_subplot(1,5,1)
for partitionName, info in partitionInfo.items():
terminatingPressuresInThisPartition = terminatingPressurePerPartition[partitionName]
pathLengthInThisPartition = pathLengthPerPartition[partitionName]
xValues = [val * 1000 for val in pathLengthInThisPartition] # mm
yValues = [val / 13560 / 9.8 * 1000 for val in terminatingPressuresInThisPartition] # mmHg
color = info['color']
ax.scatter(xValues, yValues, c=color, label=partitionName)
ax.set_xlabel('Path length (mm)')
ax.set_ylabel('Terminating pressure (mmHg)')
ax.legend(prop={'size': 6})
if isLastFigure:
plt.show()
return fitResultPerPartition
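# A self-contained sketch (toy data) of the per-partition fit used above; the numbers
# are hypothetical and only illustrate the stats.linregress call:
# from scipy import stats
# pathLengths = [0.05, 0.08, 0.12, 0.15]                      # meters
# terminatingPressures = [14800.0, 14200.0, 13400.0, 12800.0] # Pascal
# slope, intercept, r, p, stdErr = stats.linregress(pathLengths, terminatingPressures)
# # slope is in Pa/m, intercept in Pa; r**2 gives the coefficient of determination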
def updateNetworkWithSimulationResult(self, velocityPressure):
"""
Update the flow rate and pressure in `edgeInfoDict` and `nodeInfoDict` with the given `velocityPressure`.
"""
G = self.G
edgeIndexList = self.edgeIndexList
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
spacing = self.spacing
for node in G.nodes():
if 'argsIndex' in nodeInfoDict[node]:
argsIndex = nodeInfoDict[node]['argsIndex']
nodeInfoDict[node]['simulationData']['pressure'] = velocityPressure[argsIndex]
for edgeIndex in edgeIndexList:
if 'argsIndex' in edgeInfoDict[edgeIndex]:
argsIndex = edgeInfoDict[edgeIndex]['argsIndex']
radius = edgeInfoDict[edgeIndex]['meanRadius'] * spacing # meter
velocity = velocityPressure[argsIndex] # m/s
flow = velocity * np.pi * radius**2
edgeInfoDict[edgeIndex]['simulationData']['velocity'] = velocity
edgeInfoDict[edgeIndex]['simulationData']['flow'] = flow
# Save
self.nodeInfoDict = nodeInfoDict
self.edgeInfoDict = edgeInfoDict
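# Worked example of the velocity-to-flow conversion above (hypothetical numbers):
# an edge with meanRadius*spacing = 1e-3 m carrying velocity = 0.4 m/s gives
# flow = 0.4 * np.pi * (1e-3)**2 ≈ 1.26e-6 m^3/s, i.e. about 1.26 cm^3/s.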
def loadFluidResult(self, loadFileName, return_ResultDict=False):
"""
Load the saved fluid simulation result.
For use with GBMTest()
"""
directory = self.directory
loadFolderPath = join(directory, 'fluidSimulationResult')
# loadFileName = 'fluidSimulationResult(referenceYear={}, perturbedYear={}).pkl'.format(resultDict['referenceYear']['year'], resultDict['perturbedYear']['year'])
with open(join(loadFolderPath, loadFileName), 'rb') as f:
resultDict = pickle.load(f)
print('{} loaded from {}'.format(loadFileName, loadFolderPath))
G = self.G
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
edgeIndexList = self.edgeIndexList
nodeInfoDictPerturbed, edgeInfoDictPerturbed = itemgetter('nodeInfoDict', 'edgeInfoDict')(resultDict['perturbedYear'])
numOfNodes = len([node for node in nodeInfoDictPerturbed if 'argsIndex' in nodeInfoDictPerturbed[node]])
numOfEdges = len([edgeIndex for edgeIndex in edgeIndexList if 'argsIndex' in edgeInfoDictPerturbed[edgeIndex]])
velocityPressurePerturbed = [0] * (numOfNodes + numOfEdges)
for node in G.nodes():
info = nodeInfoDictPerturbed[node]
if 'argsIndex' in info:
argsIndex = info['argsIndex']
pressure = info['simulationData']['pressure']
velocityPressurePerturbed[argsIndex] = pressure
for edgeIndex in edgeIndexList:
info = edgeInfoDictPerturbed[edgeIndex]
if 'argsIndex' in info:
argsIndex = info['argsIndex']
velocity = info['simulationData']['velocity']
velocityPressurePerturbed[argsIndex] = velocity
if return_ResultDict is False:
return nodeInfoDictPerturbed, edgeInfoDictPerturbed, velocityPressurePerturbed
else:
return nodeInfoDictPerturbed, edgeInfoDictPerturbed, velocityPressurePerturbed, resultDict
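# Usage sketch (hypothetical instance and file names; the class is instantiated elsewhere):
# nodeInfoDictP, edgeInfoDictP, velocityPressureP = network.loadFluidResult(
#     'fluidSimulationResult(referenceYear=BraVa, perturbedYear=2013).pkl')
# # velocityPressureP packs edge velocities and node pressures by their 'argsIndex'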
def loadFluidResult2(self, loadFileName):
"""
Load the saved fluid simulation result.
For use with computeNetworkTest()
"""
directory = self.directory
loadFolderPath = join(directory, 'fluidSimulationResultRandomNetwork')
# loadFileName = 'fluidSimulationResult(referenceYear={}, perturbedYear={}).pkl'.format(resultDict['referenceYear']['year'], resultDict['perturbedYear']['year'])
with open(join(loadFolderPath, loadFileName), 'rb') as f:
resultDict = pickle.load(f)
print('{} loaded from {}'.format(loadFileName, loadFolderPath))
G = self.G
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
edgeIndexList = self.edgeIndexList
nodeInfoDictPerturbed, edgeInfoDictPerturbed = itemgetter('nodeInfoDict', 'edgeInfoDict')(resultDict['perturbedYear'])
numOfNodes = len([node for node in nodeInfoDictPerturbed if 'argsIndex' in nodeInfoDictPerturbed[node]])
numOfEdges = len([edgeIndex for edgeIndex in edgeIndexList if 'argsIndex' in edgeInfoDictPerturbed[edgeIndex]])
velocityPressurePerturbed = [0] * (numOfNodes + numOfEdges)
for node in G.nodes():
info = nodeInfoDictPerturbed[node]
if 'argsIndex' in info:
argsIndex = info['argsIndex']
pressure = info['simulationData']['pressure']
velocityPressurePerturbed[argsIndex] = pressure
for edgeIndex in edgeIndexList:
info = edgeInfoDictPerturbed[edgeIndex]
if 'argsIndex' in info:
argsIndex = info['argsIndex']
velocity = info['simulationData']['velocity']
velocityPressurePerturbed[argsIndex] = velocity
return nodeInfoDictPerturbed, edgeInfoDictPerturbed, velocityPressurePerturbed
def GBMTest(self, saveResult=False):
"""
Create a GBM network with radius following the BraVa distribution, generate a ground truth solution, then perturb the network
in a particular way while keeping the terminating pressures unchanged, then try to solve the network.
"""
start_time = timeit.default_timer()
functionName = inspect.currentframe().f_code.co_name
resultDict = {'referenceYear': {}, 'perturbedYear': {}}
self.loadNetwork(version=4, year='BraVa')
self.convertNetowrk()
self.adjustNetwork()
self.setNetwork(option=2)
# extraInfo = {'perturbedYear': 2013, 'excludedEdgeIndex': [0,1,2,3,4,7,12]}
# self.perturbNetwork(option=2, extraInfo=extraInfo)
# self.setNetwork(option=2)
success = self.createGroundTruth()
self.showFlowInfo()
if not success:
return
G = self.G
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
spacing = self.spacing
edgeNameDict = {0: 'LICA', 3: 'LICA', 2: 'RICA', 7: 'RICA', 1: 'VA', 4: 'RPCA\nComm', 8: 'LMCA', 9: 'LM', 11: 'RM', 10: 'RMCA', 5: 'LPCA', 6: 'RPCA', 20: 'ACA'}
# nodeLabelDict = {node: G.node[node]['nodeIndex'] for node in G.nodes()} # nodeIndex
# nodeLabelDict = {node: G.node[node]['depth'] for node in G.nodes()} # nodeDepth
nodeLabelDict = {} # None
# nodeValueList = [G.node[node]['nodeIndex'] for node in G.nodes()] # nodeIndex
# nodeValueList = [G.node[node]['depth'] for node in G.nodes()] # nodeDepth
nodeValueList = [0 for node in G.nodes()] # None
# edgeLabelDict = {edge: G[edge[0]][edge[1]]['edgeIndex'] for edge in G.edges()} # edgeIndex
# edgeLabelDict = {edge: G[edge[0]][edge[1]]['depth'] for edge in G.edges()} # edgeDepth
# edgeLabelDict = {edge: np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['meanRadius']*spacing*1000, 2) for edge in G.edges()} # edge radius
edgeLabelDict = {edge: edgeNameDict[G[edge[0]][edge[1]]['edgeIndex']] if G[edge[0]][edge[1]]['edgeIndex'] in edgeNameDict else '' for edge in G.edges()} # edge name
# edgeValueList = [G[edge[0]][edge[1]]['edgeIndex'] for edge in G.edges()] # edgeIndex
# edgeValueList = [G[edge[0]][edge[1]]['depth'] for edge in G.edges()] # edgeDepth
# edgeValueList = [np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['meanRadius']*spacing*1000, 2) for edge in G.edges()] # edgeIndex
edgeValueList = [0 for edge in G.edges()] # None
infoDict = {'nodeLabelDict': nodeLabelDict, 'nodeValueList': nodeValueList, 'nodeColorbarLabel': [],
'edgeLabelDict': edgeLabelDict, 'edgeValueList': edgeValueList, 'edgeColorbarLabel': [],
'figTitle': 'Major branch name'}
self.plotNetwork(infoDict, figIndex=2, isLastFigure=True)
return
# NOTE: the early return above makes the remainder of this function unreachable; the code below is kept for reference.
# print(G.edges(data=True))
# nodeLabelDict = {node: G.node[node]['depth'] for node in G.nodes()} # nodeLevel
# nodeLabelDict = {node: G.node[node]['nodeIndex'] for node in G.nodes()} # nodeIndex
nodeLabelDict = {node: np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()} # ground truth pressure in mmHg
# nodeValueList = [G.node[node]['depth'] for node in G.nodes()] # nodeLevel
# nodeValueList = [G.node[node]['nodeIndex'] for node in G.nodes()] # nodeIndex
nodeValueList = [np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()] # ground truth pressure in mmHg
# edgeLabelDict = {edge: G[edge[0]][edge[1]]['depth'] for edge in G.edges()} # edgeLevel
# edgeLabelDict = {edge: G[edge[0]][edge[1]]['edgeIndex'] for edge in G.edges()} # edgeIndex
edgeLabelDict = {edge: np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()} # ground truth flow in cm^3/s
# edgeValueList = [G[edge[0]][edge[1]]['depth'] for edge in G.edges()] # edgeLevel
# edgeValueList = [G[edge[0]][edge[1]]['edgeIndex'] for edge in G.edges()] # edgeIndex
# edgeValueList = [edgeInfoDict[edgeIndex]['meanRadius'] for edgeIndex in edgeIndexList] # meanRadius
edgeValueList = [np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()] # ground truth flow in cm^3/s
infoDict = {'nodeLabelDict': nodeLabelDict, 'nodeValueList': nodeValueList, 'nodeColorbarLabel': 'Node depth',
'edgeLabelDict': edgeLabelDict, 'edgeValueList': edgeValueList, 'edgeColorbarLabel': 'Edge depth',
'figTitle': 'GBM Reference'}
self.plotNetwork(infoDict, figIndex=1, isLastFigure=False)
resultDict['referenceYear'] = {'year': 'BraVa', 'nodeInfoDict': nodeInfoDict, 'edgeInfoDict': edgeInfoDict, 'G': G}
## Solve the system with perturbed network properties
edgeIndexList = self.edgeIndexList
# Manually perturb the network #
extraInfo = {'perturbedYear': 2013, 'excludedEdgeIndex': [0,1,2,3,4,7,12]}
# self.perturbNetwork(option=2, extraInfo=extraInfo)
# self.setNetwork(option=2)
# self.showFlowInfo()
# computeNetworkDetailExtraInfo = None
# Load previous optimization result #
loadFileName = 'fluidSimulationResult3(referenceYear=BraVa, perturbedYear=2013).pkl'
nodeInfoDictPerturbed, edgeInfoDictPerturbed, velocityPressurePerturbed = self.loadFluidResult(loadFileName)
velocityPressureInit = velocityPressurePerturbed
self.nodeInfoDict = nodeInfoDictPerturbed
self.edgeInfoDict = edgeInfoDictPerturbed
computeNetworkDetailExtraInfo = {'excludedEdgeIndex': [0,1,2,3,4,5,6,7,10,11,12,13]}
numOfNodes = len([node for node in nodeInfoDict if 'argsIndex' in nodeInfoDict[node]])
numOfEdges = len([edgeIndex for edgeIndex in edgeIndexList if 'argsIndex' in edgeInfoDict[edgeIndex]])
pressureIn = 13560 * 9.8 * 0.12 # Pascal # check that this number is consistent with the one used in the reference case!
# NOTE: the following line overwrites the initial guess loaded from the saved result above
velocityPressureInit = np.hstack((np.full((numOfEdges,), 0.4), np.linspace(pressureIn*0.8, pressureIn*0.5, numOfNodes)))
velocityPressureInit = [float(p) for p in velocityPressureInit]
# bounds in the form of ((min, min...), (max, max...)) #
# boundsVelocityPressure = [[], []] # first sublist contains lower bound and the second sublist contains upper bound
# boundsVelocityPressure[0] = [0] * numOfEdges + [13560*9.8*0.00] * numOfNodes # min velocity = 0 m/s, min pressure = 0 mmHg
# boundsVelocityPressure[1] = [5] * numOfEdges + [13560*9.8*0.12] * numOfNodes # max velocity = 5 m/s, max pressure = 120 mmHg
# boundsVelocityPressure = tuple(map(tuple, boundsVelocityPressure))
# bounds in the form of ((min, max), (min, max)...) #
boundsVelocityPressure = [[0, 5]] * numOfEdges + [[13560*9.8*0.00, 13560*9.8*0.12]] * numOfNodes
# Improve the lower bound of pressures at each node
self.calculateVariableBounds()
for node in G.nodes():
if 'argsIndex' in nodeInfoDict[node]:
argsIndex = self.nodeInfoDict[node]['argsIndex']
minPressure = self.nodeInfoDict[node]['simulationData']['minPressure']
boundsVelocityPressure[argsIndex][0] = minPressure
boundsVelocityPressure = tuple(map(tuple, boundsVelocityPressure))
fluidMethod = 'HW'
# least square optimization #
# self.setupFluidEquations()
# eqnInfoDictList = self.eqnInfoDictList
# optResult = least_squares(computeNetworkDetail, velocityPressureInit, args=(eqnInfoDictList, fluidMethod), bounds=boundsVelocityPressure, ftol=1e-9, xtol=1e-9)
# velocityPressure = np.abs(optResult.x)
# cost = optResult.cost
# message = optResult.message
# differential evolution, bounds in (min, max) pair form #
# self.setupFluidEquations()
# eqnInfoDictList = self.eqnInfoDictList
# errorNorm = 2
# optResult = differential_evolution(computeNetworkDetail, args=(eqnInfoDictList, fluidMethod, errorNorm), bounds=boundsVelocityPressure, maxiter=2000, polish=True, disp=True)
# velocityPressure = np.abs(optResult.x)
# cost = optResult.fun
# message = optResult.message
# basinhopping, bounds in (min, max) pair form #
self.setupFluidEquations()
eqnInfoDictList = self.eqnInfoDictList
errorNorm = 2
minimizer_kwargs = {'method': 'BFGS', 'args': (eqnInfoDictList, fluidMethod, errorNorm, computeNetworkDetailExtraInfo), 'options': {'norm': np.inf, 'maxiter': 40000}}
# minimizer_kwargs = {'method': 'L-BFGS-B', 'args': (eqnInfoDictList, fluidMethod, errorNorm), 'bounds': boundsVelocityPressure, 'options': {'maxiter': 40000, 'maxfun': 40000}}
optResult = basinhopping(computeNetworkDetail, velocityPressureInit, minimizer_kwargs=minimizer_kwargs, niter=100, T=100, stepsize=50, interval=5, niter_success=10, disp=True)
velocityPressure = np.abs(optResult.x)
cost = optResult.fun
message = optResult.message
print('cost={}, message={}'.format(cost, message))
pressures = velocityPressure[numOfEdges:]
print('Minimum pressure is {} mmHg and maximum pressure is {} mmHg'.format((np.amin(pressures))/13560/9.8*1000, (np.amax(pressures))/13560/9.8*1000))
velocities = velocityPressure[:numOfEdges]
print('Minimum velocity is {} m/s and maximum velocity is {} m/s'.format(np.amin(velocities), np.amax(velocities)))
velocityPressureGroundTruth = self.velocityPressureGroundTruth
self.velocityPressure = velocityPressure
self.validateFluidEquations(velocityPressure=velocityPressure)
print(list(zip(velocityPressureGroundTruth, velocityPressure)))
self.updateNetworkWithSimulationResult(velocityPressure)
elapsed = timeit.default_timer() - start_time
print('Elapsed time for function {}: {} sec'.format(functionName, elapsed))
G = self.G
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
nodeLabelDict = {node: np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()} # ground truth pressure in mmHg
nodeValueList = [np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()] # ground truth pressure in mmHg
edgeLabelDict = {edge: np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()} # ground truth flow in cm^3/s
edgeValueList = [np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()] # ground truth flow in cm^3/s
infoDict = {'nodeLabelDict': nodeLabelDict, 'nodeValueList': nodeValueList, 'nodeColorbarLabel': 'Node',
'edgeLabelDict': edgeLabelDict, 'edgeValueList': edgeValueList, 'edgeColorbarLabel': 'Edge',
'figTitle': 'GBM {}'.format(extraInfo['perturbedYear'])}
self.plotNetwork(infoDict, figIndex=3, isLastFigure=True)
resultDict['perturbedYear'] = {'year': 2013, 'nodeInfoDict': nodeInfoDict, 'edgeInfoDict': edgeInfoDict, 'G': G}
if saveResult:
directory = self.directory
saveFolderPath = join(directory, 'fluidSimulationResult')
saveFileName = 'fluidSimulationResult(referenceYear={}, perturbedYear={}).pkl'.format(resultDict['referenceYear']['year'], resultDict['perturbedYear']['year'])
with open(join(saveFolderPath, saveFileName), 'wb') as f:
pickle.dump(resultDict, f, 2)
print('{} saved to {}'.format(saveFileName, saveFolderPath))
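# A minimal, self-contained sketch of the basinhopping pattern used above (a toy
# residual stands in for computeNetworkDetail; all numbers are illustrative):
# import numpy as np
# from scipy.optimize import basinhopping
# def toyResidual(x):
#     return np.linalg.norm(x - np.array([1.0, 2.0]))
# minimizer_kwargs = {'method': 'BFGS', 'options': {'maxiter': 1000}}
# optResult = basinhopping(toyResidual, [0.0, 0.0], minimizer_kwargs=minimizer_kwargs,
#                          niter=20, T=1.0, stepsize=0.5, disp=False)
# # optResult.x ≈ [1.0, 2.0], optResult.fun ≈ 0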
def GBMTest2(self, perturbTerminatingPressureOption=1, saveResult=False):
"""
Perturb the terminating pressure in a specific way and check whether the new system can be solved.
"""
start_time = timeit.default_timer()
functionName = inspect.currentframe().f_code.co_name
resultDict = {'referenceYear': {}, 'perturbedYear': {}}
self.loadNetwork(version=4, year='BraVa')
self.convertNetowrk()
self.adjustNetwork()
self.setNetwork(option=2)
success = self.createGroundTruth(option=2)
self.printTerminatingPressurePerPartition()
# self.showFlowInfo()
if not success:
return
G = self.G
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
spacing = self.spacing
# nodeLabelDict = {node: G.node[node]['depth'] for node in G.nodes()} # nodeLevel
# nodeLabelDict = {node: G.node[node]['nodeIndex'] for node in G.nodes()} # nodeIndex
nodeLabelDict = {node: np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()} # ground truth pressure in mmHg
# nodeValueList = [G.node[node]['depth'] for node in G.nodes()] # nodeLevel
# nodeValueList = [G.node[node]['nodeIndex'] for node in G.nodes()] # nodeIndex
nodeValueList = [np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()] # ground truth pressure in mmHg
# edgeLabelDict = {edge: G[edge[0]][edge[1]]['depth'] for edge in G.edges()} # edgeLevel
# edgeLabelDict = {edge: G[edge[0]][edge[1]]['edgeIndex'] for edge in G.edges()} # edgeIndex
edgeLabelDict = {edge: np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()} # ground truth flow in cm^3/s
# edgeValueList = [G[edge[0]][edge[1]]['depth'] for edge in G.edges()] # edgeLevel
# edgeValueList = [G[edge[0]][edge[1]]['edgeIndex'] for edge in G.edges()] # edgeIndex
# edgeValueList = [edgeInfoDict[edgeIndex]['meanRadius'] for edgeIndex in edgeIndexList] # meanRadius
edgeValueList = [np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()] # ground truth flow in cm^3/s
infoDict = {'nodeLabelDict': nodeLabelDict, 'nodeValueList': nodeValueList, 'nodeColorbarLabel': 'Node depth',
'edgeLabelDict': edgeLabelDict, 'edgeValueList': edgeValueList, 'edgeColorbarLabel': 'Edge depth',
'figTitle': 'GBM Reference'}
self.plotNetwork(infoDict, figIndex=1, isLastFigure=False)
resultDict['referenceYear'] = {'year': 'BraVa', 'nodeInfoDict': copy.deepcopy(nodeInfoDict), 'edgeInfoDict': copy.deepcopy(edgeInfoDict), 'G': copy.deepcopy(G)}
## Solve the system with perturbed network properties
edgeIndexList = self.edgeIndexList
# Manually perturb the network #
extraInfo = {'perturbedYear': 2013, 'excludedEdgeIndex': [0,1,2,3,7]} # perturbTerminatingPressureOption=2
# perturbTerminatingPressureOption = 1
self.perturbNetwork(option=2, extraInfo=extraInfo)
self.setNetwork(option=2)
self.perturbTerminatingPressure(option=perturbTerminatingPressureOption)
self.printTerminatingPressurePerPartition()
# self.showFlowInfo()
# computeNetworkDetailExtraInfo = None
computeNetworkDetailExtraInfo = None
numOfNodes = len([node for node in nodeInfoDict if 'argsIndex' in nodeInfoDict[node]])
numOfEdges = len([edgeIndex for edgeIndex in edgeIndexList if 'argsIndex' in edgeInfoDict[edgeIndex]])
pressureIn = 13560 * 9.8 * 0.12 # Pascal # check that this number is consistent with the one used in the reference case!
velocityPressureInit = np.hstack((np.full((numOfEdges,), 0.4), np.linspace(pressureIn*0.8, pressureIn*0.5, numOfNodes)))
velocityPressureInit = [float(p) for p in velocityPressureInit]
# bounds in the form of ((min, min...), (max, max...)) #
# boundsVelocityPressure = [[], []] # first sublist contains lower bound and the second sublist contains upper bound
# boundsVelocityPressure[0] = [0] * numOfEdges + [13560*9.8*0.00] * numOfNodes # min velocity = 0 m/s, min pressure = 0 mmHg
# boundsVelocityPressure[1] = [5] * numOfEdges + [13560*9.8*0.12] * numOfNodes # max velocity = 5 m/s, max pressure = 120 mmHg
# boundsVelocityPressure = tuple(map(tuple, boundsVelocityPressure))
# bounds in the form of ((min, max), (min, max)...) #
boundsVelocityPressure = [[0, 5]] * numOfEdges + [[13560*9.8*0.00, 13560*9.8*0.12]] * numOfNodes
# Improve the lower bound of pressures at each node
# self.calculateVariableBounds()
# for node in G.nodes():
# if 'argsIndex' in nodeInfoDict[node]:
# argsIndex = self.nodeInfoDict[node]['argsIndex']
# minPressure = self.nodeInfoDict[node]['simulationData']['minPressure']
# boundsVelocityPressure[argsIndex][0] = minPressure
# boundsVelocityPressure = tuple(map(tuple, boundsVelocityPressure))
fluidMethod = 'HW'
# basinhopping, bounds in (min, max) pair form #
self.setupFluidEquations()
eqnInfoDictList = self.eqnInfoDictList
errorNorm = 2
minimizer_kwargs = {'method': 'BFGS', 'args': (eqnInfoDictList, fluidMethod, errorNorm, computeNetworkDetailExtraInfo), 'options': {'norm': np.inf, 'maxiter': 40000}}
# minimizer_kwargs = {'method': 'L-BFGS-B', 'args': (eqnInfoDictList, fluidMethod, errorNorm), 'bounds': boundsVelocityPressure, 'options': {'maxiter': 40000, 'maxfun': 40000}}
optResult = basinhopping(computeNetworkDetail, velocityPressureInit, minimizer_kwargs=minimizer_kwargs, niter=100, T=1000, stepsize=500, interval=5, niter_success=15, disp=True)
velocityPressure = np.abs(optResult.x)
cost = optResult.fun
message = optResult.message
print('cost={}, message={}'.format(cost, message))
pressures = velocityPressure[numOfEdges:]
print('Minimum pressure is {} mmHg and maximum pressure is {} mmHg'.format((np.amin(pressures))/13560/9.8*1000, (np.amax(pressures))/13560/9.8*1000))
velocities = velocityPressure[:numOfEdges]
print('Minimum velocity is {} m/s and maximum velocity is {} m/s'.format(np.amin(velocities), np.amax(velocities)))
velocityPressureGroundTruth = self.velocityPressureGroundTruth
self.velocityPressure = velocityPressure
self.validateFluidEquations(velocityPressure=velocityPressure)
print(list(zip(velocityPressureGroundTruth, velocityPressure)))
self.updateNetworkWithSimulationResult(velocityPressure)
elapsed = timeit.default_timer() - start_time
print('Elapsed time for function {}: {} sec'.format(functionName, elapsed))
# GBM_BraVa_Reference flow_perturbTerminatingPressureOption=4_GBMTest2
# GBM_2013_Solved flow_perturbTerminatingPressureOption=4_GBMTest2
G = self.G
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
nodeLabelDict = {node: np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()} # ground truth pressure in mmHg
nodeValueList = [np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()] # ground truth pressure in mmHg
edgeLabelDict = {edge: np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()} # ground truth flow in cm^3/s
edgeValueList = [np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()] # ground truth flow in cm^3/s
infoDict = {'nodeLabelDict': nodeLabelDict, 'nodeValueList': nodeValueList, 'nodeColorbarLabel': 'Node',
'edgeLabelDict': edgeLabelDict, 'edgeValueList': edgeValueList, 'edgeColorbarLabel': 'Edge',
'figTitle': 'GBM {}, TPOption={}'.format(extraInfo['perturbedYear'], perturbTerminatingPressureOption)} # TP->terminating pressure
self.plotNetwork(infoDict, figIndex=3, isLastFigure=True)
resultDict['perturbedYear'] = {'year': 2013, 'nodeInfoDict': copy.deepcopy(nodeInfoDict), 'edgeInfoDict': copy.deepcopy(edgeInfoDict), 'G': copy.deepcopy(G), 'velocityPressure': copy.deepcopy(velocityPressure)}
if saveResult:
directory = self.directory
saveFolderPath = join(directory, 'fluidSimulationResult')
saveFileName = 'fluidSimulationResultGBMTest2(referenceYear={}, perturbedYear={}, perturbTerminatingPressureOption={}).pkl'.format(resultDict['referenceYear']['year'], resultDict['perturbedYear']['year'], perturbTerminatingPressureOption)
with open(join(saveFolderPath, saveFileName), 'wb') as f:
pickle.dump(resultDict, f, 2)
print('{} saved to {}'.format(saveFileName, saveFolderPath))
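# fluidMethod='HW' above selects the Hazen-Williams head-loss relation. As a reference,
# a standard SI-unit sketch (the actual implementation lives in computeNetworkDetail,
# defined elsewhere; C is an assumed roughness coefficient):
# headLoss = 10.67 * L * Q**1.852 / (C**1.852 * d**4.87)   # m, with L [m], Q [m^3/s], d [m]
# pressureDrop = 1000.0 * 9.8 * headLoss                   # Pa, assuming water density 1000 kg/m^3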
def GBMTest3(self, perturbTerminatingPressureOption=1, saveResult=False):
"""
Test the solver by re-solving the unperturbed BraVa ground-truth network (the perturbation code is commented out).
flowResult_referenceYear(BraVa)_groundTruthOption=1_GBMTest3
flowResult_solvedYear(BraVa)_groundTruthOption=1_GBMTest3
flowResult_referenceYear(BraVa)_groundTruthOption=2_GBMTest3
flowResult_solvedYear(BraVa)_groundTruthOption=2_GBMTest3
"""
start_time = timeit.default_timer()
functionName = inspect.currentframe().f_code.co_name
resultDict = {'referenceYear': {}, 'perturbedYear': {}, 'solvedYear': {}}
self.loadNetwork(version=4, year='BraVa')
self.convertNetowrk()
self.adjustNetwork()
self.setNetwork(option=2)
success = self.createGroundTruth(option=2)
# self.showFlowInfo()
if not success:
return
G = self.G
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
spacing = self.spacing
# nodeLabelDict = {node: G.node[node]['depth'] for node in G.nodes()} # nodeLevel
# nodeLabelDict = {node: G.node[node]['nodeIndex'] for node in G.nodes()} # nodeIndex
nodeLabelDict = {node: np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()} # ground truth pressure in mmHg
# nodeValueList = [G.node[node]['depth'] for node in G.nodes()] # nodeLevel
# nodeValueList = [G.node[node]['nodeIndex'] for node in G.nodes()] # nodeIndex
nodeValueList = [np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()] # ground truth pressure in mmHg
# edgeLabelDict = {edge: G[edge[0]][edge[1]]['depth'] for edge in G.edges()} # edgeLevel
# edgeLabelDict = {edge: G[edge[0]][edge[1]]['edgeIndex'] for edge in G.edges()} # edgeIndex
edgeLabelDict = {edge: np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()} # ground truth flow in cm^3/s
# edgeValueList = [G[edge[0]][edge[1]]['depth'] for edge in G.edges()] # edgeLevel
# edgeValueList = [G[edge[0]][edge[1]]['edgeIndex'] for edge in G.edges()] # edgeIndex
# edgeValueList = [edgeInfoDict[edgeIndex]['meanRadius'] for edgeIndex in edgeIndexList] # meanRadius
edgeValueList = [np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()] # ground truth flow in cm^3/s
infoDict = {'nodeLabelDict': nodeLabelDict, 'nodeValueList': nodeValueList, 'nodeColorbarLabel': 'Node depth',
'edgeLabelDict': edgeLabelDict, 'edgeValueList': edgeValueList, 'edgeColorbarLabel': 'Edge depth',
'figTitle': 'GBM Reference'}
self.plotNetwork(infoDict, figIndex=1, isLastFigure=False)
resultDict['referenceYear'] = {'year': 'BraVa', 'nodeInfoDict': copy.deepcopy(nodeInfoDict), 'edgeInfoDict': copy.deepcopy(edgeInfoDict), 'G': copy.deepcopy(G)}
## Solve the system with perturbed network properties
edgeIndexList = self.edgeIndexList
# Manually perturb the network #
# extraInfo = {'perturbedYear': 2013, 'excludedEdgeIndex': [0,1,2,3,7]}
# perturbTerminatingPressureOption = 1
# self.perturbNetwork(option=2, extraInfo=extraInfo)
# self.setNetwork(option=2)
# self.perturbTerminatingPressure(option=perturbTerminatingPressureOption)
# self.showFlowInfo()
# computeNetworkDetailExtraInfo = None
# computeNetworkDetailExtraInfo = {'excludedEdgeIndex': [0,1,2,3,4,5,6,7,10,11,12,13]}
computeNetworkDetailExtraInfo = None
numOfNodes = len([node for node in nodeInfoDict if 'argsIndex' in nodeInfoDict[node]])
numOfEdges = len([edgeIndex for edgeIndex in edgeIndexList if 'argsIndex' in edgeInfoDict[edgeIndex]])
pressureIn = 13560 * 9.8 * 0.12 # Pascal # check that this number is consistent with the one used in the reference case!
velocityPressureInit = np.hstack((np.full((numOfEdges,), 0.4), np.linspace(pressureIn*0.8, pressureIn*0.5, numOfNodes)))
# velocityPressureInit = self.getVelocityPressure() # Get velocityPressure from ground truth solution
velocityPressureInit = [float(p) for p in velocityPressureInit]
# bounds in the form of ((min, min...), (max, max...)) #
# boundsVelocityPressure = [[], []] # first sublist contains lower bound and the second sublist contains upper bound
# boundsVelocityPressure[0] = [0] * numOfEdges + [13560*9.8*0.00] * numOfNodes # min velocity = 0 m/s, min pressure = 0 mmHg
# boundsVelocityPressure[1] = [5] * numOfEdges + [13560*9.8*0.12] * numOfNodes # max velocity = 5 m/s, max pressure = 120 mmHg
# boundsVelocityPressure = tuple(map(tuple, boundsVelocityPressure))
# bounds in the form of ((min, max), (min, max)...) #
boundsVelocityPressure = [[0, 5]] * numOfEdges + [[13560*9.8*0.00, 13560*9.8*0.12]] * numOfNodes
# Improve the lower bound of pressures at each node
# self.calculateVariableBounds()
# for node in G.nodes():
# if 'argsIndex' in nodeInfoDict[node]:
# argsIndex = self.nodeInfoDict[node]['argsIndex']
# minPressure = self.nodeInfoDict[node]['simulationData']['minPressure']
# boundsVelocityPressure[argsIndex][0] = minPressure
# boundsVelocityPressure = tuple(map(tuple, boundsVelocityPressure))
fluidMethod = 'HW'
# basinhopping, bounds in (min, max) pair form #
self.setupFluidEquations()
eqnInfoDictList = self.eqnInfoDictList
errorNorm = 2
minimizer_kwargs = {'method': 'BFGS', 'args': (eqnInfoDictList, fluidMethod, errorNorm, computeNetworkDetailExtraInfo), 'options': {'norm': np.inf, 'maxiter': 40000}}
# computeNetworkDetail(velocityPressureInit, eqnInfoDictList, fluidMethod, errorNorm, computeNetworkDetailExtraInfo)
# self.validateFluidEquations(velocityPressure=velocityPressureInit)
# print(list(zip(self.velocityPressureGroundTruth, velocityPressureInit)))
# return
# minimizer_kwargs = {'method': 'L-BFGS-B', 'args': (eqnInfoDictList, fluidMethod, errorNorm), 'bounds': boundsVelocityPressure, 'options': {'maxiter': 40000, 'maxfun': 40000}}
optResult = basinhopping(computeNetworkDetail, velocityPressureInit, minimizer_kwargs=minimizer_kwargs, niter=100, T=1000, stepsize=500, interval=5, niter_success=10, disp=True)
velocityPressure = np.abs(optResult.x)
cost = optResult.fun
message = optResult.message
print('cost={}, message={}'.format(cost, message))
pressures = velocityPressure[numOfEdges:]
print('Minimum pressure is {} mmHg and maximum pressure is {} mmHg'.format((np.amin(pressures))/13560/9.8*1000, (np.amax(pressures))/13560/9.8*1000))
velocities = velocityPressure[:numOfEdges]
print('Minimum velocity is {} m/s and maximum velocity is {} m/s'.format(np.amin(velocities), np.amax(velocities)))
velocityPressureGroundTruth = self.velocityPressureGroundTruth
self.velocityPressure = velocityPressure
self.validateFluidEquations(velocityPressure=velocityPressure)
print(list(zip(velocityPressureGroundTruth, velocityPressure)))
self.updateNetworkWithSimulationResult(velocityPressure)
elapsed = timeit.default_timer() - start_time
print('Elapsed time for function {}: {} sec'.format(functionName, elapsed))
G = self.G
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
nodeLabelDict = {node: np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()} # ground truth pressure in mmHg
nodeValueList = [np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()] # ground truth pressure in mmHg
edgeLabelDict = {edge: np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()} # ground truth flow in cm^3/s
edgeValueList = [np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()] # ground truth flow in cm^3/s
infoDict = {'nodeLabelDict': nodeLabelDict, 'nodeValueList': nodeValueList, 'nodeColorbarLabel': 'Node',
'edgeLabelDict': edgeLabelDict, 'edgeValueList': edgeValueList, 'edgeColorbarLabel': 'Edge',
'figTitle': 'GBM Solved'}
self.plotNetwork(infoDict, figIndex=3, isLastFigure=True)
resultDict['solvedYear'] = {'year': 'BraVa', 'nodeInfoDict': copy.deepcopy(nodeInfoDict), 'edgeInfoDict': copy.deepcopy(edgeInfoDict), 'G': copy.deepcopy(G), 'velocityPressure': copy.deepcopy(velocityPressure)}
if saveResult:
directory = self.directory
saveFolderPath = join(directory, 'fluidSimulationResult')
saveFileName = 'fluidSimulationResultGBMTest3(referenceYear={}, solvedYear={}, groundTruthOption=2).pkl'.format(resultDict['referenceYear']['year'], resultDict['solvedYear']['year']) # note: groundTruthOption is hardcoded to 2 in the file name
with open(join(saveFolderPath, saveFileName), 'wb') as f:
pickle.dump(resultDict, f, 2)
print('{} saved to {}'.format(saveFileName, saveFolderPath))
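# Layout of the unknown vector used throughout these tests: the first numOfEdges
# entries are edge velocities (m/s) and the remaining numOfNodes entries are node
# pressures (Pa). A tiny illustration with hypothetical sizes:
# numOfEdges, numOfNodes = 3, 4
# velocityPressureInit = np.hstack((np.full((3,), 0.4),
#                                   np.linspace(pressureIn*0.8, pressureIn*0.5, 4)))
# # -> [0.4, 0.4, 0.4, p0, p1, p2, p3], length numOfEdges + numOfNodes = 7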
def GBMTest4(self, perturbNetworkOption=1, saveResult=False):
"""
Perturb the radius in a specific way, set the terminating pressure (TP) using the path-length relationship, and solve the network.
Option=1: all LMCA edge radii decrease by 10%
"""
start_time = timeit.default_timer()
functionName = inspect.currentframe().f_code.co_name
resultDict = {'referenceYear': {}, 'perturbedYear': {}}
self.loadNetwork(version=4, year='BraVa')
self.convertNetowrk()
self.adjustNetwork()
extraInfo = {'partitionToPerturb': ['LMCA'], 'reducePercentage': 10}
self.perturbNetwork(option=perturbNetworkOption, extraInfo=extraInfo)
self.setNetwork(option=2)
self.createGroundTruth(option=2)
self.setTerminatingPressure(option=1, extraInfo=None)
computeNetworkDetailExtraInfo = None
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
edgeIndexList = self.edgeIndexList
numOfNodes = len([node for node in nodeInfoDict if 'argsIndex' in nodeInfoDict[node]])
numOfEdges = len([edgeIndex for edgeIndex in edgeIndexList if 'argsIndex' in edgeInfoDict[edgeIndex]])
pressureIn = 13560 * 9.8 * 0.12 # Pascal # check that this number is consistent with the one used in the reference case!
velocityPressureInit = np.hstack((np.full((numOfEdges,), 0.4), np.linspace(pressureIn*0.8, pressureIn*0.5, numOfNodes)))
velocityPressureInit = [float(p) for p in velocityPressureInit]
# bounds in the form of ((min, min...), (max, max...)) #
# boundsVelocityPressure = [[], []] # first sublist contains lower bound and the second sublist contains upper bound
# boundsVelocityPressure[0] = [0] * numOfEdges + [13560*9.8*0.00] * numOfNodes # min velocity = 0 m/s, min pressure = 0 mmHg
# boundsVelocityPressure[1] = [5] * numOfEdges + [13560*9.8*0.12] * numOfNodes # max velocity = 5 m/s, max pressure = 120 mmHg
# boundsVelocityPressure = tuple(map(tuple, boundsVelocityPressure))
# bounds in the form of ((min, max), (min, max)...) #
boundsVelocityPressure = [[0, 5]] * numOfEdges + [[13560*9.8*0.00, 13560*9.8*0.12]] * numOfNodes
# Improve the lower bound of pressures at each node
# self.calculateVariableBounds()
# for node in G.nodes():
# if 'argsIndex' in nodeInfoDict[node]:
# argsIndex = self.nodeInfoDict[node]['argsIndex']
# minPressure = self.nodeInfoDict[node]['simulationData']['minPressure']
# boundsVelocityPressure[argsIndex][0] = minPressure
# boundsVelocityPressure = tuple(map(tuple, boundsVelocityPressure))
fluidMethod = 'HW'
# basinhopping, bounds in (min, max) pair form #
self.setupFluidEquations()
eqnInfoDictList = self.eqnInfoDictList
errorNorm = 2
minimizer_kwargs = {'method': 'BFGS', 'args': (eqnInfoDictList, fluidMethod, errorNorm, computeNetworkDetailExtraInfo), 'options': {'norm': np.inf, 'maxiter': 40000}}
# minimizer_kwargs = {'method': 'L-BFGS-B', 'args': (eqnInfoDictList, fluidMethod, errorNorm), 'bounds': boundsVelocityPressure, 'options': {'maxiter': 40000, 'maxfun': 40000}}
optResult = basinhopping(computeNetworkDetail, velocityPressureInit, minimizer_kwargs=minimizer_kwargs, niter=100, T=1000, stepsize=500, interval=5, niter_success=15, disp=True)
velocityPressure = np.abs(optResult.x)
cost = optResult.fun
message = optResult.message
print('cost={}, message={}'.format(cost, message))
pressures = velocityPressure[numOfEdges:]
print('Minimum pressure is {} mmHg and maximum pressure is {} mmHg'.format((np.amin(pressures))/13560/9.8*1000, (np.amax(pressures))/13560/9.8*1000))
velocities = velocityPressure[:numOfEdges]
print('Minimum velocity is {} m/s and maximum velocity is {} m/s'.format(np.amin(velocities), np.amax(velocities)))
self.velocityPressure = velocityPressure
self.validateFluidEquations(velocityPressure=velocityPressure)
self.updateNetworkWithSimulationResult(velocityPressure)
elapsed = timeit.default_timer() - start_time
print('Elapsed time for function {}: {} sec'.format(functionName, elapsed))
G = self.G
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
nodeLabelDict = {node: np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()} # ground truth pressure in mmHg
nodeValueList = [np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()] # ground truth pressure in mmHg
edgeLabelDict = {edge: np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()} # ground truth flow in cm^3/s
edgeValueList = [np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()] # ground truth flow in cm^3/s
infoDict = {'nodeLabelDict': nodeLabelDict, 'nodeValueList': nodeValueList, 'nodeColorbarLabel': 'Node',
'edgeLabelDict': edgeLabelDict, 'edgeValueList': edgeValueList, 'edgeColorbarLabel': 'Edge',
'figTitle': 'GBM BraVa, perturbNetworkOption={}'.format(perturbNetworkOption)}
self.plotNetwork(infoDict, figIndex=3, isLastFigure=True)
resultDict['solvedYear'] = {'year': 'BraVa', 'nodeInfoDict': copy.deepcopy(nodeInfoDict), 'edgeInfoDict': copy.deepcopy(edgeInfoDict), 'G': copy.deepcopy(G), 'velocityPressure': copy.deepcopy(velocityPressure)}
if saveResult:
directory = self.directory
saveFolderPath = join(directory, 'fluidSimulationResult')
saveFileName = 'fluidSimulationResultGBMTest4(solvedYear=BraVa, perturbNetworkOption={}).pkl'.format(perturbNetworkOption)
with open(join(saveFolderPath, saveFileName), 'wb') as f:
pickle.dump(resultDict, f, 2)
print('{} saved to {}'.format(saveFileName, saveFolderPath))
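# Unit-conversion note for the constants used throughout: pressures are stored in
# Pascal and displayed in mmHg via p_mmHg = p_Pa / 13560 / 9.8 * 1000, where
# 13560 kg/m^3 is the density of mercury and 9.8 m/s^2 is g. For example:
# 13560 * 9.8 * 0.12  # = 15946.56 Pa, i.e. 120 mmHg (0.12 m of mercury)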
def GBMTest5(self, numOfTimeSteps=4, interpolationOption=1, saveResult=False):
"""
Using the GBM network and the radius info from BraVa and 2013, interpolate the radius (in different ways) for
the time points in between, and change the terminating pressure based on the volume change of each compartment.
numOfTimeSteps has to be >= 2 (including the two end time steps)
interpolationOption=1 interpolates the radius linearly, interpolationOption=2 uses a logistic curve (bent
upwards); interpolationOption=3 (a logistic curve bent downwards) is documented but not implemented below
Saved Result:
fluidSimulationResult_GBMTest5_Timestep={}_v1.pkl: everything normal
"""
start_time = timeit.default_timer()
functionName = inspect.currentframe().f_code.co_name
self.loadNetwork(version=4, year='BraVa')
self.convertNetowrk()
self.adjustNetwork()
self.setNetwork(option=2)
self.createGroundTruth(option=2) # just to get nodeIndex and edgeIndex and isBifurcatingNode
volumePerPartitionGroundTruth = self.getVolumePerPartition()
print('Ground truth:')
self.printTerminatingPressurePerPartition()
edgeIndexList = self.edgeIndexList
G = self.G
edgeRadiusTimeStepList = np.zeros((len(edgeIndexList), numOfTimeSteps)).tolist()
for edgeIndex in edgeIndexList:
radius = self.edgeInfoDict[edgeIndex]['meanRadius']
edgeRadiusTimeStepList[edgeIndex][0] = radius
# Change the radius #
# extraInfo = {'perturbedYear': 2013, 'excludedEdgeIndex': [0,1,2,3,4,7,9,11,5,6]}
extraInfo = {'perturbedYear': 2013, 'excludedEdgeIndex': [0,1,2,3,7]}
self.perturbNetwork(option=2, extraInfo=extraInfo)
self.setNetwork(option=2)
# success = self.createGroundTruth(option=2)
for edgeIndex in edgeIndexList:
radius = self.edgeInfoDict[edgeIndex]['meanRadius']
edgeRadiusTimeStepList[edgeIndex][-1] = radius
# Interpolate the radius for other time steps #
if interpolationOption == 1:
for edgeIndex in edgeIndexList:
radiusHead, radiusTail = edgeRadiusTimeStepList[edgeIndex][0], edgeRadiusTimeStepList[edgeIndex][-1]
for ii in range(1, numOfTimeSteps-1):
radius = (radiusTail - radiusHead) / (numOfTimeSteps - 1) * ii + radiusHead
edgeRadiusTimeStepList[edgeIndex][ii] = radius
elif interpolationOption == 2:
for edgeIndex in edgeIndexList:
radiusHead, radiusTail = edgeRadiusTimeStepList[edgeIndex][0], edgeRadiusTimeStepList[edgeIndex][-1]
for ii in range(1, numOfTimeSteps-1):
radius = (radiusTail - radiusHead) * np.tanh(ii / (numOfTimeSteps-1) * 2) + radiusHead # interior points only approach radiusTail, since tanh(2) < 1; the end point is set directly above
edgeRadiusTimeStepList[edgeIndex][ii] = radius
# print(edgeRadiusTimeStepList)
# Clear the simulation result #
# for node in G.nodes():
# self.nodeInfoDict[node]['simulationData'] = {'pressure': None, 'flow': None} # placeholders, None means unset
# for edgeIndex in edgeIndexList:
# self.edgeInfoDict[edgeIndex]['simulationData'] = {'velocity': None, 'flow': None} # placeholders, None means unset
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
maxTimeStep = numOfTimeSteps
# fitResultPerPartition = self.fitTerminatingPressureToPathLength(showFittingResult=True, figIndex=2, isLastFigure=True)
fitResultPerPartition = self.fitTerminatingPressureToPathLength()
# Start from T1 because T0 is used as a reference case (but still solve T0 just to make a record)
for currentTimeStep in range(4, 5): # NOTE: currently restricted to time step 4 only
print('##### currentTimeStep={} #####'.format(currentTimeStep))
radiusList = [edgeRadiusTimeStepList[edgeIndex][currentTimeStep] for edgeIndex in edgeIndexList]
self.updateEdgeRadius(radiusList)
volumePerPartition = self.getVolumePerPartition()
pressureDropChangePerPartition = {}
for partitionName, volume in volumePerPartition.items():
volumeGroundTruth = volumePerPartitionGroundTruth[partitionName]
volumeChange = (volume - volumeGroundTruth) / volumeGroundTruth
pressureDropChangePerPartition[partitionName] = -volumeChange
extraInfo = {'pressureDropChangePerPartition': pressureDropChangePerPartition}
self.perturbTerminatingPressure(option=5, extraInfo=extraInfo)
self.printTerminatingPressurePerPartition()
computeNetworkDetailExtraInfo = None
numOfNodes = len([node for node in nodeInfoDict if 'argsIndex' in nodeInfoDict[node]])
numOfEdges = len([edgeIndex for edgeIndex in edgeIndexList if 'argsIndex' in edgeInfoDict[edgeIndex]])
pressureIn = 13560 * 9.8 * 0.12 # Pascal # check that this number is consistent with the one used in the reference case!
velocityPressureInit = np.hstack((np.full((numOfEdges,), 0.4), np.linspace(pressureIn*0.8, pressureIn*0.5, numOfNodes)))
velocityPressureInit = [float(p) for p in velocityPressureInit]
# bounds in the form of ((min, min...), (max, max...)) #
# boundsVelocityPressure = [[], []] # first sublist contains lower bound and the second sublist contains upper bound
# boundsVelocityPressure[0] = [0] * numOfEdges + [13560*9.8*0.00] * numOfNodes # min velocity = 0 m/s, min pressure = 0 mmHg
# boundsVelocityPressure[1] = [5] * numOfEdges + [13560*9.8*0.12] * numOfNodes # max velocity = 5 m/s, max pressure = 120 mmHg
# boundsVelocityPressure = tuple(map(tuple, boundsVelocityPressure))
# bounds in the form of ((min, max), (min, max)...) #
boundsVelocityPressure = [[0, 5]] * numOfEdges + [[13560*9.8*0.00, 13560*9.8*0.12]] * numOfNodes
# Improve the lower bound of pressures at each node
# self.calculateVariableBounds()
# for node in G.nodes():
# if 'argsIndex' in nodeInfoDict[node]:
# argsIndex = self.nodeInfoDict[node]['argsIndex']
# minPressure = self.nodeInfoDict[node]['simulationData']['minPressure']
# boundsVelocityPressure[argsIndex][0] = minPressure
# boundsVelocityPressure = tuple(map(tuple, boundsVelocityPressure))
fluidMethod = 'HW'
# basinhopping, bounds in (min, max) pair form #
self.setupFluidEquations()
eqnInfoDictList = self.eqnInfoDictList
errorNorm = 2
minimizer_kwargs = {'method': 'BFGS', 'args': (eqnInfoDictList, fluidMethod, errorNorm, computeNetworkDetailExtraInfo), 'options': {'norm': np.inf, 'maxiter': 40000}}
# minimizer_kwargs = {'method': 'L-BFGS-B', 'args': (eqnInfoDictList, fluidMethod, errorNorm), 'bounds': boundsVelocityPressure, 'options': {'maxiter': 40000, 'maxfun': 40000}}
optResult = basinhopping(computeNetworkDetail, velocityPressureInit, minimizer_kwargs=minimizer_kwargs, niter=100, T=1000, stepsize=500, interval=5, niter_success=15, disp=True)
velocityPressure = np.abs(optResult.x)
cost = optResult.fun
message = optResult.message
print('cost={}, message={}'.format(cost, message))
pressures = velocityPressure[numOfEdges:]
print('Minimum pressure is {} mmHg and maximum pressure is {} mmHg'.format((np.amin(pressures))/13560/9.8*1000, (np.amax(pressures))/13560/9.8*1000))
velocities = velocityPressure[:numOfEdges]
print('Minimum velocity is {} m/s and maximum velocity is {} m/s'.format(np.amin(velocities), np.amax(velocities)))
self.velocityPressure = velocityPressure
self.validateFluidEquations(velocityPressure=velocityPressure)
if saveResult:
directory = self.directory
saveFolderPath = join(directory, 'fluidSimulationResult')
saveFileName = 'fluidSimulationResult_GBMTest5_Timestep={}_v1.pkl'.format(currentTimeStep)
resultDict = {'G': copy.deepcopy(self.G), 'nodeInfoDict': copy.deepcopy(self.nodeInfoDict), 'edgeInfoDict': copy.deepcopy(self.edgeInfoDict),
'velocityPressure': copy.deepcopy(velocityPressure)}
with open(join(saveFolderPath, saveFileName), 'wb') as f:
pickle.dump(resultDict, f, 2)
print('{} saved to {}'.format(saveFileName, saveFolderPath))
elapsed = timeit.default_timer() - start_time
print('Elapsed time for function {}: {} sec'.format(functionName, elapsed))
# Clear the simulation result #
# for node in G.nodes():
# self.nodeInfoDict[node]['simulationData'] = {'pressure': None, 'flow': None} # placeholders, None means unset
# for edgeIndex in edgeIndexList:
# self.edgeInfoDict[edgeIndex]['simulationData'] = {'velocity': None, 'flow': None} # placeholders, None means unset
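# Worked example of the two radius interpolation schemes above, for a single edge
# with radiusHead=1.0, radiusTail=2.0 and numOfTimeSteps=5 (hypothetical values):
# option 1 (linear):  [1.0, 1.25, 1.5, 1.75, 2.0]
# option 2 (tanh):    [1.0, 1.462, 1.762, 1.905, 2.0]
# # note: with option 2 the interior points follow tanh(2*ii/4); the end point is
# # assigned directly, since tanh(2) ≈ 0.964 would not reach radiusTail exactly.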
def GBMTest5b(self, numOfTimeSteps=4, interpolationOption=1, saveResult=False):
"""
Using the GBM network and the radius info from BraVa and 2013, interpolate the radius (in different ways) for
the time points in between. TODO: finish this variant; it currently only prints the pressure drop change per
partition and does not run the solver.
numOfTimeSteps has to be >= 2 (including the two end time steps)
interpolationOption=1 interpolates the radius linearly, interpolationOption=2 uses a logistic curve (bent
upwards), interpolationOption=3 uses a logistic curve (bent downwards)
Saved Result:
fluidSimulationResultTest6_Timestep={}_v1.pkl: everything normal (saving is not yet implemented in this variant)
"""
start_time = timeit.default_timer()
functionName = inspect.currentframe().f_code.co_name
self.loadNetwork(version=4, year='BraVa')
self.convertNetowrk()
self.adjustNetwork()
self.setNetwork(option=2)
self.createGroundTruth(option=2) # just to get nodeIndex and edgeIndex and isBifurcatingNode
volumePerPartitionGroundTruth = self.getVolumePerPartition()
print('Ground truth:')
self.printTerminatingPressurePerPartition()
edgeIndexList = self.edgeIndexList
G = self.G
edgeRadiusTimeStepList = np.zeros((len(edgeIndexList), numOfTimeSteps)).tolist()
for edgeIndex in edgeIndexList:
radius = self.edgeInfoDict[edgeIndex]['meanRadius']
edgeRadiusTimeStepList[edgeIndex][0] = radius
# Change the radius #
# extraInfo = {'perturbedYear': 2013, 'excludedEdgeIndex': [0,1,2,3,4,7,9,11,5,6]}
extraInfo = {'perturbedYear': 2013, 'excludedEdgeIndex': [0,1,2,3,7]}
self.perturbNetwork(option=2, extraInfo=extraInfo)
self.setNetwork(option=2)
# success = self.createGroundTruth(option=2)
for edgeIndex in edgeIndexList:
radius = self.edgeInfoDict[edgeIndex]['meanRadius']
edgeRadiusTimeStepList[edgeIndex][-1] = radius
# Interpolate the radius for other time steps #
if interpolationOption == 1:
for edgeIndex in edgeIndexList:
radiusHead, radiusTail = edgeRadiusTimeStepList[edgeIndex][0], edgeRadiusTimeStepList[edgeIndex][-1]
for ii in range(1, numOfTimeSteps-1):
radius = (radiusTail - radiusHead) / (numOfTimeSteps - 1) * ii + radiusHead
edgeRadiusTimeStepList[edgeIndex][ii] = radius
elif interpolationOption == 2:
for edgeIndex in edgeIndexList:
radiusHead, radiusTail = edgeRadiusTimeStepList[edgeIndex][0], edgeRadiusTimeStepList[edgeIndex][-1]
for ii in range(1, numOfTimeSteps-1):
radius = (radiusTail - radiusHead) * np.tanh(ii / (numOfTimeSteps-1) * 2) + radiusHead
edgeRadiusTimeStepList[edgeIndex][ii] = radius
# print(edgeRadiusTimeStepList)
# Clear the simulation result #
# for node in G.nodes():
# self.nodeInfoDict[node]['simulationData'] = {'pressure': None, 'flow': None} # placeholders, None means unset
# for edgeIndex in edgeIndexList:
# self.edgeInfoDict[edgeIndex]['simulationData'] = {'velocity': None, 'flow': None} # placeholders, None means unset
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
maxTimeStep = numOfTimeSteps
# fitResultPerPartition = self.fitTerminatingPressureToPathLength(showFittingResult=True, figIndex=2, isLastFigure=True)
fitResultPerPartition = self.fitTerminatingPressureToPathLength()
# Start from T1 because T0 is used as a reference case (but still solve T0 just to make a record)
for currentTimeStep in range(0, 5):
print('##### currentTimeStep={} #####'.format(currentTimeStep))
radiusList = [edgeRadiusTimeStepList[edgeIndex][currentTimeStep] for edgeIndex in edgeIndexList]
self.updateEdgeRadius(radiusList)
volumePerPartition = self.getVolumePerPartition()
pressureDropChangePerPartition = {}
for partitionName, volume in volumePerPartition.items():
volumeGroundTruth = volumePerPartitionGroundTruth[partitionName]
volumeChange = (volume - volumeGroundTruth) / volumeGroundTruth
pressureDropChangePerPartition[partitionName] = -volumeChange
print(pressureDropChangePerPartition)
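# Worked example of the volume-to-pressure-drop mapping above (hypothetical numbers):
# if a compartment's volume shrinks from 2.0 to 1.8 (same units), then
# volumeChange = (1.8 - 2.0) / 2.0 = -0.1, so pressureDropChangePerPartition = +0.1,
# i.e. the terminating pressure drop in that compartment is increased by 10%.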
def GBMTest6(self, numOfTimeSteps=4, interpolationOption=1, saveResult=False):
"""
Exactly the same as GBMTest5, but with the solver settings tweaked slightly to see whether the results improve.
numOfTimeSteps has to be >= 2 (including the two end time steps)
interpolationOption=1 interpolates the radius linearly, interpolationOption=2 uses a logistic curve (bent
upwards), interpolationOption=3 uses a logistic curve (bent downwards)
Saved Result:
fluidSimulationResult_GBMTest6_Timestep={}_v1.pkl: everything normal
"""
start_time = timeit.default_timer()
functionName = inspect.currentframe().f_code.co_name
self.loadNetwork(version=4, year='BraVa')
self.convertNetowrk()
self.adjustNetwork()
self.setNetwork(option=2)
self.createGroundTruth(option=2) # just to get nodeIndex and edgeIndex and isBifurcatingNode
volumePerPartitionGroundTruth = self.getVolumePerPartition()
print('Ground truth:')
self.printTerminatingPressurePerPartition()
edgeIndexList = self.edgeIndexList
G = self.G
edgeRadiusTimeStepList = np.zeros((len(edgeIndexList), numOfTimeSteps)).tolist()
for edgeIndex in edgeIndexList:
radius = self.edgeInfoDict[edgeIndex]['meanRadius']
edgeRadiusTimeStepList[edgeIndex][0] = radius
# Change the radius #
# extraInfo = {'perturbedYear': 2013, 'excludedEdgeIndex': [0,1,2,3,4,7,9,11,5,6]}
extraInfo = {'perturbedYear': 2013, 'excludedEdgeIndex': [0,1,2,3,7]}
self.perturbNetwork(option=2, extraInfo=extraInfo)
self.setNetwork(option=2)
# success = self.createGroundTruth(option=2)
for edgeIndex in edgeIndexList:
radius = self.edgeInfoDict[edgeIndex]['meanRadius']
edgeRadiusTimeStepList[edgeIndex][-1] = radius
# Interpolate the radius for other time steps #
if interpolationOption == 1:
for edgeIndex in edgeIndexList:
radiusHead, radiusTail = edgeRadiusTimeStepList[edgeIndex][0], edgeRadiusTimeStepList[edgeIndex][-1]
for ii in range(1, numOfTimeSteps-1):
radius = (radiusTail - radiusHead) / (numOfTimeSteps - 1) * ii + radiusHead
edgeRadiusTimeStepList[edgeIndex][ii] = radius
elif interpolationOption == 2:
for edgeIndex in edgeIndexList:
radiusHead, radiusTail = edgeRadiusTimeStepList[edgeIndex][0], edgeRadiusTimeStepList[edgeIndex][-1]
for ii in range(1, numOfTimeSteps-1):
radius = (radiusTail - radiusHead) * np.tanh(ii / (numOfTimeSteps-1) * 2) + radiusHead
edgeRadiusTimeStepList[edgeIndex][ii] = radius
# print(edgeRadiusTimeStepList)
# Clear the simulation result #
# for node in G.nodes():
# self.nodeInfoDict[node]['simulationData'] = {'pressure': None, 'flow': None} # placeholders, None means unset
# for edgeIndex in edgeIndexList:
# self.edgeInfoDict[edgeIndex]['simulationData'] = {'velocity': None, 'flow': None} # placeholders, None means unset
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
maxTimeStep = numOfTimeSteps
# fitResultPerPartition = self.fitTerminatingPressureToPathLength(showFittingResult=True, figIndex=2, isLastFigure=True)
fitResultPerPartition = self.fitTerminatingPressureToPathLength()
# Start from T1 because T0 is used as a reference case (but still solve T0 just to make a record)
for currentTimeStep in range(numOfTimeSteps):
print('##### currentTimeStep={} #####'.format(currentTimeStep))
radiusList = [edgeRadiusTimeStepList[edgeIndex][currentTimeStep] for edgeIndex in edgeIndexList]
self.updateEdgeRadius(radiusList)
volumePerPartition = self.getVolumePerPartition()
pressureDropChangePerPartition = {}
for partitionName, volume in volumePerPartition.items():
volumeGroundTruth = volumePerPartitionGroundTruth[partitionName]
volumeChange = (volume - volumeGroundTruth) / volumeGroundTruth
pressureDropChangePerPartition[partitionName] = -volumeChange
extraInfo = {'pressureDropChangePerPartition': pressureDropChangePerPartition}
self.perturbTerminatingPressure(option=5, extraInfo=extraInfo)
self.printTerminatingPressurePerPartition()
computeNetworkDetailExtraInfo = None
numOfNodes = len([node for node in nodeInfoDict if 'argsIndex' in nodeInfoDict[node]])
numOfEdges = len([edgeIndex for edgeIndex in edgeIndexList if 'argsIndex' in edgeInfoDict[edgeIndex]])
pressureIn = 13560 * 9.8 * 0.12 # Pascal # check that this number is consistent with what was used in the reference case!
velocityPressureInit = np.hstack((np.full((numOfEdges,), 0.4), np.linspace(pressureIn*0.8, pressureIn*0.5, numOfNodes)))
velocityPressureInit = [float(p) for p in velocityPressureInit]
# bounds in the form of ((min, min...), (max, max...)) #
# boundsVelocityPressure = [[], []] # first sublist contains lower bound and the second sublist contains upper bound
# boundsVelocityPressure[0] = [0] * numOfEdges + [13560*9.8*0.00] * numOfNodes # min velocity = 0 m/s, min pressure = 0 mmHg
# boundsVelocityPressure[1] = [5] * numOfEdges + [13560*9.8*0.12] * numOfNodes # max velocity = 5 m/s, max pressure = 120 mmHg
# boundsVelocityPressure = tuple(map(tuple, boundsVelocityPressure))
# bounds in the form of ((min, max), (min, max)...) #
boundsVelocityPressure = [[0, 5]] * numOfEdges + [[13560*9.8*0.00, 13560*9.8*0.12]] * numOfNodes
# Improve the lower bound of pressures at each node
# self.calculateVariableBounds()
# for node in G.nodes():
# if 'argsIndex' in nodeInfoDict[node]:
# argsIndex = self.nodeInfoDict[node]['argsIndex']
# minPressure = self.nodeInfoDict[node]['simulationData']['minPressure']
# boundsVelocityPressure[argsIndex][0] = minPressure
# boundsVelocityPressure = tuple(map(tuple, boundsVelocityPressure))
fluidMethod = 'HW'
# basinhopping, bounds in (min, max) pair form #
self.setupFluidEquations()
eqnInfoDictList = self.eqnInfoDictList
errorNorm = 2
minimizer_kwargs = {'method': 'BFGS', 'args': (eqnInfoDictList, fluidMethod, errorNorm, computeNetworkDetailExtraInfo), 'options': {'norm': np.inf, 'maxiter': 40000}}
# minimizer_kwargs = {'method': 'L-BFGS-B', 'args': (eqnInfoDictList, fluidMethod, errorNorm), 'bounds': boundsVelocityPressure, 'options': {'maxiter': 40000, 'maxfun': 40000}}
optResult = basinhopping(computeNetworkDetail, velocityPressureInit, minimizer_kwargs=minimizer_kwargs, niter=100, T=2000, stepsize=1000, interval=5, niter_success=16, disp=True)
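# Note on the basinhopping settings above: `T` is the Metropolis acceptance
# temperature, `stepsize` the magnitude of the random displacement between
# local minimizations, `interval` controls how often the step size is adapted,
# and `niter_success` stops the search early after that many consecutive
# iterations without a new global-minimum candidate.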
velocityPressure = np.abs(optResult.x)
cost = optResult.fun
message = optResult.message
print('cost={}, message={}'.format(cost, message))
pressures = velocityPressure[numOfEdges:]
print('Minimum pressure is {} mmHg and maximum pressure is {} mmHg'.format((np.amin(pressures))/13560/9.8*1000, (np.amax(pressures))/13560/9.8*1000))
velocities = velocityPressure[:numOfEdges]
print('Minimum velocity is {} m/s and maximum velocity is {} m/s'.format(np.amin(velocities), np.amax(velocities)))
self.velocityPressure = velocityPressure
self.validateFluidEquations(velocityPressure=velocityPressure)
if saveResult:
directory = self.directory
saveFolderPath = join(directory, 'fluidSimulationResult')
saveFileName = 'fluidSimulationResult_GBMTest6_Timestep={}_v1.pkl'.format(currentTimeStep)
resultDict = {'G': copy.deepcopy(self.G), 'nodeInfoDict': copy.deepcopy(self.nodeInfoDict), 'edgeInfoDict': copy.deepcopy(self.edgeInfoDict),
'velocityPressure': copy.deepcopy(velocityPressure)}
with open(join(saveFolderPath, saveFileName), 'wb') as f:
pickle.dump(resultDict, f, 2)
print('{} saved to {}'.format(saveFileName, saveFolderPath))
elapsed = timeit.default_timer() - start_time
print('Elapsed time for function {}: {} sec'.format(functionName, elapsed))
def computeNetworkTest(self, saveResult=False):
"""
Check whether the solver can correctly solve a system by first creating a ground truth model and then comparing the simulation result against it
"""
start_time = timeit.default_timer()
functionName = inspect.currentframe().f_code.co_name
directory = self.directory
resultDict = {'reference': {}, 'perturbed': {}}
self.generateNetwork(maxDepth=5, allowMerge=False)
self.setNetwork(option=1)
success = False
self.createGroundTruth()
G = self.G
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
nodeLabelDict = {node: np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()} # ground truth pressure in mmHg
nodeValueList = [np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()] # ground truth pressure in mmHg
edgeLabelDict = {edge: np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()} # ground truth flow in cm^3/s
edgeValueList = [np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()] # ground truth flow in cm^3/s
infoDict = {'nodeLabelDict': nodeLabelDict, 'nodeValueList': nodeValueList, 'nodeColorbarLabel': 'Node',
'edgeLabelDict': edgeLabelDict, 'edgeValueList': edgeValueList, 'edgeColorbarLabel': 'Edge',
'figTitle': 'Ground truth'}
self.plotNetwork(infoDict, figIndex=1, isLastFigure=False)
# self.showTerminatingPressureAndPathLength()
resultDict['reference'] = {'G': G, 'nodeInfoDict': nodeInfoDict, 'edgeInfoDict': edgeInfoDict}
G = self.G
edgeList = self.edgeList
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
edgeIndexList = self.edgeIndexList
spacing = self.spacing
numOfNodes = len([node for node in nodeInfoDict if 'argsIndex' in nodeInfoDict[node]])
numOfEdges = len([edgeIndex for edgeIndex in edgeIndexList if 'argsIndex' in edgeInfoDict[edgeIndex]])
pressureIn = 13560 * 9.8 * 0.12 # Pascal # check if this number is consistent with that in generateNetwork()!
velocityPressureInit = np.hstack((np.full((numOfEdges,), 0.4), np.linspace(pressureIn*0.8, pressureIn*0.5, numOfNodes)))
# bounds in the form of ((min, min...), (max, max...)) #
# boundsVelocityPressure = [[], []] # first sublist contains lower bound and the second sublist contains upper bound
# boundsVelocityPressure[0] = [0] * numOfEdges + [13560*9.8*0.00] * numOfNodes # min velocity = 0 m/s, min pressure = 0 mmHg
# boundsVelocityPressure[1] = [5] * numOfEdges + [13560*9.8*0.12] * numOfNodes # max velocity = 5 m/s, max pressure = 120 mmHg
# boundsVelocityPressure = tuple(map(tuple, boundsVelocityPressure))
# bounds in the form of ((min, max), (min, max)...) #
boundsVelocityPressure = [[0, 5]] * numOfEdges + [[13560*9.8*0.00, 13560*9.8*0.12]] * numOfNodes
# Improve the lower bound of pressures at each node
self.calculateVariableBounds()
for node in G.nodes():
if 'argsIndex' in nodeInfoDict[node]:
argsIndex = nodeInfoDict[node]['argsIndex']
minPressure = nodeInfoDict[node]['simulationData']['minPressure']
boundsVelocityPressure[argsIndex][0] = minPressure
boundsVelocityPressure = tuple(map(tuple, boundsVelocityPressure))
fluidMethod = 'HW'
## intentionally perturb the inlet/terminating pressure away from the ground truth to see how the solver reacts
# self.nodeInfoDict[0]['simulationData']['pressure'] = 13560*9.8*0.12*(1-np.random.rand()*0.1) # perturb inlet pressure
## perturb terminating pressure
perturbPercent = 0.1
for node in G.nodes():
if G.degree(node) == 1:
self.nodeInfoDict[node]['simulationData']['pressure'] *= (np.random.rand() * perturbPercent + 1 - perturbPercent / 2)
## Perturb radius
# extraInfo = {'numOfEdgesToPerturb': 10, 'reducePercentage': 30}
# self.perturbNetwork(option=1, extraInfo=extraInfo)
# least square optimization #
# self.setupFluidEquations()
# eqnInfoDictList = self.eqnInfoDictList
# optResult = least_squares(computeNetworkDetail, velocityPressureInit, args=(eqnInfoDictList, fluidMethod), bounds=boundsVelocityPressure, ftol=1e-9, xtol=1e-9)
# velocityPressure = np.abs(optResult.x)
# cost = optResult.cost
# message = optResult.message
# minimize (L-BFGS-B), bounds in (min, max) pair form #
# self.setupFluidEquations()
# eqnInfoDictList = self.eqnInfoDictList
# errorNorm = 2
# options = {'maxiter': 25000, 'maxfun': 25000}
# optResult = minimize(computeNetworkDetail, velocityPressureInit, args=(eqnInfoDictList, fluidMethod, errorNorm), bounds=boundsVelocityPressure, method='L-BFGS-B', options=options)
# velocityPressure = np.abs(optResult.x)
# cost = optResult.fun
# message = optResult.message
# minimize (BFGS), bounds in (min, max) pair form #
# self.setupFluidEquations()
# eqnInfoDictList = self.eqnInfoDictList
# errorNorm = 2
# options = {'norm': 2, 'maxiter': 30000}
# optResult = minimize(computeNetworkDetail, velocityPressureInit, args=(eqnInfoDictList, fluidMethod, errorNorm), method='BFGS', options=options)
# velocityPressure = np.abs(optResult.x)
# cost = optResult.fun
# message = optResult.message
# basinhopping #
self.setupFluidEquations()
eqnInfoDictList = self.eqnInfoDictList
errorNorm = 0
minimizer_kwargs = {'method': 'BFGS', 'args': (eqnInfoDictList, fluidMethod, errorNorm), 'options': {'norm': np.inf, 'maxiter': 30000}}
# minimizer_kwargs = {'method': 'L-BFGS-B', 'args': (eqnInfoDictList, fluidMethod, errorNorm), 'bounds': boundsVelocityPressure, 'options': {'maxiter': 30000, 'maxfun': 30000}}
optResult = basinhopping(computeNetworkDetail, velocityPressureInit, minimizer_kwargs=minimizer_kwargs, niter=300, T=5, stepsize=5, interval=5, niter_success=20, disp=True)
velocityPressure = np.abs(optResult.x)
cost = optResult.fun
message = optResult.message
# differential evolution, bounds in (min, max) pair form #
# self.setupFluidEquations()
# eqnInfoDictList = self.eqnInfoDictList
# errorNorm = 2
# optResult = differential_evolution(computeNetworkDetail, args=(eqnInfoDictList, fluidMethod, errorNorm), bounds=boundsVelocityPressure, maxiter=2000, polish=True, disp=True)
# velocityPressure = np.abs(optResult.x)
# cost = optResult.fun
# message = optResult.message
# Matlab fsolve #
# self.setupFluidEquationsMatLab()
# eqnInfoDictList = self.eqnInfoDictList
# import matlab.engine, io
# # eng = matlab.engine.start_matlab()
# eng = matlab.engine.connect_matlab()
# eng.addpath('/Users/zhuj10/Dropbox/NIH/Data/Ron Data/1358-Subject18016/fluidSimulationWithCoW')
# print(matlab.engine.find_matlab())
# out = io.StringIO()
# err = io.StringIO()
# solver = 'fsolve'
# solver = 'lsqnonlin'
# # solver = 'Validate'
# # velocityPressureGroundTruth = self.velocityPressureGroundTruth
# # velocityPressureInit = [float(p) for p in velocityPressureTrue]
# velocityPressureInit = [float(p) for p in velocityPressureInit]
# optResult = eng.performFluidSimulation4ForMatLab(eqnInfoDictList, solver, velocityPressureInit, stdout=out, stderr=err)
# # optResult = eng.testMatLab1(eqnInfoDictList, solver, velocityPressureInit, stdout=out, stderr=err)
# # print(optResult)
# print(out.getvalue())
# print(err.getvalue())
# cost = optResult['error']
# message = optResult['message']
# velocityPressure = optResult['optParam'][0]
##
print('cost={}, message={}'.format(cost, message))
pressures = velocityPressure[numOfEdges:]
print('Minimum pressure is {} mmHg and maximum pressure is {} mmHg'.format((np.amin(pressures))/13560/9.8*1000, (np.amax(pressures))/13560/9.8*1000))
velocities = velocityPressure[:numOfEdges]
print('Minimum velocity is {} m/s and maximum velocity is {} m/s'.format(np.amin(velocities), np.amax(velocities)))
velocityPressureGroundTruth = self.velocityPressureGroundTruth
self.velocityPressure = velocityPressure
# self.validateFluidEquations(velocityPressure=velocityPressure)
self.validateFluidEquations(velocityPressure=velocityPressure)
print(list(zip(velocityPressureGroundTruth, velocityPressure)))
elapsed = timeit.default_timer() - start_time
print('Elapsed time for function {}: {} sec'.format(functionName, elapsed))
self.updateNetworkWithSimulationResult(velocityPressure)
G = self.G
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
nodeLabelDict = {node: np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()} # simulated pressure in mmHg
nodeValueList = [np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()] # simulated pressure in mmHg
edgeLabelDict = {edge: np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()} # simulated flow in cm^3/s
edgeValueList = [np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()] # simulated flow in cm^3/s
infoDict = {'nodeLabelDict': nodeLabelDict, 'nodeValueList': nodeValueList, 'nodeColorbarLabel': 'Node',
'edgeLabelDict': edgeLabelDict, 'edgeValueList': edgeValueList, 'edgeColorbarLabel': 'Edge',
'figTitle': 'Simulated'}
self.plotNetwork(infoDict, figIndex=3, isLastFigure=True)
resultDict['perturbed'] = {'G': G, 'nodeInfoDict': nodeInfoDict, 'edgeInfoDict': edgeInfoDict}
if saveResult:
directory = self.directory
saveFolderPath = join(directory, 'fluidSimulationResultRandomNetwork')
saveFileName = 'fluidSimulationResult.pkl'
with open(join(saveFolderPath, saveFileName), 'wb') as f:
pickle.dump(resultDict, f, 2)
print('{} saved to {}'.format(saveFileName, saveFolderPath))
def argsBoundTest(self):
"""
Test the function `calculateVariableBounds`
"""
start_time = timeit.default_timer()
functionName = inspect.currentframe().f_code.co_name
directory = self.directory
# Artificial network
# self.generateNetwork(maxDepth=5, allowMerge=False)
# self.setNetwork(option=1)
# GBM network
self.loadNetwork(version=4, year='BraVa')
self.convertNetowrk()
self.adjustNetwork()
self.setNetwork(option=2)
self.createGroundTruth()
self.calculateVariableBounds()
G = self.G
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
for node in G.nodes():
if 'argsIndex' not in nodeInfoDict[node]:
pass
else:
if 'minPressure' not in nodeInfoDict[node]['simulationData']:
print('Node {} does not have minPressure'.format(node))
nodeLabelDict = {node: np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()} # ground truth pressure in mmHg
nodeValueList = [np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()] # ground truth pressure in mmHg
edgeLabelDict = {edge: np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()} # ground truth flow in cm^3/s
edgeValueList = [np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()] # ground truth flow in cm^3/s
infoDict = {'nodeLabelDict': nodeLabelDict, 'nodeValueList': nodeValueList, 'nodeColorbarLabel': 'Node',
'edgeLabelDict': edgeLabelDict, 'edgeValueList': edgeValueList, 'edgeColorbarLabel': 'Edge',
'figTitle': 'Ground truth'}
self.plotNetwork(infoDict, figIndex=1, isLastFigure=False)
nodeLabelDict = {node: np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) if 'argsIndex' not in nodeInfoDict[node] else np.round(nodeInfoDict[node]['simulationData']['minPressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()} # ground truth pressure in mmHg
nodeValueList = [np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) if 'argsIndex' not in nodeInfoDict[node] else np.round(nodeInfoDict[node]['simulationData']['minPressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()] # ground truth pressure in mmHg
edgeLabelDict = {edge: np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()} # ground truth flow in cm^3/s
edgeValueList = [np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()] # ground truth flow in cm^3/s
infoDict = {'nodeLabelDict': nodeLabelDict, 'nodeValueList': nodeValueList, 'nodeColorbarLabel': 'Node',
'edgeLabelDict': edgeLabelDict, 'edgeValueList': edgeValueList, 'edgeColorbarLabel': 'Edge',
'figTitle': 'Ground truth'}
self.plotNetwork(infoDict, figIndex=3, isLastFigure=True)
def distributeFlowTest(self):
"""
Find a way (by optimization) to distribute the flow in the entire network such that the resulting terminating
pressures match the desired values (they do not need to match exactly; the aim is just to minimize the difference
between them). Unfinished!
"""
start_time = timeit.default_timer()
functionName = inspect.currentframe().f_code.co_name
resultDict = {'referenceYear': {}, 'perturbedYear': {}}
self.loadNetwork(version=4, year='BraVa')
self.convertNetowrk()
self.adjustNetwork()
self.setNetwork(option=2)
self.showFlowInfo()
success = self.createGroundTruth()
if not success:
return
G = self.G
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
# nodeLabelDict = {node: G.node[node]['depth'] for node in G.nodes()} # nodeLevel
# nodeLabelDict = {node: G.node[node]['nodeIndex'] for node in G.nodes()} # nodeIndex
nodeLabelDict = {node: np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()} # ground truth pressure in mmHg
# nodeValueList = [G.node[node]['depth'] for node in G.nodes()] # nodeLevel
# nodeValueList = [G.node[node]['nodeIndex'] for node in G.nodes()] # nodeIndex
nodeValueList = [np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()] # ground truth pressure in mmHg
# edgeLabelDict = {edge: G[edge[0]][edge[1]]['depth'] for edge in G.edges()} # edgeLevel
# edgeLabelDict = {edge: G[edge[0]][edge[1]]['edgeIndex'] for edge in G.edges()} # edgeIndex
edgeLabelDict = {edge: np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()} # ground truth flow in cm^3/s
# edgeValueList = [G[edge[0]][edge[1]]['depth'] for edge in G.edges()] # edgeLevel
# edgeValueList = [G[edge[0]][edge[1]]['edgeIndex'] for edge in G.edges()] # edgeIndex
# edgeValueList = [edgeInfoDict[edgeIndex]['meanRadius'] for edgeIndex in edgeIndexList] # meanRadius
edgeValueList = [np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()] # ground truth flow in cm^3/s
infoDict = {'nodeLabelDict': nodeLabelDict, 'nodeValueList': nodeValueList, 'nodeColorbarLabel': 'Node depth',
'edgeLabelDict': edgeLabelDict, 'edgeValueList': edgeValueList, 'edgeColorbarLabel': 'Edge depth',
'figTitle': 'GBM Reference'}
# self.plotNetwork(infoDict, figIndex=1, isLastFigure=False)
# resultDict['referenceYear'] = {'year': 'BraVa', 'nodeInfoDict': nodeInfoDict, 'edgeInfoDict': edgeInfoDict}
##
self.setupEquationsForDistributeFlow()
def computeNetwork(self):
pass
def validateNetwork(self):
pass
def plotNetwork(self, infoDict: dict, figIndex: int=1, isLastFigure: bool=True, hideColorbar: bool=False):
"""
Plot the graph G in a tree structure. The color of the nodes and edges reflects corresponding values.
Parameters
----------
G : NetworkX graph
The graph to be plotted (taken from `self.G`; not passed as an argument).
infoDict : dict
A dictionary containing necessary information for plotting.
figIndex : int, optional
The figure index.
isLastFigure : bool, optional
If True, `plt.show()` will be executed.
hideColorbar : bool, optional
If True, the colorbars will be hidden.
"""
G = self.G
## Unpack infoDict ##
nodeLabelDict, nodeValueList = itemgetter('nodeLabelDict', 'nodeValueList')(infoDict)
edgeLabelDict, edgeValueList = itemgetter('edgeLabelDict', 'edgeValueList')(infoDict)
figTitle, nodeColorbarLabel, edgeColorbarLabel = itemgetter('figTitle', 'nodeColorbarLabel', 'edgeColorbarLabel')(infoDict)
## Calculate extra info ##
if 'vmin' not in infoDict or 'vmax' not in infoDict:
vmin, vmax = np.amin(nodeValueList), np.amax(nodeValueList)
else:
vmin, vmax = itemgetter('vmin', 'vmax')(infoDict)
if 'edge_vmin' not in infoDict or 'edge_vmax' not in infoDict:
edge_vmin, edge_vmax = np.amin(edgeValueList), np.amax(edgeValueList)
else:
edge_vmin, edge_vmax = itemgetter('edge_vmin', 'edge_vmax')(infoDict)
## Plot ##
fig = plt.figure(figIndex, figsize=(15, 8))
plt.subplots_adjust(left=0.06, right=0.94, top=0.94, bottom=0.06, wspace=0.3, hspace=0.3)
pos = graphviz_layout(G, prog='dot')
ax = fig.add_axes([0.05, 0.05, 0.7, 0.9])
ax.set_title(figTitle)
ax.set_axis_off()
nodes = nx.draw_networkx_nodes(G, pos, node_size=250, node_color=nodeValueList, cmap=plt.cm.get_cmap('jet'), vmin=vmin, vmax=vmax)
edges = nx.draw_networkx_edges(G, pos, arrowstyle='-', arrowsize=10, edge_color=edgeValueList, edge_cmap=plt.cm.jet, edge_vmin=edge_vmin, edge_vmax=edge_vmax, width=2)
if len(nodeLabelDict) != 0:
nx.draw_networkx_labels(G, pos, labels=nodeLabelDict, font_size=8)
if len(edgeLabelDict) != 0:
nx.draw_networkx_edge_labels(G, pos, edge_labels=edgeLabelDict, font_size=8)
# node colorbar
if len(nodeColorbarLabel) != 0 and not hideColorbar:
# plt.colorbar(nodes, cmap=plt.cm.jet, label=nodeColorbarLabel)
ax1 = fig.add_axes([0.8, 0.05, 0.03, 0.9])
norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
cb1 = mpl.colorbar.ColorbarBase(ax1, cmap=mpl.cm.jet, norm=norm, orientation='vertical')
cb1.set_label(nodeColorbarLabel, size=10)
cb1.ax.tick_params(labelsize=10)
# edge colorbar
if len(edgeColorbarLabel) != 0 and not hideColorbar:
ax2 = fig.add_axes([0.9, 0.05, 0.03, 0.9])
norm = mpl.colors.Normalize(vmin=edge_vmin, vmax=edge_vmax)
cb2 = mpl.colorbar.ColorbarBase(ax2, cmap=mpl.cm.jet, norm=norm, orientation='vertical')
cb2.set_label(edgeColorbarLabel, size=10)
cb2.ax.tick_params(labelsize=10)
if isLastFigure:
plt.show()
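# Minimal usage sketch (illustrative values; the keys are exactly those
# consumed above, with 'vmin'/'vmax' and 'edge_vmin'/'edge_vmax' optional):
# >>> infoDict = {'nodeLabelDict': {}, 'nodeValueList': [60.0, 55.0],
# ...             'nodeColorbarLabel': 'Pressure (mmHg)',
# ...             'edgeLabelDict': {}, 'edgeValueList': [5.0],
# ...             'edgeColorbarLabel': 'Flow (cm^3/s)', 'figTitle': 'Demo'}
# >>> self.plotNetwork(infoDict, figIndex=1, isLastFigure=True)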
def getNetwork(self):
return self.G
def compareNetworkPropertyTest(self):
"""
Compare the edge properties before and after perturbing the network.
GBM_Radius ratio vs Graph level_Compartment(5)_Single row
GBM_Radius ratio vs Graph level_Graph plot
"""
start_time = timeit.default_timer()
functionName = inspect.currentframe().f_code.co_name
self.loadNetwork(version=4, year='BraVa')
self.convertNetowrk()
self.adjustNetwork()
self.setNetwork(option=2)
self.nodeInfoDictBefore = copy.deepcopy(self.nodeInfoDict)
self.edgeInfoDictBefore = copy.deepcopy(self.edgeInfoDict)
## Solve the system with perturbed network properties
edgeIndexList = self.edgeIndexList
# Manually perturb the network #
extraInfo = {'perturbedYear': 2013, 'excludedEdgeIndex': [0,1,2,3,7]}
self.perturbNetwork(option=2, extraInfo=extraInfo)
self.setNetwork(option=2)
self.nodeInfoDictAfter = copy.deepcopy(self.nodeInfoDict)
self.edgeInfoDictAfter = copy.deepcopy(self.edgeInfoDict)
edgeIndexList = sorted(list(self.edgeInfoDict.keys()))
spacing = self.spacing
print('Edge difference before and after:')
for edgeIndex in edgeIndexList:
radius, length, c, k = itemgetter('meanRadius', 'length', 'c', 'k')(self.edgeInfoDictBefore[edgeIndex])
radiusBefore = np.round(radius * spacing * 1000, 3) # mm
lengthBefore = np.round(length * spacing * 100, 3) # cm
cBefore, kBefore = np.round(c, 3), np.round(k, 3)
radius, length, c, k = itemgetter('meanRadius', 'length', 'c', 'k')(self.edgeInfoDictAfter[edgeIndex])
radiusAfter = np.round(radius * spacing * 1000, 3) # mm
lengthAfter = np.round(length * spacing * 100, 3) # cm
cAfter, kAfter = np.round(c, 3), np.round(k, 3)
print('edgeIndex={}, radius={}/{} mm, length={}/{} cm, c={}/{}, k={}/{}'.format(edgeIndex, radiusBefore, radiusAfter, lengthBefore, lengthAfter, cBefore, cAfter, kBefore, kAfter))
G = self.G
for edge in G.edges():
edgeIndex = G[edge[0]][edge[1]]['edgeIndex']
radiusRatio = np.round(self.edgeInfoDictAfter[edgeIndex]['meanRadius'] / self.edgeInfoDictBefore[edgeIndex]['meanRadius'], 2)
self.edgeInfoDictAfter[edgeIndex]['radiusRatio'] = radiusRatio
self.edgeInfoDictBefore[edgeIndex]['radiusRatio'] = radiusRatio
self.edgeInfoDict[edgeIndex]['radiusRatio'] = radiusRatio
elapsed = timeit.default_timer() - start_time
print('Elapsed time for function {}: {} sec'.format(functionName, elapsed))
G = self.G
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
nodeLabelDict = {node: nodeInfoDict[node]['nodeIndex'] for node in G.nodes()}
nodeValueList = [nodeInfoDict[node]['nodeIndex'] for node in G.nodes()]
edgeLabelDict = {edge: self.edgeInfoDictAfter[G[edge[0]][edge[1]]['edgeIndex']]['radiusRatio'] for edge in G.edges()} # radius ratio (after/before)
edgeValueList = [self.edgeInfoDictAfter[G[edge[0]][edge[1]]['edgeIndex']]['radiusRatio'] for edge in G.edges()] # radius ratio (after/before)
infoDict = {'nodeLabelDict': nodeLabelDict, 'nodeValueList': nodeValueList, 'nodeColorbarLabel': 'Node',
'edgeLabelDict': edgeLabelDict, 'edgeValueList': edgeValueList, 'edgeColorbarLabel': 'Edge',
'figTitle': 'Ground truth'}
self.plotNetwork(infoDict, figIndex=1, isLastFigure=False)
partitionInfo = {'LMCA': {'startNodes': [5], 'boundaryNodes': [13]}, 'RMCA': {'startNodes': [6], 'boundaryNodes': [13]},
'LPCA': {'startNodes': [4], 'boundaryNodes': []}, 'RPCA': {'startNodes': [7], 'boundaryNodes': []}, 'ACA': {'startNodes': [13], 'boundaryNodes': []}}
partitionInfo = {'LMCA': {'startNodes': [4], 'boundaryNodes': [10]}, 'RMCA': {'startNodes': [5], 'boundaryNodes': [10]},
'LPCA': {'startNodes': [6], 'boundaryNodes': []}, 'RPCA': {'startNodes': [7], 'boundaryNodes': []}, 'ACA': {'startNodes': [10], 'boundaryNodes': []}}
# fig = plt.figure(2, figsize=(15, 8))
# plt.subplots_adjust(left=0.06, right=0.94, top=0.94, bottom=0.06, wspace=0.3, hspace=0.3)
fig = plt.figure(11, figsize=(15, 3))
plt.subplots_adjust(left=0.05, right=0.96, top=0.90, bottom=0.15, wspace=0.3, hspace=0.4)
subplotIndex = 1
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
spacing = self.spacing
for partitionName, info in partitionInfo.items():
startNodes, boundaryNodes = itemgetter('startNodes', 'boundaryNodes')(info)
resultDict = self.BFS(startNodes, boundaryNodes)
visitedNodes, visitedEdges = itemgetter('visitedNodes', 'visitedEdges')(resultDict)
print('{}:\nvisitedNodes={}\nvisitedEdges={}'.format(partitionName, visitedNodes, visitedEdges))
ax = fig.add_subplot(1,5,subplotIndex)
dictUsed = edgeInfoDict
attribute1, attribute2, attribute3 = 'segmentLevel', 'meanRadius', 'partitionName'
attribute1List = [edgeInfoDict[edgeIndex]['depth'] for edgeIndex in visitedEdges]
# attribute2List = [edgeInfoDict[edgeIndex]['meanRadius']*spacing*1000 for edgeIndex in visitedEdges]
attribute2List = [edgeInfoDict[edgeIndex]['radiusRatio'] for edgeIndex in visitedEdges]
# attribute1List = [info[attribute1] for _, info in dictUsed.items() if attribute1 in info and attribute2 in info and attribute3 in info and info[attribute3] in partitionNames]
# attribute2List = [info[attribute2]*spacing*1000 for _, info in dictUsed.items() if attribute1 in info and attribute2 in info and attribute3 in info and info[attribute3] in partitionNames] # mm
# ax.plot(attribute1List, attribute2List, 'bo')
positions = np.sort(np.unique(attribute1List))
values = []
attribute1Array, attribute2Array = np.array(attribute1List), np.array(attribute2List)
for segmentLevel in positions:
locs = np.nonzero(attribute1Array == segmentLevel)[0]
values.append((attribute2Array[locs]).tolist())
mf.boxPlotWithWhiskers(values, ax, positions=positions, whis='range', xlabel='Graph level', ylabel='Radius (mm)')
ax.set_xlabel('Graph level')
ax.set_ylabel('Radius ratio')
ax.set_title(partitionName)
subplotIndex += 1
plt.show()
def updateEdgeRadius(self, edgeRadiusList):
"""
Update the edge radius with the supplied list.
The i-th element in edgeRadiusList is the radius (in voxels) of the i-th edge.
Parameters
----------
edgeRadiusList : list
A list of new edge radii, ordered by edge index.
"""
edgeInfoDict = self.edgeInfoDict
for edgeIndex, radius in enumerate(edgeRadiusList):
edgeInfoDict[edgeIndex]['meanRadius'] = radius
self.edgeInfoDict = edgeInfoDict
self.setNetwork(option=2)
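# Usage sketch (hypothetical values; the list is ordered by edge index and
# radii are in voxels, matching 'meanRadius' in edgeInfoDict):
# >>> radiusList = [self.edgeInfoDict[ii]['meanRadius'] * 0.9 for ii in self.edgeIndexList]
# >>> self.updateEdgeRadius(radiusList)  # shrink every edge by 10% and rebuild the network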
def applyFlowToNetwork(self, edgeFlowList):
"""
Apply the flow from edgeFlowList to the corresponding edges and recalculate all the pressures.
The i-th element in edgeFlowList is the flow (in m^3/s) of the i-th edge.
Parameters
----------
edgeFlowList : list
A list of flow rates to be applied to each edge.
"""
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
edgeIndexList = self.edgeIndexList
edgeList = self.edgeList
spacing = self.spacing
for edgeIndex, flow in enumerate(edgeFlowList):
edgeInfoDict[edgeIndex]['simulationData']['flow'] = flow
radius = edgeInfoDict[edgeIndex]['meanRadius'] * spacing # meter
velocity = flow / (np.pi * radius**2) # m/s
edgeInfoDict[edgeIndex]['simulationData']['velocity'] = velocity
edgeDepthArray = np.array([edgeInfoDict[edgeIndex]['depth'] for edgeIndex in edgeIndexList])
edgeIndexListSorted = np.array(edgeIndexList)[edgeDepthArray.argsort()].tolist()
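# Edges are traversed in order of increasing depth so that each edge's head
# pressure is already known before its tail pressure is derived from it below.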
for edgeIndex in edgeIndexListSorted:
edge = edgeList[edgeIndex]
edgeHead, edgeTail = edge
if nodeInfoDict[edgeHead]['depth'] > nodeInfoDict[edgeTail]['depth']:
edgeHead, edgeTail = edgeTail, edgeHead
pressureHead = nodeInfoDict[edgeHead]['simulationData']['pressure']
radius = edgeInfoDict[edgeIndex]['meanRadius'] * spacing # meter
length = edgeInfoDict[edgeIndex]['length'] * spacing # meter
c, k = itemgetter('c', 'k')(edgeInfoDict[edgeIndex])
flow = edgeFlowList[edgeIndex]
deltaPressure = 10.67 * flow**k * length / c**k / (2*radius)**4.8704
if pressureHead is None:
print('Error! EdgeIndex={} has pressure = None'.format(edgeIndex))
pressureTail = pressureHead - deltaPressure
nodeInfoDict[edgeTail]['simulationData']['pressure'] = pressureTail
self.nodeInfoDict = nodeInfoDict
self.edgeInfoDict = edgeInfoDict
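# The pressure drop above follows a Hazen-Williams-type head-loss form in SI
# units, deltaP = 10.67 * Q**k * L / (C**k * D**4.8704) with D = 2*radius.
# Worked sketch with purely illustrative numbers: Q = 1e-6 m^3/s, L = 0.01 m,
# D = 0.002 m, C = 100, k = 1.852 gives
# deltaP = 10.67 * (1e-6)**1.852 * 0.01 / (100**1.852 * 0.002**4.8704) ~ 2.3e-3
# (in the pressure units used elsewhere in this module).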
def showVolumePerPartition(self, numOfTimeSteps=4, interpolationOption=1, figIndex=1, isLastFigure=True):
"""
Using the GBM network and the radius info from BraVa and 2013, interpolate the radius (in different ways) for
the time point in between, and check how the volume of each partition changes among different time steps.
numOfTimeSteps has to be >= 2 (including the two end time steps)
interpolationOption=1 interpolates the radius linearly, interpolationOption=2 uses a logistic curve (bent
upwards), interpolationOption=3 uses a logistic curve (bent downwards)
"""
start_time = timeit.default_timer()
functionName = inspect.currentframe().f_code.co_name
self.loadNetwork(version=4, year='BraVa')
self.convertNetowrk()
self.adjustNetwork()
self.setNetwork(option=2)
self.createGroundTruth(option=2)
edgeIndexList = self.edgeIndexList
G = self.G
edgeRadiusTimeStepList = np.zeros((len(edgeIndexList), numOfTimeSteps)).tolist()
volumeTimeStepListPerPartition = {'LMCA': [], 'RMCA': [], 'LPCA': [], 'RPCA': [], 'ACA': [], 'Left': [], 'Right': []}
for edgeIndex in edgeIndexList:
radius = self.edgeInfoDict[edgeIndex]['meanRadius']
edgeRadiusTimeStepList[edgeIndex][0] = radius
# Change the radius #
# extraInfo = {'perturbedYear': 2013, 'excludedEdgeIndex': [0,1,2,3,4,7,9,11,5,6]}
extraInfo = {'perturbedYear': 2013, 'excludedEdgeIndex': [0,1,2,3,4,7]}
self.perturbNetwork(option=2, extraInfo=extraInfo)
self.setNetwork(option=2)
# success = self.createGroundTruth(option=2)
for edgeIndex in edgeIndexList:
radius = self.edgeInfoDict[edgeIndex]['meanRadius']
edgeRadiusTimeStepList[edgeIndex][-1] = radius
# Interpolate the radius for other time steps #
if interpolationOption == 1:
for edgeIndex in edgeIndexList:
radiusHead, radiusTail = edgeRadiusTimeStepList[edgeIndex][0], edgeRadiusTimeStepList[edgeIndex][-1]
for ii in range(1, numOfTimeSteps-1):
radius = (radiusTail - radiusHead) / (numOfTimeSteps - 1) * ii + radiusHead
edgeRadiusTimeStepList[edgeIndex][ii] = radius
elif interpolationOption == 2:
for edgeIndex in edgeIndexList:
radiusHead, radiusTail = edgeRadiusTimeStepList[edgeIndex][0], edgeRadiusTimeStepList[edgeIndex][-1]
for ii in range(1, numOfTimeSteps-1):
radius = (radiusTail - radiusHead) * np.tanh(ii / (numOfTimeSteps-1) * 2) + radiusHead
edgeRadiusTimeStepList[edgeIndex][ii] = radius
# print(edgeRadiusTimeStepList)
for currentTimeStep in range(0, numOfTimeSteps):
radiusList = [edgeRadiusTimeStepList[edgeIndex][currentTimeStep] for edgeIndex in edgeIndexList]
self.updateEdgeRadius(radiusList)
volumePerPartition = self.getVolumePerPartition()
for partitionName, volume in volumePerPartition.items():
volumeTimeStepListPerPartition[partitionName].append(volume)
volumeTimeStepListPerPartition['Left'] = (np.array(volumeTimeStepListPerPartition['LMCA']) + np.array(volumeTimeStepListPerPartition['LPCA'])).tolist()
volumeTimeStepListPerPartition['Right'] = (np.array(volumeTimeStepListPerPartition['RMCA']) + np.array(volumeTimeStepListPerPartition['RPCA'])).tolist()
print('volumeTimeStepListPerPartition={}'.format(volumeTimeStepListPerPartition))
fig = plt.figure(figIndex, figsize=(7, 3))
plt.subplots_adjust(left=0.1, right=0.9, top=0.90, bottom=0.15, wspace=0.3, hspace=0.4)
ax = fig.add_subplot(1,1,1)
for partitionName, volumeList in volumeTimeStepListPerPartition.items():
xValues = list(range(numOfTimeSteps))
yValues = volumeList
ax.plot(xValues, yValues, 'o-', label=partitionName)
ax.set_xlabel('Time step')
ax.set_xticks(xValues)
ax.set_xticklabels(['T{}'.format(ii) for ii in xValues])
ax.set_ylabel(r'Volume ($\mathrm{mm}^3$)')
# ax.legend()
ax.legend(bbox_to_anchor=(0,1.02,1,0.2), loc="lower left", mode="expand", borderaxespad=0, ncol=7, prop={'size': 8})
if isLastFigure:
plt.show()
def test1(self, numOfTimeSteps=4, interpolationOption=1, saveResult=False):
"""
Using the GBM network and the radius info from BraVa and 2013, interpolate the radius (in different ways) for
the time point in between, split the flow according to the cross-sectional area (option 2 in
createGroundTruth()) and see how the terminating pressures change.
numOfTimeSteps has to be >= 2 (including the two end time steps)
interpolationOption=1 interpolates the radius linearly, interpolationOption=2 uses a logistic curve (bent
upwards), interpolationOption=3 uses a logistic curve (bent downwards)
"""
start_time = timeit.default_timer()
functionName = inspect.currentframe().f_code.co_name
self.loadNetwork(version=4, year='BraVa')
self.convertNetowrk()
self.adjustNetwork()
self.setNetwork(option=2)
aa = [self.edgeInfoDict[edgeIndex]['meanRadius'] for edgeIndex in self.edgeIndexList]
# print(aa)
# success = self.createGroundTruth(option=2)
edgeIndexList = self.edgeIndexList
G = self.G
edgeRadiusTimeStepList = np.zeros((len(edgeIndexList), numOfTimeSteps)).tolist()
cTimeStepArray = np.zeros((len(edgeIndexList), numOfTimeSteps))
allNodes = list(range(np.max(list(self.nodeInfoDict.keys())) + 1))
terminatingNodes = [node for node in G.nodes() if G.degree(node) == 1 and self.nodeInfoDict[node]['depth'] != 0]
terminatingPressuresTimeStepArray = np.zeros((len(terminatingNodes), numOfTimeSteps))
nodePressuresTimeStepArray = np.zeros((len(allNodes), numOfTimeSteps))
for edgeIndex in edgeIndexList:
radius = self.edgeInfoDict[edgeIndex]['meanRadius']
edgeRadiusTimeStepList[edgeIndex][0] = radius
# Change the radius #
extraInfo = {'perturbedYear': 2013, 'excludedEdgeIndex': [0,1,2,3,4,7,9,11,5,6]}
self.perturbNetwork(option=2, extraInfo=extraInfo)
self.setNetwork(option=2)
# success = self.createGroundTruth(option=2)
for edgeIndex in edgeIndexList:
radius = self.edgeInfoDict[edgeIndex]['meanRadius']
edgeRadiusTimeStepList[edgeIndex][-1] = radius
# Interpolate the radius for other time steps #
if interpolationOption == 1:
for edgeIndex in edgeIndexList:
radiusHead, radiusTail = edgeRadiusTimeStepList[edgeIndex][0], edgeRadiusTimeStepList[edgeIndex][-1]
for ii in range(1, numOfTimeSteps-1):
radius = (radiusTail - radiusHead) / (numOfTimeSteps - 1) * ii + radiusHead
edgeRadiusTimeStepList[edgeIndex][ii] = radius
elif interpolationOption == 2:
for edgeIndex in edgeIndexList:
radiusHead, radiusTail = edgeRadiusTimeStepList[edgeIndex][0], edgeRadiusTimeStepList[edgeIndex][-1]
for ii in range(1, numOfTimeSteps-1):
radius = (radiusTail - radiusHead) * np.tanh(ii / (numOfTimeSteps-1) * 2) + radiusHead
edgeRadiusTimeStepList[edgeIndex][ii] = radius
# print(edgeRadiusTimeStepList)
for currentTimeStep in range(0, numOfTimeSteps):
# print(currentTimeStep)
radiusList = [edgeRadiusTimeStepList[edgeIndex][currentTimeStep] for edgeIndex in edgeIndexList]
# print(radiusList)
self.updateEdgeRadius(radiusList)
success = self.createGroundTruth(option=2)
if not success:
print('Time step={} failed'.format(currentTimeStep))
terminatingPressures = [self.nodeInfoDict[node]['simulationData']['pressure'] /13560/9.8*1000 for node in terminatingNodes]
terminatingPressuresTimeStepArray[:, currentTimeStep] = terminatingPressures
nodePressures = [self.nodeInfoDict[node]['simulationData']['pressure'] /13560/9.8*1000 for node in allNodes]
nodePressuresTimeStepArray[:, currentTimeStep] = nodePressures
cValues = [self.edgeInfoDict[edgeIndex]['c'] for edgeIndex in edgeIndexList]
cTimeStepArray[edgeIndexList, currentTimeStep] = cValues
# G = self.G
# nodeInfoDict = self.nodeInfoDict
# edgeInfoDict = self.edgeInfoDict
# nodeLabelDict = {node: np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()} # ground truth pressure in mmHg
# nodeValueList = [np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()] # ground truth pressure in mmHg
# edgeLabelDict = {edge: np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()} # ground truth flow in cm^3/s
# edgeValueList = [np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()] # ground truth flow in cm^3/s
# infoDict = {'nodeLabelDict': nodeLabelDict, 'nodeValueList': nodeValueList, 'nodeColorbarLabel': 'Node',
# 'edgeLabelDict': edgeLabelDict, 'edgeValueList': edgeValueList, 'edgeColorbarLabel': 'Edge',
# 'figTitle': 'Ground truth'}
# self.plotNetwork(infoDict, figIndex=1, isLastFigure=True)
# Clear the simulation result #
for node in G.nodes():
self.nodeInfoDict[node]['simulationData'] = {'pressure': None, 'flow': None} # placeholders, None means unset
for edgeIndex in edgeIndexList:
self.edgeInfoDict[edgeIndex]['simulationData'] = {'velocity': None, 'flow': None} # placeholders, None means unset
print(terminatingPressuresTimeStepArray)
partitionInfo = {'LMCA': {'startNodes': [4], 'boundaryNodes': [10]}, 'RMCA': {'startNodes': [5], 'boundaryNodes': [10]}, 'ACA': {'startNodes': [10], 'boundaryNodes': []},
'LPCA': {'startNodes': [6], 'boundaryNodes': []}, 'RPCA': {'startNodes': [7], 'boundaryNodes': []}}
fig = plt.figure(1, figsize=(15, 8))
plt.subplots_adjust(left=0.06, right=0.94, top=0.94, bottom=0.06, wspace=0.3, hspace=0.3)
subplotIndex = 1
colorList = ['r','g','b']
# terminatingNodes = {'LMCA': [], 'RMCA': [], 'ACA': [], 'LPCA': [], 'RPCA': []}
G = self.G
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
for partitionName, info in partitionInfo.items():
startNodes, boundaryNodes = itemgetter('startNodes', 'boundaryNodes')(info)
resultDict = self.BFS(startNodes, boundaryNodes)
visitedNodes, visitedEdges = itemgetter('visitedNodes', 'visitedEdges')(resultDict)
terminatingNodesInThisPartition = [node for node in visitedNodes if G.degree(node) == 1 and nodeInfoDict[node]['depth'] != 0]
# terminatingNodes[partitionName] = terminatingNodesInThisPartition
ax = fig.add_subplot(2,4,subplotIndex, projection='3d')
for ii, node in enumerate(terminatingNodesInThisPartition):
rowNum = terminatingNodes.index(node)
pressures = terminatingPressuresTimeStepArray[rowNum, :]
xValues = [node] * numOfTimeSteps
yValues = list(range(numOfTimeSteps))
zValues = list(pressures)
ax.plot(xValues, yValues, zValues, 'bo-')
ax.set_xlabel('Node index')
ax.set_ylabel('Time step')
ax.set_zlabel('Terminating pressure (mmHg)')
ax.set_title(partitionName)
subplotIndex += 1
edgeRadiusTimeStepArray = np.array(edgeRadiusTimeStepList)
spacing = self.spacing
for partitionName, info in partitionInfo.items():
startNodes, boundaryNodes = itemgetter('startNodes', 'boundaryNodes')(info)
resultDict = self.BFS(startNodes, boundaryNodes)
visitedNodes, visitedEdges = itemgetter('visitedNodes', 'visitedEdges')(resultDict)
terminatingNodesInThisPartition = [node for node in visitedNodes if G.degree(node) == 1 and nodeInfoDict[node]['depth'] != 0]
if partitionName != 'LPCA' and partitionName != 'LMCA' and partitionName != 'ACA':
continue
print('{}:'.format(partitionName))
for terminatingNode in terminatingNodesInThisPartition:
path = nx.shortest_path(G, startNodes[0], terminatingNode)
edgeIndexAlongPath = [G[path[ii]][path[ii+1]]['edgeIndex'] for ii in range(len(path) - 1)]
for currentTimeStep in range(numOfTimeSteps):
pressuresAlongPath = np.round(nodePressuresTimeStepArray[path, currentTimeStep], 2) # mmHg
edgeRadiusAlongPath = np.round(edgeRadiusTimeStepArray[edgeIndexAlongPath, currentTimeStep]*spacing*1000, 2) # mm
cAlongPath = np.round(cTimeStepArray[edgeIndexAlongPath, currentTimeStep], 3)
print('Terminating node {} (time step={}): pressures along path are {} mmHg, radius along path are {} mm, c={}'.format(terminatingNode, currentTimeStep, pressuresAlongPath, edgeRadiusAlongPath, cAlongPath))
elapsed = timeit.default_timer() - start_time
print('Elapsed time for function {}: {} sec'.format(functionName, elapsed))
plt.show()
def test2(self, numOfTimeSteps=4, interpolationOption=1, saveResult=False):
"""
Using the GBM network and the radius info from BraVa and 2013, apply the same or a different flow and check the differences in terminating pressures
"""
start_time = timeit.default_timer()
functionName = inspect.currentframe().f_code.co_name
self.loadNetwork(version=4, year='BraVa')
self.convertNetowrk()
self.adjustNetwork()
self.setNetwork(option=2)
self.createGroundTruth(option=2)
edgeIndexList = self.edgeIndexList
edgeFlowList = [0] * len(edgeIndexList)
for edgeIndex in edgeIndexList:
edgeFlowList[edgeIndex] = self.edgeInfoDict[edgeIndex]['simulationData']['flow']
G = self.G
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
nodeLabelDict = {node: np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()} # ground truth pressure in mmHg
nodeValueList = [np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()] # ground truth pressure in mmHg
edgeLabelDict = {edge: np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()} # ground truth flow in cm^3/s
import json
import random
import imageio
import os
import argparse
import time
import numpy as np
import tensorflow as tf
from scipy import misc
import utils
from ops import *
import sys
from generate_gif import *
BOOL_LABEL=3
def load_dataset(dataset_file, min_group_size, max_jpgs=-1):
with open(dataset_file) as f:
doc = json.load(f)
last_fid = -2
data = []
for i, line in enumerate(doc):
if max_jpgs != -1 and i > max_jpgs:
break
fid = line[1]
if fid - last_fid == 1:
data[-1].append(line)
else:
data.append([line])
last_fid = fid
return [group for group in data if len(group) >= min_group_size]
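# Sketch of the record layout assumed here and in read_info below: each entry
# of the JSON document is [fname, fid, label, bool_label, pck_limit], with
# BOOL_LABEL = 3 indexing the bool_label field. Consecutive frame ids (fid)
# are grouped into clips and short clips are dropped, e.g. (hypothetical path):
# >>> groups = load_dataset('dataset.json', min_group_size=4)
# >>> all(len(g) >= 4 for g in groups)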
def parse_time():
return time.strftime("%Y.%m.%d-%H:%M:%S", time.localtime())
def print_in_file(sstr):
sys.stdout.write(str(sstr)+'\n')
sys.stdout.flush()
os.fsync(sys.stdout.fileno())
class Net:
def __init__(self, H):
self.batch_size = H["batch_size"]
self.dataset_file = H["dataset_file"]
self.sim_dataset_file = H.get("sim_dataset_file", self.dataset_file)
self.test_size = H.get("test_size", 0)
self.h_dim = H["h_dim"]
self.r_dim = H.get("r_dim", 64)
self.d_noise = H["d_noise"]
self.physics = H["physics"]
self.symb_dim = H["symb_dim"]
self.mode = H["mode"]
self.lstm_dim = H["lstm_dim"]
self.im_height = self.im_width = H["im_height"]
self.group_size = H["group_size"]
self.max_jpgs = H["max_jpgs"]
self.test_batch_size = H.get('test_batch_size', 32)
self.loc_vec = [0, 2, 4, 6, 8, 10]
seed = 0
np.random.seed(seed)
tf.set_random_seed(seed)
self.build()
def regressor(self, x, keep_prob, reuse=False):
with tf.variable_scope('G', initializer=tf.random_uniform_initializer(-0.1, 0.1), reuse=reuse):
ip = tf.get_variable('ip', shape=(256, self.symb_dim))
feat = make_feat(x, self.im_height, keep_prob)
return tf.matmul(feat, ip)
def discriminator(self, xs, reuse, scope='D'):
with tf.variable_scope(scope, initializer=tf.random_uniform_initializer(-0.1, 0.1), reuse=reuse):
w1 = tf.get_variable('w1', shape=(self.symb_dim, self.h_dim))
w2 = tf.get_variable('w2', shape=(self.h_dim, self.h_dim))
w3 = tf.get_variable('w3', shape=(self.h_dim, self.lstm_dim))
ip = tf.get_variable('ip', shape=(self.lstm_dim, 2))
disc_input = xs
h = tf.matmul(tf.nn.relu(tf.matmul(tf.nn.relu(tf.matmul(disc_input, w1)), w2)), w3)
lstm_input = tf.reshape(h, (self.batch_size, self.group_size, self.lstm_dim))
lstm_output = build_lstm_inner(lstm_input, self.batch_size, self.lstm_dim, self.group_size)[-1]
return tf.matmul(lstm_output, ip)
def build(self):
self.keep_prob = keep_prob = tf.placeholder(tf.float32, [], name='kb')
self.learning_rate = learning_rate = tf.placeholder(tf.float32, [], name='lr')
self.x_image = x_image = tf.placeholder(tf.float32, shape=[None, self.im_height, self.im_width, 3], name='x_image')
self.x_label = x_label = tf.placeholder(tf.float32, shape=[None, self.symb_dim], name='x_label')
self.x_real = x_real = tf.placeholder(tf.float32, shape=[None, self.symb_dim], name='x_real')
self.x_bool_label = x_bool_label = tf.placeholder(tf.float32, shape=[None])
self.y = y = x_label + self.d_noise * tf.random_normal(tf.shape(x_label))
self.y_ = y_ = self.regressor(self.x_image, keep_prob, reuse=False)
self.pred = pred = self.discriminator(y, reuse=None, scope='D2')
self.pred_ = pred_ = self.discriminator(y_, reuse=True, scope='D2')
scale = 10.
self.g_loss_label = tf.reduce_sum(tf.reduce_mean(tf.abs(y_ - x_real), axis=1) * x_bool_label)
self.g_loss_w = -tf.reduce_mean(pred_)
self.d_loss_w = -tf.reduce_mean(pred) + tf.reduce_mean(pred_)
eps = tf.random_uniform([], 0., 1.)
y_hat = eps * self.y + (1 - eps) * self.y_
d_hat = self.discriminator(y_hat, reuse=True, scope='D2')
ddy = tf.gradients(d_hat, y_hat)[0]
ddy = tf.sqrt(tf.reduce_sum(tf.square(ddy), axis=1))
ddy = tf.reduce_mean(tf.square(ddy - 1.) * scale)
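# WGAN-GP style penalty: `ddy` pushes the discriminator's gradient norm on the
# random interpolates y_hat towards 1; the squared deviation is weighted by
# `scale` and added to the discriminator loss below.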
self.loss_g_with_label = self.g_loss_w + scale * self.g_loss_label
self.loss_g_only_label = self.g_loss_label
self.loss_d = self.d_loss_w + ddy
self.opt_g = opt_g = tf.train.AdamOptimizer(self.learning_rate, beta1=0.5, beta2=0.9)
self.opt_d = opt_d = tf.train.AdamOptimizer(self.learning_rate, beta1=0.5, beta2=0.9)
theta_G = [v for v in tf.trainable_variables() if 'G' in v.name]
self.train_op_g_with_label = opt_g.minimize(self.loss_g_with_label, var_list = theta_G)
self.train_op_g_only_label = opt_g.minimize(self.loss_g_only_label, var_list = theta_G)
theta_D = [v for v in tf.trainable_variables() if 'D' in v.name]
self.train_op_d = opt_d.minimize(self.loss_d, var_list=theta_D)
self.saver = tf.train.Saver(max_to_keep=None)
def read_info(self, group, phase):
labels = np.array([label for _, _, label, _, _ in group])
bool_labels = np.array([bool_label for _, _, _, bool_label, _ in group])
pck_limits = np.array([pck_limit for _, _, _, _, pck_limit in group])
if phase != 'sim':
imgs = np.array([utils.image_cache(fname, self.im_height) for fname, _, _, _, _ in group])
return imgs, labels, bool_labels, pck_limits
else:
return None, labels, bool_labels, pck_limits
def gen_data_render_catch(self, dataset, phase, num_with_label):
def label_data(dataset_to_label):
for group in dataset_to_label:
for frame in group:
frame[BOOL_LABEL] = 1
return
if phase == 'train':
label_data(dataset[:num_with_label])
elif phase == 'label':
label_data(dataset)
print_in_file('Beginning %s phase, dataset has %d groups' % (phase, len(dataset)))
# print dataset[0]
while True:
if phase == 'train' or phase == 'label' or phase == 'sim':
random.shuffle(dataset)
for group in dataset:
start_id = np.random.randint(0, len(group) - self.group_size + 1)
group_short = group[start_id:start_id + self.group_size]
yield self.read_info(group_short, phase)
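# Typical use (sketch; `net` and `dataset` are placeholder names): the
# generator yields one (imgs, labels, bool_labels, pck_limits) tuple per clip
# of length group_size (imgs is None in the 'sim' phase):
# >>> gen = net.gen_data_render_catch(dataset, 'train', num_with_label=100)
# >>> imgs, labels, bool_labels, pck_limits = next(gen)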
def eval(self, sess, args):
if not os.path.exists(args.logdir+'/output'):
os.makedirs(args.logdir+'/output')
if args.eval_only:
self.dataset_for_test = load_dataset(self.dataset_file, self.group_size)
if args.weights is not None:
self.saver.restore(sess, args.weights)
print_in_file("Saved")
pck_result = np.zeros((6))
flattened_set = []
for group in self.dataset_for_test:
flattened_set.extend(group)
test_num = len(flattened_set)
test_imgs, test_labels, _, test_pck_limits = self.read_info(flattened_set, 'test')
i_pos = range(test_num//self.test_batch_size)
s_pos = [0 for _ in i_pos]
i_pos = [i * self.test_batch_size for i in i_pos]
i_pos.append(test_num - self.test_batch_size)
s_pos.append(test_num // self.test_batch_size * self.test_batch_size - i_pos[-1])
for pos in range(len(i_pos)):
feed = {self.x_image: test_imgs[i_pos[pos]:i_pos[pos]+self.test_batch_size], self.keep_prob: 1.0}
np_x_real = test_labels[i_pos[pos]:i_pos[pos]+self.test_batch_size]
np_pck_limits = test_pck_limits[i_pos[pos]:i_pos[pos]+self.test_batch_size]
np_y_ = sess.run(self.y_, feed_dict=feed)
assert np_x_real.shape == np_y_.shape
for pos_s in range(s_pos[pos], self.test_batch_size):
for loc in self.loc_vec:
if np.abs(np_y_[pos_s,loc] - ((1+np_x_real[pos_s,loc])*32) ) < np_pck_limits[pos_s] and \
np.abs(np_y_[pos_s,loc+1] - ((1+np_x_real[pos_s,loc+1])*32) ) < np_pck_limits[pos_s]:
pck_result[loc//2] += 1
for loc in range(6):
print_in_file(pck_result[loc] / (test_num))
def reshape_arrays(self, imgs, real, bool_labels, pck_limits, labels):
imgs = np.reshape(imgs, (self.batch_size*self.group_size, self.im_height, self.im_width, 3))
real = np.reshape(real, (self.batch_size*self.group_size, self.symb_dim))
real = (1+real)*32
bool_labels = np.reshape(bool_labels, (self.batch_size*self.group_size))
pck_limits = np.reshape(pck_limits, (self.batch_size*self.group_size))
labels = np.reshape(labels, (self.batch_size*self.group_size, self.symb_dim))
return imgs, real, bool_labels, pck_limits, labels
def calculate_pck(self, group_len, y_, x_real, pck_limits):
pck_result = np.zeros((self.symb_dim // 2))
import math
import numpy as np
import scipy.integrate
from scipy.special import cbrt
from scipy.special import gamma
from scipy.special import iv
from multipcc.utils import w_function, w_function_2
integrand = lambda x, a, b, c: ((x ** a) * (np.sin(b * np.arctan(c * x)) ** 2)) / (
(1 + (c * x) ** 2) ** b
)
integrand_vec = np.vectorize(integrand)
quad = lambda func, a, b, c: scipy.integrate.quad(func, 0, 1, (a, b, c))
quad_vec = np.vectorize(quad)
class Multiphonon:
"""
Multiphonon capture class
"""
def __init__(self, data) -> None:
self.data = data
def derived_parameters(self):
phycon = self.data.physical_constants
inputs = self.data.inputs
derived = self.data.derived
mpder = self.data.multiphonon_derived_parameters
# radius of sphere with Brillouin zone volume in metre inverse
mpder.q_D = cbrt(6 * np.pi ** 2) / inputs.a_0
mpder.sa = 4 * math.sqrt(
np.pi * derived.r_eh * phycon.eVJ / (phycon.kB * inputs.T)
) # Sommerfeld factor
mpder.pekar = (1 / inputs.epsilon_h) - (1 / inputs.epsilon_l) # pekar factor
mpder.V_0 = (inputs.a_0) ** 3 # volume of the unit cell in cubic meters
mpder.omega = (inputs.Eph * phycon.eVJ) / phycon.hbar # frequency of the phonon
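# q_D above is a Debye-like cutoff: the radius of a sphere whose volume equals
# the Brillouin-zone volume, q_D = (6*pi^2)**(1/3) / a_0; e.g. a_0 = 5e-10 m
# gives q_D ~ 7.8e9 m^-1.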
def Huang_Rhys_Factor_deformation_potential_coupling(self):
phycon = self.data.physical_constants
inputs = self.data.inputs
derived = self.data.derived
mpder = self.data.multiphonon_derived_parameters
egrid = self.data.energy_grids
hrfd = self.data.deformation_potential_coupling
hrfd.SHRD = (
(inputs.Dij * phycon.eVJ * 100) / (inputs.Eph * phycon.eVJ)
) ** 2 / (
2 * inputs.Mr * mpder.omega / phycon.hbar
) # deformation coupling
# array of the three different values of mu depending on charge state. The value of mu for
hrfd.mu = np.array([-egrid.nu, egrid.nu * 1e-6, egrid.nu])
# neutral charge state was supposed to be zero but has been given a small value to avoid
# division by zero error.
hrfd.a = 0
hrfd.b = 2 * hrfd.mu
hrfd.c = (mpder.q_D * derived.a_ebr * egrid.nu) / 2
hrfd.bcsqre = (hrfd.b * hrfd.c) ** 2
# integral part of the Huang Rhys Factor
hrfd.ans, hrfd.err = quad_vec(integrand_vec, hrfd.a, hrfd.b, hrfd.c)
hrfd.I = hrfd.ans / hrfd.bcsqre
# final values of Huang Rhys Factor. SHR is an array of size mu x Et. Each column contains the
hrfd.SHR_D = hrfd.SHRD * hrfd.I
# values of SHR for every possible value of energy for a particular charge state
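# quad_vec above evaluates, elementwise over the (mu, Et) grid, the 1-D
# integral of integrand(x; a, b, c) for x in [0, 1]; dividing by (b*c)^2
# yields the dimensionless factor I entering SHR_D = SHRD * I.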
def Huang_Rhys_Factor_polar_coupling(self):
phycon = self.data.physical_constants
inputs = self.data.inputs
derived = self.data.derived
mpder = self.data.multiphonon_derived_parameters
egrid = self.data.energy_grids
hrfp = self.data.polar_coupling
hrfp.SHRP = (3 / (2 * ((inputs.Eph * phycon.eVJ) ** 2))) * (
(phycon.Qe ** 2)
* (inputs.Mr / mpder.V_0)
* inputs.Eph
* phycon.eVJ
/ (inputs.Mr * (mpder.q_D ** 2))
* mpder.pekar
) # polar coupling
# array of the three different values of mu depending on charge state. The value of mu for
hrfp.mu = np.array([-egrid.nu, egrid.nu * 1e-6, egrid.nu])
# neutral charge state was supposed to be zero but has been given a small value to avoid
# division by zero error.
hrfp.a = -2
hrfp.b = 2 * hrfp.mu
hrfp.c = (mpder.q_D * derived.a_ebr * egrid.nu) / 2
hrfp.bcsqre = (hrfp.b * hrfp.c) ** 2
# integral part of the Huang Rhys Factor
hrfp.ans, hrfp.err = quad_vec(integrand_vec, hrfp.a, hrfp.b, hrfp.c)
hrfp.I = hrfp.ans / hrfp.bcsqre
# final values of Huang Rhys Factor. SHR is an array of size mu x Et. Each column contains the
hrfp.SHR_P = hrfp.SHRP * hrfp.I
# values of SHR for every possible value of energy for a particular charge state
def Huang_Rhys_factor(self):
inputs = self.data.inputs
egrid = self.data.energy_grids
hrfd = self.data.deformation_potential_coupling
hrfp = self.data.polar_coupling
hrf = self.data.huang_rhys_factor
# array of the three different values of mu depending on charge state. The value of mu for
hrf.mu = np.array([-egrid.nu, egrid.nu * 1e-6, egrid.nu])
# neutral charge state was supposed to be zero but has been given a small value to avoid
# division by zero error.
if inputs.dir == "dp":
hrf.SHR = hrfd.SHR_D
elif inputs.dir == "pc":
hrf.SHR = hrfp.SHR_P
elif inputs.dir == "com":
hrf.SHR = hrfd.SHR_D + hrfp.SHR_P
else:
print("Please select Multiphonon coupling potential")
def multiphonon_capture_coefficients(self):
phycon = self.data.physical_constants
inputs = self.data.inputs
derived = self.data.derived
mpder = self.data.multiphonon_derived_parameters
egrid = self.data.energy_grids
hrf = self.data.huang_rhys_factor
mpcoef = self.data.multiphonon_capture_coefficients
mpcoef.theta = (inputs.Eph * phycon.eVJ) / (2 * phycon.kB * inputs.T)
# round to next highest integer
mpcoef.p = np.ceil(egrid.ET / inputs.Eph)
mpcoef.p_vec = np.ones(hrf.mu.shape) * mpcoef.p # matching the shape of mu
mpcoef.X = np.zeros(hrf.SHR.shape)
mpcoef.X[hrf.SHR < mpcoef.p_vec] = hrf.SHR[hrf.SHR < mpcoef.p_vec] / (
mpcoef.p_vec[hrf.SHR < mpcoef.p_vec] * math.sinh(mpcoef.theta)
)
mpcoef.X[hrf.SHR > mpcoef.p_vec] = mpcoef.p_vec[hrf.SHR > mpcoef.p_vec] / (
hrf.SHR[hrf.SHR > mpcoef.p_vec] * math.sinh(mpcoef.theta)
)
mpcoef.sa = np.array([mpder.sa, 1, mpder.sa])
mpcoef.Y = np.sqrt(1 + mpcoef.X ** 2)
mpcoef.V_T = (
(4 / 3) * np.pi * (derived.a_ebr * egrid.nu / 2) ** 3
) # volume of the wave function
mpcoef.k1 = (
(mpcoef.V_T)
* ((mpcoef.p ** 2) * mpder.omega * math.sqrt(2 * np.pi))
/ (np.sqrt(mpcoef.p * mpcoef.Y))
)
mpcoef.k2 = (
mpcoef.theta
+ mpcoef.Y
- mpcoef.X * math.cosh(mpcoef.theta)
- np.log((1 + mpcoef.Y) / mpcoef.X)
)
# recombination coefficients in m^3/s
mpcoef.k = mpcoef.k1 * np.exp(mpcoef.p * mpcoef.k2)
mpcoef.capt_cs = mpcoef.k / inputs.v_th # capture cross section
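# Sanity-check sketch (illustrative numbers only): with a thermal velocity
# v_th ~ 1e5 m/s, a coefficient k ~ 1e-19 m^3/s corresponds to a capture
# cross section capt_cs = k / v_th ~ 1e-24 m^2, following the usual
# sigma = k / v_th convention.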
def trap_state_mp(self):
inputs = self.data.inputs
mpder = self.data.multiphonon_derived_parameters
mpcoef = self.data.multiphonon_capture_coefficients
tsm = self.data.trap_state_mp
        if inputs.trap_state == "don":
            # electron capture coefficient from the CB by donor [reversed for the same reason as radiative]
            tsm.mp_sign = mpder.sa * mpcoef.k[2, :][::-1]
            # hole capture coefficient from the VB by donor
            tsm.mp_sigp = mpcoef.k[1, :]
        else:
            # electron capture coefficient from the CB by acceptor
            tsm.mp_sign = mpcoef.k[1, :][::-1]
            # hole capture coefficient from the VB by acceptor
            tsm.mp_sigp = mpder.sa * mpcoef.k[0, :]
class Radiative:
def __init__(self, data):
self.data = data
def photon_energy(self):
"""
        In the photon energy grid all matrices are 2D. Along rows ET remains fixed; along columns Ek remains fixed.
        Ept increases from left to right (as Ek increases) and from top to bottom (as ET increases).
"""
mat = self.data.matrix
egrid = self.data.energy_grids
pgrid = self.data.photon_energy
pgrid.Ept = (
(np.zeros(mat.mat2D.shape) + egrid.Ek).T + egrid.ET
).T # photon energy grid
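        # Per the docstring above: if ET has length m and Ek has length n,
        # Ept has shape (m, n) with Ept[i, j] = ET[i] + Ek[j]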
pgrid.theta = mat.Ek2D / mat.ET2D # theta grid of the shape of Ept
def charge_states(self):
"""
        In the charge state grid all matrices are 3D, formed by repeating the photon energy grid matrices
        three times for the three charge states. ET, Ek, deltaE and nu consist of three equal blocks.
        mu is -nu, 0 and nu for the -ve, neutral and +ve charge states respectively.
"""
mat = self.data.matrix
pgrid = self.data.photon_energy
cgrid = self.data.charge_states
cgrid.mu = np.array([-mat.nu2D, mat.nu2D * 1e-6, mat.nu2D]) # mu is 3D matrix
# Ept grid of the shape of mu
cgrid.Ept = np.array([pgrid.Ept, pgrid.Ept, pgrid.Ept])
# theta grid of the shape of mu
cgrid.theta = np.array([pgrid.theta, pgrid.theta, pgrid.theta])
def broadening_function(self):
hrf = self.data.huang_rhys_factor
phycon = self.data.physical_constants
inputs = self.data.inputs
bfunc = self.data.broadening_function
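        # fB below is the Bose-Einstein phonon occupation number n(Eph, T);
        # the broadening then uses the modified Bessel function iv of order
        # SHR evaluated at 2*SHR*sqrt(fB*(fB+1))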
        bfunc.fB = 1 / (
            np.exp((inputs.Eph * phycon.eVJ) / (phycon.kB * inputs.T)) - 1
        )
        bfunc.bessel = iv(hrf.SHR, 2 * hrf.SHR * np.sqrt(bfunc.fB * (bfunc.fB + 1)))
        bfunc.broadening = (
            np.exp(-2 * hrf.SHR * (bfunc.fB + 1))
            * np.exp(hrf.SHR * inputs.Eph * phycon.eVJ / (phycon.kB * inputs.T))
            * bfunc.bessel
        )
def photoionization_cross_section(self):
"""
        Photoionization cross section found using eq. 5.91 from QPC by BKR. Unit is [m^2].
        For the negative charge state the values are invalid for nu >= 0.5, so nu is taken from 0.5.
        The rest of the calculation is done keeping those values masked.
        The cross section is weighted by the unoccupied states to remove the final-state energy dependence.
        The weighted p_crosssection is multiplied by the thermal velocity to get the coefficient.
"""
phycon = self.data.physical_constants
inputs = self.data.inputs
derived = self.data.derived
mat = self.data.matrix
cgrid = self.data.charge_states
pion = self.data.photoionization_cross_section
# bfunc = self.data.broadening_function
# wf = self.data.weighing_function
# photoionization cross section (PCS)
# constant part of PCS: PCS_C
        # Mu varying part of PCS: PCS_vMU
        # E varying part of PCS: PCS_vE
pion.PCS_C = (
(16 / 3)
* phycon.alpha
* phycon.a_br ** 2
* phycon.r_h
* phycon.eVJ
* (1 / derived.eta_r)
* (phycon.m_e / inputs.M_eff)
            * (2 * np.pi)
            * (np.sqrt(2 * inputs.M_eff)
# -*- coding: utf-8 -*-
"""
Currently battery storage functions are not utilized.
"""
import numpy as np
import tariff_functions as tFuncs
import sys
def cartesian(arrays, out=None):
"""
Generate a cartesian product of input arrays.
Parameters
----------
arrays : list of numpy.ndarray
1-D arrays to form the cartesian product of.
out : numpy.ndarray
Array to place the cartesian product in.
Returns
-------
out : numpy.ndarray
2-D array of shape (M, len(arrays)) containing cartesian products
formed of input arrays.
Examples
--------
>>> cartesian(([1, 2, 3], [4, 5], [6, 7]))
array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
"""
arrays = [np.asarray(x) for x in arrays]
dtype = arrays[0].dtype
n = np.prod([x.size for x in arrays])
if out is None:
out = np.zeros([n, len(arrays)], dtype=dtype)
    m = n // arrays[0].size
    out[:,0] = np.repeat(arrays[0], m)
    if arrays[1:]:
        cartesian(arrays[1:], out=out[0:m,1:])
        for j in range(1, arrays[0].size):
            out[j*m:(j+1)*m,1:] = out[0:m,1:]
    return out
class Battery:
"""
Todo
----
    Degradation functions were just rough estimations from a slide deck, and currently have a small discontinuity at the transition between the two fits.
"""
def __init__(self, nameplate_cap=0.0, nameplate_power=0.0, SOC_min=0.2, eta_charge=0.91, eta_discharge=0.91, cycles=0):
self.SOC_min = SOC_min
self.eta_charge = eta_charge
        self.eta_discharge = eta_discharge
self.cycles = cycles
self.nameplate_cap = nameplate_cap
self.effective_cap = nameplate_cap*(1-SOC_min)
self.nameplate_power = nameplate_power
self.effective_power = nameplate_power
def set_cap_and_power(self, nameplate_cap, nameplate_power):
self.nameplate_cap = nameplate_cap
self.effective_cap = nameplate_cap*(1-self.SOC_min)
self.nameplate_power = nameplate_power
self.effective_power = nameplate_power
self.cycles = 0
def set_cycle_deg(self, cycles):
if cycles < 2300:
deg_coeff = (-7.5e-12*cycles**3 + 4.84e-8*cycles**2 - 0.0001505*cycles + 0.9997)
else:
deg_coeff = -8.24e-5*cycles + 1.0094
self.effective_cap = deg_coeff * self.nameplate_cap
self.effective_power = deg_coeff * (1 - (1-deg_coeff)*1.25) * self.nameplate_power
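    # Rough numeric check of the discontinuity noted in the Todo above, using
    # the coefficients in set_cycle_deg: the cubic fit gives deg_coeff ~ 0.818
    # at cycles=2299 while the linear fit gives ~ 0.820 at cycles=2300, so
    # effective_cap jumps slightly at the transition.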
#%%
def determine_optimal_dispatch(load_profile, pv_profile, batt, t, export_tariff, d_inc_n=50, DP_inc=50, estimator_params=None, estimated=False, restrict_charge_to_pv_gen=False, estimate_demand_levels=False):
"""
Function that determines the optimal dispatch for a battery, and determines the resulting first year bill with the system.
Parameters
----------
load_profile : numpy.ndarray
Original load profile prior to modification by PV or storage
pv_profile : numpy.ndarray
PV profile of equal length to the `load_profile`
t : :class:`python.tariff_functions.Tariff`
Tariff class object
batt : :class:`python.dispatch_functions.Battery`
Battery class object
export_tariff : :class:`python.tariff_functions.Export_Tariff`
Export tariff class object
Note
----
In the battery level matrices, 0 index corresponds to an empty battery, and
the highest index corresponds to a full battery
Todo
----
1) Having cost-to-go equal cost of filling the battery at the end may not be
working.
2) Have warnings for classes of errors. Same for bill calculator, such as when
net load in a given period is negative either have warnings, or outright nans, when an illegal move is chosen
3) If there are no demand charges, don't calc & don't have a limit on
demand_max_profile for the following dispatch.
"""
load_and_pv_profile = load_profile - pv_profile
if batt.effective_cap == 0.0:
opt_load_traj = load_and_pv_profile
demand_max_profile = load_and_pv_profile
batt_level_profile = np.zeros(len(load_and_pv_profile), float)
bill_under_dispatch, _ = tFuncs.bill_calculator(opt_load_traj, t, export_tariff)
demand_max_exceeded = False
batt_dispatch_profile = np.zeros([len(load_profile)])
else:
# =================================================================== #
# Determine cheapest possible demand states for the entire year
# =================================================================== #
month_hours = np.array([0, 744, 1416, 2160, 2880, 3624, 4344, 5088, 5832, 6552, 7296, 8016, 8760]);
cheapest_possible_demands = np.zeros((12,np.max([t.d_tou_n+1, 2])), float)
demand_max_profile = np.zeros(len(load_and_pv_profile), float)
batt_level_profile = np.zeros(len(load_and_pv_profile), float)
# Determine the cheapest possible set of demands for each month, and create an annual profile of those demands
batt_start_level = batt.effective_cap
for month in range(12):
# Extract the load profile for only the month under consideration
load_and_pv_profile_month = load_and_pv_profile[month_hours[month]:month_hours[month+1]]
pv_profile_month = pv_profile[month_hours[month]:month_hours[month+1]]
d_tou_month_periods = t.d_tou_8760[month_hours[month]:month_hours[month+1]]
# columns [:-1] of cheapest_possible_demands are the achievable demand levels, column [-1] is the cost
# d_max_vector is an hourly vector of the demand level of that period (to become a max constraint in the DP), which is cast into an 8760 for the year.
cheapest_possible_demands[month,:], d_max_vector, batt_level_month = calc_min_possible_demands_vector(d_inc_n, load_and_pv_profile_month, pv_profile_month, d_tou_month_periods, batt, t, month, restrict_charge_to_pv_gen, batt_start_level, estimate_demand_levels)
demand_max_profile[month_hours[month]:month_hours[month+1]] = d_max_vector
batt_level_profile[month_hours[month]:month_hours[month+1]] = batt_level_month
batt_start_level = batt_level_month[-1]
# =================================================================== #
# Complete (not estimated) dispatch of battery with dynamic programming
# =================================================================== #
if estimated == False:
DP_res = batt.effective_cap / (DP_inc-1)
illegal = 99999999
            batt_actions_to_achieve_demand_max = np.zeros(len(load_profile), float)
            batt_actions_to_achieve_demand_max[1:] = batt_level_profile[1:] - batt_level_profile[0:-1]
# Calculate the reverse cumsum, then mod the result by the resolution of the battery discretization
            batt_act_rev_cumsum = np.cumsum(batt_actions_to_achieve_demand_max[::-1])[::-1]
batt_act_rev_cumsum += batt.effective_cap - batt_level_profile[-1]
batt_act_rev_cumsum_mod = np.mod(batt_act_rev_cumsum, DP_res)
# batt_x_limits are the number of rows that the battery energy
# level can move in a single step. The actual range exceeds what is
# possible (due to discretization), but will be restricted by a
# pass/fail test later on with cost-to-go.
batt_charge_limit = int(batt.effective_power*batt.eta_charge/DP_res) + 1
batt_discharge_limit = int(batt.effective_power/batt.eta_discharge/DP_res) + 1
batt_charge_limits_len = batt_charge_limit + batt_discharge_limit + 1
batt_levels = np.zeros([DP_inc+1,8760], float)
batt_levels[1:,:] = np.linspace(0,batt.effective_cap,DP_inc, float).reshape(DP_inc,1)
batt_levels[1:,:-1] = batt_levels[1:,:-1] + (DP_res - batt_act_rev_cumsum_mod[1:].reshape(1,8759)) # Shift each column's values, such that the DP can always find a way through
batt_levels[0,:] = 0.0 # The battery always has the option of being empty
batt_levels[-1,:] = batt.effective_cap # The battery always has the option of being full
# batt_levels_buffered is the same as batt_levels, except it has
# buffer rows of 'illegal' values
batt_levels_buffered = np.zeros([np.shape(batt_levels)[0]+batt_charge_limit+batt_discharge_limit, np.shape(batt_levels)[1]], float)
batt_levels_buffered[:batt_discharge_limit,:] = illegal
batt_levels_buffered[-batt_charge_limit:,:] = illegal
batt_levels_buffered[batt_discharge_limit:-batt_charge_limit,:] = batt_levels
# Build an adjustment that adds a very small amount to the
# cost-to-go, as a function of rate of charge. Makes the DP prefer
# to charge slowly, all else being equal
adjuster = np.zeros(batt_charge_limits_len, float)
base_adjustment = 0.0000001
adjuster[np.arange(batt_discharge_limit,-1,-1)] = base_adjustment * np.array(list(range(batt_discharge_limit+1)))*np.array(list(range(batt_discharge_limit+1))) / (batt_discharge_limit*batt_discharge_limit)
adjuster[batt_discharge_limit:] = base_adjustment * np.array(list(range(batt_charge_limit+1)))*np.array(list(range(batt_charge_limit+1))) / (batt_charge_limit*batt_charge_limit)
# Initialize some objects for later use in the DP
expected_values = np.zeros((DP_inc+1, np.size(load_and_pv_profile)), float)
DP_choices = np.zeros((DP_inc+1, np.size(load_and_pv_profile)), int)
influence_on_load = np.zeros((DP_inc+1, batt_charge_limits_len), float)
selected_net_loads = np.zeros((DP_inc+1, np.size(load_and_pv_profile)), float)
net_loads = np.zeros((DP_inc+1, batt_charge_limits_len), float)
costs_to_go = np.zeros((DP_inc+1, batt_charge_limits_len), float)
change_in_batt_level_matrix = np.zeros((DP_inc+1, batt_charge_limits_len), float)
# Expected value of final states is the energy required to fill the battery up
# at the most expensive electricity rate. This encourages ending with a full
# battery, but solves a problem of demand levels being determined by a late-hour
# peak that the battery cannot recharge from before the month ends
# This would be too strict under a CPP rate.
# I should change this to evaluating the required charge based on the batt_level matrix, to keep self-consistent
expected_values[:,-1] = np.linspace(batt.effective_cap,0,DP_inc+1)/batt.eta_charge*np.max(t.e_prices_no_tier) #this should be checked, after removal of buffer rows
# option_indicies is a map of the indicies corresponding to the
# possible points within the expected_value matrix that that state
# can reach.
# Each row is the set of options for a single battery state
option_indicies = np.zeros((DP_inc+1, batt_charge_limits_len), int)
option_indicies[:,:] = list(range(batt_charge_limits_len))
for n in range(DP_inc+1):
option_indicies[n,:] += n - batt_discharge_limit
option_indicies[option_indicies<0] = 0 # Cannot discharge below "empty"
option_indicies[option_indicies>DP_inc] = DP_inc # Cannot charge above "full"
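            # Illustrative example: with DP_inc=4, batt_discharge_limit=1 and
            # batt_charge_limit=1, row 0 (the empty battery) gets option
            # indices [0, 0, 1] after clipping: it cannot discharge below
            # empty, may hold, or may charge one level.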
###################################################################
############### Dynamic Programming Energy Trajectory #############
for hour in np.arange(np.size(load_and_pv_profile)-2, -1, -1):
# Rows correspond to each possible battery state
# Columns are options for where this particular battery state could go to
# Index is hour+1 because the DP decisions are on a given hour, looking ahead to the next hour.
                # This is just an inefficient but obvious way to assemble this matrix. It should be possible in a few quicker operations.
for row in range(DP_inc+1):
change_in_batt_level_matrix[row,:] = (-batt_levels[row,hour] + batt_levels_buffered[row:row+batt_charge_limits_len,hour+1])
#Because of the 'illegal' values, neg_batt_bool shouldn't be necessary
                resulting_batt_level = change_in_batt_level_matrix + batt_levels[:,hour].reshape(DP_inc+1,1) # These are likely not necessary because options are restricted
                # neg_batt_bool = resulting_batt_level<0 # These are likely not necessary because options are restricted
                overfilled_batt_bool = resulting_batt_level>batt.effective_cap # These are likely not necessary because options are restricted
charging_bool = change_in_batt_level_matrix>0
discharging_bool = change_in_batt_level_matrix<0
influence_on_load = np.zeros(np.shape(change_in_batt_level_matrix), float)
influence_on_load += (change_in_batt_level_matrix*batt.eta_discharge) * discharging_bool
influence_on_load += (change_in_batt_level_matrix/batt.eta_charge) * charging_bool
influence_on_load -= 0.000000001 # because of rounding error? Problems definitely occur (sometimes) without this adjustment. The adjustment magnitude has not been tuned since moving away from ints.
net_loads = load_and_pv_profile[hour+1] + influence_on_load
# Determine the incremental cost-to-go for each option
costs_to_go[:,:] = 0 # reset costs to go
importing_bool = net_loads>=0 # If consuming, standard price
costs_to_go += net_loads*t.e_prices_no_tier[t.e_tou_8760[hour+1]]*importing_bool
exporting_bool = net_loads<0 # If exporting, NEM price
costs_to_go += net_loads*export_tariff.prices[export_tariff.periods_8760[hour+1]]*exporting_bool
# Make the incremental cost of impossible/illegal movements very high
                # costs_to_go += neg_batt_bool * illegal # These are likely not necessary because options are restricted
                costs_to_go += overfilled_batt_bool * illegal # These are likely not necessary because options are restricted
demand_limit_exceeded_bool = net_loads>demand_max_profile[hour+1]
costs_to_go += demand_limit_exceeded_bool * illegal
# add very small cost as a function of battery motion, to discourage unnecessary motion
costs_to_go += adjuster
total_option_costs = costs_to_go + expected_values[option_indicies, hour+1]
expected_values[:, hour] = np.min(total_option_costs,1)
#Each row corresponds to a row of the battery in DP_states. So the 0th row are the options of the empty battery state.
#The indicies of the results correspond to the battery's movement. So the (approximate) middle option is the do-nothing option
#Subtract the negative half of the charge vector, to get the movement relative to the row under consideration
DP_choices[:,hour] = np.argmin(total_option_costs,1) - batt_discharge_limit # adjust by discharge?
selected_net_loads[:,hour] = net_loads[list(range(DP_inc+1)),np.argmin(total_option_costs,1)]
#=================================================================#
################## Reconstruct trajectories #######################
#=================================================================#
# Determine what the indexes of the optimal trajectory were.
# Start at the 0th hour, imposing a full battery.
# traj_i is the indexes of the battery's trajectory.
traj_i = np.zeros(len(load_and_pv_profile), int)
traj_i[0] = DP_inc-1
for n in range(len(load_and_pv_profile)-1):
traj_i[n+1] = traj_i[n] + DP_choices[int(traj_i[n]), n]
opt_load_traj = np.zeros(len(load_and_pv_profile), float)
for n in range(len(load_and_pv_profile)-1):
opt_load_traj[n+1] = selected_net_loads[traj_i[n], n]
# Determine what influence the battery had. Positive means the
# battery is discharging.
batt_dispatch_profile = load_and_pv_profile - opt_load_traj
# This is now necessary in some cases, because coincident peak
# charges are not calculated in the dispatch
bill_under_dispatch, _ = tFuncs.bill_calculator(opt_load_traj, t, export_tariff)
demand_max_exceeded = np.any(opt_load_traj[1:] > demand_max_profile[1:])
#=====================================================================#
##################### Estimate Bill Savings ###########################
#=====================================================================#
elif estimated == True:
if t.coincident_peak_exists == True:
if t.coincident_style == 0:
coincident_demand_levels = np.average(load_profile[t.coincident_hour_def], 1)
coincident_charges = tFuncs.tiered_calc_vec(coincident_demand_levels, t.coincident_levels, t.coincident_prices)
coincident_monthly_charges = coincident_charges[t.coincident_monthly_periods]
coincident_charges = np.sum(coincident_monthly_charges)
else:
coincident_charges = 0
batt_arbitrage_value = estimate_annual_arbitrage_profit(batt.effective_power, batt.effective_cap, batt.eta_charge, batt.eta_discharge, estimator_params['cost_sum'], estimator_params['revenue_sum'])
bill_under_dispatch = sum(cheapest_possible_demands[:,-1]) + coincident_charges + 12*t.fixed_charge + estimator_params['e_chrgs_with_PV'] - batt_arbitrage_value
opt_load_traj = np.zeros([len(load_profile)])
batt_dispatch_profile = np.zeros([len(load_profile)])
demand_max_profile = np.zeros([len(load_profile)])
batt_level_profile = np.zeros([len(load_profile)])
#energy_charges = estimator_params['e_chrgs_with_PV'] - batt_arbitrage_value
demand_max_exceeded = False
#=========================================================================#
########################### Package Results ###############################
#=========================================================================#
results = {'load_profile_under_dispatch':opt_load_traj,
'bill_under_dispatch':bill_under_dispatch,
'demand_max_exceeded':demand_max_exceeded,
'demand_max_profile':demand_max_profile,
'batt_level_profile':batt_level_profile,
'batt_dispatch_profile':batt_dispatch_profile}
return results
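# Illustrative usage sketch (assumes populated Tariff and Export_Tariff objects
# from tariff_functions; the 8760-hour profile names here are hypothetical):
# batt = Battery(nameplate_cap=10.0, nameplate_power=5.0)
# res = determine_optimal_dispatch(load_8760, pv_8760, batt, t, export_tariff)
# res['bill_under_dispatch'] # first-year bill under the optimal dispatch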
#%% Energy Arbitrage Value Estimator
def calc_estimator_params(load_and_pv_profile, tariff, export_tariff, eta_charge, eta_discharge):
"""
Create four 12-length vectors, weekend/weekday and
cost/revenue. They are a summation of each day's 12 hours of lowest/highest
cost electricity.
Parameters
----------
load_and_pv_profile : numpy.ndarray
8760 array of the agent's load_profile - pv_profile
t : :class:`python.tariff_functions.Tariff`
Tariff class object
export_tariff : :class:`python.tariff_functions.Export_Tariff`
Export tariff class object
Note
----
1) TOU windows are aligned with when the battery would be dispatching for demand peak shaving.
2) The battery will be able to dispatch fully and recharge fully every 24 hour cycle.
Todo
----
1) Bring back consideration of tiers.
2) Consider coming up with a better method that captures exportation, CPP, etc
Maybe? Or just confirm a simple estimation works with our dGen set,
and use the accurate dispatch for any other analysis.
"""
# Calculate baseline energy costs with the given load+pv profile
_, tariff_results = tFuncs.bill_calculator(load_and_pv_profile, tariff, export_tariff)
e_chrgs_with_PV = tariff_results['e_charges']
# Estimate the marginal retail energy costs of each hour
e_value_8760 = np.average(tariff.e_prices, 0)[tariff.e_tou_8760]
e_value_8760[load_and_pv_profile<=0] = export_tariff.prices[export_tariff.periods_8760][load_and_pv_profile<=0]
# Reshape into 365 24-hour day vectors and then sort by increasing cost
e_value_365_24 = e_value_8760.reshape((365,24), order='C')
e_value_365_24_sorted = np.sort(e_value_365_24)
# Split the lower half into costs-to-charge and upper half into revenue-from-discharge
e_cost = e_value_365_24_sorted[:,:12]
e_revenue = e_value_365_24_sorted[:,np.arange(23,11,-1)]
# Estimate which hours there is actually an arbitrage profit, where revenue
# exceeds costs for a pair of hours in a day. Not strictly correct, because
# efficiencies means that hours are not directly compared.
arbitrage_opportunity = e_revenue*eta_discharge > e_cost*eta_charge
# Where there is no opportunity, replace both cost and revenue values with
# 0, to reflect no battery action in those hours.
e_cost[arbitrage_opportunity==False] = 0.0
e_revenue[arbitrage_opportunity==False] = 0.0
cost_sum = np.sum(e_cost, 0)
revenue_sum = np.sum(e_revenue, 0)
results = {'e_chrgs_with_PV':e_chrgs_with_PV,
'cost_sum':cost_sum,
'revenue_sum':revenue_sum}
return results
#%%
def estimate_annual_arbitrage_profit(power, capacity, eta_charge, eta_discharge, cost_sum, revenue_sum):
"""
This function uses the 12x24 marginal energy costs from calc_estimator_params to estimate the potential arbitrage value of a battery.
Parameters
----------
power : float
Inherited from :class:`python.dispatch_functions.Battery`
capacity : float
Inherited from :class:`python.dispatch_functions.Battery`
eta_charge : float
Inherited from :class:`python.dispatch_functions.Battery`
eta_discharge: float
Inherited from :class:`python.dispatch_functions.Battery`
cost_sum : numpy.ndarray
12-length sorted vector of summed energy costs for charging in the cheapest 12 hours of each day
revenue_sum : numpy.ndarray
12-length sorted vector of summed energy revenue for discharging in the most expensive 12 hours of each day
Todo
----
Restrict action if cap > (12 * power)
"""
charge_blocks = np.zeros(12)
charge_blocks[:int(np.floor(capacity/eta_charge/power))] = power
charge_blocks[int(np.floor(capacity/eta_charge/power))] = np.mod(capacity/eta_charge,power)
# Determine how many hour 'blocks' the battery will need to cover to discharge,
# and what the kWh discharged during those blocks will be
    discharge_blocks = np.zeros(12)
    discharge_blocks[:int(np.floor(capacity*eta_discharge/power))] = power
    discharge_blocks[int(np.floor(capacity*eta_discharge/power))] = np.mod(capacity*eta_discharge,power)
revenue = np.sum(revenue_sum * eta_discharge * discharge_blocks)
cost = np.sum(cost_sum * eta_charge * charge_blocks)
annual_arbitrage_profit = revenue - cost
return annual_arbitrage_profit
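# Quick illustrative check with synthetic numbers (not taken from the model):
# a 5 kW / 10 kWh battery with 91% one-way efficiencies and flat 12-length
# cost/revenue vectors summed over 365 days.
# cost_sum = np.ones(12) * 0.10 * 365
# revenue_sum = np.ones(12) * 0.30 * 365
# estimate_annual_arbitrage_profit(5.0, 10.0, 0.91, 0.91, cost_sum, revenue_sum)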
#%%
def calc_min_possible_demands_vector(res, load_and_pv_profile, pv_profile, d_periods_month, batt, t, month, restrict_charge_to_pv_gen, batt_start_level, estimate_demand_levels):
"""
    Currently battery storage functions are not utilized.
Function that determines the minimum possible demands that this battery can achieve for a particular month.
Parameters
----------
t : :class:`python.tariff_functions.Tariff`
Tariff class object
batt : :class:`python.dispatch_functions.Battery`
Battery class object
Todo
----
Add a vector of forced discharges, for demand response representation
"""
# Recast d_periods_month vector into d_periods_index, which is in terms of increasing integers starting at zero
    try:
        unique_periods = np.unique(d_periods_month)
        Dn_month = len(unique_periods)
        d_periods_index = np.copy(d_periods_month)
        for n in range(len(unique_periods)):
            d_periods_index[d_periods_month==unique_periods[n]] = n
# Calculate the original and minimum possible demands in each period
original_demands = np.zeros(Dn_month)
min_possible_demands = np.zeros(Dn_month)
for period in range(Dn_month):
original_demands[period] = np.max(load_and_pv_profile[d_periods_index==period])
min_possible_demands = original_demands - batt.effective_power
# d_ranges is the range of demands in each period that will be investigated
d_ranges = np.zeros((res,Dn_month), float)
for n in range(Dn_month):
d_ranges[:,n] = np.linspace(min_possible_demands[n], original_demands[n], res)
# Assemble list of demands that cuts diagonally across search space
d_combo_n = res
d_combinations = np.zeros((d_combo_n,Dn_month+1), float)
d_combinations[:,:Dn_month] = d_ranges
TOU_demand_charge = np.sum(tFuncs.tiered_calc_vec(d_combinations[:,:Dn_month], t.d_tou_levels[:,unique_periods], t.d_tou_prices[:,unique_periods]),1) #check that periods line up with rate
# monthly_demand_charge = tFuncs.tiered_calc_vec(np.max(d_combinations[:,:Dn_month],1), t.d_flat_levels[:,month], t.d_flat_prices[:,month])
# d_combinations[:,-1] = TOU_demand_charge + monthly_demand_charge
d_combinations[:,-1] = TOU_demand_charge
# Evaluate the diagonal set of demands, determining which one is the
# cheapest. This will restrict the larger search space in the next step.
cheapest_d_states, batt_level_profile, i_of_first_success = determine_cheapest_possible_of_given_demand_levels(load_and_pv_profile, pv_profile, unique_periods, d_combinations, d_combo_n, Dn_month, d_periods_index, batt, restrict_charge_to_pv_gen, batt_start_level, t)
if estimate_demand_levels == False:
# Assemble a list of all combinations of demand levels within the ranges of
# interest. For a 2D situation, this search space will consist of
# quadrants 1 and 3 around the i_of_first_success, as quadrant 2
# contains no possible solutions and quadrant 4 is dominated. For ND
# situations, each tuple of the cartesian should contain i:Dmin for one
# dimension and i:Dmax for the other dimensions
set_of_all_demand_combinations = np.zeros([0,Dn_month])
for dimension in range(Dn_month):
list_of_ranges = list()
for d_n in range(Dn_month):
if d_n==dimension: list_of_ranges.append(d_ranges[:i_of_first_success+1, d_n])
else: list_of_ranges.append(d_ranges[i_of_first_success:, d_n])
set_of_demands_for_this_dimension = cartesian(list_of_ranges)
set_of_all_demand_combinations = np.concatenate((set_of_all_demand_combinations, set_of_demands_for_this_dimension))
d_combo_n = len(set_of_all_demand_combinations)
d_combinations = np.zeros((d_combo_n,Dn_month+1), float)
d_combinations[:,:Dn_month] = set_of_all_demand_combinations
# Calculate the demand charges of the search space and sort by
# increasing cost.
TOU_demand_charge = np.sum(tFuncs.tiered_calc_vec(d_combinations[:,:Dn_month], t.d_tou_levels[:,unique_periods], t.d_tou_prices[:,unique_periods]),1) #check that periods line up with rate
monthly_demand_charge = tFuncs.tiered_calc_vec(np.max(d_combinations[:,:Dn_month],1), t.d_flat_levels[:,month], t.d_flat_prices[:,month])
d_combinations[:,-1] = TOU_demand_charge + monthly_demand_charge
cheapest_d_states, batt_level_profile, _ = determine_cheapest_possible_of_given_demand_levels(load_and_pv_profile, pv_profile, unique_periods, d_combinations, d_combo_n, Dn_month, d_periods_index, batt, restrict_charge_to_pv_gen, batt_start_level, t)
d_max_vector = cheapest_d_states[d_periods_month]
if restrict_charge_to_pv_gen == True:
d_max_vector = np.minimum(load_and_pv_profile+pv_profile, d_max_vector)
return cheapest_d_states, d_max_vector, batt_level_profile
    except Exception as e:
        print(e)
        print('Error on line {}: {} {}'.format(sys.exc_info()[-1].tb_lineno, type(e), e))
#%%
def determine_cheapest_possible_of_given_demand_levels(load_and_pv_profile, pv_profile, unique_periods, d_combinations, d_combo_n, Dn_month, d_periods_index, batt, restrict_charge_to_pv_gen, batt_start_level, tariff):
demand_vectors = d_combinations[:,:Dn_month][:, d_periods_index]
poss_charge = np.minimum(batt.effective_power*batt.eta_charge, (demand_vectors-load_and_pv_profile)*batt.eta_charge)
if restrict_charge_to_pv_gen == True:
poss_charge = np.minimum(poss_charge, pv_profile*batt.eta_charge)
necessary_discharge = (demand_vectors-load_and_pv_profile)/batt.eta_discharge
    poss_batt_level_change = np.where(necessary_discharge<=0, necessary_discharge, poss_charge)
# Walk through the battery levels. A negative value in a row means that
# particular constraint is not able to be met under the given conditions.
batt_e_levels = np.zeros([d_combo_n, len(d_periods_index)])
batt_e_levels[:,0] = batt_start_level
for n in np.arange(1, len(d_periods_index)):
batt_e_levels[:,n] = batt_e_levels[:,n-1] + poss_batt_level_change[:,n]
batt_e_levels[:,n] = np.clip(batt_e_levels[:,n], -99, batt.effective_cap)
able_to_meet_targets = np.all(batt_e_levels>=0, 1)
    i_of_first_success = np.argmax(able_to_meet_targets)
import librosa
import argparse
import numpy as np
import moviepy.editor as mpy
import random
import torch
from scipy.misc import toimage
from tqdm import tqdm
from pytorch_pretrained_biggan import (BigGAN, one_hot_from_names, truncated_noise_sample,
save_as_images, display_in_terminal)
#get input arguments
parser = argparse.ArgumentParser()
parser.add_argument("--song",required=True)
parser.add_argument("--resolution", default='512')
parser.add_argument("--duration", type=int)
parser.add_argument("--pitch_sensitivity", type=int, default=220)
parser.add_argument("--tempo_sensitivity", type=float, default=0.25)
parser.add_argument("--depth", type=float, default=1)
parser.add_argument("--classes", nargs='+', type=int)
parser.add_argument("--num_classes", type=int, default=12)
parser.add_argument("--sort_classes_by_power", type=int, default=0)
parser.add_argument("--jitter", type=float, default=0.5)
parser.add_argument("--frame_length", type=int, default=512)
parser.add_argument("--truncation", type=float, default=1)
parser.add_argument("--smooth_factor", type=int, default=20)
parser.add_argument("--batch_size", type=int, default=30)
parser.add_argument("--use_previous_classes", type=int, default=0)
parser.add_argument("--use_previous_vectors", type=int, default=0)
parser.add_argument("--output_file", default="output.mp4")
args = parser.parse_args()
#read song
if args.song:
song=args.song
print('\nReading audio \n')
y, sr = librosa.load(song)
else:
raise ValueError("you must enter an audio file name in the --song argument")
#set model name based on resolution
model_name='biggan-deep-' + args.resolution
frame_length=args.frame_length
#set pitch sensitivity
pitch_sensitivity=(300-args.pitch_sensitivity) * 512 / frame_length
#set tempo sensitivity
tempo_sensitivity=args.tempo_sensitivity * frame_length / 512
#set depth
depth=args.depth
#set number of classes
num_classes=args.num_classes
#set sort_classes_by_power
sort_classes_by_power=args.sort_classes_by_power
#set jitter
jitter=args.jitter
#set truncation
truncation=args.truncation
#set batch size
batch_size=args.batch_size
#set use_previous_vectors
use_previous_vectors=args.use_previous_vectors
#set use_previous_classes
use_previous_classes=args.use_previous_classes
#set output name
outname=args.output_file
#set smooth factor
if args.smooth_factor > 1:
smooth_factor=int(args.smooth_factor * 512 / frame_length)
else:
smooth_factor=args.smooth_factor
#set duration
if args.duration:
seconds=args.duration
frame_lim=int(np.floor(seconds*22050/frame_length/batch_size))
else:
frame_lim=int(np.floor(len(y)/sr*22050/frame_length/batch_size))
# Load pre-trained model
model = BigGAN.from_pretrained(model_name)
#set device
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
########################################
########################################
########################################
########################################
########################################
#create spectrogram
spec = librosa.feature.melspectrogram(y=y, sr=sr, n_mels=128,fmax=8000, hop_length=frame_length)
#get mean power at each time point
specm=np.mean(spec,axis=0)
#compute power gradient across time points
gradm=np.gradient(specm)
#set max to 1
gradm=gradm/np.max(gradm)
#set negative gradient time points to zero
gradm = gradm.clip(min=0)
#normalize mean power between 0-1
specm=(specm-np.min(specm))/np.ptp(specm)
#create chromagram of pitches X time points
chroma = librosa.feature.chroma_cqt(y=y, sr=sr, hop_length=frame_length)
#sort pitches by overall power
chromasort=np.argsort(np.mean(chroma,axis=1))[::-1]
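#illustrative shapes: spec is (n_mels=128, T) and chroma is (12, T), where T is
#the number of frames (roughly len(y)/frame_length); chromasort ranks the 12
#pitch classes from most to least powerful over the whole song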
########################################
########################################
########################################
########################################
########################################
if args.classes:
classes=args.classes
if len(classes) not in [12,num_classes]:
raise ValueError("The number of classes entered in the --class argument must equal 12 or [num_classes] if specified")
elif args.use_previous_classes==1:
cvs=np.load('class_vectors.npy')
    classes=list(np.where(cvs[0]>0)[0])
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
# Description: an implementation of a deep learning recommendation model (DLRM)
# The model input consists of dense and sparse features. The former is a vector
# of floating point values. The latter is a list of sparse indices into
# embedding tables, which consist of vectors of floating point values.
# The selected vectors are passed to mlp networks denoted by triangles,
# in some cases the vectors are interacted through operators (Ops).
#
# output:
# vector of values
# model: |
# /\
# /__\
# |
# _____________________> Op <___________________
# / | \
# /\ /\ /\
# /__\ /__\ ... /__\
# | | |
# | Op Op
# | ____/__\_____ ____/__\____
# | |_Emb_|____|__| ... |_Emb_|__|___|
# input:
# [ dense features ] [sparse indices] , ..., [sparse indices]
#
# More precise definition of model layers:
# 1) fully connected layers of an mlp
# z = f(y)
# y = Wx + b
#
# 2) embedding lookup (for a list of sparse indices p=[p1,...,pk])
# z = Op(e1,...,ek)
# obtain vectors e1=E[:,p1], ..., ek=E[:,pk]
#
# 3) Operator Op can be one of the following
# Sum(e1,...,ek) = e1 + ... + ek
# Dot(e1,...,ek) = [e1'e1, ..., e1'ek, ..., ek'e1, ..., ek'ek]
# Cat(e1,...,ek) = [e1', ..., ek']'
# where ' denotes transpose operation
#
# References:
# [1] <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, "Deep Learning Recommendation Model for Personalization and
# Recommendation Systems", CoRR, arXiv:1906.00091, 2019
from __future__ import absolute_import, division, print_function, unicode_literals
# miscellaneous
import builtins
import functools
from itertools import repeat
# import bisect
# import shutil
import time
import json
import os
import copy
import math
import subprocess
# data generation
import dlrm_data_pytorch as dp
# numpy
import numpy as np
import multiprocessing as mp
from multiprocessing import Process, Pool, Manager, Queue, Lock, current_process
from multiprocessing import shared_memory
import pandas as pd
import copy
# The onnx import causes deprecation warnings every time workers
# are spawned during testing. So, we filter out those warnings.
import warnings
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
# pytorch
import torch
import torch.nn as nn
from torch.nn.parallel.parallel_apply import parallel_apply
from torch.nn.parallel.replicate import replicate
from torch.nn.parallel.scatter_gather import gather, scatter
def dash_separated_ints(value):
vals = value.split('-')
for val in vals:
try:
int(val)
except ValueError:
raise argparse.ArgumentTypeError(
"%s is not a valid dash separated list of ints" % value)
return value
def dash_separated_floats(value):
vals = value.split('-')
for val in vals:
try:
float(val)
except ValueError:
raise argparse.ArgumentTypeError(
"%s is not a valid dash separated list of floats" % value)
return value
if __name__ == "__main__":
### import packages ###
import sys
import argparse
### parse arguments ###
parser = argparse.ArgumentParser(
description="Train Deep Learning Recommendation Model (DLRM)"
)
# model related parameters
parser.add_argument("--arch-sparse-feature-size", type=int, default=2)
parser.add_argument(
"--arch-embedding-size", type=dash_separated_ints, default="4-3-2")
# j will be replaced with the table number
parser.add_argument(
"--arch-mlp-bot", type=dash_separated_ints, default="4-3-2")
parser.add_argument(
"--arch-mlp-top", type=dash_separated_ints, default="4-2-1")
parser.add_argument(
"--arch-interaction-op", type=str, choices=['dot', 'cat'], default="dot")
parser.add_argument("--arch-interaction-itself", action="store_true", default=False)
# data
parser.add_argument(
"--data-generation", type=str, default="random"
) # synthetic or dataset
parser.add_argument("--data-trace-file", type=str, default="./input/dist_emb_j.log")
parser.add_argument("--data-set", type=str, default="kaggle") # or terabyte
parser.add_argument("--raw-data-file", type=str, default="")
parser.add_argument("--processed-data-file", type=str, default="")
parser.add_argument("--data-randomize", type=str, default="total") # or day or none
parser.add_argument("--data-trace-enable-padding", type=bool, default=False)
parser.add_argument("--max-ind-range", type=int, default=-1)
parser.add_argument("--data-sub-sample-rate", type=float, default=0.0) # in [0, 1]
parser.add_argument("--num-indices-per-lookup", type=int, default=10)
parser.add_argument("--num-indices-per-lookup-fixed", type=bool, default=False)
parser.add_argument("--num-workers", type=int, default=0)
parser.add_argument("--memory-map", action="store_true", default=False)
parser.add_argument("--dataset-multiprocessing", action="store_true", default=False,
help="The Kaggle dataset can be multiprocessed in an environment \
with more than 7 CPU cores and more than 20 GB of memory. \n \
The Terabyte dataset can be multiprocessed in an environment \
with more than 24 CPU cores and at least 1 TB of memory.")
# mlperf logging (disables other output and stops early)
parser.add_argument("--mlperf-logging", action="store_true", default=False)
# stop at target accuracy Kaggle 0.789, Terabyte (sub-sampled=0.875) 0.8107
parser.add_argument("--mlperf-acc-threshold", type=float, default=0.0)
# stop at target AUC Terabyte (no subsampling) 0.8025
parser.add_argument("--mlperf-auc-threshold", type=float, default=0.0)
parser.add_argument("--mlperf-bin-loader", action='store_true', default=False)
parser.add_argument("--mlperf-bin-shuffle", action='store_true', default=False)
# training
parser.add_argument("--mini-batch-size", type=int, default=1)
# debugging and profiling
parser.add_argument("--print-freq", type=int, default=1)
parser.add_argument("--test-freq", type=int, default=-1)
parser.add_argument("--test-mini-batch-size", type=int, default=1)
parser.add_argument("--test-num-workers", type=int, default=0)
parser.add_argument("--print-time", action="store_true", default=False)
parser.add_argument("--debug-mode", action="store_true", default=False)
parser.add_argument("--enable-profiling", action="store_true", default=False)
parser.add_argument("--plot-compute-graph", action="store_true", default=False)
# Input Profiling
# Percentage Threshold
parser.add_argument("--hot-emb-gpu-mem", type=int, default=268435456, help="GPU memory for hot embeddings") #536870912 (512MB), 268435456 (256MB), 134217728 (128MB)
parser.add_argument("--ip-sampling-rate", type=int, default=5, help="Input sampling rate (in %)")
args = parser.parse_args()
### main loop ###
def time_wrap():
return time.time()
# Using CPU only for input profiling
device = torch.device("cpu")
print("Using CPU...")
# Input Profiling for datasets only
train_data, train_ld = dp.make_alibaba_data_and_loader(args)
ln_emb = np.fromstring(args.arch_embedding_size, dtype=int, sep="-")
# enforce maximum limit on number of vectors per embedding
if args.max_ind_range > 0:
ln_emb = np.array(list(map(
lambda x: x if x < args.max_ind_range else args.max_ind_range,
ln_emb
)))
train = []
for i, train_tuple in enumerate(train_data):
lS_i, X, T = train_tuple
train.append([lS_i, X, T])
train = np.array(train, dtype = object)
train = train.tolist()
X_bytes = train[0][1].nbytes
lS_i_bytes = train[0][0].nbytes
T_bytes = train[0][2].nbytes
input_bytes = X_bytes + lS_i_bytes + T_bytes
# Shared Memories for Multiprocessing based final input classification
shm_train_hot = shared_memory.SharedMemory(create = True, size = input_bytes * len(train))
train_hot_array = np.ndarray(len(train), dtype = object, buffer = shm_train_hot.buf)
shm_train_normal = shared_memory.SharedMemory(create = True, size = input_bytes * len(train))
train_normal_array = np.ndarray(len(train), dtype = object, buffer = shm_train_normal.buf)
def single_process_ip_classification(train_data, hot_emb_dict, train_hot_array, train_normal_array, chunksize):
hot_ctr = 0
normal_ctr = 0
i = int(current_process().name)
print("Running process : ", int(current_process().name), " with pid : ", os.getpid())
for a, train_tuple in enumerate(train_data):
lS_i = []
for j, lS_i_row in enumerate(train_tuple[0]):
lS_i_t = []
for k, lS_i_index in enumerate(lS_i_row):
if (j, int(lS_i_index)) in hot_emb_dict[j].keys():
lS_i_t.append(hot_emb_dict[j][(j, int(lS_i_index))])
else:
break
if ( len(lS_i_t) == len(lS_i_row)):
lS_i.append(lS_i_t)
else:
break
if ( len(lS_i) == len(train_tuple[0])):
lS_i = np.array(lS_i).astype(np.float32)
train_tuple[0] = lS_i
train_hot_array[i*chunksize + hot_ctr] = train_tuple
hot_ctr += 1
else:
train_normal_array[i*chunksize + normal_ctr] = train_tuple
normal_ctr += 1
print("Process : ", int(current_process().name), " done with hot inputs ", hot_ctr, " and normal inputs ", normal_ctr)
# Input Profiler
print("Input Profiling Initializing!!\n")
L = args.hot_emb_gpu_mem
x = args.ip_sampling_rate
num_hot_emb = args.hot_emb_gpu_mem // (4 * args.arch_sparse_feature_size)
print("Available GPU Memory for Hot Emb : ", L / (1024 * 1024), " MB")
print("Input Sampling Rate for Profiling : ", x, "%")
# =============================== PROFILING START ======================================
profiling_begin = time_wrap()
sample_train_data_len = int((x / 100) * len(train_data))
print("Training Input Dataset Length (D) : ", len(train_data))
sampled_train_data = np.random.randint(0, len(train_data), size = sample_train_data_len)
print("Sampled Training Input Dataset Length (D^) : ", len(sampled_train_data))
# ================== Skew Table Creation ======================
skew_table = []
for i in range(len(ln_emb)):
temp_list = np.zeros((ln_emb[i],3), dtype = int)
skew_table.append(temp_list)
# =================== Filling Skew Table Emb Table ======================
for i in range(len(ln_emb)):
for j in range(ln_emb[i]):
skew_table[i][j][0] = i
# =================== Filling Skew Table Emb Index ======================
for i in range(len(ln_emb)):
for j in range(ln_emb[i]):
skew_table[i][j][1] = j
# =================== Filling Skew Table Emb Counter ======================
# Updating Skew table with sampled input profiling data
for i, sample in enumerate(sampled_train_data):
lS_i, X, label = train_data[sample]
for j, lS_i_row in enumerate(lS_i):
for k, lS_i_index in enumerate(lS_i_row):
skew_table[j][int(lS_i_index)][2] = skew_table[j][int(lS_i_index)][2] + 1
# Combining skew table list into a 2D array
skew_table_array = np.vstack(skew_table)
# =================== Sorting Skew Table based on Counter ==============
skew_table_array = skew_table_array[skew_table_array[:,2].argsort()[::-1]]
# =================== Getting hot embedding entries ====================
hot_emb_entries = skew_table_array[0:num_hot_emb]
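    # Illustrative layout: each skew_table_array row is [table_id, row_index,
    # access_count], e.g. a (hypothetical) row [3, 17, 942] would mean row 17
    # of embedding table 3 was hit 942 times in the sampled inputs; after
    # sorting, the first num_hot_emb rows are the hottest entries that fit in
    # the GPU memory budget.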
# =================== Getting Top Emb Dict ==============================
hot_emb_dict = []
emb_dict = {}
for i in range(len(ln_emb)):
new_emb_dict = copy.deepcopy(emb_dict)
hot_emb_dict.append(new_emb_dict)
for i in range(len(hot_emb_entries)):
hot_emb_dict[hot_emb_entries[i][0]][(hot_emb_entries[i][0], hot_emb_entries[i][1])] = np.float32(i)
len_hot_emb_dict = 0
for i in range(len(hot_emb_dict)):
len_hot_emb_dict += len(hot_emb_dict[i])
del skew_table_array
print("Hot Emb Dict Size : ", (len_hot_emb_dict * 4 * args.arch_sparse_feature_size) / (1024 ** 2), " MB")
print("Hot Emb Dict Creation Completed!!")
# ===================== Input Profiling ========================
print("Starting Input Classification")
num_cores = mp.cpu_count()
print("Num Cores : ", num_cores)
chunksize = len(train) // num_cores
processes = [Process(target = single_process_ip_classification,
name = "%i" % i,
args = (train[i*chunksize : (i+1)*chunksize],
hot_emb_dict,
train_hot_array,
train_normal_array,
chunksize
)
)
for i in range(0, num_cores)]
for process in processes:
process.start()
for process in processes:
process.join()
# Removing None elements from both train hot and train normal arrays
nan_array_hot = pd.isnull(train_hot_array)
not_nan_array_hot = ~ nan_array_hot
train_hot_array = train_hot_array[not_nan_array_hot]
nan_array_normal = pd.isnull(train_normal_array)
not_nan_array_normal = ~ nan_array_normal
train_normal_array = train_normal_array[not_nan_array_normal]
print("===================== Input Profiling Stats ==================")
print("Train Hot Data : ", len(train_hot_array))
print("Train Normal Data : ", len(train_normal_array))
print("Total Data : ", len(train_hot_array) + len(train_normal_array))
print("Percentage : ", (len(train_hot_array) / (len(train_hot_array) + len(train_normal_array))) * 100 )
print("==============================================================")
# Closing the shared memories and unlinking
shm_train_hot.close()
shm_train_hot.unlink()
shm_train_normal.close()
shm_train_normal.unlink()
profiling_end = time_wrap()
print("FAE Profiling Time : ", profiling_end - profiling_begin, " s")
train_hot = np.array(train_hot_array, dtype = object)
train_normal = np.array(train_normal_array, dtype = object)
hot_emb_dict = np.array(hot_emb_dict, dtype = object)
np.savez_compressed('./data/taobao_hot_cold/train_hot.npz', train_hot)
np.savez_compressed('./data/taobao_hot_cold/train_normal.npz', train_normal)
    np.savez_compressed('./data/taobao_hot_cold/hot_emb_dict.npz', hot_emb_dict)
# -*- coding: utf-8 -*-
"""
Functions for estimating electricity prices, EEG levies, remunerations and other components, based on customer type and annual demand
@author: Abuzar and Shakhawat
"""
from typing import ValuesView
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from scipy import interpolate
from scipy.interpolate import InterpolatedUnivariateSpline
def calculate_mean_price(customer_type, val_yearly_demand):
"""
Parameters
----------
customer_type : Type of customer, differentiated between household and industrial customers
    val_yearly_demand : yearly electricity demand, in kWh/y for household customers and in MWh/y for industrial customers
Returns
-------
mean_price: average price for the customer for the next year in cents/kWh
"""
def plotting(x,y, title, x_label, y_label, name_plot):
fig = plt.figure()
values = x
plt.plot (x,y)
plt.title(title)
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.xticks(x,values)
plt.xticks(rotation = 45)
fig.savefig(name_plot, dpi=fig.dpi)
def haupt_tarif(data):
#haupt_tarrif = df_with_data
df_with_data = pd.read_excel(data)
yearly_mean = df_with_data.price.mean()
haupt_tarrif = df_with_data[df_with_data["hour"].isin([8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]) & df_with_data["Day"].isin(['Wednesday', 'Thursday', 'Friday', 'Monday', 'Tuesday'])]
cond = df_with_data['hour'].isin(haupt_tarrif['hour'])
df_with_data.drop(haupt_tarrif[cond].index, inplace = True)
ht_factor = haupt_tarrif.price.mean()/yearly_mean
return ht_factor
def neben_tarif(data):
#neben_tarrif = df_with_data
df_with_data = pd.read_excel(data)
yearly_mean = df_with_data.price.mean()
neben_tarrif = df_with_data[(df_with_data["hour"].isin([1, 2, 3, 4, 5, 6, 7, 20, 21, 22, 23, 24]) & df_with_data["Day"].isin(['Wednesday', 'Thursday', 'Friday', 'Monday', 'Tuesday'])) |(df_with_data["Day"].isin(['Saturday', 'Sunday']))]
neben_tarrif.head()
cond = df_with_data['hour'].isin(neben_tarrif['hour'])
df_with_data.drop(neben_tarrif[cond].index, inplace = True)
nt_factor = neben_tarrif.price.mean()/yearly_mean
return nt_factor
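    # Interpretation (illustrative): ht_factor and nt_factor scale the yearly
    # mean price into peak/off-peak levels, e.g. ht_factor = 1.15 would mean
    # weekday daytime (HT) hours average 15% above the yearly mean.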
ht_factor = haupt_tarif("ht_nt_price.xlsx")
nt_factor = neben_tarif("ht_nt_price.xlsx")
#industrial 2000 - 20000 MWh
industrie_prices_without_VAT = pd.read_excel(r'Energiepreisentwicklung.xlsx',sheet_name='5.8.3 Strom - € - Industrie', skiprows = 5, nrows = 26, index_col = 0)
industrie_prices_without_VAT = industrie_prices_without_VAT.iloc[:,0]
industrie_prices_without_VAT = industrie_prices_without_VAT.reset_index()
industrie_prices_without_VAT["index"]= industrie_prices_without_VAT["index"].str.slice(start = 5)
industrie_prices_without_VAT.columns = ["year","price"]
industrie_prices_without_VAT = industrie_prices_without_VAT.set_index("year")
industrie_prices_without_VAT.index = industrie_prices_without_VAT.index.astype(str)
industrie_prices_without_VAT.index = pd.to_datetime(industrie_prices_without_VAT.index, errors='ignore')
industrie_prices_without_VAT = industrie_prices_without_VAT.astype(float)
industrie_prices_without_VAT = industrie_prices_without_VAT.resample('12M').mean()
industrie_prices_without_VAT.index = industrie_prices_without_VAT.index.astype(str)
industrie_prices_without_VAT.index= industrie_prices_without_VAT.index.str.slice(start = 0, stop = -6)
ht_industrie_prices_without_VAT = industrie_prices_without_VAT.price * ht_factor
nt_industrie_prices_without_VAT = industrie_prices_without_VAT.price * nt_factor
ht_industrie_prices_without_VAT = ht_industrie_prices_without_VAT.reset_index()
nt_industrie_prices_without_VAT = nt_industrie_prices_without_VAT.reset_index()
industrie_prices_without_VAT = industrie_prices_without_VAT.reset_index()
industrie_prices_without_VAT = industrie_prices_without_VAT[industrie_prices_without_VAT.year >= str(2016)]
#industrial prices > 150000 MWh/y
v_big_industrial_prices_BDEW = {'year': range(2019,2021), 'price': [3.77,3.05]}
v_big_industrial_prices_BDEW = pd.DataFrame(data=v_big_industrial_prices_BDEW)
v_big_industrial_prices_BDEW
#industrial prices between 70000-150000 MWh/y
big_industrial_prices_BDEW = {'year': range(2016,2021), 'price': [8.37, 9.96, 8.96, 9.28, 10.07]}
big_industrial_prices_BDEW = pd.DataFrame(data=big_industrial_prices_BDEW)
big_industrial_prices_BDEW
#industrial prices between 20000-70000 MWh/y
mid_industrie_prices = pd.read_excel(r'mid_size_industrial_prices.xlsx')
mid_industrie_prices.columns = ['year', 'price']
mid_industrie_prices
#household electricity prices between 2500-5000 KWh/y
household_prices_without_VAT = pd.read_excel(r'Energiepreisentwicklung.xlsx',sheet_name='5.8.2 Strom - € - Haushalte', skiprows = 5, nrows = 26, index_col = 0)
household_prices_without_VAT = household_prices_without_VAT.iloc[:,0]
household_prices_without_VAT = household_prices_without_VAT.reset_index()
household_prices_without_VAT["index"]= household_prices_without_VAT["index"].str.slice(start = 5)
household_prices_without_VAT.columns = ["year","price"]
household_prices_without_VAT = household_prices_without_VAT.set_index("year")
household_prices_without_VAT.index = household_prices_without_VAT.index.astype(str)
household_prices_without_VAT.index = pd.to_datetime(household_prices_without_VAT.index, errors='ignore')
household_prices_without_VAT = household_prices_without_VAT.astype(float)
household_prices_without_VAT = household_prices_without_VAT.resample('12M').mean()
household_prices_without_VAT.index = household_prices_without_VAT.index.astype(str)
household_prices_without_VAT.index= household_prices_without_VAT.index.str.slice(start = 0, stop = -6)
household_prices_without_VAT = household_prices_without_VAT[6:].reset_index()
household_prices_without_VAT = household_prices_without_VAT[household_prices_without_VAT.year >= str(2016)]
household_prices_without_VAT
if ((customer_type == 0) & ((val_yearly_demand >= 2500) & (val_yearly_demand <= 5000))):
print("Do you already know your electricty price?")
#print("Yes = 1 / No = 2")
print("Yes = 0 / No = 1")
#choose = 0
val = input("Enter your value: ")
val = int(val)
if (val == 0):
print("Do you have a fixed electricity price or HT/NT price structure?")
val_ht_nt = input("Enter 0 (zero) for yearly mean price and Enter 1 for HT/NT price structure: ")
val_ht_nt = int(val_ht_nt)
if (val_ht_nt == 1):
val1 = input("Enter HT value: ")
val1 = float(val1)
val2 = input("Enter NT value: ")
val2 = float(val2)
# ht_industrie_prices_without_VAT = household_prices
ht_household_prices_without_VAT = household_prices_without_VAT
ht_household_prices_without_VAT["year"] = ht_household_prices_without_VAT["year"].astype(int)
ht_year = ht_household_prices_without_VAT["year"]
ht_price = ht_household_prices_without_VAT["price"] * ht_factor
ht_new_year = np.append(ht_year, 2021)
ht_new_price = np.append(ht_price, val1)
print(ht_new_year)
print(ht_new_price)
plotting(ht_new_year, ht_new_price, "HT Price", "Year", "Price", "images/HT Price.png")
# plotting(nt_new_year, nt_new_price, "NT Price", "Year", "Price", "images/NT Price.png")
nt_household_prices_without_VAT = household_prices_without_VAT
nt_household_prices_without_VAT["year"] = nt_household_prices_without_VAT["year"].astype(int)
nt_year = nt_household_prices_without_VAT["year"]
nt_price = nt_household_prices_without_VAT["price"] * nt_factor
nt_new_year = np.append(nt_year, 2021)
nt_new_price = np.append(nt_price, val2)
print(nt_new_year)
print(nt_new_price)
plotting(nt_new_year, nt_new_price, "NT Price", "Year", "Price", "images/NT Price.png")
elif (val_ht_nt == 0):
val1 = input("Enter yearly mean price for electricity: ")
val1 = float(val1)
yt_household_prices_without_VAT = household_prices_without_VAT
yt_household_prices_without_VAT["year"] = yt_household_prices_without_VAT["year"].astype(int)
yt_year = yt_household_prices_without_VAT["year"]
yt_price = yt_household_prices_without_VAT["price"]
yt_new_year = np.append(yt_year, 2021)
yt_new_price = np.append(yt_price, (val1))
print(yt_new_year)
print(yt_new_price)
plotting(yt_new_year, yt_new_price, "Price", "Year", "Price", "images/Price.png")
# plotting(nt_new_year, nt_new_price, "NT Price", "Year", "Price", "images/NT Price.png")
elif (val == 1):
yt_household_prices_without_VAT = household_prices_without_VAT
yt_household_prices_without_VAT["year"] = yt_household_prices_without_VAT["year"].astype(int)
yt_year = yt_household_prices_without_VAT["year"]
yt_price = yt_household_prices_without_VAT["price"]
f = interpolate.interp1d(yt_year, yt_price, fill_value = "extrapolate")
p_2021 = f(2021)
yt_new_year = np.append(yt_year, 2021)
yt_new_price = np.append(yt_price, (f(2021)))
# ht_new_price = ht_new_price * ht_factor
print(yt_new_year)
print(yt_new_price)
plotting(yt_new_year, yt_new_price, "Price", "Year", "Price", "images/Price.png")
# plotting(nt_new_year, nt_new_price, "NT Price", "Year", "Price", "images/NT Price.png")
elif ((customer_type == 1) & (val_yearly_demand > 0) & (val_yearly_demand < 2000)):
print("Do you already know your electricty price?")
#print("Yes = 1 / No = 2")
print("Yes = 0 / No = 1")
#choose = 0
val = input("Enter your value: ")
val = int(val)
if (val == 0):
print("Do you have a fixed electricity price or HT/NT price structure?")
val_ht_nt = input("Enter 0 (zero) for yearly mean price and Enter 1 for HT/NT price structure: ")
val_ht_nt = int(val_ht_nt)
if (val_ht_nt == 1):
val1 = input("Enter HT value: ")
val1 = float(val1)
val2 = input("Enter NT value: ")
val2 = float(val2)
ht_household_prices_without_VAT = household_prices_without_VAT
ht_household_prices_without_VAT["year"] = ht_household_prices_without_VAT["year"].astype(int)
ht_year = ht_household_prices_without_VAT["year"]
ht_price = ht_household_prices_without_VAT["price"] * ht_factor
ht_new_year = np.append(ht_year, 2021)
ht_new_price = np.append(ht_price, val1)
print(ht_new_year)
print(ht_new_price)
plotting(ht_new_year, ht_new_price, "HT Price", "Year", "Price", "images/HT Price.png")
# plotting(nt_new_year, nt_new_price, "NT Price", "Year", "Price", "images/NT Price.png")
nt_industrie_prices_without_VAT = household_prices_without_VAT
nt_industrie_prices_without_VAT["year"] = nt_industrie_prices_without_VAT["year"].astype(int)
nt_year = nt_industrie_prices_without_VAT["year"]
nt_price = nt_industrie_prices_without_VAT["price"] * nt_factor
nt_new_year = np.append(nt_year, 2021)
nt_new_price = np.append(nt_price, val2)
print(nt_new_year)
print(nt_new_price)
plotting(nt_new_year, nt_new_price, "NT Price", "Year", "Price", "images/NT Price.png")
elif (val_ht_nt == 0):
val1 = input("Enter yearly mean price for electricity: ")
val1 = float(val1)
ht_industrie_prices_without_VAT = household_prices_without_VAT
ht_industrie_prices_without_VAT["year"] = ht_industrie_prices_without_VAT["year"].astype(int)
ht_year = ht_industrie_prices_without_VAT["year"]
ht_price = ht_industrie_prices_without_VAT["price"]
ht_new_year = np.append(ht_year, 2021)
ht_new_price = np.append(ht_price, (val1))
print(ht_new_year)
print(ht_new_price)
plotting(ht_new_year, ht_new_price, "Price", "Year", "Price", "images/Price.png")
# plotting(nt_new_year, nt_new_price, "NT Price", "Year", "Price", "images/NT Price.png")
elif (val == 1):
# val1 = input("Enter your preferred price: ")
# val1 = float(val1)
ht_industrie_prices_without_VAT = household_prices_without_VAT
ht_industrie_prices_without_VAT["year"] = ht_industrie_prices_without_VAT["year"].astype(int)
ht_year = ht_industrie_prices_without_VAT["year"]
ht_price = ht_industrie_prices_without_VAT["price"]
f = interpolate.interp1d(ht_year, ht_price, fill_value = "extrapolate")
p_2021 = f(2021)
ht_new_year = np.append(ht_year, 2021)
ht_new_price = np.append(ht_price, (f(2021)))
print(ht_new_year)
print(ht_new_price)
plotting(ht_new_year, ht_new_price, "Price", "Year", "Price", "images/Price.png")
# plotting(nt_new_year, nt_new_price, "NT Price", "Year", "Price", "images/NT Price.png")
elif ((customer_type == 1) & (val_yearly_demand >= 2000) & (val_yearly_demand <= 20000)):
print("Do you already know your electricty price?")
#print("Yes = 1 / No = 2")
print("Yes = 0 / No = 1")
#choose = 0
val = input("Enter your value: ")
val = int(val)
if (val == 0):
print("Do you have a fixed electricity price or HT/NT price structure?")
val_ht_nt = input("Enter 0 (zero) for yearly mean price and Enter 1 for HT/NT price structure: ")
val_ht_nt = int(val_ht_nt)
if (val_ht_nt == 1):
val1 = input("Enter HT value: ")
val1 = float(val1)
val2 = input("Enter NT value: ")
val2 = float(val2)
ht_industrie_prices_without_VAT["year"] = ht_industrie_prices_without_VAT["year"].astype(int)
ht_year = ht_industrie_prices_without_VAT["year"]
ht_price = ht_industrie_prices_without_VAT["price"] * ht_factor
ht_new_year = np.append(ht_year, 2021)
ht_new_price = np.append(ht_price, val1)
print(ht_new_year)
print(ht_new_price)
plotting(ht_new_year, ht_new_price, "HT Price", "Year", "Price", "images/HT Price.png")
# plotting(nt_new_year, nt_new_price, "NT Price", "Year", "Price", "images/NT Price.png")
nt_industrie_prices_without_VAT["year"] = nt_industrie_prices_without_VAT["year"].astype(int)
nt_year = nt_industrie_prices_without_VAT["year"]
nt_price = nt_industrie_prices_without_VAT["price"]
nt_new_year = np.append(nt_year, 2021)
nt_new_price = np.append(nt_price * nt_factor, val2)
print(nt_new_year)
print(nt_new_price)
plotting(nt_new_year, nt_new_price, "NT Price", "Year", "Price", "images/NT Price.png")
elif (val_ht_nt == 0):
val1 = input("Enter yearly mean price for electricity: ")
val1 = float(val1)
ht_industrie_prices_without_VAT = industrie_prices_without_VAT
ht_industrie_prices_without_VAT["year"] = ht_industrie_prices_without_VAT["year"].astype(int)
ht_year = ht_industrie_prices_without_VAT["year"]
ht_price = ht_industrie_prices_without_VAT["price"]
ht_new_year = np.append(ht_year, 2021)
ht_new_price = np.append(ht_price, (val1))
print(ht_new_year)
print(ht_new_price)
plotting(ht_new_year, ht_new_price, "Price", "Year", "Price", "images/Price.png")
# plotting(nt_new_year, nt_new_price, "NT Price", "Year", "Price", "images/NT Price.png")
elif (val == 1):
# val1 = input("Enter your preferred price: ")
# val1 = float(val1)
ht_industrie_prices_without_VAT = industrie_prices_without_VAT
ht_industrie_prices_without_VAT["year"] = ht_industrie_prices_without_VAT["year"].astype(int)
ht_year = ht_industrie_prices_without_VAT["year"]
ht_price = ht_industrie_prices_without_VAT["price"]
f = interpolate.interp1d(ht_year, ht_price, fill_value = "extrapolate")
p_2021 = f(2021)
ht_new_year = np.append(ht_year, 2021)
ht_new_price = np.append(ht_price, (f(2021)))
print(ht_new_year)
print(ht_new_price)
plotting(ht_new_year, ht_new_price, "Price", "Year", "Price", "images/Price.png")
# plotting(nt_new_year, nt_new_price, "NT Price", "Year", "Price", "images/NT Price.png")
elif ((customer_type == 1) & (val_yearly_demand > 20000) & (val_yearly_demand <= 70000)):
print("Do you already know your electricty price?")
#print("Yes = 1 / No = 2")
print("Yes = 0 / No = 1")
#choose = 0
val = input("Enter your value: ")
val = int(val)
if (val == 0):
print("Do you have a fixed electricity price or HT/NT price structure?")
val_ht_nt = input("Enter 0 (zero) for yearly mean price and Enter 1 for HT/NT price structure: ")
val_ht_nt = int(val_ht_nt)
if (val_ht_nt == 1):
val1 = input("Enter HT value: ")
val1 = float(val1)
val2 = input("Enter NT value: ")
val2 = float(val2)
ht_industrie_prices_without_VAT = mid_industrie_prices
ht_industrie_prices_without_VAT["year"] = ht_industrie_prices_without_VAT["year"].astype(int)
ht_year = ht_industrie_prices_without_VAT["year"]
ht_price = ht_industrie_prices_without_VAT["price"] * ht_factor
ht_new_year = np.append(ht_year, 2021)
ht_new_price = np.append(ht_price, val1)
print(ht_new_year)
print(ht_new_price)
plotting(ht_new_year, ht_new_price, "HT Price", "Year", "Price", "images/HT Price.png")
# plotting(nt_new_year, nt_new_price, "NT Price", "Year", "Price", "images/NT Price.png")
nt_industrie_prices_without_VAT = mid_industrie_prices
nt_industrie_prices_without_VAT["year"] = nt_industrie_prices_without_VAT["year"].astype(int)
nt_year = nt_industrie_prices_without_VAT["year"]
nt_price = nt_industrie_prices_without_VAT["price"] * nt_factor
nt_new_year = np.append(nt_year, 2021)
nt_new_price = np.append(nt_price, val2)
print(nt_new_year)
print(nt_new_price)
plotting(nt_new_year, nt_new_price, "NT Price", "Year", "Price", "images/NT Price.png")
elif (val_ht_nt == 0):
val1 = input("Enter yearly mean price for electricity: ")
val1 = float(val1)
ht_industrie_prices_without_VAT = mid_industrie_prices
ht_industrie_prices_without_VAT["year"] = ht_industrie_prices_without_VAT["year"].astype(int)
ht_year = ht_industrie_prices_without_VAT["year"]
ht_price = ht_industrie_prices_without_VAT["price"]
ht_new_year = np.append(ht_year, 2021)
ht_new_price = np.append(ht_price, (val1))
print(ht_new_year)
print(ht_new_price)
plotting(ht_new_year, ht_new_price, "Price", "Year", "Price", "images/Price.png")
# plotting(nt_new_year, nt_new_price, "NT Price", "Year", "Price", "images/NT Price.png")
elif (val == 1):
ht_industrie_prices_without_VAT = mid_industrie_prices
ht_industrie_prices_without_VAT["year"] = ht_industrie_prices_without_VAT["year"].astype(int)
ht_year = ht_industrie_prices_without_VAT["year"]
ht_price = ht_industrie_prices_without_VAT["price"]
f = interpolate.interp1d(ht_year, ht_price, fill_value = "extrapolate")
p_2021 = f(2021)
ht_new_year = np.append(ht_year, 2021)
ht_new_price = np.append(ht_price, (f(2021)))
print(ht_new_year)
print(ht_new_price)
plotting(ht_new_year, ht_new_price, "Price", "Year", "Price", "images/Price.png")
# plotting(nt_new_year, nt_new_price, "NT Price", "Year", "Price", "images/NT Price.png")
elif ((customer_type == 1) & (val_yearly_demand > 70000) & (val_yearly_demand <= 150000)):
print("Do you already know your electricty price?")
#print("Yes = 1 / No = 2")
print("Yes = 0 / No = 1")
#choose = 0
val = input("Enter your value: ")
val = int(val)
if (val == 0):
print("Do you have a fixed electricity price or HT/NT price structure?")
val_ht_nt = input("Enter 0 (zero) for yearly mean price and Enter 1 for HT/NT price structure: ")
val_ht_nt = int(val_ht_nt)
if (val_ht_nt == 1):
val1 = input("Enter HT value: ")
val1 = float(val1)
val2 = input("Enter NT value: ")
val2 = float(val2)
ht_industrie_prices_without_VAT = big_industrial_prices_BDEW
ht_industrie_prices_without_VAT["year"] = ht_industrie_prices_without_VAT["year"].astype(int)
ht_year = ht_industrie_prices_without_VAT["year"]
ht_price = ht_industrie_prices_without_VAT["price"] * ht_factor
ht_new_year = np.append(ht_year, 2021)
ht_new_price = np.append(ht_price, val1)
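# --- Hedged refactor sketch (not part of the original script) ---
# Every customer_type branch above repeats the same sequence: cast the year
# column to int, optionally scale by ht_factor/nt_factor, append a 2021 value
# (user input, or extrapolated when none is given), print, and plot. A helper
# like the hypothetical extend_and_plot below could collapse that duplication;
# it assumes the plotting() function and the price dataframes defined earlier
# in this script.
def extend_and_plot(prices, label, outfile, value=None, factor=1.0):
    prices["year"] = prices["year"].astype(int)
    year = prices["year"]
    price = prices["price"] * factor
    if value is None:
        # No user-supplied 2021 price: extrapolate from the historical series.
        f = interpolate.interp1d(year, price, fill_value="extrapolate")
        value = f(2021)
    new_year = np.append(year, 2021)
    new_price = np.append(price, value)
    print(new_year)
    print(new_price)
    plotting(new_year, new_price, label, "Year", "Price", outfile)
    return new_year, new_price
# Example: extend_and_plot(household_prices_without_VAT, "HT Price",
#                          "images/HT Price.png", value=val1, factor=ht_factor)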
import os, csv
import sys
import errno
import pudb
from collections import defaultdict
from copy import deepcopy
import torch
import torch as th
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
from torch.utils.tensorboard import SummaryWriter
from torchviz import make_dot
import numpy as np
from scipy.constants import golden
import matplotlib.pyplot as plt
import seaborn as sns
import skimage
from skimage import data, io
import gym
from gym import wrappers
import azad.local_gym
from azad.local_gym.wythoff import create_moves
from azad.local_gym.wythoff import create_all_possible_moves
from azad.local_gym.wythoff import locate_moves
from azad.local_gym.wythoff import create_cold_board
from azad.local_gym.wythoff import create_board
from azad.local_gym.wythoff import cold_move_available
from azad.local_gym.wythoff import locate_closest_cold_move
from azad.local_gym.wythoff import locate_cold_moves
from azad.models import Table
from azad.models import DeepTable3
from azad.models import HotCold2
from azad.models import HotCold3
from azad.models import ReplayMemory
from azad.policy import epsilon_greedy
from azad.policy import softmax
class WythoffOptimalStrategist(object):
"""Mimic an optimal Wythoffs player, while behaving like a pytorch model."""
def __init__(self, m, n, hot_value=-1, cold_value=1):
self.m = int(m)
self.n = int(n)
self.hot_value = float(hot_value)
self.cold_value = float(cold_value)
self.board = create_cold_board(self.m,
self.n,
cold_value=cold_value,
default=hot_value)
def forward(self, x):
try:
x = tuple(x.detach().numpy().flatten())
except AttributeError:
pass
i, j = x
return self.board[int(i), int(j)]
def __call__(self, x):
return self.forward(x)
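# Hedged usage sketch (not from the original source): the strategist mimics
# a pytorch model's call signature and returns the board value at a queried
# (i, j) position; (3, 5) is a cold position in Wythoff's game.
#
#     optimal = WythoffOptimalStrategist(10, 10, hot_value=-1.0, cold_value=1.0)
#     optimal((3, 5))                    # -> 1.0 (cold_value)
#     optimal(torch.tensor([3.0, 4.0]))  # tensors work too -> -1.0 (hot_value)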
def wythoff_stumbler_strategist(num_episodes=10,
num_stumbles=1000,
stumbler_game='Wythoff10x10',
learning_rate_stumbler=0.1,
epsilon=0.5,
anneal=True,
gamma=1.0,
num_strategies=1000,
strategist_game='Wythoff50x50',
heuristic=True,
learning_rate_strategist=0.01,
num_hidden1=100,
num_hidden2=25,
cold_threshold=0.0,
hot_threshold=0.5,
hot_value=1,
cold_value=-1,
reflect_cold=True,
optimal_strategist=False,
num_eval=1,
learning_rate_influence=0.01,
new_rules=False,
tensorboard=None,
update_every=5,
seed=None,
save=None,
load_model=None,
save_model=False,
stumbler_monitor=None,
strategist_monitor=None,
monitor=None,
return_none=False,
debug=False):
"""Learn Wythoff's with a stumbler-strategist network"""
# -----------------------------------------------------------------------
# Init games
m, n, _, _ = peek(create_env(strategist_game, monitor=False))
o, p, _, _ = peek(create_env(stumbler_game, monitor=False))
if tensorboard:
try:
os.makedirs(tensorboard)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
writer = SummaryWriter(log_dir=tensorboard)
if monitor:
monitored = create_monitored(monitor)
# Force some casts, mostly to make CL invocation seamless
num_episodes = int(num_episodes)
num_strategies = int(num_strategies)
num_stumbles = int(num_stumbles)
num_eval = int(num_eval)
num_hidden1 = int(num_hidden1)
num_hidden2 = int(num_hidden2)
# -----------------------------------------------------------------------
# Init agents
player = None
opponent = None
strategist = None
bias_board = None
# Override w/ data from disk?
if load_model is not None:
if debug:
print(">>> Loading model from {}".format(load_model))
player, opponent = load_stumbler(player, opponent, load_model)
strategist = init_strategist(num_hidden1, num_hidden2)
strategist = load_strategist(strategist, load_model)
# If the rules have changed (wythoff -> nim,euclid) only the stumblers
# action spaces don't match, and should be reset.
if new_rules:
player, opponent = None, None
# Optimal overrides all others
if optimal_strategist:
strategist = WythoffOptimalStrategist(m,
n,
hot_value=hot_value,
cold_value=cold_value)
score_b = 0.0
# -----------------------------------------------------------------------
influence = 0.0
score_a = 0.0
score_b = 0.0
total_reward_a = 0.0
for episode in range(num_episodes):
# Stumbler
save_a = None
if save is not None:
save_a = save + "_episode{}_stumbler".format(episode)
(player, opponent), (score_a, total_reward_a) = wythoff_stumbler(
num_episodes=num_stumbles,
game=stumbler_game,
epsilon=epsilon,
anneal=anneal,
gamma=gamma,
learning_rate=learning_rate_stumbler,
model=player,
opponent=opponent,
bias_board=bias_board,
influence=influence,
score=score_a,
total_reward=total_reward_a,
tensorboard=tensorboard,
update_every=update_every,
initial=episode * num_stumbles,
debug=debug,
save=save_a,
save_model=False,
monitor=stumbler_monitor,
return_none=False,
seed=seed)
# Strategist
if not optimal_strategist:
save_b = None
if save is not None:
save_b = save + "_episode{}_strategist".format(episode)
strategist, score_b = wythoff_strategist(
player,
stumbler_game,
num_episodes=num_strategies,
game=strategist_game,
model=strategist,
num_hidden1=num_hidden1,
num_hidden2=num_hidden2,
score=score_b,
cold_threshold=cold_threshold,
hot_threshold=hot_threshold,
learning_rate=learning_rate_strategist,
tensorboard=tensorboard,
update_every=update_every,
hot_value=hot_value,
cold_value=cold_value,
reflect_cold=reflect_cold,
initial=episode * num_strategies,
debug=debug,
save=save_b,
monitor=strategist_monitor,
save_model=False,
return_none=False,
heuristic=heuristic,
seed=seed)
# --------------------------------------------------------------------
# Use the trained strategist to generate a bias_board,
bias_board = create_bias_board(m, n, strategist)
# Est performance. Count strategist wins.
wins, eval_score_a, eval_score_b = evaluate_wythoff(
player,
strategist,
stumbler_game,
strategist_game,
num_episodes=num_eval,
debug=debug)
# Update the influence and then the bias_board
win = wins / num_eval
if win > 0.5:
influence += learning_rate_influence
else:
influence -= learning_rate_influence
influence = np.clip(influence, 0, 1)
from pdb import set_trace as st
import os
import numpy as np
import cv2
import argparse
from glob import glob
from skimage.util import random_noise as imnoise
parser = argparse.ArgumentParser('create image pairs')
parser.add_argument('--output_fold', dest='output_fold', help='output directory for aligned images (left: blur, right: sharp)', type=str, default='full')
parser.add_argument('--fold_B', dest='fold_B', help='output directory for image B', type=str, default='sharp')
parser.add_argument('--num_imgs', dest='num_imgs', help='number of images', type=int, default=110000000)
parser.add_argument('--phase', dest='phase', help='test or train', type=str, default='test')
parser.add_argument('--fine_size', dest='fine_size', type=int, default=720, help='then crop to this size')
args = parser.parse_args()
# set frames in each blurry image
N = 11
# set input directory
img_input_fold = './' + args.phase
# set output directory
img_output_fold = img_input_fold + '_' + args.output_fold
if not os.path.isdir(img_output_fold):
os.makedirs(img_output_fold)
# folders in /test or /train
splits = os.listdir(img_input_fold)
# fine_size and image width
fine_size = args.fine_size
counter = 0
for folder in splits:
if not folder.startswith('.'):
img_subset = os.path.join(img_input_fold, folder)
img_list = os.listdir(img_subset)
img_list = sorted(img_list, key=str.lower)
num_imgs = min(args.num_imgs, len(img_list))
out_num, extra_num = divmod(num_imgs, N)
counter += 1
prefix = '%02d' % counter
for n in range(out_num):
img_in = []
for i in range(N):
name_single = img_list[n*N + i]
path_in = os.path.join(img_subset, name_single)
im_single = cv2.imread(path_in, cv2.IMREAD_COLOR).astype(np.float64)
img_in.append(im_single)
img_in = np.array(img_in)
img_blur = np.mean(img_in, axis=0)
#! /usr/bin/python3.7
# -- coding: utf-8 -- **
### Here are a set of functions used in elec_pipe
### and a set of qthread class for elec_main_gui
import sys
import os
import re
import math
import numpy as np
from numpy import ndarray
import nibabel as nib
from scipy import ndimage
from sklearn.mixture import GaussianMixture as GMM
from sklearn.linear_model import LinearRegression, Lasso
from PyQt5.QtCore import QThread, pyqtSignal
# import matplotlib
# matplotlib.use("Qt5Agg")
# from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
# from matplotlib.figure import Figure
# from matplotlib import pyplot as plt
# from mpl_toolkits.mplot3d import Axes3D, art3d
# import electrode
CMD_Hough3D = './hough-3d-lines/hough3dlines'
def run(cmd):
"""
Print the command.
Execute a command string on the shell (on bash).
Parameters
----------
cmd : str
Command to be sent to the shell.
"""
print(f"Running shell command: {cmd}")
os.system(cmd)
print(f"Done!\n")
def align(inp, ref, xfm=None, out=None, dof=12, searchrad=True, bins=256, interp=None, cost="mutualinfo", sch=None, wmseg=None, init=None, finesearch=None,):
"""Aligns two images using FSLs flirt function and stores the transform between them
Parameters
----------
inp : str
path to input image being altered to align with the reference image as a nifti image file
ref : str
path to reference image being aligned to as a nifti image file
xfm : str, optional
where to save the 4x4 affine matrix containing the transform between two images, by default None
out : str, optional
determines whether the image will be automatically aligned and where the resulting image will be saved, by default None
dof : int, optional
the number of degrees of free dome of the alignment, by default 12
searchrad : bool, optional
whether to use the predefined searchradius parameter (180 degree sweep in x, y, and z), by default True
bins : int, optional
number of histogram bins, by default 256
interp : str, optional
interpolation method to be used (trilinear,nearestneighbour,sinc,spline), by default None
cost : str, optional
cost function to be used in alignment (mutualinfo, corratio, normcorr, normmi, leastsq, labeldiff, or bbr), by default "mutualinfo"
sch : str, optional
the optional FLIRT schedule, by default None
wmseg : str, optional
an optional white-matter segmentation for bbr, by default None
init : str, optional
an initial guess of an alignment in the form of the path to a matrix file, by default None
finesearch : int, optional
angle in degrees, by default None
"""
cmd = f"flirt -in {inp} -ref {ref}"
if xfm is not None:
cmd += f" -omat {xfm}"
if out is not None:
cmd += f" -out {out}"
if dof is not None:
cmd += f" -dof {dof}"
if bins is not None:
cmd += f" -bins {bins}"
if interp is not None:
cmd += f" -interp {interp}"
if cost is not None:
cmd += f" -cost {cost}"
if searchrad is not None:
cmd += " -searchrx -180 180 -searchry -180 180 " + "-searchrz -180 180"
if sch is not None:
cmd += f" -schedule {sch}"
if wmseg is not None:
cmd += f" -wmseg {wmseg}"
if init is not None:
cmd += f" -init {init}"
run(cmd)
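# Hedged usage sketch (paths are placeholders, not from the source; FSL's
# flirt binary must be on the shell PATH):
#
#     align(inp="CT.nii.gz", ref="T1.nii.gz",
#           xfm="CT_to_T1.mat", out="CT_in_T1.nii.gz",
#           dof=6, interp="trilinear", cost="mutualinfo")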
def align_nonlinear(inp, ref, xfm, out, warp, ref_mask=None, in_mask=None, config=None):
"""Aligns two images using nonlinear methods and stores the transform between them using fnirt
Parameters
----------
inp : str
path to the input image
ref : str
path to the reference image that the input will be aligned to
xfm : str
path to the file containing the affine transform matrix created by align()
out : str
path for the desired output image
warp : str
the path to store the output file containing the nonlinear warp coefficients/fields
ref_mask : str, optional
path to the reference image brain_mask, by default None
in_mask : str, optional
path for the file with mask in input image space, by default None
config : str, optional
path to the config file specifying command line arguments, by default None
"""
cmd = f"fnirt --in={inp} --ref={ref} --aff={xfm} --iout={out} --cout={warp} --warpres=8,8,8"
if ref_mask is not None:
cmd += f" --refmask={ref_mask} --applyrefmask=1"
if in_mask is not None:
cmd += f" --inmask={in_mask} --applyinmask=1"
if config is not None:
cmd += f" --config={config}"
run(cmd)
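# Hedged usage sketch (placeholder paths): refine the affine transform
# produced by align() with a nonlinear warp.
#
#     align_nonlinear(inp="CT.nii.gz", ref="T1.nii.gz", xfm="CT_to_T1.mat",
#                     out="CT_in_T1_nl.nii.gz", warp="CT_to_T1_warp.nii.gz")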
def dataExtraction(intraFile, thre=0.2):
rawData = nib.load(intraFile).get_fdata()
maxVal = np.amax(rawData)
# print(f"maxVal={maxVal}")
thre = maxVal * thre
threData = np.copy(rawData)
threData[threData < thre] = 0
xs, ys, zs = np.where(threData != 0)
return xs, ys, zs
def trackRecognition(patient, cmd_hough3d, CTresult_dir, intraFile, thre=0.2):
xs, ys, zs = dataExtraction(intraFile, thre)
X = np.transpose(np.array((xs, ys, zs)))
# print(X.shape)
fname = f"{CTresult_dir}/{patient}_3dPointClouds.dat"
np.savetxt(fname, X, fmt='%.4f', delimiter=',', newline='\n', header='point clouds', footer='', comments='# ', encoding=None)
cmd_hough = f"{cmd_hough3d} -o {CTresult_dir}/{patient}.txt -minvotes 5 {fname}"
run(cmd=cmd_hough)
return xs, ys, zs
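# Hedged usage sketch (patient ID and paths are placeholders): threshold the
# post-implant CT, dump the point cloud, and run Hough 3-D line detection.
#
#     xs, ys, zs = trackRecognition("sub01", CMD_Hough3D, "results/sub01",
#                                   "sub01_CT.nii.gz", thre=0.2)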
def locateLine(row, info):
ax = info[row][1]
ay = info[row][2]
az = info[row][3]
bx = info[row][4]
by = info[row][5]
bz = info[row][6]
axx = np.linspace(ax, ax+bx*50, 50)
ayy = np.linspace(ay, ay+by*50, 50)
azz = np.linspace(az, az+bz*50, 50)
import numpy as np
class Scorecard:
def __init__(self, ruleset, scores=None):
self.scores = np.zeros(ruleset.num_categories, dtype="int")
self.filled = np.zeros(ruleset.num_categories, dtype="bool")
if scores is not None:
assert len(scores) == ruleset.num_categories
for i in range(ruleset.num_categories):
if scores[i] is not None:
self.scores[i] = scores[i]
self.filled[i] = 1
self.ruleset_ = ruleset
def copy(self):
new_card = Scorecard(self.ruleset_)
new_card.scores[...] = self.scores
new_card.filled[...] = self.filled
return new_card
def register_score(self, roll, cat_index):
roll = np.array(roll)
import numpy as np
import scipy.io as scio
import math
"""
Elastoviscoplastic
D:\FluidSim\FluidSim\FEMNEW\2002-CC_KR-Elastoviscoplastic_FE_Analysis_in_Matlab\Software3\example4
Completion status: there is a known bug at line 115.
"""
coordinates = scio.loadmat('coordinates.mat')['coordinates']
elements = scio.loadmat('elements.mat')['elements'] - 1
neumann = scio.loadmat('neumann.mat')['neumann'] - 1
dirichlet = scio.loadmat('dirichlet.mat')['dirichlet'] - 1
lam=1.107438169066076e+05
mu=8.019379844961240e+04
C1=lam+2*mu/3
sigma_y=450
th = 1
dt = 0.05
nu = 0
sigma = np.zeros((32,9))
for t in range(1):
C2=nu/(nu/(2*mu)+th*dt)
C3 = th*dt*sigma_y/(nu/(2*mu)+th*dt)
trsigma = sigma[:,0] + sigma[:,4] + sigma[:,8]
devsigma = np.zeros((32,9))
devarray = np.array([1,0,0,0,1,0,0,0,1])
e0 = np.zeros((32,9))
r"""
Kernel module, implements a few standard kernels for use with the
``GaussianProcess`` class. Options include the Squared Exponential
kernel and Matern 5/2 kernel, both with either a single correlation
length (``UniformSqExp``, ``UniformMat52``) or correlation lengths
for each input dimension (``SquaredExponential``, ``Matern52``).
The product form of the Matern 5/2 kernel (``ProductMat52``) is
also available.
"""
import numpy as np
class KernelBase(object):
"Base Kernel"
def get_n_params(self, inputs):
"""
Determine number of correlation length parameters based on inputs
Determines the number of parameters required for a given set of inputs.
Returns the number of parameters as an integer.
:param inputs: Set of inputs for which the number of correlation length
parameters is desired.
:type inputs: ndarray
:returns: Number of correlation length parameters
:rtype: int
"""
inputs = np.array(inputs)
assert inputs.ndim == 2, "Inputs must be a 2D array"
return inputs.shape[1]
def _check_inputs(self, x1, x2, params):
r"""
Common function for checking dimensions of inputs (default version)
This function checks the inputs to any kernel evaluation for consistency and ensures
that all input arrays have the correct dimensionality. It returns the reformatted
arrays, the number of inputs, and the number of hyperparameters. If the method
determines that the array dimensions are not all consistent with one another,
it will raise an ``AssertionError``. This method is called internally whenever
the kernel is evaluated.
:param x1: First parameter array. Should be a 1-D or 2-D array (1-D is acceptable
if either there is only a single point, or each point has only a single
parameter). If there is more than one parameter, the last dimension
            must match the last dimension of ``x2`` and the length
            of ``params``.
:type x1: array-like
:param x2: Second parameter array. The same restrictions apply that hold for ``x1``
described above.
:type x2: array-like
:param params: Hyperparameter array. Must have a length equal to the last
    dimension of ``x1`` and ``x2``; minimum length is 1.
:type params: array-like
:returns: A tuple containing the following: reformatted ``x1``, ``n1``, reformatted
          ``x2``, ``n2``, ``params``, and ``D``. ``x1`` will be an array with
          dimensions ``(n1, D)``, ``x2`` will be an array with dimensions
          ``(n2, D)``, and ``params`` will be an array with dimensions ``(D,)``.
          ``n1``, ``n2``, and ``D`` will be integers.
"""
params = np.array(params)
assert params.ndim == 1, "parameters must be a vector"
D = len(params)
assert D >= 1, "minimum number of parameters in a covariance kernel is 1"
x1 = np.array(x1)
assert x1.ndim == 1 or x1.ndim == 2, "bad number of dimensions in input x1"
if x1.ndim == 2:
assert x1.shape[1] == D, "bad shape for x1"
else:
if D == 1:
x1 = np.reshape(x1, (len(x1), 1))
else:
x1 = np.reshape(x1, (1, D))
n1 = x1.shape[0]
x2 = np.array(x2)
assert x2.ndim == 1 or x2.ndim == 2, "bad number of dimensions in input x2"
if x2.ndim == 2:
assert x2.shape[1] == D, "bad shape for x2"
else:
if D == 1:
x2 = np.reshape(x2, (len(x2), 1))
else:
x2 = np.reshape(x2, (1, D))
n2 = x2.shape[0]
return x1, n1, x2, n2, params, D
def kernel_f(self, x1, x2, params):
r"""
Compute kernel values for a set of inputs
Returns the value of the kernel for two sets of input points and a choice of
hyperparameters. This function should not need to be modified for different choices
of the kernel function or distance metric, as after checking the inputs it simply
calls the routine to compute the distance metric and then evaluates the kernel function
for those distances.
:param x1: First input array. Must be a 1-D or 2-D array, with the length of
the last dimension matching the last dimension of ``x2`` and
one less than the length of ``params``. ``x1`` may be 1-D if either
each point consists of a single parameter (and ``params`` has length
2) or the array only contains a single point (in which case, the array
will be reshaped to ``(1, D - 1)``).
:type x1: array-like
:param x2: Second input array. The same restrictions that apply to ``x1`` also
apply here.
:type x2: array-like
:param params: Hyperparameter array. Must be 1-D with length equal to the
               number of hyperparameters required by the kernel (see
               ``get_n_params``).
:type params: array-like
:returns: Array holding all kernel values between points in arrays ``x1``
and ``x2``. Will be an array with shape ``(n1, n2)``, where ``n1``
is the length of the first axis of ``x1`` and ``n2`` is the length
of the first axis of ``x2``.
:rtype: ndarray
"""
x1, n1, x2, n2, params, D = self._check_inputs(x1, x2, params)
return self.calc_K(self.calc_r2(x1, x2, params))
def kernel_deriv(self, x1, x2, params):
r"""
Compute kernel gradient for a set of inputs
Returns the value of the kernel gradient for two sets of input points and a choice of
hyperparameters. This function should not need to be modified for different choices
of the kernel function or distance metric, as after checking the inputs it simply
calls the routine to compute the distance metric, kernel function, and the appropriate
derivative functions of the distance and kernel functions.
:param x1: First input array. Must be a 1-D or 2-D array, with the length of
the last dimension matching the last dimension of ``x2`` and
one less than the length of ``params``. ``x1`` may be 1-D if either
each point consists of a single parameter (and ``params`` has length
2) or the array only contains a single point (in which case, the array
will be reshaped to ``(1, D - 1)``).
:type x1: array-like
:param x2: Second input array. The same restrictions that apply to ``x1`` also
apply here.
:type x2: array-like
:param params: Hyperparameter array. Must be 1-D with length equal to the
               number of hyperparameters required by the kernel (see
               ``get_n_params``).
:type params: array-like
:returns: Array holding the gradient of the kernel function between points in arrays
``x1`` and ``x2`` with respect to the hyperparameters. Will be an array with
shape ``(D, n1, n2)``, where ``D`` is the length of ``params``, ``n1`` is the
length of the first axis of ``x1`` and ``n2`` is the length of the first axis
of ``x2``. The first axis indicates the different derivative components
(i.e. the derivative with respect to the first parameter is [0,:,:], etc.)
:rtype: ndarray
"""
x1, n1, x2, n2, params, D = self._check_inputs(x1, x2, params)
dKdr2 = self.calc_dKdr2(self.calc_r2(x1, x2, params))
dr2dtheta = self.calc_dr2dtheta(x1, x2, params)
dKdtheta = dKdr2*dr2dtheta
return dKdtheta
def kernel_hessian(self, x1, x2, params):
r"""
Calculate the Hessian of the kernel evaluated for all pairs of points with
respect to the hyperparameters
Returns the value of the kernel Hessian for two sets of input points and a choice of
hyperparameters. This function should not need to be modified for different choices
of the kernel function or distance metric, as after checking the inputs it simply
calls the routine to compute the distance metric, kernel function, and the appropriate
derivative functions of the distance and kernel functions.
:param x1: First input array. Must be a 1-D or 2-D array, with the length of
the last dimension matching the last dimension of ``x2`` and
one less than the length of ``params``. ``x1`` may be 1-D if either
each point consists of a single parameter (and ``params`` has length
2) or the array only contains a single point (in which case, the array
will be reshaped to ``(1, D - 1)``).
:type x1: array-like
:param x2: Second input array. The same restrictions that apply to ``x1`` also
apply here.
:type x2: array-like
:param params: Hyperparameter array. Must be 1-D with length equal to the
               number of hyperparameters required by the kernel (see
               ``get_n_params``).
:type params: array-like
:returns: Array holding the Hessian of the pair-wise distances between points in arrays
``x1`` and ``x2`` with respect to the hyperparameters. Will be an array with
shape ``(D, D, n1, n2)``, where ``D`` is the length of ``params``, ``n1`` is
the length of the first axis of ``x1`` and ``n2`` is the length of the first
axis of ``x2``. The first two axes indicates the different derivative components
(i.e. the second derivative with respect to the first parameter is [0,0,:,:],
the mixed partial with respect to the first and second parameters is [0,1,:,:]
or [1,0,:,:], etc.)
:rtype: ndarray
"""
x1, n1, x2, n2, params, D = self._check_inputs(x1, x2, params)
r2_matrix = self.calc_r2(x1, x2, params)
dKdr2 = self.calc_dKdr2(r2_matrix)
d2Kdr22 = self.calc_d2Kdr22(r2_matrix)
dr2dtheta = self.calc_dr2dtheta(x1, x2, params)
d2r2dtheta2 = self.calc_d2r2dtheta2(x1, x2, params)
d2Kdtheta2 = (d2Kdr22 * dr2dtheta[:,np.newaxis,:,:] * dr2dtheta[np.newaxis,:,:,:] +
dKdr2 * d2r2dtheta2)
return d2Kdtheta2
class UniformKernel(KernelBase):
r"""
Kernel with a single correlation length
"""
def get_n_params(self, inputs):
"""
Determine number of correlation length parameters based on inputs
Determines the number of parameters required for a given set of inputs.
Returns the number of parameters as an integer.
:param inputs: Set of inputs for which the number of correlation length
parameters is desired.
:type inputs: ndarray
:returns: Number of correlation length parameters
:rtype: int
"""
return 1
def _check_inputs(self, x1, x2, params):
r"""
Common function for checking dimensions of inputs
This function checks the inputs to any kernel evaluation for consistency and ensures
that all input arrays have the correct dimensionality. It returns the reformatted
arrays, the number of inputs, and the number of hyperparameters. If the method
determines that the array dimensions are not all consistent with one another,
it will raise an ``AssertionError``. This method is called internally whenever
the kernel is evaluated.
:param x1: First parameter array. Should be a 1-D or 2-D array (1-D is acceptable
if each point has only a single input parameter).
:type x1: array-like
:param x2: Second parameter array. The same restrictions apply that hold for ``x1``
described above.
:type x2: array-like
:param params: Hyperparameter array. Must have length 1, since uniform
               kernels use a single correlation length.
:type params: array-like
:returns: A tuple containing the following: reformatted ``x1``, ``n1``, reformatted
``x2``, ``n2``, ``params``, and ``D``.
``n1``, ``n2``, and ``D`` will be integers.
"""
params = np.array(params)
assert params.ndim == 1, "parameters must be a vector"
D = len(params)
assert D == 1, "Uniform kernels only support a single correlation length"
x1 = np.array(x1)
assert x1.ndim == 1 or x1.ndim == 2, "bad number of dimensions in input x1"
if not x1.ndim == 2:
x1 = np.reshape(x1, (-1, 1))
n1 = x1.shape[0]
x2 = np.array(x2)
assert x2.ndim == 1 or x2.ndim == 2, "bad number of dimensions in input x2"
if not x2.ndim == 2:
x2 = np.reshape(x2, (-1, 1))
n2 = x2.shape[0]
assert x1.shape[1] == x2.shape[1], "Input arrays do not have the same number of inputs"
return x1, n1, x2, n2, params, D
def calc_r2(self, x1, x2, params):
r"""
Calculate squared distance between all pairs of points
This method computes the scaled squared Euclidean distance between all pairs
of points in ``x1`` and ``x2``.
For example, if
``x1 = [1.]``, ``x2 = [2.]``, and ``params = [2.]`` then ``calc_r2`` would
return :math:`{exp(2)*(1 - 2)^2 = exp(2)}`
as an array with shape ``(1,1)``.
:param x1: First input array. Must be a 1-D or 2-D array, with the length of
the last dimension matching the last dimension of ``x2`` and
one less than the length of ``params``. ``x1`` may be 1-D if either
each point consists of a single parameter (and ``params`` has length
2) or the array only contains a single point (in which case, the array
will be reshaped to ``(1, D - 1)``).
:type x1: array-like
:param x2: Second input array. The same restrictions that apply to ``x1`` also
apply here.
:type x2: array-like
:param params: Hyperparameter array. Must be 1-D with length 1 (a single
               correlation length).
:type params: array-like
:returns: Array holding all pair-wise squared distances between points in arrays ``x1``
and ``x2``. Will be an array with shape ``(n1, n2)``, where ``n1``
is the length of the first axis of ``x1`` and ``n2`` is the length
of the first axis of ``x2``.
:rtype: ndarray
"""
x1, n1, x2, n2, params, D = self._check_inputs(x1, x2, params)
exp_theta = np.exp(params)[0]
r2_matrix = np.sum(exp_theta*(x1[:, np.newaxis, :] - x2[np.newaxis, :, :])**2, axis=-1)
if np.any(np.isinf(r2_matrix)):
raise FloatingPointError("Inf enountered in kernel distance computation")
return r2_matrix
def calc_dr2dtheta(self, x1, x2, params):
r"""
Calculate the first derivative of the distance between all pairs of points with
respect to the hyperparameters
This method computes the derivative of the scaled Euclidean distance between
all pairs of points in ``x1`` and ``x2`` with respect to the hyperparameters.
The gradient is held in an array with shape ``(D, n1, n2)``, where ``D`` is
the length of ``params``, ``n1`` is the length of the first axis of ``x1``,
and ``n2`` is the length of the first axis of ``x2``. This is used in the
computation of the gradient and Hessian of the kernel. The first index
represents the different derivatives with respect to each hyperparameter.
:param x1: First input array. Must be a 1-D or 2-D array, with the length of
the last dimension matching the last dimension of ``x2`` and
one less than the length of ``params``. ``x1`` may be 1-D if either
each point consists of a single parameter (and ``params`` has length
2) or the array only contains a single point (in which case, the array
will be reshaped to ``(1, D - 1)``).
:type x1: array-like
:param x2: Second input array. The same restrictions that apply to ``x1`` also
apply here.
:type x2: array-like
:param params: Hyperparameter array. Must be 1-D with length 1 (a single
               correlation length).
:type params: array-like
:returns: Array holding the derivative of the pair-wise distances between
points in arrays ``x1`` and ``x2`` with respect to the hyperparameters.
Will be an array with shape ``(D, n1, n2)``, where ``D`` is the length
of ``params``, ``n1`` is the length of the first axis of ``x1`` and
``n2`` is the length of the first axis of ``x2``. The first axis
indicates the different derivative components (i.e. the derivative
with respect to the first parameter is [0,:,:], etc.)
:rtype: ndarray
"""
x1, n1, x2, n2, params, D = self._check_inputs(x1, x2, params)
return np.reshape(self.calc_r2(x1, x2, params), (1, n1, n2))
def calc_d2r2dtheta2(self, x1, x2, params):
r"""
Calculate all second derivatives of the distance between all pairs of points with
respect to the hyperparameters
This method computes all second derivatives of the scaled Euclidean distance
between all pairs of points in ``x1`` and ``x2`` with respect to the
hyperparameters. The gradient is held in an array with shape ``(D, D, n1, n2)``,
where ``D`` is the length of ``params``, ``n1`` is the length of the first axis
of ``x1``, and ``n2`` is the length of the first axis of ``x2``. This is used in
the computation of the gradient and Hessian of the kernel. The first two indices
represents the different derivatives with respect to each hyperparameter.
:param x1: First input array. Must be a 1-D or 2-D array, with the length of
the last dimension matching the last dimension of ``x2`` and
one less than the length of ``params``. ``x1`` may be 1-D if either
each point consists of a single parameter (and ``params`` has length
2) or the array only contains a single point (in which case, the array
will be reshaped to ``(1, D - 1)``).
:type x1: array-like
:param x2: Second input array. The same restrictions that apply to ``x1`` also
apply here.
:type x2: array-like
:param params: Hyperparameter array. Must be 1-D with length 1 (a single
               correlation length).
:type params: array-like
:returns: Array holding the second derivatives of the pair-wise distances between
points in arrays ``x1`` and ``x2`` with respect to the hyperparameters.
Will be an array with shape ``(D, D, n1, n2)``, where ``D`` is the length
of ``params``, ``n1`` is the length of the first axis of ``x1`` and
``n2`` is the length of the first axis of ``x2``. The first two axes
indicates the different derivative components (i.e. the second derivative
with respect to the first parameter is [0,0,:,:], the mixed partial with
respect to the first and second parameters is [0,1,:,:] or [1,0,:,:], etc.)
:rtype: ndarray
"""
x1, n1, x2, n2, params, D = self._check_inputs(x1, x2, params)
return np.reshape(self.calc_r2(x1, x2, params), (1, 1, n1, n2))
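# Hedged sketch (not from the source): with theta = 0 the scale exp(0) = 1,
# so calc_r2 reduces to the plain squared Euclidean distance.
#
#     k = UniformKernel()
#     k.calc_r2(np.array([[1.], [2.]]), np.array([[2.], [4.]]), [0.])
#     # -> array([[1., 9.], [0., 4.]])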
class StationaryKernel(KernelBase):
r"""
Generic class representing a stationary kernel
This base class implements the necessary scaffolding for defining a stationary kernel.
Stationary kernels are only dependent on a distance measure between any two points, so
the base class holds all the necessary information for doing the distance computation.
Individual subclasses will implement the functional dependence of the kernel on the
distance, plus first and second derivatives (if desired) to compute the gradient or
Hessian of the kernel with respect to the hyperparameters.
This implementation uses a scaled euclidean distance metric. Each individual parameter
has a hyperparameter scale associated with it that is used in the distance computation.
If a different metric is to be defined, a new base class needs to be defined that
implements the ``calc_r``, and optionally ``calc_drdtheta`` and ``calc_d2rdtheta2``
methods if gradient or Hessian computation is desired. The methods ``kernel_f``,
``kernel_gradient``, and ``kernel_hessian`` can then be used to compute the appropriate
quantities with no further modification.
Note that the Kernel object just collates all of the methods together; the class itself
does not hold any information on the data point or hyperparamters, which are passed
directly to the appropriate methods. Thus, no information needs to be provided when
creating a new ``Kernel`` instance.
"""
def calc_r2(self, x1, x2, params):
r"""
Calculate squared distance between all pairs of points
This method computes the scaled squared Euclidean distance between all pairs
of points in ``x1`` and ``x2``. Each squared component distance is multiplied
by the exponential of the corresponding hyperparameter before summing.
For example, if
``x1 = [1.]``, ``x2 = [2.]``, and ``params = [2.]`` then ``calc_r2`` would
return :math:`{exp(2)*(1 - 2)^2 = exp(2)}`
as an array with shape ``(1,1)``.
:param x1: First input array. Must be a 1-D or 2-D array, with the length of
the last dimension matching the last dimension of ``x2`` and
one less than the length of ``params``. ``x1`` may be 1-D if either
each point consists of a single parameter (and ``params`` has length
2) or the array only contains a single point (in which case, the array
will be reshaped to ``(1, D - 1)``).
:type x1: array-like
:param x2: Second input array. The same restrictions that apply to ``x1`` also
apply here.
:type x2: array-like
:param params: Hyperparameter array. Must be 1-D with length equal to the
               last dimension of ``x1`` and ``x2``.
:type params: array-like
:returns: Array holding all pair-wise squared distances between points in arrays ``x1``
and ``x2``. Will be an array with shape ``(n1, n2)``, where ``n1``
is the length of the first axis of ``x1`` and ``n2`` is the length
of the first axis of ``x2``.
:rtype: ndarray
"""
x1, n1, x2, n2, params, D = self._check_inputs(x1, x2, params)
exp_theta = np.exp(params)
r2_matrix = np.sum(exp_theta*(x1[:, np.newaxis, :] - x2[np.newaxis, :, :])**2, axis=-1)
if np.any(np.isinf(r2_matrix)):
raise FloatingPointError("Inf enountered in kernel distance computation")
return r2_matrix
def calc_dr2dtheta(self, x1, x2, params):
r"""
Calculate the first derivative of the distance between all pairs of points with
respect to the hyperparameters
This method computes the derivative of the scaled Euclidean distance between
all pairs of points in ``x1`` and ``x2`` with respect to the hyperparameters.
The gradient is held in an array with shape ``(D, n1, n2)``, where ``D`` is
the length of ``params``, ``n1`` is the length of the first axis of ``x1``,
and ``n2`` is the length of the first axis of ``x2``. This is used in the
computation of the gradient and Hessian of the kernel. The first index
represents the different derivatives with respect to each hyperparameter.
:param x1: First input array. Must be a 1-D or 2-D array, with the length of
the last dimension matching the last dimension of ``x2`` and
one less than the length of ``params``. ``x1`` may be 1-D if either
each point consists of a single parameter (and ``params`` has length
2) or the array only contains a single point (in which case, the array
will be reshaped to ``(1, D - 1)``).
:type x1: array-like
:param x2: Second input array. The same restrictions that apply to ``x1`` also
apply here.
:type x2: array-like
:param params: Hyperparameter array. Must be 1-D with length equal to the
               last dimension of ``x1`` and ``x2``.
:type params: array-like
:returns: Array holding the derivative of the pair-wise distances between
points in arrays ``x1`` and ``x2`` with respect to the hyperparameters.
Will be an array with shape ``(D, n1, n2)``, where ``D`` is the length
of ``params``, ``n1`` is the length of the first axis of ``x1`` and
``n2`` is the length of the first axis of ``x2``. The first axis
indicates the different derivative components (i.e. the derivative
with respect to the first parameter is [0,:,:], etc.)
:rtype: ndarray
"""
x1, n1, x2, n2, params, D = self._check_inputs(x1, x2, params)
exp_theta = np.exp(params)
dr2dtheta = np.transpose(exp_theta*(x1[:, np.newaxis, :] - x2[np.newaxis, :, :])**2,
(2, 0, 1))
return dr2dtheta
def calc_d2r2dtheta2(self, x1, x2, params):
r"""
Calculate all second derivatives of the distance between all pairs of points with
respect to the hyperparameters
This method computes all second derivatives of the scaled Euclidean distance
between all pairs of points in ``x1`` and ``x2`` with respect to the
hyperparameters. The gradient is held in an array with shape ``(D, D, n1, n2)``,
where ``D`` is the length of ``params``, ``n1`` is the length of the first axis
of ``x1``, and ``n2`` is the length of the first axis of ``x2``. This is used in
the computation of the gradient and Hessian of the kernel. The first two indices
represents the different derivatives with respect to each hyperparameter.
:param x1: First input array. Must be a 1-D or 2-D array, with the length of
the last dimension matching the last dimension of ``x2`` and
one less than the length of ``params``. ``x1`` may be 1-D if either
each point consists of a single parameter (and ``params`` has length
2) or the array only contains a single point (in which case, the array
will be reshaped to ``(1, D - 1)``).
:type x1: array-like
:param x2: Second input array. The same restrictions that apply to ``x1`` also
apply here.
:type x2: array-like
:param params: Hyperparameter array. Must be 1-D with length equal to the
               last dimension of ``x1`` and ``x2``.
:type params: array-like
:returns: Array holding the second derivatives of the pair-wise distances between
points in arrays ``x1`` and ``x2`` with respect to the hyperparameters.
Will be an array with shape ``(D, D, n1, n2)``, where ``D`` is the length
of ``params``, ``n1`` is the length of the first axis of ``x1`` and
``n2`` is the length of the first axis of ``x2``. The first two axes
indicates the different derivative components (i.e. the second derivative
with respect to the first parameter is [0,0,:,:], the mixed partial with
respect to the first and second parameters is [0,1,:,:] or [1,0,:,:], etc.)
:rtype: ndarray
"""
x1, n1, x2, n2, params, D = self._check_inputs(x1, x2, params)
d2r2dtheta2 = np.zeros((D, D, n1, n2))
idx = np.arange(D)
d2r2dtheta2[idx, idx] = self.calc_dr2dtheta(x1, x2, params)
return d2r2dtheta2
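# Hedged sanity-check sketch (not part of the original module): the analytic
# gradient of the scaled squared distance should agree with finite differences.
def _check_dr2dtheta_fd(eps=1e-6):
    """Compare ``StationaryKernel.calc_dr2dtheta`` against a central finite
    difference of ``calc_r2``."""
    k = StationaryKernel()
    x1 = np.array([[1.0, 2.0]])
    x2 = np.array([[2.0, 4.0]])
    theta = np.array([0.1, -0.3])
    analytic = k.calc_dr2dtheta(x1, x2, theta)
    for d in range(len(theta)):
        dth = np.zeros_like(theta)
        dth[d] = eps
        # Central difference approximation of d(r^2)/d(theta_d)
        numeric = (k.calc_r2(x1, x2, theta + dth) -
                   k.calc_r2(x1, x2, theta - dth)) / (2.0 * eps)
        assert np.allclose(analytic[d], numeric, atol=1e-5)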
class ProductKernel(KernelBase):
"Product form of kernel"
def calc_r2(self, x1, x2, params):
r"""
Calculate squared distance between all pairs of points
This method computes the scaled squared Euclidean distance between all pairs
of points in ``x1`` and ``x2`` along each axis. Each squared component
distance is multiplied by the exponential of the corresponding hyperparameter.
For example, if
``x1 = [[1., 2.]]``, ``x2 = [[2., 4.]]``, and ``params = [2., 2.]`` then
``calc_r2`` would return the array :math:`{[exp(2)*(1 - 2)^2, exp(2)*(2 - 4)^2]}`
as an array with shape ``(2, 1, 1)``.
:param x1: First input array. Must be a 1-D or 2-D array, with the length of
the last dimension matching the last dimension of ``x2`` and
one less than the length of ``params``. ``x1`` may be 1-D if either
each point consists of a single parameter (and ``params`` has length
2) or the array only contains a single point (in which case, the array
will be reshaped to ``(1, D)``).
:type x1: array-like
:param x2: Second input array. The same restrictions that apply to ``x1`` also
apply here.
:type x2: array-like
:param params: Hyperparameter array. Must be 1-D with length one greater than
the last dimension of ``x1`` and ``x2``.
:type params: array-like
:returns: Array holding all pair-wise squared distances between points in arrays ``x1``
and ``x2``. Will be an array with shape ``(D, n1, n2)``, where ``D`` is
the number of dimensions, ``n1`` is the length of the first axis of
``x1`` and ``n2`` is the length of the first axis of ``x2``.
:rtype: ndarray
"""
x1, n1, x2, n2, params, D = self._check_inputs(x1, x2, params)
exp_theta = np.exp(params)
r2_matrix = exp_theta[np.newaxis, np.newaxis, :]*(x1[:, np.newaxis, :] - x2[np.newaxis, :, :])**2
if np.any(np.isinf(r2_matrix)):
    raise FloatingPointError("Inf encountered in kernel distance computation")
return r2_matrix
###############################################################################
# Copyright (c) 2007-2018, National Research Foundation (Square Kilometre Array)
#
# Licensed under the BSD 3-Clause License (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy
# of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
"""Polynomial fitters.
:author: <NAME>
:license: Modified BSD
"""
from __future__ import division
from builtins import range
import numpy as np
import scipy
import scipy.interpolate
from .generic import ScatterFit, NotFittedError
from .linlstsq import LinearLeastSquaresFit
from .utils import offset_scale_mat
# ----------------------------------------------------------------------------------------------------------------------
# --- CLASS : Polynomial1DFit
# ----------------------------------------------------------------------------------------------------------------------
class Polynomial1DFit(ScatterFit):
"""Fit polynomial to 1-D data.
This is built on top of :class:`LinearLeastSquaresFit`. It improves on the
standard NumPy :func:`numpy.polyfit` routine by automatically centring the
data, handling measurement uncertainty and calculating the resulting
parameter covariance matrix.
Parameters
----------
max_degree : int, non-negative
Maximum polynomial degree to use (automatically reduced if there are
not enough data points)
rcond : float, optional
Relative condition number of fit (smallest singular value that will be
used to fit polynomial, has sensible default)
Attributes
----------
poly : array of float, shape (P,)
Polynomial coefficients (highest order first), set after :func:`fit`
cov_poly : array of float, shape (P, P)
Covariance matrix of coefficients, only set after :func:`fit`
"""
def __init__(self, max_degree, rcond=None):
ScatterFit.__init__(self)
self.max_degree = max_degree
self._lstsq = LinearLeastSquaresFit(rcond)
# The following attributes are only set after :func:`fit`
self.poly = None
self.cov_poly = None
self._mean = None
def _regressor(self, x):
"""Form normalised regressor / design matrix from input vector.
The design matrix is Vandermonde for polynomial regression.
Parameters
----------
x : array of float, shape (N,)
Input to function as a numpy array
Returns
-------
X : array of float, shape (P, N)
Regressor / design matrix to be used in least-squares fit
"""
return np.vander(x - self._mean, len(self.poly)).T
def fit(self, x, y, std_y=1.0):
"""Fit polynomial to data.
Parameters
----------
x : array-like, shape (N,)
Known input values as a 1-D numpy array or sequence
y : array-like, shape (N,)
Known output values as a 1-D numpy array, or sequence
std_y : float or array-like, shape (N,), optional
Measurement error or uncertainty of `y` values, expressed as
standard deviation in units of `y`
Returns
-------
self : :class:`Polynomial1DFit` object
Reference to self, to allow chaining of method calls
"""
# Upcast x and y to doubles, to ensure a high enough precision
# for the polynomial coefficients
x = np.atleast_1d(np.asarray(x, dtype='double'))
y = np.atleast_1d(np.asarray(y, dtype='double'))
# Polynomial fits perform better if input data is centred
# around origin [see numpy.polyfit help]
self._mean = x.mean()
# Reduce polynomial degree if there are not enough points to fit
# (degree should be < len(x))
degree = min(self.max_degree, len(x) - 1)
# Initialise parameter vector, as its length is used
# to create design matrix of right shape in _regressor
self.poly = np.zeros(degree + 1)
# Solve least-squares regression problem
self._lstsq.fit(self._regressor(x), y, std_y)
# Convert polynomial (and cov matrix) so that it applies
# to original unnormalised data
tfm = offset_scale_mat(len(self.poly), self._mean)
self.poly = np.dot(tfm, self._lstsq.params)
self.cov_poly = np.dot(tfm, np.dot(self._lstsq.cov_params, tfm.T))
return self
def __call__(self, x, full_output=False):
"""Evaluate polynomial on new data.
Parameters
----------
x : array-like of float, shape (M,)
Input to function as a 1-D numpy array, or sequence
full_output : {False, True}, optional
True if output uncertainty should also be returned
Returns
-------
y : array of float, shape (M,)
Output of function as a 1-D numpy array
std_y : array of float, shape (M,), optional
Uncertainty of function output, expressed as standard deviation
"""
x = np.atleast_1d(np.asarray(x))
if (self.poly is None) or (self._mean is None):
raise NotFittedError("Polynomial not fitted to data yet - "
"first call .fit method")
return self._lstsq(self._regressor(x), full_output)
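# Hedged usage sketch (not from the original module): recover quadratic
# coefficients from noiseless samples; ``poly`` is highest order first.
#
#     x = np.linspace(-1., 1., 20)
#     fit = Polynomial1DFit(max_degree=2).fit(x, 3. * x**2 - 2. * x + 1.)
#     fit.poly              # -> approximately [ 3., -2.,  1.]
#     fit(np.array([0.5]))  # -> approximately [0.75]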
# ----------------------------------------------------------------------------------------------------------------------
# --- CLASS : Polynomial2DFit
# ----------------------------------------------------------------------------------------------------------------------
class Polynomial2DFit(ScatterFit):
"""Fit polynomial to 2-D data.
This models the one-dimensional (scalar) `y` data as a polynomial function
of the two-dimensional (vector) `x` data. The 2-D polynomial has
P = (degrees[0] + 1) * (degrees[1] + 1) coefficients. This fitter is built
on top of :class:`LinearLeastSquaresFit`.
Parameters
----------
degrees : list of 2 ints
Non-negative polynomial degree to use for each dimension of *x*
rcond : float, optional
Relative condition number of fit (smallest singular value that will be
used to fit polynomial, has sensible default)
Attributes
----------
poly : array of float, shape (P,)
Polynomial coefficients (highest order first), set after :func:`fit`
cov_poly : array of float, shape (P, P)
Covariance matrix of coefficients, only set after :func:`fit`
"""
def __init__(self, degrees, rcond=None):
ScatterFit.__init__(self)
self.degrees = degrees
# Underlying least-squares fitter
self._lstsq = LinearLeastSquaresFit(rcond)
# The following attributes are only set after :func:`fit`
self.poly = None
self.cov_poly = None
self._mean = None
self._scale = None
def _regressor(self, x):
"""Form normalised regressor / design matrix from set of input vectors.
Parameters
----------
x : array of float, shape (2, N)
Input to function as a 2-D numpy array
Returns
-------
X : array of float, shape (P, N)
Regressor / design matrix to be used in least-squares fit
Notes
-----
This normalises the 2-D input vectors by centering and scaling them.
It then forms a regressor matrix with a column per input vector. Each
column is given by the outer product of the monomials of the first
dimension with the monomials of the second dimension of the input
vector, in decreasing polynomial order. For example, if *degrees* is
(1, 2) and the normalised elements of each input vector in *x* are
*x_0* and *x_1*, respectively, the column takes the form::
outer([x_0, 1], [x1 ^ 2, x1, 1])
= [x_0 * x_1 ^ 2, x_0 * x_1, x_0 * 1, 1 * x_1 ^ 2, 1 * x_1, 1 * 1]
= [x_0 * x_1 ^ 2, x_0 * x_1, x_0, x_1 ^ 2, x_1, 1]
This is closely related to the Vandermonde matrix of *x*.
"""
x_norm = (x - self._mean[:, np.newaxis]) / self._scale[:, np.newaxis]
v1 = np.vander(x_norm[0], self.degrees[0] + 1)
v2 = np.vander(x_norm[1], self.degrees[1] + 1).T
return np.vstack([v1[:, n][np.newaxis, :] * v2
for n in range(v1.shape[1])])
def fit(self, x, y, std_y=1.0):
"""Fit polynomial to data.
This fits a polynomial defined on 2-D data to the provided (x, y)
pairs. The 2-D *x* coordinates do not have to lie on a regular grid,
and can be in any order.
Parameters
----------
x : array-like, shape (2, N)
Known input values as a 2-D numpy array, or sequence
y : array-like, shape (N,)
Known output values as a 1-D numpy array, or sequence
std_y : float or array-like, shape (N,), optional
Measurement error or uncertainty of `y` values, expressed as
standard deviation in units of `y`
Returns
-------
self : :class:`Polynomial2DFit` object
Reference to self, to allow chaining of method calls
"""
# Upcast x and y to doubles, to ensure a high enough precision
# for the polynomial coefficients
x = np.atleast_2d(np.array(x, dtype='double'))
y = np.atleast_1d(np.array(y, dtype='double'))
# Polynomial fits perform better if input data is centred
# around origin and scaled [see numpy.polyfit help]
self._mean = x.mean(axis=1)
self._scale = np.abs(x - self._mean[:, np.newaxis]).max(axis=1)
self._scale[self._scale == 0.0] = 1.0
# Solve least squares regression problem
self._lstsq.fit(self._regressor(x), y, std_y)
# Convert polynomial (and cov matrix) so that it applies
# to original unnormalised data
tfm0 = offset_scale_mat(self.degrees[0] + 1, self._mean[0],
self._scale[0])
tfm1 = offset_scale_mat(self.degrees[1] + 1, self._mean[1],
self._scale[1])
tfm = np.kron(tfm0, tfm1)
self.poly = np.dot(tfm, self._lstsq.params)
self.cov_poly = np.dot(tfm, np.dot(self._lstsq.cov_params, tfm.T))
return self
def __call__(self, x, full_output=False):
"""Evaluate polynomial on new data.
Parameters
----------
x : array-like, shape (2, M)
Input to function as a 2-D numpy array, or sequence
full_output : {False, True}, optional
True if output uncertainty should also be returned
Returns
-------
y : array, shape (M,)
Output of function as a 1-D numpy array
std_y : array of float, shape (M,), optional
Uncertainty of function output, expressed as standard deviation
"""
x = np.atleast_2d(np.asarray(x))
if ((self.poly is None) or (self._mean is None) or
(self._scale is None)):
raise NotFittedError("Polynomial not fitted to data yet - "
"first call .fit method")
return self._lstsq(self._regressor(x), full_output)
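# A minimal usage sketch for Polynomial2DFit (illustrative only; the sample
# data and names below are examples, not part of the original module):
def _example_polynomial2d_fit():
    """Fit a 2-D polynomial to synthetic data and evaluate it."""
    x = np.random.uniform(-1.0, 1.0, size=(2, 200))
    y = 1.0 - 2.0 * x[0] + 0.5 * x[1] ** 2
    interp = Polynomial2DFit(degrees=(1, 2)).fit(x, y)
    # full_output=True also returns the standard deviation of the output
    y_new, std_y = interp(x, full_output=True)
    return interp.poly, y_new, std_y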
# ----------------------------------------------------------------------------------------------------------------------
# --- CLASS : PiecewisePolynomial1DFit
# ----------------------------------------------------------------------------------------------------------------------
def _stepwise_interp(xi, yi, x):
"""Step-wise interpolate (or extrapolate) (xi, yi) values to x positions.
Given a set of N ``(x, y)`` points, provided in the *xi* and *yi* arrays,
this will calculate ``y``-coordinate values for a set of M
``x``-coordinates provided in the *x* array, using step-wise (zeroth-order)
interpolation and extrapolation.
The input *x* coordinates are compared to the fixed *xi* values, and the
largest *xi* value smaller than or approximately equal to each *x* value is
selected. The corresponding *yi* value is then returned. For *x* values
below the entire set of *xi* values, the smallest *xi* value is selected.
The steps of the interpolation therefore start at each *xi* value and
extend to the right (above it) until the next bigger *xi*, except for the
first step, which extends to the left (below it) as well, and the last
step, which extends to positive infinity.
Parameters
----------
xi : array, shape (N,)
Array of fixed x-coordinates, sorted in ascending order and with no
duplicate values
yi : array, shape (N,)
Corresponding array of fixed y-coordinates
x : float or array, shape (M,)
Array of x-coordinates at which to do interpolation of y-values
Returns
-------
y : float or array, shape (M,)
Array of interpolated y-values
Notes
-----
The equality check of *x* values is approximate on purpose, to handle some
degree of numerical imprecision in floating-point values. This is important
for step-wise interpolation, as there are potentially large discontinuities
in *y* at the *xi* values, which makes it sensitive to small mismatches in
*x*. For continuous interpolation (linear and up) this is unnecessary.
"""
# Find lowest xi value >= x (end of segment containing x)
end = np.atleast_1d(xi.searchsorted(x))
# Associate any x smaller than smallest xi with closest segment (first one)
# This extends the first step to -inf on the left
end[end == 0] += 1
start = end - 1
# *After* setting segment starts, associate any x bigger than biggest xi
# with the last segment (order important, otherwise last segment
# will be ignored)
end[end == len(xi)] -= 1
# First get largest "equality" difference tolerated for x and xi
# (set to zero for integer types)
try:
# pylint: disable-msg=E1101
xi_smallest_diff = 20 * np.finfo(xi.dtype).resolution
except ValueError:
xi_smallest_diff = 0
try:
# pylint: disable-msg=E1101
x_smallest_diff = 20 * np.finfo(x.dtype).resolution
except ValueError:
x_smallest_diff = 0
smallest_diff = max(x_smallest_diff, xi_smallest_diff)
# Find x that are exactly equal to some xi or slightly below it,
# which will assign it to the wrong segment
equal_or_just_below = xi[end] - x < smallest_diff
# Move these segments one higher (except for the last one, which stays put)
start[equal_or_just_below] = end[equal_or_just_below]
# Ensure that output y has same shape as input x
# (especially, let scalar input result in scalar output)
start = np.reshape(start, np.shape(x))
return yi[start]
def _linear_interp(xi, yi, x):
"""Linearly interpolate (or extrapolate) (xi, yi) values to x positions.
Given a set of N ``(x, y)`` points, provided in the *xi* and *yi* arrays,
this will calculate ``y``-coordinate values for a set of M
``x``-coordinates provided in the *x* array, using linear interpolation
and extrapolation.
Parameters
----------
xi : array, shape (N,)
Array of fixed x-coordinates, sorted in ascending order and with no
duplicate values
yi : array, shape (N,)
Corresponding array of fixed y-coordinates
x : float or array, shape (M,)
Array of x-coordinates at which to do interpolation of y-values
Returns
-------
y : float or array, shape (M,)
Array of interpolated y-values
"""
# Find lowest xi value >= x (end of segment containing x)
end = np.atleast_1d(xi.searchsorted(x))
# Associate any x found outside xi range with closest segment (first or
# last one). This linearly extrapolates the first and last segment
# to -inf and +inf, respectively.
end[end == 0] += 1
end[end == len(xi)] -= 1
start = end - 1
# Ensure that output y has same shape as input x
# (especially, let scalar input result in scalar output)
start, end = np.reshape(start, np.shape(x)), np.reshape(end, np.shape(x))
# Set up weight such that xi[start] => 0 and xi[end] => 1
end_weight = (x - xi[start]) / (xi[end] - xi[start])
return (1.0 - end_weight) * yi[start] + end_weight * yi[end]
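# A short sketch contrasting the two helpers above (illustrative only):
# step-wise interpolation holds each yi constant over its segment, while
# linear interpolation also extrapolates beyond the ends of xi.
def _example_interpolation():
    xi = np.array([0.0, 1.0, 2.0])
    yi = np.array([10.0, 20.0, 40.0])
    x = np.array([-0.5, 0.5, 1.5, 2.5])
    y_step = _stepwise_interp(xi, yi, x)  # -> [10., 10., 20., 40.]
    y_lin = _linear_interp(xi, yi, x)     # -> [5., 15., 30., 50.]
    return y_step, y_lin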
class PiecewisePolynomial1DFit(ScatterFit):
"""Fit piecewise polynomial to 1-D data.
This fits a series of polynomials between adjacent points in a
one-dimensional data set. The resulting piecewise polynomial curve passes
exactly through the given data points and may also match the local gradient
at each point if the maximum polynomial degree *max_degree* is at least 3.
If *max_degree* is 0, step-wise interpolation is done between the points in
the data set. Each input *x* value is assigned the *y* value of the largest
*x* value in the data set that is smaller than or equal to the input *x*.
If the input *x* is smaller than all the *x* values in the data set, the
*y* value of the smallest data set *x* value is chosen instead.
If *max_degree* is 1, linear interpolation is done. The resulting curve is
continuous but has sharp corners at the data points. If *max_degree* is 3,
cubic interpolation is used and the resulting curve is smooth (up to the
first derivative).
This should primarily be used for interpolation between points and not for
extrapolation outside the data range, which could lead to wildly inaccurate
results (especially if *max_degree* is high).
Parameters
----------
max_degree : int
Maximum polynomial degree (>= 0) to use in each segment between data
points (automatically reduced if there are not enough data points or
where derivatives are not available, such as in the first and last
segment)
Notes
-----
This is based on :class:`scipy.interpolate.PiecewisePolynomial`.
"""
def __init__(self, max_degree=3):
ScatterFit.__init__(self)
self.max_degree = max_degree
self._poly = None
def fit(self, x, y):
"""Fit piecewise polynomial to data.
Parameters
----------
x : array-like, shape (N,)
Known input values as a 1-D numpy array or sequence
y : array-like, shape (N,)
Known output values as a 1-D numpy array, or sequence
Returns
-------
self : :class:`PiecewisePolynomial1DFit` object
Reference to self, to allow chaining of method calls
Raises
------
ValueError
If *x* contains duplicate values, which leads to infinite gradients
"""
# Upcast x and y to doubles, to ensure a high enough precision
# for the polynomial coefficients
x = np.atleast_1d(np.array(x, dtype='double'))
# Only upcast y if numerical interpolation will actually happen -
# since stepwise interpolation simply copies y values, this allows
# interpolation of non-numeric types (e.g. strings)
if (len(x) == 1) or (self.max_degree == 0):
y = np.atleast_1d(y)
else:
y = np.atleast_1d(np.array(y, dtype='double'))
# Sort x in ascending order, as PiecewisePolynomial expects sorted data
x_ind = np.argsort(x)
x, y = x[x_ind], y[x_ind]
# This list will contain y values and corresponding derivatives
y_list = np.atleast_2d(y).transpose().tolist()
if np.any(np.diff(x) == 0.0):
raise ValueError("Two consecutive points have the same x-coordinate, "
"which leads to infinite gradients")
"""
render_lt_text is inspired by the example code of FreeType-py
* https://github.com/rougier/freetype-py/tree/master/examples
freetype-py is licensed under the terms of the new or revised BSD license, as
follows:
Copyright (c) 2011-2014, <NAME>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
Neither the name of the freetype-py Development Team nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The example code demonstrates how freetype-py works, it is then
adapted by Haozhe in order to enable various pre-rasterization
transformations.
"""
import math
import numpy as np
from freetype import *
from PIL import Image
from numba import jit
_lt_keywords = ["rotation", "shear_x", "shear_y", "scale_x", "scale_y"]
_lt_keywords += ["alpha", "beta", "gamma", "delta"]
_default_1_keywords = {"scale_x": 3, "scale_y": 4, "alpha": 5, "delta": 8}
_warning_square_layout = "Set margins[0] + margins[2] == margins[1] + margins[3] "
_warning_square_layout += "if you want to ensure square layout."
def param_dict2list(param_dict):
'''
Convert a transform parameter dict into a flat parameter list, using
either the low-level ("a", "b", "d", "e") or the high-level
(rotation/shear/scale/alpha...delta) parameterization.
param_dict: dict
'''
if param_dict is None or len(param_dict) == 0:
return [1, 0, 0, 1]
# normalize keywords
for keyword in list(param_dict):  # copy keys, since the dict is mutated below
if len(keyword) >= 2 and keyword[-1] == "_" and keyword[-2] != "_":
param_dict[keyword[:-1]] = param_dict.pop(keyword)
res = []
if "a" in param_dict or "b" in param_dict or "d" in param_dict or "e" in param_dict:
# low level parameterization
for keyword in ["a", "b", "d", "e"]:
if keyword in param_dict:
res.append(param_dict[keyword])
elif keyword in ["a", "e"]:
res.append(1)
else:
res.append(0)
else:
# high level parameterization
for keyword in _lt_keywords:
if keyword in param_dict:
res.append(param_dict[keyword])
else:
res.append(0)
for key, value in _default_1_keywords.items():
if key not in param_dict:
res[value] = 1
return res
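# A minimal sketch of the dict-to-list conversion above (illustrative only):
# trailing underscores such as in "scale_x_" are stripped, and absent
# high-level keys fall back to their defaults (scales and alpha/delta to 1).
def _example_param_dict2list():
    result = param_dict2list({"rotation": 30, "scale_x_": 2})
    assert result == [30, 0, 0, 2, 1, 1, 0, 0, 1]
    return result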
def linear_transform_parameter_formatter(rotation=0,
shear_x=0,
shear_y=0,
scale_x=1,
scale_y=1,
alpha_=1,
beta_=0,
gamma_=0,
delta_=1):
'''
rotation : float
Rotation in degrees
'''
# convert degree to radian
rotation = (rotation / 180.0) * math.pi
cos = math.cos(rotation)
sin = math.sin(rotation)
a_ = scale_x * alpha_ * cos + scale_x * gamma_ * shear_x * cos
a_ += - scale_x * alpha_ * shear_y * sin - scale_x * gamma_ * sin
b_ = scale_y * beta_ * cos + scale_y * delta_ * shear_x * cos
b_ += - scale_y * beta_ * shear_y * sin - scale_y * delta_ * sin
d_ = scale_x * alpha_ * sin + scale_x * gamma_ * shear_x * sin
d_ += scale_x * alpha_ * shear_y * cos + scale_x * gamma_ * cos
e_ = scale_y * beta_ * sin + scale_y * delta_ * shear_x * sin
e_ += scale_y * beta_ * shear_y * cos + scale_y * delta_ * cos
return a_, b_, d_, e_
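# A quick sanity-check sketch (not part of the original module): with the
# default shears, scales and alpha/beta/gamma/delta, the formulas above
# reduce to the standard 2-D rotation matrix [[cos, -sin], [sin, cos]].
def _example_pure_rotation(angle_deg=30):
    a_, b_, d_, e_ = linear_transform_parameter_formatter(rotation=angle_deg)
    theta = (angle_deg / 180.0) * math.pi
    assert math.isclose(a_, math.cos(theta))
    assert math.isclose(b_, -math.sin(theta))
    assert math.isclose(d_, math.sin(theta))
    assert math.isclose(e_, math.cos(theta))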
@jit(nopython=True)
def _fill_data(bitmap_buffer, rows, width, pitch):
data = []
for j in range(rows):
data.extend(bitmap_buffer[j * pitch : j * pitch + width])
return data
def render_lt_text(text, font_file_path, transform_param=None,
font_size=192, font_weight=None, stroke_radius=None,
pre_elastic=None, stretch_ascender=None,
stretch_descender=None):
'''
render initial text (after pre-rasterization transformation)
Parameters:
-----------
text : string
Text to be displayed
font_file_path : string
Path to a font file
transform_param: tuple
linear transformation parameters, 4 real numbers
font_size : int
Font size in points (converted internally to 26.6 fixed-point format)
font_weight : int, optional
Design-axis weight coordinate, applied only to variable fonts
pre_elastic: float in the range [0, 1)
Pre-rasterization elastic transformation.
The actual elastic variation range depends on the product of
the range of original anchor points and the value of pre_elastic.
If this value is too high, the visual effect may not be good.
It is recommended that pre_elastic <= 0.04.
pre_elastic=0.1 can already lead to unrecognizable text.
stretch_ascender: int, positive or negative.
The effect of hundreds is sometimes invisible.
stretch_descender: int, positive or negative.
The effect of hundreds is sometimes invisible.
'''
if isinstance(transform_param, dict):
transform_param = param_dict2list(transform_param)
if transform_param is None:
# identity transform
a_, b_, d_, e_ = 1, 0, 0, 1
elif not hasattr(transform_param, "__len__"):
# rotation only
a_, b_, d_, e_ = linear_transform_parameter_formatter(transform_param, 0, 0, 1, 1)
elif len(transform_param) == 0:
# identity transform
a_, b_, d_, e_ = 1, 0, 0, 1
elif len(transform_param) == 1:
# rotation only
a_, b_, d_, e_ = linear_transform_parameter_formatter(transform_param[0], 0, 0, 1, 1)
elif len(transform_param) == 4:
# a_, b_, d_, e_, low level parameterization
a_, b_, d_, e_ = transform_param
elif len(transform_param) == 5 or len(transform_param) == 9:
# rotation, shear_x, shear_y, scale_x, scale_y, alpha_, beta_, gamma_, delta_,
# high level parameterization
a_, b_, d_, e_ = linear_transform_parameter_formatter(*transform_param)
else:
raise Exception("Wrong transform parameter format.")
assert a_ * e_ - b_ * d_ != 0, "Transform is not invertible."
manipulate_anchor_points = False
if pre_elastic is not None:
assert (isinstance(pre_elastic, float) or pre_elastic == 0) \
and pre_elastic >= 0 and pre_elastic < 1
manipulate_anchor_points = True
if stretch_ascender is not None:
assert isinstance(stretch_ascender, int)
manipulate_anchor_points = True
if stretch_descender is not None:
assert isinstance(stretch_descender, int)
manipulate_anchor_points = True
face = Face(font_file_path)
# 64 == 2^6, 26.6 fixed-point format
face.set_char_size(font_size * 64)
# set font weight (stroke width) for variable fonts
if font_weight is not None and face.has_multiple_masters:
face.set_var_design_coords((font_weight,))
# 0x10000 == 65536 == 2^16, 16.16 fixed-point format
matrix = FT_Matrix(int(a_ * 65536), int(b_ * 65536),
int(d_ * 65536), int(e_ * 65536))
pen = FT_Vector(0, 0)
FT_Set_Transform(face._FT_Face, byref(matrix), byref(pen))
flags = FT_LOAD_NO_BITMAP # FT_LOAD_RENDER
previous = 0
xmin, xmax = 0, 0
ymin, ymax = 0, 0
delta_xy = []
# First pass to compute bbox
for idx, c in enumerate(text):
face.load_char(c, flags)
if pre_elastic:
pxmin, pymin, pxmax, pymax = get_anchor_point_statistics(c, face)
elastic_limit = int((pxmax - pxmin + pymax - pymin) / 2 * pre_elastic)
if manipulate_anchor_points:
delta_xy.append([])
for i in range(face.glyph.outline._FT_Outline.n_points):
x_delta, y_delta = 0, 0
if stretch_ascender:
# stretch ascender
if face.glyph.outline._FT_Outline.points[i].y >= face.ascender:
y_delta += stretch_ascender
if stretch_descender:
# stretch descender
if face.glyph.outline._FT_Outline.points[i].y <= face.descender:
y_delta -= stretch_descender
if pre_elastic:
# pre-rasterization elastic transformation
x_delta += np.random.randint(- elastic_limit, elastic_limit + 1)
y_delta += np.random.randint(- elastic_limit, elastic_limit + 1)
face.glyph.outline._FT_Outline.points[i].x += x_delta
face.glyph.outline._FT_Outline.points[i].y += y_delta
delta_xy[idx].append((x_delta, y_delta))
ret = FT_Render_Glyph(face.glyph._FT_GlyphSlot, FT_RENDER_MODE_NORMAL)
assert ret == 0, """Cannot render the transformed glyph. Please decrease
the value of pre-rasterization transformation."""
kerning = face.get_kerning(previous, c)
previous = c
pitch = face.glyph.bitmap.pitch
width = face.glyph.bitmap.width
rows = face.glyph.bitmap.rows
top = face.glyph.bitmap_top
left = face.glyph.bitmap_left
pen.x += kerning.x
x0 = (pen.x >> 6) + left
x1 = x0 + width
y0 = (pen.y >> 6) - (rows - top)
y1 = y0 + rows
xmin, xmax = min(xmin, x0), max(xmax, x1)
ymin, ymax = min(ymin, y0), max(ymax, y1)
pen.x += face.glyph.advance.x
pen.y += face.glyph.advance.y
L = np.zeros((ymax - ymin, xmax - xmin),dtype=np.ubyte)
previous = 0
pen.x, pen.y = 0, 0
# Second pass for actual rendering
for idx, c in enumerate(text):
face.load_char(c, flags)
if manipulate_anchor_points:
for i in range(face.glyph.outline._FT_Outline.n_points):
face.glyph.outline._FT_Outline.points[i].x += delta_xy[idx][i][0]
face.glyph.outline._FT_Outline.points[i].y += delta_xy[idx][i][1]
ret = FT_Render_Glyph(face.glyph._FT_GlyphSlot, FT_RENDER_MODE_NORMAL)
assert ret == 0, """Cannot render the transformed glyph. Please decrease
the value of pre-rasterization transformation."""
kerning = face.get_kerning(previous, c)
previous = c
bitmap = face.glyph.bitmap
pitch = face.glyph.bitmap.pitch
width = face.glyph.bitmap.width
rows = face.glyph.bitmap.rows
top = face.glyph.bitmap_top
left = face.glyph.bitmap_left
pen.x += kerning.x
x = (pen.x >> 6) - xmin + left
y = (pen.y >> 6) - ymin - (rows - top)
data = _fill_data(np.array(bitmap.buffer), rows, width, pitch)
'''
###############################################################################
FIT POLYNOMIAL MODULE
###############################################################################
This module contains the following functions:
Polynomial fits
---------------
> fit_poly(x,y)
> fit_polynomial(data)
> polyfit2d(x, y, z, deg)
> polyfit2sep(x, y, z)
Polynomial phase fits
---------------------
> fit_phase(w,Ew)
> fit_marginal_phase(X,Y,Z)
> polyfit2phase(Wi,Ws,Fk)
> weighted_polyfit2phase(Wi,Ws,Fk)
'''
import numpy as np
import numpy.polynomial.polynomial as poly
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit, least_squares
from skimage.restoration import unwrap_phase
from .functions import *
from .stats import *
from .fitGaussian import *
'''
-------------------------------------------------------------------------------
Polynomial fits
-------------------------------------------------------------------------------
'''
def fit_poly(x,y,
p0=None,fit='poly3'):
'''
Fit 1-D data y(x) to a polynomial of degree 1, 2 or 3.
Parameters
----------
x:ndarray
y:ndarray
p0: ndarray,optional
Initial guess for fit parameters.
fit: {'poly3','poly2','poly1'}, optional
Specify the degree of the polynomial to fit to.
Returns
----------
out:ndarray
Optimal fit parameters.
'''
# use a local name to avoid shadowing the numpy.polynomial alias 'poly'
if fit=='poly3':
fit_func=poly3
elif fit=='poly2':
fit_func=poly2
elif fit=='poly1':
fit_func=poly1
popt,_=curve_fit(fit_func,x,y,p0=p0)
return popt
def fit_polynomial(data,
p0=None):
n=np.shape(data)[0]
if n==2:
x,y=data
res=least_squares(poly_cost,p0,args=(x,y))
return res
elif n==3:
X,Y,Z=data
res=least_squares(poly2D_cost,p0,args=(X.reshape(-1),Y.reshape(-1),Z.reshape(-1)))
return res
def polyfit2d(x, y, z, deg):
'''
Least-squares fit of a 2D polynomial to data. Uses Vandermonde
matrices.
Return the coefficients of a polynomial of degree deg that is the
least squares fit to the data values z given at points (x,y). Similar
to numpy.polynomial.polynomial.polyfit but for 2D polynomials.
Parameters
----------
x : array_like, shape (M,)
x-coordinates of the M sample (data) points (x[i], y[i], z[i]).
y : array_like, shape (M,)
y-coordinates of the M sample (data) points (x[i], y[i], z[i]).
z: array_like, shape (M,)
z-coordinates of the sample (data) points (x[i], y[i], z[i]).
deg : 1-D array_like
Degree(s) of the fitting polynomials.
Returns
----------
coef : ndarray, shape (deg[0] + 1, deg[1] +1)
Polynomial coefficients ordered from low to high.
'''
#DATA
x = np.asarray(x)
y = np.asarray(y)
z = np.asarray(z)
#Degrees of the polynomial
deg = np.asarray(deg)
vander = poly.polyvander2d(x, y, deg)
vander = vander.reshape((-1,vander.shape[-1]))
z = z.reshape((vander.shape[0],))
c, r, rank, s = np.linalg.lstsq(vander, z,rcond=None)
return c.reshape(deg+1),r,rank,s
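# A minimal usage sketch for polyfit2d (illustrative only): recover the
# coefficients of a known bilinear surface. coef[i, j] multiplies x**i * y**j.
def _example_polyfit2d():
    x = np.random.uniform(-1, 1, 400)
    y = np.random.uniform(-1, 1, 400)
    z = 2.0 + 0.5 * x - 1.5 * y + 0.25 * x * y
    coef, r, rank, s = polyfit2d(x, y, z, deg=[1, 1])
    return coef  # approximately [[2.0, -1.5], [0.5, 0.25]]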
def polyfit2sep(x,y,z,
deg=3):
'''
Least-squares fit of a 2D separable polynomial. Fit to a specific
polynomial.
Parameters
----------
x : array_like, shape (M,)
x-coordinates of the M sample (data) points (x[i], y[i], z[i]).
y : array_like, shape (M,)
y-coordinates of the M sample (data) points (x[i], y[i], z[i]).
z: array_like, shape (M,)
z-coordinates of the sample (data) points (x[i], y[i], z[i]).
'''
p=np.zeros((4,4))
B = z
#A = np.array([x*0+1, x, x**2, x**3, y ,y**2, y**3, x*y]).T
#Deg 2
if deg==2:
A = np.array([x*0+1, x, x**2, y ,y**2]).T
c, r, rank, s = np.linalg.lstsq(A, B,rcond=None)
#Create polynomial
p[:,0]=[c[0],c[1],c[2],0]
p[0,:]=[c[0],c[3],c[4],0]
#Deg 3
elif deg==3:
A = np.array([x*0+1, x, x**2, x**3, y ,y**2, y**3]).T
c, r, rank, s = np.linalg.lstsq(A, B,rcond=None)
#Create polynomial
p[:,0]=[c[0],c[1],c[2],c[3]]
p[0,:]=[c[0],c[4],c[5],c[6]]
return p, r, rank, s
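# A minimal sketch for polyfit2sep (illustrative only): fit a separable
# surface z = f(x) + g(y). The x-polynomial ends up in p[:, 0] and the
# y-polynomial in p[0, :] (low to high order), sharing the constant p[0, 0].
def _example_polyfit2sep():
    x = np.random.uniform(-1, 1, 500)
    y = np.random.uniform(-1, 1, 500)
    z = 1.0 + 0.5 * x ** 2 - 0.3 * y ** 3
    p, r, rank, s = polyfit2sep(x, y, z, deg=3)
    return p  # usable directly with poly.polyval2d(x, y, p)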
'''
-------------------------------------------------------------------------------
Polynomial phase fits
-------------------------------------------------------------------------------
'''
def fit_phase(w,Ew,
fit='poly',p0=None,deg=3,cutoff=None,xlim=[],plot=False):
'''
Fit the spectral phase of a complex field Ew to a polynomial of the
specified degree.
Parameters
----------
w: ndarray
Frequency 1D array.
Ew: complex ndarray
Complex field amplitude.
fit:{'poly','poly3','poly2'}
Polynomial fitting function.
cutoff: float, optional
Cutoff for fit in percentage of field intensity.
xlim: [xmin,xmax], optional
Bounds for fitting.
Returns
----------
out:ndarray
Array of optimal fit parameters.
'''
phase=np.unwrap(np.angle(Ew))
if (cutoff is not None) and (len(xlim)!=0):
xmin,xmax=xlim
roi=np.where((np.abs(Ew)**2/np.max(np.abs(Ew)**2)>cutoff) &(w>xmin) & (w<xmax))
elif cutoff is not None:
roi=np.where(np.abs(Ew)**2/np.max(np.abs(Ew)**2)>cutoff)
elif len(xlim)!=0:
xmin,xmax=xlim
roi=np.where((w>xmin) & (w<xmax))
else:
roi=np.where(np.abs(Ew)**2)
if fit=='poly3':
popt,_=curve_fit(poly3,w[roi],phase[roi],p0=p0)
elif fit=='poly2':
popt,_=curve_fit(poly2,w[roi],phase[roi],p0=p0)
elif fit=='poly':
fitg=fit_gaussian((w,np.abs(Ew)**2))
w0=fitg.x[1]
popt=poly.polyfit(w[roi]-w0,phase[roi],deg)
if plot==True:
fig,ax=plt.subplots(1,1,figsize=(5,4))
ax.plot(w,np.abs(Ew)**2)
ax2=ax.twinx()
ax2.plot(w,np.unwrap(np.angle(Ew)),'--')
if fit!='poly':
ax2.plot(w[roi],fpoly(w[roi],*popt))
else:
ax2.plot(w[roi],poly.polyval(w[roi]-w0,popt))
ax.set_title(r'$E(\omega)$')
ax.set_xlabel(r'Frequency (fs$^-1$)')
ax.set_ylabel(r'Amplitude')
ax2.set_ylabel(r'Phase')
plt.tight_layout()
plt.show()
print('Optimal parameters: ',popt)
return popt
def fit_marginal_phase(X,Y,Z,
index=[],tol=0.1,p0=[None,None],plots=True,fits=True):
phase=unwrap_phase(np.angle(Z))
phase[np.where(np.abs(Z)**2/np.max(np.abs(Z)**2)<=tol)]=0
#phase=unwrap_phase(phase)
#phase=Z
#Average non-zero elements
xm=X[0,:]
ym=Y[:,0]
#zmx=unwrap(angle.sum(0)/((angle!=0).sum(0)+0.001))
#zmx=unwrap(angle.sum(0))#/((angle!=0).sum(0)+0.001)
#zm=(angle.sum(0))#/((angle!=0).sum(0)+0.001)
#zm=np.unwrap(angle).sum(0)/((angle!=0).sum(0)+0.001)
#zmy=np.unwrap(angle.sum(1))#/((angle!=0).sum(0)+0.001)
#zmx=np.unwrap(np.unwrap(angle,axis=0).sum(0))#/((angle!=0).sum(0)+0.001)
#zmy=np.unwrap(np.unwrap(angle,axis=1).sum(1))#/((angle!=0).sum(0)+0.001)
zmx=np.trapz(phase,x=ym,axis=0)/(ym.max()-ym.min())
zmy=np.trapz(phase,x=xm,axis=1)/(xm.max()-xm.min())
zmx=np.trapz(phase,x=ym,axis=0)/(np.diff(ym)[0]*(phase!=0).sum(0))
zmy=np.trapz(phase,x=xm,axis=1)/(np.diff(xm)[0]*(phase!=0).sum(1))
zmx[np.where(np.isnan(zmx))]=0
zmy[np.where(np.isnan(zmy))]=0
roix=np.where(zmx)
roiy=np.where(zmy)
#zmx=phase.sum(0)
#zmy=phase.sum(1)
if len(index)==4:
xmin,xmax,ymin,ymax=index
else:
xmin,xmax,ymin,ymax=0,None,0,None
if fits:
px,_=curve_fit(poly3,xm[roix][xmin:xmax],zmx[roix][xmin:xmax],p0[0])
py,_=curve_fit(poly3,ym[roiy][ymin:ymax],zmy[roiy][ymin:ymax],p0[1])
if plots:
fig,ax=plt.subplots(1,3,figsize=(12,4))
S=ax[0].pcolormesh(X,Y,phase)
ax[1].plot(xm,zmx,'.C1')
ax[2].plot(ym,zmy,'.C1')
if fits:
ax[1].plot(xm[roix][xmin:xmax],poly3(xm[roix][xmin:xmax],*px),'C1')
ax[2].plot(ym[roiy][ymin:ymax],poly3(ym[roiy][ymin:ymax],*py),'C1')
ax[0].set_xlabel(r'$\omega_i$',fontsize=18)
ax[0].set_ylabel(r'$\omega_s$',fontsize=18)
ax[1].set_xlabel(r'$\omega_i$',fontsize=18)
ax[1].set_ylabel(r'$\phi_i$',fontsize=18)
ax[2].set_xlabel(r'$\omega_s$',fontsize=18)
ax[2].set_ylabel(r'$\phi_s$',fontsize=18)
fig.colorbar(S,ax=ax[0])
plt.tight_layout()
plt.show()
if fits:
print(px)
print(py)
if fits:
return(px,py)
def polyfit2phase(Wi,Ws,Fk,
tol=0.1,plot=True,unwrap=True,origin=0,deg=3,method='sep'):
#Fk=Fk/np.max(np.abs(Fk)**2)
nroi=np.where(np.abs(Fk)**2/np.max(np.abs(Fk)**2)<tol)
roi=np.where(np.abs(Fk)**2/np.max(np.abs(Fk)**2)>=tol)
if unwrap:
phase=unwrap_phase(np.angle(Fk))
else:
phase=(np.angle(Fk))
phase0=phase.copy()
phase0[nroi]=0
if origin:
wi0,ws0=origin
else:
Ik=np.abs(Fk)**2/np.max(np.abs(Fk)**2)
wi0,ws0=fit_gaussian((Wi,Ws,Ik+0.001*np.max(Ik))).x[1:3]
if method=='sep':
coeff,cost,_,_=polyfit2sep(Wi[roi]-wi0,Ws[roi]-ws0,phase[roi],deg=deg)
elif method=='full':
coeff,cost,_,_=polyfit2d(Wi[roi]-wi0,Ws[roi]-ws0,phase[roi],deg=[deg,deg])
fc=poly.polyval2d(Wi-wi0,Ws-ws0,coeff)
fc[nroi]=0
if plot:
fig,ax=plt.subplots(1,2,figsize=(9,4))
S1=ax[0].pcolormesh(Wi-wi0,Ws-ws0,phase0)
S2=ax[1].pcolormesh(Wi-wi0,Ws-ws0,fc)
ax[0].set_title('Reconstructed')
ax[1].set_title('Fit')
fig.colorbar(S1,ax=ax[0])
fig.colorbar(S2,ax=ax[1])
plt.tight_layout()
plt.show()
print('Cost\n',cost[0],'\nCentre\n',wi0,ws0,'\nCoeff\n',coeff)
return coeff
def weighted_polyfit2phase(Wi,Ws,Fk,
p0=[],plots=False):
weights=np.abs(Fk)**2
phase=unwrap_phase(np.angle(Fk))
weighted_phase=weights*phase
wi0,ws0=fit_gaussian((Wi,Ws,np.abs(Fk)**2)).x[1:3]
if len(p0)==0:
p0=np.random.rand(1,7)
import matplotlib as mpl
# mpl.use('Agg')
import matplotlib.pyplot as plt
from shutil import copyfile
import fortranformat as ff
from itertools import zip_longest
from scipy.signal import argrelextrema, argrelmin
import os
import pandas as pd
import numpy as np
import datetime
from ast import literal_eval as make_tuple
import pyshtools
from scipy.io import loadmat
from pathlib import Path
from scipy.special import lpmn
from matplotlib import cm
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
import copy
import cartopy.feature as cfeature
"""
author: <NAME>
contact: <EMAIL>
description: A script containing tools to post-process the orbits obtained from a forward simulation (and also recovery) with epos-oc.
"""
def create_element_lines(ffp, splitstring):
#get titles
with open(ffp) as f:
LINES = f.readlines()
starts = []
for i,line in enumerate(LINES):
if line.startswith(splitstring):
starts.append(i)
ends=[]
for i in range(len(starts)):
ends.append(starts[i]+16)
blocks = list(zip(starts,ends))
format_float = ff.FortranRecordWriter('(E19.13)')
for block in blocks:
with open(ffp) as fp:
for i, line in enumerate(fp):
if i in range(block[0],block[1]):
if i==block[0]:
outfile = open('%s_ELEMENTSnew.txt' %line.strip(),'w')
outfile.write('\n')
outfile.write(' --- Begin initial elements GRACE-C\n')
if i>block[0]+1:
if line.startswith('Sat'):
outfile.write(' --- End initial elements GRACE-C\n')
outfile.write('\n')
outfile.write(' --- Begin initial elements GRACE-D\n')
if line.startswith('ELEMENT'):
val = line.strip().split()
val[5] = str(format_float.write([float(val[5])])).replace('E','e')
val[6] = str(format_float.write([float(val[6])])).replace('E', 'e')
if val[7] == '0201201': val[7] = '1804701'
if val[7] == '0201202': val[7] = '1804702'
str_new2 = ('%7.7s' '%4.3s' '%2.1i' '%2.1i' '%2.1i' '%20.19s' '%20.19s' '%8.7s') \
% (val[0], val[1], int(val[2]), int(val[3]),
int(val[4]), val[5], val[6], val[7])
outfile.write('%s\n' %str_new2)
if i==block[1]-1:
outfile.write(' --- End initial elements GRACE-D')
break
def files(path):
#input: path to a directory
#output: files within the directory (omitting nested directories)
for file in os.listdir(path):
if os.path.isfile(os.path.join(path, file)):
yield file
def create_case_directories(fp, fp_out):
#function to prepare the case directories for each of the simulations specified for the GRACE-FO project.
#It will
element_files = []
# current_dir = os.path.dirname(__file__)
for file in files(fp):
element_files.append(file)
IDs = ['PP.1', 'PP.2']
altitudes = [490, 490]
extens = [0, 0]
angles = [89, 89]
seperations = [200, 100]
repeats = [30, 30]
simdirs = ['FD', 'FD']
df = pd.DataFrame(columns=['id', 'altitude', 'extens', 'seperation', 'repeatvals', 'sim_direction'])
df['id'] = IDs
df['altitude'] = altitudes
df['angles'] = angles
df['extens'] = extens
df['seperation'] = seperations
df['repeatvals'] = repeats
df['sim_direction'] = simdirs
df.set_index('id', inplace=True)
for idx in df.index:
dirname = '%s_%i_%i_%i_%i_%id_%s' % (idx, df.loc[idx].altitude,
df.loc[idx].angles,
df.loc[idx].extens,
df.loc[idx].seperation,
df.loc[idx].repeatvals,
df.loc[idx].sim_direction
)
if not os.path.exists(dirname):
os.mkdir(dirname)
ef = [f for f in element_files if f.startswith(idx)][0]
dst = os.path.join(fp, dirname, 'ELEMENT_lines')
src = os.path.abspath(os.path.join(os.path.dirname(__file__), ef))
copyfile(src, dst)
def serial_date_to_string(srl_no):
new_date = datetime.datetime(2000,1,1) + datetime.timedelta(srl_no+1)
return new_date.strftime("%Y-%m-%d")
def cart_2_kep_matrix(R, V, mu, Re):
# step1
h_bar = np.cross(R, V)
h = np.linalg.norm(h_bar, axis=1)
# step2
r = np.linalg.norm(R, axis=1)
"""
eigenmodes.py is a file which contains the formula for computing the spatial eigenmodes of the cavity.
It also contains simple test functions and outputs for examining the cavity modes
For our simple example, consider spatial eigenmodes TMmnp for a rectangular cavity.
Assume that the cavity has dimensions [a,b,d]. The eigenmodes of the cavity
(corresponding to both the field eigenmodes and the vector potential eigenmodes)
are given by:
A_z(x,y,z) = C*sin(m*pi*x/a)*sin(n*pi*y/b)*cos(p*pi*z/d)
with normalization factor C = 2 np.sqrt(a*b*d) for p =/= 0.
Note that this function will assume a pre-specified single mode for now!
<NAME>
08/31/2016
"""
# cavity dimensions in meters
#a = 50. #*1e-2 #x plane
#b = 40. #1e-2 #y plane
#d = 10. #1e-2 #z plane
# Mode numbers
m = 1 # x mode
n = 1 # y mode
p = 0 # z mode
import numpy as np
from scipy.constants import c as c_mks
c = c_mks*1.e2
class RectangularModes(object):
'''Class containing rectangular mode constructors and relevant functions
In the future - this can be made into a subclass for use with cylindrical cavities, and ultimately
generalized for polynomial eigenmode expansions.
'''
def __init__(self, a,b,d):
'''Constructor for modes - needs only dimensions
Arguments:
a (float): cavity length in x-plane
b (float): cavity length in y-plane
d (float): cavity length in z-plane (presumed longitudinal direction)
'''
self.x = a
self.y = b
self.z = d
self.M = 1. / (16. * np.pi * c) * (a * b * d) #M-factor for integration of field quantities
C_base = 1.
self.C_x = lambda m, n, p: C_base * m / m
self.C_y = lambda m, n, p: C_base * (m / m) # differs by a ratio of wavenumbers (kx/ky) such that del*E = 0.
self.C_z = lambda m, n, p: C_base * m / m
def get_mode_frequencies(self,m,n,p):
'''Return mode (angular) frequencies for the cavity
Arguments:
m (int): x-plane eigenvalue
n (int): y-plane eigenvalue
p (int): z-plane eigenvalue
'''
return np.pi * c * np.sqrt((m/self.x) ** 2 + (n/self.y) ** 2 + (p/self.z) ** 2)
def get_mode_wavenumbers(self,m,n,p):
'''Return mode wavenumbers for the modes
Arguments:
m (int): x-plane eigenvalue
n (int): y-plane eigenvalue
p (int): z-plane eigenvalue
'''
#ks = np.zeros()
kxs = 1.0 * m * np.pi / self.x
kys = 1.0 * n * np.pi / self.y
kzs = 1.0 * p * np.pi / self.z
return np.asarray([kxs,kys,kzs])
def get_mode_Ms(self,m,n,p):
'''Return mode Ml quantities for the modes
Arguments:
m (int): x-plane eigenvalue
n (int): y-plane eigenvalue
p (int): z-plane eigenvalue
'''
return self.M*np.ones(len(m))
def get_mode_Ks(self,m,n,p):
'''Return mode Kl quantities for the modes
Arguments:
m (int): x-plane eigenvalue
n (int): y-plane eigenvalue
p (int): z-plane eigenvalue
'''
return self.M*(m**2 + n**2)
def calc_A_x(self, kx, ky, kz, x, y, z):
'''
Returns an LxN array of A_x for L modes evaluated at N particle positions.
Arguments:
ks (ndArray): Lx3 array of wavenumbers
x (ndArray): vector of particle coordinates x (length N)
y (ndArray): vector of particle coordinates y (length N)
z (ndArray): vector of particle coordinates z (length N)
Returns:
A_x (ndArray): An LxN array of values
'''
kxx = np.einsum('i,j->ij', kx, x)
kyy = np.einsum('i,j->ij', ky, y)
kzz = np.einsum('i,j->ij', kz, z)
product = np.cos(kxx) * np.sin(kyy) * np.sin(kzz)
return np.einsum('i,ij->ij', self.C_x(kx, ky, kz), product)
def calc_A_y(self, kx, ky, kz, x, y, z):
'''
Returns an LxN array of A_y for L modes evaluated at N particle positions.
Arguments:
ks (ndArray): Lx3 array of wavenumbers
x (ndArray): vector of particle coordinates x (length N)
y (ndArray): vector of particle coordinates y (length N)
z (ndArray): vector of particle coordinates z (length N)
Returns:
A_y (ndArray): An LxN array of values
'''
kxx = np.einsum('i,j->ij', kx, x)
kyy = np.einsum('i,j->ij', ky, y)
kzz = np.einsum('i,j->ij', kz, z)
product = np.sin(kxx) * np.cos(kyy) * np.sin(kzz)
return np.einsum('i,ij->ij',self.C_y(kx, ky, kz), product)
def calc_A_z(self, kx, ky, kz, x, y, z):
'''
Returns an LxN array of A_x for L modes evaluated at N particle positions.
Arguments:
ks (ndArray): Lx3 array of wavenumbers
x (ndArray): vector of particle coordinates x (length N)
y (ndArray): vector of particle coordinates y (length N)
z (ndArray): vector of particle coordinates z (length N)
Returns:
A_z (ndArray): An LxN array of values
'''
kxx = np.einsum('i,j->ij', kx, x)
kyy = np.einsum('i,j->ij', ky, y)
kzz = np.einsum('i,j->ij', kz, z)
product = np.sin(kxx) * np.sin(kyy) * np.cos(kzz)
return np.einsum('i,ij->ij', self.C_z(kx, ky, kz), product)
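# A minimal usage sketch (illustrative only): evaluate A_z of the default
# (m, n, p) = (1, 1, 0) mode on a few interior points of a 50 x 40 x 10 cavity.
def _example_mode_evaluation():
    modes = RectangularModes(50., 40., 10.)
    kx, ky, kz = modes.get_mode_wavenumbers(np.array([m]), np.array([n]),
                                            np.array([p]))
    x = np.linspace(5., 45., 5)
    y = np.linspace(5., 35., 5)
    z = np.zeros(5)
    # one row per mode, one column per evaluation point -> shape (1, 5)
    return modes.calc_A_z(kx, ky, kz, x, y, z)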
import os
import shutil
import sys
import time
import multiprocessing
import numpy as np
import json
from collections import namedtuple
import pickle
import zmq
import pyproj
import psutil
import struct
import concurrent.futures
import argparse
from py3dtiles.points.transformations import rotation_matrix, angle_between_vectors, vector_product, inverse_matrix, scale_matrix, translation_matrix
from py3dtiles.points.utils import compute_spacing, name_to_filename
from py3dtiles.points.node import Node
from py3dtiles import TileReader
from py3dtiles.points.shared_node_store import SharedNodeStore
import py3dtiles.points.task.las_reader as las_reader
import py3dtiles.points.task.node_process as node_process
import py3dtiles.points.task.pnts_writer as pnts_writer
def write_tileset(in_folder, out_folder, octree_metadata, offset, scale, projection, rotation_matrix, include_rgb):
# compute tile transform matrix
if rotation_matrix is None:
transform = np.identity(4)
# -*- coding: utf-8 -*-
# Copyright 2019 the HERA Project
# Licensed under the MIT License
import warnings
import pytest
import numpy as np
from copy import deepcopy
import os
import sys
import shutil
from scipy import constants, interpolate
from pyuvdata import UVCal, UVData
from hera_sim.interpolators import Beam
from hera_sim import DATA_PATH as HS_DATA_PATH
from hera_sim import noise
from uvtools import dspec
from hera_cal import io, datacontainer
from hera_cal import vis_clean
from hera_cal.vis_clean import VisClean
from hera_cal.data import DATA_PATH
from hera_cal import frf
import glob
import copy
# test flagging utility functions
def test_truncate_flagged_edges():
Nfreqs = 64
Ntimes = 60
data_in = np.outer(np.arange(1, Ntimes + 1), np.arange(1, Nfreqs + 1))
weights_in = np.abs(data_in).astype(float)
data_in = data_in + .3j * data_in
# flag channel 30
weights_in[:, 30] = 0.
# flag last channel
weights_in[:, -1] = 0.
# flag last two integrations
weights_in[-2:, :] = 0.
times = np.arange(60) * 10.
freqs = np.arange(64) * 100e3
# test freq truncation
xout, dout, wout, edges, _ = vis_clean.truncate_flagged_edges(data_in, weights_in, freqs, ax='freq')
assert np.all(np.isclose(xout, freqs[:-1]))
assert np.all(np.isclose(dout, data_in[:, :-1]))
assert np.all(np.isclose(wout, weights_in[:, :-1]))
assert edges == [(0, 1)]
# test time truncation
xout, dout, wout, edges, _ = vis_clean.truncate_flagged_edges(data_in, weights_in, times, ax='time')
assert np.all(np.isclose(xout, times[:-2]))
assert np.all(np.isclose(dout, data_in[:-2, :]))
assert np.all(np.isclose(wout, weights_in[:-2, :]))
assert edges == [(0, 2)]
# test truncating both.
xout, dout, wout, edges, _ = vis_clean.truncate_flagged_edges(data_in, weights_in, (times, freqs), ax='both')
assert np.all(np.isclose(xout[0], times[:-2]))
assert np.all(np.isclose(xout[1], freqs[:-1]))
assert np.all(np.isclose(dout, data_in[:-2, :-1]))
assert np.all(np.isclose(wout, weights_in[:-2, :-1]))
assert edges == [[(0, 2)], [(0, 1)]]
def test_restore_flagged_edges():
Nfreqs = 64
Ntimes = 60
data_in = np.outer(np.arange(1, Ntimes + 1), np.arange(1, Nfreqs + 1))
weights_in = np.abs(data_in).astype(float)
data_in = data_in + .3j * data_in
# flag channel 30
weights_in[:, 30] = 0.
# flag last channel
weights_in[:, -1] = 0.
# flag last two integrations
weights_in[-2:, :] = 0.
times = np.arange(60) * 10.
freqs = np.arange(64) * 100e3
# test freq truncation
xout, dout, wout, edges, _ = vis_clean.truncate_flagged_edges(data_in, weights_in, freqs, ax='freq')
wrest = vis_clean.restore_flagged_edges(xout, wout, edges)
assert np.allclose(weights_in[:, :-1], wrest[:, :-1])
assert np.allclose(wrest[:, -1], 0.0)
xout, dout, wout, edges, _ = vis_clean.truncate_flagged_edges(data_in, weights_in, times, ax='time')
wrest = vis_clean.restore_flagged_edges(xout, wout, edges, ax='time')
assert np.allclose(wout, wrest[:-2, :])
assert np.allclose(wrest[-2:, :], 0.0)
xout, dout, wout, edges, _ = vis_clean.truncate_flagged_edges(data_in, weights_in, (times, freqs), ax='both')
wrest = vis_clean.restore_flagged_edges(xout, wout, edges, ax='both')
assert np.allclose(wrest[-2:, :], 0.0)
assert np.allclose(wrest[:, -1], 0.0)
assert np.allclose(wout, wrest[:-2, :-1])
def test_find_discontinuity_edges():
assert vis_clean.find_discontinuity_edges([0, 1, 4, 9]) == [(0, 2), (2, 3), (3, 4)]
assert vis_clean.find_discontinuity_edges([0, 1, 2, 4, 5, 6, 7, 9, 11, 12]) == [(0, 3), (3, 7), (7, 8), (8, 10)]
def test_flag_rows_with_flags_within_edge_distance():
Nfreqs = 64
Ntimes = 60
weights_in = np.outer(np.arange(1, Ntimes + 1), np.arange(1, Nfreqs + 1))
weights_in[32, 2] = 0.
weights_in[33, 12] = 0.
weights_in[2, 30] = 0.
weights_in[-10, 20] = 0.
freqs = np.arange(Nfreqs) * 100e3
# under the above flagging pattern
# freq flagging with min_flag_edge_distance=2 yields 32nd integration flagged only.
wout = vis_clean.flag_rows_with_flags_within_edge_distance(freqs, weights_in, min_flag_edge_distance=3, ax='freq')
for i in range(wout.shape[0]):
if i == 32:
assert np.all(np.isclose(wout[i], 0.0))
else:
assert np.all(np.isclose(wout[i], weights_in[i]))
# extending edge_distance to 12 should yield 33rd integration being flagged as well.
wout = vis_clean.flag_rows_with_flags_within_edge_distance(freqs, weights_in, min_flag_edge_distance=13, ax='freq')
for i in range(wout.shape[0]):
if i == 32 or i == 33:
assert np.all(np.isclose(wout[i], 0.0))
else:
assert np.all(np.isclose(wout[i], weights_in[i]))
# now do time axis. 30th channel should be flagged for this case.
wout = vis_clean.flag_rows_with_flags_within_edge_distance(freqs, weights_in, min_flag_edge_distance=3, ax='time')
for i in range(wout.shape[1]):
if i == 30:
assert np.all(np.isclose(wout[:, i], 0.0))
else:
assert np.all(np.isclose(wout[:, i], weights_in[:, i]))
# 30th and 20th channels should end up flagged for this case.
times = np.arange(Ntimes) * 10.
wout = vis_clean.flag_rows_with_flags_within_edge_distance(times, weights_in, min_flag_edge_distance=11, ax='time')
for i in range(wout.shape[1]):
if i == 30 or i == 20:
assert np.all(np.isclose(wout[:, i], 0.0))
else:
assert np.all(np.isclose(wout[:, i], weights_in[:, i]))
# now do both
wout = vis_clean.flag_rows_with_flags_within_edge_distance([times, freqs], weights_in, min_flag_edge_distance=(3, 3), ax='both')
for i in range(wout.shape[1]):
if i == 30:
assert np.all(np.isclose(wout[:, i], 0.0))
for i in range(wout.shape[0]):
if i == 32:
assert np.all(np.isclose(wout[i], 0.0))
def test_flag_rows_with_flags_within_edge_distance_with_breaks():
Nfreqs = 64
Ntimes = 60
freqs = np.hstack([np.arange(23), 30 + np.arange(24), 58 + np.arange(17)]) * 100e3 + 150e6 # freq axis with discontinuities at 23 and 47 integrations.
times = np.hstack([np.arange(20) * 11., 41 * 11. + np.arange(27) * 11., 200 * 11. + np.arange(13) * 11.])  # time axis with discontinuities at the 20th and 47th integrations
weights_in = np.outer(np.arange(1, Ntimes + 1), np.arange(1, Nfreqs + 1))
# frequency direction and time direction separately.
weights_in[2, 30] = 0. # time 2 should not get flagged
weights_in[21, 48] = 0. # time 21 should get flagged
weights_in[55, 46] = 0. # time 55 should get flagged
weights_in[25, -2] = 0. # time 25 should get flagged
wout = vis_clean.flag_rows_with_flags_within_edge_distance(freqs, weights_in, min_flag_edge_distance=3, ax='freq')
assert list(np.where(np.all(np.isclose(wout, 0.), axis=1))[0]) == [21, 25, 55]
weights_in[22, 30] = 0. # channel 30 should be flagged
# channel 48 will also be flagged.
wout = vis_clean.flag_rows_with_flags_within_edge_distance(times, weights_in, min_flag_edge_distance=3, ax='time')
assert list(np.where(np.all(np.isclose(wout, 0.), axis=0))[0]) == [30, 48]
weights_in = np.outer(np.arange(1, Ntimes + 1), np.arange(1, Nfreqs + 1))
# both directions
weights_in[22, 30] = 0. # time 22 should not get flagged
weights_in[55, 46] = 0. # time 55 should get flagged
weights_in[25, -2] = 0. # time 25 should get flagged
weights_in[22, 30] = 0. # channel 30 should be flagged
wout = vis_clean.flag_rows_with_flags_within_edge_distance([times, freqs], weights_in, min_flag_edge_distance=[2, 3], ax='both')
assert list(np.where(np.all(np.isclose(wout, 0.), axis=0))[0]) == [30]
assert list(np.where(np.all(np.isclose(wout, 0.), axis=1))[0]) == [25, 55]
def test_flag_rows_with_contiguous_flags():
Nfreqs = 64
Ntimes = 60
weights_in = np.outer(np.arange(1, Ntimes + 1), np.arange(1, Nfreqs + 1))
weights_in[32, 2:12] = 0.
weights_in[35, 12:14] = 0.
weights_in[2:12, 30] = 0.
weights_in[-10:-8, 20] = 0.
wout = vis_clean.flag_rows_with_contiguous_flags(weights_in, max_contiguous_flag=8, ax='freq')
for i in range(wout.shape[0]):
if i == 32:
assert np.all(np.isclose(wout[i], 0.0))
else:
assert np.all(np.isclose(wout[i], weights_in[i]))
# extending edge_distance to 12 should yield 33rd integration being flagged as well.
wout = vis_clean.flag_rows_with_contiguous_flags(weights_in, max_contiguous_flag=2, ax='freq')
for i in range(wout.shape[0]):
if i == 32 or i == 35:
assert np.all(np.isclose(wout[i], 0.0))
else:
assert np.all(np.isclose(wout[i], weights_in[i]))
# now do time axis. 30th channel should be flagged for this case.
wout = vis_clean.flag_rows_with_contiguous_flags(weights_in, max_contiguous_flag=8, ax='time')
for i in range(wout.shape[1]):
if i == 30:
assert np.all(np.isclose(wout[:, i], 0.0))
else:
assert np.all(np.isclose(wout[:, i], weights_in[:, i]))
# 30th and 20th channels should end up flagged for this case.
wout = vis_clean.flag_rows_with_contiguous_flags(weights_in, max_contiguous_flag=2, ax='time')
for i in range(wout.shape[1]):
if i == 30 or i == 20:
assert np.all(np.isclose(wout[:, i], 0.0))
else:
assert np.all(np.isclose(wout[:, i], weights_in[:, i]))
# now do both
wout = vis_clean.flag_rows_with_contiguous_flags(weights_in, max_contiguous_flag=(3, 3), ax='both')
for i in range(wout.shape[1]):
if i == 30:
assert np.all(np.isclose(wout[:, i], 0.0))
for i in range(wout.shape[0]):
if i == 32:
assert np.all(np.isclose(wout[i], 0.0))
def test_get_max_contiguous_flag_from_filter_periods():
Nfreqs = 64
Ntimes = 60
times = np.arange(60) * 10.
freqs = np.arange(64) * 100e3
filter_centers = [[0.], [0.]]
filter_half_widths = [[1 / (3. * 10)], [1 / (100e3 * 2)]]
mcf = vis_clean.get_max_contiguous_flag_from_filter_periods(freqs, filter_centers[1], filter_half_widths[1])
assert mcf == 2
mcf = vis_clean.get_max_contiguous_flag_from_filter_periods(times, filter_centers[0], filter_half_widths[0])
assert mcf == 3
mcf = vis_clean.get_max_contiguous_flag_from_filter_periods((times, freqs), filter_centers, filter_half_widths)
assert tuple(mcf) == (3, 2)
# test assertion errors
pytest.raises(ValueError, vis_clean.get_max_contiguous_flag_from_filter_periods, [1.], [0.], [.5])
pytest.raises(ValueError, vis_clean.get_max_contiguous_flag_from_filter_periods, [[1.], [0.]], [[0.], [0.]], [[.5], [.5]])
def test_flag_model_rms():
Nfreqs = 64
Ntimes = 60
times = np.arange(60) * 10.
freqs = np.arange(64) * 100e3
w = np.ones((Ntimes, Nfreqs), dtype=bool)
d = np.random.randn(Ntimes, Nfreqs) * 1e-3 + 1j * np.random.randn(Ntimes, Nfreqs) * 1e-3
d += np.ones_like(d) * 100
d[30, 12] = 3.12315132e6
w[30, 12] = 0.
mdl = np.ones_like(d) * 100
mdl[30, 24] = 1e6
skipped = np.zeros_like(mdl, dtype=bool)
skipped = vis_clean.flag_model_rms(skipped, d, w, mdl, ax='freq')
for i in range(Ntimes):
if i == 30:
assert np.all(skipped[i])
else:
assert np.all(~skipped[i])
skipped = np.zeros_like(mdl, dtype=bool)
skipped = vis_clean.flag_model_rms(skipped, d, w, mdl, ax='time')
for i in range(Ntimes):
if i == 24:
assert np.all(skipped[:, i])
else:
assert np.all(~skipped[:, i])
skipped = np.zeros_like(mdl, dtype=bool)
skipped = vis_clean.flag_model_rms(skipped, d, w, mdl, ax='both')
for i in range(Nfreqs):
if i == 24:
assert np.all(skipped[:, i])
else:
assert ~np.all(skipped[:, i])
for i in range(Ntimes):
if i == 30:
assert np.all(skipped[i])
else:
assert ~np.all(skipped[i])
@pytest.mark.filterwarnings("ignore:The default for the `center` keyword has changed")
@pytest.mark.filterwarnings("ignore:It seems that the latitude and longitude are in radians")
class Test_VisClean(object):
def test_init(self):
# test basic init
fname = os.path.join(DATA_PATH, "zen.2458043.40141.xx.HH.XRAA.uvh5")
V = VisClean(fname, filetype='uvh5')
assert not hasattr(V, 'data')
V.read(bls=[(24, 25, 'ee')])
assert hasattr(V, 'data')
assert hasattr(V, 'antpos')
assert isinstance(V.hd, io.HERAData)
assert isinstance(V.hd.data_array, np.ndarray)
# test basic init w/ uvh5
fname = os.path.join(DATA_PATH, 'zen.2458098.43124.subband.uvh5')
V = VisClean(fname, filetype='uvh5')
assert not hasattr(V, 'data')
V.read(bls=[(13, 14, 'ee')])
assert set(V.hd.ant_1_array) == set([13])
assert isinstance(V.hd, io.HERAData)
assert isinstance(V.hd.data_array, np.ndarray)
# test input cal
fname = os.path.join(DATA_PATH, 'zen.2458043.12552.xx.HH.uvORA')
uvc = io.HERACal(os.path.join(DATA_PATH, 'zen.2458043.12552.xx.HH.uvORA.abs.calfits'))
gains, _, _, _ = uvc.read()
V1 = VisClean(fname, filetype='miriad')
bl = (52, 53, 'ee')
V1.read(bls=[bl])
V2 = VisClean(fname, filetype='miriad', input_cal=uvc)
V2.read(bls=[bl])
g = gains[(bl[0], 'Jee')] * gains[(bl[1], 'Jee')].conj()
assert np.allclose((V1.data[bl] / g)[30, 30], V2.data[bl][30, 30])
V2.apply_calibration(V2.hc, unapply=True)
assert np.allclose(V1.data[bl][30, 30], V2.data[bl][30, 30], atol=1e-5)
# test soft copy
V1.hello = 'hi'
V1.hello_there = 'bye'
V1.foo = 'bar'
V3 = V1.soft_copy(references=["hello*"])
assert hex(id(V1.data[(52, 53, 'ee')])) == hex(id(V3.data[(52, 53, 'ee')]))
assert hasattr(V3, 'hello')
assert hasattr(V3, 'hello_there')
assert not hasattr(V3, 'foo')
assert V3.__class__ == VisClean
# test clear
V1.clear_containers()
assert np.all([len(getattr(V1, c)) == 0 for c in ['data', 'flags', 'nsamples']])
V2.clear_calibration()
assert not hasattr(V2, 'hc')
@pytest.mark.filterwarnings("ignore:Selected polarization values are not evenly spaced")
def test_read_write(self):
# test read data can be turned off for uvh5
fname = os.path.join(DATA_PATH, 'zen.2458098.43124.subband.uvh5')
V = VisClean(fname, filetype='uvh5')
V.read(read_data=False)
assert set(V.hd.ant_1_array) == set([1, 11, 12, 13, 14])
# test read-write-read
V.read()
V.write_data(V.data, "./ex.uvh5", overwrite=True, filetype='uvh5', extra_attrs=dict(vis_units='Jy'))
V2 = VisClean("./ex.uvh5", filetype='uvh5')
V2.read()
assert V2.hd.vis_units == 'Jy'
assert 'Thisfilewasproducedbythefunction' in V2.hd.history.replace('\n', '').replace(' ', '')
V.hd.history, V2.hd.history, V2.hd.vis_units = '', '', V.hd.vis_units
if hasattr(V.hd, "filename"):
# make sure filename attributes are what we're expecting
assert V.hd.filename == ["zen.2458098.43124.subband.uvh5"]
assert V2.hd.filename == ["ex.uvh5"]
V.hd.filename = V2.hd.filename
assert V.hd == V2.hd
os.remove("./ex.uvh5")
# exceptions
pytest.raises(ValueError, V.write_data, V.data, 'foo', filetype='what')
# test write on subset of data
V.read(read_data=True)
data = datacontainer.DataContainer(dict([(k, V.data[k]) for k in list(V.data.keys())[:2]]))
V.write_data(data, "ex.uvh5", overwrite=True, filetype='uvh5')
assert os.path.exists("ex.uvh5")
os.remove('ex.uvh5')
def test_fourier_filter(self):
fname = os.path.join(DATA_PATH, "zen.2458043.40141.xx.HH.XRAA.uvh5")
V = VisClean(fname, filetype='uvh5')
V.read()
# test arg errors
k = (24, 25, 'ee')
fc = [0.]
fw = [100e-9]
ff = [1e-9]
fwt = [1e-3]
assert pytest.raises(ValueError, V.fourier_filter, keys=[k], overwrite=True,
filter_centers=fc, filter_half_widths=fw, suppression_factors=ff,
ax='height', mode='dayenu', fitting_options=None)
V.fourier_filter(keys=[k], filter_centers=fc, filter_half_widths=fw, suppression_factors=ff,
ax='freq', mode='dayenu', output_prefix='clean', zeropad=10, overwrite=True, max_contiguous_edge_flags=20)
# this line is repeated to cover the overwrite skip
V.fourier_filter(keys=[k], filter_centers=fc, filter_half_widths=fw, suppression_factors=ff, max_contiguous_edge_flags=20,
ax='freq', mode='dayenu', zeropad=10, output_prefix='clean', overwrite=False)
assert np.all([V.clean_info[k][(0, V.Nfreqs)]['status']['axis_1'][i] == 'success' for i in V.clean_info[k][(0, V.Nfreqs)]['status']['axis_1']])
# now do a time filter
V.fourier_filter(keys=[k], filter_centers=fc, filter_half_widths=fwt, suppression_factors=ff, overwrite=True,
ax='time', mode='dayenu', zeropad=10, max_contiguous_edge_flags=20)
assert V.clean_info[k][(0, V.Nfreqs)]['status']['axis_0'][0] == 'skipped'
assert V.clean_info[k][(0, V.Nfreqs)]['status']['axis_0'][3] == 'success'
# check that clean resid is equal to zero in flagged channels
assert np.all(V.clean_resid[(24, 25, 'ee')][V.clean_flags[(24, 25, 'ee')] | V.flags[(24, 25, 'ee')]] == 0.)
assert np.any(V.clean_resid[(24, 25, 'ee')][~(V.clean_flags[(24, 25, 'ee')] | V.flags[(24, 25, 'ee')])] != 0.)
assert np.all(V.clean_model[(24, 25, 'ee')][V.clean_flags[(24, 25, 'ee')]] == 0.)
assert np.any(V.clean_model[(24, 25, 'ee')][~V.clean_flags[(24, 25, 'ee')]] != 0.)
# check that filtered_data is the same in channels that were not flagged
atol = 1e-6 * np.mean(np.abs(V.data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')]]) ** 2.) ** .5
assert np.all(np.isclose(V.clean_data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')] & ~V.clean_flags[(24, 25, 'ee')]],
V.data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')] & ~V.clean_flags[(24, 25, 'ee')]], rtol=0., atol=atol))
# raise errors.
assert pytest.raises(ValueError, V.fourier_filter, filter_centers=[fc, fc], ax='both',
filter_half_widths=[fwt, fw], suppression_factors=[ff, ff],
mode='dayenu', zeropad=0, overwrite=True)
assert pytest.raises(ValueError, V.fourier_filter, filter_centers=[fc, fc], ax='both',
filter_half_widths=[fwt, fw], suppression_factors=[ff, ff], overwrite=True,
mode='dayenu', zeropad=['Mathematical Universe', 'Crazy Universe'])
# check 2d filter.
V.fourier_filter(filter_centers=[fc, fc],
filter_half_widths=[fwt, fw],
suppression_factors=[ff, ff],
mode='dayenu', overwrite=True,
zeropad=[20, 10], ax='both', max_contiguous_edge_flags=100)
assert V.clean_info[k][(0, V.Nfreqs)]['status']['axis_0'][0] == 'skipped'
assert V.clean_info[k][(0, V.Nfreqs)]['status']['axis_0'][3] == 'success'
# check that clean resid is equal to zero in flagged channels
assert np.all(V.clean_resid[(24, 25, 'ee')][V.clean_flags[(24, 25, 'ee')] | V.flags[(24, 25, 'ee')]] == 0.)
assert np.any(V.clean_resid[(24, 25, 'ee')][~(V.clean_flags[(24, 25, 'ee')] | V.flags[(24, 25, 'ee')])] != 0.)
assert np.all(V.clean_model[(24, 25, 'ee')][V.clean_flags[(24, 25, 'ee')]] == 0.)
assert np.any(V.clean_model[(24, 25, 'ee')][~V.clean_flags[(24, 25, 'ee')]] != 0.)
# check that filtered_data is the same in channels that were not flagged
atol = 1e-6 * np.mean(np.abs(V.data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')]]) ** 2.) ** .5
assert np.allclose(V.clean_data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')] & ~V.clean_flags[(24, 25, 'ee')]],
V.data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')] & ~V.clean_flags[(24, 25, 'ee')]], rtol=0., atol=atol)
@pytest.mark.filterwarnings("ignore:.*dspec.vis_filter will soon be deprecated")
def test_vis_clean_dayenu(self):
fname = os.path.join(DATA_PATH, "zen.2458043.40141.xx.HH.XRAA.uvh5")
V = VisClean(fname, filetype='uvh5')
V.read()
# most coverage is in dspec. Check that args go through here.
# similar situation for test_vis_clean.
V.vis_clean(keys=[(24, 25, 'ee'), (24, 25, 'ee')], ax='freq', overwrite=True, mode='dayenu')
# check that clean resid is equal to zero in flagged channels
assert np.all(V.clean_resid[(24, 25, 'ee')][V.clean_flags[(24, 25, 'ee')] | V.flags[(24, 25, 'ee')]] == 0.)
assert np.any(V.clean_resid[(24, 25, 'ee')][~(V.clean_flags[(24, 25, 'ee')] | V.flags[(24, 25, 'ee')])] != 0.)
assert np.all(V.clean_model[(24, 25, 'ee')][V.clean_flags[(24, 25, 'ee')]] == 0.)
assert np.any(V.clean_model[(24, 25, 'ee')][~V.clean_flags[(24, 25, 'ee')]] != 0.)
# check that filtered_data is the same in channels that were not flagged
# had to set atol=1e-6 here so it won't fail on travis (it runs fine on my laptop). There are some funny
# numpy issues.
atol = 1e-6 * np.mean(np.abs(V.data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')]]) ** 2.) ** .5
assert np.all(np.isclose(V.clean_data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')] & ~V.clean_flags[(24, 25, 'ee')]],
V.data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')] & ~V.clean_flags[(24, 25, 'ee')]], atol=atol, rtol=0.))
assert np.all([V.clean_info[(24, 25, 'ee')][(0, V.Nfreqs)]['status']['axis_1'][i] == 'success' for i in V.clean_info[(24, 25, 'ee')][(0, V.Nfreqs)]['status']['axis_1']])
assert pytest.raises(AssertionError, V.vis_clean, keys=[(24, 25, 'ee')], ax='time', max_frate=None, mode='dayenu')
assert pytest.raises(ValueError, V.vis_clean, keys=[(24, 25, 'ee')], ax='time', max_frate='arglebargle', mode='dayenu')
# cover no overwrite = False skip lines.
V.vis_clean(keys=[(24, 25, 'ee'), (24, 25, 'ee')], ax='freq', overwrite=False, mode='dayenu')
V.vis_clean(keys=[(24, 25, 'ee'), (24, 25, 'ee')], ax='time', overwrite=True, max_frate=1.0, mode='dayenu')
assert V.clean_info[(24, 25, 'ee')][(0, V.Nfreqs)]['status']['axis_0'][0] == 'skipped'
assert V.clean_info[(24, 25, 'ee')][(0, V.Nfreqs)]['status']['axis_0'][3] == 'success'
# check that clean resid is equal to zero in flagged channels
assert np.all(V.clean_resid[(24, 25, 'ee')][V.clean_flags[(24, 25, 'ee')] | V.flags[(24, 25, 'ee')]] == 0.)
assert np.any(V.clean_resid[(24, 25, 'ee')][~(V.clean_flags[(24, 25, 'ee')] | V.flags[(24, 25, 'ee')])] != 0.)
assert np.all(V.clean_model[(24, 25, 'ee')][V.clean_flags[(24, 25, 'ee')]] == 0.)
assert np.any(V.clean_model[(24, 25, 'ee')][~V.clean_flags[(24, 25, 'ee')]] != 0.)
# check that filtered_data is the same in channels that were not flagged
atol = 1e-6 * np.mean(np.abs(V.data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')]]) ** 2.) ** .5
assert np.allclose(V.clean_data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')] & ~V.clean_flags[(24, 25, 'ee')]],
V.data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')] & ~V.clean_flags[(24, 25, 'ee')]], atol=atol, rtol=0.)
V.vis_clean(keys=[(24, 25, 'ee'), (24, 25, 'ee')], ax='both', overwrite=True, max_frate=1.0, mode='dayenu')
assert np.all(['success' == V.clean_info[(24, 25, 'ee')][(0, V.Nfreqs)]['status']['axis_1'][i] for i in V.clean_info[(24, 25, 'ee')][(0, V.Nfreqs)]['status']['axis_1']])
assert V.clean_info[(24, 25, 'ee')][(0, V.Nfreqs)]['status']['axis_0'][0] == 'skipped'
assert V.clean_info[(24, 25, 'ee')][(0, V.Nfreqs)]['status']['axis_0'][3] == 'success'
# check that clean resid is equal to zero in flagged channels
assert np.all(V.clean_resid[(24, 25, 'ee')][V.clean_flags[(24, 25, 'ee')] | V.flags[(24, 25, 'ee')]] == 0.)
assert np.any(V.clean_resid[(24, 25, 'ee')][~(V.clean_flags[(24, 25, 'ee')] | V.flags[(24, 25, 'ee')])] != 0.)
assert np.all(V.clean_model[(24, 25, 'ee')][V.clean_flags[(24, 25, 'ee')]] == 0.)
assert np.any(V.clean_model[(24, 25, 'ee')][~V.clean_flags[(24, 25, 'ee')]] != 0.)
# check that filtered_data is the same in channels that were not flagged
atol = 1e-6 * np.mean(np.abs(V.data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')]]) ** 2.) ** .5
assert np.allclose(V.clean_data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')] & ~V.clean_flags[(24, 25, 'ee')]],
V.data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')] & ~V.clean_flags[(24, 25, 'ee')]], atol=atol, rtol=0.)
# check whether dayenu filtering axis 1 and then axis 0 is the same as dayenu filtering axis 1 and then filtering the resid.
# note that filtering axis orders do not commute; we filter axis 1 (foregrounds) before filtering cross-talk.
V.vis_clean(keys=[(24, 25, 'ee'), (24, 25, 'ee')], ax='both', overwrite=True, max_frate=1.0, mode='dayenu')
V.vis_clean(keys=[(24, 25, 'ee'), (24, 25, 'ee')], ax='freq', overwrite=True, max_frate=1.0, output_prefix='clean1', mode='dayenu')
V.vis_clean(keys=[(24, 25, 'ee'), (24, 25, 'ee')], ax='time', overwrite=True, max_frate=1.0, data=V.clean1_resid, output_prefix='clean0', mode='dayenu')
assert np.all(np.isclose(V.clean_resid[(24, 25, 'ee')], V.clean0_resid[(24, 25, 'ee')]))
@pytest.mark.filterwarnings("ignore:.*dspec.vis_filter will soon be deprecated")
def test_vis_clean_dpss(self):
# Relax atol=1e-6 for clean_data and data equalities. There may be some numerical
# issues going on. Notebook tests show that distributing minus signs has
# consequences.
fname = os.path.join(DATA_PATH, "zen.2458043.40141.xx.HH.XRAA.uvh5")
V = VisClean(fname, filetype='uvh5')
V.read()
# most coverage is in dspec. Check that args go through here.
# similar situation for test_vis_clean.
V.vis_clean(keys=[(24, 25, 'ee'), (24, 25, 'ee')], ax='freq', overwrite=True, mode='dpss_leastsq')
# check that clean resid is equal to zero in flagged channels
assert np.all(V.clean_resid[(24, 25, 'ee')][V.clean_flags[(24, 25, 'ee')] | V.flags[(24, 25, 'ee')]] == 0.)
assert np.any(V.clean_resid[(24, 25, 'ee')][~(V.clean_flags[(24, 25, 'ee')] | V.flags[(24, 25, 'ee')])] != 0.)
assert np.all(V.clean_model[(24, 25, 'ee')][V.clean_flags[(24, 25, 'ee')]] == 0.)
assert np.any(V.clean_model[(24, 25, 'ee')][~V.clean_flags[(24, 25, 'ee')]] != 0.)
# check that filtered_data is the same in channels that were not flagged
atol = 1e-6 * np.mean(np.abs(V.data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')]]) ** 2.) ** .5
assert np.all(np.isclose(V.clean_data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')] & ~V.clean_flags[(24, 25, 'ee')]],
V.data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')] & ~V.clean_flags[(24, 25, 'ee')]], atol=atol, rtol=0.))
assert np.all([V.clean_info[(24, 25, 'ee')][(0, V.Nfreqs)]['status']['axis_1'][i] == 'success' for i in V.clean_info[(24, 25, 'ee')][(0, V.Nfreqs)]['status']['axis_1']])
assert pytest.raises(AssertionError, V.vis_clean, keys=[(24, 25, 'ee')], ax='time', mode='dpss_leastsq')
assert pytest.raises(ValueError, V.vis_clean, keys=[(24, 25, 'ee')], ax='time', max_frate='arglebargle', mode='dpss_leastsq')
# cover no overwrite = False skip lines.
V.vis_clean(keys=[(24, 25, 'ee'), (24, 25, 'ee')], ax='freq', overwrite=False, mode='dpss_leastsq')
V.vis_clean(keys=[(24, 25, 'ee'), (24, 25, 'ee')], ax='time', overwrite=True, max_frate=1.0, mode='dpss_leastsq')
assert V.clean_info[(24, 25, 'ee')][(0, V.Nfreqs)]['status']['axis_0'][0] == 'skipped'
assert V.clean_info[(24, 25, 'ee')][(0, V.Nfreqs)]['status']['axis_0'][3] == 'success'
# check that clean resid is equal to zero in flagged channels
assert np.all(V.clean_resid[(24, 25, 'ee')][V.clean_flags[(24, 25, 'ee')] | V.flags[(24, 25, 'ee')]] == 0.)
assert np.any(V.clean_resid[(24, 25, 'ee')][~(V.clean_flags[(24, 25, 'ee')] | V.flags[(24, 25, 'ee')])] != 0.)
# coding: utf-8
# /*##########################################################################
#
# Copyright (c) 2017 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
__authors__ = ["<NAME> - ESRF ISDD Advanced Analysis and Modelling"]
__license__ = "MIT"
__date__ = "20/04/2017"
import numpy as np
from comsyl.mathcomsyl.Convolution import Convolution
from comsyl.utils.Logger import log
class AutocorrelationBuilderStrategy(object):
def __init__(self, x_coordinates, y_coordinates, density, field_x_coordinates, field_y_coordinates, weighted_fields):
self._x_coordinates = x_coordinates
self._y_coordinates = y_coordinates
self._density = density
self._weigthed_fields = weighted_fields
self._setActiveField(0)
self._field_tmp = np.zeros_like(self._field)
self._field_tmp_x = np.zeros(self._field.shape[0], dtype=np.complex128)
self._field_tmp_y = np.zeros(self._field.shape[1], dtype=np.complex128)
self._field_x_coordinates = field_x_coordinates
self._field_y_coordinates = field_y_coordinates
self._last_r_1_x = None
self._last_r_1_y = None
self._x_coordinates_i_min = -(self._x_coordinates.shape[0] + 1)/2
self._x_coordinates_i_max = (self._x_coordinates.shape[0] + 1)/2
self._x_coordinates_step_width = self._x_coordinates[1] - self._x_coordinates[0]
self._y_coordinates_i_min = -(self._y_coordinates.shape[0] + 1)/2
self._y_coordinates_i_max = (self._y_coordinates.shape[0] + 1)/2
self._y_coordinates_step_width = self._y_coordinates[1] - self._y_coordinates[0]
self._grid_area = self._x_coordinates_step_width * self._y_coordinates_step_width
log("Calculating density (integration part)")
self._setRhoR(self._field_x_coordinates, self._field_y_coordinates)
self._rho_phase_tmp = np.zeros_like(self._rho)
self._convolution = Convolution()
self._postInit()
def _setActiveField(self, index_field):
field = self._weigthed_fields[index_field, :, :]
self._field = field
self._field_conj = self._field.conj()
self._field_reverse = self._field[::-1, ::-1]
self._field_reverse_conj = self._field_reverse.conj()
def numberFields(self):
return self._weigthed_fields.shape[0]
def _postInit(self):
return
def evaluate(self, r_1, r_2):
raise NotImplementedError("Must implement.")
def evaluateAllR_2(self, r_1):
raise NotImplementedError("Must implement.")
def _setRhoR(self, r_x, r_y):
self._rho = np.zeros((r_x.shape[0], r_y.shape[0]), dtype=np.complex128)
for i_x, x in enumerate(r_x):
for i_y, y in enumerate(r_y):
res = self._density.integrationPartGaussian(0.0, 0.0,
x=x,
y=y)
self._rho[i_x, i_y] = res
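# Note: this fills the complex rho lookup by evaluating the density's Gaussian
# integration part once per (x, y) grid node, i.e. O(len(r_x) * len(r_y)) calls;
# this is the "integration part" setup cost logged from __init__ above.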
def _setRhoPhase(self, r_x, r_y):
self._rho_phase_x = np.zeros((r_x.shape[0]), dtype=np.complex128)
self._rho_phase_y = np.zeros((r_y.shape[0]), dtype=np.complex128)
for x in r_x:
i_x = self.xIndexByCoordinate(x)
dr = np.array([x, 0.0])
"""Test tentative interpolation."""
import numpy as np
from scipy.sparse import csr_matrix
from pyamg.aggregation.aggregation import fit_candidates
from numpy.testing import TestCase, assert_almost_equal
class TestFitCandidates(TestCase):
def setUp(self):
self.cases = []
# tests where AggOp includes all dofs
# one candidate
self.cases.append((
csr_matrix((np.ones(5), np.array([0, 0, 0, 1, 1]), np.arange(6)),
shape=(5, 2)), np.ones((5, 1))))
self.cases.append((
csr_matrix((np.ones(5), np.array([1, 1, 0, 0, 0]), np.arange(6)),
shape=(5, 2)), np.ones((5, 1))))
self.cases.append((
csr_matrix((np.ones(9), np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]),
np.arange(10)),
shape=(9, 3)), np.ones((9, 1))))
self.cases.append((
csr_matrix((np.ones(9), np.array([2, 1, 0, 0, 1, 2, 1, 0, 2]),
np.arange(10)),
shape=(9, 3)), np.arange(9).reshape(9, 1)))
# two candidates
self.cases.append((
csr_matrix((np.ones(4), np.array([0, 0, 1, 1]), np.arange(5)),
shape=(4, 2)), np.vstack((np.ones(4), np.arange(4))).T))
self.cases.append((
csr_matrix((np.ones(9), np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]),
np.arange(10)),
shape=(9, 3)), np.vstack((np.ones(9), np.arange(9))).T))
self.cases.append((
csr_matrix((np.ones(9), np.array([0, 0, 1, 1, 2, 2, 3, 3, 3]),
np.arange(10)),
shape=(9, 4)), np.vstack((np.ones(9), np.arange(9))).T))
# two candidates, small norms
self.cases.append((
csr_matrix((np.ones(4), np.array([0, 0, 1, 1]), np.arange(5)),
shape=(4, 2)),
np.vstack((np.ones(4), 1e-20 * np.arange(4))).T))
self.cases.append((
csr_matrix((np.ones(4), np.array([0, 0, 1, 1]), np.arange(5)),
shape=(4, 2)),
1e-20 * np.vstack((np.ones(4), np.arange(4))).T))
# block aggregates, one candidate
self.cases.append((
csr_matrix((np.ones(3), np.array([0, 1, 1]), np.arange(4)),
shape=(3, 2)), np.ones((6, 1))))
self.cases.append((
csr_matrix((np.ones(3), np.array([0, 1, 1]), np.arange(4)),
shape=(3, 2)), np.ones((9, 1))))
self.cases.append((
csr_matrix((np.ones(5), np.array([2, 0, 2, 1, 1]), np.arange(6)),
shape=(5, 3)), np.ones((10, 1))))
# block aggregates, two candidates
self.cases.append((
csr_matrix((np.ones(3), np.array([0, 1, 1]), np.arange(4)),
shape=(3, 2)), np.vstack((np.ones(6), np.arange(6))).T))
self.cases.append((
csr_matrix((np.ones(3), np.array([0, 1, 1]), np.arange(4)),
shape=(3, 2)), np.vstack((np.ones(9), np.arange(9))).T))
self.cases.append((
csr_matrix((np.ones(5), np.array([2, 0, 2, 1, 1]), np.arange(6)),
shape=(5, 3)),
np.vstack((np.ones(10), np.arange(10))).T))
# tests where AggOp excludes some dofs
# one candidate
self.cases.append((
csr_matrix((np.ones(4), np.array([0, 0, 1, 1]),
np.array([0, 1, 2, 2, 3, 4])),
shape=(5, 2)), np.ones((5, 1))))
self.cases.append((
csr_matrix((np.ones(4), np.array([0, 0, 1, 1]),
np.array([0, 1, 2, 2, 3, 4])),
shape=(5, 2)), np.vstack((np.ones(5), np.arange(5))).T))
# overdetermined blocks
self.cases.append((
csr_matrix((np.ones(4), np.array([0, 0, 1, 1]),
np.array([0, 1, 2, 2, 3, 4])),
shape=(5, 2)),
np.vstack((np.ones(5), np.arange(5), np.arange(5)**2)).T))
self.cases.append((
csr_matrix((np.ones(6), np.array([1, 3, 0, 2, 1, 0]),
np.array([0, 0, 1, 2, 2, 3, 4, 5, 5, 6])),
shape=(9, 4)),
np.vstack((np.ones(9), np.arange(9), np.arange(9) ** 2)).T))
self.cases.append((
csr_matrix((np.ones(6), np.array([1, 3, 0, 2, 1, 0]),
np.array([0, 0, 1, 2, 2, 3, 4, 5, 5, 6])),
shape=(9, 4)),
np.vstack((np.ones(9), np.arange(9))).T))
# complex tests
# one aggregate one candidate
# checks real part of complex
self.cases.append((
csr_matrix((np.ones(4), np.array([0, 0, 0, 0]), np.arange(5)),
shape=(4, 1)), (1 + 0j) * np.ones((4, 1))))
self.cases.append((
csr_matrix((np.ones(4), np.array([0, 0, 0, 0]), np.arange(5)),
shape=(4, 1)), (0 + 3j) * np.ones((4, 1))))
# checks norm(), but not dot()
self.cases.append((
csr_matrix((np.ones(4), np.array([0, 0, 0, 0]), np.arange(5)),
shape=(4, 1)), (1 + 3j) * np.ones((4, 1))))
# checks norm(), but not dot()
self.cases.append((
csr_matrix((np.ones(4), np.array([0, 0, 0, 0]), np.arange(5)),
shape=(4, 1)), (0 + 3j) * np.arange(4).reshape(4, 1)))
# checks norm(), but not dot()
self.cases.append((
csr_matrix((np.ones(4), np.array([0, 0, 0, 0]), np.arange(5)),
shape=(4, 1)), (1 + 3j) * np.arange(4).reshape(4, 1)))
self.cases.append((
csr_matrix((np.ones(4), np.array([0, 0, 0, 0]), np.arange(5)),
shape=(4, 1)),
np.array([[-1 + 4j], [0 + 5j], [5 - 2j], [9 - 8j]])))
# one aggregate two candidates
# checks real part of complex
self.cases.append((
csr_matrix((np.ones(4), np.array([0, 0, 0, 0]), np.arange(5)),
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
from torchvision import transforms
import cv2
import numpy as np
import types
from PIL import Image, ImageEnhance, ImageDraw
import math
import six
import sys
sys.path.append('../')
from data.choose_config import cfg
cfg = cfg.cfg
import random
class sampler():
def __init__(self,
max_sample,
max_trial,
min_scale,
max_scale,
min_aspect_ratio,
max_aspect_ratio,
min_jaccard_overlap,
max_jaccard_overlap,
min_object_coverage,
max_object_coverage,
use_square=False):
self.max_sample = max_sample
self.max_trial = max_trial
self.min_scale = min_scale
self.max_scale = max_scale
self.min_aspect_ratio = min_aspect_ratio
self.max_aspect_ratio = max_aspect_ratio
self.min_jaccard_overlap = min_jaccard_overlap
self.max_jaccard_overlap = max_jaccard_overlap
self.min_object_coverage = min_object_coverage
self.max_object_coverage = max_object_coverage
self.use_square = use_square
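# Usage sketch (all parameter values hypothetical): each sampler describes one
# family of candidate crops, e.g. up to 1 sample in 50 trials with scale in
# [0.3, 1.0], aspect ratio in [0.5, 2.0], and the given overlap/coverage bounds:
#   sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.1, 1.0, 0.0, 1.0)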
def intersect(box_a, box_b):
max_xy = np.minimum(box_a[:, 2:], box_b[2:])
min_xy = np.maximum(box_a[:, :2], box_b[:2])
# clip negative extents to zero and return the per-box intersection areas
inter = np.clip((max_xy - min_xy), a_min=0, a_max=np.inf)
return inter[:, 0] * inter[:, 1]
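# Illustrative use (boxes hypothetical), with boxes in (x1, y1, x2, y2) form:
#   intersect(np.array([[0., 0., 2., 2.]]), np.array([1., 1., 3., 3.]))  # -> array([1.])
# i.e. the unit-area overlap between the two rectangles.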
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test cases for the bfloat16 Python type."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
# pylint: disable=unused-import,g-bad-import-order
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.framework import dtypes
from tensorflow.python.platform import test
bfloat16 = pywrap_tensorflow.TF_bfloat16_type()
class Bfloat16Test(test.TestCase):
def float_values(self):
"""Returns values that should round trip exactly to float and back."""
epsilon = float.fromhex("1.0p-7")
return [
0.0, 1.0, -1, 0.5, -0.5, epsilon, 1.0 + epsilon, 1.0 - epsilon,
-1.0 - epsilon, -1.0 + epsilon, 3.5, 42.0, 255.0, 256.0,
float("inf"), float("-inf"), float("nan")]
def _assertFloatIdentical(self, v, w):
if math.isnan(v):
self.assertTrue(math.isnan(w))
else:
self.assertEqual(v, w)
def testRoundTripToFloat(self):
for v in self.float_values():
self._assertFloatIdentical(v, float(bfloat16(v)))
def testRoundTripToInt(self):
for v in [-256, -255, -34, -2, -1, 0, 1, 2, 10, 47, 128, 255, 256, 512]:
self.assertEqual(v, int(bfloat16(v)))
def testStr(self):
self.assertEqual("0", str(bfloat16(0.0)))
self.assertEqual("1", str(bfloat16(1.0)))
self.assertEqual("-3.5", str(bfloat16(-3.5)))
self.assertEqual("0.0078125", str(bfloat16(float.fromhex("1.0p-7"))))
self.assertEqual("inf", str(bfloat16(float("inf"))))
self.assertEqual("-inf", str(bfloat16(float("-inf"))))
self.assertEqual("nan", str(bfloat16(float("nan"))))
def testRepr(self):
self.assertEqual("bfloat16(0)", repr(bfloat16(0)))
self.assertEqual("bfloat16(1)", repr(bfloat16(1)))
self.assertEqual("bfloat16(-3.5)", repr(bfloat16(-3.5)))
self.assertEqual("bfloat16(0.0078125)",
repr(bfloat16(float.fromhex("1.0p-7"))))
self.assertEqual("bfloat16(inf)", repr(bfloat16(float("inf"))))
self.assertEqual("bfloat16(-inf)", repr(bfloat16(float("-inf"))))
self.assertEqual("bfloat16(nan)", repr(bfloat16(float("nan"))))
def testHash(self):
self.assertEqual(0, hash(bfloat16(0.0)))
self.assertEqual(0x3f80, hash(bfloat16(1.0)))
self.assertEqual(0x7fc0, hash(bfloat16(float("nan"))))
# Tests for Python operations
def testNegate(self):
for v in self.float_values():
self._assertFloatIdentical(-v, float(-bfloat16(v)))
def testAdd(self):
self._assertFloatIdentical(0, float(bfloat16(0) + bfloat16(0)))
self._assertFloatIdentical(1, float(bfloat16(1) + bfloat16(0)))
self._assertFloatIdentical(0, float(bfloat16(1) + bfloat16(-1)))
self._assertFloatIdentical(5.5, float(bfloat16(2) + bfloat16(3.5)))
self._assertFloatIdentical(1.25, float(bfloat16(3.5) + bfloat16(-2.25)))
self._assertFloatIdentical(float("inf"),
float(bfloat16(float("inf")) + bfloat16(-2.25)))
self._assertFloatIdentical(float("-inf"),
float(bfloat16(float("-inf")) + bfloat16(-2.25)))
self.assertTrue(math.isnan(float(bfloat16(3.5) + bfloat16(float("nan")))))
def testSub(self):
self._assertFloatIdentical(0, float(bfloat16(0) - bfloat16(0)))
self._assertFloatIdentical(1, float(bfloat16(1) - bfloat16(0)))
self._assertFloatIdentical(2, float(bfloat16(1) - bfloat16(-1)))
self._assertFloatIdentical(-1.5, float(bfloat16(2) - bfloat16(3.5)))
self._assertFloatIdentical(5.75, float(bfloat16(3.5) - bfloat16(-2.25)))
self._assertFloatIdentical(float("-inf"),
float(bfloat16(-2.25) - bfloat16(float("inf"))))
self._assertFloatIdentical(float("inf"),
float(bfloat16(-2.25) - bfloat16(float("-inf"))))
self.assertTrue(math.isnan(float(bfloat16(3.5) - bfloat16(float("nan")))))
def testMul(self):
self._assertFloatIdentical(0, float(bfloat16(0) * bfloat16(0)))
self._assertFloatIdentical(0, float(bfloat16(1) * bfloat16(0)))
self._assertFloatIdentical(-1, float(bfloat16(1) * bfloat16(-1)))
self._assertFloatIdentical(-7.875, float(bfloat16(3.5) * bfloat16(-2.25)))
self._assertFloatIdentical(float("-inf"),
float(bfloat16(float("inf")) * bfloat16(-2.25)))
self._assertFloatIdentical(float("inf"),
float(bfloat16(float("-inf")) * bfloat16(-2.25)))
self.assertTrue(math.isnan(float(bfloat16(3.5) * bfloat16(float("nan")))))
def testDiv(self):
self.assertTrue(math.isnan(float(bfloat16(0) / bfloat16(0))))
self._assertFloatIdentical(float("inf"), float(bfloat16(1) / bfloat16(0)))
self._assertFloatIdentical(-1, float(bfloat16(1) / bfloat16(-1)))
self._assertFloatIdentical(-1.75, float(bfloat16(3.5) / bfloat16(-2)))
self._assertFloatIdentical(float("-inf"),
float(bfloat16(float("inf")) / bfloat16(-2.25)))
self._assertFloatIdentical(float("inf"),
float(bfloat16(float("-inf")) / bfloat16(-2.25)))
self.assertTrue(math.isnan(float(bfloat16(3.5) / bfloat16(float("nan")))))
def testLess(self):
for v in self.float_values():
for w in self.float_values():
self.assertEqual(v < w, bfloat16(v) < bfloat16(w))
def testLessEqual(self):
for v in self.float_values():
for w in self.float_values():
self.assertEqual(v <= w, bfloat16(v) <= bfloat16(w))
def testGreater(self):
for v in self.float_values():
for w in self.float_values():
self.assertEqual(v > w, bfloat16(v) > bfloat16(w))
def testGreaterEqual(self):
for v in self.float_values():
for w in self.float_values():
self.assertEqual(v >= w, bfloat16(v) >= bfloat16(w))
def testEqual(self):
for v in self.float_values():
for w in self.float_values():
self.assertEqual(v == w, bfloat16(v) == bfloat16(w))
def testNotEqual(self):
for v in self.float_values():
for w in self.float_values():
self.assertEqual(v != w, bfloat16(v) != bfloat16(w))
def testNan(self):
a = np.isnan(bfloat16(float("nan")))
self.assertTrue(a)
np.testing.assert_allclose(np.array([1.0, a]), np.array([1.0, a]))
a = np.array(
[bfloat16(1.34375),
bfloat16(1.4375),
bfloat16(float("nan"))],
dtype=dtypes.bfloat16.as_numpy_dtype)
b = np.array(
[bfloat16(1.3359375),
bfloat16(1.4375),
bfloat16(float("nan"))],
dtype=dtypes.bfloat16.as_numpy_dtype)
np.testing.assert_allclose(
a, b, rtol=0.1, atol=0.1, equal_nan=True, err_msg="", verbose=True)
class Bfloat16NumPyTest(test.TestCase):
def testDtype(self):
self.assertEqual(bfloat16, np.dtype(bfloat16))
def testArray(self):
x = np.array([[1, 2, 3]], dtype=bfloat16)
self.assertEqual(bfloat16, x.dtype)
self.assertEqual("[[bfloat16(1) bfloat16(2) bfloat16(3)]]", str(x))
self.assertAllEqual(x, x)
self.assertAllClose(x, x)
self.assertTrue((x == x).all())
def testComparisons(self):
x = np.array([401408, 7, -32], dtype=np.float32)
bx = x.astype(bfloat16)
y = np.array([82432, 7, 0], dtype=np.float32)
by = y.astype(bfloat16)
self.assertAllEqual(x == y, bx == by)
self.assertAllEqual(x != y, bx != by)
self.assertAllEqual(x < y, bx < by)
self.assertAllEqual(x > y, bx > by)
self.assertAllEqual(x <= y, bx <= by)
self.assertAllEqual(x >= y, bx >= by)
def testEqual2(self):
a = np.array([401408], bfloat16)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# **************************************
# @Time : 2018/9/17 1:23
# @Author : <NAME> & <NAME>
# @Lab : nesa.zju.edu.cn
# @File : DefenseEvaluations.py
# **************************************
import argparse
import os
import random
import sys
import warnings
import numpy as np
import scipy.stats as jslib
import torch
import torch.nn.functional as F
sys.path.append('%s/../' % os.path.dirname(os.path.realpath(__file__)))
from RawModels.Utils.dataset import get_mnist_test_loader, get_cifar10_test_loader
from RawModels.ResNet import resnet20_cifar
from RawModels.MNISTConv import MNISTConvNet
# help functions
def pd_prediction(model, dataset, data_loader, epsilon, device):
from Defenses.DefenseMethods.PD import PixelDefend
pd = PixelDefend(model=model, defense_name='PD', dataset=dataset, pixel_cnn_dir='../Defenses/', device=device)
model.eval()
predicted_defended = []
with torch.no_grad():
for index, (images, labels) in enumerate(data_loader):
images = images.detach().cpu().numpy()
purified_images = pd.de_noising_samples(samples=images, batch_size=images.shape[0], eps=epsilon)
rt_logits = model(torch.from_numpy(purified_images).to(device))
rt_predicted = F.softmax(rt_logits, dim=1).cpu().numpy()
predicted_defended.extend(rt_predicted)
return np.array(predicted_defended)
def rt_prediction(model, dataset, data_loader, final_size, device):
from Defenses.DefenseMethods.RT import RTDefense
rt = RTDefense(model=model, defense_name='RT', dataset=dataset, device=device)
model.eval()
predicted_defended = []
with torch.no_grad():
for index, (images, labels) in enumerate(data_loader):
transformed_images = rt.randomization_transformation(samples=images, original_size=images.shape[-1], final_size=final_size)
transformed_images = transformed_images.to(device)
rt_logits = model(transformed_images)
rt_predicted = F.softmax(rt_logits, dim=1).cpu().numpy()
predicted_defended.extend(rt_predicted)
return np.array(predicted_defended)
def te_prediction(model, data_loader, level, device):
from Defenses.DefenseMethods.TE import thermometer_encoding
model.eval()
predicted_defended = []
with torch.no_grad():
for index, (images, labels) in enumerate(data_loader):
therm_inputs = thermometer_encoding(samples=images.to(device), level=level, device=device)
therm_inputs = torch.from_numpy(therm_inputs).to(device)
te_logits = model(therm_inputs)
te_predicted = F.softmax(te_logits, dim=1).cpu().numpy()
predicted_defended.extend(te_predicted)
return np.array(predicted_defended)
def defense_utility_measure(pred_def, pred_raw, true_label):
# compute the classification accuracy of raw model
correct_prediction_raw = np.equal(np.argmax(pred_raw, axis=1), true_label)
acc_raw = np.mean(correct_prediction_raw.astype(float))
# compute the classification accuracy of defense-enhanced model
correct_prediction_def = np.equal(np.argmax(pred_def, axis=1), true_label)
acc_def = np.mean(correct_prediction_def.astype(float))
# compute the Classification Accuracy Variance(CAV)
cav_result = acc_def - acc_raw
# find the index of correct predicted examples by defence-enhanced model and raw model
idx_def = np.squeeze(np.argwhere(correct_prediction_def == True))
idx_raw = np.squeeze(np.argwhere(correct_prediction_raw == True))
idx = np.intersect1d(idx_def, idx_raw, assume_unique=True)
# compute the Classification Rectify Ratio(CRR) & Classification Sacrifice Ratio(CSR)
num_rectify = len(idx_def) - len(idx)
crr_result = num_rectify / len(pred_def)
num_sacrifice = len(idx_raw) - len(idx)
csr_result = num_sacrifice / len(pred_def)
# filter the correct prediction results
pred_def_filter = pred_def[idx]
pred_raw_filter = pred_raw[idx]
# compute the Classification Confidence Variance(CCV)
confidence_def = np.max(pred_def_filter, axis=1)
confidence_raw = np.max(pred_raw_filter, axis=1)
ccv_result = np.mean(np.absolute(confidence_def - confidence_raw))
# compute the Classification Output Stability(COS)
M = (pred_def_filter + pred_raw_filter) / 2.
js_total = 0
for i in range(len(M)):
js = 0.5 * jslib.entropy(pred_def_filter[i], M[i]) + 0.5 * jslib.entropy(pred_raw_filter[i], M[i])
js_total += js
cos_result = js_total / len(M)
return acc_raw, acc_def, cav_result, crr_result, csr_result, ccv_result, cos_result
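# Worked mini-example (numbers hypothetical): if the raw model scores
# acc_raw = 0.90 and the defense-enhanced model acc_def = 0.92 on the same test
# set, then CAV = 0.92 - 0.90 = 0.02; CRR is the fraction of examples the
# defense newly rectifies and CSR the fraction it newly sacrifices, while CCV
# and COS are computed only over examples both models classify correctly.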
def prediction(model, test_loader, device):
print('\nThe #{}# model is evaluated on the testing dataset loader ...'.format(model.model_name))
model = model.to(device)
model.eval()
prediction = []
true_labels = []
with torch.no_grad():
for images, labels in test_loader:
images = images.to(device)
labels = labels.to(device)
logits = model(images)
predicted = F.softmax(logits, dim=1).cpu().numpy()
prediction.extend(predicted)
true_labels.extend(labels)
prediction = np.array(prediction)
true_labels = np.array(true_labels)
return prediction, true_labels
def main(args):
# Device configuration
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_index
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Set the random seed manually for reproducibility.
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.manual_seed(args.seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(args.seed)
np.random.seed(args.seed)
import sys
import warnings
import math
import pdb
import itertools
import numpy as np
from utils import reset_wrapper, step_wrapper
from scipy.ndimage.filters import convolve1d as convolve
import os
import copy
import pygame
from numba import njit, jit
from collections import deque
@njit
def angle_between(v1, v2):
v1_conv = v1.astype(np.dtype("float"))
v2_conv = v2.astype(np.dtype("float"))
return np.abs(
np.arctan2(
np.linalg.det(np.stack((v1_conv, v2_conv))),
np.dot(v1_conv, v2_conv),
)
)
@njit
def total_angle_between(v1, v2):
"""
Calculate total angle between v1 and v2. Resulting angle is in range [-pi, pi].
:param v1: first vector.
:type v1: np.array
:param v2: second vector.
:type v2: np.array
:return: angle between v1 and v2, in range [-pi, pi].
:rtype: float.
"""
v1_conv = v1.astype(np.dtype("float"))
v2_conv = v2.astype(np.dtype("float"))
return np.arctan2(
np.linalg.det(np.stack((v1_conv, v2_conv))), np.dot(v1_conv, v2_conv),
)
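# Quick sanity check (inputs hypothetical) for the two angle helpers above:
# angle_between returns the unsigned angle in [0, pi], while total_angle_between
# keeps the sign of the rotation in [-pi, pi], e.g.
#   angle_between(np.array([1, 0]), np.array([0, 1]))        # ~ pi/2
#   total_angle_between(np.array([0, 1]), np.array([1, 0]))  # ~ -pi/2 (clockwise)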
@njit
def dist_2d(v1, v2):
return math.sqrt((v1[0] - v2[0]) ** 2 + (v1[1] - v2[1]) ** 2)
@njit
def norm_2d(vector):
return math.sqrt(vector[0] ** 2 + vector[1] ** 2)
def deg_to_rad(deg):
return deg * np.pi / 180
def rad_to_deg(rad):
return rad * 180 / np.pi
def get_rot_matrix(theta):
"""
returns the rotation matrix given a theta value
rotates in the counter clockwise direction
"""
return np.asarray(
[[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]
)
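# Illustrative use (input hypothetical): rotating the x unit vector by pi/2
# counter-clockwise gives (approximately) the y unit vector:
#   get_rot_matrix(np.pi / 2) @ np.array([1.0, 0.0])  # -> ~[0., 1.]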
def arange_orientation_info(dim_vector_8):
# converts the 8 dim vector of orientation to
# a 9 dim vector, for visulization purposes
orient_disp_vector = np.zeros(9)
j = 0
for i in range(dim_vector_8.shape[0]):
if i == 4:
j += 1
orient_disp_vector[j] = dim_vector_8[i]
return orient_disp_vector
"""
def get_abs_orientation(agent_state, orientation_approximator):
#returns the current absolute binned orientation of the agent
#one of the 8 directions. Dim:8 (this is the default case)
#for the default case, it additionally returns a 9 dimensional vector
#if no orientation information is provided it returns 4.
#works for the orientation approximator
0 1 2
3 4
5 6 7
############
#for other cases, it just returns the orientation.
#if no orientation information is provided, it returns -1.
no_of_directions = len(orientation_approximator)
angle_diff= np.zeros(no_of_directions)
abs_approx_orientation = None
if no_of_directions==8: #the default
#will return the vector only if the orientation_approximator is the default 8-dir one.
abs_approx_orientation = np.zeros(9)
else:
abs_approx_orientation = np.zeros(no_of_directions)
orientation = agent_state['orientation']
if orientation is None:
#straight up
orientation = 1
elif np.linalg.norm(orientation)==0:
if no_of_directions==8:
orientation = 1
else:
orientation = 1
else:
for i in range(len(orientation_approximator)):
#print('The orientation val')
#print(orientation)
angle_diff[i] = angle_between(orientation_approximator[i], orientation)
orientation = np.argmin(angle_diff)
if no_of_directions == 8:
if orientation >=4:
orientation += 1
abs_approx_orientation[orientation] = 1
return abs_approx_orientation, orientation
return abs_approx_orientation, orientation
"""
def get_abs_orientation(agent_state, orientation_approximator):
"""
#returns the current absolute binned orientation of the agent
#one of the 8 directions. Dim:8 (this is the default case)
#for the default case, it additionally returns a 9 dimensional vector
#if no orientation information is provided it returns 4.
#works for the orientation approximator
0 1 2
7 3
6 5 4
############
#for other cases, it just returns the orientation.
#if no orientation information is provided, it returns -1.
"""
no_of_directions = len(orientation_approximator)
angle_diff = np.zeros(no_of_directions)
min_thresh = 0.001
abs_approx_orientation = None
if no_of_directions == 8: # the default
# will return the vector only if the orientation_approximator is the default 8-dir one.
abs_approx_orientation = np.zeros(9)
else:
abs_approx_orientation = np.zeros(no_of_directions)
orientation = agent_state["orientation"]
if orientation is None:
# straight up
orientation = 1
else:
for i in range(len(orientation_approximator)):
# print('The orientation val')
# print(orientation)
angle_diff[i] = angle_between(
orientation_approximator[i], orientation
)
orientation = np.argmin(angle_diff)
abs_approx_orientation[orientation] = 1
return abs_approx_orientation, orientation
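# Usage sketch (assuming the module's default 8-direction approximator):
#   one_hot, bin_idx = get_abs_orientation(agent_state, orientation_approximator)
# bin_idx is the approximator row closest in angle to agent_state["orientation"];
# a missing orientation defaults to bin 1 (straight up).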
def get_rel_orientation(prev_frame_info, agent_state, goal_state):
"""
Calculates and bins the angle between (agent_pos - goal_pos) and agent velocity.
in effect, this is the "error" in the agent's heading.
"""
# returns the relative orientation of the agent with the direction
# of the goal.
# Primarily for use in IRL
relative_orientation_vector = np.zeros(4)
vector_to_goal = goal_state - agent_state["position"]
if prev_frame_info is None:
agent_orientation = np.array([-1, 0])
else:
agent_orientation = (
agent_state["position"] - prev_frame_info["position"]
)
diff_in_angle = angle_between(vector_to_goal, agent_orientation)
# pdb.set_trace()
if diff_in_angle < np.pi / 8:
rel_orientation = 0
elif diff_in_angle < np.pi / 4 and diff_in_angle >= np.pi / 8:
rel_orientation = 1
elif diff_in_angle < np.pi * 3 / 4 and diff_in_angle >= np.pi / 4:
rel_orientation = 2
else:
rel_orientation = 3
relative_orientation_vector[rel_orientation] = 1
return relative_orientation_vector
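# Example (states hypothetical): an agent moving directly toward the goal has a
# heading error below pi/8, so the first bin fires -> array([1., 0., 0., 0.]).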
def get_rel_goal_orientation(
orientation_approximator,
rel_orient_conv,
agent_state,
agent_abs_orientation,
goal_state,
):
"""
Calculates a vector from the agent to the goal.
This vector is in the agent's coordinate system, e.g. zero degrees is forward.
This vector is binned into a one hot vector based on orientation_approximator.
"""
# returns the relative orientation of the goal wrt to the agent
# Dim:8
no_of_directions = len(orientation_approximator)
angle_diff = np.zeros(no_of_directions)
relative_orientation_vector = np.zeros(no_of_directions)
rot_matrix = get_rot_matrix(rel_orient_conv[agent_abs_orientation])
# translate the point so that the agent sits at the center of the coordinates
# before rtotation
vec_to_goal = goal_state - agent_state["position"]
# rotate the coordinates to get the relative coordinates wrt the agent
rel_coord_goal = np.matmul(rot_matrix, vec_to_goal)
relative_goal = {}
relative_goal["orientation"] = rel_coord_goal
relative_orientation_vector, _ = get_abs_orientation(
relative_goal, orientation_approximator
)
return relative_orientation_vector
def discretize_information(information, information_slabs):
# given a piece of information(scalar), this function returns the correct
# slab in which the information belongs, based on the slab information
# information_slab(list)provided
for i in range(len(information_slabs) - 1):
if (
information >= information_slabs[i]
and information < information_slabs[i + 1]
):
return i
# if does not classify in any information slabs
return None
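# Example (slabs hypothetical): with information_slabs = [0, 1, 2, 4],
# discretize_information(1.5, [0, 1, 2, 4]) returns 1 since 1 <= 1.5 < 2, and a
# value >= 4 falls outside every slab, returning None.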
def calculate_social_forces(
agent_state, obstacle_state, agent_width, obstacle_width, a, b, lambda_val
):
# agent_state and obstacle_state are dictionaries with the following information:
# position, orientation and speed
r_i_j = agent_width / 2 + obstacle_width / 2
d_i_j = np.linalg.norm(
agent_state["position"] - obstacle_state["position"]
)
@njit
def radial_density_features(agent_position, pedestrian_positions, radius):
"""
implements the 'density features' from:
IRL Algorithms and Features for Robot navigation in Crowds: Vasquez et. al
:param agent_position: position of agent.
:type agent_position: numpy array or tuple.
:param pedestrian_positions: list or array of pedestrian positions.
:type pedestrian_positions: list or np array of tuples or np arrays.
"""
pedestrian_count = 0
# Using indexing necessary for Numba to work
for ped_idx in range(len(pedestrian_positions)):
if dist_2d(pedestrian_positions[ped_idx], agent_position) <= radius:
pedestrian_count += 1
if pedestrian_count >= 5:
return np.array([0.0, 0.0, 1.0])
if pedestrian_count < 2:
return np.array([1.0, 0.0, 0.0])
elif 2 <= pedestrian_count < 5:
return np.array([0.0, 1.0, 0.0])
else:
raise ValueError
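# Example (positions hypothetical): three pedestrians inside the radius land in
# the middle bin (2 <= count < 5):
#   radial_density_features(np.zeros(2), np.array([[1., 0.], [0., 1.], [1., 1.]]), 2.0)
#   # -> array([0., 1., 0.])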
@njit
def speed_features(
agent_velocity,
pedestrian_velocities,
lower_threshold=0.015,
upper_threshold=0.025,
):
"""
Computes speed features as described in Vasquez et. al's paper: "Learning
to navigate through crowded environments".
:param agent_velocity: velocity of agent (robot)
:type agent_velocity: 2D np.array or tuple
:param pedestrian_velocities: velocities of pedestrians
:type pedestrian_velocities: list or np.array of 2d arrays or tuples.
:param lower_threshold: Lower magnitude of speed threshold threshold used
for binning. This is 0.015 in the paper.
:type lower_threshold: float
:param upper_threshold: Higher magnitude of speed threshold
used for binning. This is 0.025 in the paper.
:type upper_threshold: float
:return: magnitude feature np.array of shape (3,)
:rtype: float np.array
"""
assert lower_threshold < upper_threshold
feature = np.zeros(3)
for idx in range(len(pedestrian_velocities)):
pedestrian_vel = pedestrian_velocities[idx]
speed = dist_2d(pedestrian_vel, agent_velocity)
# put value into proper bin
if 0 <= speed < lower_threshold:
feature[0] += 1
elif lower_threshold <= speed < upper_threshold:
feature[1] += 1
elif speed >= upper_threshold:
feature[2] += 1
else:
raise ValueError(
"Error in binning speed. speed does not fit into any bin."
)
return feature
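# Example (velocities hypothetical): a single pedestrian moving identically to
# the agent has relative speed 0, which falls in the lowest bin:
#   speed_features(np.array([0.01, 0.0]), np.array([[0.01, 0.0]]))  # -> [1., 0., 0.]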
@njit
def orientation_features(
agent_position, agent_velocity, pedestrian_positions, pedestrian_velocities
):
"""
Computes the orientation features described in Vasquez et. al's paper:
"Learning to navigate through crowded environments".
:param agent_position: position of the agent (robot)
:type agent_position: 2d np.array or tuple
:param agent_velocity: velocity of the agent (robot)
:type agent_velocity: 2d np.array or tuple
:param pedestrian_positions: positions of pedestrians.
:type pedestrian_positions: np.array or list, containing 2d arrays or tuples.
:param pedestrian_velocities: velocities of pedestrians.
:type pedestrian_velocities: np.array or list, containing 2d arrays or tuples.
:return: orientation feature vector.
:rtype: float np.array of shape (3,)
"""
feature = np.zeros(3)
# Check that same number of pedestrian positions and velocities are passed in.
assert len(pedestrian_positions) == len(pedestrian_velocities)
for ped_id in range(len(pedestrian_positions)):
relative_pos = agent_position - pedestrian_positions[ped_id]
relative_vel = agent_velocity - pedestrian_velocities[ped_id]
# angle_between produces only positive angles
angle = angle_between(relative_pos, relative_vel)
# put into bins
# Bins adjusted to work with angle_between() (i.e. abs value of angles.)
if 0.75 * np.pi < angle <= np.pi:
feature[0] += 1
elif 0.25 * np.pi <= angle < 0.75 * np.pi:
feature[1] += 1
elif 0.0 <= angle < 0.25 * np.pi:
feature[2] += 1
else:
raise ValueError(
"Error in binning orientation. Orientation does not fit into any bin."
)
return feature
@njit
def velocity_features(
agent_position,
agent_velocity,
pedestrian_positions,
pedestrian_velocities,
lower_speed_threshold=0.015,
upper_speed_threshold=0.025,
):
"""
Computes the velocity features described in Vasquez et. al's paper:
"Learning to navigate through crowded environments".
:param agent_position: position of the agent (robot)
:type agent_position: 2d np.array or tuple
:param agent_velocity: velocity of the agent (robot)
:type agent_velocity: 2d np.array or tuple
:param pedestrian_positions: positions of pedestrians.
:type pedestrian_positions: 2d float np.array.
:param lower_speed_threshold: Lower magnitude of speed threshold
threshold used for binning. This is 0.015 in the paper.
:type lower_threshold: float
:param upper_speed_threshold: Higher magnitude of speed threshold
threshold used for binning. This is 0.025 in the paper.
:type upper_threshold: float
:param pedestrian_velocities: velocities of pedestrians.
:type pedestrian_velocities: 2d float np.array.
:param lower_threshold: Lower magnitude of speed threshold threshold used
for binning. This is 0.015 in the paper.
:type lower_threshold: float
:param upper_threshold: Higher magnitude of speed threshold threshold
used for binning. This is 0.025 in the paper.
:type upper_threshold: float
:return: orientation feature vector.
:rtype: float np.array of shape (3,)
"""
assert lower_speed_threshold < upper_speed_threshold
feature = np.zeros((3, 3))
assert len(pedestrian_positions) == len(pedestrian_velocities)
# used to group pedestrians with the same orientation bin together using
# their ID.
ped_sorted_by_orientation = [np.empty(0, dtype=np.int64)] * 3
for ped_id in range(len(pedestrian_positions)):
relative_pos = agent_position - pedestrian_positions[ped_id]
relative_vel = agent_velocity - pedestrian_velocities[ped_id]
# angle_between produces only positive angles
if (relative_pos == np.zeros(2)).all() or (
relative_vel == np.zeros(2)
).all():
# cannot calculate angle between zero vectors
angle = 0.0
else:
angle = angle_between(relative_pos, relative_vel)
# put into bins
# Bins adjusted to work with angle_between() (i.e. abs value of angles.)
if 0.75 * np.pi < angle <= np.pi:
ped_sorted_by_orientation[0] = np.append(
ped_sorted_by_orientation[0], ped_id
)
elif 0.25 * np.pi <= angle < 0.75 * np.pi:
ped_sorted_by_orientation[1] = np.append(
ped_sorted_by_orientation[1], ped_id
)
elif 0.0 <= angle < 0.25 * np.pi:
ped_sorted_by_orientation[2] = np.append(
ped_sorted_by_orientation[2], ped_id
)
else:
raise ValueError("Orientation does not fit into any bin.")
for idx, ped_ids in enumerate(ped_sorted_by_orientation):
velocities = pedestrian_velocities[ped_ids]
if not velocities.size:
continue  # skip empty orientation bins instead of aborting the loop
else:
mean_speeds = np.mean(np.abs(velocities))
# bin speeds
if 0 <= mean_speeds < lower_speed_threshold:
feature[idx, 0] = 1
elif lower_speed_threshold <= mean_speeds < upper_speed_threshold:
feature[idx, 1] = 1
elif mean_speeds >= upper_speed_threshold:
feature[idx, 2] = 1
else:
raise ValueError("Average speed does not fit in any bins.")
return feature.flatten()
def social_force_features(
agent_radius, agent_position, agent_velocity, pedestrian_positions
):
"""
Computes the social forces features described in Vasquez et. al's paper:
"Learning to navigate through crowded environments".
:param agent_radius: radius of agent(s) in the environment. Note: this is
the radius of the agent's graphical circle, not a radius around the
agent.
:type agent_radius: float.
:param agent_position: position of the agent (robot)
:type agent_position: 2d np.array or tuple
:param agent_velocity: velocity of the agent (robot)
:type agent_velocity: 2d np.array or tuple
:param pedestrian_positions: positions of pedestrians.
:type pedestrian_positions: 2d float np.array.
:param pedestrian_velocities: velocities of pedestrians.
:type pedestrian_velocities: 2d float np.array.
:return: orientation feature vector.
:rtype: float np.array of shape (3,)
"""
# in the paper formula, 'i' is our agent, while 'j's are the pedestrians.
rel_positions = pedestrian_positions - agent_position
rel_distances = np.linalg.norm(rel_positions, axis=1)
normalized_rel_positions = rel_positions / np.max(rel_distances)
assert rel_positions.shape == normalized_rel_positions.shape
rel_angles = np.zeros(rel_distances.shape)
# used to group pedestrians with the same orientation bin together using
# their ID.
feature = np.zeros(3)
ped_orientation_bins = [np.empty(0, dtype=np.int64)] * 3
for ped_id in range(len(pedestrian_positions)):
relative_pos = rel_positions[ped_id]
# angle_between produces only positive angles
angle = angle_between(relative_pos, agent_velocity)
rel_angles[ped_id] = angle
# put into bins
# Bins adjusted to work with angle_between() (i.e. abs value of angles.)
if 0.75 * np.pi <= angle <= np.pi:
ped_orientation_bins[0] = np.append(
ped_orientation_bins[0], ped_id
)
elif 0.25 * np.pi <= angle < 0.75 * np.pi:
ped_orientation_bins[1] = np.append(
ped_orientation_bins[1], ped_id
)
elif 0.0 <= angle < 0.25 * np.pi:
ped_orientation_bins[2] = np.append(
ped_orientation_bins[2], ped_id
)
else:
raise ValueError("Orientation does not fit into any bin.")
exp_multiplier = np.exp(2 * agent_radius - rel_distances).reshape(-1, 1)
anisotropic_term = (2.0 - 0.5 * (1.0 + np.cos(rel_angles))).reshape(-1, 1)
social_forces = (
exp_multiplier * normalized_rel_positions * anisotropic_term
)
forces_above_threshold = np.linalg.norm(social_forces, axis=1) > 0.5
feature[0] = np.sum(forces_above_threshold[ped_orientation_bins[0]])
feature[1] = np.sum(forces_above_threshold[ped_orientation_bins[1]])
feature[2] = np.sum(forces_above_threshold[ped_orientation_bins[2]])
return feature
@njit
def angle_to_goal_features(goal_position, agent_position, agent_orientation):
"""
computes features based on the error in the agent's heading towards the
goal. Error is the angle between agent heading vector and vector
(goal_pos - agent_pos). The features are binary features based on where
the angle fits in the bins [0-pi/8, pi/8-pi/4, pi/4-3/4pi, 3/4pi-pi].
This is meant to mimic the goal_rel_orientation function.
:param goal_position: position of the goal.
:type goal_position: 2d numpy vector.
:param agent_position: position of agent.
:type agent_position: 2d numpy vector.
:param agent_orientation: orientation vector of agent.
:type agent_orientation: 2d numpy vector.
:raises ValueError: If angle does not fit in the [0,pi] interval,
something unexpected has happened.
:return: feature vector representing binned angles.
:rtype: float np.array
"""
features = np.zeros(4)
vector_to_goal = goal_position - agent_position
angle = angle_between(agent_orientation, vector_to_goal)
# bin in angle bins
if 0.0 <= angle < 0.125 * np.pi:
features[0] = 1.0
elif 0.125 * np.pi <= angle < 0.25 * np.pi:
features[1] = 1.0
elif 0.25 * np.pi <= angle < 0.75 * np.pi:
features[2] = 1.0
elif 0.75 * np.pi <= angle <= np.pi:
features[3] = 1.0
else:
raise ValueError("Cannot bin angle in [0,pi] interval.")
return features
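# Example (states hypothetical): agent at the origin heading along +x with the
# goal straight ahead gives zero heading error, so the first bin fires:
#   angle_to_goal_features(np.array([5., 0.]), np.zeros(2), np.array([1., 0.]))
#   # -> array([1., 0., 0., 0.])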
@njit
def vector_to_goal_features(goal_position, agent_position, agent_orientation):
features = np.zeros(8)
vector_to_goal = goal_position - agent_position
angle = total_angle_between(agent_orientation, vector_to_goal)
# mimic finding closest relative vector by binning angle
if -0.125 * np.pi <= angle < 0.125 * np.pi:
features[0] = 1.0
elif 0.125 * np.pi <= angle < 0.375 * np.pi:
features[1] = 1.0
elif 0.375 * np.pi <= angle < 0.625 * np.pi:
features[2] = 1.0
elif 0.625 * np.pi <= angle < 0.875 * np.pi:
features[3] = 1.0
elif 0.875 * np.pi <= angle <= np.pi:
features[4] = 1.0
elif -np.pi <= angle < -0.875 * np.pi:
features[4] = 1.0
elif -0.875 * np.pi <= angle < -0.625 * np.pi:
features[5] = 1.0
elif -0.625 * np.pi <= angle < -0.375 * np.pi:
features[6] = 1.0
elif -0.375 * np.pi <= angle < -0.125 * np.pi:
features[7] = 1.0
else:
raise ValueError("Faled to bin angles in [-pi, pi] range.")
return features
@njit
def orientation_change_features(new_orientation, old_orientation):
thresholds = np.array(
[0, np.pi / 9, 2 * np.pi / 9, np.pi * 3 / 9, 4 * np.pi / 9]
)
if old_orientation is None:
print("Warning: old orientation is none, assuming old=new.")
orientation_change = 0.0
else:
orientation_change = angle_between(new_orientation, old_orientation)
# bin based on thresholds
features = np.zeros(5)
index = np.argmin(np.abs(orientation_change - thresholds))
features[index] = 1.0
return features
@njit
def SAM_features(
agent_position,
agent_velocity,
pedestrian_positions,
pedestrian_velocities,
inner_radius,
outer_radius,
lower_speed_threshold,
upper_speed_threshold,
):
"""
Calculates entire sam features based on Fahad et. al's 2018 paper:
"Learning How Pedestrians Navigate: A Deep Inverse Reinforcement Learning Approach"
:param agent_position: Position of the agent.
:type agent_position: 2d numpy float array.
:param agent_velocity: Agent velocity.
:type agent_velocity: 2d numpy float array.
:param pedestrian_positions: Px2 vector of the position of all pedestrians.
:type pedestrian_positions: Px2 numpy float array where P is the number
of pedestrians.
:param pedestrian_velocities: Px2 vector of the velocity of all pedestrians.
:type pedestrian_velocities: Px2 numpy float array where P is the number
of pedestrians.
:param inner_radius: Radius of inner circle of feature extractor.
:type inner_radius: float.
:param outer_radius: Radius of outer circle of feature extractor.
:type outer_radius: float.
:param lower_speed_threshold: lower binning threshold for speed.
:type lower_speed_threshold: float.
:param upper_speed_threshold: upper binning threshold for speed.
:type upper_speed_threshold: float.
:return: tuple (SAM_features, density) where SAM_features are the
features and density is total number of pedestrians inside all bins.
:rtype: tuples(numpy 1d array, float)
"""
num_pedestrians = pedestrian_positions.shape[0]
# classify pedestrians in either inner or outer ring
ring_designation = np.zeros(num_pedestrians)
for idx in range(num_pedestrians):
ped_distance = dist_2d(agent_position, pedestrian_positions[idx])
if ped_distance <= outer_radius:
if ped_distance > inner_radius:
ring_designation[idx] = 2
else:
ring_designation[idx] = 1
inner_ped_positions = pedestrian_positions[ring_designation == 1]
inner_ped_velocities = pedestrian_velocities[ring_designation == 1]
outer_ped_positions = pedestrian_positions[ring_designation == 2]
outer_ped_velocities = pedestrian_velocities[ring_designation == 2]
assert inner_ped_positions.shape[0] == inner_ped_velocities.shape[0]
assert outer_ped_positions.shape[0] == outer_ped_velocities.shape[0]
num_inner_pedestrians = inner_ped_positions.shape[0]
num_outer_pedestrians = outer_ped_positions.shape[0]
# classify pedestrians in each bin, and add up their velocities per bin
peds_in_bin_counts = np.zeros(10)
average_velocities = np.zeros((10, 2))
for idx in range(num_inner_pedestrians):
ped_relative_position = inner_ped_positions[idx] - agent_position
ped_velocity = inner_ped_velocities[idx]
angle = total_angle_between(agent_velocity, ped_relative_position)
if -0.25 * np.pi < angle < 0.25 * np.pi:
peds_in_bin_counts[0] += 1
average_velocities[0] += ped_velocity
elif 0.25 * np.pi <= angle < 0.75 * np.pi:
peds_in_bin_counts[1] += 1
average_velocities[1] += ped_velocity
elif 0.75 * np.pi <= angle < np.pi or -np.pi < angle < -0.75 * np.pi:
peds_in_bin_counts[2] += 1
average_velocities[2] += ped_velocity
elif -0.75 * np.pi <= angle <= -0.25 * np.pi:
peds_in_bin_counts[3] += 1
average_velocities[3] += ped_velocity
else:
raise ValueError("angle couldn't be binned.")
for idx in range(num_outer_pedestrians):
ped_relative_position = outer_ped_positions[idx] - agent_position
ped_velocity = outer_ped_velocities[idx]
angle = total_angle_between(agent_velocity, ped_relative_position)
if -0.25 * np.pi < angle < 0.25 * np.pi:
peds_in_bin_counts[4] += 1
average_velocities[4] += ped_velocity
elif 0.25 * np.pi <= angle < 0.5 * np.pi:
peds_in_bin_counts[9] += 1
average_velocities[9] += ped_velocity
elif 0.5 * np.pi <= angle < 0.75 * np.pi:
peds_in_bin_counts[8] += 1
average_velocities[8] += ped_velocity
elif 0.75 * np.pi <= angle < np.pi or -np.pi < angle < -0.75 * np.pi:
peds_in_bin_counts[7] += 1
average_velocities[7] += ped_velocity
elif -0.5 * np.pi <= angle < -0.25 * np.pi:
peds_in_bin_counts[5] += 1
average_velocities[5] += ped_velocity
elif -0.75 * np.pi <= angle < -0.5 * np.pi:
peds_in_bin_counts[6] += 1
average_velocities[6] += ped_velocity
else:
raise ValueError("angle couldn't be binned.")
nonzero_mask = peds_in_bin_counts != 0
average_velocities[nonzero_mask] /= peds_in_bin_counts[
nonzero_mask
].reshape(-1, 1)
heading_feat_vect = np.zeros((10, 3))
velocity_feat_vect = np.zeros((10, 3))
# 0 degree degree vector used as reference for judging absolute angles.
angle_origin = np.array([1.0, 0.0])
for idx in range(len(average_velocities)):
if peds_in_bin_counts[idx] == 0.0:
continue
relative_velocity = agent_velocity - average_velocities[idx]
heading = angle_between(relative_velocity, agent_velocity)
heading_thresholds = np.array([0.25 * np.pi, 0.75 * np.pi])
heading_idx = np.digitize(np.array(heading), heading_thresholds)
# if __name__ == "__main__":
# sift_kmeans()
# # for i in range(100,500,100):
# # filename="pickle_model"+str(i)+".pkl"
# # model=pickle.load(open(filename, 'rb'))
# # # print (len(model.cluster_centers_))
# # # for m in model.cluster_centers_:
# # # print(len(m))
# # # # print(len(model))
# # # break
# # train_ft,train_label=histogram(model,i)
# # le = preprocessing.LabelEncoder()
# # train_enc_label=le.fit_transform(list(train_label))
# # # print(enc_label)
# # test_ft,test_label=histogram_test(model,i)
# # le1 = preprocessing.LabelEncoder()
# # test_enc_label=le1.fit_transform(list(test_label))
# # error=[]
# # for j in range(5, 45):
# # knn = KNeighborsClassifier(n_neighbors=j)
# # knn.fit(list(train_ft), train_enc_label)
# # pred_i = knn.predict(list(test_ft))
# # print(confusion_matrix(test_enc_label, pred_i))
# # print(classification_report(test_enc_label, pred_i))
# # error.append(np.mean(pred_i != test_enc_label))
# # plt.figure(figsize=(12, 6))
# # plt.plot(range(5, 45), error, color='red', linestyle='dashed', marker='o',
# # markerfacecolor='blue', markersize=10)
# # plt.title('Error Rate K Value')
# # plt.xlabel('K Value')
# # plt.ylabel('Mean Error')
# # plt.savefig("Error_for_"+str(i)+"words.png")
import numpy as np
import os
from sklearn import *
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.neighbors import KNeighborsClassifier
from sklearn.cluster import MiniBatchKMeans
import cv2
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import pickle
from sklearn import preprocessing
from sklearn.cluster import KMeans
# defining feature extractor that we want to use
extractor = cv2.xfeatures2d.SIFT_create()
def features(image, extractor):
keypoints, descriptors = extractor.detectAndCompute(image, None)
#print (keypoints)
return keypoints, descriptors
def sift_kmeans():
labels=['coast','highway','forest','inside_city','mountain','opencountry','street','tallbuilding']
sift_keypoints=[]
for label in labels:
path='train/'+label
for imgfile in os.listdir(path):
img = cv2.imread(os.path.join(path,imgfile),1)
kp,des = features(img,extractor)
sift_keypoints.append(des)
sift_keypoints=np.asarray(sift_keypoints)
sift_keypoints=np.concatenate(sift_keypoints, axis=0)
    # with the descriptors detected, let's cluster them
print("Training kmeans")
for num_cluster in range(100,500,100):
print("No. of cluster = "+str(num_cluster))
kmeans = MiniBatchKMeans(n_clusters=num_cluster,random_state=0,init_size=int(num_cluster*1.2)).fit(sift_keypoints)
print("Done Kmeans")
pkl_filename = "pickle_model"+str(num_cluster)+".pkl"
with open(pkl_filename, 'wb') as pkl_file:
pickle.dump(kmeans,pkl_file)
#return the learned model
def histogram_test(model,num_cluster):
feature_vectors=[]
class_vectors=[]
labels=['coast','highway','forest','inside_city','mountain','opencountry','street','tallbuilding']
for label in labels:
print("Testing")
path='test/'+label
print(label)
# dir_hist=os.path.join('hist',label)
# if os.path.isdir(dir_hist)==False:
# os.makedirs(dir_hist)
for imgfile in os.listdir(path):
img = cv2.imread(os.path.join(path,imgfile),1)
kp,des = features(img,extractor)
predict_kmeans=model.predict(des)
# print(predict_kmeans)
#calculates the histogram
hist=[0 for m in range(0,num_cluster)]
for f in predict_kmeans:
hist[f]+=1
# hist, bin_edges=np.histogram(predict_kmeans,bins=num_cluster)
# n, bins, patches = plt.hist(hist, bin_edges, facecolor='blue', alpha=0.5)
# print(dir_hist+'/'+imgfile[:-3]+'png')
# plt.savefig(dir_hist+'/'+imgfile[:-3]+'png')
feature_vectors.append(hist)
class_vectors.append(label)
feature_vectors=np.asarray(feature_vectors)
class_vectors=np.asarray(class_vectors)
return feature_vectors,class_vectors
def histogram(model,num_cluster):
feature_vectors=[]
class_vectors=[]
labels=['coast','highway','forest','inside_city','mountain','opencountry','street','tallbuilding']
for label in labels:
path='train/'+label
print(label)
# dir_hist=os.path.join('hist',label)
# if os.path.isdir(dir_hist)==False:
# os.makedirs(dir_hist)
for imgfile in os.listdir(path):
img = cv2.imread(os.path.join(path,imgfile),1)
kp,des = features(img,extractor)
predict_kmeans=model.predict(des)
# print(predict_kmeans)
# print(predict_kmeans)
#calculates the histogram
hist=[0 for m in range(0,num_cluster)]
for f in predict_kmeans:
hist[f]+=1
# hist, bin_edges=np.histogram(np.array(predict_kmeans),bins=num_cluster)
# print(hist)
# print(bin_edges)
# n, bins, patches = plt.hist(hist, bin_edges, facecolor='blue', alpha=0.5)
# print(dir_hist+'/'+imgfile[:-3]+'png')
# plt.savefig(dir_hist+'/'+imgfile[:-3]+'png')
feature_vectors.append(hist)
class_vectors.append(label)
feature_vectors=np.asarray(feature_vectors)
    class_vectors=np.asarray(class_vectors)
    return feature_vectors,class_vectors
import cv2
import numpy as np
def valid_odd_size(size):
"""
Validates that a kernel shape is of odd ints and of with 2 dimensions
:param size: the shape (size) to be checked
:return: False if size is invalid
"""
if type(size) not in (list, tuple):
return False
if len(size) != 2:
return False
if size[0] % 2 != 1 or size[1] % 2 != 1:
return False
return True
def cross_kernel(size):
r"""
Returns a cross (ones in a cross) kernel for morphological functions
Example of a (5,5) cross:
| \| 0 0 1 0 0 \|
| \| 0 0 1 0 0 \|
| \| 1 1 1 1 1 \|
| \| 0 0 1 0 0 \|
| \| 0 0 1 0 0 \|
:param size: a tuple of size 2 of 2 odd integers denoting the size of the kernel
        e.g. (5, 5)
:return: the `numpy.array` of the cross shape
"""
if not valid_odd_size(size):
raise ValueError(f"Invalid kernel size given, make sure the size (width, height) are both positive and odd,"
f" size given {size}")
return cv2.getStructuringElement(cv2.MORPH_CROSS, ksize=size)
def rectangle_kernel(size):
r"""
Returns a rectangle (all ones) kernel for morphological functions
Example of a (5,5) rectangle:
| \| 1 1 1 1 1 \|
| \| 1 1 1 1 1 \|
| \| 1 1 1 1 1 \|
| \| 1 1 1 1 1 \|
| \| 1 1 1 1 1 \|
:param size: a tuple of size 2 of 2 odd integers denoting the size of the kernel
        e.g. (5, 5)
    :return: the `numpy.array` of the rectangle shape
"""
return cv2.getStructuringElement(cv2.MORPH_RECT, ksize=size)
def ellipse_kernel(size):
r"""
Returns an ellipse (ones in the shape of an ellipse) kernel for morphological functions
Example of a (5,5) ellipse:
| \| 0 0 1 0 0 \|
| \| 1 1 1 1 1 \|
| \| 1 1 1 1 1 \|
| \| 1 1 1 1 1 \|
| \| 0 0 1 0 0 \|
:param size: a tuple of size 2 of 2 odd integers denoting the size of the kernel
        e.g. (5, 5)
:return: the kernel
"""
if not valid_odd_size(size):
raise ValueError(f"Invalid kernel size given, make sure the size (width, height) are both positive and odd,"
f" size given {size}")
return cv2.getStructuringElement(cv2.MORPH_ELLIPSE, ksize=size)
def horizontal_line_kernel(size):
r"""
Returns a horizontal line (a horizontal line of ones) kernel for morphological functions
Example of a (5,5) horizontal line:
| \| 0 0 0 0 0 \|
| \| 0 0 0 0 0 \|
| \| 1 1 1 1 1 \|
| \| 0 0 0 0 0 \|
| \| 0 0 0 0 0 \|
:param size: a tuple of size 2 of 2 odd integers denoting the size of the kernel
        e.g. (5, 5)
:return: the kernel
"""
if not valid_odd_size(size):
raise ValueError(f"Invalid kernel size given, make sure the size (width, height) are both positive and odd,"
f" size given {size}")
kernel = np.zeros(size, dtype=np.uint8)
kernel[int((size[0] - 1) / 2),] = 1
return kernel
def vertical_line_kernel(size):
r"""
Returns a vertical line (a vertical line of ones) kernel for morphological functions
Example of a (5,5) vertical line:
| \| 0 0 1 0 0 \|
| \| 0 0 1 0 0 \|
| \| 0 0 1 0 0 \|
| \| 0 0 1 0 0 \|
| \| 0 0 1 0 0 \|
:param size: a tuple of size 2 of 2 odd integers denoting the size of the kernel
        e.g. (5, 5)
:return: the kernel
"""
if not valid_odd_size(size):
raise ValueError(f"Invalid kernel size given, make sure the size (width, height) are both positive and odd,"
f" size given {size}")
    kernel = np.zeros(size, dtype=np.uint8)
    kernel[:, int((size[1] - 1) / 2)] = 1
    return kernel
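# A minimal usage sketch (not part of the original module; "mask.png" is a
# hypothetical input): build kernels and feed them to OpenCV's morphology ops.
if __name__ == "__main__":
    img = cv2.imread("mask.png", cv2.IMREAD_GRAYSCALE)
    opened = cv2.morphologyEx(img, cv2.MORPH_OPEN, ellipse_kernel((5, 5)))
    dilated = cv2.dilate(img, cross_kernel((3, 3)))
    cv2.imwrite("mask_opened.png", opened)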
import glob
import numpy as np
import pandas as pd
from shapely.geometry import LineString,MultiLineString,Point,MultiPoint
from shapely.ops import linemerge
import pyproj
from sklearn.ensemble import RandomForestClassifier,ExtraTreesClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import VotingClassifier
from sklearn.svm import SVC
import xgboost
from tqdm import tqdm
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import cross_val_predict
from sklearn.metrics import confusion_matrix,accuracy_score
import pickle
import os
import argparse
np.random.seed(10)
from param import *
#get an ensemble of 5 classifiers from scikit-learn i.e randome_forest, extra_tree,svc,KNeighbours
#and xgboost classifier
#the parameters are tuned for this dataset, set class_weights to balanced as the start to end
#goals have different distribution
def get_ensemble_of_classifiers(vote=True):
clfs={}
clf1=ExtraTreesClassifier(100,class_weight='balanced',n_jobs=-1)
clfs['extra_tree']=clf1
clf2=RandomForestClassifier(50,class_weight='balanced',n_jobs=-1)
clfs['random_forest']=clf2
clf3=KNeighborsClassifier(20,weights='distance',n_jobs=-1)
clfs['knn']=clf3
clf4=xgboost.XGBClassifier(n_estimators=100,subsample=.7)
clfs['xgb']=clf4
if vote:
clf5=SVC(0.1)
cvote=VotingClassifier(estimators=[('et', clf1), ('rf', clf2), ('kn', clf3),('xgb',clf4),('svc',clf5)], voting='hard')
return {'cvote':cvote}
else:
clf5=SVC(0.1,class_weight='balanced',probability=True)
clfs['svc']=clf5
return clfs
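# Minimal usage sketch (X_train / y_train are hypothetical arrays, e.g. the
# output of get_train_feat below):
#   clfs = get_ensemble_of_classifiers(vote=False)
#   for name, clf in clfs.items():
#       print(name, cross_val_score(clf, X_train, y_train, cv=5).mean())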
# get the closest and farthest distance for a track to all the goals
def closest_farthest(track):
closest_to_track=[]
farthest_to_track=[]
for i in range(0,goal.shape[0]):
point2=Point(goal[['lon','lat']].values[i])
cd=[]
for item in track:
point1=Point(item)
_,_,distance = geod.inv(point1.x, point1.y, point2.x, point2.y)
cd.append(distance)
closest_to_track.append(np.min(cd))
farthest_to_track.append(np.max(cd))
return closest_to_track,farthest_to_track
# get distance to a goal given a point on the track
def goal_dist(point1):
d={}
for i in range(0,goal.shape[0]):
point2=Point(goal[['lon','lat']].values[i])
angle1,angle2,distance = geod.inv(point1.x, point1.y, point2.x, point2.y)
d[i]=distance
return d.values()
# gets distance features for training and testing
# the feature vector includes closest and nearest distances
# and distance to goal from the start or end points of track
def get_distances(df,goal,trim=None):
start,end=Point(df[['lon','lat']].values[0]),Point(df[['lon','lat']].values[-1])
duration=df.elapsedTime_sec.values[-1]
_,_,total_distance_covered = geod.inv(start.x, start.y, end.x, end.y)
distance_to_goal_from_start=goal_dist(start)
distance_to_goal_from_end=goal_dist(end)
closest,farthest=closest_farthest(df[['lon','lat']].values)
return duration,total_distance_covered,distance_to_goal_from_start,distance_to_goal_from_end,closest,farthest
# similar to get_distance function above but additionally trims the start and end point randomly
def get_distances_multi(df,goal):
# how much to trim from start
trim_start=np.random.randint(TRIM_START,TRIM_END)
idx_s=np.where(df.elapsedTime_sec>trim_start)[0][0]
start=Point(df[['lon','lat']].values[idx_s])
# how much to trim from end
trim_end=np.random.randint(TRIM_START,TRIM_END)
idx_e=np.where(df.elapsedTime_sec>df.elapsedTime_sec.values[-1]-trim_end)[0][0]
end=Point(df[['lon','lat']].values[idx_e])
_,_,total_distance_covered = geod.inv(start.x, start.y, end.x, end.y)
distance_to_goal_from_start=goal_dist(start)
distance_to_goal_from_end=goal_dist(end)
duration=df.elapsedTime_sec.values[idx_e]
closest,farthest=closest_farthest(df[['lon','lat']].values[idx_s:idx_e])
return duration,total_distance_covered,distance_to_goal_from_start,distance_to_goal_from_end,closest,farthest
# get the train feature vector. The feature vector are aggressively augmented
# i.e for each feature vector 20 tracks with random trims are created from start and end
# also include other feature such as age, gender,duration,velocity and total distance covered
def get_train_feat(datafiles):
    print('Multi trim features, 20 samples each')
xfeat={}
for f in tqdm(datafiles):
for i in range(0,20):
df = pd.read_csv(f)
if i==0:
duration,total_distance_covered,distance_to_goal_from_start,distance_to_goal_from_end,cd,fd=get_distances(df,goal,trim=None)
else:
duration,total_distance_covered,distance_to_goal_from_start,distance_to_goal_from_end,cd,fd=get_distances_multi(df,goal)
feat=[duration,total_distance_covered]
feat.extend(distance_to_goal_from_start)
feat.extend(distance_to_goal_from_end)
feat.extend(cd)
feat.extend(fd)
if df.tripID.values[0] not in xfeat.keys():
xfeat[df.tripID.values[0]]=[feat]
else:
xfeat[df.tripID.values[0]].append(feat)
train_info['gender']=pd.factorize(train_info['gender'])[0]
train_info['age']=train_info['age'].fillna(train_info['age'].mean())
features=[]
labels_start=[]
labels_end=[]
for i,k in enumerate(train_info.tripID.values):
for item in xfeat[k]:
feat=train_info.loc[k][['age','gender']].values.tolist()
duration=item[0]
velocity=item[1]/duration
feat.extend([duration,velocity])
feat.extend(item)
features.append(feat)
labels_start.append(train_info.iloc[i]['startLocID'])
labels_end.append(train_info.iloc[i]['destLocID'])
features=np.asarray(features).astype('float32')
labels_start=np.asarray(labels_start).astype('int')
labels_end=np.asarray(labels_end).astype('int')
if SHUFFLE:
        idx = np.arange(len(features))  # range() cannot be shuffled in Python 3
        np.random.shuffle(idx)
        features = features[idx]
        labels_start = labels_start[idx]
        labels_end = labels_end[idx]
    # hand back the assembled training set
    return features, labels_start, labels_end
import os
import glob
import wget
import time
import subprocess
import shlex
import sys
import warnings
import random
from Bio.SeqUtils import seq1
from Bio.PDB.PDBParser import PDBParser
from Bio import AlignIO
from sklearn.base import TransformerMixin
from sklearn.preprocessing import StandardScaler, Normalizer , MinMaxScaler , RobustScaler
from sklearn.decomposition import PCA
sys.path.append('./ProFET/ProFET/feat_extract/')
import FeatureGen
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
import h5py
#PCA and scaler
class NDSRobust(TransformerMixin):
def __init__(self, **kwargs):
self._scaler = RobustScaler(copy=True, **kwargs)
self._orig_shape = None
def fit(self, X, **kwargs):
X = np.array(X)
# Save the original shape to reshape the flattened X later
# back to its original shape
if len(X.shape) > 1:
self._orig_shape = X.shape[1:]
X = self._flatten(X)
self._scaler.fit(X, **kwargs)
return self
def transform(self, X, **kwargs):
X = np.array(X)
X = self._flatten(X)
X = self._scaler.transform(X, **kwargs)
X = self._reshape(X)
return X
def inverse_transform(self, X, **kwargs):
X = np.array(X)
X = self._flatten(X)
X = self._scaler.inverse_transform(X, **kwargs)
X = self._reshape(X)
return X
def _flatten(self, X):
# Reshape X to <= 2 dimensions
if len(X.shape) > 2:
n_dims = np.prod(self._orig_shape)
X = X.reshape(-1, n_dims)
return X
def _reshape(self, X):
# Reshape X back to it's original shape
if len(X.shape) >= 2:
X = X.reshape(-1, *self._orig_shape)
return X
#ndimensional PCA for arrays
class NDSPCA(TransformerMixin):
def __init__(self, **kwargs):
self._scaler = PCA(copy = True, **kwargs)
self._orig_shape = None
def fit(self, X, **kwargs):
X = np.array(X)
# Save the original shape to reshape the flattened X later
# back to its original shape
if len(X.shape) > 1:
self._orig_shape = X.shape[1:]
X = self._flatten(X)
self._scaler.fit(X, **kwargs)
self.explained_variance_ratio_ = self._scaler.explained_variance_ratio_
self.components_ =self._scaler.components_
return self
def transform(self, X, **kwargs):
X = np.array(X)
X = self._flatten(X)
X = self._scaler.transform(X, **kwargs)
return X
def inverse_transform(self, X, **kwargs):
X = np.array(X)
X = self._flatten(X)
X = self._scaler.inverse_transform(X, **kwargs)
X = self._reshape(X)
return X
def _flatten(self, X):
# Reshape X to <= 2 dimensions
if len(X.shape) > 2:
n_dims = np.prod(self._orig_shape)
X = X.reshape(-1, n_dims)
return X
def _reshape(self, X):
# Reshape X back to it's original shape
if len(X.shape) >= 2:
X = X.reshape(-1, *self._orig_shape)
return X
#fit the components of the output space
#stacked distmats (on the 1st axis)
def fit_y( y , components = 300 , FFT = True ):
if FFT == True:
        # go through a stack of structural distmats; these should be 0-padded to all fit in one array
y = np.stack([ np.fft.rfft2(y[i,:,:]) for i in range(y.shape[0])] )
print(y.shape)
y = np.hstack( [ np.real(y) , np.imag(y)] )
print(y.shape)
ndpca = NDSPCA(n_components=components)
ndpca.fit(y)
print('explained variance')
print(np.sum(ndpca.explained_variance_ratio_))
y = ndpca.transform(y)
scaler0 = RobustScaler( )
scaler0.fit(y)
return scaler0, ndpca
def transform_y(y, scaler0, ndpca, FFT = False):
if FFT == True:
y = np.stack([np.fft.rfft2(y[i,:,:]) for i in range(y.shape[0])])
print(y.shape)
y = np.hstack( [ np.real(y) , np.imag(y)] )
y = ndpca.transform(y)
print(y.shape)
y = scaler0.transform(y)
return y
def inverse_transform_y(y, scaler0, ndpca, FFT=False):
y = scaler0.inverse_transform(y)
y = ndpca.inverse_transform(y)
if FFT == True:
split = int(y.shape[1]/2)
y = np.stack([ np.fft.irfft2(y[i,:split,:] + 1j*y[i,split:,:]) for i in range(y.shape[0]) ] )
return y
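# Round-trip sketch (assumes `dists` is a zero-padded stack of distance
# matrices, shape (n_samples, n, n)):
#   scaler0, ndpca = fit_y(dists, components=100, FFT=True)
#   y_low = transform_y(dists, scaler0, ndpca, FFT=True)
#   dists_rec = inverse_transform_y(y_low, scaler0, ndpca, FFT=True)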
#fit the components of the in space
#stacked align voxels (on the 1st axis)
def fit_x(x, components = 300, FFT = True):
if FFT == True:
        # go through a stack of align voxels; these should be 0-padded to all fit in one array
x = np.stack([ np.fft.rfftn(x[i,:,:,:]) for i in range(x.shape[0])] )
print(x.shape)
x = np.hstack( [ np.real(x) , np.imag(x)] )
print(x.shape)
ndpca = NDSPCA(n_components=components)
ndpca.fit(x)
print('explained variance')
print(np.sum(ndpca.explained_variance_ratio_))
x = ndpca.transform(x)
scaler0 = RobustScaler( )
scaler0.fit(x)
return scaler0, ndpca
def transform_x(x, scaler0, ndpca, FFT = False):
if FFT == True:
x = np.stack([ np.fft.rfftn(x[i,:,:,:]) for i in range(x.shape[0])] )
print(x.shape)
x = np.hstack( [ np.real(x) , np.imag(x)] )
x = ndpca.transform(x)
print(x.shape)
x = scaler0.transform(x)
return x
#todo -- check the split is happening in the right dimension
def inverse_transform_x(x, scaler0, ndpca, FFT=False):
x = scaler0.inverse_transform(x)
x = ndpca.inverse_transform(x)
if FFT == True:
split = int(x.shape[1]/2)
x = np.stack([ np.fft.irfftn(x[i,:split,:,:] + 1j*x[i,split:,:,:]) for i in range(x.shape[0]) ] )
return x
#get align files
def runclustalo( infile , runIdentifier, path = 'clustalo' , outdir='./', args = '' , verbose = False):
if verbose == True:
print( infile , runIdentifier , path , outdir )
#i usually use filenames that reflect what the pipeline has done until that step
outfile= outdir+runIdentifier+infile+".aln.fasta"
#here we write the command as a string using all the args
args = path + ' -i '+ infile +' -o '+ outfile + ' ' +args
args = shlex.split(args)
if verbose == True:
print(args)
p = subprocess.Popen(args )
#return the opened process and the file it's creating
    #we can also use the communicate function later to grab stdout if we need to
return p , outfile
#TODO - add sequence to align
def alnFileToArray(filename, returnMsa = False):
alnfile = filename
msa = AlignIO.read(alnfile , format = 'fasta')
align_array = np.array([ list(rec.upper()) for rec in msa], np.character)
if returnMsa:
return align_array, msa
return align_array
def alnArrayLineToSequence(align_array, index):
seq = ''
for aa in align_array[index]:
seq += aa.decode('utf-8')
return seq
#generate align list
def generateAlignList(directory = 'alns', returnMsa = False):
aligns = list()
msas = list()
#read through align files to get align arrays list
for file in os.listdir(directory):
if file.endswith('.fasta'):
aligns.append(alnFileToArray(directory+'/'+file, returnMsa)[0])
if returnMsa:
msas.append(alnFileToArray(directory+'/'+file, returnMsa)[1])
if returnMsa:
return aligns, msas
return aligns
#find biggest align shape (for padding) - aligns is a list of arrays
def biggestAlignShape(aligns):
longestProts = 0
mostProts = 0
for aln in aligns:
if aln.shape[0] > mostProts:
mostProts = aln.shape[0]
if aln.shape[1] > longestProts:
longestProts = aln.shape[1]
return mostProts, longestProts
def rundssp( infile , runIdentifier, path = 'dssp' , outdir='./', args = '' , verbose = False):
if verbose == True:
print( infile , runIdentifier , path , outdir )
#i usually use filenames that reflect what the pipeline has done until that step
outfile= outdir+runIdentifier+infile+".dssp"
#here we write the command as a string using all the args
args = path + ' -i '+ infile +' -o '+ outfile + ' ' +args
args = shlex.split(args)
if verbose == True:
print(args)
p = subprocess.Popen(args)
#return the opened process and the file it's creating
    #we can also use the communicate function later to grab stdout if we need to
return p , outfile
def dssp2pandas(dsspstr):
#read the dssp file format into a pandas dataframe
start = False
lines = {}
count = 0
for l in dsspstr.split('\n'):
if '#' in l:
start = True
if start == True:
if count > 0:
lines[count] = dict(zip(header,l.split()))
else:
header = l.split()
count +=1
df = pd.DataFrame.from_dict( lines , orient = 'index')
return df
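# Usage sketch chaining the runner and the parser (file names hypothetical):
#   p, outfile = rundssp('1abc.pdb', 'run1')
#   p.communicate()  # wait for dssp to finish
#   with open(outfile) as f:
#       dssp_df = dssp2pandas(f.read())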
#structs is a dictionary of all the structures (which are then subdivided into chains)
def parsePDB(structs):
parser = PDBParser()
converter = {'ALA': 'A', 'ASX': 'B', 'CYS': 'C', 'ASP': 'D', 'GLU': 'E', 'PHE': 'F', 'GLY': 'G',
'HIS': 'H', 'ILE': 'I', 'LYS': 'K', 'LEU': 'L', 'MET': 'M', 'ASN': 'N', 'PRO': 'P',
'GLN': 'Q', 'ARG': 'R', 'SER': 'S', 'THR': 'T', 'SEC': 'U', 'VAL': 'V', 'TRP': 'W',
'XAA': 'X', 'TYR': 'Y', 'GLX': 'Z'}
structseqs={}
with open( 'structs.fast' , 'w') as fastout:
for s in structs:
Structure = PDBParser().get_structure(s, structs[s])
for model in Structure:
for chain in model:
res = chain.get_residues()
seq = ''.join([ converter[r.get_resname()] for r in res if r.get_resname() in converter ] )
                    fastout.write('>' + s + '|' + chain.id + '\n')
                    fastout.write(str(seq) + '\n')
structseqs[ s + '|'+ chain.id ] = seq
return structseqs
def generateProtFeatDict(sequence):
features = FeatureGen.Get_Protein_Feat(sequence)
return features
#generate complete set of dictionary keys generated by protFET
def protFeatKeys(align_array):
dictKeys = set()
for i in range(align_array.shape[0]):
sequence = alnArrayLineToSequence(align_array, i)
#sequence = str(msa[i].seq)
#temporary fix for ProtFeat not supporting B, Z, X
sequence = sequence.replace('B', 'D')
sequence = sequence.replace('Z', 'E')
sequence = sequence.replace('X', 'A')
sequence = sequence.replace('.', '')
sequence = sequence.replace('-','')
dictKeys = dictKeys.union(set(generateProtFeatDict(sequence).keys()) - dictKeys)
return dictKeys
#generate ProtFET array for given align (maxKeys: all keys of the feature dictionary, over the entire set)
def alignToProtFeat(align_array, dictKeys):
#generate 2d array of ProtFET features for each sequence in align
align_features = np.zeros((align_array.shape[0], len(dictKeys)), dtype=float)
missingFeatures = set()
for i in range(align_array.shape[0]):
sequence = alnArrayLineToSequence(align_array, i)
#temporary fix for ProtFeat not supporting B, Z, X
sequence = sequence.replace('B', 'D')
sequence = sequence.replace('Z', 'E')
sequence = sequence.replace('X', 'A')
sequence = sequence.replace('.', '')
sequence = sequence.replace('-','')
featuresDict = generateProtFeatDict(sequence)
missingFeatures = dictKeys - set(featuresDict.keys())
for newKey in missingFeatures:
featuresDict[newKey] = float(0)
features = np.array(list(featuresDict.values()))
align_features[i,:] = features
return align_features
#generate array of ProtFeat features for all aligns
def protFeatArrays(aligns):
maxKeys = set()
mostProts = biggestAlignShape(aligns)[0]
#build set of all keys used in the set
for i in range(len(aligns)):
maxKeys = maxKeys.union(protFeatKeys(aligns[i]) - maxKeys)
setFeatures = np.zeros((len(aligns), mostProts, len(maxKeys)))
for i in range(len(aligns)):
        feats = alignToProtFeat(aligns[i], maxKeys)
        # np.append returns a new array and its result was being discarded;
        # write each align's features into the preallocated block instead
        setFeatures[i, :feats.shape[0], :] = feats
return setFeatures
def generateGapMatrix(align_array):
gap_array = np.array([[1 if (align_array[i][j] == b'.' or align_array[i][j] == b'-') else 0 for j in range(align_array.shape[1])] for i in range(align_array.shape[0])])
return gap_array
def generateAlignVoxel(align_array, propAmount = 12):
    align_prop_array = np.zeros((align_array.shape[0], align_array.shape[1], propAmount + 1), dtype=float)
"""Implementations of the IPFP algorithm to solve for equilibrium and do comparative statics
in several variants of the `Choo and Siow 2006 <https://www.jstor.org/stable/10.1086/498585?seq=1>`_ model:
* homoskedastic with singles (as in Choo and Siow 2006)
* homoskedastic without singles
* gender-heteroskedastic: with a scale parameter on the error term for women
* gender- and type-heteroskedastic: with a scale parameter on the error term for each gender and type
* two-level nested logit, with nests and nest parameters that do not depend on the type, and {0} as the first nest
Each solver, when fed the joint surplus and margins, returns the equilibrium matching patterns, the adding-up errors on the margins,
and if requested (using `gr=True`) the derivatives of the matching patterns in all primitives.
"""
import numpy as np
from math import sqrt
from typing import Union, Tuple, List
import scipy.linalg as spla
from utils import print_stars, bs_error_abort, npexp, npmaxabs, \
nppow, der_nppow, nprepeat_col, nprepeat_row, describe_array, test_vector
TripleArrays = Tuple[np.ndarray, np.ndarray, np.ndarray]
IPFPnoGradientResults = Tuple[TripleArrays, np.ndarray, np.ndarray, np.ndarray]
IPFPGradientResults = Tuple[TripleArrays,
np.ndarray, np.ndarray, np.ndarray, TripleArrays]
IPFPResults = Union[IPFPnoGradientResults, IPFPGradientResults]
def _ipfp_check_sizes(men_margins: np.ndarray, women_margins: np.ndarray,
                      Phi: np.ndarray) -> Tuple[int, int]:
"""checks that the margins and surplus have the correct shapes and sizes """
X = test_vector(men_margins)
Y = test_vector(women_margins)
if Phi.shape != (X, Y):
bs_error_abort(f"The shape of Phi should be ({X}, {Y}")
return X, Y
def ipfp_homoskedastic_nosingles_solver(Phi: np.array, men_margins: np.array, women_margins: np.array,
tol: float = 1e-9, gr: bool = False, verbose: bool = False,
maxiter: int = 1000) \
-> IPFPnoGradientResults:
"""Solves for equilibrium in a Choo and Siow market without singles, given systematic surplus and margins
Args:
Phi: matrix of systematic surplus, shape (X, Y)
men_margins: vector of men margins, shape (X)
women_margins: vector of women margins, shape (Y)
tol: tolerance on change in solution
gr: if `True`, also evaluate derivatives of :math:`(\\mu_{xy})` wrt `Phi`
verbose: if `True`, prints information
maxiter: maximum number of iterations
Returns:
muxy: the matching patterns, shape (X, Y)
marg_err_x, marg_err_y: the errors on the margins
and the gradients of :math:`(\\mu_{xy})` wrt `Phi` if `gr` is `True`
"""
X, Y = _ipfp_check_sizes(men_margins, women_margins, Phi)
n_couples = np.sum(men_margins)
# check that there are as many men as women
if np.abs(np.sum(women_margins) - n_couples) > n_couples * tol:
bs_error_abort("There should be as many men as women")
ephi2, der_ephi2 = npexp(Phi / 2.0, deriv=True)
ephi2T = ephi2.T
#############################################################################
# we solve the equilibrium equations muxy = ephi2 * tx * ty
    # starting with a reasonable initial point for tx and ty: tx = ty = bigc
# it is important that it fit the number of individuals
#############################################################################
bigc = sqrt(n_couples / np.sum(ephi2))
txi = np.full(X, bigc)
tyi = np.full(Y, bigc)
err_diff = bigc
tol_diff = tol * err_diff
niter = 0
while (err_diff > tol_diff) and (niter < maxiter):
sx = ephi2 @ tyi
tx = men_margins / sx
sy = ephi2T @ tx
ty = women_margins / sy
err_x = npmaxabs(tx - txi)
err_y = npmaxabs(ty - tyi)
err_diff = err_x + err_y
txi, tyi = tx, ty
niter += 1
muxy = ephi2 * np.outer(txi, tyi)
marg_err_x = np.sum(muxy, 1) - men_margins
marg_err_y = np.sum(muxy, 0) - women_margins
if verbose:
print(f"After {niter} iterations:")
print(f"\tMargin error on x: {npmaxabs(marg_err_x)}")
print(f"\tMargin error on y: {npmaxabs(marg_err_y)}")
if not gr:
return muxy, marg_err_x, marg_err_y
else:
sxi = ephi2 @ tyi
syi = ephi2T @ txi
n_sum_categories = X + Y
n_prod_categories = X * Y
# start with the LHS of the linear system
lhs = np.zeros((n_sum_categories, n_sum_categories))
lhs[:X, :X] = np.diag(sxi)
lhs[:X, X:] = ephi2 * txi.reshape((-1, 1))
lhs[X:, X:] = np.diag(syi)
lhs[X:, :X] = ephi2T * tyi.reshape((-1, 1))
# now fill the RHS
n_cols_rhs = n_prod_categories
rhs = np.zeros((n_sum_categories, n_cols_rhs))
# to compute derivatives of (txi, tyi) wrt Phi
der_ephi2 /= (2.0 * ephi2) # 1/2 with safeguards
ivar = 0
for iman in range(X):
rhs[iman, ivar:(ivar + Y)] = - \
muxy[iman, :] * der_ephi2[iman, :]
ivar += Y
ivar1 = X
ivar2 = 0
for iwoman in range(Y):
rhs[ivar1, ivar2:n_cols_rhs:Y] = - \
muxy[:, iwoman] * der_ephi2[:, iwoman]
ivar1 += 1
ivar2 += 1
# solve for the derivatives of txi and tyi
dt_dT = spla.solve(lhs, rhs)
dt = dt_dT[:X, :]
dT = dt_dT[X:, :]
# now construct the derivatives of muxy
dmuxy = np.zeros((n_prod_categories, n_cols_rhs))
ivar = 0
for iman in range(X):
dt_man = dt[iman, :]
dmuxy[ivar:(ivar + Y),
:] = np.outer((ephi2[iman, :] * tyi), dt_man)
ivar += Y
for iwoman in range(Y):
dT_woman = dT[iwoman, :]
dmuxy[iwoman:n_prod_categories:Y,
:] += np.outer((ephi2[:, iwoman] * txi), dT_woman)
# add the term that comes from differentiating ephi2
muxy_vec2 = (muxy * der_ephi2).reshape(n_prod_categories)
dmuxy += np.diag(muxy_vec2)
return muxy, marg_err_x, marg_err_y, dmuxy
def ipfp_homoskedastic_solver(Phi: np.array, men_margins: np.array, women_margins: np.array,
tol: float = 1e-9, gr: bool = False, verbose: bool = False,
maxiter: int = 1000) -> IPFPResults:
"""Solves for equilibrium in a Choo and Siow market with singles, given systematic surplus and margins
Args:
Phi: matrix of systematic surplus, shape (X, Y)
men_margins: vector of men margins, shape (X)
women_margins: vector of women margins, shape (Y)
tol: tolerance on change in solution
gr: if `True`, also evaluate derivatives of the matching patterns
verbose: if `True`, prints information
maxiter: maximum number of iterations
Returns:
(muxy, mux0, mu0y): the matching patterns
marg_err_x, marg_err_y: the errors on the margins
and the gradients of the matching patterns wrt (men_margins, women_margins, Phi) if `gr` is `True`
"""
X, Y = _ipfp_check_sizes(men_margins, women_margins, Phi)
ephi2, der_ephi2 = npexp(Phi / 2.0, deriv=True)
#############################################################################
# we solve the equilibrium equations muxy = ephi2 * tx * ty
# where mux0=tx**2 and mu0y=ty**2
# starting with a reasonable initial point for tx and ty: tx = ty = bigc
# it is important that it fit the number of individuals
#############################################################################
ephi2T = ephi2.T
nindivs = np.sum(men_margins) + np.sum(women_margins)
bigc = sqrt(nindivs / (X + Y + 2.0 * np.sum(ephi2)))
txi = np.full(X, bigc)
tyi = np.full(Y, bigc)
err_diff = bigc
tol_diff = tol * bigc
niter = 0
while (err_diff > tol_diff) and (niter < maxiter):
sx = ephi2 @ tyi
tx = (np.sqrt(sx * sx + 4.0 * men_margins) - sx) / 2.0
sy = ephi2T @ tx
ty = (np.sqrt(sy * sy + 4.0 * women_margins) - sy) / 2.0
err_x = npmaxabs(tx - txi)
err_y = npmaxabs(ty - tyi)
err_diff = err_x + err_y
txi = tx
tyi = ty
niter += 1
mux0 = txi * txi
mu0y = tyi * tyi
muxy = ephi2 * np.outer(txi, tyi)
marg_err_x = mux0 + np.sum(muxy, 1) - men_margins
marg_err_y = mu0y + np.sum(muxy, 0) - women_margins
if verbose:
print(f"After {niter} iterations:")
print(f"\tMargin error on x: {npmaxabs(marg_err_x)}")
print(f"\tMargin error on y: {npmaxabs(marg_err_y)}")
if not gr:
return (muxy, mux0, mu0y), marg_err_x, marg_err_y
else: # we compute the derivatives
sxi = ephi2 @ tyi
syi = ephi2T @ txi
n_sum_categories = X + Y
n_prod_categories = X * Y
# start with the LHS of the linear system
lhs = np.zeros((n_sum_categories, n_sum_categories))
lhs[:X, :X] = np.diag(2.0 * txi + sxi)
lhs[:X, X:] = ephi2 * txi.reshape((-1, 1))
lhs[X:, X:] = np.diag(2.0 * tyi + syi)
lhs[X:, :X] = ephi2T * tyi.reshape((-1, 1))
# now fill the RHS
n_cols_rhs = n_sum_categories + n_prod_categories
rhs = np.zeros((n_sum_categories, n_cols_rhs))
# to compute derivatives of (txi, tyi) wrt men_margins
rhs[:X, :X] = np.eye(X)
# to compute derivatives of (txi, tyi) wrt women_margins
rhs[X:n_sum_categories,
X:n_sum_categories] = np.eye(Y)
# to compute derivatives of (txi, tyi) wrt Phi
der_ephi2 /= (2.0 * ephi2) # 1/2 with safeguards
ivar = n_sum_categories
for iman in range(X):
rhs[iman, ivar:(ivar + Y)] = - \
muxy[iman, :] * der_ephi2[iman, :]
ivar += Y
ivar1 = X
ivar2 = n_sum_categories
for iwoman in range(Y):
rhs[ivar1, ivar2:n_cols_rhs:Y] = - \
muxy[:, iwoman] * der_ephi2[:, iwoman]
ivar1 += 1
ivar2 += 1
# solve for the derivatives of txi and tyi
dt_dT = spla.solve(lhs, rhs)
dt = dt_dT[:X, :]
dT = dt_dT[X:, :]
# now construct the derivatives of the mus
dmux0 = 2.0 * (dt * txi.reshape((-1, 1)))
dmu0y = 2.0 * (dT * tyi.reshape((-1, 1)))
dmuxy = np.zeros((n_prod_categories, n_cols_rhs))
ivar = 0
for iman in range(X):
dt_man = dt[iman, :]
dmuxy[ivar:(ivar + Y),
:] = np.outer((ephi2[iman, :] * tyi), dt_man)
ivar += Y
for iwoman in range(Y):
dT_woman = dT[iwoman, :]
dmuxy[iwoman:n_prod_categories:Y,
:] += np.outer((ephi2[:, iwoman] * txi), dT_woman)
# add the term that comes from differentiating ephi2
muxy_vec2 = (muxy * der_ephi2).reshape(n_prod_categories)
dmuxy[:, n_sum_categories:] += np.diag(muxy_vec2)
return (muxy, mux0, mu0y), marg_err_x, marg_err_y, (dmuxy, dmux0, dmu0y)
def ipfp_gender_heteroskedastic_solver(Phi: np.array, men_margins: np.array, women_margins: np.array,
tau: float, tol: float = 1e-9, gr: bool = False, verbose: bool = False,
maxiter: int = 1000) -> IPFPResults:
"""Solves for equilibrium in a in a gender-heteroskedastic Choo and Siow market
given systematic surplus and margins and a scale parameter `tau`
Args:
Phi: matrix of systematic surplus, shape (X, Y)
men_margins: vector of men margins, shape (X)
women_margins: vector of women margins, shape (Y)
tau: the standard error for all women
tol: tolerance on change in solution
gr: if `True`, also evaluate derivatives of the matching patterns
verbose: if `True`, prints information
maxiter: maximum number of iterations
Returns:
(muxy, mux0, mu0y): the matching patterns
marg_err_x, marg_err_y: the errors on the margins
and the gradients of the matching patterns wrt (men_margins, women_margins, Phi, tau) if `gr` is `True`
"""
X, Y = _ipfp_check_sizes(men_margins, women_margins, Phi)
if tau <= 0:
bs_error_abort(f"We need a positive tau, not {tau}")
#############################################################################
# we use ipfp_heteroxy_solver with sigma_x = 1 and tau_y = tau
#############################################################################
sigma_x = np.ones(X)
tau_y = np.full(Y, tau)
if gr:
mus, marg_err_x, marg_err_y, dmus_hxy = \
ipfp_heteroskedastic_solver(Phi, men_margins, women_margins,
sigma_x, tau_y, tol=tol, gr=True,
maxiter=maxiter, verbose=verbose)
        dmus_xy, dmus_x0, dmus_0y = dmus_hxy
        muxy, mux0, mu0y = mus  # unpack so the tuple returned below is defined
n_sum_categories = X + Y
n_prod_categories = X * Y
n_cols = n_sum_categories + n_prod_categories
itau_y = n_cols + X
dmuxy = np.zeros((n_prod_categories, n_cols + 1))
dmuxy[:, :n_cols] = dmus_xy[:, :n_cols]
dmuxy[:, -1] = np.sum(dmus_xy[:, itau_y:], 1)
dmux0 = np.zeros((X, n_cols + 1))
dmux0[:, :n_cols] = dmus_x0[:, :n_cols]
dmux0[:, -1] = np.sum(dmus_x0[:, itau_y:], 1)
dmu0y = np.zeros((Y, n_cols + 1))
dmu0y[:, :n_cols] = dmus_0y[:, :n_cols]
dmu0y[:, -1] = np.sum(dmus_0y[:, itau_y:], 1)
return (muxy, mux0, mu0y), marg_err_x, marg_err_y, (dmuxy, dmux0, dmu0y)
else:
return ipfp_heteroskedastic_solver(Phi, men_margins, women_margins,
sigma_x, tau_y, tol=tol, gr=False,
maxiter=maxiter, verbose=verbose)
def ipfp_heteroskedastic_solver(Phi: np.array, men_margins: np.array, women_margins: np.array,
sigma_x: np.array, tau_y: np.array, tol: float = 1e-9, gr: bool = False,
verbose: bool = False,
maxiter: int = 1000) -> IPFPResults:
"""Solves for equilibrium in a in a fully heteroskedastic Choo and Siow market
given systematic surplus and margins and standard errors `sigma_x` and `tau_y`
Args:
Phi: matrix of systematic surplus, shape (X, Y)
men_margins: vector of men margins, shape (X)
women_margins: vector of women margins, shape (Y)
sigma_x: the vector of standard errors for the X types of men
        tau_y: the vector of standard errors for the Y types of women
tol: tolerance on change in solution
gr: if `True`, also evaluate derivatives of the matching patterns
verbose: if `True`, prints information
maxiter: maximum number of iterations
Returns:
(muxy, mux0, mu0y): the matching patterns
marg_err_x, marg_err_y: the errors on the margins
and the gradients of the matching patterns wrt (men_margins, women_margins, Phi, sigma_x, tau_y)
if `gr` is `True`
"""
X, Y = _ipfp_check_sizes(men_margins, women_margins, Phi)
if np.min(sigma_x) <= 0.0:
bs_error_abort(f"All elements of sigma_x must be positive")
if np.min(tau_y) <= 0.0:
bs_error_abort(f"All elements of tau_y must be positive")
sumxy1 = 1.0 / np.add.outer(sigma_x, tau_y)
ephi2, der_ephi2 = npexp(Phi * sumxy1, deriv=True)
#############################################################################
# we solve the equilibrium equations muxy = ephi2 * tx * ty
# with tx = mux0^(sigma_x/(sigma_x + tau_max))
# and ty = mu0y^(tau_y/(sigma_max + tau_y))
# starting with a reasonable initial point for tx and ty: tx = ty = bigc
# it is important that it fit the number of individuals
#############################################################################
nindivs = np.sum(men_margins) + np.sum(women_margins)
bigc = nindivs / (X + Y + 2.0 * np.sum(ephi2))
# we find the largest values of sigma_x and tau_y
xmax = np.argmax(sigma_x)
sigma_max = sigma_x[xmax]
ymax = np.argmax(tau_y)
tau_max = tau_y[ymax]
# we use tx = mux0^(sigma_x/(sigma_x + tau_max))
# and ty = mu0y^(tau_y/(sigma_max + tau_y))
sig_taumax = sigma_x + tau_max
txi = np.power(bigc, sigma_x / sig_taumax)
sigmax_tau = tau_y + sigma_max
tyi = np.power(bigc, tau_y / sigmax_tau)
err_diff = bigc
tol_diff = tol * bigc
tol_newton = tol
niter = 0
while (err_diff > tol_diff) and (niter < maxiter): # IPFP main loop
# Newton iterates for men
err_newton = bigc
txin = txi.copy()
mu0y_in = np.power(np.power(tyi, sigmax_tau), 1.0 / tau_y)
while err_newton > tol_newton:
txit = np.power(txin, sig_taumax)
mux0_in = np.power(txit, 1.0 / sigma_x)
out_xy = np.outer(np.power(mux0_in, sigma_x),
np.power(mu0y_in, tau_y))
muxy_in = ephi2 * np.power(out_xy, sumxy1)
errxi = mux0_in + np.sum(muxy_in, 1) - men_margins
err_newton = npmaxabs(errxi)
txin -= errxi / (sig_taumax * (mux0_in / sigma_x
+ np.sum(sumxy1 * muxy_in, 1)) / txin)
tx = txin
# Newton iterates for women
err_newton = bigc
tyin = tyi.copy()
mux0_in = np.power(np.power(tx, sig_taumax), 1.0 / sigma_x)
while err_newton > tol_newton:
tyit = np.power(tyin, sigmax_tau)
mu0y_in = np.power(tyit, 1.0 / tau_y)
out_xy = np.outer(np.power(mux0_in, sigma_x),
np.power(mu0y_in, tau_y))
muxy_in = ephi2 * np.power(out_xy, sumxy1)
erryi = mu0y_in + np.sum(muxy_in, 0) - women_margins
err_newton = npmaxabs(erryi)
tyin -= erryi / (sigmax_tau * (mu0y_in / tau_y
+ np.sum(sumxy1 * muxy_in, 0)) / tyin)
ty = tyin
err_x = npmaxabs(tx - txi)
err_y = npmaxabs(ty - tyi)
err_diff = err_x + err_y
txi = tx
tyi = ty
niter += 1
mux0 = mux0_in
mu0y = mu0y_in
muxy = muxy_in
marg_err_x = mux0 + np.sum(muxy, 1) - men_margins
marg_err_y = mu0y + np.sum(muxy, 0) - women_margins
if verbose:
print(f"After {niter} iterations:")
print(f"\tMargin error on x: {npmaxabs(marg_err_x)}")
print(f"\tMargin error on y: {npmaxabs(marg_err_y)}")
if not gr:
return (muxy, mux0, mu0y), marg_err_x, marg_err_y
else: # we compute the derivatives
n_sum_categories = X + Y
n_prod_categories = X * Y
# we work directly with (mux0, mu0y)
sigrat_xy = sumxy1 * sigma_x.reshape((-1, 1))
taurat_xy = 1.0 - sigrat_xy
mux0_mat = nprepeat_col(mux0, Y)
mu0y_mat = nprepeat_row(mu0y, X)
# muxy = axy * bxy * ephi2
axy = nppow(mux0_mat, sigrat_xy)
bxy = nppow(mu0y_mat, taurat_xy)
der_axy1, der_axy2 = der_nppow(mux0_mat, sigrat_xy)
der_bxy1, der_bxy2 = der_nppow(mu0y_mat, taurat_xy)
der_axy1_rat, der_axy2_rat = der_axy1 / axy, der_axy2 / axy
der_bxy1_rat, der_bxy2_rat = der_bxy1 / bxy, der_bxy2 / bxy
# start with the LHS of the linear system on (dmux0, dmu0y)
lhs = np.zeros((n_sum_categories, n_sum_categories))
lhs[:X, :X] = np.diag(
1.0 + np.sum(muxy * der_axy1_rat, 1))
lhs[:X, X:] = muxy * der_bxy1_rat
lhs[X:, X:] = np.diag(
1.0 + np.sum(muxy * der_bxy1_rat, 0))
lhs[X:, :X] = (muxy * der_axy1_rat).T
# now fill the RHS (derivatives wrt men_margins, then men_margins,
# then Phi, then sigma_x and tau_y)
n_cols_rhs = n_sum_categories + n_prod_categories + X + Y
rhs = np.zeros((n_sum_categories, n_cols_rhs))
# to compute derivatives of (mux0, mu0y) wrt men_margins
rhs[:X, :X] = np.eye(X)
# to compute derivatives of (mux0, mu0y) wrt women_margins
rhs[X:,
X:n_sum_categories] = np.eye(Y)
# the next line is sumxy1 with safeguards
sumxy1_safe = sumxy1 * der_ephi2 / ephi2
big_a = muxy * sumxy1_safe
big_b = der_axy2_rat - der_bxy2_rat
b_mu_s = big_b * muxy * sumxy1
a_phi = Phi * big_a
big_c = sumxy1 * (a_phi - b_mu_s * tau_y)
big_d = sumxy1 * (a_phi + b_mu_s * sigma_x.reshape((-1, 1)))
# to compute derivatives of (mux0, mu0y) wrt Phi
ivar = n_sum_categories
for iman in range(X):
rhs[iman, ivar:(ivar + Y)] = -big_a[iman, :]
ivar += Y
ivar1 = X
ivar2 = n_sum_categories
iend_phi = n_sum_categories + n_prod_categories
for iwoman in range(Y):
rhs[ivar1, ivar2:iend_phi:Y] = -big_a[:, iwoman]
ivar1 += 1
ivar2 += 1
# to compute derivatives of (mux0, mu0y) wrt sigma_x
iend_sig = iend_phi + X
der_sigx = np.sum(big_c, 1)
rhs[:X, iend_phi:iend_sig] = np.diag(der_sigx)
rhs[X:, iend_phi:iend_sig] = big_c.T
# to compute derivatives of (mux0, mu0y) wrt tau_y
der_tauy = np.sum(big_d, 0)
rhs[X:, iend_sig:] = np.diag(der_tauy)
rhs[:X, iend_sig:] = big_d
# solve for the derivatives of mux0 and mu0y
dmu0 = spla.solve(lhs, rhs)
dmux0 = dmu0[:X, :]
dmu0y = dmu0[X:, :]
# now construct the derivatives of muxy
dmuxy = np.zeros((n_prod_categories, n_cols_rhs))
der1 = ephi2 * der_axy1 * bxy
ivar = 0
for iman in range(X):
dmuxy[ivar:(ivar + Y), :] \
= np.outer(der1[iman, :], dmux0[iman, :])
ivar += Y
der2 = ephi2 * der_bxy1 * axy
for iwoman in range(Y):
dmuxy[iwoman:n_prod_categories:Y, :] \
+= np.outer(der2[:, iwoman], dmu0y[iwoman, :])
# add the terms that comes from differentiating ephi2
# on the derivative wrt Phi
i = 0
j = n_sum_categories
for iman in range(X):
for iwoman in range(Y):
dmuxy[i, j] += big_a[iman, iwoman]
i += 1
j += 1
# on the derivative wrt sigma_x
ivar = 0
ix = iend_phi
for iman in range(X):
dmuxy[ivar:(ivar + Y), ix] -= big_c[iman, :]
ivar += Y
ix += 1
# on the derivative wrt tau_y
iy = iend_sig
for iwoman in range(Y):
dmuxy[iwoman:n_prod_categories:Y, iy] -= big_d[:, iwoman]
iy += 1
return (muxy, mux0, mu0y), marg_err_x, marg_err_y, (dmuxy, dmux0, dmu0y)
def _print_simulated_ipfp(muxy, marg_err_x, marg_err_y):
print(" simulated matching:")
print(muxy[:4, :4])
print(f"margin error on x: {npmaxabs(marg_err_x)}")
print(f" on y: {npmaxabs(marg_err_y)}")
if __name__ == "__main__":
do_test_gradient_gender_heteroskedastic = True
do_test_gradient_heteroskedastic = True
# we generate a Choo and Siow homoskedastic matching
X = Y = 20
n_sum_categories = X + Y
n_prod_categories = X * Y
mu, sigma = 0.0, 1.0
n_bases = 4
bases_surplus = np.zeros((X, Y, n_bases))
x_men = (np.arange(X) - X / 2.0) / X
y_women = (np.arange(Y) - Y / 2.0) / Y
bases_surplus[:, :, 0] = 1
for iy in range(Y):
bases_surplus[:, iy, 1] = x_men
for ix in range(X):
bases_surplus[ix, :, 2] = y_women
for ix in range(X):
for iy in range(Y):
bases_surplus[ix, iy, 3] = \
(x_men[ix] - y_women[iy]) * (x_men[ix] - y_women[iy])
men_margins = np.random.uniform(1.0, 10.0, size=X)
women_margins = np.random.uniform(1.0, 10.0, size=Y)
# np.random.normal(mu, sigma, size=n_bases)
true_surplus_params = np.array([3.0, -1.0, -1.0, -2.0])
true_surplus_matrix = bases_surplus @ true_surplus_params
print_stars("Testing ipfp homoskedastic:")
mus, marg_err_x, marg_err_y = \
ipfp_homoskedastic_solver(true_surplus_matrix, men_margins,
women_margins, tol=1e-12)
muxy, mux0, mu0y = mus
print(" checking matching:")
print(" true matching:")
print(muxy[:4, :4])
_print_simulated_ipfp(muxy, marg_err_x, marg_err_y)
# and we test ipfp_gender_heteroskedastic for tau = 1
tau = 1.0
print_stars("Testing ipfp gender-heteroskedastic for tau = 1:")
mus_tau, marg_err_x_tau, marg_err_y_tau = \
ipfp_gender_heteroskedastic_solver(true_surplus_matrix, men_margins,
women_margins, tau)
print(" checking matching:")
print(" true matching:")
print(muxy[:4, :4])
muxy_tau, _, _ = mus_tau
_print_simulated_ipfp(muxy_tau, marg_err_x_tau, marg_err_y_tau)
# and we test ipfp heteroxy for sigma = tau = 1
print_stars("Testing ipfp heteroskedastic for sigma_x and tau_y = 1:")
sigma_x = np.ones(X)
tau_y = np.ones(Y)
mus_hxy, marg_err_x_hxy, marg_err_y_hxy = \
ipfp_heteroskedastic_solver(true_surplus_matrix, men_margins, women_margins,
sigma_x, tau_y)
muxy_hxy, _, _ = mus_hxy
_print_simulated_ipfp(muxy_hxy, marg_err_x_hxy, marg_err_y_hxy)
# and we test ipfp homo w/o singles
print_stars("Testing ipfp homoskedastic w/o singles:")
# we need as many women as men
    women_margins_nosingles = women_margins * \
        (np.sum(men_margins) / np.sum(women_margins))
    muxy_nos, marg_err_x_nos, marg_err_y_nos = \
        ipfp_homoskedastic_nosingles_solver(true_surplus_matrix, men_margins,
                                            women_margins_nosingles, tol=1e-12)
    _print_simulated_ipfp(muxy_nos, marg_err_x_nos, marg_err_y_nos)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Mie solution
Calculates the electric field component (Ex) for a plane wave that
is scattered by a dielectric sphere.
Some of this code is a partial translation of the Matlab code from
<NAME>
http://www.mathworks.de/matlabcentral/fileexchange/30162-cylinder-scattering
"""
from __future__ import division
from __future__ import print_function
import numpy as np
from ._Classes import *
from ._Functions import *
__all__ = ["GetFieldSphere", "io_GetCartesianField2D" ]
def GetFieldSphere(radius, nmed, nsphe, lD, size, res):
sphere = DielectricMaterial(nsphe**2,0.0)
background = DielectricMaterial(nmed**2,0.0)
reference = DielectricMaterial(nmed**2,0.0) #HHH reference is medium...
lambref = reference.getElectromagneticWavelength(1.0)
xmax = size / res / 2.0
# the detector resolution is not dependent on the medium
detector = np.linspace(-xmax, xmax, size, endpoint=True) * lambref
sensor_location = np.zeros((3,size))
sensor_location[2] = lD*lambref # optical path length to detector
sensor_location[1] = detector
sensor_location[0] = detector #HHH 3D experience
return getDielectricSphereFieldUnderPlaneWave(radius*lambref,
sphere, background, sensor_location)
def getDielectricSphereFieldUnderPlaneWave(radius, sphere, background,
sensor_location, frequency=1):
"""
Calculate the field scattered by a dielectric sphere centered at
the origine due to an incident x-polarized plane wave. The
scattered field is in (11-239) in [Balanis1989]. see the notes
on 2008-05-24 for the coefficients, a_n, b_n, and c_n.
See Fig. 11-25 in [Balanis1989] for the exact geometry.
Input:
radius scalar to denote the radius of the sphere (m)
sphere object of DielectricMaterial
background object of DielectricMaterial
sensor_location 3x1 vector in the form of [x; y; z] (m)
frequency Nx1 vector in (Hz)
Output:
E_r Nx1 vector (V/m)
E_phi Nx1 vector (V/m)
E_theta Nx1 vector (V/m)
H_r Nx1 vector (A/m)
H_phi Nx1 vector (A/m)
H_theta Nx1 vector (A/m)
This function is a translation to Python from a matlab script
by <NAME>.
"""
#p = inputParser
#p.addRequired[int('radius')-1,int(isnumeric)-1]
#p.addRequired[int('sphere')-1,int(isobject)-1]
#p.addRequired[int('background')-1,int(isobject)-1]
#p.addRequired[int('sensor_location')-1,int(isvector)-1]
#p.addRequired[int('frequency')-1,int(isnumeric)-1]
#p.addParamValue[int('debug')-1,-1,lambda x: x == 0. or x == 1.]
#p.parse[int(radius)-1,int(sphere)-1,int(background)-1,int(sensor_location)-1,int(frequency)-1,varargin.cell[:]]
#if p.Results.debug:
# np.disp((p.Results))
# Compute all intrinsic variables
EPS_O = 8.8541878176*1e-12; #HHH ....this was not here before. changes results BY A LOT hmm.........
MU_O = 4*np.pi*1e-7; #HHH as above
#omega = 2.*np.pi*frequency
eta = background.getIntrinsicImpedance(frequency)
k = background.getElectromagneticWaveNumber(frequency)
mu = background.getComplexPermeability(frequency) * MU_O #HHH as mentioned, this multiplication was not being done.....
eps = background.getComplexPermittivity(frequency) * EPS_O #HHH as above
eta_d = sphere.getIntrinsicImpedance(frequency)
k_d = sphere.getElectromagneticWaveNumber(frequency)
mu_d = sphere.getComplexPermeability(frequency) * MU_O #HHH as above above
eps_d = sphere.getComplexPermittivity(frequency) * EPS_O #HHH as above above above
N = getN_max(radius, sphere, background, frequency)
#N = getN_max((p.Results.radius), cellarray(np.hstack((p.Results.sphere))), (p.Results.background), (p.Results.frequency))
#N = matcompat.max(N)
nu = np.arange(N) + 1
[r, theta, phi] = cart2sph(sensor_location[0], sensor_location[1], sensor_location[2])
#r.resize(len(r),1) #HHH This is used to flatten to a column. Deprecated because I changed cart2sph to build the variables in column already.
#theta.resize(len(theta),1) #HHH as above
#phi.resize(len(phi),1) #HHH as above above
# Compute coefficients
a_n = 1j**(-nu) * (2*nu+1) / (nu*(nu+1))
#a_n = np.dot(np.ones(nFreq, 1.), a_n)
# temp2 denotes the expression
# kzlegendre(nu,1,cos(theta))/sin(theta). Here I am using a
# recursive relation to compute temp2, which avoids the numerical
# difficulty when theta == 0 or PI.
temp2 = np.zeros((len(theta),len(nu))) #HHH matlab original: temp2 = np.zeros((len(nu), len(theta))) ###### changed to akin to matlab
temp2[:,0] = -1 #HHH all in first column
temp2[:,1] = (-3*np.cos(theta)).T #HHH matlab original: temp2(2) = -3*cos(theta) ##### Transverse or it doens't work. You need to replace a column with a row, figure that.
# if N = 10, then nu = [1,2,3,4,5,6,7,8,9,19]
for n in np.arange(len(nu)-2)+1:
# matlab: [2,3,4,5,6,7,8,9]
# python: [1,2,3,4,5,6,7,8]
temp2[:,n+1] = (2*n+1)/n * np.cos(theta).T*temp2[:,n] - (n+1)/n * temp2[:,n-1] #HHH matlab original: temp2(n+1) = (2*n+1)/n*cos(theta)*temp2(n) - (n+1)/n*temp2(n-1) ####selecting whole columns, using transverses properly
# temp1 denotes the expression
# sin(theta)*kzlegendre_derivative(nu,1,cos(theta)). Here I am
# also using a recursive relation to compute temp1 from temp2,
# which avoids numerical difficulty when theta == 0 or PI.
temp1 = np.zeros((len(theta), len(nu))) #HHH changed to keep matlab's structure.
temp1[:,0] = np.cos(theta).T
for n in np.arange(len(nu)-1)+1:
# matlab: [2,3,4,5,6,7,8,9,10] (index starts at 1)
# python: [1,2,3,4,5,6,7,8,9] (index starts at 0)
temp1[:,n-1] = (n+1) * temp2[:,n-1]-n*np.cos(theta).T*temp2[:,n]
#temp1 = np.dot(np.ones(nFreq, 1.), temp1)
#temp2 = np.dot(np.ones(nFreq, 1.), temp2)
#iNU = 10
#if p.Results.debug:
# A = np.array(np.vstack((np.hstack((ric_besselh_derivative(iNU, 2., np.dot(k, radius)), np.dot(matdiv(-np.sqrt(np.dot(eps, mu)), np.sqrt(np.dot(eps_d, mu_d))), ric_besselj_derivative(iNU, np.dot(k_d, radius))))), np.hstack((ric_besselh(iNU, 2., np.dot(k, radius)), np.dot(matdiv(-mu, mu_d), ric_besselj(iNU, np.dot(k_d, radius))))))))
# rhs = np.dot(-a_n[int(iNU)-1], np.array(np.vstack((np.hstack((ric_besselj_derivative(iNU, np.dot(k, radius)))), np.hstack((ric_besselj(iNU, np.dot(k, radius))))))))
# x = linalg.solve(A, rhs)
# np.disp(np.array(np.hstack(('b_n ', num2str(x[0]), d_n, num2str(x[1])))))
# A = np.array(np.vstack((np.hstack((ric_besselh(iNU, 2., np.dot(k, radius)), np.dot(matdiv(-np.sqrt(np.dot(eps, mu)), np.sqrt(np.dot(eps_d, mu_d))), ric_besselj(iNU, np.dot(k_d, radius))))), np.hstack((ric_besselh_derivative(iNU, 2., np.dot(k, radius)), np.dot(matdiv(-mu, mu_d), ric_besselj_derivative(iNU, np.dot(k_d, radius))))))))
# rhs = np.dot(-a_n[int(iNU)-1], np.array(np.vstack((np.hstack((ric_besselj(iNU, np.dot(k, radius)))), np.hstack((ric_besselj_derivative(iNU, np.dot(k, radius))))))))
# x = linalg.solve(A, rhs)
# np.disp(np.array(np.hstack(('c_n ', num2str(x[0]), e_n, num2str(x[1])))))
# np.disp('------')
#alpha = np.zeros((len(theta),len(nu))) #HHH In matlab, alpha is a row, with nu number values. since here r,theta,phi is a column, alpha has to be an array the size of (theta,nu), so it can include all the nus (in row) per value of r (in colum)
#print("alpha shape",np.shape(alpha))
#HHH initializing final result, and adding 0j so it's imaginary from the start
E_r = np.zeros(np.shape(r)) + 0j
E_theta = np.zeros(np.shape(r)) + 0j
E_phi = np.zeros(np.shape(r)) + 0j
H_r = np.zeros(np.shape(r)) + 0j
H_theta = np.zeros(np.shape(r)) + 0j
H_phi = np.zeros(np.shape(r)) + 0j
for elem in range(0,np.size(r)): #HHH gotta evaluate element by element in r (which is a column array)
if r[elem] < radius:
#num = j.*mu_d/sqrt(mu)*sqrt(eps_d);
num = 1j*mu_d/np.sqrt(mu)*np.sqrt(eps_d)
#print("num",num)
#den = - sqrt(mu. *eps_d) *ones(1,N). *transpose(ric_besselj(nu,k_d*radius)). *transpose(ric_besselh_derivative(nu,2,k*radius))...
# + sqrt(mu_d.*eps) *ones(1,N). *transpose(ric_besselh(nu,2,k*radius)). *transpose(ric_besselj_derivative(nu,k_d*radius));
den = ( - (np.sqrt(mu * eps_d)*np.ones((1,N))) * np.transpose(ric_besselj(nu,k_d*radius)) * np.transpose(ric_besselh_derivative(nu,2,k*radius))
+ (np.sqrt(mu_d * eps )*np.ones((1,N))) * np.transpose(ric_besselh(nu,2,k*radius)) * np.transpose(ric_besselj_derivative(nu,k_d*radius)) )
#print("den",den)
#d_n = num*ones(1,N)./den.*a_n;
d_n = num*np.ones((1, N))/den*a_n
#den = + sqrt(mu.*eps_d) *ones(1,N). *transpose(ric_besselh(nu,2,k*radius)). *transpose(ric_besselj_derivative(nu,k_d*radius))...
# - sqrt(mu_d.*eps) *ones(1,N). *transpose(ric_besselj(nu,k_d*radius)). *transpose(ric_besselh_derivative(nu,2,k*radius));
den = ( + (np.sqrt(mu * eps_d)*np.ones((1,N))) * np.transpose(ric_besselh(nu,2,k*radius)) * np.transpose(ric_besselj_derivative(nu,k_d*radius))
- (np.sqrt(mu_d * eps )*np.ones((1,N))) * np.transpose(ric_besselj(nu,k_d*radius)) * np.transpose(ric_besselh_derivative(nu,2,k*radius)) )
#e_n = num*ones(1,N)./den.*a_n;
e_n = num*np.ones((1, N))/den*a_n
x = k_d * r[elem] #HHH x of the current r[elem]
x=x[0] #HHH x should be integer... or problems
## Implement (11-239a) in [Balanis1989]
#alpha = (transpose(ric_besselj_derivative(nu,x,2))+transpose(ric_besselj(nu,x)))...
# .*transpose(kzlegendre(nu,1,cos(theta))*ones(1,nFreq));
alpha = ( (np.transpose(ric_besselh_derivative(nu, 2, x, 2)) + np.transpose(ric_besselh(nu, 2, x))) *
np.transpose(kzlegendre(nu, 1, np.cos(theta[elem]))) ) #HHH obviously, specific theta[elem] is used for alpha
# E_r = -j*cos(phi)*sum(d_n.*alpha, 2);
E_r[elem] = (-1j*np.cos(phi[elem]) * np.sum(d_n*alpha, 1))[0] #HHH use specific row of phi to get a single number
print("elem:",elem,"/", | np.size(r) | numpy.size |
#!/usr/bin/env python
__all__ = ['sun', 'earth', 'moon', 'jupiter', 'true_anomaly', 'eci2perif',
'elem2rv', 'rv2elem', 'tle2elem', 'calc_atmospheric_density', 'T',
'timedelta']
import os
import pathlib
import datetime
from datetime import timedelta
from numpy import sin, cos, arctan2, arccos, deg2rad, rad2deg, pi
from numpy import log, exp, sqrt, array, transpose, cross, dot
from numpy.linalg import norm
from skyfield.api import load
kernel_name = 'de421.bsp'
p = pathlib.Path(__file__).parent.absolute()
kernel = os.path.join(*p.parts, 'data', kernel_name)
jpl = load(kernel)
T = load.timescale(builtin=True)
# https://nssdc.gsfc.nasa.gov/planetary/planetfact.html
sun = {
'name': 'Sun',
'mass': 1988500e24,
'radius': 695700.0,
'mu': 132712e6,
'eph': jpl['sun']
}
earth = {
'name': 'Earth',
'mass': 5.972e24,
'radius': 6378.0,
'mu': 0.39860e6,
'j2': -1.082635854e-3,
'atm': {
'rot_vector': array([0.0, 0.0, 72.9211e-6]),
'table': array([[63.096, 2.059e-4],
[251.189, 5.909e-11],
[1000.0, 3.561e-15]])
},
'eph': jpl['earth']
}
moon = {
'name': 'Moon',
'mass': 0.07346e24,
'radius': 1738.1,
'mu': 0.00490e6,
'eph': jpl['moon']
}
jupiter = {
'name': 'Jupiter',
'mass': 1898.19e24,
'radius': 69911,
'mu': 126.687e6,
'eph': jpl['jupiter barycenter']
}
def find_rho_z(z, center):
if not 1.0 < z < 1000.0:
return [[0.0, 0.0], [0.0, 0.0]]
zs = center['atm']['table'][:, 0]
rhos = center['atm']['table'][:, 1] * 1e8
for n in range(len(rhos) - 1):
if zs[n] < z < zs[n + 1]:
return [[rhos[n], rhos[n + 1]], [zs[n], zs[n + 1]]]
return [[0.0, 0.0], [0.0, 0.0]]
def calc_atmospheric_density(z, center):
rhos, zs = find_rho_z(z, center)
if rhos[0] == 0:
return 0
Hi = -(zs[1] - zs[0]) / log(rhos[1] / rhos[0])
return rhos[0] * exp(-(z - zs[0]) / Hi)
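# Between two bracketing table rows the density follows an exponential model,
# rho(z) = rho0 * exp(-(z - z0)/H) with scale height H = -(z1 - z0)/ln(rho1/rho0),
# which is exactly what the function above evaluates. Illustrative call
# (altitude in km, per the table above): calc_atmospheric_density(300.0, earth)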
def ecc_anomaly(e, M, eps=1e-8, max_iter=100):
u1 = M
for _ in range(max_iter):
u2 = u1 - ((u1 - e * sin(u1) - M) / (1 - e * cos(u1)))
if abs(u2 - u1) < eps:
break
u1 = u2
else:
return None
return u2
def true_anomaly(e, E):
    return 2 * arctan2(sqrt(1 + e) * sin(E / 2), sqrt(1 - e) * cos(E / 2))
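# Minimal sketch tying the two helpers together (the e and M values below are
# illustrative, not a real orbit): solve Kepler's equation for the eccentric
# anomaly E, then convert it to the true anomaly.
def _anomaly_example(e=0.1, M=1.0):
    E = ecc_anomaly(e=e, M=M)  # Newton-Raphson on M = E - e*sin(E)
    return true_anomaly(e, E)  # radians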
def eci2perif(lan, aop, i):
u = [
-sin(lan) * cos(i) * sin(aop) + cos(lan) * cos(aop),
cos(lan) * cos(i) * sin(aop) + sin(lan) * cos(aop),
sin(i) * sin(aop)
]
v = [
-sin(lan) * cos(i) * cos(aop) - cos(lan) * sin(aop),
cos(lan) * cos(i) * cos(aop) - sin(lan) * sin(aop),
sin(i) * cos(aop)
]
w = [
sin(lan) * sin(i),
-cos(lan) * sin(i),
cos(i)
]
return array([u, v, w])
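# The rows of the returned matrix are the perifocal basis vectors expressed in
# ECI coordinates, so it rotates ECI vectors into the perifocal frame; its
# transpose (used in elem2rv below) performs the inverse rotation.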
def elem2rv(elements, mu=earth['mu']):
a, e = elements['a'], elements['e']
i = deg2rad(elements['i'])
aop = deg2rad(elements['argument_of_periapsis'])
lan = deg2rad(elements['longitude_of_ascending_node'])
E = ecc_anomaly(e=e, M=elements['mean_anomaly'])
ta = true_anomaly(e=e, E=E)
r_norm = a * (1 - e * cos(E))
r_perif = r_norm * array([cos(ta), sin(ta), 0])
v_perif = sqrt(mu * a) / r_norm
v_perif *= array([-sin(E), cos(E) * sqrt(1 - e ** 2), 0])
perif2eci = transpose(eci2perif(lan, aop, i))
position = dot(perif2eci, r_perif).tolist()
velocity = dot(perif2eci, v_perif).tolist()
period = 2 * pi * sqrt(a ** 3 / mu)
return position, velocity, period
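# Sketch of elem2rv with hypothetical elements: i, the argument of periapsis
# and the ascending node are in degrees (they are deg2rad'd above), while the
# mean anomaly is consumed as-is in radians. Values are illustrative only.
def _elem2rv_example():
    elements = {'a': 7000.0, 'e': 0.01, 'i': 51.6,
                'argument_of_periapsis': 30.0,
                'longitude_of_ascending_node': 40.0,
                'mean_anomaly': 0.5}
    return elem2rv(elements)  # -> position [km], velocity [km/s], period [s]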
def rv2elem(r, v, mu=earth['mu'], return_dict=False):
r_norm = norm(r)
# specific angular momentum
h = cross(r, v)
h_norm = norm(h)
# inclination
    i = arccos(h[2] / h_norm)
#coding=utf-8
import rospy
from std_msgs.msg import Header
from sensor_msgs.msg import Image, NavSatFix
from map_generator.msg import tjy
from nav_msgs.msg import Path
import numpy as np
import time
from googleplaces import GooglePlaces
import googlemaps
import time
import sys
import math
from math import cos,sin,tan,sqrt
from visualization_msgs.msg import Marker
sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')
C_EARTH = 6378137.0
class GoogleMaps(object):
def __init__(self):
self._GOOGLE_MAPS_KEY = "<KEY>"
self._Google_Places = GooglePlaces(self._GOOGLE_MAPS_KEY)
self._Google_Geocod = googlemaps.Client(key=self._GOOGLE_MAPS_KEY)
def _nearby_search(self, lng, lat, language, radius, result=None):
if result is None:
nearby_query_result = self._Google_Places.nearby_search(language=language,
lat_lng={'lat': lat, 'lng': lng}, radius=radius)
else:
if result.has_next_page_token:
#print(result.next_page_token)
nearby_query_result = self._Google_Places.nearby_search(
pagetoken=result.next_page_token, lat_lng={'lat': lat, 'lng': lng}, radius=radius)
else:
nearby_query_result = None
return nearby_query_result
def get_all_data(self, lng, lat, language='en', radius=100):
count = 0
list_return_info = []
list_nearby_search_result = self._nearby_search(lng, lat, language, radius)
while(list_nearby_search_result is not None):
for place in list_nearby_search_result.places:
# Returned places from a query are place summaries.
print(place.name)
print(place.geo_location['lng'])
print(place.geo_location['lat'])
print(count)
count = count+1
list_return_info.append({"name":place.name, "lng":place.geo_location['lng'], "lat":place.geo_location['lat']})
#print place.place_id
# The following method has to make a further API call.
#place.get_details()
# Referencing any of the attributes below, prior to making a call to
# get_details() will raise a googleplaces.GooglePlacesAttributeError.
#print place.details # A dict matching the JSON response from Google.
#print place.local_phone_number
#print place.international_phone_number
#print place.website
#print place.url
# Are there any additional pages of results?
list_nearby_search_result = self._nearby_search(lng, lat, language, radius, list_nearby_search_result)
return list_return_info
class Transform(object):
def __init__(self):
self.R = None
self.t = None
def centroid_point(self, samples):
means = np.mean(samples, axis=0)
return means
def transform_lamda(self, A, B):
A_norm = np.sum(A*A,axis=1)
B_norm = np.sum(B*B,axis=1)
#lam=np.sqrt(A_norm)/np.sqrt(B_norm)
lam = A_norm/B_norm
lam=np.mean(lam)
return lam
def transform_3D_RT(self, A, B):
assert A.shape == B.shape
# A is original, B is target
centroidA = self.centroid_point(A)
centroidB = self.centroid_point(B)
H = np.dot((A - centroidA).T , (B - centroidB))
A_move=A - centroidA
B_move=B - centroidB
lam = self.transform_lamda(A_move, B_move)
        # np.linalg.svd returns the third factor already transposed (Vt);
        # the Kabsch rotation is R = V * U^T, i.e. Vt.T * U^T
        U, S, Vt = np.linalg.svd(H)
        R = np.dot(Vt.T, U.T)
        if np.linalg.det(R) < 0:
            #print('Reflection detected')
            Vt[2, :] = -1*Vt[2, :]
            R = np.dot(Vt.T, U.T)
t = - np.dot((R/sqrt(lam)),centroidA.T) + centroidB.T
R = R/sqrt(lam)
self.R= R
self.t = t.reshape((3,1))
return R, t
def transform(self, A, R = None, t = None):
if R is None:
R = self.R
t = self.t
B = np.dot(R, A.T) + t
return B
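# Minimal sketch of the alignment helper on synthetic data: B is A scaled by 2
# and shifted, so the recovered R absorbs the scale and t the shift. The points
# below are illustrative, not taken from the node's GPS pipeline.
def _transform_example():
    A = np.random.rand(10, 3)
    B = 2.0 * A + np.array([1.0, 2.0, 3.0])
    tr = Transform()
    tr.transform_3D_RT(A, B)  # estimate scaled rotation R and translation t
    return tr.transform(A)    # 3 x N array, approximately equal to B.T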
class NearbySearch(object):
def __init__(self):
self._sub = rospy.Subscriber('/trajectory',tjy, self.callback, queue_size=100)
#self._pub = rospy.Publisher('/nearby_gps', NavSatFix, queue_size = 100)
#self._pub1 = rospy.Publisher('/car_gps', NavSatFix, queue_size = 100)
self._pub = rospy.Publisher('/location', Marker, queue_size=1000)
self.google_maps = GoogleMaps()
self.count = 0
self.gps_result = []
self.new_gps = []
#self.xyz_temp = NavSatFix()
self._timenum = 0
self.init_lat =0.0
self.init_lng = 0.0
self.init_x = 0.0
self.init_y = 0.0
self.init_z = 0.0
self.init = True
self.init_pose_x = 0.0
self.init_pose_y = 0.0
self.init_pose_z = 0.0
self.number_limit = 20
self.sample_num = 30
self.xyz_samples = []
self.pose_samples = []
self.ave_xyz_samples = []
self.ave_pose_samples = []
self.transform = Transform()
self.display_freq = 10
self.marker_scale = 0.2
self.marker_lifetime = 8 # 0 is forever
self.marker_id = 0
self.marker_ns = 'building'+str(self.marker_id)
self.marker_color = {'r': 1.0, 'g': 0.7, 'b': 1.0, 'a': 1.0}
#self.marker_init()
def add_point(self, samples, point):
if len(samples)== self.number_limit:
samples.remove(samples[0])
samples.append(point)
return samples
def ave_append(self, ave, sample):
if len(sample) == self.number_limit:
sam = np.mean(np.array(sample).reshape((-1,3)), axis=0)
ave.append([sam[0], sam[1], sam[2]])
return ave
def add_samples(self, samples, ave_samples):
if not len(ave_samples) == 0:
new = ave_samples + samples
else:
new = samples
print("current groundtrue sample: ", len(new))
        return np.array(new)
"""
Helper functions which obtain forces and energies
corresponding to atoms in structures. These functions automatically
cast atoms into their respective atomic environments.
"""
import numpy as np
from flare.gp import GaussianProcess
from flare.struc import Structure
from copy import deepcopy
from flare.predict import (
predict_on_structure_par,
predict_on_atom,
predict_on_atom_en,
predict_on_structure_par_en,
)
from .fake_gp import generate_hm, get_tstp, get_random_structure
from flare.predict import (
predict_on_structure,
predict_on_structure_par,
predict_on_structure_efs,
predict_on_structure_efs_par,
)
import pytest
import time
def fake_predict(_, __):
return np.random.uniform(-1, 1), np.random.uniform(-1, 1)
def fake_predict_local_energy(_):
return np.random.uniform(-1, 1)
@pytest.fixture(scope="class")
def two_plus_three_gp() -> GaussianProcess:
"""Returns a GP instance with a 2+3-body kernel."""
cutoffs = {"twobody": 0.8, "threebody": 0.8}
hyps = np.array([1.0, 1.0, 1.0, 1.0, 1.0])
gp_model = GaussianProcess(
kernels=["twobody", "threebody"],
hyps=hyps,
cutoffs=cutoffs,
multihyps=False,
parallel=False,
n_cpus=1,
)
test_structure, forces = get_random_structure(np.eye(3), [1, 2], 3)
energy = 3.14
gp_model.update_db(test_structure, forces, energy=energy)
yield gp_model
del gp_model
_fake_gp = GaussianProcess(
kernel_name="2+3", cutoffs=[5.0, 5.0], hyps=[1.0, 1.0, 1.0, 1.0, 1.0]
)
_fake_structure = Structure(
cell=np.eye(3), species=[1, 1, 1], positions=np.random.uniform(0, 1, size=(3, 3))
)
_fake_gp.predict = fake_predict
_fake_gp.predict_local_energy = fake_predict_local_energy
assert isinstance(_fake_gp.predict(1, 1), tuple)
assert isinstance(_fake_gp.predict_local_energy(1), float)
@pytest.mark.parametrize("n_cpu", [None, 1, 2])
def test_predict_on_structure_par(n_cpu):
# Predict only on the first atom, and make rest NAN
selective_atoms = [0]
skipped_atom_value = np.nan
forces, stds = predict_on_structure_par(
_fake_structure,
_fake_gp,
n_cpus=n_cpu,
write_to_structure=False,
selective_atoms=selective_atoms,
skipped_atom_value=skipped_atom_value,
)
for x in forces[0][:]:
assert isinstance(x, float)
for x in forces[1:]:
assert np.isnan(x).all()
# Predict only on the second and third, and make rest 0
selective_atoms = [1, 2]
skipped_atom_value = 0
forces, stds = predict_on_structure_par(
_fake_structure,
_fake_gp,
write_to_structure=False,
n_cpus=n_cpu,
selective_atoms=selective_atoms,
skipped_atom_value=skipped_atom_value,
)
for x in forces[1]:
assert isinstance(x, float)
for x in forces[2]:
assert isinstance(x, float)
assert np.equal(forces[0], 0).all()
# Make selective atoms be all and ensure results are normal
selective_atoms = [0, 1, 2]
forces, stds = predict_on_structure_par(
_fake_structure,
_fake_gp,
write_to_structure=True,
n_cpus=n_cpu,
selective_atoms=selective_atoms,
skipped_atom_value=skipped_atom_value,
)
for x in forces.flatten():
assert isinstance(x, float)
for x in stds.flatten():
assert isinstance(x, float)
assert np.array_equal(_fake_structure.forces, forces)
assert np.array_equal(_fake_structure.stds, stds)
# Make selective atoms be nothing and ensure results are normal
forces, stds = predict_on_structure_par(
_fake_structure,
_fake_gp,
write_to_structure=True,
n_cpus=n_cpu,
selective_atoms=None,
skipped_atom_value=skipped_atom_value,
)
for x in forces.flatten():
assert isinstance(x, float)
for x in stds.flatten():
assert isinstance(x, float)
assert np.array_equal(_fake_structure.forces, forces)
assert np.array_equal(_fake_structure.stds, stds)
    # Get new results to also test that writes only cover the selected atoms
    selective_atoms = [0, 1]
forces, stds = predict_on_structure_par(
_fake_structure,
_fake_gp,
write_to_structure=True,
n_cpus=n_cpu,
selective_atoms=selective_atoms,
skipped_atom_value=skipped_atom_value,
)
for x in forces.flatten():
assert isinstance(x, float)
for x in stds.flatten():
assert isinstance(x, float)
    assert np.array_equal(_fake_structure.forces[:2][:], forces[:2][:])
# -*- coding: utf-8 -*-
"""Quadrature implementations"""
from __future__ import division
import numpy as np
from typing import Tuple
from .typing import ArrayLike
def clenshaw_curtis(n: int, a: float = -1.0, b: float = 1.0) -> Tuple[ArrayLike,ArrayLike]:
"""
Computes the points and weights for a Clenshaw-Curtis integration
from a to b. In other words, for the approximation to the integral
    \int_a^b f(x) dx \approx \sum_{i=0}^{n-1} w_i f(x_i)
with the Clenshaw-Curtis quadrature, this function returns the
positions x_i and the weights w_i.
"""
assert b > a and n > 1
npoints = n
nsegments = n - 1
theta = np.pi * np.flip(np.arange(npoints)) / nsegments
xx = np.cos(theta) * 0.5 * (b - a) + 0.5 * (a + b)
wcc0 = 1.0/(nsegments*nsegments - 1 + (nsegments%2))
# build v vector
v = np.zeros(nsegments)
v[:nsegments//2] = 2.0/(1.0 - 4.0 * np.arange(nsegments//2)**2)
v[nsegments//2] = (nsegments - 3) / (2 * (nsegments//2) - 1) - 1
kk = np.arange(1, npoints//2)
v[nsegments-kk] = np.conj(v[kk])
# build g vector
g = np.zeros(nsegments)
g[:nsegments//2] = -wcc0
g[nsegments//2] = wcc0 * ( (2 - (nsegments%2)) * nsegments - 1 )
g[nsegments-kk] = np.conj(g[kk])
h = v + g
wcc = np.fft.ifft(h)
# sanity check
imag_norm = np.linalg.norm(np.imag(wcc))
assert imag_norm < 1e-14
out = np.zeros(npoints)
out[:nsegments] = np.real(wcc)
out[nsegments] = out[0]
out = np.flip(out) # might be redundant, but for good measure
out *= 0.5 * (b - a)
return xx, out
def midpoint(n: int, a: float = -1.0, b: float = 1.0) -> Tuple[ArrayLike,ArrayLike]:
"""
Returns the points and weights for a midpoint integration
from a to b. In other words, for the approximation to the integral
    \int_a^b f(x) dx \approx \frac{b-a}{n} \sum_{i=0}^{n-1} f(a + (i + 1/2)\frac{b-a}{n})
"""
assert b > a and n > 1
weights = np.ones(n) * (b - a) / n
points = a + ((b - a) / n * (np.arange(n) + 0.5))
return points, weights
def trapezoid(n: int, a: float = -1.0, b: float = 1.0) -> Tuple[ArrayLike,ArrayLike]:
"""
Returns the points and weights for a trapezoid integration
from a to b. In other words, for the approximation to the integral
    \int_a^b f(x) dx \approx \frac{b-a}{n-1} [ (f(x_0) + f(x_{n-1}))/2 + \sum_{i=1}^{n-2} f(x_i) ]
"""
assert b > a and n > 1
ninterval = n - 1
    weights = np.ones(n) * (b - a) / ninterval
    weights[0] *= 0.5
    weights[-1] *= 0.5
    points = np.linspace(a, b, n)
    return points, weights
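# Minimal self-check sketch for the three rules above (the integrand and the
# node count are illustrative assumptions, not part of the library API):
if __name__ == "__main__":
    exact = np.sin(1.0) - np.sin(-1.0)  # integral of cos over [-1, 1]
    for rule in (clenshaw_curtis, midpoint, trapezoid):
        x, w = rule(33, -1.0, 1.0)
        approx = np.sum(w * np.cos(x))
        print(rule.__name__, approx, abs(approx - exact))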
#! /usr/bin/env python
"""
A set of functions for calculating flux weights given an array of energy and
cos(zenith) values based on the Honda atmospheric flux tables. A lot of this
functionality will be copied from honda.py but since I don't want to initialise
this as a stage it makes sense to copy it in to here so somebody can't
accidentally do the wrong thing with that script.
"""
import numpy as np
import scipy.interpolate as interpolate
from pisa.utils.log import logging
from pisa.utils.resources import open_resource
__all__ = [
"load_2d_honda_table",
"load_2d_bartol_table",
"load_2d_table",
"calculate_2d_flux_weights",
"load_3d_honda_table",
"load_3d_table",
"calculate_3d_flux_weights",
]
__author__ = "<NAME>"
__license__ = """Copyright (c) 2014-2017, The IceCube Collaboration
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
PRIMARIES = ["numu", "numubar", "nue", "nuebar"]
T_MODE_PRIMARIES = ["numu", "numubar", "nue", "nuebar", "nutau", "nutaubar"]
TEXPRIMARIES = [r"$\nu_{\mu}$", r"$\bar{\nu}_{\mu}$", r"$\nu_{e}$", r"$\bar{\nu}_{e}$"]
def load_2d_honda_table(flux_file, enpow=1, return_table=False, hg_taumode=False):
"""
Added "hg_taumode" to load in hillas gaisser h3a tables made with tau neutrino contributions.
"""
logging.debug("Loading atmospheric flux table %s", flux_file)
# columns in Honda files are in the same order
cols = ["energy"]
cols += T_MODE_PRIMARIES if hg_taumode else PRIMARIES
# Load the data table
table = np.genfromtxt(open_resource(flux_file), usecols=list(range(len(cols))))
if hg_taumode:
mask = np.array([all(~np.isnan(table)[i]) for i in range(len(table))])
table = table[mask].T
else:
mask = np.all(np.isnan(table) | np.equal(table, 0), axis=1)
table = table[~mask].T
flux_dict = dict(zip(cols, table))
for key in flux_dict.keys():
# There are 20 lines per zenith range
flux_dict[key] = np.array(np.split(flux_dict[key], 100 if hg_taumode else 20))
# Set the zenith and energy range as they are in the tables
# The energy may change, but the zenith should always be
# 20 bins, full sky.
flux_dict["energy"] = flux_dict["energy"][0]
if hg_taumode:
_edges = np.linspace(-1.0, 1.0, 101)
flux_dict["coszen"] = 0.5 * (_edges[:-1] + _edges[1:])
else:
flux_dict["coszen"] = np.linspace(-0.95, 0.95, 20)
# Now get a spline representation of the flux table.
logging.debug("Make spline representation of flux")
logging.debug("Doing this integral-preserving.")
spline_dict = {}
# Do integral-preserving method as in IceCube's NuFlux
# This one will be based purely on SciPy rather than ROOT
# Stored splines will be 1D in integrated flux over energy
int_flux_dict = {}
# Energy and CosZenith bins needed for integral-preserving
# method must be the edges of those of the normal tables
if hg_taumode:
int_flux_dict["logenergy"] = np.linspace(1.0, 6.0, 101)
int_flux_dict["coszen"] = np.linspace(-1, 1, 101)
else:
int_flux_dict["logenergy"] = np.linspace(-1.025, 4.025, 102)
int_flux_dict["coszen"] = np.linspace(-1, 1, 21)
for nutype in T_MODE_PRIMARIES if hg_taumode else PRIMARIES:
# spline_dict now wants to be a set of splines for
# every table cosZenith value.
splines = {}
cz_iter = 1
for energyfluxlist in flux_dict[nutype]:
int_flux = []
tot_flux = 0.0
int_flux.append(tot_flux)
for energyfluxval, energyval in zip(energyfluxlist, flux_dict["energy"]):
# Spline works best if you integrate flux * energy
tot_flux += energyfluxval * np.power(energyval, enpow) * 0.05
int_flux.append(tot_flux)
spline = interpolate.splrep(int_flux_dict["logenergy"], int_flux, s=0)
cz_value = "%.2f" % (1.05 - cz_iter * 0.1)
splines[cz_value] = spline
cz_iter += 1
spline_dict[nutype] = splines
for prim in T_MODE_PRIMARIES if hg_taumode else PRIMARIES:
flux_dict[prim] = flux_dict[prim][::-1]
if return_table:
return spline_dict, flux_dict
return spline_dict
def load_2d_bartol_table(flux_file, enpow=1, return_table=False):
logging.debug("Loading atmospheric flux table %s", flux_file)
# Bartol tables have been modified to look like Honda tables
cols = ["energy"] + PRIMARIES
# Load the data table
table = np.genfromtxt(open_resource(flux_file), usecols=list(range(len(cols))))
mask = np.all(np.isnan(table) | np.equal(table, 0), axis=1)
table = table[~mask].T
flux_dict = dict(zip(cols, table))
for key in flux_dict.keys():
# There are 20 lines per zenith range
flux_dict[key] = np.array(np.split(flux_dict[key], 20))
# Set the zenith and energy range as they are in the tables
# The energy may change, but the zenith should always be
# 20 bins, full sky.
flux_dict["energy"] = flux_dict["energy"][0]
flux_dict["coszen"] = np.linspace(-0.95, 0.95, 20)
# Now get a spline representation of the flux table.
logging.debug("Make spline representation of flux")
logging.debug("Doing this integral-preserving.")
spline_dict = {}
# Do integral-preserving method as in IceCube's NuFlux
# This one will be based purely on SciPy rather than ROOT
# Stored splines will be 1D in integrated flux over energy
int_flux_dict = {}
# Energy and CosZenith bins needed for integral-preserving
# method must be the edges of those of the normal tables
low_log_energy = np.linspace(-1, 1, 41)
high_log_energy = np.linspace(1.1, 4, 30)
int_flux_dict["logenergy"] = np.concatenate([low_log_energy, high_log_energy])
int_flux_dict["coszen"] = np.linspace(-1, 1, 21)
for nutype in PRIMARIES:
# spline_dict now wants to be a set of splines for
# every table cosZenith value.
splines = {}
cz_iter = 1
for energyfluxlist in flux_dict[nutype]:
int_flux = []
tot_flux = 0.0
int_flux.append(tot_flux)
for energyfluxval, energyval in zip(energyfluxlist, flux_dict["energy"]):
# Spline works best if you integrate flux * energy
if energyval < 10.0:
tot_flux += energyfluxval * np.power(energyval, enpow) * 0.05
else:
tot_flux += energyfluxval * np.power(energyval, enpow) * 0.1
int_flux.append(tot_flux)
spline = interpolate.splrep(int_flux_dict["logenergy"], int_flux, s=0)
cz_value = "%.2f" % (1.05 - cz_iter * 0.1)
splines[cz_value] = spline
cz_iter += 1
spline_dict[nutype] = splines
for prim in PRIMARIES:
flux_dict[prim] = flux_dict[prim][::-1]
if return_table:
return spline_dict, flux_dict
return spline_dict
def load_2d_table(flux_file, enpow=1, return_table=False):
"""Manipulate 2 dimensional flux tables.
2D is expected to mean energy and cosZenith, where azimuth is averaged
over (before being stored in the table) and the zenith range should
include both hemispheres.
Parameters
----------
flux_file : string
The location of the flux file you want to spline. Should be a honda
azimuth-averaged file.
enpow : integer
The power to which the energy will be raised in the construction of the
splines. If you don't know what this means, leave it as 1.
return_table : boolean
Flag to true if you want the function to also return a dictionary
of the underlying values from the tables. Useful for comparisons.
"""
if not isinstance(enpow, int):
raise TypeError("Energy power must be an integer")
if not isinstance(return_table, bool):
raise TypeError("return_table must be a boolean")
if not isinstance(flux_file, str):
raise TypeError("Flux file name must be a string")
if "aa" not in flux_file:
raise ValueError("Azimuth-averaged tables are expected")
if ("honda" not in flux_file) and ("hillas" not in flux_file):
if "bartol" in flux_file:
if return_table:
spline_dict, flux_dict = load_2d_bartol_table(
flux_file, enpow=enpow, return_table=True
)
else:
spline_dict = load_2d_bartol_table(flux_file, enpow=enpow)
spline_dict["name"] = "bartol"
else:
raise ValueError(
"Flux file must be from the Honda, Hillas, or " "Bartol groups"
)
else:
if return_table:
spline_dict, flux_dict = load_2d_honda_table(
flux_file,
enpow=enpow,
return_table=True,
hg_taumode="hillas" in flux_file,
)
else:
spline_dict = load_2d_honda_table(
flux_file, enpow=enpow, hg_taumode="hillas" in flux_file
)
spline_dict["name"] = "hillas" if "hillas" in flux_file else "honda"
if return_table:
return spline_dict, flux_dict
return spline_dict
def calculate_2d_flux_weights(
true_energies, true_coszens, en_splines, enpow=1, out=None
):
"""Calculate flux weights for given array of energy and cos(zenith).
Arrays of true energy and zenith are expected to be for MC events, so
they are tested to be of the same length.
`en_splines` should be the spline for the primary of interest. The entire
dictionary is calculated in the previous function.
Parameters
----------
true_energies : list or numpy array
A list of the true energies of your MC events. Pass this in GeV!
true_coszens : list or numpy array
A list of the true coszens of your MC events
en_splines : list of splines
A list of the initialised energy splines from the previous function
for your desired primary.
enpow : integer
The power to which the energy was raised in the construction of the
splines. If you don't know what this means, leave it as 1.
out : np.array
optional array to store results
Example
-------
Use the previous function to calculate the spline dict for the South Pole.
spline_dict = load_2d_table('flux/honda-2015-spl-solmax-aa.d')
Then you must have some equal length arrays of energy and zenith.
ens = [3.0, 4.0, 5.0]
czs = [-0.4, 0.7, 0.3]
These are used in this function, along with whatever primary you are
interested in calculating the flux weights for.
flux_weights = calculate_2d_flux_weights(ens, czs, spline_dict['numu'])
Done!
"""
if not isinstance(true_energies, np.ndarray):
if not isinstance(true_energies, list):
raise TypeError("true_energies must be a list or numpy array")
else:
true_energies = np.array(true_energies)
if not isinstance(true_coszens, np.ndarray):
if not isinstance(true_coszens, list):
raise TypeError("true_coszens must be a list or numpy array")
else:
true_coszens = np.array(true_coszens)
if not ((true_coszens >= -1.0).all() and (true_coszens <= 1.0).all()):
raise ValueError("Not all coszens found between -1 and 1")
if not len(true_energies) == len(true_coszens):
raise ValueError("length of energy and coszen arrays must match")
if not isinstance(enpow, int):
raise TypeError("Energy power must be an integer")
num_cz_points = 20
czkeys = ["%.2f" % x for x in np.linspace(-0.95, 0.95, num_cz_points)]
cz_spline_points = np.linspace(-1, 1, num_cz_points + 1)
if out is None:
out = np.empty_like(true_energies)
spline_vals = np.zeros(num_cz_points + 1)
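    # Integral-preserving evaluation: differentiate each energy-integrated
    # spline at log10(E) to recover the flux at every tabulated coszen, then
    # integrate those values over coszen, re-spline, and differentiate again
    # at the event's coszen.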
for i in range(len(true_energies)):
true_log_energy = np.log10(true_energies[i])
for j in range(num_cz_points):
spline_vals[j + 1] = interpolate.splev(
true_log_energy, en_splines[czkeys[j]], der=1
)
int_spline_vals = np.cumsum(spline_vals) * 0.1
spline = interpolate.splrep(cz_spline_points, int_spline_vals, s=0)
out[i] = interpolate.splev(true_coszens[i], spline, der=1) / np.power(
true_energies[i], enpow
)
return out
def load_3d_honda_table(flux_file, enpow=1, return_table=False):
logging.debug("Loading atmospheric flux table %s", flux_file)
# columns in Honda files are in the same order
cols = ["energy"] + PRIMARIES
# Load the data table
table = np.genfromtxt(open_resource(flux_file), usecols=list(range(len(cols))))
mask = np.all(np.isnan(table) | np.equal(table, 0), axis=1)
table = table[~mask].T
flux_dict = dict(zip(cols, table))
for key in flux_dict.keys():
# There are 20 lines per zenith range
coszenith_lists = np.array(np.split(flux_dict[key], 20))
azimuth_lists = []
for coszenith_list in coszenith_lists:
azimuth_lists.append(np.array(np.split(coszenith_list, 12)).T)
flux_dict[key] = np.array(azimuth_lists)
if not key == "energy":
flux_dict[key] = flux_dict[key].T
# Set the zenith and energy range as they are in the tables
# The energy may change, but the zenith should always be
# 20 bins and the azimuth should always be 12 bins, full sky
flux_dict["energy"] = flux_dict["energy"][0].T[0]
flux_dict["coszen"] = | np.linspace(0.95, -0.95, 20) | numpy.linspace |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
import argparse
import sys
import math
from data_loader import DataLoader
import pickle
import os
import importlib
def main(args):
# Build the inference graph
network = importlib.import_module('models.inception_resnet_v1', 'inference')
phase_train_bool = False
with tf.Session() as sess:
np.random.seed(seed=args.seed)
data_loader = DataLoader(args.data_dir, import_embedding=False)
print('Number of images: %d' % len(data_loader.dataset))
images_placeholder = tf.placeholder(np.float32,
shape=(None, args.image_size, args.image_size, 3),
name='input')
# keep probability is 1.0
# because we are not doing bayesian epistemic uncertainty on the embedding space
_, endpoints = network.inference(images_placeholder, keep_probability=1.0, bottleneck_layer_size=512,
phase_train=phase_train_bool)
embeddings = endpoints[f'{args.embedding_name}_flatten']
embedding_size = embeddings.get_shape()[1]
# Load the model
print('Loading pre-trained model...')
data_loader.load_model(args.model)
tf.summary.FileWriter(os.path.join(args.model, 'graph'), tf.get_default_graph())
# Run forward pass to calculate embeddings
print('Calculating features for images...')
nrof_images = data_loader.get_nrof_samples()
nrof_batches_per_epoch = int(math.ceil(1.0 * nrof_images / args.batch_size))
emb_array = np.zeros((nrof_images, embedding_size))
for i in range(nrof_batches_per_epoch):
start_index = i * args.batch_size
end_index = min((i + 1) * args.batch_size, nrof_images)
            images, _, _ = data_loader.load_data(indices=np.arange(start_index, end_index))
import numpy as np
from keras import backend as K
from keras.engine.topology import Layer
from keras.layers.pooling import _GlobalPooling1D
from keras.layers import Conv1D, Input, LocallyConnected1D
from keras.layers.core import Dropout
from concise.utils.plot import seqlogo, seqlogo_fig
import matplotlib.pyplot as plt
from keras.engine import InputSpec
from concise.utils.pwm import DEFAULT_BASE_BACKGROUND, pssm_array2pwm_array, _pwm2pwm_info
from keras import activations
from keras import constraints
from concise import initializers
from concise import regularizers
from concise.regularizers import GAMRegularizer, SplineSmoother
from concise.utils.splines import BSpline
from concise.utils.helper import get_from_module
from concise.utils.plot import heatmap
from concise.preprocessing.sequence import (DNA, RNA, AMINO_ACIDS,
CODONS, STOP_CODONS)
from concise.preprocessing.structure import RNAplfold_PROFILES
# --------------------------------------------
# Input()
def InputDNA(seq_length, name=None, **kwargs):
"""Input placeholder for array returned by `encodeDNA` or `encodeRNA`
Wrapper for: `keras.layers.Input((seq_length, 4), name=name, **kwargs)`
"""
return Input((seq_length, 4), name=name, **kwargs)
InputRNA = InputDNA
def InputCodon(seq_length, ignore_stop_codons=True, name=None, **kwargs):
"""Input placeholder for array returned by `encodeCodon`
Note: The seq_length is divided by 3
    Wrapper for: `keras.layers.Input((seq_length // 3, 61 or 64), name=name, **kwargs)`
"""
if ignore_stop_codons:
vocab = CODONS
else:
vocab = CODONS + STOP_CODONS
assert seq_length % 3 == 0
    return Input((seq_length // 3, len(vocab)), name=name, **kwargs)
def InputAA(seq_length, name=None, **kwargs):
"""Input placeholder for array returned by `encodeAA`
Wrapper for: `keras.layers.Input((seq_length, 22), name=name, **kwargs)`
"""
return Input((seq_length, len(AMINO_ACIDS)), name=name, **kwargs)
def InputRNAStructure(seq_length, name=None, **kwargs):
"""Input placeholder for array returned by `encodeRNAStructure`
Wrapper for: `keras.layers.Input((seq_length, 5), name=name, **kwargs)`
"""
return Input((seq_length, len(RNAplfold_PROFILES)), name=name, **kwargs)
# deprecated
def InputSplines(seq_length, n_bases=10, name=None, **kwargs):
"""Input placeholder for array returned by `encodeSplines`
Wrapper for: `keras.layers.Input((seq_length, n_bases), name=name, **kwargs)`
"""
return Input((seq_length, n_bases), name=name, **kwargs)
def InputSplines1D(seq_length, n_bases=10, name=None, **kwargs):
"""Input placeholder for array returned by `encodeSplines`
Wrapper for: `keras.layers.Input((seq_length, n_bases), name=name, **kwargs)`
"""
return Input((seq_length, n_bases), name=name, **kwargs)
# TODO - deprecate
def InputDNAQuantity(seq_length, n_features=1, name=None, **kwargs):
"""Convenience wrapper around `keras.layers.Input`:
`Input((seq_length, n_features), name=name, **kwargs)`
"""
return Input((seq_length, n_features), name=name, **kwargs)
# TODO - deprecate
def InputDNAQuantitySplines(seq_length, n_bases=10, name="DNASmoothPosition", **kwargs):
"""Convenience wrapper around keras.layers.Input:
`Input((seq_length, n_bases), name=name, **kwargs)`
"""
return Input((seq_length, n_bases), name=name, **kwargs)
# --------------------------------------------
class GlobalSumPooling1D(_GlobalPooling1D):
"""Global average pooling operation for temporal data.
# Note
- Input shape: 3D tensor with shape: `(batch_size, steps, features)`.
- Output shape: 2D tensor with shape: `(batch_size, channels)`
"""
def call(self, inputs):
return K.sum(inputs, axis=1)
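# Minimal usage sketch (hypothetical shapes; not part of the library API):
def _global_sum_pooling_example():
    from keras.models import Sequential
    model = Sequential()
    model.add(Conv1D(16, 5, input_shape=(100, 4)))  # -> (batch, 96, 16)
    model.add(GlobalSumPooling1D())                 # -> (batch, 16)
    return model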
class ConvSequence(Conv1D):
"""Convenience wrapper over `keras.layers.Conv1D` with 3 changes:
- additional plotting method: `plot_weights(index=None, plot_type="motif_raw", figsize=None, ncol=1)`
- **index**: can be a particular index or a list of indicies
- **plot_type**: Can be one of `"heatmap"`, `"motif_raw"`, `"motif_pwm"` or `"motif_pwm_info"`.
- **figsize**: tuple, Figure size
- **ncol**: Number of axis columns
- additional argument `seq_length` instead of `input_shape`
- restriction in build method: `input_shape[-1]` needs to the match the vocabulary size
Clasess `Conv*` all inherit from `ConvSequence` and define the corresponding vocabulary:
- ConvDNA
- ConvRNA
- ConvRNAStructure
- ConvAA
- ConvCodon
"""
VOCAB = DNA
def __init__(self,
filters,
kernel_size,
strides=1,
padding='valid',
dilation_rate=1,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
seq_length=None,
**kwargs):
# override input shape
if seq_length:
kwargs["input_shape"] = (seq_length, len(self.VOCAB))
kwargs.pop("batch_input_shape", None)
super(ConvSequence, self).__init__(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
dilation_rate=dilation_rate,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
**kwargs)
self.seq_length = seq_length
def build(self, input_shape):
        if input_shape[-1] != len(self.VOCAB):
raise ValueError("{cls} requires input_shape[-1] == {n}. Given: {s}".
format(cls=self.__class__.__name__, n=len(self.VOCAB), s=input_shape[-1]))
return super(ConvSequence, self).build(input_shape)
def get_config(self):
config = super(ConvSequence, self).get_config()
config["seq_length"] = self.seq_length
return config
def _plot_weights_heatmap(self, index=None, figsize=None, **kwargs):
"""Plot weights as a heatmap
        index = can be a particular index or a list of indices
**kwargs - additional arguments to concise.utils.plot.heatmap
"""
W = self.get_weights()[0]
if index is None:
            index = np.arange(W.shape[2])
from sklearn.cluster import KMeans
import numpy as np
import math
import matplotlib.pyplot as plt
import socialGroup
class Simulation:
def __init__(self, numSteps, numRuns, agents, socialCtx, outputF):
self.numSteps = numSteps
self.numRuns = numRuns
self.agents = agents
self.socialCtx = socialCtx
self.thresholdCluster = 0.3
self.thresholdNormative = 0.2
self.thresholdMinimalSalience = 0.2
self.outputF = outputF
#For Plots
plt.rcParams['font.size'] = 12
plt.rcParams['font.family'] = 'sans-serif'
plt.rcParams['font.sans-serif'] = ['Bahnschrift']
plt.rcParams["font.weight"] = "normal"
self.plotSymbols = ["o", "v", "H", "*", "X", "d", "P", "s", "D", "p"]
self.colors = ["orange", "deepskyblue"]
self.colorsAgents = ["yellow", "aqua", "green", "pink", "brown", "darkorange", "purple", "gray", "olive", "magenta"]
self.plotLine = ['x','+']
self.contextWeights = []
for c in self.socialCtx.theme:
self.contextWeights.append(self.socialCtx.theme[c])
self.cycle()
def plotStyle(self, plot):
plot.grid(color='gray', linewidth=1.0)
plot.spines['top'].set_visible(False)
plot.spines['right'].set_visible(False)
plot.spines['bottom'].set_visible(False)
plot.spines['left'].set_visible(False)
def distanceToCentroids(self, first, second):
distance = []
lenFirst = len(first)
for i in range(0, lenFirst):
auxDist = self.distanceWeights(first[i], second[i])
distance.append(auxDist)
return distance
def distanceWeights(self, first, second):
lenWeights = len(self.contextWeights)
#when theme is empty
if lenWeights == 0:
lenWeights = len(first)
        lDist = abs(np.array(first) - np.array(second))
import unittest
import numpy as np
import tensorflow as tf
import torch
import fastestimator as fe
import fastestimator.test.unittest_util as fet
class TestToType(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.data_np = {
"x": np.ones((10, 15), dtype="float32"),
"y": [np.ones((4), dtype="int8"), np.ones((5, 3), dtype="double")],
"z": {
"key": np.ones((2, 2), dtype="int64")
}
}
cls.data_tf = {
"x": tf.ones((10, 15), dtype="float32"),
"y": [tf.ones((4), dtype="int8"), tf.ones((5, 3), dtype="double")],
"z": {
"key": tf.ones((2, 2), dtype="int64")
}
}
cls.data_torch = {
"x": torch.ones((10, 15), dtype=torch.float32),
"y": [torch.ones((4), dtype=torch.int8), torch.ones((5, 3), dtype=torch.double)],
"z": {
"key": torch.ones((2, 2), dtype=torch.long)
}
}
cls.op_np = {
            'x': np.dtype('float32'), 'y': [np.dtype('int8'), np.dtype('float64')],
#
# Copyright (c) 2019, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
from timeit import default_timer as timer
import cuml.tsa.arima as arima
from scipy.optimize.optimize import _approx_fprime_helper
from cuml.utils.input_utils import input_to_host_array
# test data time
t = np.array([1, 2, 3, 4, 5, 7, 8, 10, 11, 12, 13, 15, 16, 17, 18, 19, 20, 21,
24, 25, 26, 28, 39, 40, 41, 42, 43, 45, 46, 48, 50, 51, 52, 53,
55, 56, 58, 59, 60, 63, 71, 72, 74])
# test dataset 0
data0 = np.array([16454, 12708, 14084, 20929, 11888, 13378, 20503, 17422,
16574, 16567, 14222, 14471, 11988, 17122, 15448, 14290,
13679, 10690, 17240, 17900, 16673, 1070, 16165, 15832, 18495,
15160, 15638, 21688, 18284, 2306, 10159, 8224, 7517, 14363,
11185, 15804, 2816, 12217, 7739, 5459, 6241, 171, 11118])
# test dataset 1
data1 = np.array([16492, 12671, 13644, 18120, 11420, 10904, 20723, 17011,
15109, 15791, 13014, 14622, 12029, 15932, 14731, 13573,
13229, 11371, 16400, 16939, 16672, 2520, 14627, 14035, 14724,
15094, 12812, 20126, 16411, 2687, 9582, 8291, 7352, 14313,
10552, 14143, 2790, 12960, 7362, 4606, 6155, 158, 11435])
# The ARIMA model of dataset 0. ("smoothed dataset 0")
data_smooth = np.array([16236.380267964598, 14198.707110817017,
13994.129600585984, 15705.975404284243,
14455.226246272636, 14114.076675764649,
15033.216755054425, 15021.10438408751,
14954.822759706418, 14904.042532492134,
14557.421649530697, 14347.41471896904,
13877.476483976807, 14059.990544916833,
13888.386639087348, 13665.988312305493,
13436.674608089721, 12979.25813798955,
13199.416272194492, 13243.849692596767,
13157.053784142185, 11904.470827085499,
12356.442250181439, 12279.590418507576,
12401.153685335092, 12190.66504090282,
12122.442825730872, 12444.119210649873,
12326.524612239178, 11276.55939500802,
11278.522346300862, 10911.26233776968,
10575.493222628831, 10692.727355175008,
10395.405550019213, 10480.90443078538,
9652.114779061498, 9806.45087894164, 9401.00281392505,
9019.688213508754, 8766.056499652503,
8158.794074075997, 8294.86605488629])
def create_yp_ref():
""" creates reference prediction """
(_, y) = get_data()
model = arima.ARIMAModel((1, 1, 1), np.array([-217.7230173548441,
-206.81064091237104]),
[np.array([0.0309380078339684]),
np.array([-0.0371740508810001])],
[np.array([-0.9995474311219695]),
np.array([-0.9995645146854383])], y)
y_p_ref = model.predict_in_sample()
print("yp_ref=", y_p_ref)
# reference prediction for (1,1,1). recreate with `create_yp_ref()`
yp_ref = [[16236.276982645155, 14199.83635121614, 13993.612504802639,
15701.001917841138, 14457.318782427961, 14114.436684625534,
15028.287621746756, 15018.826402730409, 14953.184455915669,
14902.339385888643, 14557.310116753155, 14346.872075971714,
13878.49920540047, 14056.977409351373, 13886.543137497267,
13664.846049477095, 13435.78531068983, 12980.316970030086,
13195.421878944875, 13239.340147071023, 13153.408671153384,
11912.022478836143, 12352.451020219527, 12275.76344198953,
12395.309983436986, 12187.223001418526, 12118.535320809358,
12435.729542924131, 12320.661934977046, 11282.214722260982,
11278.023270572445, 10911.916572651637, 10576.37138790725,
10688.356982664653, 10393.213992661886, 10475.175832966357,
9655.699137880823, 9802.85623495, 9400.49054615417,
9020.574181472959, 8766.084012642543, 8162.945169968312,
8291.973806637427],
[16285.189359087628, 14338.460909054174,
13850.63823251114, 14556.56635360983, 13954.695497411303,
13244.780548562172, 13744.642846463914, 14080.121846941318,
14051.566389907626, 13986.915237521414, 13786.62136453952,
13582.380651361393, 13344.11985120289, 13170.332862411682,
13105.025676475907, 12962.955049014487, 12776.960524427446,
12553.108594193804, 12354.900642927994, 12350.399980965518,
12352.327486277976, 12215.518342586416, 11656.131573206087,
11563.059813979233, 11449.754138979828, 11362.05755263616,
11286.236966021392, 11116.378254211602, 11223.015986560224,
11233.463302287848, 10709.250034043267, 10466.998468513524,
10192.800693817426, 9840.980314287335, 9786.651333552647,
9559.92129655608, 9584.118472336395, 9080.57441537021,
9030.024898020312, 8807.168013053131, 8470.279842824808,
8280.44295003853, 7648.106311322318]]
def get_data():
"""Convenience function to get reference data"""
d = np.zeros((len(t), 2))
d[:, 0] = data0
d[:, 1] = data1
return (t, d)
def test_transform():
"""Test the parameter transformation code."""
x0 = np.array([-36.24493319, -0.76159416, -0.76159516, -167.65533746,
-0.76159416, -0.76159616])
# Without corrections to the MA parameters, this inverse transform will
# return NaN
Tx0 = arima._batch_invtrans(0, 1, 2, 2, x0)
assert(not np.isnan(Tx0).any())
Tx0 = arima._batch_invtrans(2, 1, 0, 2, x0)
assert(not np.isnan(Tx0).any())
Tx0 = arima._batch_invtrans(1, 1, 1, 2, np.array([-1.27047619e+02,
1.90024682e-02,
-5.88867176e-01,
-1.20404762e+02,
5.12333137e-05,
-6.14485076e-01]))
np.testing.assert_allclose(Tx0, np.array([-1.27047619e+02,
3.80095119e-02,
-1.35186024e+00,
-1.20404762e+02,
1.02466627e-04,
-1.43219144e+00]))
def test_log_likelihood():
"""
Test loglikelihood against reference results using reference parameters
"""
x0 = [[-220.35376518754148,
-0.2617000627224417,
-2.1893003751753457],
[-2.3921544864718811e+02, -1.3525124433776395e-01,
-7.5978156540072991e-02,
-2.4055488944465053e+00]]
ref_ll = [-415.7117855771454, -415.32341960785186]
_, y = get_data()
for p in range(1, 3):
order = (p, 1, 1)
y0 = np.zeros((len(t), 1), order='F')
y0[:, 0] = y[:, 0]
ll = arima.ll_f(1, len(t), order, y0, np.copy(x0[p-1]), trans=True)
np.testing.assert_almost_equal(ll, ref_ll[p-1])
x = [-1.2704761899e+02, 3.8009501900e-02, -1.3518602400e+00,
-1.2040476199e+02, 1.0245662700e-04, -1.4321914400e+00]
ll = arima.ll_f(2, len(t), (1, 1, 1), y, np.array(x))
np.set_printoptions(precision=14)
ll_ref = np.array([-418.2732740315433, -413.7692130741877])
np.testing.assert_allclose(ll, ll_ref)
def test_gradient_ref():
"""Tests the gradient based on a reference output"""
x = np.array([-1.2704761899e+02, 3.8009511900e-02, -1.3518602400e+00,
-1.2040476199e+02, 1.0246662700e-04, -1.4321914400e+00])
_, y = get_data()
np.set_printoptions(precision=14)
g = arima.ll_gf(2, len(t), 3, (1, 1, 1), y, x)
g_ref = np.array([-7.16227077646181e-04, -4.09565927839139e+00,
-4.10715017551411e+00, -1.02602371043758e-03,
-4.46265460141149e+00,
-4.18378931499319e+00])
np.testing.assert_allclose(g, g_ref, rtol=1e-6)
def test_gradient():
"""test gradient implementation using FD"""
num_samples = 100
xs = np.linspace(0, 1, num_samples)
np.random.seed(12)
noise = np.random.normal(scale=0.1, size=num_samples)
ys = noise + 0.5*xs
for num_batches in range(1, 5):
ys_df = np.reshape(np.tile(np.reshape(ys,
(num_samples, 1)),
num_batches),
(num_batches, num_samples), order="C").T
order = (1, 1, 1)
mu = 0.0
arparams = np.array([-0.01])
maparams = np.array([-1.0])
x = np.r_[mu, arparams, maparams]
x = np.tile(x, num_batches)
num_samples = ys_df.shape[0]
num_batches = ys_df.shape[1]
p, d, q = order
num_parameters = d + p + q
g = arima.ll_gf(num_batches, num_samples,
num_parameters, order, ys_df, x)
grad_fd = np.zeros(len(x))
h = 1e-8
for i in range(len(x)):
def fx(xp):
return arima.ll_f(num_batches, num_samples, order,
ys_df, xp).sum()
xph = np.copy(x)
xmh = np.copy(x)
xph[i] += h
xmh[i] -= h
f_ph = fx(xph)
f_mh = fx(xmh)
grad_fd[i] = (f_ph-f_mh)/(2*h)
np.testing.assert_allclose(g, grad_fd, rtol=1e-4)
def f(xk):
return arima.ll_f(num_batches, num_samples, order,
ys_df, xk).sum()
# from scipy
g_sp = _approx_fprime_helper(x, f, h)
np.testing.assert_allclose(g, g_sp, rtol=1e-4)
def test_bic():
"""Test "Bayesian Information Criterion" metric. BIC penalizes the
log-likelihood with the number of parameters.
"""
np.set_printoptions(precision=16)
bic_reference = [[851.0904458614862, 842.6620993460326],
[854.747970752074, 846.2220267762417]]
_, y = get_data()
for p in range(1, 3):
order = (p, 1, 1)
mu0, ar0, ma0 = arima.estimate_x0(order, y)
batched_model = arima.fit(y, order,
mu0,
ar0,
ma0,
opt_disp=-1, h=1e-9)
np.testing.assert_allclose(batched_model.bic,
bic_reference[p-1], rtol=1e-4)
def test_fit():
"""Test the `fit()` function against reference parameters."""
_, y = get_data()
mu_ref = [np.array([-217.7230173548441, -206.81064091237104]),
np.array([-217.72325384510506, -206.77224439903458])]
ar_ref = [
np.array([[0.0309380078339684, -0.0371740508810001]], order='F'),
np.array([[0.0309027562133337, -0.0386322768036704],
[-0.0191533926207157, -0.0330133336831984]], order='F')]
ma_ref = [
np.array([[-0.9995474311219695, -0.9995645146854383]], order='F'),
np.array([[-0.999629811305126, -0.9997747315789454]], order='F')]
ll_ref = [[-414.7628631782474, -410.049081775547],
[-414.7559799310751, -410.0285309839064]]
for p in range(1, 3):
order = (p, 1, 1)
mu0, ar0, ma0 = arima.estimate_x0(order, y)
batched_model = arima.fit(y, order,
mu0,
ar0,
ma0,
opt_disp=-1, h=1e-9)
print("num iterations: ", batched_model.niter)
x = arima.pack(p, 1, 1, 2, batched_model.mu,
batched_model.ar_params, batched_model.ma_params)
llx = arima.ll_f(2, len(t), (p, 1, 1), y, x, trans=False)
rtol = 1e-2
# parameter differences are more difficult to test precisely due to the
# nonlinear-optimization.
np.testing.assert_allclose(batched_model.mu, mu_ref[p-1], rtol=rtol)
np.testing.assert_allclose(batched_model.ar_params, ar_ref[p-1],
rtol=rtol)
np.testing.assert_allclose(batched_model.ma_params, ma_ref[p-1],
rtol=rtol)
# more important is that the loglikelihood is close to a relatively
# higher tolerance.
np.testing.assert_allclose(llx, ll_ref[p-1], rtol=1e-6)
def test_predict(plot=False):
"""Test the `predict_in_sample()` function using provided parameters"""
_, y = get_data()
mu = [np.array([-217.7230173548441, -206.81064091237104]),
np.array([-217.72325384510506, -206.77224439903458])]
ar = [[np.array([0.0309380078339684]), np.array([-0.0371740508810001])],
[np.array([0.0309027562133337, -0.0191533926207157]),
np.array([-0.0386322768036704, -0.0330133336831984])]]
ma = [[np.array([-0.9995474311219695]), np.array([-0.9995645146854383])],
[np.array([-0.999629811305126]), np.array([-0.9997747315789454])]]
l2err_ref = [[7.611525998416604e+08, 7.008862739645946e+08],
[7.663156224285843e+08, 6.993847054122686e+08]]
for p in range(1, 3):
order = (p, 1, 1)
model = arima.ARIMAModel(order, mu[p-1], ar[p-1], ma[p-1], y)
d_y_b_p = model.predict_in_sample()
y_b_p = input_to_host_array(d_y_b_p).array
if plot:
import matplotlib.pyplot as plt
nb_plot = 2
fig, axes = plt.subplots(nb_plot, 1)
axes[0].plot(t, y[:, 0], t, y_b_p[:, 0], "r-")
axes[1].plot(t, y[:, 1], t, y_b_p[:, 1], "r-")
if p == 1:
axes[0].plot(t, yp_ref[p-1][0], "g--")
axes[1].plot(t, yp_ref[p-1][1], "g--")
plt.show()
l2_error_predict = np.sum((y_b_p - y)**2, axis=0)
np.testing.assert_allclose(l2err_ref[p-1], l2_error_predict)
if p == 1:
np.testing.assert_allclose(y_b_p[:, 0], yp_ref[0])
np.testing.assert_allclose(y_b_p[:, 1], yp_ref[1])
def test_forecast():
"""Test forecast using provided parameters"""
_, y = get_data()
mu = [np.array([-217.7230173548441, -206.81064091237104]),
np.array([-217.72325384510506, -206.77224439903458])]
ar = [[np.array([0.0309380078339684]), np.array([-0.0371740508810001])],
[np.array([0.0309027562133337, -0.0191533926207157]),
np.array([-0.0386322768036704, -0.0330133336831984])]]
ma = [[np.array([-0.9995474311219695]), np.array([-0.9995645146854383])],
[np.array([-0.999629811305126]), np.array([-0.9997747315789454])]]
y_fc_ref = [np.array([[8291.97380664, 7993.55508519, 7773.33550351],
[7648.10631132, 7574.38185979, 7362.6238661]]),
np.array([[7609.91057747, 7800.22971962, 7473.00968599],
[8016.79544837, 7472.39902223, 7400.83781943]])]
for p in range(1, 3):
order = (p, 1, 1)
model = arima.ARIMAModel(order, mu[p-1], ar[p-1], ma[p-1], y)
d_y_b_fc = model.forecast(3)
y_b_fc = input_to_host_array(d_y_b_fc).array
np.testing.assert_allclose(y_fc_ref[p-1], y_b_fc.T)
def test_fit_predict_forecast(plot=False):
"""Full integration test: Tests fit followed by in-sample prediction and
out-of-sample forecast
"""
np.set_printoptions(precision=16)
_, y = get_data()
ns_train = 35
ns_test = len(t) - ns_train
y_b_p = []
y_f_p = []
for p in range(1, 3):
order = (p, 1, 1)
nb = 2
y_train = np.zeros((ns_train, nb))
for i in range(nb):
y_train[:, i] = y[:ns_train, i]
p, _, _ = order
mu0, ar0, ma0 = arima.estimate_x0(order, y_train)
batched_model = arima.fit(y_train, order,
mu0,
ar0,
ma0,
opt_disp=-1, h=1e-9)
d_y_b = batched_model.predict_in_sample()
y_b = input_to_host_array(d_y_b).array
d_y_fc = batched_model.forecast(ns_test)
y_fc = input_to_host_array(d_y_fc).array
y_b_p.append(y_b)
y_f_p.append(y_fc)
if plot:
import matplotlib.pyplot as plt
nb_plot = 2
_, axes = plt.subplots(nb_plot, 1)
axes[0].plot(t, y[:, 0], t[:ns_train], y_b_p[0][:, 0], "r-",
t[ns_train-1:-1], y_f_p[0][:, 0], "--")
axes[0].plot(t[:ns_train], y_b_p[1][:, 0], "g-",
t[ns_train-1:-1], y_f_p[1][:, 0], "y--")
axes[0].plot(t, yp_ref[0], "b--")
axes[1].plot(t, y[:, 1], t[:ns_train], y_b_p[0][:, 1], "r-",
t[ns_train-1:-1], y_f_p[0][:, 1], "--")
axes[1].plot(t[:ns_train], y_b_p[1][:, 1], "g-",
t[ns_train-1:-1], y_f_p[1][:, 1], "y--")
axes[1].plot(t, yp_ref[1], "b--")
plt.show()
l2_error_predict0 = np.sum((y_b_p[0][:, :] - y[:ns_train, :])**2, axis=0)
l2_error_predict1 = np.sum((y_b_p[1][:, :] - y[:ns_train, :])**2, axis=0)
l2_error_ref0 = [5.1819845778009456e+08, 4.4313075823450834e+08]
l2_error_ref1 = [5.4015810529295897e+08, 4.6489505018349826e+08]
l2_error_forecast0 = np.sum((y_f_p[0][:, :] - y[ns_train-1:-1, :])**2,
axis=0)
l2_error_forecast1 = np.sum((y_f_p[1][:, :] - y[ns_train-1:-1, :])**2,
axis=0)
l2_error_fc_ref0 = [2.7841860168252653e+08, 2.4003239604745972e+08]
l2_error_fc_ref1 = [3.728470033076098e+08, 3.039953059636233e+08]
rtol = 5e-5
np.testing.assert_allclose(l2_error_predict0, l2_error_ref0, rtol=rtol)
np.testing.assert_allclose(l2_error_predict1, l2_error_ref1, rtol=rtol)
rtol = 1e-3
np.testing.assert_allclose(l2_error_forecast0, l2_error_fc_ref0, rtol=rtol)
np.testing.assert_allclose(l2_error_forecast1, l2_error_fc_ref1, rtol=rtol)
def test_grid_search(num_batches=2):
"""Tests grid search using random data over the default range of p,q
parameters"""
ns = len(t)
y_b = np.zeros((ns, num_batches))
for i in range(num_batches):
y_b[:, i] = np.random.normal(size=ns, scale=2000) + data_smooth
best_order, best_mu, best_ar, best_ma, best_ic = arima.grid_search(y_b,
d=1)
if num_batches == 2:
np.testing.assert_array_equal(best_order, [(0, 1, 1), (0, 1, 1)])
def demo():
"""Demo example from the documentation"""
import matplotlib.pyplot as plt
num_samples = 200
xs = np.linspace(0, 1, num_samples)
np.random.seed(12)
noise = np.random.normal(scale=0.05, size=num_samples)
noise2 = np.random.normal(scale=0.05, size=num_samples)
    ys1 = noise + 0.5*xs + 0.1*np.sin(xs/np.pi)
import numpy as np
import scipy.sparse as sp
from copy import deepcopy
from angler.linalg import construct_A, solver_direct, grid_average
from angler.derivatives import unpack_derivs
from angler.nonlinear_solvers import born_solve, newton_solve, newton_krylov_solve
from angler.source.mode import mode
from angler.nonlinearity import Nonlinearity
from angler.constants import (DEFAULT_LENGTH_SCALE, DEFAULT_MATRIX_FORMAT,
DEFAULT_SOLVER, EPSILON_0, MU_0)
from angler.filter import eps2rho
from angler.plot import plt_base_eps, plt_base
class Simulation:
def __init__(self, omega, eps_r, dl, NPML, pol, L0=DEFAULT_LENGTH_SCALE, use_dirichlet_bcs=False):
# initializes Fdfd object
self.L0 = L0
self.omega = omega
self.NPML = NPML
self.pol = pol
self.dl = dl
self.use_dirichlet_bcs = use_dirichlet_bcs
self._check_inputs()
grid_shape = eps_r.shape
if len(grid_shape) == 1:
self.flatten = True
grid_shape = (grid_shape[0], 1)
eps_r = np.reshape(eps_r, grid_shape)
else:
self.flatten = False
(Nx, Ny) = grid_shape
self.Nx = Nx
self.Ny = Ny
self.mu_r = np.ones((self.Nx, self.Ny))
self.src = np.zeros((self.Nx, self.Ny), dtype=np.complex64)
self.xrange = [0, float(self.Nx*self.dl)]
self.yrange = [0, float(self.Ny*self.dl)]
self.NPML = [int(n) for n in self.NPML]
self.omega = float(self.omega)
self.dl = float(dl)
# construct the system matrix
self.eps_r = eps_r
self.modes = []
self.nonlinearity = []
self.eps_nl = np.zeros(eps_r.shape)
self.dnl_de = np.zeros(eps_r.shape)
self.dnl_deps = np.zeros(eps_r.shape)
def setup_modes(self):
        # set up the source term for every registered mode
for modei in self.modes:
modei.setup_src(self)
def add_mode(self, neff, direction_normal, center, width,
scale=1, order=1):
# adds a mode definition to the simulation
new_mode = mode(neff, direction_normal, center, width,
scale=scale, order=order)
self.modes.append(new_mode)
def compute_nl(self, e, matrix_format=DEFAULT_MATRIX_FORMAT):
# evaluates the nonlinear functions for a field e
self.eps_nl = np.zeros(self.eps_r.shape)
self.dnl_de = np.zeros(self.eps_r.shape)
self.dnl_deps = np.zeros(self.eps_r.shape)
for nli in self.nonlinearity:
self.eps_nl = self.eps_nl + nli.eps_nl(e, self.eps_r)
self.dnl_de = self.dnl_de + nli.dnl_de(e, self.eps_r)
self.dnl_deps = self.dnl_deps + nli.dnl_deps(e, self.eps_r)
Nbig = self.Nx*self.Ny
Anl = sp.spdiags(self.omega**2*EPSILON_0*self.L0*self.eps_nl.reshape((-1,)), 0, Nbig, Nbig, format=matrix_format)
self.Anl = Anl
def add_nl(self, chi, nl_region, nl_type='kerr', eps_scale=False, eps_max=None):
# adds a nonlinearity to the simulation
new_nl = Nonlinearity(chi/np.square(self.L0), nl_region, nl_type, eps_scale, eps_max)
self.nonlinearity.append(new_nl)
@property
def eps_r(self):
return self.__eps_r
@eps_r.setter
def eps_r(self, new_eps):
grid_shape = new_eps.shape
if len(grid_shape) == 1:
grid_shape = (grid_shape[0], 1)
new_eps.reshape(grid_shape)
(Nx, Ny) = grid_shape
self.Nx = Nx
self.Ny = Ny
self.__eps_r = new_eps
(A, derivs) = construct_A(self.omega, self.xrange, self.yrange,
self.eps_r, self.NPML, self.pol, self.L0,
matrix_format=DEFAULT_MATRIX_FORMAT,
use_dirichlet_bcs=self.use_dirichlet_bcs,
timing=False)
self.A = A
self.derivs = derivs
self.fields = {f: None for f in ['Ex', 'Ey', 'Ez', 'Hx', 'Hy', 'Hz']}
self.fields_nl = {f: None for f in ['Ex', 'Ey', 'Ez', 'Hx', 'Hy', 'Hz']}
def solve_fields(self, include_nl=False, timing=False, averaging=False,
matrix_format=DEFAULT_MATRIX_FORMAT):
# performs direct solve for A given source
EPSILON_0_ = EPSILON_0*self.L0
MU_0_ = MU_0*self.L0
if include_nl==False:
eps_tot = self.eps_r
X = solver_direct(self.A, self.src*1j*self.omega, timing=timing)
else:
eps_tot = self.eps_r + self.eps_nl
X = solver_direct(self.A + self.Anl, self.src*1j*self.omega, timing=timing)
(Nx, Ny) = self.src.shape
M = Nx*Ny
(Dyb, Dxb, Dxf, Dyf) = unpack_derivs(self.derivs)
if self.pol == 'Hz':
if averaging:
eps_x = grid_average(EPSILON_0_*(eps_tot), 'x')
vector_eps_x = eps_x.reshape((-1,))
eps_y = grid_average(EPSILON_0_*(eps_tot), 'y')
vector_eps_y = eps_y.reshape((-1,))
else:
vector_eps_x = EPSILON_0_*(eps_tot).reshape((-1,))
vector_eps_y = EPSILON_0_*(eps_tot).reshape((-1,))
T_eps_x_inv = sp.spdiags(1/vector_eps_x, 0, M, M,
format=matrix_format)
T_eps_y_inv = sp.spdiags(1/vector_eps_y, 0, M, M,
format=matrix_format)
ex = 1/1j/self.omega * T_eps_y_inv.dot(Dyb).dot(X)
ey = -1/1j/self.omega * T_eps_x_inv.dot(Dxb).dot(X)
if self.flatten:
Ex = ex.flatten()
Ey = ey.flatten()
Hz = X.flatten()
else:
Ex = ex.reshape((Nx, Ny))
Ey = ey.reshape((Nx, Ny))
Hz = X.reshape((Nx, Ny))
if include_nl==False:
self.fields['Ex'] = Ex
self.fields['Ey'] = Ey
self.fields['Hz'] = Hz
return (Ex, Ey, Hz)
elif self.pol == 'Ez':
hx = -1/1j/self.omega/MU_0_ * Dyb.dot(X)
hy = 1/1j/self.omega/MU_0_ * Dxb.dot(X)
if self.flatten:
Hx = hx.flatten()
Hy = hy.flatten()
Ez = X.flatten()
else:
Hx = hx.reshape((Nx, Ny))
Hy = hy.reshape((Nx, Ny))
Ez = X.reshape((Nx, Ny))
if include_nl==False:
self.fields['Hx'] = Hx
self.fields['Hy'] = Hy
self.fields['Ez'] = Ez
return (Hx, Hy, Ez)
else:
raise ValueError('Invalid polarization: {}'.format(str(self.pol)))
def solve_fields_nl(self,
timing=False, averaging=False,
Estart=None, solver_nl='hybrid', conv_threshold=1e-10,
max_num_iter=50,
matrix_format=DEFAULT_MATRIX_FORMAT):
# solves for the nonlinear fields of the simulation.
# note: F just stands for the field component (could be H or E depending on polarization)
if solver_nl == 'born':
(Fx, Fy, Fz, conv_array) = born_solve(self, Estart,
conv_threshold,
max_num_iter,
averaging=averaging)
elif solver_nl == 'newton':
(Fx, Fy, Fz, conv_array) = newton_solve(self, Estart,
conv_threshold,
max_num_iter,
averaging=averaging)
elif solver_nl == 'LM':
(Fx, Fy, Fz, conv_array) = LM_solve(self, Estart,
conv_threshold,
max_num_iter,
averaging=averaging)
elif solver_nl == 'krylov':
(Fx, Fy, Fz, conv_array) = newton_krylov_solve(self, Estart,
conv_threshold,
max_num_iter,
averaging=averaging)
elif solver_nl == 'hybrid':
(Fx, Fy, Fz, conv_array) = born_solve(self, Estart,
conv_threshold, 20,
averaging=averaging)
if conv_array[-1] > 1e-10:
(Fx, Fy, Fz, conv_array) = newton_solve(self, Fz,
conv_threshold, 20,
averaging=averaging)
else:
raise AssertionError("solver_nl must be one of "
"{'born', 'newton', 'LM', 'krylov', 'hybrid'}")
# return final nonlinear fields and an array of the convergences
if self.pol == 'Ez':
self.fields_nl['Hx'] = Fx
self.fields_nl['Hy'] = Fy
self.fields_nl['Ez'] = Fz
elif self.pol == 'Hz':
self.fields_nl['Ex'] = Fx
self.fields_nl['Ey'] = Fy
self.fields_nl['Hz'] = Fz
else:
raise ValueError('Invalid polarization: {}'.format(str(self.pol)))
return (Fx, Fy, Fz, conv_array)
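# Example sketch (`sim` is a hypothetical simulation instance with a source and
# at least one nonlinearity already set): run the hybrid Born/Newton solver and
# check the final residual against the threshold.
# Fx, Fy, Fz, conv = sim.solve_fields_nl(solver_nl='hybrid', conv_threshold=1e-10)
# converged = conv[-1] < 1e-10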
def _check_inputs(self):
# checks the inputs and makes sure they are kosher
assert self.L0 > 0, "L0 must be a positive number, was supplied {},".format(str(self.L0))
assert self.omega > 0, "omega must be a positive number, was supplied {},".format(str(self.omega))
assert isinstance(self.NPML, list), "NPML must be a list of integers"
assert len(self.NPML) == 2, "yrange must be a list of length 2, was supplied {}, which is of length {}".format(str(self.NPML), len(self.NPML))
assert self.NPML[0] >= 0 and self.NPML[1] >= 0, "both elements of NPML must be >= 0"
assert self.pol in ['Ez', 'Hz'], "pol must be one of 'Ez' or 'Hz'"
# to do, check for correct types as well.
def flux_probe(self, direction_normal, center, width, nl=False):
# computes the total flux across the plane (line in 2D) defined by direction_normal, center, width
# first extract the slice of the permittivity
if direction_normal == "x":
inds_x = [center[0], center[0]+1]
inds_y = [int(center[1]-width/2), int(center[1]+width/2)]
elif direction_normal == "y":
inds_x = [int(center[0]-width/2), int(center[0]+width/2)]
inds_y = [center[1], center[1]+1]
else:
raise ValueError("The value of direction_normal is neither x nor y!")
if self.pol == 'Ez':
if nl:
field_val_Ez = self.fields_nl['Ez']
field_val_Hy = self.fields_nl['Hy']
field_val_Hx = self.fields_nl['Hx']
else:
field_val_Ez = self.fields['Ez']
field_val_Hy = self.fields['Hy']
field_val_Hx = self.fields['Hx']
Ez_x = grid_average(field_val_Ez[inds_x[0]:inds_x[1]+1, inds_y[0]:inds_y[1]+1], 'x')[:-1,:-1]
Ez_y = grid_average(field_val_Ez[inds_x[0]:inds_x[1]+1, inds_y[0]:inds_y[1]+1], 'y')[:-1,:-1]
# NOTE: Last part drops the extra rows/cols used for grid_average
if direction_normal == "x":
Sx = -1/2*np.real(Ez_x*np.conj(field_val_Hy[inds_x[0]:inds_x[1], inds_y[0]:inds_y[1]]))
return self.dl*np.sum(Sx)
elif direction_normal == "y":
Sy = 1/2*np.real(Ez_y*np.conj(field_val_Hx[inds_x[0]:inds_x[1], inds_y[0]:inds_y[1]]))
return self.dl*np.sum(Sy)
elif self.pol == 'Hz':
if nl:
field_val_Hz = self.fields_nl['Hz']
field_val_Ey = self.fields_nl['Ey']
field_val_Ex = self.fields_nl['Ex']
else:
field_val_Hz = self.fields['Hz']
field_val_Ey = self.fields['Ey']
field_val_Ex = self.fields['Ex']
Hz_x = grid_average(field_val_Hz[inds_x[0]:inds_x[1]+1, inds_y[0]:inds_y[1]+1], 'x')[:-1, :-1]
Hz_y = grid_average(field_val_Hz[inds_x[0]:inds_x[1]+1, inds_y[0]:inds_y[1]+1], 'y')[:-1, :-1]
# NOTE: Last part drops the extra rows/cols used for grid_average
if direction_normal == "x":
Sx = 1/2*np.real(field_val_Ey[inds_x[0]:inds_x[1], inds_y[0]:inds_y[1]]*np.conj(Hz_x))
return self.dl*np.sum(Sx)
elif direction_normal == "y":
Sy = -1/2*np.real(field_val_Ex[inds_x[0]:inds_x[1], inds_y[0]:inds_y[1]]*np.conj(Hz_y))
return self.dl*np.sum(Sy)
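# Example sketch (grid indices are hypothetical): total power crossing a
# vertical line at x-index 50, centered at y-index 100, spanning 40 cells.
# P = sim.flux_probe('x', center=(50, 100), width=40)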
def init_design_region(self, design_region, eps_m, style=''):
""" Initializes the design_region permittivity depending on style"""
if style == 'full':
# eps_m filled in design region
eps_full = self.eps_r.copy()
eps_full[design_region == 1] = eps_m
self.eps_r = eps_full
elif style == 'halfway':
# halfway between 1 and eps_m in design region
eps_halfway = self.eps_r.copy()
eps_halfway[design_region == 1] = eps_m/2 + 1/2
self.eps_r = eps_halfway
elif style == 'empty':
# nothing in design region
eps_empty = self.eps_r.copy()
eps_empty[design_region == 1] = 1
self.eps_r = eps_empty
elif style == 'random':
# random pixels in design region
eps_random = (eps_m-1)*np.random.random(self.eps_r.shape)+1
eps_random[design_region == 0] = self.eps_r[design_region == 0]
self.eps_r = eps_random
elif style == 'random_sym':
# random pixels in design region. Symmetric across y=0
R = np.random.random(self.eps_r.shape)
R_sym = 1/2*(np.fliplr(R) + R)
eps_random = (eps_m-1)*R_sym+1
eps_random[design_region == 0] = self.eps_r[design_region == 0]
self.eps_r = eps_random
self.rho = eps2rho(self.eps_r, eps_m)
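# Example sketch (shapes and indices are hypothetical): randomly seed a
# centered rectangular design region with permittivities in [1, eps_m].
# design_region = np.zeros(sim.eps_r.shape)
# design_region[20:40, 20:40] = 1
# sim.init_design_region(design_region, eps_m=5.0, style='random')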
def compute_index_shift(self):
""" Computes array of nonlinear refractive index shift"""
_ = self.solve_fields()
_ = self.solve_fields_nl()
index_nl = np.sqrt(np.real(self.eps_r + self.eps_nl))
index_lin = np.sqrt(np.real(self.eps_r))
return np.abs(index_nl - index_lin)
def plt_abs(self, nl=False, cbar=True, outline=True, ax=None, vmin=None, vmax=None, logscale=False, tiled_y=1):
# plot the absolute value of the primary field (e.g. Ez/Hz)
if self.fields[self.pol] is None:
raise ValueError("need to solve the simulation first")
eps_r = deepcopy(self.eps_r)
eps_r = np.hstack(tiled_y*[eps_r])
if nl:
field_val = np.abs(self.fields_nl[self.pol])
else:
field_val = np.abs(self.fields[self.pol])
field_val = np.hstack(tiled_y*[field_val])
"""run_experiment.
Usage:
run_experiment_slac.py run [--env=<kn>] [--steps=<kn>] [--seed=<kn>] [--render]
run_experiment_slac.py (-h | --help)
Options:
-h --help Show this screen.
--env=<kn> Environment (see readme.txt) [default: PendulumV].
--steps=<kn> How many steps to run [default: 50000].
--seed=<kn> Random seed [default: 0].
--render Render the environment.
"""
from docopt import docopt
import numpy as np
import gym
import torch
from torch.nn.utils.rnn import pad_sequence
import torch.nn as nn
from torch.autograd import Variable
import time, os, argparse, warnings
import scipy.io as sio
from copy import deepcopy
from slac import SLAC
arguments = docopt(__doc__, version="1.0")
def test_performance(agent_test, env_test, action_filter, times=5):
EpiTestRet = 0
for _ in range(times):
# reset each episode
sp_seq = np.zeros([seq_len, env_test.observation_space.shape[0] + 1])
s = env_test.reset()
sp_seq[-1, :-1] = s
sp_seq[-1, -1] = 0.0 # reward padding
a = agent_test.select(sp_seq)
for _ in range(max_steps):
if np.any(np.isnan(a)):
raise ValueError
sp, r, done, _ = env_test.step(action_filter(a))
sp_seq[:-1] = deepcopy(sp_seq[1:])
sp_seq[-1, :-1] = deepcopy(sp)
sp_seq[-1, -1] = r
a = agent_test.select(
sp_seq, action_return="normal"
) # use tanh(mu_a) for evaluating performance
EpiTestRet += r
if done:
break
EpiTestRet_mean = 0
for _ in range(times):
# reset each episode
sp_seq = np.zeros([seq_len, env_test.observation_space.shape[0] + 1])
s = env_test.reset()
sp_seq[-1, :-1] = s
sp_seq[-1, -1] = 0.0 # reward padding
a = agent_test.select(sp_seq)
for _ in range(max_steps):
if np.any(np.isnan(a)):
raise ValueError
sp, r, done, _ = env_test.step(action_filter(a))
sp_seq[:-1] = deepcopy(sp_seq[1:])
sp_seq[-1, :-1] = deepcopy(sp)
sp_seq[-1, -1] = r
a = agent_test.select(
sp_seq, action_return="mean"
) # use tanh(mu_a) for evaluating performance
EpiTestRet_mean += r
if done:
break
return EpiTestRet / times, EpiTestRet_mean / times
savepath = "./data/"
if os.path.exists(savepath):
warnings.warn("{} exists (possibly so do data).".format(savepath))
else:
os.makedirs(savepath)
seed = int(arguments["--seed"]) # random seed
np.random.seed(seed)
torch.manual_seed(seed)
# Shared
computation_mode = "implicit"
beta_h = "auto_1.0"
optimizer = "adam"
batch_size = 32
seq_len = 8
reward_scale = 1.0
action_feedback = True
grad_clip = False
gamma = 0.99
equal_pad = True
pre_train = True
nc = False
model_act_fn = nn.Tanh
sigx = "auto"
max_all_steps = int(arguments["--steps"]) # total steps to learn
step_perf_eval = 2000 # how many steps to do evaluation
env_name = arguments["--env"]
step_start_rl = 1000
step_start_st = 1000
train_step_rl = 1
train_freq_rl = 1.0 / train_step_rl
train_step_st = 1
train_freq_st = 1.0 / train_step_st
if arguments["--render"]:
rendering = True
else:
rendering = False
if env_name == "Sequential":
from task import TaskT
env = TaskT(3)
env_test = TaskT(3)
action_filter = lambda a: a.reshape([-1])
max_steps = 128
est_min_steps = 10
elif env_name == "CartPole":
from task import ContinuousCartPoleEnv
env = ContinuousCartPoleEnv()
env_test = ContinuousCartPoleEnv()
action_filter = lambda a: a.reshape([-1])
max_steps = 1000
est_min_steps = 5
elif env_name == "CartPoleP":
from task import CartPoleP
env = CartPoleP()
env_test = CartPoleP()
action_filter = lambda a: a.reshape([-1])
max_steps = 1000
est_min_steps = 10
elif env_name == "CartPoleV":
from task import CartPoleV
env = CartPoleV()
env_test = CartPoleV()
action_filter = lambda a: a.reshape([-1])
max_steps = 1000
est_min_steps = 10
elif env_name == "Pendulum":
import gym
env = gym.make("Pendulum-v0")
env_test = gym.make("Pendulum-v0")
action_filter = (
lambda a: a.reshape([-1]) * 2
) # because range of pendulum's action is [-2, 2]. For other environments, * 2 is not needed
max_steps = 200
est_min_steps = 199
elif env_name == "PendulumP":
from task import PendulumP
env = PendulumP()
env_test = PendulumP()
action_filter = lambda a: a.reshape([-1]) * 2
max_steps = 200
est_min_steps = 199
elif env_name == "PendulumV":
from task import PendulumV
env = PendulumV()
env_test = PendulumV()
action_filter = lambda a: a.reshape([-1]) * 2
max_steps = 200
est_min_steps = 199
elif env_name == "Hopper":
import gym
import roboschool
env = gym.make("RoboschoolHopper-v1")
env_test = gym.make("RoboschoolHopper-v1")
action_filter = lambda a: a.reshape([-1])
max_steps = 1000
est_min_steps = 5
elif env_name == "HopperP":
from task import RsHopperP
env = RsHopperP()
env_test = RsHopperP()
action_filter = lambda a: a.reshape([-1])
max_steps = 1000
est_min_steps = 5
elif env_name == "HopperV":
from task import RsHopperV
env = RsHopperV()
env_test = RsHopperV()
action_filter = lambda a: a.reshape([-1])
max_steps = 1000
est_min_steps = 5
elif env_name == "Walker2d":
import gym
import roboschool
env = gym.make("RoboschoolWalker2d-v1")
env_test = gym.make("RoboschoolWalker2d-v1")
action_filter = lambda a: a.reshape([-1])
max_steps = 1000
est_min_steps = 5
elif env_name == "Walker2dV":
from task import RsWalker2dV
env = RsWalker2dV()
env_test = RsWalker2dV()
action_filter = lambda a: a.reshape([-1])
max_steps = 1000
est_min_steps = 5
elif env_name == "Walker2dP":
from task import RsWalker2dP
env = RsWalker2dP()
env_test = RsWalker2dP()
action_filter = lambda a: a.reshape([-1])
max_steps = 1000
est_min_steps = 5
elif env_name == "Ant":
import gym
import roboschool
env = gym.make("RoboschoolAnt-v1")
env_test = gym.make("RoboschoolAnt-v1")
action_filter = lambda a: a.reshape([-1])
max_steps = 1000
est_min_steps = 20
elif env_name == "AntV":
from task import RsAntV
env = RsAntV()
env_test = RsAntV()
action_filter = lambda a: a.reshape([-1])
max_steps = 1000
est_min_steps = 20
elif env_name == "AntP":
from task import RsAntP
env = RsAntP()
env_test = RsAntP()
action_filter = lambda a: a.reshape([-1])
max_steps = 1000
est_min_steps = 20
# ----------------initialize-------------
max_episodes = int(max_all_steps / est_min_steps) + 1 # for replay buffer
agent = SLAC(
input_size=env.observation_space.shape[0] + 1,
action_size=env.action_space.shape[0],
seq_len=seq_len,
beta_h=beta_h,
model_act_fn=model_act_fn,
sigx=sigx,
)
agent_test = SLAC(
input_size=env.observation_space.shape[0] + 1,
action_size=env.action_space.shape[0],
seq_len=seq_len,
beta_h=beta_h,
model_act_fn=model_act_fn,
sigx=sigx,
)
S_real = np.zeros(
[max_episodes, max_steps + 1, env.observation_space.shape[0]], dtype=np.float32
)
A_real = np.zeros(
[max_episodes, max_steps, env.action_space.shape[0]], dtype=np.float32
)
R_real = np.zeros([max_episodes, max_steps], dtype=np.float32)
D_real = np.zeros([max_episodes, max_steps], dtype=np.float32) # done
V_real = np.zeros(
[max_episodes, max_steps], dtype=np.float32
) # whether a step is valid value: 1 (compute gradient at this step) or 0 (stop gradient at this step)
performance_wrt_step = []
performance_mean_action_wrt_step = []
global_steps = []
e_real = 0
global_step = 0
t_just = 0
while global_step < max_all_steps:
sp_seq = np.zeros([seq_len, env.observation_space.shape[0] + 1])
s = env.reset()
S_real[e_real, 0] = s.reshape([-1])
sp_seq[-1, :-1] = s
sp_seq[-1, -1] = 0.0
if equal_pad:
for tau in range(seq_len - 1):
sp_seq[tau, :-1] = s
a = agent.select(sp_seq)
for t in range(max_steps):
if global_step == max_all_steps:
break
sp, r, done, _ = env.step(action_filter(a))
sp_seq[:-1] = deepcopy(sp_seq[1:])
sp_seq[-1, :-1] = deepcopy(sp)
sp_seq[-1, -1] = r
A_real[e_real, t] = a
S_real[e_real, t + 1] = sp.reshape([-1])
R_real[e_real, t] = r
D_real[e_real, t] = 1 if done else 0
V_real[e_real, t] = 1
a = agent.select(sp_seq)
global_step += 1
s = deepcopy(sp)
if pre_train and global_step == step_start_st + 1:
for _ in range(5000):
weights = np.sum(V_real[:e_real], axis=-1) + 2 * seq_len - 2
sample_es = np.random.choice(
e_real, batch_size, p=weights / weights.sum()
)
SP = S_real[sample_es, 1:].reshape(
[batch_size, -1, env.observation_space.shape[0]]
)
A = A_real[sample_es].reshape(
[batch_size, -1, env.action_space.shape[0]]
)
R = R_real[sample_es].reshape([batch_size, -1, 1])
V = V_real[sample_es].reshape([batch_size, -1, 1])
agent.train_st(
x_obs=np.concatenate((SP, R), axis=-1), a_obs=A, r_obs=R, validity=V
)
if global_step > step_start_st and np.random.rand() < train_freq_st:
import json
import funcy
from sklearn.model_selection import StratifiedShuffleSplit
import numpy as np
from collections import defaultdict, Counter
from utils.dataset_converter import concatenate_datasets
from iterstrat.ml_stratifiers import MultilabelStratifiedShuffleSplit
import argparse
from utils.dataset_converter import convert_to_binary
# filter_annotations and save_coco on akarazniewicz/cocosplit
def filter_annotations(annotations, images):
image_ids = funcy.lmap(lambda im: int(im['id']), images)
return funcy.lfilter(lambda ann:
int(ann['image_id']) in image_ids, annotations)
def save_coco(dest, info, licenses,
images, annotations, categories):
data_dict = {'info': info,
'licenses': licenses,
'images': images,
'annotations': annotations,
'categories': categories}
with open(dest, 'w') as f:
json.dump(data_dict,
f, indent=2, sort_keys=True)
return data_dict
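# Example sketch (file names are hypothetical): keep the first 80% of images
# for training and write both splits with their matching annotations.
# with open('instances.json') as f:
#     coco = json.load(f)
# n_train = int(0.8*len(coco['images']))
# train_imgs, test_imgs = coco['images'][:n_train], coco['images'][n_train:]
# save_coco('train.json', coco.get('info'), coco.get('licenses'), train_imgs,
#           filter_annotations(coco['annotations'], train_imgs), coco['categories'])
# save_coco('test.json', coco.get('info'), coco.get('licenses'), test_imgs,
#           filter_annotations(coco['annotations'], test_imgs), coco['categories'])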
def PseudoStratifiedShuffleSplit(images,
annotations,
test_size):
# count categories per image
categories_per_image = defaultdict(Counter)
for ann in annotations:
categories_per_image[ann['image_id']][ann['category_id']] += 1
# find category with most annotations per image
max_category = []
for cat in categories_per_image.values():
cat = cat.most_common(1)[0][0]
max_category.append(cat)
# pseudo-stratified-split
strat_split = StratifiedShuffleSplit(n_splits=1,
test_size=test_size,
random_state=2020)
for train_index, test_index in strat_split.split(images,
np.array(max_category)):
from copy import deepcopy
import numpy as np, matplotlib.pyplot as plt
from numpy import linalg as LA
from sklearn import cluster
from PIL import Image,ImageOps
def resize_and_crop(img, size=(100,100), crop_type='middle'):
# If height is higher we resize vertically, if not we resize horizontally
# Get current and desired ratio for the images
img_ratio = img.size[0] / float(img.size[1])
ratio = size[0] / float(size[1])
# The image is scaled/cropped vertically or horizontally
# depending on the ratio
if ratio > img_ratio:
img = img.resize((
size[0],
int(round(size[0] * img.size[1] / img.size[0]))),
Image.ANTIALIAS)
# Crop in the top, middle or bottom
if crop_type == 'top':
box = (0, 0, img.size[0], size[1])
elif crop_type == 'middle':
box = (
0,
int(round((img.size[1] - size[1]) / 2)),
img.size[0],
int(round((img.size[1] + size[1]) / 2)))
elif crop_type == 'bottom':
box = (0, img.size[1] - size[1], img.size[0], img.size[1])
else:
raise ValueError('ERROR: invalid value for crop_type')
img = img.crop(box)
elif ratio < img_ratio:
img = img.resize((
int(round(size[1] * img.size[0] / img.size[1])),
size[1]),
Image.ANTIALIAS)
# Crop in the top, middle or bottom
if crop_type == 'top':
box = (0, 0, size[0], img.size[1])
elif crop_type == 'middle':
box = (
int(round((img.size[0] - size[0]) / 2)),
0,
int(round((img.size[0] + size[0]) / 2)),
img.size[1])
elif crop_type == 'bottom':
box = (
img.size[0] - size[0],
0,
img.size[0],
img.size[1])
else:
raise ValueError('ERROR: invalid value for crop_type')
img = img.crop(box)
else:
img = img.resize((
size[0],
size[1]),
Image.ANTIALIAS)
# If the scale is the same, we do not need to crop
return img
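# Example sketch ('photo.jpg' is hypothetical; note that Image.ANTIALIAS was
# renamed to Image.LANCZOS in Pillow >= 10):
# img = Image.open('photo.jpg')
# thumb = resize_and_crop(img, size=(100, 100), crop_type='middle')
# thumb.save('thumb.jpg')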
class Dataset(object):
# Returns data
def get_data(self):
return self.data
# Returns the background data
def get_bg(self):
return self.bg
# Returns the active data
def get_active(self):
return self.active
# Returns the active labels
def get_active_labels(self):
return self.active_labels
# Returns the pca directions for the active set
def get_pca_directions(self):
return self.pca_directions
# Returns the active set projected in the pca directions
def get_active_pca_projected(self):
return self.active_pca
def get_affinity_matrix(self):
return self.affinity_matrix
# A helper method to standardize arrays
def standardize(self, array):
standardized_array = (array-np.mean(array,axis=0)) / np.std(array,axis=0)
return np.nan_to_num(standardized_array)
"""
Initialization performs the following operations
1) Centering the active, background seperately
2) Standardize if to_standardize is specified as True
3) Calculate the covariance_matrices
"""
def __init__(self, to_standardize=True):
#from contrastive import PCA
# Housekeeping
self.pca_directions = None
self.bg_eig_vals = None
self.affinity_matrix = None
# Dataset sizes
self.n_active, _ = self.active.shape
self.n_bg, self.features_d = self.bg.shape
#Center the background data
self.bg = self.bg - np.mean(self.bg, axis=0)
if to_standardize: #Standardize if specified
self.bg = self.standardize(self.bg)
#Calculate the covariance matrix
self.bg_cov = self.bg.T.dot(self.bg)/(self.bg.shape[0]-1)
#Center the active data
self.active = self.active - np.mean(self.active, axis=0)
if to_standardize: #Standardize if specified
self.active = self.standardize(self.active)
#Calculate the covariance matrix
self.active_cov = self.active.T.dot(self.active)/(self.n_active-1)
#self.cpca = PCA()
#self.cpca.fit_transform(foreground=self.active, background=self.bg)
"""
Perfomes plain vanilla pca on the active dataset (TO DO: write the same code for background )
Not a part of init because this might be time consuming
"""
def pca_active(self, n_components = 2):
# Perform PCA only once (to save computation time)
if self.pca_directions is None:
#print("PCA is being perfomed on the dataset")
# Calculating the top eigen vectors on the covariance of the active dataset
w, v = LA.eig(self.active_cov)
# Sorting the vectors in the order of eigen values
idx = w.argsort()[::-1]
idx = idx[:n_components]
w = w[idx]
# Storing the top_pca_directions
self.pca_directions = v[:,idx]
# Storing the active dataset projected on the top_pca_directions
self.active_pca = self.active.dot(self.pca_directions)
else:
print("PCA has been previously perfomed on the dataset")
"""
Returns active and bg dataset projected in the cpca direction, as well as the top c_cpca eigenvalues indices.
If specified, it returns the top_cpca directions
"""
def cpca_alpha(self, n_components = 2, alpha=1, return_eigenvectors=False):
#return None, self.cpca.cpca_alpha(dataset=self.active,alpha=alpha), None
sigma = self.active_cov - alpha*self.bg_cov
w, v = LA.eig(sigma)
eig_idx = np.argpartition(w, -n_components)[-n_components:]
eig_idx = eig_idx[np.argsort(-w[eig_idx])]
v_top = v[:,eig_idx]
reduced_foreground = self.active.dot(v_top)
reduced_background = self.bg.dot(v_top)
reduced_foreground[:,0] = reduced_foreground[:,0]*np.sign(reduced_foreground[0,0])
reduced_foreground[:,1] = reduced_foreground[:,1]*np.sign(reduced_foreground[0,1])
if (return_eigenvectors):
return eig_idx, reduced_foreground, reduced_background, v_top
else:
return eig_idx, reduced_foreground, reduced_background
"""
This function performs contrastive PCA using the alpha technique on the
active and background dataset. It automatically determines n_alphas=4 important values
of alpha, up to 10^(max_log_alpha=5), based on spectral clustering
of the top subspaces identified by cPCA.
The final return value is the data projected into the top (n_components = 2)
subspaces, which can be plotted outside of this function
"""
def automated_cpca(self, n_alphas=4, max_log_alpha=5, n_components = 2, affinity_metric='determinant', exemplar_method='medoid'):
best_alphas, all_alphas, angles0, labels = self.find_spectral_alphas(n_components, n_alphas-1, max_log_alpha, affinity_metric, exemplar_method)
best_alphas = np.concatenate(([0], best_alphas)) #one of the alphas is always alpha=0
data_to_plot = []
for alpha in best_alphas:
_, r_active, r_bg = self.cpca_alpha(n_components=n_components, alpha=alpha)
data_to_plot.append((r_active, r_bg))
return data_to_plot, best_alphas
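# Example sketch (`ds` is a hypothetical Dataset subclass with .active and
# .bg populated): plot the foreground projection for each exemplar alpha.
# data_to_plot, alphas = ds.automated_cpca(n_alphas=4)
# for (fg, bg), alpha in zip(data_to_plot, alphas):
#     plt.scatter(fg[:, 0], fg[:, 1], label='alpha=%.2f' % alpha)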
"""
This function performs contrastive PCA using the alpha technique on the
active and background dataset. It returns the cPCA-reduced data for all values of alpha specified,
both the active and background, as well as the list of alphas
"""
def manual_cpca(self, max_log_alpha=5, n_components = 2):
alphas = np.concatenate(([0],np.logspace(-1,max_log_alpha,40)))
data_to_plot = []
for alpha in alphas:
n, r_active, r_bg = self.cpca_alpha(n_components, alpha=alpha)
data_to_plot.append((r_active, r_bg))
return data_to_plot, alphas
"""
This method performs spectral clustering on the affinity matrix of subspaces
returned by contrastive pca, and returns up to (max_num=3) exemplar values of alpha
"""
def find_spectral_alphas(self, n_components=2, max_num=3, max_log_alpha=5, affinity_metric='determinant', exemplar_method='medoid'):
#if (self.affinity_matrix is None): #commented out because different kinds of affinity can be defined
self.create_affinity_matrix(n_components, max_log_alpha, affinity_metric)
affinity = self.affinity_matrix
spectral = cluster.SpectralClustering(n_clusters=max_num+1, affinity='precomputed')
alphas = np.concatenate(([0],np.logspace(-1,max_log_alpha,40)))
spectral.fit(affinity)
labels = spectral.labels_
best_alphas = list()
for i in range(max_num+1):
idx = np.where(labels==i)[0]
if not(0 in idx): #because we don't want to include the cluster that includes alpha=0
if exemplar_method=='smallest':
exemplar_idx = int(np.min(idx))
import warnings
import numpy as np
import tensornetwork as tn
from . import InvalidQuantumStateException
from .tncompute import bipartitepurestate_partialtranspose_densitymatrix, flatten_bipartite_densitymatrix
def schmidt_coefficients(schmidt_modes):
""" Retrieving Schmidt coefficients from Schmidt modes.
:param schmidt_modes: Schmidt modes
:return: Schmidt coefficients
:type schmidt_modes: list
:rtype: numpy.array
"""
return np.array([mode[0] for mode in schmidt_modes])
def entanglement_entropy(schmidt_modes):
""" Calculate the entanglement entropy
Given the calculated Schmidt modes, compute the entanglement entropy
with the formula :math:`H=-\\sum_i p_i \\log p_i`.
:param schmidt_modes: Schmidt modes
:return: the entanglement entropy
:type schmidt_modes: list
:rtype: numpy.float
"""
eigenvalues = np.real(schmidt_coefficients(schmidt_modes))
square_eigenvalues = np.square(np.extract(eigenvalues > 0, eigenvalues))
entropy = np.sum(- square_eigenvalues * np.log(square_eigenvalues))
return entropy
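# Worked example: a two-qubit Bell state has two Schmidt modes with coefficient
# 1/sqrt(2) each, so p_i = 1/2 and H = -2*(1/2)*log(1/2) = log(2) ~ 0.6931.
# (Only mode[0] is read here, so placeholder vectors suffice.)
# bell_modes = [(1/np.sqrt(2), None, None), (1/np.sqrt(2), None, None)]
# entanglement_entropy(bell_modes)  # -> 0.6931...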
# Renyi's entropy
def renyi_entanglement_entropy(schmidt_modes, alpha):
""" Calculate the Renyi's entanglement entropy
Given the calculated Schmidt modes and an :math:`\\alpha`, compute the
Renyi's entanglement entropy with the formula :math:`H= - \\frac{1}{1-\\alpha} \\log \\sum p_i^{\\alpha}`.
:param schmidt_modes:
:param alpha:
:return:
"""
if alpha == 1:
warnings.warn('alpha = 1, doing Shannon entanglement entropy.')
return entanglement_entropy(schmidt_modes)
eigenvalues = np.real(schmidt_coefficients(schmidt_modes))
square_eigenvalues = np.square(np.extract(eigenvalues > 0, eigenvalues))
renyi_entropy = np.log(np.sum(square_eigenvalues**alpha)) / (1-alpha)
return renyi_entropy
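# Worked example: for the Bell state above (p_i = 1/2), the alpha = 2 Renyi
# entropy is H_2 = -log(sum_i p_i^2) = -log(1/2) = log(2), matching the Shannon
# value, as expected for a flat Schmidt spectrum.
# renyi_entanglement_entropy(bell_modes, alpha=2)  # -> 0.6931...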
# participation ratio
def participation_ratio(schmidt_modes):
""" Calculate the participation ratio
Given the calculated Schmidt modes, compute the participation ratio
with the formula :math:`K=\\frac{1}{\\sum_i p_i^2}`.
:param schmidt_modes: Schmidt modes
:return: participation ratio
:type schmidt_modes: list
:rtype: numpy.float
"""
eigenvalues = np.real(schmidt_coefficients(schmidt_modes))
K = 1. / np.sum(np.square(np.square(eigenvalues)))
return K
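# Worked example: for the Bell state (p_i = 1/2), K = 1/sum_i(p_i^2) = 2,
# i.e. both Schmidt modes participate equally.
# participation_ratio(bell_modes)  # -> 2.0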
# negativity
def negativity(bipartite_tensor):
""" Calculate the negativity
Given a normalized bipartite discrete state, compute the negativity
with the formula :math:`N = \\frac{||\\rho^{\\Gamma_A}||_1-1}{2}`
:param bipartite_tensor: tensor describing the bi-partitite states, with each elements the coefficients for :math:`|ij\\rangle`
:return: negativity
:type bipartite_tensor: numpy.ndarray
:rtype: numpy.float
"""
dim0, dim1 = bipartite_tensor.shape
flatten_fullden_pt = flatten_bipartite_densitymatrix(
bipartitepurestate_partialtranspose_densitymatrix(
bipartite_tensor,
0 if dim0<dim1 else 1
)
)
eigenvalues = np.linalg.eigvals(flatten_fullden_pt)
return 0.5 * (np.sum(np.abs(eigenvalues)) - 1)
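# Worked example: for a maximally entangled two-qubit state the partial
# transpose has eigenvalues {1/2, 1/2, 1/2, -1/2}, so ||rho^Gamma||_1 = 2
# and N = (2 - 1)/2 = 0.5.
# bell = np.array([[1, 0], [0, 1]]) / np.sqrt(2)
# negativity(bell)  # -> 0.5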
import unittest
import numpy as np
from math import pi
from airfoilprep import Polar, Airfoil, AirfoilAnalysis
class TestBlend(unittest.TestCase):
def setUp(self):
alpha = [-3.04, -2.03, -1.01, 0.01, 1.03, 2.05, 3.07, 4.09, 5.11,
6.13, 7.14, 8.16, 9.17, 10.18, 11.18, 12.19, 13.18,
14.18, 15.18, 16.17, 17.14, 18.06, 19.06, 20.07, 25]
cl = [-0.071, 0.044, 0.144, 0.241, 0.338, 0.435, 0.535, 0.632,
0.728, 0.813, 0.883, 0.946, 1.001, 1.054, 1.056, 1.095,
1.138, 1.114, 1.073, 1.008, 0.95, 0.902, 0.795, 0.797, 0.8]
cd = [0.0122, 0.0106, 0.0114, 0.0134, 0.0136, 0.014, 0.0147,
0.0156, 0.0162, 0.0173, 0.0191, 0.0215, 0.0248, 0.0339,
0.0544, 0.0452, 0.0445, 0.067, 0.0748, 0.1028, 0.1473,
0.2819, 0.2819, 0.2819, 0.3]
cm = [-0.0044, -0.0051, 0.0018, -0.0216, -0.0282, -0.0346, -0.0405,
-0.0455, -0.0507, -0.0404, -0.0321, -0.0281, -0.0284, -0.0322,
-0.0361, -0.0363, -0.0393, -0.0398, -0.0983, -0.1242, -0.1155,
-0.1068, -0.0981, -0.0894, -0.0807]
Re = 1
self.polar1 = Polar(Re, alpha, cl, cd, cm)
alpha = [-3.04, -2.03, -1.01, 0.01, 1.03, 2.05, 3.07, 4.09, 5.11,
6.13, 7.14, 8.16, 9.17, 10.18, 11.18, 12.19, 13.18, 14.18,
15.189, 16.17, 17.14, 18.06, 19.06, 20.07, 21.08, 22.09,
23.1, 25]
cl = [-0.0852, 0.0528, 0.1728, 0.2892, 0.4056, 0.522, 0.642, 0.7584,
0.8736, 0.9756, 1.0596, 1.1352, 1.2012, 1.2648, 1.2672, 1.314,
1.3656, 1.3368, 1.2876, 1.2096, 1.14, 1.0824, 0.954, 0.9564, 1,
1.2, 1.4, 1.6]
cd = [0.01464, 0.01272, 0.01368, 0.01608, 0.01632, 0.0168, 0.01764,
0.01872, 0.01944, 0.02076, 0.02292, 0.0258, 0.02976, 0.04068,
0.06528, 0.05424, 0.0534, 0.0804, 0.08976, 0.12336, 0.17676,
0.33828, 0.33828, 0.33828, 0.35, 0.4, 0.45, 0.5]
cm = [-0.0037, -0.0044, -0.0051, 0.0018, -0.0216, -0.0282, -0.0346,
-0.0405, -0.0455, -0.0507, -0.0404, -0.0321, -0.0281, -0.0284,
-0.0322, -0.0361, -0.0363, -0.0393, -0.0398, -0.0983, -0.1242,
-0.1155, -0.1068, -0.0981, -0.0894, -0.0807, -0.072, -0.0633]
self.polar2 = Polar(Re, alpha, cl, cd, cm)
def test_blend1(self):
polar3 = self.polar1.blend(self.polar2, 0.5)
alpha_blend = [-3.04, -2.03, -1.01, 0.01, 1.03, 2.05, 3.07, 4.09,
5.11, 6.13, 7.14, 8.16, 9.17, 10.18, 11.18, 12.19,
13.18, 14.18, 15.18, 16.17, 17.14, 18.06, 19.06, 20.07,
25]
cl_blend = [-0.078, 0.048, 0.158, 0.265, 0.372, 0.479, 0.589, 0.695,
0.801, 0.894, 0.971, 1.041, 1.101, 1.159, 1.162, 1.205,
1.252, 1.225, 1.181, 1.109, 1.045, 0.992, 0.875, 0.877,
1.200]
cd_blend = [0.0134, 0.0117, 0.0125, 0.0147, 0.0150, 0.0154, 0.0162,
0.0172, 0.0178, 0.0190, 0.0210, 0.0237, 0.0273, 0.0373,
0.0598, 0.0497, 0.0490, 0.0737, 0.0822, 0.1131, 0.1620,
0.3101, 0.3101, 0.3101, 0.4000]
cm_blend = [-0.00405, -0.00475, -0.00165, -0.0099, -0.0249, -0.0314,
-0.03755, -0.043, -0.0481, -0.04555, -0.03625, -0.0301,
-0.02825, -0.0303, -0.03415, -0.0362, -0.0378, -0.03955,
-0.06905, -0.11125, -0.11985, -0.11115,-0.10245, -0.09375,
-0.072]
# re-interpolate b/c angles of attack are different
cl3 = np.interp(alpha_blend, polar3.alpha, polar3.cl)
cd3 = np.interp(alpha_blend, polar3.alpha, polar3.cd)
cm3 = np.interp(alpha_blend, polar3.alpha, polar3.cm)
# should be within 1e-3
np.testing.assert_allclose(cl3, cl_blend, atol=1e-3)
np.testing.assert_allclose(cd3, cd_blend, atol=1e-3)
np.testing.assert_allclose(cm3, cm_blend, atol=1e-3)
def test_blend1_w_airfoil(self):
af1 = Airfoil([self.polar1])
af2 = Airfoil([self.polar2])
af3 = af1.blend(af2, 0.5)
polar3 = af3.polars[0] # kind of bad practice for me to be accessing this
alpha_blend = [-3.04, -2.03, -1.01, 0.01, 1.03, 2.05, 3.07, 4.09,
5.11, 6.13, 7.14, 8.16, 9.17, 10.18, 11.18, 12.19,
13.18, 14.18, 15.18, 16.17, 17.14, 18.06, 19.06, 20.07,
25]
cl_blend = [-0.078, 0.048, 0.158, 0.265, 0.372, 0.479, 0.589, 0.695,
0.801, 0.894, 0.971, 1.041, 1.101, 1.159, 1.162, 1.205,
1.252, 1.225, 1.181, 1.109, 1.045, 0.992, 0.875, 0.877,
1.200]
cd_blend = [0.0134, 0.0117, 0.0125, 0.0147, 0.0150, 0.0154, 0.0162,
0.0172, 0.0178, 0.0190, 0.0210, 0.0237, 0.0273, 0.0373,
0.0598, 0.0497, 0.0490, 0.0737, 0.0822, 0.1131, 0.1620,
0.3101, 0.3101, 0.3101, 0.4000]
cm_blend = [-0.00405, -0.00475, -0.00165, -0.0099, -0.0249, -0.0314,
-0.03755, -0.043, -0.0481, -0.04555, -0.03625, -0.0301,
-0.02825, -0.0303, -0.03415, -0.0362, -0.0378, -0.03955,
-0.06905, -0.11125, -0.11985, -0.11115,-0.10245, -0.09375,
-0.072]
# re-interpolate b/c angles of attack are different
cl3 = np.interp(alpha_blend, polar3.alpha, polar3.cl)
cd3 = np.interp(alpha_blend, polar3.alpha, polar3.cd)
cm3 = np.interp(alpha_blend, polar3.alpha, polar3.cm)
# should be within 1e-3
np.testing.assert_allclose(cl3, cl_blend, atol=1e-3)
np.testing.assert_allclose(cd3, cd_blend, atol=1e-3)
np.testing.assert_allclose(cm3, cm_blend, atol=1e-3)
def test_blend2(self):
polar3 = self.polar1.blend(self.polar2, 0.7)
alpha_blend = [-3.04, -2.03, -1.01, 0.01, 1.03, 2.05, 3.07, 4.09, 5.11,
6.13, 7.14, 8.16, 9.17, 10.18, 11.18, 12.19, 13.18,
14.18, 15.18, 16.17, 17.14, 18.06, 19.06, 20.07, 25]
cl_blend = [-0.081, 0.050, 0.164, 0.275, 0.385, 0.496, 0.610, 0.720,
0.830, 0.927, 1.007, 1.078, 1.141, 1.202, 1.204, 1.248,
1.297, 1.270, 1.224, 1.149, 1.083, 1.028, 0.906, 0.909,
1.360]
cd_blend = [0.0139, 0.0121, 0.0130, 0.0153, 0.0155, 0.0160, 0.0168,
0.0178, 0.0185, 0.0197, 0.0218, 0.0245, 0.0283, 0.0386,
0.0620, 0.0515, 0.0507, 0.0764, 0.0852, 0.1172, 0.1679,
0.3214, 0.3214, 0.3214, 0.4400]
cm_blend = [-0.00391, -0.00461, -0.00303, -0.00522, -0.02358,
-0.03012, -0.03637, -0.042, -0.04706, -0.04761,
-0.03791, -0.0309, -0.02819, -0.02954, -0.03337,
-0.03616, -0.0372, -0.03945, -0.057347, -0.10607,
-0.12159, -0.11289, -0.10419, -0.09549, -0.06852]
# re-interpolate b/c angles of attack are different
cl3 = np.interp(alpha_blend, polar3.alpha, polar3.cl)
cd3 = np.interp(alpha_blend, polar3.alpha, polar3.cd)
cm3 = np.interp(alpha_blend, polar3.alpha, polar3.cm)
# should be within 1e-3
np.testing.assert_allclose(cl3, cl_blend, atol=1e-3)
np.testing.assert_allclose(cd3, cd_blend, atol=1e-3)
np.testing.assert_allclose(cm3, cm_blend, atol=1e-3)
def test_blend3(self):
polar3 = self.polar1.blend(self.polar2, 0.2)
alpha_blend = [-3.04, -2.03, -1.01, 0.01, 1.03, 2.05, 3.07, 4.09, 5.11,
6.13, 7.14, 8.16, 9.17, 10.18, 11.18, 12.19, 13.18,
14.18, 15.18, 16.17, 17.14, 18.06, 19.06, 20.07, 25]
cl_blend = [-0.074, 0.046, 0.150, 0.251, 0.352, 0.452, 0.556, 0.657,
0.757, 0.846, 0.918, 0.984, 1.041, 1.096, 1.098, 1.139,
1.184, 1.159, 1.116, 1.048, 0.988, 0.938, 0.827, 0.829,
0.960]
cd_blend = [0.0127, 0.0110, 0.0119, 0.0139, 0.0141, 0.0146, 0.0153,
0.0162, 0.0168, 0.0180, 0.0199, 0.0224, 0.0258, 0.0353,
0.0566, 0.0470, 0.0463, 0.0697, 0.0778, 0.1069, 0.1532,
0.2932, 0.2932, 0.2932, 0.3400]
cm_blend = [-0.00426, -0.00496, 0.00042, -0.01692, -0.02688,
-0.03332, -0.03932, -0.0445, -0.04966, -0.04246,
-0.03376, -0.0289, -0.02834, -0.03144, -0.03532,
-0.03626, -0.0387, -0.0397, -0.0866, -0.11902,
-0.11724, -0.10854, -0.09984, -0.09114, -0.07722]
# re-interpolate b/c angles of attack are different
cl3 = np.interp(alpha_blend, polar3.alpha, polar3.cl)
cd3 = np.interp(alpha_blend, polar3.alpha, polar3.cd)
cm3 = np.interp(alpha_blend, polar3.alpha, polar3.cm)
# should be within 1e-3
np.testing.assert_allclose(cl3, cl_blend, atol=1e-3)
np.testing.assert_allclose(cd3, cd_blend, atol=1e-3)
np.testing.assert_allclose(cm3, cm_blend, atol=1e-3)
class Test3DStall(unittest.TestCase):
def setUp(self):
alpha = [-9.000, -8.000, -7.000, -6.000, -5.000, -4.000, -3.000,
-2.000, -1.000, 0.000, 1.000, 2.000, 3.000, 4.000, 5.000,
6.000, 7.000, 8.000, 9.000, 10.000, 11.000, 12.000, 13.000,
14.000, 15.000, 16.000, 17.000, 18.000, 19.000, 20.000,
30.000, 40.000, 50.000]
cl = [-0.802, -0.721, -0.611, -0.506, -0.408, -0.313, -0.220, -0.133,
-0.060, 0.036, 0.227, 0.342, 0.436, 0.556, 0.692, 0.715, 0.761,
0.830, 0.893, 0.954, 1.013, 1.042, 1.061, 1.083, 1.078, 0.882,
0.811, 0.793, 0.793, 0.798, 0.772, 0.757, 0.700]
cd = [0.027, 0.025, 0.024, 0.023, 0.022, 0.022, 0.023, 0.025, 0.027,
0.028, 0.024, 0.019, 0.017, 0.015, 0.017, 0.019, 0.021, 0.024,
0.027, 0.031, 0.037, 0.046, 0.058, 0.074, 0.088, 0.101, 0.114,
0.128, 0.142, 0.155, 0.321, 0.525, 0.742]
cm = [-0.0037, -0.0044, -0.0051, 0.0018, -0.0216, -0.0282, -0.0346,
-0.0405, -0.0455, -0.0507, -0.0404, -0.0321, -0.0281, -0.0284,
-0.0322, -0.0361, -0.0363, -0.0393, -0.0398, -0.0983, -0.1242,
-0.1155, -0.1068, -0.0981, -0.0894, -0.0807, -0.072, -0.0633,
-0.054, -0.045, -0.036, -0.22, -0.13]
cm_zeros = np.zeros(len(cm))
Re = 1
self.polar = Polar(Re, alpha, cl, cd, cm)
self.polar2 = Polar(Re, alpha, cl, cd, cm_zeros)
def test_stall1(self):
R = 2.4
r = 0.25*R
chord = 0.18
Omega = 200*pi/30
Uinf = 10.0
tsr = Omega*R/Uinf
newpolar = self.polar.correction3D(r/R, chord/r, tsr,
alpha_max_corr=30,
alpha_linear_min=-4,
alpha_linear_max=4)
cl_3d = [-0.8466, -0.7523, -0.6420, -0.5342, -0.4302, -0.3284,
-0.2276, -0.1303, -0.0404, 0.0618, 0.2191, 0.3321, 0.4336,
0.5501, 0.6755, 0.7363, 0.8101, 0.8973, 0.9810, 1.0640,
1.1450, 1.2098, 1.2682, 1.3281, 1.3731, 1.3088, 1.3159,
1.3534, 1.4010, 1.4515, 1.9140, 1.8857, 1.6451]
cd_3d = [0.0399, 0.0334, 0.0316, 0.0293, 0.0269, 0.0254, 0.0246,
0.0246, 0.0246, 0.0252, 0.0249, 0.0200, 0.0167, 0.0157,
0.0174, 0.0183, 0.0212, 0.0255, 0.0303, 0.0367, 0.0465,
0.0615, 0.0800, 0.1047, 0.1301, 0.1695, 0.2047, 0.2384,
0.2728, 0.3081, 0.8097, 1.2625, 1.6280]
# test equality
np.testing.assert_allclose(newpolar.cl, cl_3d, atol=1e-3)
np.testing.assert_allclose(newpolar.cd, cd_3d, atol=1e-3)
def test_stall1_w_airfoil(self):
R = 2.4
r = 0.25*R
chord = 0.18
Omega = 200*pi/30
Uinf = 10.0
tsr = Omega*R/Uinf
af = Airfoil([self.polar])
newaf = af.correction3D(r/R, chord/r, tsr,
alpha_max_corr=30,
alpha_linear_min=-4,
alpha_linear_max=4)
_, _, cl_grid, cd_grid, cm_grid = newaf.createDataGrid()
newpolar = newaf.polars[0]
cl_3d = [-0.8466, -0.7523, -0.6420, -0.5342, -0.4302, -0.3284,
-0.2276, -0.1303, -0.0404, 0.0618, 0.2191, 0.3321, 0.4336,
0.5501, 0.6755, 0.7363, 0.8101, 0.8973, 0.9810, 1.0640,
1.1450, 1.2098, 1.2682, 1.3281, 1.3731, 1.3088, 1.3159,
1.3534, 1.4010, 1.4515, 1.9140, 1.8857, 1.6451]
cd_3d = [0.0399, 0.0334, 0.0316, 0.0293, 0.0269, 0.0254, 0.0246,
0.0246, 0.0246, 0.0252, 0.0249, 0.0200, 0.0167, 0.0157,
0.0174, 0.0183, 0.0212, 0.0255, 0.0303, 0.0367, 0.0465,
0.0615, 0.0800, 0.1047, 0.1301, 0.1695, 0.2047, 0.2384,
0.2728, 0.3081, 0.8097, 1.2625, 1.6280]
cm_test = [[-0.0037], [-0.0044], [-0.0051], [0.0018], [-0.0216], [-0.0282],
[-0.0346], [-0.0405], [-0.0455], [-0.0507], [-0.0404], [-0.0321],
[-0.0281], [-0.0284], [-0.0322], [-0.0361], [-0.0363], [-0.0393],
[-0.0398], [-0.0983], [-0.1242], [-0.1155], [-0.1068], [-0.0981],
[-0.0894], [-0.0807], [-0.072], [-0.0633], [-0.054], [-0.045],
[-0.036], [-0.22], [-0.13]]
# test equality
np.testing.assert_allclose(newpolar.cl, cl_3d, atol=1e-3)
np.testing.assert_allclose(newpolar.cd, cd_3d, atol=1e-3)
np.testing.assert_allclose(cm_grid, cm_test, atol=1e-3)
def test_stall2(self):
R = 2.4
r = 0.75*R
chord = 0.28
Omega = 200*pi/30
Uinf = 14.0
tsr = Omega*R/Uinf
newpolar = self.polar.correction3D(r/R, chord/r, tsr,
alpha_max_corr=30,
alpha_linear_min=-4,
alpha_linear_max=4)
cl_3d = [-0.81340155, -0.72876051, -0.61903798, -0.51322348,
-0.41336822, -0.31696485, -0.22214149, -0.13269893,
-0.05485453, 0.04222704, 0.22525537, 0.33917483,
0.43518608, 0.55464051, 0.68785835, 0.72023796,
0.77302335, 0.84665343, 0.91485674, 0.98191931, 1.04592758,
1.08446883, 1.11313747, 1.14423161, 1.15194066, 0.98921407,
0.93776667, 0.93384528, 0.94558296, 0.96199091, 1.05910388,
1.04054486, 0.93735382]
cd_3d = [0.03050922, 0.02712935, 0.02589588, 0.02453937, 0.02341344,
0.02320787, 0.02359745, 0.02497252, 0.02653913, 0.02751806,
0.02430795, 0.01935093, 0.01663156, 0.01552516, 0.01698944,
0.01853615, 0.02107760, 0.02443710, 0.02784230, 0.03217433,
0.03929881, 0.05021192, 0.06322801, 0.08159739, 0.09837902,
0.11798276, 0.13692472, 0.15565820, 0.17470667, 0.19368328,
0.44408310, 0.71034295, 0.96437541]
# test equality
np.testing.assert_allclose(newpolar.cl, cl_3d, atol=1e-3)
np.testing.assert_allclose(newpolar.cd, cd_3d, atol=1e-3)
def test_stall3(self):
R = 5.0
r = 0.5*R
chord = 0.5
Omega = 100*pi/30
Uinf = 10.0
tsr = Omega*R/Uinf
newpolar = self.polar.correction3D(r/R, chord/r, tsr,
alpha_max_corr=30,
alpha_linear_min=-4,
alpha_linear_max=4)
cl_3d = [-0.8240, -0.7363, -0.6264, -0.5199, -0.4188, -0.3206, -0.2239,
-0.1319, -0.0502, 0.0485, 0.2233, 0.3369, 0.4347, 0.5532,
0.6839, 0.7254, 0.7849, 0.8629, 0.9361, 1.0082, 1.0777,
1.1246, 1.1628, 1.2031, 1.2228, 1.0916, 1.0589, 1.0682,
1.0914, 1.1188, 1.3329, 1.3112, 1.1640]
cd_3d = [0.0335, 0.0291, 0.0277, 0.0261, 0.0245, 0.0239, 0.0239,
0.0249, 0.0259, 0.0268, 0.0245, 0.0195, 0.0167, 0.0156,
0.0171, 0.0185, 0.0211, 0.0248, 0.0286, 0.0336, 0.0416,
0.0538, 0.0686, 0.0890, 0.1085, 0.1345, 0.1586, 0.1822,
0.2061, 0.2303, 0.5612, 0.8872, 1.1769]
# test equality
np.testing.assert_allclose(newpolar.cl, cl_3d, atol=1e-3)
np.testing.assert_allclose(newpolar.cd, cd_3d, atol=1e-3)
def test_stall4_cm(self):
R = 5.0
r = 0.5*R
chord = 0.5
Omega = 100*pi/30
Uinf = 10.0
tsr = Omega*R/Uinf
newpolar = self.polar2.correction3D(r/R, chord/r, tsr,
alpha_max_corr=30,
alpha_linear_min=-4,
alpha_linear_max=4)
cl_3d = [-0.8240, -0.7363, -0.6264, -0.5199, -0.4188, -0.3206, -0.2239,
-0.1319, -0.0502, 0.0485, 0.2233, 0.3369, 0.4347, 0.5532,
0.6839, 0.7254, 0.7849, 0.8629, 0.9361, 1.0082, 1.0777,
1.1246, 1.1628, 1.2031, 1.2228, 1.0916, 1.0589, 1.0682,
1.0914, 1.1188, 1.3329, 1.3112, 1.1640]
cd_3d = [0.0335, 0.0291, 0.0277, 0.0261, 0.0245, 0.0239, 0.0239,
0.0249, 0.0259, 0.0268, 0.0245, 0.0195, 0.0167, 0.0156,
0.0171, 0.0185, 0.0211, 0.0248, 0.0286, 0.0336, 0.0416,
0.0538, 0.0686, 0.0890, 0.1085, 0.1345, 0.1586, 0.1822,
0.2061, 0.2303, 0.5612, 0.8872, 1.1769]
# cm = [-0.0037, -0.0044, -0.0051, 0.0018, -0.0216, -0.0282, -0.0346,
# -0.0405, -0.0455, -0.0507, -0.0404, -0.0321, -0.0281, -0.0284,
# -0.0322, -0.0361, -0.0363, -0.0393, -0.0398, -0.0983, -0.1242,
# -0.1155, -0.1068, -0.0981, -0.0894, -0.0807, -0.072, -0.0633,
# -0.054, -0.045, -0.036, -0.22, -0.13]
cm_zeros = np.zeros(len(cd_3d))
# test equality
np.testing.assert_allclose(newpolar.cl, cl_3d, atol=1e-3)
np.testing.assert_allclose(newpolar.cd, cd_3d, atol=1e-3)
np.testing.assert_allclose(newpolar.cm, cm_zeros, atol=1e-3)
class TestExtrap(unittest.TestCase):
def setUp(self):
alpha = [-10.1, -8.2, -6.1, -4.1, -2.1, 0.1, 2, 4.1, 6.2, 8.1, 10.2,
11.3, 12.1, 13.2, 14.2, 15.3, 16.3, 17.1, 18.1, 19.1, 20.1]
cl = [-0.6300, -0.5600, -0.6400, -0.4200, -0.2100, 0.0500, 0.3000,
0.5400, 0.7900, 0.9000, 0.9300, 0.9200, 0.9500, 0.9900, 1.0100,
1.0200, 1.0000, 0.9400, 0.8500, 0.7000, 0.6600]
cd = [0.0390, 0.0233, 0.0131, 0.0134, 0.0119, 0.0122, 0.0116, 0.0144,
0.0146, 0.0162, 0.0274, 0.0303, 0.0369, 0.0509, 0.0648, 0.0776,
0.0917, 0.0994, 0.2306, 0.3142, 0.3186]
cm = [-0.0044, -0.0051, 0.0018, -0.0216, -0.0282, -0.0346, -0.0405,
-0.0455, -0.0507, -0.0404, -0.0321, -0.0281, -0.0284, -0.0322,
-0.0361, -0.0363, -0.0393, -0.0398, -0.0983, -0.1242, -0.1155]
cm_zeros = np.zeros(len(cm))
Re = 1
self.polar = Polar(Re, alpha, cl, cd, cm)
self.polar2 = Polar(Re, alpha, cl, cd, cm_zeros)
def test_extrap1(self):
cdmax = 1.29
newpolar = self.polar.extrapolate(cdmax=cdmax)
alpha_extrap = [-180, -170, -160, -150, -140, -130, -120, -110, -100,
-90, -80, -70, -60, -50, -40, -30, -20, -10.1, -8.2,
-6.1, -4.1, -2.1, 0.1, 2, 4.1, 6.2, 8.1, 10.2, 11.3,
12.1, 13.2, 14.2, 15.3, 16.3, 17.1, 18.1, 19.1, 20.1,
30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140,
150, 160, 170, 180]
cl_extrap = [0.0000, 0.2299, 0.4597, 0.4907, 0.5053, 0.4805, 0.4102,
0.2985, 0.1565, 0.0000, -0.1565, -0.2985, -0.4102,
-0.4805, -0.5053, -0.4907, -0.4637, -0.6300, -0.5600,
-0.6400, -0.4200, -0.2100, 0.0500, 0.3000, 0.5400,
0.7900, 0.9000, 0.9300, 0.9200, 0.9500, 0.9900, 1.0100,
1.0200, 1.0000, 0.9400, 0.8500, 0.7000, 0.6600, 0.7010,
0.7219, 0.6864, 0.5860, 0.4264, 0.2235, 0.0000, -0.1565,
-0.2985, -0.4102, -0.4805, -0.5053, -0.4907, -0.4597,
-0.2299, 0.0000]
cd_extrap = [0.1770, 0.2132, 0.3173, 0.4758, 0.6686, 0.8708, 1.0560,
1.1996, 1.2818, 1.2900, 1.2818, 1.1996, 1.0560, 0.8708,
0.6686, 0.4758, 0.3158, 0.0390, 0.0233, 0.0131, 0.0134,
0.0119, 0.0122, 0.0116, 0.0144, 0.0146, 0.0162, 0.0274,
0.0303, 0.0369, 0.0509, 0.0648, 0.0776, 0.0917, 0.0994,
0.2306, 0.3142, 0.3186, 0.4758, 0.6686, 0.8708, 1.0560,
1.1996, 1.2818, 1.2900, 1.2818, 1.1996, 1.0560, 0.8708,
0.6686, 0.4758, 0.3173, 0.2132, 0.1770]
cm_extrap = [0.0000, 0.4000, 0.2431, 0.2568, 0.2865, 0.3185, 0.3458,
0.3632, 0.3672, 0.3559, 0.3443, 0.3182, 0.2808, 0.2362,
0.1886, 0.1414, 0.0942, -0.0044, -0.0051, 0.0018, -0.0216,
-0.0282, -0.0346, -0.0405, -0.0455, -0.0507, -0.0404, -0.0321,
-0.0281, -0.0284, -0.0322, -0.0361, -0.0363, -0.0393, -0.0398,
-0.0983, -0.1242, -0.1155, -0.1710, -0.2202, -0.2637, -0.3002,
-0.3284, -0.3471, -0.3559, -0.3672, -0.3632, -0.3458, -0.3185,
-0.2865, -0.2568, -0.2431, -0.5000, 0.0000]
# re-interpolate b/c angles of attack are different
cl = np.interp(alpha_extrap, newpolar.alpha, newpolar.cl)
cd = np.interp(alpha_extrap, newpolar.alpha, newpolar.cd)
cm = np.interp(alpha_extrap, newpolar.alpha, newpolar.cm)
# test equality
np.testing.assert_allclose(cl, cl_extrap, atol=1.5e-4)
np.testing.assert_allclose(cd, cd_extrap, atol=1.5e-4)
np.testing.assert_allclose(cm, cm_extrap, atol=5e-3)
def test_extrap1_w_airfoil(self):
cdmax = 1.29
af = Airfoil([self.polar2])
newaf = af.extrapolate(cdmax=cdmax)
newpolar = newaf.polars[0]
alpha_extrap = [-180, -170, -160, -150, -140, -130, -120, -110, -100,
-90, -80, -70, -60, -50, -40, -30, -20, -10.1, -8.2,
-6.1, -4.1, -2.1, 0.1, 2, 4.1, 6.2, 8.1, 10.2, 11.3,
12.1, 13.2, 14.2, 15.3, 16.3, 17.1, 18.1, 19.1, 20.1,
30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140,
150, 160, 170, 180]
cl_extrap = [0.0000, 0.2299, 0.4597, 0.4907, 0.5053, 0.4805, 0.4102,
0.2985, 0.1565, 0.0000, -0.1565, -0.2985, -0.4102,
-0.4805, -0.5053, -0.4907, -0.4637, -0.6300, -0.5600,
-0.6400, -0.4200, -0.2100, 0.0500, 0.3000, 0.5400,
0.7900, 0.9000, 0.9300, 0.9200, 0.9500, 0.9900, 1.0100,
1.0200, 1.0000, 0.9400, 0.8500, 0.7000, 0.6600, 0.7010,
0.7219, 0.6864, 0.5860, 0.4264, 0.2235, 0.0000, -0.1565,
-0.2985, -0.4102, -0.4805, -0.5053, -0.4907, -0.4597,
-0.2299, 0.0000]
cd_extrap = [0.1770, 0.2132, 0.3173, 0.4758, 0.6686, 0.8708, 1.0560,
1.1996, 1.2818, 1.2900, 1.2818, 1.1996, 1.0560, 0.8708,
0.6686, 0.4758, 0.3158, 0.0390, 0.0233, 0.0131, 0.0134,
0.0119, 0.0122, 0.0116, 0.0144, 0.0146, 0.0162, 0.0274,
0.0303, 0.0369, 0.0509, 0.0648, 0.0776, 0.0917, 0.0994,
0.2306, 0.3142, 0.3186, 0.4758, 0.6686, 0.8708, 1.0560,
1.1996, 1.2818, 1.2900, 1.2818, 1.1996, 1.0560, 0.8708,
0.6686, 0.4758, 0.3173, 0.2132, 0.1770]
cm_extrap = np.linspace(0, 0, len(cd_extrap))
# re-interpolate b/c angles of attack are different
cl = np.interp(alpha_extrap, newpolar.alpha, newpolar.cl)
cd = np.interp(alpha_extrap, newpolar.alpha, newpolar.cd)
cm = np.interp(alpha_extrap, newpolar.alpha, newpolar.cm)
# test equality
np.testing.assert_allclose(cl, cl_extrap, atol=1.5e-4)
np.testing.assert_allclose(cd, cd_extrap, atol=1.5e-4)
np.testing.assert_allclose(cm, cm_extrap, atol=5e-3)
def test_extrap2(self):
cdmax = 1.0
newpolar = self.polar.extrapolate(cdmax=cdmax)
alpha_extrap = [-180, -170, -160, -150, -140, -130, -120, -110, -100,
-90, -80, -70, -60, -50, -40, -30, -20, -10.1, -8.2,
-6.1, -4.1, -2.1, 0.1, 2, 4.1, 6.2, 8.1, 10.2, 11.3,
12.1, 13.2, 14.2, 15.3, 16.3, 17.1, 18.1, 19.1, 20.1,
30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140,
150, 160, 170, 180]
cl_extrap = [0.0000, 0.2299, 0.4597, 0.4411, 0.4287, 0.3943, 0.3297,
0.2364, 0.1225, 0.0000, -0.1225, -0.2364, -0.3297,
-0.3943, -0.4287, -0.4411, -0.4637, -0.6300, -0.5600,
-0.6400, -0.4200, -0.2100, 0.0500, 0.3000, 0.5400,
0.7900, 0.9000, 0.9300, 0.9200, 0.9500, 0.9900, 1.0100,
1.0200, 1.0000, 0.9400, 0.8500, 0.7000, 0.6600, 0.6302,
0.6124, 0.5633, 0.4710, 0.3378, 0.1750, 0.0000, -0.1225,
-0.2364, -0.3297, -0.3943, -0.4287, -0.4411, -0.4597,
-0.2299, 0.0000]
cd_extrap = [0.2135, 0.2404, 0.3176, 0.4349, 0.5767, 0.7241, 0.8568,
0.9560, 1.0069, 1.0000, 1.0069, 0.9560, 0.8568, 0.7241,
0.5767, 0.4349, 0.3158, 0.0390, 0.0233, 0.0131, 0.0134,
0.0119, 0.0122, 0.0116, 0.0144, 0.0146, 0.0162, 0.0274,
0.0303, 0.0369, 0.0509, 0.0648, 0.0776, 0.0917, 0.0994,
0.2306, 0.3142, 0.3186, 0.4349, 0.5767, 0.7241, 0.8568,
0.9560, 1.0069, 1.0000, 1.0069, 0.9560, 0.8568, 0.7241,
0.5767, 0.4349, 0.3176, 0.2404, 0.2135]
cm_extrap = [0.0000, 0.4000, 0.2432, 0.2354, 0.2500, 0.2695, 0.2864,
0.2961, 0.2956, 0.2834, 0.2776, 0.2603, 0.2337, 0.2013,
0.1663, 0.1310, 0.0942, -0.0044, -0.0051, 0.0018, -0.0216,
-0.0282, -0.0346, -0.0405, -0.0455, -0.0507, -0.0404, -0.0321,
-0.0281, -0.0284, -0.0322, -0.0361, -0.0363, -0.0393, -0.0398,
-0.0983, -0.1242, -0.1155, -0.1577, -0.1930, -0.2239, -0.2494,
-0.2683, -0.2798, -0.2834, -0.2956, -0.2961, -0.2864, -0.2695,
-0.2500, -0.2354, -0.2432, -0.5000, 0.0000]
# re-interpolate b/c angles of attack are different
cl = np.interp(alpha_extrap, newpolar.alpha, newpolar.cl)
cd = np.interp(alpha_extrap, newpolar.alpha, newpolar.cd)
cm = np.interp(alpha_extrap, newpolar.alpha, newpolar.cm)
# test equality
np.testing.assert_allclose(cl, cl_extrap, atol=1.5e-4)
np.testing.assert_allclose(cd, cd_extrap, atol=1.5e-4)
np.testing.assert_allclose(cm, cm_extrap, atol=5e-3)
def test_extrap3(self):
cdmax = 1.5
newpolar = self.polar.extrapolate(cdmax)
alpha_extrap = [-180, -170, -160, -150, -140, -130, -120, -110, -100,
-90, -80, -70, -60, -50, -40, -30, -20, -10.1, -8.2,
-6.1, -4.1, -2.1, 0.1, 2, 4.1, 6.2, 8.1, 10.2, 11.3,
12.1, 13.2, 14.2, 15.3, 16.3, 17.1, 18.1, 19.1, 20.1,
30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140,
150, 160, 170, 180]
cl_extrap = [0.0000, 0.2299, 0.4597, 0.5266, 0.5608, 0.5429, 0.4685,
0.3434, 0.1810, 0.0000, -0.1810, -0.3434, -0.4685,
-0.5429, -0.5608, -0.5266, -0.4637, -0.6300, -0.5600,
-0.6400, -0.4200, -0.2100, 0.0500, 0.3000, 0.5400, 0.7900,
0.9000, 0.9300, 0.9200, 0.9500, 0.9900, 1.0100, 1.0200,
1.0000, 0.9400, 0.8500, 0.7000, 0.6600, 0.7523, 0.8012,
0.7756, 0.6693, 0.4906, 0.2586, 0.0000, -0.1810, -0.3434,
-0.4685, -0.5429, -0.5608, -0.5266, -0.4597, -0.2299,
0.0000]
cd_extrap = [0.1506, 0.1936, 0.3170, 0.5054, 0.7351, 0.9771, 1.2003,
1.3760, 1.4809, 1.5000, 1.4809, 1.3760, 1.2003, 0.9771,
0.7351, 0.5054, 0.3158, 0.0390, 0.0233, 0.0131, 0.0134,
0.0119, 0.0122, 0.0116, 0.0144, 0.0146, 0.0162, 0.0274,
0.0303, 0.0369, 0.0509, 0.0648, 0.0776, 0.0917, 0.0994,
0.2306, 0.3142, 0.3186, 0.5054, 0.7351, 0.9771, 1.2003,
1.3760, 1.4809, 1.5000, 1.4809, 1.3760, 1.2003, 0.9771,
0.7351, 0.5054, 0.3170, 0.1936, 0.1506]
cm_extrap = [0.0000, 0.4000, 0.2431, 0.2723, 0.3130, 0.3540, 0.3888,
0.4118, 0.4190, 0.4084, 0.3926, 0.3602, 0.3148, 0.2614,
0.2049, 0.1488, 0.0942, -0.0044, -0.0051, 0.0018, -0.0216,
-0.0282, -0.0346, -0.0405, -0.0455, -0.0507, -0.0404, -0.0321,
-0.0281, -0.0284, -0.0322, -0.0361, -0.0363, -0.0393, -0.0398,
-0.0983, -0.1242, -0.1155, -0.1807, -0.2399, -0.2925, -0.3370,
-0.3719, -0.3959, -0.4084, -0.4190, -0.4118, -0.3888, -0.3540,
-0.3130, -0.2723, -0.2431, -0.5000, 0.0000]
# re-interpolate b/c angles of attack are different
cl = np.interp(alpha_extrap, newpolar.alpha, newpolar.cl)
cd = np.interp(alpha_extrap, newpolar.alpha, newpolar.cd)
"""
Author: <NAME>
Copyright:
Secure Systems Group, Aalto University
https://ssg.aalto.fi/
This code is released under Apache 2.0 license
http://www.apache.org/licenses/LICENSE-2.0
"""
import onnx
import struct
import random
import numpy as np
from onnx import numpy_helper
model = onnx.load("R2_S.onnx")
def build(inp, shape):
return np.array(inp).reshape(shape)
tensor_dict = {}
for t in model.graph.initializer:
tensor_dict[str(t.name)] = onnx.numpy_helper.to_array(t)
input_tensor = onnx.TensorProto()
with open('S.tensor', 'rb') as fid:
content = fid.read()
input_tensor.ParseFromString(content)
tensor_dict["1"] = onnx.numpy_helper.to_array(input_tensor)
tensor_dict["8"] = np.reshape(tensor_dict["1"], (10,784))
# do fractionals
fractional = 1000
downscale = 1
single = ["2","4","6","8"]
double = ["3","5","7"]
for s in single:
tensor_dict[s] = np.multiply(tensor_dict[s], fractional)
for s in double:
tensor_dict[s] = np.multiply(tensor_dict[s], fractional*fractional)
"""
for s in tensor_dict:
tensor_dict[s] = np.array([int(d) for d in tensor_dict[s].flatten().tolist()]).reshape(tensor_dict[s].shape)
"""
# compute
tensor_dict["11temp"] = np.matmul(tensor_dict["8"], tensor_dict["2"].T)
tensor_dict["11add"] = np.add(tensor_dict["11temp"], tensor_dict["3"])
tensor_dict["11"] = np.divide(tensor_dict["11add"],fractional*downscale)
tensor_dict["12"] = np.maximum(tensor_dict["11"],0)
tensor_dict["15temp"] = np.matmul(tensor_dict["12"], tensor_dict["4"].T)
tensor_dict["15add"] = np.add(tensor_dict["15temp"], tensor_dict["5"])
tensor_dict["15"] = | np.divide(tensor_dict["15add"],fractional*downscale) | numpy.divide |
import pickle
import sys
import logging
import time
import random
import numpy as np
from nn.utils import timer
from nn.cnn.lenet5_layers import (
Convolution2D,
MaxPooling2D,
FullyConnected,
Flatten,
ReLu,
Softmax
)
logger = logging.getLogger(__name__)
class LeNet5:
'''
LeNet-5:
input: 28x28
conv1: (5x5x6)@s1p2 -> 28x28x6 {(28-5+2x2)/1+1}
maxpool2: (2x2)@s2 -> 14x14x6 {(28-2)/2+1}
conv3: (5x5x16)@s1p0 -> 10x10x16 {(14-5)/1+1}
maxpool4: (2x2)@s2 -> 5x5x16 {(10-2)/2+1}
conv5: (5x5x120)@s1p0 -> 1x1x120 {(5-5)/1+1}
fc6: 120 -> 84
fc7: 84 -> 10
softmax: 10 -> 10
'''
def __init__(self, lr=0.01, smc=None):
self.lr = lr # 0.01
self.smc = smc
self.layers = []
self.layers.append(
Convolution2D(inputs_channel=1, num_filters=6, kernel_size=5, padding=2, stride=1, learning_rate=self.lr,
name='conv1', smc=smc))
self.layers.append(ReLu())
self.layers.append(MaxPooling2D(pool_size=2, stride=2, name='maxpool2'))
self.layers.append(
Convolution2D(inputs_channel=6, num_filters=16, kernel_size=5, padding=0, stride=1, learning_rate=self.lr,
name='conv3'))
self.layers.append(ReLu())
self.layers.append(MaxPooling2D(pool_size=2, stride=2, name='maxpool4'))
self.layers.append(
Convolution2D(inputs_channel=16, num_filters=120, kernel_size=5, padding=0, stride=1, learning_rate=self.lr,
name='conv5'))
self.layers.append(ReLu())
self.layers.append(Flatten())
self.layers.append(FullyConnected(num_inputs=120, num_outputs=84, learning_rate=lr, name='fc6'))
self.layers.append(ReLu())
self.layers.append(FullyConnected(num_inputs=84, num_outputs=10, learning_rate=lr, name='fc7'))
self.layers.append(Softmax())
self.lay_num = len(self.layers)
def cross_entropy(self, inputs, labels):
out_num = labels.shape[0]
p = np.sum(labels.reshape(1, out_num) * inputs)
loss = -np.log(p)
import numpy as np
import os
import struct
from array import array as pyarray
import pickle
import tarfile
def load_mnist(dataset="training", digits=np.arange(10), path=".", size = 60000):
if dataset == "training":
fname_img = os.path.join(path, 'train-images.idx3-ubyte')
fname_lbl = os.path.join(path, 'train-labels.idx1-ubyte')
elif dataset == "testing":
fname_img = os.path.join(path, 't10k-images.idx3-ubyte')
fname_lbl = os.path.join(path, 't10k-labels.idx1-ubyte')
else:
raise ValueError("dataset must be 'testing' or 'training'")
flbl = open(fname_lbl, 'rb')
magic_nr, size = struct.unpack(">II", flbl.read(8))
lbl = pyarray("b", flbl.read())
flbl.close()
fimg = open(fname_img, 'rb')
magic_nr, size, rows, cols = struct.unpack(">IIII", fimg.read(16))
img = pyarray("B", fimg.read())
fimg.close()
ind = [ k for k in range(size) if lbl[k] in digits ]
N = size #int(len(ind) * size/100.)
images = np.zeros((N, rows, cols), dtype=np.uint8)
labels = np.zeros((N, 1), dtype=np.int8)
"""
pyart.io._sigmet_noaa_hh
========================
Functions needed for reading Sigmet files from the airborne radar located on
NOAA's Hurricane Hunter aircraft.
.. autosummary::
:toctree: generated/
_decode_noaa_hh_hdr
_georeference_yprime
"""
import numpy as np
from ._sigmetfile import bin4_to_angle, bin2_to_angle
def _decode_noaa_hh_hdr(
raw_extended_headers, filemetadata, azimuth, elevation,
position_source='irs', heading_source='irs'):
"""
Extract data from Sigmet extended headers produced by NOAA
Hurricane Hunter airborne radars.
Parameters
----------
raw_extended_headers : ndarray
Raw Sigmet extended headers.
filemetadata : FileMetadata
FileMetadata class from which metadata will be derived.
azimuth : dict
Dictionary of azimuth angles recorded in Sigmet file.
elevation : dict
Dictionary of elevation angles recorded in Sigmet file.
position_source: {'irs', 'gps', 'aamps'}, optional
Instrument from which to derive position parameters.
heading_source: {'irs', 'aamps'}
Instrument from which to derive heading parameters.
Returns
-------
latitude : dict
Dictionary containing latitude data and metadata.
longitude : dict
Dictionary containing longitude data and metadata.
altitude : dict
Dictionary containing altitude data and metadata.
heading_params : dict
Dictionary of dictionary containing aircraft heading data and
metadata. Contains 'heading', 'roll', pitch', 'drift', 'rotation',
'tilt' and 'georefs_applied' dictionaries.
"""
xhdr = np.rec.fromstring(raw_extended_headers[..., :68].tostring(),
dtype=list(NOAA_HH_EXTENDED_HEADER))
# rotation and tilt from azimuth/elevation angles
rotation = filemetadata('rotation')
tilt = filemetadata('tilt')
rotation_data = 90. - elevation['data'].copy()
rotation_data[rotation_data < 0] += 360.
rotation['data'] = rotation_data
tilt_data = azimuth['data'].copy()
tilt_data[tilt_data > 180] -= 360.
tilt['data'] = tilt_data
# airborne parameters
heading = filemetadata('heading')
roll = filemetadata('roll')
pitch = filemetadata('pitch')
drift = filemetadata('drift')
if heading_source == 'irs':
heading_data = bin2_to_angle(xhdr['irs_heading'])
roll_data = bin2_to_angle(xhdr['irs_roll'])
pitch_data = bin2_to_angle(xhdr['irs_pitch'])
drift_data = bin2_to_angle(xhdr['irs_drift'])
elif heading_source == 'aamps':
heading_data = bin2_to_angle(xhdr['aamps_heading'])
roll_data = bin2_to_angle(xhdr['aamps_roll'])
pitch_data = bin2_to_angle(xhdr['aamps_pitch'])
drift_data = bin2_to_angle(xhdr['aamps_drift'])
else:
raise ValueError('Unknown heading_source')
heading['data'] = heading_data
roll['data'] = roll_data
pitch['data'] = pitch_data
drift['data'] = drift_data
# georeferenced azimuth and elevation
az, elev = _georeference_yprime(
roll_data, pitch_data, heading_data, drift_data, rotation_data,
tilt_data)
azimuth['data'] = az
elevation['data'] = elev
georefs_applied = filemetadata('georefs_applied')
georefs_applied['data'] = np.ones(az.shape, dtype='int8')
# positions: latitude, longitude, altitude
latitude = filemetadata('latitude')
longitude = filemetadata('longitude')
altitude = filemetadata('altitude')
if position_source == 'gps':
lat_data = bin4_to_angle(xhdr['gps_lat'])
lon_data = bin4_to_angle(xhdr['gps_long'])
alt_data = xhdr['gps_alt'] / 100.
elif position_source == 'aamps':
lat_data = bin4_to_angle(xhdr['aamps_lat'])
lon_data = bin4_to_angle(xhdr['aamps_long'])
alt_data = xhdr['aamps_alt'] / 100.
elif position_source == 'irs':
lat_data = bin4_to_angle(xhdr['irs_lat'])
lon_data = bin4_to_angle(xhdr['irs_long'])
alt_data = xhdr['gps_alt'] / 100.
else:
raise ValueError('Invalid position_source')
latitude['data'] = lat_data
longitude['data'] = lon_data
altitude['data'] = alt_data
extended_header_params = {
'heading': heading,
'roll': roll,
'pitch': pitch,
'drift': drift,
'rotation': rotation,
'tilt': tilt,
'georefs_applied': georefs_applied}
return (latitude, longitude, altitude, extended_header_params)
def _georeference_yprime(roll, pitch, heading, drift, rotation, tilt):
"""
Compute georeferenced azimuth and elevation angles for a Y-prime radar.
This is the georeferencing needed for the tail doppler radar on the
NOAA P3 aircraft.
"""
# Adapted from Radx's SigmetRadxFile::_computeAzEl method found in
# SigmetRadxFile.cc
# Transforms defined in Wen-Chau Lee et al, JTech, 1994, 11, 572-578.
# Convert to radians and use variable names from Wen-Chau Lee paper
R = np.radians(roll)