repo (stringlengths 1-99) | file (stringlengths 13-215) | code (stringlengths 12-59.2M) | file_length (int64, 12-59.2M) | avg_line_length (float64, 3.82-1.48M) | max_line_length (int64, 12-2.51M) | extension_type (stringclasses, 1 value)
---|---|---|---|---|---|---|
cwn | cwn-main/data/sr_utils.py | import networkx as nx
import torch
from torch_geometric.utils import to_undirected
def load_sr_dataset(path):
"""Load the Strongly Regular Graph Dataset from the supplied path."""
nx_graphs = nx.read_graph6(path)
graphs = list()
for nx_graph in nx_graphs:
n = nx_graph.number_of_nodes()
edge_index = to_undirected(torch.tensor(list(nx_graph.edges()), dtype=torch.long).transpose(1,0))
graphs.append((edge_index, n))
return graphs
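# Minimal usage sketch (hedged): the .g6 path below is a hypothetical example and
# depends on where the strongly regular graph files live locally; only the
# `networkx` and `to_undirected` imports above are required.
if __name__ == '__main__':
    graphs = load_sr_dataset('./datasets/SR_graphs/raw/sr251256.g6')  # hypothetical path
    edge_index, num_nodes = graphs[0]
    # Each entry is an (edge_index, num_nodes) pair with edges in PyG COO format.
    assert edge_index.size(0) == 2 and int(edge_index.max()) < num_nodes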
| 485 | 29.375 | 105 | py |
cwn | cwn-main/data/dummy_complexes.py | import torch
from data.complex import Cochain, Complex
from torch_geometric.data import Data
# TODO: make the features for these dummy complexes disjoint to stress the tests even more
def convert_to_graph(complex):
"""Extracts the underlying graph of a cochain complex."""
assert 0 in complex.cochains
assert complex.cochains[0].num_cells > 0
cochain = complex.cochains[0]
x = cochain.x
y = complex.y
edge_attr = None
if cochain.upper_index is None:
edge_index = torch.LongTensor([[], []])
else:
edge_index = cochain.upper_index
if 1 in complex.cochains and complex.cochains[1].x is not None and cochain.shared_coboundaries is not None:
edge_attr = torch.index_select(complex.cochains[1].x, 0, cochain.shared_coboundaries)
if edge_attr is None:
edge_attr = torch.FloatTensor([[]])
graph = Data(x=x, edge_index=edge_index, y=y, edge_attr=edge_attr)
return graph
def get_testing_complex_list():
"""Returns a list of cell complexes used for testing. The list contains many edge cases."""
return [get_fullstop_complex(), get_pyramid_complex(), get_house_complex(), get_kite_complex(), get_square_complex(),
get_square_dot_complex(), get_square_complex(), get_fullstop_complex(), get_house_complex(),
get_kite_complex(), get_pyramid_complex(), get_bridged_complex(), get_square_dot_complex(), get_colon_complex(),
get_filled_square_complex(), get_molecular_complex(), get_fullstop_complex(), get_colon_complex(),
get_bridged_complex(), get_colon_complex(), get_fullstop_complex(), get_fullstop_complex(), get_colon_complex()]
def get_mol_testing_complex_list():
"""Returns a list of cell complexes used for testing. The list contains many edge cases."""
return [get_house_complex(), get_kite_complex(), get_square_complex(), get_fullstop_complex(), get_bridged_complex(),
get_square_dot_complex(), get_square_complex(), get_filled_square_complex(), get_colon_complex(), get_bridged_complex(),
get_kite_complex(), get_square_dot_complex(), get_colon_complex(), get_molecular_complex(), get_bridged_complex(),
get_filled_square_complex(), get_molecular_complex(), get_fullstop_complex(), get_colon_complex()]
def get_house_complex():
"""
Returns the `house graph` below with dummy features.
The `house graph` (3-2-4 is a filled triangle):
4
/ \
3---2
| |
0---1
.
4 5
. 2 .
3 1
. 0 .
.
/0\
.---.
| |
.---.
"""
v_up_index = torch.tensor([[0, 1, 0, 3, 1, 2, 2, 3, 2, 4, 3, 4],
[1, 0, 3, 0, 2, 1, 3, 2, 4, 2, 4, 3]], dtype=torch.long)
v_shared_coboundaries = torch.tensor([0, 0, 3, 3, 1, 1, 2, 2, 5, 5, 4, 4], dtype=torch.long)
v_x = torch.tensor([[1], [2], [3], [4], [5]], dtype=torch.float)
yv = torch.tensor([0, 0, 0, 0, 0], dtype=torch.long)
v_cochain = Cochain(dim=0, x=v_x, upper_index=v_up_index, shared_coboundaries=v_shared_coboundaries, y=yv)
e_boundaries = [[0, 1], [1, 2], [2, 3], [0, 3], [3, 4], [2, 4]]
e_boundary_index = torch.stack([
torch.LongTensor(e_boundaries).view(-1),
torch.LongTensor([0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5]).view(-1)], 0)
e_up_index = torch.tensor([[2, 4, 2, 5, 4, 5],
[4, 2, 5, 2, 5, 4]], dtype=torch.long)
e_shared_coboundaries = torch.tensor([0, 0, 0, 0, 0, 0], dtype=torch.long)
e_down_index = torch.tensor([[0, 1, 0, 3, 1, 2, 1, 5, 2, 3, 2, 4, 2, 5, 3, 4, 4, 5],
[1, 0, 3, 0, 2, 1, 5, 1, 3, 2, 4, 2, 5, 2, 4, 3, 5, 4]],
dtype=torch.long)
e_shared_boundaries = torch.tensor([1, 1, 0, 0, 2, 2, 2, 2, 3, 3, 3, 3, 2, 2, 3, 3, 4, 4],
dtype=torch.long)
e_x = torch.tensor([[1], [2], [3], [4], [5], [6]], dtype=torch.float)
ye = torch.tensor([1, 1, 1, 1, 1, 1], dtype=torch.long)
e_cochain = Cochain(dim=1, x=e_x, upper_index=e_up_index, lower_index=e_down_index,
shared_coboundaries=e_shared_coboundaries, shared_boundaries=e_shared_boundaries,
boundary_index=e_boundary_index, y=ye)
t_boundaries = [[2, 4, 5]]
t_boundary_index = torch.stack([
torch.LongTensor(t_boundaries).view(-1),
torch.LongTensor([0, 0, 0]).view(-1)], 0)
t_x = torch.tensor([[1]], dtype=torch.float)
yt = torch.tensor([2], dtype=torch.long)
t_cochain = Cochain(dim=2, x=t_x, y=yt, boundary_index=t_boundary_index)
y = torch.LongTensor([v_x.shape[0]])
return Complex(v_cochain, e_cochain, t_cochain, y=y)
def get_bridged_complex():
"""
Returns the `bridged graph` below with dummy features.
The `bridged graph` (0-1-4-3, 1-2-3-4, 0-1-2-3 are filled rings):
3---2
|\ |
| 4 |
| \|
0---1
.-2-.
|4 |
3 . 1
| 5|
.-0-.
.---.
|\1 |
| . |
| 0\|
.---.
.---.
| |
| 2 |
| |
.---.
"""
v_up_index = torch.tensor( [[0, 1, 0, 3, 1, 2, 1, 4, 2, 3, 3, 4],
[1, 0, 3, 0, 2, 1, 4, 1, 3, 2, 4, 3]], dtype=torch.long)
v_shared_coboundaries = torch.tensor([0, 0, 3, 3, 1, 1, 5, 5, 2, 2, 4, 4], dtype=torch.long)
v_x = torch.tensor([[1], [2], [3], [4], [5]], dtype=torch.float)
yv = torch.tensor([0, 0, 0, 0, 0], dtype=torch.long)
v_cochain = Cochain(dim=0, x=v_x, upper_index=v_up_index, shared_coboundaries=v_shared_coboundaries, y=yv)
e_boundaries = [[0, 1], [1, 2], [2, 3], [0, 3], [3, 4], [1, 4]]
e_boundary_index = torch.stack([
torch.LongTensor(e_boundaries).view(-1),
torch.LongTensor([0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5]).view(-1)], 0)
e_up_index = torch.tensor( [[0, 1, 0, 2, 0, 3, 0, 3, 0, 4, 0, 5, 1, 2, 1, 2, 1, 3, 1, 4, 1, 5, 2, 3, 2, 4, 2, 5, 3, 4, 3, 5, 4, 5, 4, 5],
[1, 0, 2, 0, 3, 0, 3, 0, 4, 0, 5, 0, 2, 1, 2, 1, 3, 1, 4, 1, 5, 1, 3, 2, 4, 2, 5, 2, 4, 3, 5, 3, 5, 4, 5, 4]], dtype=torch.long)
e_shared_coboundaries = torch.tensor([2, 2, 2, 2, 0, 0, 2, 2, 0, 0, 0, 0, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1], dtype=torch.long)
e_down_index = torch.tensor( [[0, 1, 0, 3, 0, 5, 1, 2, 1, 5, 2, 3, 2, 4, 3, 4, 4, 5],
[1, 0, 3, 0, 5, 0, 2, 1, 5, 1, 3, 2, 4, 2, 4, 3, 5, 4]], dtype=torch.long)
e_shared_boundaries = torch.tensor([1, 1, 0, 0, 1, 1, 2, 2, 1, 1, 3, 3, 3, 3, 3, 3, 4, 4], dtype=torch.long)
e_x = torch.tensor([[1], [2], [3], [4], [5], [6]], dtype=torch.float)
ye = torch.tensor([1, 1, 1, 1, 1, 1], dtype=torch.long)
e_cochain = Cochain(dim=1, x=e_x, upper_index=e_up_index, lower_index=e_down_index,
shared_coboundaries=e_shared_coboundaries, shared_boundaries=e_shared_boundaries,
boundary_index=e_boundary_index, y=ye)
t_boundaries = [[0, 3, 4, 5], [1, 2, 4, 5], [0, 1, 2, 3]]
t_boundary_index = torch.stack([
torch.LongTensor(t_boundaries).view(-1),
torch.LongTensor([0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2]).view(-1)], 0)
t_down_index = torch.tensor( [[0, 1, 0, 1, 0, 2, 0, 2, 1, 2, 1, 2],
[1, 0, 1, 0, 2, 0, 2, 0, 2, 1, 2, 1]], dtype=torch.long)
t_shared_boundaries = torch.tensor([4, 4, 5, 5, 0, 0, 3, 3, 1, 1, 2, 2], dtype=torch.long)
t_x = torch.tensor([[1], [2], [3]], dtype=torch.float)
yt = torch.tensor([2, 2, 2], dtype=torch.long)
t_cochain = Cochain(dim=2, x=t_x, y=yt, boundary_index=t_boundary_index, lower_index=t_down_index, shared_boundaries=t_shared_boundaries)
y = torch.LongTensor([v_x.shape[0]])
return Complex(v_cochain, e_cochain, t_cochain, y=y)
def get_fullstop_complex():
"""
Returns the `fullstop graph` below with dummy features.
The `fullstop graph` is a single isolated node:
0
"""
v_x = torch.tensor([[1]], dtype=torch.float)
yv = torch.tensor([0], dtype=torch.long)
v_cochain = Cochain(dim=0, x=v_x, y=yv)
y = torch.LongTensor([v_x.shape[0]])
return Complex(v_cochain, y=y)
def get_colon_complex():
"""
Returns the `colon graph` below with dummy features.
The `colon graph` is made up of two isolated nodes:
1
0
"""
v_x = torch.tensor([[1], [2]], dtype=torch.float)
yv = torch.tensor([0, 0], dtype=torch.long)
v_cochain = Cochain(dim=0, x=v_x, y=yv)
y = torch.LongTensor([v_x.shape[0]])
return Complex(v_cochain, y=y)
def get_square_complex():
"""
Returns the `square graph` below with dummy features.
The `square graph`:
3---2
| |
0---1
. 2 .
3 1
. 0 .
.---.
| |
.---.
"""
v_up_index = torch.tensor([[0, 1, 0, 3, 1, 2, 2, 3],
[1, 0, 3, 0, 2, 1, 3, 2]], dtype=torch.long)
v_shared_coboundaries = torch.tensor([0, 0, 3, 3, 1, 1, 2, 2], dtype=torch.long)
v_x = torch.tensor([[1], [2], [3], [4]], dtype=torch.float)
yv = torch.tensor([0, 0, 0, 0], dtype=torch.long)
v_cochain = Cochain(dim=0, x=v_x, upper_index=v_up_index, shared_coboundaries=v_shared_coboundaries, y=yv)
e_boundaries = [[0, 1], [1, 2], [2, 3], [0, 3]]
e_boundary_index = torch.stack([
torch.LongTensor(e_boundaries).view(-1),
torch.LongTensor([0, 0, 1, 1, 2, 2, 3, 3]).view(-1)], 0)
e_down_index = torch.tensor([[0, 1, 0, 3, 1, 2, 2, 3],
[1, 0, 3, 0, 2, 1, 3, 2]], dtype=torch.long)
e_shared_boundaries = torch.tensor([1, 1, 0, 0, 2, 2, 3, 3], dtype=torch.long)
e_x = torch.tensor([[1], [2], [3], [4]], dtype=torch.float)
ye = torch.tensor([1, 1, 1, 1], dtype=torch.long)
e_cochain = Cochain(dim=1, x=e_x, lower_index=e_down_index, shared_boundaries=e_shared_boundaries, y=ye,
boundary_index=e_boundary_index)
y = torch.LongTensor([v_x.shape[0]])
return Complex(v_cochain, e_cochain, y=y)
def get_square_dot_complex():
"""
Returns the `square-dot graph` below with dummy features.
The `square-dot graph`:
3---2
| |
0---1 4
. 2 .
3 1
. 0 . .
.---.
| |
.---. .
"""
v_up_index = torch.tensor([[0, 1, 0, 3, 1, 2, 2, 3],
[1, 0, 3, 0, 2, 1, 3, 2]], dtype=torch.long)
v_shared_coboundaries = torch.tensor([0, 0, 3, 3, 1, 1, 2, 2], dtype=torch.long)
v_x = torch.tensor([[1], [2], [3], [4], [5]], dtype=torch.float)
yv = torch.tensor([0, 0, 0, 0, 0], dtype=torch.long)
v_cochain = Cochain(dim=0, x=v_x, upper_index=v_up_index, shared_coboundaries=v_shared_coboundaries, y=yv)
e_boundaries = [[0, 1], [1, 2], [2, 3], [0, 3]]
e_boundary_index = torch.stack([
torch.LongTensor(e_boundaries).view(-1),
torch.LongTensor([0, 0, 1, 1, 2, 2, 3, 3]).view(-1)], 0)
e_down_index = torch.tensor([[0, 1, 0, 3, 1, 2, 2, 3],
[1, 0, 3, 0, 2, 1, 3, 2]], dtype=torch.long)
e_shared_boundaries = torch.tensor([1, 1, 0, 0, 2, 2, 3, 3], dtype=torch.long)
e_x = torch.tensor([[1], [2], [3], [4]], dtype=torch.float)
ye = torch.tensor([1, 1, 1, 1], dtype=torch.long)
e_cochain = Cochain(dim=1, x=e_x, lower_index=e_down_index, shared_boundaries=e_shared_boundaries, y=ye,
boundary_index=e_boundary_index)
y = torch.LongTensor([v_x.shape[0]])
return Complex(v_cochain, e_cochain, y=y)
def get_kite_complex():
"""
Returns the `kite graph` below with dummy features.
The `kite graph`:
2---3---4
/ \ /
0---1
. 4 . 5 .
2 1 3
. 0 .
.---.---.
/0\1/
.---.
"""
v_up_index = torch.tensor([[0, 1, 0, 2, 1, 2, 1, 3, 2, 3, 3, 4],
[1, 0, 2, 0, 2, 1, 3, 1, 3, 2, 4, 3]], dtype=torch.long)
v_shared_coboundaries = torch.tensor([0, 0, 2, 2, 1, 1, 3, 3, 4, 4, 5, 5], dtype=torch.long)
v_x = torch.tensor([[1], [2], [3], [4], [5]], dtype=torch.float)
yv = torch.tensor([0, 0, 0, 0, 0], dtype=torch.long)
v_cochain = Cochain(dim=0, x=v_x, upper_index=v_up_index, shared_coboundaries=v_shared_coboundaries, y=yv)
e_boundaries = [[0, 1], [1, 2], [0, 2], [1, 3], [2, 3], [3, 4]]
e_boundary_index = torch.stack([
torch.LongTensor(e_boundaries).view(-1),
torch.LongTensor([0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5]).view(-1)], 0)
e_down_index = torch.tensor([[0, 1, 0, 3, 1, 3, 0, 2, 1, 2, 2, 4, 1, 4, 3, 4, 3, 5, 4, 5],
[1, 0, 3, 0, 3, 1, 2, 0, 2, 1, 4, 2, 4, 1, 4, 3, 5, 3, 5, 4]],
dtype=torch.long)
e_shared_boundaries = torch.tensor([1, 1, 1, 1, 1, 1, 0, 0, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3],
dtype=torch.long)
e_up_index = torch.tensor([[0, 1, 0, 2, 1, 2, 1, 3, 1, 4, 3, 4],
[1, 0, 2, 0, 2, 1, 3, 1, 4, 1, 4, 3]], dtype=torch.long)
e_shared_coboundaries = torch.tensor([0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1], dtype=torch.long)
e_x = torch.tensor([[1], [2], [3], [4], [5], [6]], dtype=torch.float)
ye = torch.tensor([1, 1, 1, 1, 1, 1], dtype=torch.long)
e_cochain = Cochain(dim=1, x=e_x, lower_index=e_down_index, shared_boundaries=e_shared_boundaries,
upper_index=e_up_index, shared_coboundaries=e_shared_coboundaries, y=ye,
boundary_index=e_boundary_index)
t_boundaries = [[0, 1, 2], [1, 3, 4]]
t_boundary_index = torch.stack([
torch.LongTensor(t_boundaries).view(-1),
torch.LongTensor([0, 0, 0, 1, 1, 1]).view(-1)], 0)
t_down_index = torch.tensor([[0, 1],
[1, 0]], dtype=torch.long)
t_shared_boundaries = torch.tensor([1, 1], dtype=torch.long)
t_x = torch.tensor([[1], [2]], dtype=torch.float)
yt = torch.tensor([2, 2], dtype=torch.long)
t_cochain = Cochain(dim=2, x=t_x, lower_index=t_down_index, shared_boundaries=t_shared_boundaries, y=yt,
boundary_index=t_boundary_index)
y = torch.LongTensor([v_x.shape[0]])
return Complex(v_cochain, e_cochain, t_cochain, y=y)
def get_pyramid_complex():
"""
Returns the `pyramid` below with dummy features.
The `pyramid` (corresponds to a 4-clique):
3
/|\
/_2_\
0-----1
.
5 4 3
2.1
. 0 .
3
/ \
/ \
2-----1
/ \ / \
/ \ / \
3-----0-----3
.
/ \
4 3
.--1--.
/ 2 0 \
4 \ / 3
.--5--.--5--.
3
/ \
/ 2 \
2-----1
/ \ 0 / \
/ 3 \ / 1 \
3-----0-----3
.
/|\
/_0_\
.-----.
"""
v_up_index = torch.tensor([[0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3],
[1, 0, 2, 0, 3, 0, 2, 1, 3, 1, 3, 2]], dtype=torch.long)
v_shared_coboundaries = torch.tensor([0, 0, 2, 2, 5, 5, 1, 1, 3, 3, 4, 4], dtype=torch.long)
v_x = torch.tensor([[1], [2], [3], [4]], dtype=torch.float)
yv = torch.tensor([3, 3, 3, 3], dtype=torch.long)
v_cochain = Cochain(dim=0, x=v_x, upper_index=v_up_index, shared_coboundaries=v_shared_coboundaries, y=yv)
e_boundaries = [[0, 1], [1, 2], [0, 2], [1, 3], [2, 3], [0, 3]]
e_boundary_index = torch.stack([
torch.LongTensor(e_boundaries).view(-1),
torch.LongTensor([0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5]).view(-1)], 0)
e_up_index = torch.tensor(
[[0, 1, 0, 2, 1, 2, 0, 5, 0, 3, 3, 5, 1, 3, 1, 4, 3, 4, 2, 4, 2, 5, 4, 5],
[1, 0, 2, 0, 2, 1, 5, 0, 3, 0, 5, 3, 3, 1, 4, 1, 4, 3, 4, 2, 5, 2, 5, 4]],
dtype=torch.long)
e_shared_coboundaries = torch.tensor(
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3], dtype=torch.long)
e_down_index = torch.tensor(
[[0, 1, 0, 2, 0, 3, 0, 5, 1, 2, 1, 3, 1, 4, 2, 4, 2, 5, 3, 4, 3, 5, 4, 5],
[1, 0, 2, 0, 3, 0, 5, 0, 2, 1, 3, 1, 4, 1, 4, 2, 5, 2, 4, 3, 5, 3, 5, 4]],
dtype=torch.long)
e_shared_boundaries = torch.tensor(
[1, 1, 0, 0, 1, 1, 0, 0, 2, 2, 1, 1, 2, 2, 2, 2, 0, 0, 3, 3, 3, 3, 3, 3], dtype=torch.long)
e_x = torch.tensor([[1], [2], [3], [4], [5], [6]], dtype=torch.float)
ye = torch.tensor([1, 1, 1, 1, 1, 1], dtype=torch.long)
e_cochain = Cochain(dim=1, x=e_x, lower_index=e_down_index, upper_index=e_up_index,
shared_boundaries=e_shared_boundaries, shared_coboundaries=e_shared_coboundaries, y=ye,
boundary_index=e_boundary_index)
t_boundaries = [[0, 1, 2], [0, 3, 5], [1, 3, 4], [2, 4, 5]]
t_boundary_index = torch.stack([
torch.LongTensor(t_boundaries).view(-1),
torch.LongTensor([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]).view(-1)], 0)
t_up_index = torch.tensor([[0, 1, 0, 2, 1, 2, 0, 3, 1, 3, 2, 3],
[1, 0, 2, 0, 2, 1, 3, 0, 3, 1, 3, 2]], dtype=torch.long)
t_shared_coboundaries = torch.tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=torch.long)
t_down_index = torch.tensor([[0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3],
[1, 0, 2, 0, 3, 0, 2, 1, 3, 1, 3, 2]], dtype=torch.long)
t_shared_boundaries = torch.tensor([0, 0, 1, 1, 2, 2, 3, 3, 5, 5, 4, 4], dtype=torch.long)
t_x = torch.tensor([[1], [2], [3], [4]], dtype=torch.float)
yt = torch.tensor([2, 2, 2, 2], dtype=torch.long)
t_cochain = Cochain(dim=2, x=t_x, lower_index=t_down_index, upper_index=t_up_index,
shared_boundaries=t_shared_boundaries, shared_coboundaries=t_shared_coboundaries, y=yt,
boundary_index=t_boundary_index)
p_boundaries = [[0, 1, 2, 3]]
p_boundary_index = torch.stack([
torch.LongTensor(p_boundaries).view(-1),
torch.LongTensor([0, 0, 0, 0]).view(-1)], 0)
p_x = torch.tensor([[1]], dtype=torch.float)
yp = torch.tensor([3], dtype=torch.long)
p_cochain = Cochain(dim=3, x=p_x, y=yp, boundary_index=p_boundary_index)
y = torch.LongTensor([v_x.shape[0]])
return Complex(v_cochain, e_cochain, t_cochain, p_cochain, y=y)
def get_filled_square_complex():
"""This is a cell / cubical complex formed of a single filled square.
3---2
| |
0---1
. 2 .
3 1
. 0 .
.---.
| 0 |
.---.
"""
v_up_index = torch.tensor([[0, 1, 0, 3, 1, 2, 2, 3],
[1, 0, 3, 0, 2, 1, 3, 2]], dtype=torch.long)
v_shared_coboundaries = torch.tensor([0, 0, 3, 3, 1, 1, 2, 2], dtype=torch.long)
v_x = torch.tensor([[1], [2], [3], [4]], dtype=torch.float)
yv = torch.tensor([0, 0, 0, 0], dtype=torch.long)
v_cochain = Cochain(dim=0, x=v_x, upper_index=v_up_index, shared_coboundaries=v_shared_coboundaries, y=yv)
e_boundaries = [[0, 1], [1, 2], [2, 3], [0, 3]]
e_boundary_index = torch.stack([
torch.LongTensor(e_boundaries).view(-1),
torch.LongTensor([0, 0, 1, 1, 2, 2, 3, 3]).view(-1)], 0)
e_down_index = torch.tensor([[0, 1, 0, 3, 1, 2, 2, 3],
[1, 0, 3, 0, 2, 1, 3, 2]], dtype=torch.long)
e_shared_boundaries = torch.tensor([1, 1, 0, 0, 2, 2, 3, 3], dtype=torch.long)
e_x = torch.tensor([[1], [2], [3], [4]], dtype=torch.float)
ye = torch.tensor([1, 1, 1, 1], dtype=torch.long)
e_upper_index = torch.tensor([[0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3],
[1, 0, 2, 0, 3, 0, 2, 1, 3, 1, 3, 2]], dtype=torch.long)
e_shared_coboundaries = torch.tensor([0]*12, dtype=torch.long)
e_cochain = Cochain(dim=1, x=e_x, lower_index=e_down_index, shared_boundaries=e_shared_boundaries,
upper_index=e_upper_index, y=ye, shared_coboundaries=e_shared_coboundaries, boundary_index=e_boundary_index)
c_boundary_index = torch.LongTensor(
[[0, 1, 2, 3],
[0, 0, 0, 0]]
)
c_x = torch.tensor([[1]], dtype=torch.float)
yc = torch.tensor([2], dtype=torch.long)
c_cochain = Cochain(dim=2, x=c_x, y=yc, boundary_index=c_boundary_index)
y = torch.LongTensor([v_x.shape[0]])
return Complex(v_cochain, e_cochain, c_cochain, y=y)
def get_molecular_complex():
"""This is a molecule with filled rings.
3---2---4---5
| | |
0---1-------6---7
. 2 . 4 . 5 .
3 1 6
. 0 . 7 . 8 .
.---. --- . --- .
| 0 | 1 |
.---. --------- . ---- .
"""
v_up_index = torch.tensor([[0, 1, 0, 3, 1, 2, 1, 6, 2, 3, 2, 4, 4, 5, 5, 6, 6, 7],
[1, 0, 3, 0, 2, 1, 6, 1, 3, 2, 4, 2, 5, 4, 6, 5, 7, 6]],
dtype=torch.long)
v_shared_coboundaries = torch.tensor([0, 0, 3, 3, 1, 1, 7, 7, 2, 2, 4, 4, 5, 5, 6, 6, 8, 8],
dtype=torch.long)
v_x = torch.tensor([[1], [2], [3], [4], [5], [6], [7], [8]], dtype=torch.float)
yv = torch.tensor([0, 0, 0, 0, 0, 0, 0, 0], dtype=torch.long)
v_cochain = Cochain(dim=0, x=v_x, upper_index=v_up_index, shared_coboundaries=v_shared_coboundaries, y=yv)
e_boundaries = [[0, 1], [1, 2], [2, 3], [0, 3], [1, 6], [2, 4], [4, 5], [5, 6], [6, 7]]
e_boundary_index = torch.stack([
torch.LongTensor(e_boundaries).view(-1),
torch.LongTensor([0, 0, 1, 1, 2, 2, 3, 3, 7, 7, 4, 4, 5, 5, 6, 6, 8, 8]).view(-1)], 0)
e_down_index = torch.tensor(
[[0, 1, 0, 3, 1, 2, 2, 3, 1, 4, 2, 4, 4, 5, 5, 6, 6, 7, 6, 8, 7, 8, 0, 7, 1, 7],
[1, 0, 3, 0, 2, 1, 3, 2, 4, 1, 4, 2, 5, 4, 6, 5, 7, 6, 8, 6, 8, 7, 7, 0, 7, 1]],
dtype=torch.long)
e_shared_boundaries = torch.tensor(
[1, 1, 0, 0, 2, 2, 3, 3, 2, 2, 2, 2, 4, 4, 5, 5, 6, 6, 6, 6, 6, 6, 1, 1, 1, 1],
dtype=torch.long)
e_x = torch.tensor([[1], [2], [3], [4], [5], [6], [7], [8], [9]], dtype=torch.float)
ye = torch.tensor([1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=torch.long)
e_upper_index_c1 = torch.tensor([[0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3],
[1, 0, 2, 0, 3, 0, 2, 1, 3, 1, 3, 2]], dtype=torch.long)
e_upper_index_c2 = torch.tensor([[1, 4, 1, 5, 1, 6, 1, 7, 4, 5, 4, 6, 4, 7, 5, 6, 5, 7, 6, 7],
[4, 1, 5, 1, 6, 1, 7, 1, 5, 4, 6, 4, 7, 4, 6, 5, 7, 5, 7, 6]],
dtype=torch.long)
e_upper_index = torch.cat((e_upper_index_c1, e_upper_index_c2), dim=-1)
e_shared_coboundaries = torch.tensor([0]*12 + [1]*20, dtype=torch.long)
e_cochain = Cochain(dim=1, x=e_x, lower_index=e_down_index, shared_boundaries=e_shared_boundaries,
upper_index=e_upper_index, y=ye, shared_coboundaries=e_shared_coboundaries, boundary_index=e_boundary_index)
c_boundary_index = torch.LongTensor(
[[0, 1, 2, 3, 1, 4, 5, 6, 7],
[0, 0, 0, 0, 1, 1, 1, 1, 1]]
)
c_x = torch.tensor([[1], [2]], dtype=torch.float)
c_down_index = torch.tensor([[0, 1],
[1, 0]], dtype=torch.long)
c_shared_boundaries = torch.tensor([1, 1], dtype=torch.long)
yc = torch.tensor([2, 2], dtype=torch.long)
c_cochain = Cochain(dim=2, x=c_x, y=yc, boundary_index=c_boundary_index, lower_index=c_down_index,
shared_boundaries=c_shared_boundaries)
y = torch.LongTensor([v_x.shape[0]])
return Complex(v_cochain, e_cochain, c_cochain, y=y)
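# Minimal sketch of how the helpers in this module are typically exercised
# together; it only uses the functions defined above and can be run directly.
if __name__ == '__main__':
    # Convert the house complex back to a plain PyG graph.
    house_graph = convert_to_graph(get_house_complex())
    assert house_graph.x.size(0) == 5            # one feature row per vertex
    assert house_graph.edge_index.size(0) == 2   # edges in COO format
    # Every dummy complex in the testing list should convert without errors.
    for complex_ in get_testing_complex_list():
        _ = convert_to_graph(complex_)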
| 23,206 | 39.220104 | 168 | py |
cwn | cwn-main/data/test_utils.py | import torch
from torch_geometric.data import Data
from data.utils import compute_clique_complex_with_gudhi, compute_ring_2complex
from data.utils import convert_graph_dataset_with_gudhi, convert_graph_dataset_with_rings
from data.complex import ComplexBatch
from data.dummy_complexes import convert_to_graph, get_testing_complex_list
import pytest
# TODO: Gudhi does not preserve the order of the edges in edge_index. It uses a lexicographic order
# Once we care about edge_features at initialisation, we need to make the order the same.
# Below we define the `house graph` and the expected connectivity to be constructed.
# The `house graph` (3-2-4 is a filled triangle):
# 4
# / \
# 3---2
# | |
# 0---1
#
# .
# 4 5
# . 2 .
# 3 1
# . 0 .
#
# .
# /0\
# .---.
# | |
# .---.
@pytest.fixture
def house_edge_index():
return torch.tensor([[0, 0, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4],
[1, 3, 0, 2, 1, 3, 4, 0, 2, 4, 2, 3]], dtype=torch.long)
def test_gudhi_clique_complex(house_edge_index):
'''
4
/ \
3---2
| |
0---1
.
5 4
. 3 .
1 2
. 0 .
.
/0\
.---.
| |
.---.
'''
house = Data(edge_index=house_edge_index, x=torch.arange(0, 5, dtype=torch.float).view(5, 1), y=torch.tensor([1]))
house.num_nodes = house_edge_index.max().item() + 1
house_complex = compute_clique_complex_with_gudhi(house.x, house.edge_index, house.num_nodes,
y=house.y)
# Check the number of simplices
assert house_complex.nodes.num_cells_down is None
assert house_complex.nodes.num_cells_up == 6
assert house_complex.edges.num_cells_down == 5
assert house_complex.edges.num_cells_up == 1
assert house_complex.two_cells.num_cells_down == 6
assert house_complex.two_cells.num_cells_up == 0
# Check the returned parameters
v_params = house_complex.get_cochain_params(dim=0)
assert torch.equal(v_params.x, house.x)
assert v_params.down_index is None
expected_v_up_index = torch.tensor([[0, 1, 0, 3, 1, 2, 2, 3, 2, 4, 3, 4],
[1, 0, 3, 0, 2, 1, 3, 2, 4, 2, 4, 3]], dtype=torch.long)
assert torch.equal(v_params.up_index, expected_v_up_index)
expected_v_up_attr = torch.tensor([[1], [1], [3], [3], [3], [3],
[5], [5], [6], [6], [7], [7]], dtype=torch.float)
assert torch.equal(v_params.kwargs['up_attr'], expected_v_up_attr)
assert v_params.kwargs['down_attr'] is None
assert v_params.kwargs['boundary_attr'] is None
e_params = house_complex.get_cochain_params(dim=1)
expected_e_x = torch.tensor([[1], [3], [3], [5], [6], [7]], dtype=torch.float)
assert torch.equal(e_params.x, expected_e_x)
expected_e_up_index = torch.tensor([[3, 4, 3, 5, 4, 5],
[4, 3, 5, 3, 5, 4]], dtype=torch.long)
assert torch.equal(e_params.up_index, expected_e_up_index)
expected_e_up_attr = torch.tensor([[9], [9], [9], [9], [9], [9]], dtype=torch.float)
assert torch.equal(e_params.kwargs['up_attr'], expected_e_up_attr)
expected_e_down_index = torch.tensor([[0, 1, 0, 2, 2, 3, 2, 4, 3, 4, 1, 3, 1, 5, 3, 5, 4, 5],
[1, 0, 2, 0, 3, 2, 4, 2, 4, 3, 3, 1, 5, 1, 5, 3, 5, 4]],
dtype=torch.long)
assert torch.equal(e_params.down_index, expected_e_down_index)
expected_e_down_attr = torch.tensor([[0], [0], [1], [1], [2], [2], [2], [2], [2], [2],
[3], [3], [3], [3], [3], [3], [4], [4]],
dtype=torch.float)
assert torch.equal(e_params.kwargs['down_attr'], expected_e_down_attr)
assert torch.equal(e_params.kwargs['boundary_attr'], house.x)
assert list(e_params.kwargs['boundary_index'].size()) == [2, 2*house_complex.edges.num_cells]
assert torch.equal(e_params.kwargs['boundary_index'][1], torch.LongTensor([0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5]))
assert torch.equal(e_params.kwargs['boundary_index'][0], torch.LongTensor([0, 1, 0, 3, 1, 2, 2, 3, 2, 4, 3, 4]))
t_params = house_complex.get_cochain_params(dim=2)
expected_t_x = torch.tensor([[9]], dtype=torch.float)
assert torch.equal(t_params.x, expected_t_x)
assert t_params.down_index is None
assert t_params.up_index is None
assert torch.equal(t_params.kwargs['boundary_attr'], expected_e_x)
assert list(t_params.kwargs['boundary_index'].size()) == [2, 3*house_complex.two_cells.num_cells]
assert torch.equal(t_params.kwargs['boundary_index'][1], torch.LongTensor([0, 0, 0]))
assert torch.equal(t_params.kwargs['boundary_index'][0], torch.LongTensor([3, 4, 5]))
assert torch.equal(house_complex.y, house.y)
def test_gudhi_clique_complex_dataset_conversion(house_edge_index):
house1 = Data(edge_index=house_edge_index, x=torch.arange(0, 5, dtype=torch.float).view(5, 1), y=torch.tensor([1]))
house2 = Data(edge_index=house_edge_index, x=torch.arange(0, 5, dtype=torch.float).view(5, 1), y=torch.tensor([1]))
house3 = Data(edge_index=house_edge_index, x=torch.arange(0, 5, dtype=torch.float).view(5, 1), y=torch.tensor([1]))
dataset = [house1, house2, house3]
complexes, dim, num_features = convert_graph_dataset_with_gudhi(dataset, expansion_dim=3)
assert dim == 2
assert len(num_features) == 3
for i in range(len(num_features)):
assert num_features[i] == 1
assert len(complexes) == 3
for i in range(len(complexes)):
# Do some basic checks for each complex.
assert complexes[i].dimension == 2
assert complexes[i].nodes.boundary_index is None
assert list(complexes[i].edges.boundary_index.size()) == [2, 2*6]
assert list(complexes[i].two_cells.boundary_index.size()) == [2, 3*1]
assert complexes[i].edges.lower_index.size(1) == 18
assert torch.equal(complexes[i].nodes.x, house1.x)
assert torch.equal(complexes[i].y, house1.y)
def test_gudhi_clique_complex_dataset_conversion_with_down_adj_excluded(house_edge_index):
house1 = Data(edge_index=house_edge_index, x=torch.arange(0, 5, dtype=torch.float).view(5, 1), y=torch.tensor([1]))
house2 = Data(edge_index=house_edge_index, x=torch.arange(0, 5, dtype=torch.float).view(5, 1), y=torch.tensor([1]))
house3 = Data(edge_index=house_edge_index, x=torch.arange(0, 5, dtype=torch.float).view(5, 1), y=torch.tensor([1]))
dataset = [house1, house2, house3]
complexes, dim, num_features = convert_graph_dataset_with_gudhi(dataset, expansion_dim=3,
include_down_adj=False)
assert dim == 2
assert len(num_features) == 3
for i in range(len(num_features)):
assert num_features[i] == 1
assert len(complexes) == 3
for i in range(len(complexes)):
# Do some basic checks for each complex.
assert complexes[i].dimension == 2
assert complexes[i].nodes.boundary_index is None
assert list(complexes[i].edges.boundary_index.size()) == [2, 2*6]
assert list(complexes[i].two_cells.boundary_index.size()) == [2, 3*1]
assert complexes[i].edges.lower_index is None
assert torch.equal(complexes[i].nodes.x, house1.x)
assert torch.equal(complexes[i].y, house1.y)
def test_gudhi_integration_with_batching_without_adj(house_edge_index):
house1 = Data(edge_index=house_edge_index, x=torch.arange(0, 5, dtype=torch.float).view(5, 1),
y=torch.tensor([1]))
house2 = Data(edge_index=house_edge_index, x=torch.arange(0, 5, dtype=torch.float).view(5, 1),
y=torch.tensor([1]))
house3 = Data(edge_index=house_edge_index, x=torch.arange(0, 5, dtype=torch.float).view(5, 1),
y=torch.tensor([1]))
dataset = [house1, house2, house3]
# Without down-adj
complexes, dim, num_features = convert_graph_dataset_with_gudhi(dataset, expansion_dim=3,
include_down_adj=False)
batch = ComplexBatch.from_complex_list(complexes)
assert batch.dimension == 2
assert batch.edges.lower_index is None
assert batch.nodes.boundary_index is None
assert list(batch.edges.boundary_index.size()) == [2, 3*2*6]
assert list(batch.two_cells.boundary_index.size()) == [2, 1*3*3]
def test_gudhi_integration_with_batching_with_adj(house_edge_index):
house1 = Data(edge_index=house_edge_index, x=torch.arange(0, 5, dtype=torch.float).view(5, 1),
y=torch.tensor([1]))
house2 = Data(edge_index=house_edge_index, x=torch.arange(0, 5, dtype=torch.float).view(5, 1),
y=torch.tensor([1]))
house3 = Data(edge_index=house_edge_index, x=torch.arange(0, 5, dtype=torch.float).view(5, 1),
y=torch.tensor([1]))
dataset = [house1, house2, house3]
# With down-adj
complexes, dim, num_features = convert_graph_dataset_with_gudhi(dataset, expansion_dim=3,
include_down_adj=True)
batch = ComplexBatch.from_complex_list(complexes)
assert batch.dimension == 2
assert batch.edges.lower_index.size(1) == 18*3
assert list(batch.edges.boundary_index.size()) == [2, 3*2*6]
assert list(batch.two_cells.boundary_index.size()) == [2, 1*3*3]
def test_construction_of_ring_2complex(house_edge_index):
house = Data(edge_index=house_edge_index, x=torch.arange(0, 5, dtype=torch.float).view(5, 1), y=torch.tensor([1]))
house.num_nodes = house_edge_index.max().item() + 1
house_complex = compute_ring_2complex(house.x, house.edge_index, None, house.num_nodes,
max_k=4, y=house.y, init_rings=True)
# Check the number of cells
assert house_complex.nodes.num_cells_down is None
assert house_complex.nodes.num_cells_up == 6
assert house_complex.nodes.boundary_index is None
assert house_complex.edges.num_cells_down == 5
assert house_complex.edges.num_cells_up == 2
assert list(house_complex.edges.boundary_index.size()) == [2, 2*6]
assert house_complex.cochains[2].num_cells == 2
assert house_complex.cochains[2].num_cells_down == 6
assert house_complex.cochains[2].num_cells_up == 0
assert list(house_complex.cochains[2].boundary_index.size()) == [2, 3+4]
# Check the returned parameters
v_params = house_complex.get_cochain_params(dim=0)
assert torch.equal(v_params.x, house.x)
assert v_params.down_index is None
expected_v_up_index = torch.tensor([[0, 1, 0, 3, 1, 2, 2, 3, 2, 4, 3, 4],
[1, 0, 3, 0, 2, 1, 3, 2, 4, 2, 4, 3]], dtype=torch.long)
assert torch.equal(v_params.up_index, expected_v_up_index)
expected_v_up_attr = torch.tensor([[1], [1], [3], [3], [3], [3],
[5], [5], [6], [6], [7], [7]], dtype=torch.float)
assert torch.equal(v_params.kwargs['up_attr'], expected_v_up_attr)
assert v_params.kwargs['down_attr'] is None
assert v_params.kwargs['boundary_attr'] is None
e_params = house_complex.get_cochain_params(dim=1)
expected_e_x = torch.tensor([[1], [3], [3], [5], [6], [7]], dtype=torch.float)
assert torch.equal(e_params.x, expected_e_x)
expected_e_up_index = torch.tensor([[0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3, 3, 4, 3, 5, 4, 5],
[1, 0, 2, 0, 3, 0, 2, 1, 3, 1, 3, 2, 4, 3, 5, 3, 5, 4]], dtype=torch.long)
assert torch.equal(e_params.up_index, expected_e_up_index)
expected_e_up_attr = torch.tensor([[6], [6], [6], [6], [6], [6], [6], [6], [6], [6], [6], [6], [9], [9], [9], [9], [9], [9]], dtype=torch.float)
assert torch.equal(e_params.kwargs['up_attr'], expected_e_up_attr)
expected_e_down_index = torch.tensor([[0, 1, 0, 2, 2, 3, 2, 4, 3, 4, 1, 3, 1, 5, 3, 5, 4, 5],
[1, 0, 2, 0, 3, 2, 4, 2, 4, 3, 3, 1, 5, 1, 5, 3, 5, 4]],
dtype=torch.long)
assert torch.equal(e_params.down_index, expected_e_down_index)
expected_e_down_attr = torch.tensor([[0], [0], [1], [1], [2], [2], [2], [2], [2], [2],
[3], [3], [3], [3], [3], [3], [4], [4]],
dtype=torch.float)
assert torch.equal(e_params.kwargs['down_attr'], expected_e_down_attr)
assert torch.equal(e_params.kwargs['boundary_attr'], house.x)
assert list(e_params.kwargs['boundary_index'].size()) == [2, 2*house_complex.edges.num_cells]
assert torch.equal(e_params.kwargs['boundary_index'][1], torch.LongTensor([0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5]))
assert torch.equal(e_params.kwargs['boundary_index'][0], torch.LongTensor([0, 1, 0, 3, 1, 2, 2, 3, 2, 4, 3, 4]))
t_params = house_complex.get_cochain_params(dim=2)
expected_t_x = torch.tensor([[6], [9]], dtype=torch.float)
assert torch.equal(t_params.x, expected_t_x)
expected_t_down_index = torch.tensor([[0, 1],
[1, 0]],
dtype=torch.long)
assert torch.equal(t_params.down_index, expected_t_down_index)
expected_t_down_attr = torch.tensor([[5], [5]], dtype=torch.float)
assert torch.equal(t_params.kwargs['down_attr'], expected_t_down_attr)
assert t_params.up_index is None
assert torch.equal(t_params.kwargs['boundary_attr'], expected_e_x)
expected_t_boundary_index = torch.tensor([[0, 1, 2, 3, 3, 4, 5],
[0, 0, 0, 0, 1, 1, 1]], dtype=torch.long)
assert torch.equal(t_params.kwargs['boundary_index'], expected_t_boundary_index)
assert torch.equal(house_complex.y, house.y)
def test_construction_of_ring_2complex_with_edge_feats(house_edge_index):
'''
4
/ \
3---2
| |
0---1
.
5 4
. 3 .
1 2
. 0 .
.
/0\
.---.
| 1 |
.---.
'''
edge_attr = torch.FloatTensor(
[[0.0, 1.0],
[0.0, 3.0],
[0.0, 1.0],
[1.0, 2.0],
[1.0, 2.0],
[2.0, 3.0],
[2.0, 4.0],
[0.0, 3.0],
[2.0, 3.0],
[3.0, 4.0],
[2.0, 4.0],
[3.0, 4.0]])
house = Data(edge_index=house_edge_index, x=torch.arange(0, 5, dtype=torch.float).view(5, 1), y=torch.tensor([1]),
edge_attr=edge_attr)
house.num_nodes = house_edge_index.max().item() + 1
house_complex = compute_ring_2complex(house.x, house.edge_index, house.edge_attr, house.num_nodes,
max_k=4, y=house.y, init_rings=False)
# Check the number of cells
assert house_complex.nodes.num_cells_down is None
assert house_complex.nodes.num_cells_up == 6
assert house_complex.nodes.boundary_index is None
assert house_complex.edges.num_cells_down == 5
assert house_complex.edges.num_cells_up == 2
assert list(house_complex.edges.boundary_index.size()) == [2, 2*6]
assert house_complex.cochains[2].num_cells == 2
assert house_complex.cochains[2].num_cells_down == 6
assert house_complex.cochains[2].num_cells_up == 0
assert list(house_complex.cochains[2].boundary_index.size()) == [2, 3+4]
# Check the returned parameters
v_params = house_complex.get_cochain_params(dim=0)
assert torch.equal(v_params.x, house.x)
assert v_params.down_index is None
expected_v_up_index = torch.tensor([[0, 1, 0, 3, 1, 2, 2, 3, 2, 4, 3, 4],
[1, 0, 3, 0, 2, 1, 3, 2, 4, 2, 4, 3]], dtype=torch.long)
assert torch.equal(v_params.up_index, expected_v_up_index)
expected_v_up_attr = torch.tensor([[0.0, 1.0], [0.0, 1.0], [0.0, 3.0], [0.0, 3.0],
[1.0, 2.0], [1.0, 2.0], [2.0, 3.0], [2.0, 3.0],
[2.0, 4.0], [2.0, 4.0], [3.0, 4.0], [3.0, 4.0]], dtype=torch.float)
assert torch.equal(v_params.kwargs['up_attr'], expected_v_up_attr)
assert v_params.kwargs['down_attr'] is None
assert v_params.kwargs['boundary_attr'] is None
e_params = house_complex.get_cochain_params(dim=1)
expected_e_x = torch.FloatTensor(
[[0.0, 1.0],
[0.0, 3.0],
[1.0, 2.0],
[2.0, 3.0],
[2.0, 4.0],
[3.0, 4.0]])
assert torch.equal(e_params.x, expected_e_x)
expected_e_up_index = torch.tensor([[0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3, 3, 4, 3, 5, 4, 5],
[1, 0, 2, 0, 3, 0, 2, 1, 3, 1, 3, 2, 4, 3, 5, 3, 5, 4]], dtype=torch.long)
assert torch.equal(e_params.up_index, expected_e_up_index)
assert e_params.kwargs['up_attr'] is None
expected_e_down_index = torch.tensor([[0, 1, 0, 2, 2, 3, 2, 4, 3, 4, 1, 3, 1, 5, 3, 5, 4, 5],
[1, 0, 2, 0, 3, 2, 4, 2, 4, 3, 3, 1, 5, 1, 5, 3, 5, 4]],
dtype=torch.long)
assert torch.equal(e_params.down_index, expected_e_down_index)
expected_e_down_attr = torch.tensor([[0], [0], [1], [1], [2], [2], [2], [2], [2], [2],
[3], [3], [3], [3], [3], [3], [4], [4]],
dtype=torch.float)
assert torch.equal(e_params.kwargs['down_attr'], expected_e_down_attr)
assert torch.equal(e_params.kwargs['boundary_attr'], house.x)
assert list(e_params.kwargs['boundary_index'].size()) == [2, 2*house_complex.edges.num_cells]
assert torch.equal(e_params.kwargs['boundary_index'][1], torch.LongTensor([0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5]))
assert torch.equal(e_params.kwargs['boundary_index'][0], torch.LongTensor([0, 1, 0, 3, 1, 2, 2, 3, 2, 4, 3, 4]))
t_params = house_complex.get_cochain_params(dim=2)
assert t_params.x is None
expected_t_down_index = torch.tensor([[0, 1],
[1, 0]],
dtype=torch.long)
assert torch.equal(t_params.down_index, expected_t_down_index)
expected_t_down_attr = torch.tensor([[2.0, 3.0], [2.0, 3.0]], dtype=torch.float)
assert torch.equal(t_params.kwargs['down_attr'], expected_t_down_attr)
assert t_params.up_index is None
assert torch.equal(t_params.kwargs['boundary_attr'], expected_e_x)
expected_t_boundary_index = torch.tensor([[0, 1, 2, 3, 3, 4, 5],
[0, 0, 0, 0, 1, 1, 1]], dtype=torch.long)
assert torch.equal(t_params.kwargs['boundary_index'], expected_t_boundary_index)
assert torch.equal(house_complex.y, house.y)
def test_construction_of_ring_2complex_with_larger_k_size(house_edge_index):
# Here we check that the max ring size does not have any effect when it is larger
# than the largest ring present in the original graph
house = Data(edge_index=house_edge_index, x=torch.arange(0, 5, dtype=torch.float).view(5, 1), y=torch.tensor([1]))
house.num_nodes = house_edge_index.max().item() + 1
house_cell_a = compute_ring_2complex(house.x, house.edge_index, None, house.num_nodes,
max_k=4, y=house.y, init_rings=True)
house_cell_b = compute_ring_2complex(house.x, house.edge_index, None, house.num_nodes,
max_k=10, y=house.y, init_rings=True)
# Check the number of cells
assert house_cell_a.nodes.num_cells_down is None
assert house_cell_b.nodes.num_cells_down is None
assert house_cell_a.nodes.num_cells_up == house_cell_b.nodes.num_cells_up
assert house_cell_a.nodes.boundary_index is None
assert house_cell_b.nodes.boundary_index is None
assert house_cell_a.edges.num_cells_down == house_cell_b.edges.num_cells_down
assert house_cell_a.edges.num_cells_up == house_cell_b.edges.num_cells_up
assert list(house_cell_a.edges.boundary_index.size()) == list(house_cell_b.edges.boundary_index.size())
assert house_cell_a.two_cells.num_cells == 2 # We have 2 rings in the house complex
assert house_cell_a.two_cells.num_cells == house_cell_b.two_cells.num_cells
assert house_cell_a.two_cells.num_cells_down == house_cell_b.two_cells.num_cells_down
assert house_cell_a.two_cells.num_cells_up == house_cell_b.two_cells.num_cells_up
assert list(house_cell_a.two_cells.boundary_index.size()) == list(house_cell_b.two_cells.boundary_index.size())
# Check the returned node parameters
v_params_a = house_cell_a.get_cochain_params(dim=0)
v_params_b = house_cell_b.get_cochain_params(dim=0)
assert torch.equal(v_params_a.x, v_params_b.x)
assert v_params_a.down_index is None
assert v_params_b.down_index is None
assert torch.equal(v_params_a.up_index, v_params_b.up_index)
assert torch.equal(v_params_a.kwargs['up_attr'], v_params_b.kwargs['up_attr'])
assert v_params_a.kwargs['down_attr'] is None
assert v_params_b.kwargs['down_attr'] is None
assert v_params_a.kwargs['boundary_attr'] is None
assert v_params_b.kwargs['boundary_attr'] is None
# Check the returned edge parameters
e_params_a = house_cell_a.get_cochain_params(dim=1)
e_params_b = house_cell_b.get_cochain_params(dim=1)
assert torch.equal(e_params_a.x, e_params_b.x)
assert torch.equal(e_params_a.up_index, e_params_b.up_index)
assert torch.equal(e_params_a.kwargs['up_attr'], e_params_b.kwargs['up_attr'])
assert torch.equal(e_params_a.down_index, e_params_b.down_index)
assert torch.equal(e_params_a.kwargs['down_attr'], e_params_b.kwargs['down_attr'])
assert torch.equal(e_params_a.kwargs['boundary_attr'], e_params_b.kwargs['boundary_attr'])
assert list(e_params_a.kwargs['boundary_index'].size()) == list(e_params_b.kwargs['boundary_index'].size())
assert torch.equal(e_params_a.kwargs['boundary_index'][1], e_params_b.kwargs['boundary_index'][1])
assert torch.equal(e_params_a.kwargs['boundary_index'][0], e_params_b.kwargs['boundary_index'][0])
# Check the returned ring parameters
t_params_a = house_cell_a.get_cochain_params(dim=2)
t_params_b = house_cell_b.get_cochain_params(dim=2)
assert t_params_a.x.size(0) == 2
assert torch.equal(t_params_a.x, t_params_b.x)
assert torch.equal(t_params_a.down_index, t_params_b.down_index)
assert torch.equal(t_params_a.kwargs['down_attr'], t_params_b.kwargs['down_attr'])
assert t_params_a.up_index is None
assert t_params_b.up_index is None
assert t_params_a.kwargs['up_attr'] is None
assert t_params_b.kwargs['up_attr'] is None
assert torch.equal(t_params_a.kwargs['boundary_attr'], t_params_b.kwargs['boundary_attr'])
assert torch.equal(t_params_a.kwargs['boundary_index'], t_params_b.kwargs['boundary_index'])
# Check label
assert torch.equal(house_cell_a.y, house_cell_b.y)
def test_construction_of_ring_2complex_with_smaller_k_size(house_edge_index):
# Here we check that when we consider rings up to length 3, the output cell complex
# exactly corresponds to a 2-simplicial complex extracted with the alternative routine
house = Data(edge_index=house_edge_index, x=torch.arange(0, 5, dtype=torch.float).view(5, 1), y=torch.tensor([1]))
house.num_nodes = house_edge_index.max().item() + 1
house_cell = compute_ring_2complex(house.x, house.edge_index, None, house.num_nodes,
max_k=3, y=house.y, init_rings=True)
house_simp = compute_clique_complex_with_gudhi(house.x, house.edge_index, house.num_nodes,
y=house.y)
# Check the number of cells
assert house_cell.nodes.num_cells_down is None
assert house_simp.nodes.num_cells_down is None
assert house_cell.nodes.num_cells_up == house_simp.nodes.num_cells_up
assert house_cell.nodes.boundary_index is None
assert house_simp.nodes.boundary_index is None
assert house_cell.edges.num_cells_down == house_simp.edges.num_cells_down
assert house_cell.edges.num_cells_up == house_simp.edges.num_cells_up
assert list(house_cell.edges.boundary_index.size()) == list(house_simp.edges.boundary_index.size())
assert house_cell.two_cells.num_cells == 1
assert house_cell.two_cells.num_cells == house_simp.two_cells.num_cells
assert house_cell.two_cells.num_cells_down == house_simp.two_cells.num_cells_down
assert house_cell.two_cells.num_cells_up == house_simp.two_cells.num_cells_up
assert list(house_cell.two_cells.boundary_index.size()) == list(house_simp.two_cells.boundary_index.size())
# Check the returned node parameters
v_params_a = house_cell.get_cochain_params(dim=0)
v_params_b = house_simp.get_cochain_params(dim=0)
assert torch.equal(v_params_a.x, v_params_b.x)
assert v_params_a.down_index is None
assert v_params_b.down_index is None
assert torch.equal(v_params_a.up_index, v_params_b.up_index)
assert torch.equal(v_params_a.kwargs['up_attr'], v_params_b.kwargs['up_attr'])
assert v_params_a.kwargs['down_attr'] is None
assert v_params_b.kwargs['down_attr'] is None
assert v_params_a.kwargs['boundary_attr'] is None
assert v_params_b.kwargs['boundary_attr'] is None
# Check the returned edge parameters
e_params_a = house_cell.get_cochain_params(dim=1)
e_params_b = house_simp.get_cochain_params(dim=1)
assert torch.equal(e_params_a.x, e_params_b.x)
assert torch.equal(e_params_a.up_index, e_params_b.up_index)
assert torch.equal(e_params_a.kwargs['up_attr'], e_params_b.kwargs['up_attr'])
assert torch.equal(e_params_a.down_index, e_params_b.down_index)
assert torch.equal(e_params_a.kwargs['down_attr'], e_params_b.kwargs['down_attr'])
assert torch.equal(e_params_a.kwargs['boundary_attr'], e_params_b.kwargs['boundary_attr'])
assert list(e_params_a.kwargs['boundary_index'].size()) == list(e_params_b.kwargs['boundary_index'].size())
assert torch.equal(e_params_a.kwargs['boundary_index'][1], e_params_b.kwargs['boundary_index'][1])
assert torch.equal(e_params_a.kwargs['boundary_index'][0], e_params_b.kwargs['boundary_index'][0])
# Check the returned ring parameters
t_params_a = house_cell.get_cochain_params(dim=2)
t_params_b = house_simp.get_cochain_params(dim=2)
assert t_params_a.x.size(0) == 1
assert torch.equal(t_params_a.x, t_params_b.x)
assert t_params_a.down_index is None
assert t_params_b.down_index is None
assert t_params_a.kwargs['down_attr'] is None
assert t_params_b.kwargs['down_attr'] is None
assert t_params_a.up_index is None
assert t_params_b.up_index is None
assert t_params_a.kwargs['up_attr'] is None
assert t_params_b.kwargs['up_attr'] is None
assert torch.equal(t_params_a.kwargs['boundary_attr'], t_params_b.kwargs['boundary_attr'])
assert torch.equal(t_params_a.kwargs['boundary_index'], t_params_b.kwargs['boundary_index'])
# Check label
assert torch.equal(house_cell.y, house_simp.y)
def test_ring_2complex_dataset_conversion(house_edge_index):
house1 = Data(edge_index=house_edge_index, x=torch.arange(0, 5, dtype=torch.float).view(5, 1), y=torch.tensor([1]))
house2 = Data(edge_index=house_edge_index, x=torch.arange(0, 5, dtype=torch.float).view(5, 1), y=torch.tensor([1]))
house3 = Data(edge_index=house_edge_index, x=torch.arange(0, 5, dtype=torch.float).view(5, 1), y=torch.tensor([1]))
dataset = [house1, house2, house3]
complexes, dim, num_features = convert_graph_dataset_with_rings(dataset, include_down_adj=True,
init_rings=True)
assert dim == 2
assert len(num_features) == 3
for i in range(len(num_features)):
assert num_features[i] == 1
assert len(complexes) == 3
for i in range(len(complexes)):
# Do some basic checks for each complex.
# Checks the number of rings in `boundary_index`
assert complexes[i].cochains[2].boundary_index[:, 1].max().item() == 1
assert complexes[i].dimension == 2
assert complexes[i].nodes.boundary_index is None
assert list(complexes[i].edges.boundary_index.size()) == [2, 2*6]
assert list(complexes[i].two_cells.boundary_index.size()) == [2, 3+4]
assert complexes[i].edges.lower_index.size(1) == 18
assert torch.equal(complexes[i].nodes.x, house1.x)
assert torch.equal(complexes[i].y, house1.y)
def test_ring_2complex_dataset_conversion_with_edge_feats(house_edge_index):
edge_attr = torch.FloatTensor(
[[0.0, 1.0],
[0.0, 3.0],
[0.0, 1.0],
[1.0, 2.0],
[1.0, 2.0],
[2.0, 3.0],
[2.0, 4.0],
[0.0, 3.0],
[2.0, 3.0],
[3.0, 4.0],
[2.0, 4.0],
[3.0, 4.0]])
e_x = torch.FloatTensor(
[[0.0, 1.0],
[0.0, 3.0],
[1.0, 2.0],
[2.0, 3.0],
[2.0, 4.0],
[3.0, 4.0]])
house1 = Data(edge_index=house_edge_index, x=torch.arange(0, 5, dtype=torch.float).view(5, 1), edge_attr=edge_attr, y=torch.tensor([1]))
house2 = Data(edge_index=house_edge_index, x=torch.arange(0, 5, dtype=torch.float).view(5, 1), edge_attr=edge_attr, y=torch.tensor([1]))
house3 = Data(edge_index=house_edge_index, x=torch.arange(0, 5, dtype=torch.float).view(5, 1), edge_attr=edge_attr, y=torch.tensor([1]))
dataset = [house1, house2, house3]
complexes, dim, num_features = convert_graph_dataset_with_rings(dataset,
init_edges=True, include_down_adj=True, init_rings=False)
assert dim == 2
assert len(num_features) == 3
assert num_features[0] == 1
assert num_features[1] == 2
assert num_features[2] == 0
assert len(complexes) == 3
for i in range(len(complexes)):
# Do some basic checks for each complex.
assert complexes[i].dimension == 2
assert complexes[i].nodes.boundary_index is None
assert list(complexes[i].edges.boundary_index.size()) == [2, 2*6]
assert list(complexes[i].two_cells.boundary_index.size()) == [2, 3+4]
assert complexes[i].edges.lower_index.size(1) == 18
assert torch.equal(complexes[i].nodes.x, house1.x)
assert torch.equal(complexes[i].edges.x, e_x)
assert complexes[i].two_cells.x is None
assert torch.equal(complexes[i].y, house1.y)
def test_simp_complex_conversion_completes():
graphs = list(map(convert_to_graph, get_testing_complex_list()))
_ = convert_graph_dataset_with_gudhi(graphs, expansion_dim=3)
def test_cell_complex_conversion_completes():
graphs = list(map(convert_to_graph, get_testing_complex_list()))
_ = convert_graph_dataset_with_rings(graphs, init_rings=True)
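# These tests assume the repository root is on PYTHONPATH (the `data.*` imports
# above rely on it). With pytest installed, a typical invocation would be:
#
#     pytest data/test_utils.py -q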
| 31,625 | 48.883281 | 148 | py |
cwn | cwn-main/data/datasets/test_flow.py | import numpy as np
import torch
from scipy.spatial import Delaunay
from data.datasets.flow_utils import load_flow_dataset, create_hole, is_inside_rectangle
def test_create_hole():
# This seed contains some edge cases.
np.random.seed(4)
points = np.random.uniform(size=(400, 2))
tri = Delaunay(points)
hole1 = np.array([[0.2, 0.2], [0.4, 0.4]])
points, triangles = create_hole(points, tri.simplices, hole1)
assert triangles.max() == len(points) - 1
assert triangles.min() == 0
# Check all points are outside the hole
for i in range(len(points)):
assert not is_inside_rectangle(points[i], hole1)
# Double check each point appears in some triangle.
for i in range(len(points)):
assert np.sum(triangles == i) > 0
def test_flow_util_dataset_loading():
# Fix seed for reproducibility
np.random.seed(0)
train, test, _ = load_flow_dataset(num_points=300, num_train=20, num_test=10)
assert len(train) == 20
assert len(test) == 10
label_count = {0: 0, 1: 0}
for cochain in train + test:
# checks x values (flow direction) are either +1 or -1
assert (torch.sum(cochain.x == 1) + torch.sum(cochain.x == -1)
== torch.count_nonzero(cochain.x))
# checks the upper/lower orientation features are consistent
# in shape with the upper/lower indices
assert len(cochain.upper_orient) == cochain.upper_index.size(1)
assert len(cochain.lower_orient) == cochain.lower_index.size(1)
# checks the upper and lower indices are consistent with the number of edges
assert cochain.upper_index.max() < cochain.x.size(0), print(cochain.upper_index.max(),
cochain.x.size(0))
assert cochain.lower_index.max() < cochain.x.size(0), print(cochain.lower_index.max(),
cochain.x.size(0))
# checks the values for orientations are either +1 (coherent) or -1 (not coherent)
assert (torch.sum(cochain.upper_orient == 1)
+ torch.sum(cochain.upper_orient == -1) == cochain.upper_orient.numel())
assert (torch.sum(cochain.lower_orient == 1)
+ torch.sum(cochain.lower_orient == -1) == cochain.lower_orient.numel())
label_count[cochain.y.item()] += 1
# checks distribution of labels
assert label_count[0] == 20 // 2 + 10 // 2
assert label_count[1] == 20 // 2 + 10 // 2
| 2,416 | 36.184615 | 94 | py |
cwn | cwn-main/data/datasets/peptides_structural.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 2 21:37:42 2023
@author: renz
"""
import hashlib
import os.path as osp
import os
import pickle
import shutil
import pandas as pd
import torch
from ogb.utils import smiles2graph
from ogb.utils.torch_util import replace_numpy_with_torchtensor
from ogb.utils.url import decide_download
from torch_geometric.data import Data, download_url
from torch_geometric.data import InMemoryDataset
from data.utils import convert_graph_dataset_with_rings
from data.datasets import InMemoryComplexDataset
from tqdm import tqdm
class PeptidesStructuralDataset(InMemoryComplexDataset):
"""
PyG dataset of 15,535 small peptides represented as their molecular
graph (SMILES) with 11 regression targets derived from the peptide's
3D structure.
The original amino acid sequence representation is provided in the
'peptide_seq' field and the inter-atom distances in the 'self_dist_matrix'
field of the dataset file, but neither is used here as part of the input.
The 11 regression targets were precomputed from the molecules' XYZ coordinates:
Inertia_mass_[a-c]: The principal component of the inertia of the
mass, with some normalizations. Sorted
Inertia_valence_[a-c]: The principal component of the inertia of the
Hydrogen atoms. This is basically a measure of the 3D
distribution of hydrogens. Sorted
length_[a-c]: The length around the 3 main geometric axis of
the 3D objects (without considering atom types). Sorted
Spherocity: SpherocityIndex descriptor computed by
rdkit.Chem.rdMolDescriptors.CalcSpherocityIndex
Plane_best_fit: Plane of best fit (PBF) descriptor computed by
rdkit.Chem.rdMolDescriptors.CalcPBF
Args:
root (string): Root directory where the dataset should be saved.
smiles2graph (callable): A callable function that converts a SMILES
string into a graph object. We use the OGB featurization.
* The default smiles2graph requires rdkit to be installed *
"""
def __init__(self, root, max_ring_size, smiles2graph=smiles2graph,
transform=None, pre_transform=None, pre_filter=None,
include_down_adj=False, init_method='sum', n_jobs=2):
self.original_root = root
self.smiles2graph = smiles2graph
self.folder = osp.join(root, 'peptides-structural')
self.url = 'https://www.dropbox.com/s/464u3303eu2u4zp/peptide_structure_dataset.csv.gz?dl=1'
self.version = '9786061a34298a0684150f2e4ff13f47' # MD5 hash of the intended dataset file
self.url_stratified_split = 'https://www.dropbox.com/s/9dfifzft1hqgow6/splits_random_stratified_peptide_structure.pickle?dl=1'
self.md5sum_stratified_split = '5a0114bdadc80b94fc7ae974f13ef061'
# Check version and update if necessary.
release_tag = osp.join(self.folder, self.version)
if osp.isdir(self.folder) and (not osp.exists(release_tag)):
print(f"{self.__class__.__name__} has been updated.")
if input("Will you update the dataset now? (y/N)\n").lower() == 'y':
shutil.rmtree(self.folder)
self.name = 'peptides_structural'
self._max_ring_size = max_ring_size
self._use_edge_features = True
self._n_jobs = n_jobs
super(PeptidesStructuralDataset, self).__init__(root, transform, pre_transform, pre_filter,
max_dim=2, init_method=init_method, include_down_adj=include_down_adj,
cellular=True, num_classes=1)
self.data, self.slices, idx, self.num_tasks = self.load_dataset()
self.train_ids = idx['train']
self.val_ids = idx['val']
self.test_ids = idx['test']
self.num_node_type = 9
self.num_edge_type = 3
@property
def raw_file_names(self):
return 'peptide_structure_dataset.csv.gz'
@property
def processed_file_names(self):
return [f'{self.name}_complex.pt', f'{self.name}_idx.pt', f'{self.name}_tasks.pt']
@property
def processed_dir(self):
"""Overwrite to change name based on edge and simple feats"""
directory = super(PeptidesStructuralDataset, self).processed_dir
suffix1 = f"_{self._max_ring_size}rings" if self._cellular else ""
suffix2 = "-E" if self._use_edge_features else ""
return directory + suffix1 + suffix2
def _md5sum(self, path):
hash_md5 = hashlib.md5()
with open(path, 'rb') as f:
buffer = f.read()
hash_md5.update(buffer)
return hash_md5.hexdigest()
def download(self):
if decide_download(self.url):
path = download_url(self.url, self.raw_dir)
# Save to disk the MD5 hash of the downloaded file.
hash = self._md5sum(path)
if hash != self.version:
raise ValueError("Unexpected MD5 hash of the downloaded file")
open(osp.join(self.root, hash), 'w').close()
# Download train/val/test splits.
path_split1 = download_url(self.url_stratified_split, self.root)
assert self._md5sum(path_split1) == self.md5sum_stratified_split
old_split_file = osp.join(self.root,
"splits_random_stratified_peptide_structure.pickle?dl=1")
new_split_file = osp.join(self.root,
"splits_random_stratified_peptide_structure.pickle")
old_df_name = osp.join(self.raw_dir,
'peptide_structure_dataset.csv.gz?dl=1')
new_df_name = osp.join(self.raw_dir,
'peptide_structure_dataset.csv.gz')
os.rename(old_split_file, new_split_file)
os.rename(old_df_name, new_df_name)
else:
print('Stop download.')
exit(-1)
def load_dataset(self):
"""Load the dataset from here and process it if it doesn't exist"""
print("Loading dataset from disk...")
data, slices = torch.load(self.processed_paths[0])
idx = torch.load(self.processed_paths[1])
tasks = torch.load(self.processed_paths[2])
return data, slices, idx, tasks
def process(self):
data_df = pd.read_csv(osp.join(self.raw_dir,
'peptide_structure_dataset.csv.gz'))
smiles_list = data_df['smiles']
target_names = ['Inertia_mass_a', 'Inertia_mass_b', 'Inertia_mass_c',
'Inertia_valence_a', 'Inertia_valence_b',
'Inertia_valence_c', 'length_a', 'length_b', 'length_c',
'Spherocity', 'Plane_best_fit']
# Normalize to zero mean and unit standard deviation.
data_df.loc[:, target_names] = data_df.loc[:, target_names].apply(
lambda x: (x - x.mean()) / x.std(), axis=0)
print('Converting SMILES strings into graphs...')
data_list = []
for i in tqdm(range(len(smiles_list))):
data = Data()
smiles = smiles_list[i]
y = data_df.iloc[i][target_names]
graph = self.smiles2graph(smiles)
assert (len(graph['edge_feat']) == graph['edge_index'].shape[1])
assert (len(graph['node_feat']) == graph['num_nodes'])
data.__num_nodes__ = int(graph['num_nodes'])
data.edge_index = torch.from_numpy(graph['edge_index']).to(
torch.int64)
data.edge_attr = torch.from_numpy(graph['edge_feat']).to(
torch.int64)
data.x = torch.from_numpy(graph['node_feat']).to(torch.int64)
data.y = torch.Tensor([y])
data_list.append(data)
if self.pre_transform is not None:
data_list = [self.pre_transform(data) for data in data_list]
split_idx = self.get_idx_split()
# NB: the init method would basically have no effect if
# we use edge features and do not initialize rings.
print(f"Converting the {self.name} dataset to a cell complex...")
complexes, _, _ = convert_graph_dataset_with_rings(
data_list,
max_ring_size=self._max_ring_size,
include_down_adj=self.include_down_adj,
init_method=self._init_method,
init_edges=self._use_edge_features,
init_rings=False,
n_jobs=self._n_jobs)
print(f'Saving processed dataset in {self.processed_paths[0]}...')
torch.save(self.collate(complexes, self.max_dim), self.processed_paths[0])
print(f'Saving idx in {self.processed_paths[1]}...')
torch.save(split_idx, self.processed_paths[1])
print(f'Saving num_tasks in {self.processed_paths[2]}...')
torch.save(11, self.processed_paths[2])
def get_idx_split(self):
""" Get dataset splits.
Returns:
Dict with 'train', 'val', 'test', splits indices.
"""
split_file = osp.join(self.root,
"splits_random_stratified_peptide_structure.pickle")
with open(split_file, 'rb') as f:
splits = pickle.load(f)
split_dict = replace_numpy_with_torchtensor(splits)
split_dict['valid'] = split_dict['val']
return split_dict
def load_pep_s_graph_dataset(root):
raw_dir = osp.join(root, 'raw')
data_df = pd.read_csv(osp.join(raw_dir,
'peptide_structure_dataset.csv.gz'))
smiles_list = data_df['smiles']
target_names = ['Inertia_mass_a', 'Inertia_mass_b', 'Inertia_mass_c',
'Inertia_valence_a', 'Inertia_valence_b',
'Inertia_valence_c', 'length_a', 'length_b', 'length_c',
'Spherocity', 'Plane_best_fit']
# Normalize to zero mean and unit standard deviation.
data_df.loc[:, target_names] = data_df.loc[:, target_names].apply(
lambda x: (x - x.mean()) / x.std(), axis=0)
print('Converting SMILES strings into graphs...')
data_list = []
for i in tqdm(range(len(smiles_list))):
data = Data()
smiles = smiles_list[i]
y = data_df.iloc[i][target_names]
graph = smiles2graph(smiles)
assert (len(graph['edge_feat']) == graph['edge_index'].shape[1])
assert (len(graph['node_feat']) == graph['num_nodes'])
data.__num_nodes__ = int(graph['num_nodes'])
data.edge_index = torch.from_numpy(graph['edge_index']).to(
torch.int64)
data.edge_attr = torch.from_numpy(graph['edge_feat']).to(
torch.int64)
data.x = torch.from_numpy(graph['node_feat']).to(torch.int64)
data.y = torch.Tensor([y])
data_list.append(data)
dataset = InMemoryDataset.collate(data_list)
#get split file
split_file = osp.join(root,
"splits_random_stratified_peptide_structure.pickle")
with open(split_file, 'rb') as f:
splits = pickle.load(f)
split_dict = replace_numpy_with_torchtensor(splits)
split_dict['valid'] = split_dict['val']
return dataset, split_dict['train'], split_dict['valid'], split_dict['test'] | 11,359 | 41.546816 | 134 | py |
cwn | cwn-main/data/datasets/peptides_functional.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 2 21:37:42 2023
@author: renz
"""
import hashlib
import os.path as osp
import os
import pickle
import shutil
import pandas as pd
import torch
from ogb.utils import smiles2graph
from ogb.utils.torch_util import replace_numpy_with_torchtensor
from ogb.utils.url import decide_download
from torch_geometric.data import Data, download_url
from torch_geometric.data import InMemoryDataset
from data.utils import convert_graph_dataset_with_rings
from data.datasets import InMemoryComplexDataset
from tqdm import tqdm
class PeptidesFunctionalDataset(InMemoryComplexDataset):
"""
PyG dataset of 15,535 peptides represented as their molecular graph
(SMILES) with 10-way multi-task binary classification of their
functional classes.
    The goal is to use the molecular representation of peptides instead
of amino acid sequence representation ('peptide_seq' field in the file,
provided for possible baseline benchmarking but not used here) to test
GNNs' representation capability.
The 10 classes represent the following functional classes (in order):
['antifungal', 'cell_cell_communication', 'anticancer',
'drug_delivery_vehicle', 'antimicrobial', 'antiviral',
'antihypertensive', 'antibacterial', 'antiparasitic', 'toxic']
Args:
root (string): Root directory where the dataset should be saved.
smiles2graph (callable): A callable function that converts a SMILES
string into a graph object. We use the OGB featurization.
* The default smiles2graph requires rdkit to be installed *
"""
def __init__(self, root, max_ring_size, smiles2graph=smiles2graph,
transform=None, pre_transform=None, pre_filter=None,
include_down_adj=False, init_method='sum', n_jobs=2):
self.original_root = root
self.smiles2graph = smiles2graph
self.folder = osp.join(root, 'peptides-functional')
self.url = 'https://www.dropbox.com/s/ol2v01usvaxbsr8/peptide_multi_class_dataset.csv.gz?dl=1'
self.version = '701eb743e899f4d793f0e13c8fa5a1b4' # MD5 hash of the intended dataset file
self.url_stratified_split = 'https://www.dropbox.com/s/j4zcnx2eipuo0xz/splits_random_stratified_peptide.pickle?dl=1'
self.md5sum_stratified_split = '5a0114bdadc80b94fc7ae974f13ef061'
# Check version and update if necessary.
release_tag = osp.join(self.folder, self.version)
if osp.isdir(self.folder) and (not osp.exists(release_tag)):
print(f"{self.__class__.__name__} has been updated.")
if input("Will you update the dataset now? (y/N)\n").lower() == 'y':
shutil.rmtree(self.folder)
self.name = 'peptides_functional'
self._max_ring_size = max_ring_size
self._use_edge_features = True
self._n_jobs = n_jobs
super(PeptidesFunctionalDataset, self).__init__(root, transform, pre_transform, pre_filter,
max_dim=2, init_method=init_method, include_down_adj=include_down_adj,
cellular=True, num_classes=1)
self.data, self.slices, idx, self.num_tasks = self.load_dataset()
self.train_ids = idx['train']
self.val_ids = idx['val']
self.test_ids = idx['test']
self.num_node_type = 9
self.num_edge_type = 3
@property
def raw_file_names(self):
return 'peptide_multi_class_dataset.csv.gz'
@property
def processed_file_names(self):
return [f'{self.name}_complex.pt', f'{self.name}_idx.pt', f'{self.name}_tasks.pt']
@property
def processed_dir(self):
"""Overwrite to change name based on edge and simple feats"""
directory = super(PeptidesFunctionalDataset, self).processed_dir
suffix1 = f"_{self._max_ring_size}rings" if self._cellular else ""
suffix2 = "-E" if self._use_edge_features else ""
return directory + suffix1 + suffix2
def _md5sum(self, path):
hash_md5 = hashlib.md5()
with open(path, 'rb') as f:
buffer = f.read()
hash_md5.update(buffer)
return hash_md5.hexdigest()
def download(self):
if decide_download(self.url):
path = download_url(self.url, self.raw_dir)
# Save to disk the MD5 hash of the downloaded file.
hash = self._md5sum(path)
if hash != self.version:
raise ValueError("Unexpected MD5 hash of the downloaded file")
open(osp.join(self.root, hash), 'w').close()
# Download train/val/test splits.
path_split1 = download_url(self.url_stratified_split, self.root)
assert self._md5sum(path_split1) == self.md5sum_stratified_split
old_df_name = osp.join(self.raw_dir,
'peptide_multi_class_dataset.csv.gz?dl=1')
new_df_name = osp.join(self.raw_dir,
'peptide_multi_class_dataset.csv.gz')
old_split_file = osp.join(self.root,
"splits_random_stratified_peptide.pickle?dl=1")
new_split_file = osp.join(self.root,
"splits_random_stratified_peptide.pickle")
os.rename(old_df_name, new_df_name)
os.rename(old_split_file, new_split_file)
else:
print('Stop download.')
exit(-1)
def load_dataset(self):
"""Load the dataset from here and process it if it doesn't exist"""
print("Loading dataset from disk...")
data, slices = torch.load(self.processed_paths[0])
idx = torch.load(self.processed_paths[1])
tasks = torch.load(self.processed_paths[2])
return data, slices, idx, tasks
def process(self):
data_df = pd.read_csv(osp.join(self.raw_dir,
'peptide_multi_class_dataset.csv.gz'))
smiles_list = data_df['smiles']
print('Converting SMILES strings into graphs...')
data_list = []
for i in tqdm(range(len(smiles_list))):
data = Data()
smiles = smiles_list[i]
graph = self.smiles2graph(smiles)
assert (len(graph['edge_feat']) == graph['edge_index'].shape[1])
assert (len(graph['node_feat']) == graph['num_nodes'])
data.__num_nodes__ = int(graph['num_nodes'])
data.edge_index = torch.from_numpy(graph['edge_index']).to(
torch.int64)
data.edge_attr = torch.from_numpy(graph['edge_feat']).to(
torch.int64)
data.x = torch.from_numpy(graph['node_feat']).to(torch.int64)
data.y = torch.Tensor([eval(data_df['labels'].iloc[i])])
data_list.append(data)
if self.pre_transform is not None:
data_list = [self.pre_transform(data) for data in data_list]
split_idx = self.get_idx_split()
# NB: the init method would basically have no effect if
# we use edge features and do not initialize rings.
print(f"Converting the {self.name} dataset to a cell complex...")
complexes, _, _ = convert_graph_dataset_with_rings(
data_list,
max_ring_size=self._max_ring_size,
include_down_adj=self.include_down_adj,
init_method=self._init_method,
init_edges=self._use_edge_features,
init_rings=False,
n_jobs=self._n_jobs)
print(f'Saving processed dataset in {self.processed_paths[0]}...')
torch.save(self.collate(complexes, self.max_dim), self.processed_paths[0])
print(f'Saving idx in {self.processed_paths[1]}...')
torch.save(split_idx, self.processed_paths[1])
print(f'Saving num_tasks in {self.processed_paths[2]}...')
torch.save(10, self.processed_paths[2])
def get_idx_split(self):
""" Get dataset splits.
Returns:
Dict with 'train', 'val', 'test', splits indices.
"""
split_file = osp.join(self.root,
"splits_random_stratified_peptide.pickle")
with open(split_file, 'rb') as f:
splits = pickle.load(f)
split_dict = replace_numpy_with_torchtensor(splits)
split_dict['valid'] = split_dict['val']
return split_dict
def load_pep_f_graph_dataset(root):
raw_dir = osp.join(root, 'raw')
data_df = pd.read_csv(osp.join(raw_dir,
'peptide_multi_class_dataset.csv.gz'))
smiles_list = data_df['smiles']
    # NB: unlike the structural dataset, this CSV has no 3D regression targets to
    # normalize; the 10 binary labels are read from the 'labels' column below.
print('Converting SMILES strings into graphs...')
data_list = []
for i in tqdm(range(len(smiles_list))):
data = Data()
smiles = smiles_list[i]
        y = eval(data_df['labels'].iloc[i])
graph = smiles2graph(smiles)
assert (len(graph['edge_feat']) == graph['edge_index'].shape[1])
assert (len(graph['node_feat']) == graph['num_nodes'])
data.__num_nodes__ = int(graph['num_nodes'])
data.edge_index = torch.from_numpy(graph['edge_index']).to(
torch.int64)
data.edge_attr = torch.from_numpy(graph['edge_feat']).to(
torch.int64)
data.x = torch.from_numpy(graph['node_feat']).to(torch.int64)
data.y = torch.Tensor([y])
data_list.append(data)
dataset = InMemoryDataset.collate(data_list)
#get split file
split_file = osp.join(root,
"splits_random_stratified_peptide.pickle")
with open(split_file, 'rb') as f:
splits = pickle.load(f)
split_dict = replace_numpy_with_torchtensor(splits)
split_dict['valid'] = split_dict['val']
return dataset, split_dict['train'], split_dict['valid'], split_dict['test'] | 10,424 | 39.564202 | 124 | py |
cwn | cwn-main/data/datasets/plot_ringtree_dataset.py | import networkx as nx
import matplotlib.pyplot as plt
from data.datasets.ring_utils import generate_ring_transfer_graph_dataset
from torch_geometric.utils import convert
def visualise_ringtree_dataset():
dataset = generate_ring_transfer_graph_dataset(nodes=10, samples=100, classes=5)
data = dataset[0]
graph = convert.to_networkx(data, to_undirected=True)
plt.figure()
nx.draw_networkx(graph)
plt.show()
if __name__ == "__main__":
visualise_ringtree_dataset()
| 495 | 23.8 | 84 | py |
cwn | cwn-main/data/datasets/cluster.py | import pickle
from data.datasets import InMemoryComplexDataset
from data.utils import convert_graph_dataset_with_gudhi
from torch_geometric.datasets import GNNBenchmarkDataset
class ClusterDataset(InMemoryComplexDataset):
"""This is the Cluster dataset from the Benchmarking GNNs paper.
The dataset contains multiple graphs and we have to do node classification on all these graphs.
"""
def __init__(self, root, transform=None,
pre_transform=None, pre_filter=None, max_dim=2):
self.name = 'CLUSTER'
super(ClusterDataset, self).__init__(root, transform, pre_transform, pre_filter,
max_dim=max_dim)
self.max_dim = max_dim
self._data_list, idx = self.load_dataset()
self.train_ids = idx[0]
self.val_ids = idx[1]
self.test_ids = idx[2]
@property
def raw_file_names(self):
name = self.name
# The processed graph files are our raw files.
# I've obtained this from inside the GNNBenchmarkDataset class
return [f'{name}_train.pt', f'{name}_val.pt', f'{name}_test.pt']
@property
def processed_file_names(self):
return ['complex_train.pkl', 'complex_val.pkl', 'complex_test.pkl']
def download(self):
# Instantiating this will download and process the graph dataset.
GNNBenchmarkDataset('./datasets/', 'CLUSTER')
def load_dataset(self):
"""Load the dataset from here and process it if it doesn't exist"""
data_list, idx = [], []
start = 0
for path in self.processed_paths:
with open(path, 'rb') as handle:
data_list.extend(pickle.load(handle))
idx.append(list(range(start, len(data_list))))
start = len(data_list)
return data_list, idx
def process(self):
# At this stage, the graph dataset is already downloaded and processed
print(f"Processing cellular complex dataset for {self.name}")
train_data = GNNBenchmarkDataset('./datasets/', 'CLUSTER', split='train')
val_data = GNNBenchmarkDataset('./datasets/', 'CLUSTER', split='val')
test_data = GNNBenchmarkDataset('./datasets/', 'CLUSTER', split='test')
# For testing
# train_data = list(train_data)[:3]
# val_data = list(val_data)[:3]
# test_data = list(test_data)[:3]
print("Converting the train dataset with gudhi...")
train_complexes, _, _ = convert_graph_dataset_with_gudhi(train_data,
expansion_dim=self.max_dim, include_down_adj=self.include_down_adj)
print("Converting the validation dataset with gudhi...")
val_complexes, _, _ = convert_graph_dataset_with_gudhi(val_data, expansion_dim=self.max_dim, include_down_adj=self.include_down_adj)
print("Converting the test dataset with gudhi...")
        test_complexes, _, _ = convert_graph_dataset_with_gudhi(test_data,
            expansion_dim=self.max_dim, include_down_adj=self.include_down_adj)
complexes = [train_complexes, val_complexes, test_complexes]
for i, path in enumerate(self.processed_paths):
with open(path, 'wb') as handle:
pickle.dump(complexes[i], handle)
| 3,283 | 41.102564 | 140 | py |
cwn | cwn-main/data/datasets/csl.py | import os.path as osp
import numpy as np
import torch
from data.datasets import InMemoryComplexDataset
from data.utils import convert_graph_dataset_with_rings
from torch_geometric.datasets import GNNBenchmarkDataset
from torch_geometric.utils import remove_self_loops
class CSLDataset(InMemoryComplexDataset):
"""This is the CSL (Circular Skip Link) dataset from the Benchmarking GNNs paper.
The dataset contains 10 isomorphism classes of regular graphs that must be classified.
"""
def __init__(self, root, transform=None,
pre_transform=None, pre_filter=None, max_ring_size=6, fold=0, init_method='sum',
n_jobs=2):
self.name = 'CSL'
self._max_ring_size = max_ring_size
self._n_jobs = n_jobs
super(CSLDataset, self).__init__(root, transform, pre_transform, pre_filter,
max_dim=2, cellular=True, init_method=init_method,
num_classes=10)
assert 0 <= fold <= 4
self.fold = fold
self.data, self.slices = self.load_dataset()
self.num_node_type = 1
self.num_edge_type = 1
# These cross-validation splits have been taken from
# https://github.com/graphdeeplearning/benchmarking-gnns/tree/master/data/CSL
train_filename = osp.join(self.root, 'splits', 'CSL_train.txt')
valid_filename = osp.join(self.root, 'splits', 'CSL_val.txt')
test_filename = osp.join(self.root, 'splits', 'CSL_test.txt')
self.train_ids = np.loadtxt(train_filename, dtype=int, delimiter=',')[fold].tolist()
self.val_ids = np.loadtxt(valid_filename, dtype=int, delimiter=',')[fold].tolist()
self.test_ids = np.loadtxt(test_filename, dtype=int, delimiter=',')[fold].tolist()
# Make sure the split ratios are as expected (3:1:1)
assert len(self.train_ids) == 3 * len(self.test_ids)
assert len(self.val_ids) == len(self.test_ids)
# Check all splits contain numbers that are smaller than the total number of graphs
assert max(self.train_ids) < 150
assert max(self.val_ids) < 150
assert max(self.test_ids) < 150
@property
def raw_file_names(self):
return ['data.pt']
@property
def processed_file_names(self):
return ['complexes.pt']
def download(self):
# Instantiating this will download and process the graph dataset.
GNNBenchmarkDataset(self.raw_dir, 'CSL')
def load_dataset(self):
"""Load the dataset from here and process it if it doesn't exist"""
print("Loading dataset from disk...")
data, slices = torch.load(self.processed_paths[0])
return data, slices
def process(self):
# At this stage, the graph dataset is already downloaded and processed
print(f"Processing cell complex dataset for {self.name}")
# This dataset has no train / val / test splits and we must use cross-validation
data = GNNBenchmarkDataset(self.raw_dir, 'CSL')
assert len(data) == 150
# Check that indeed there are no features
assert data[0].x is None
assert data[0].edge_attr is None
print("Populating graph with features")
# Initialise everything with zero as in the Benchmarking GNNs code
# https://github.com/graphdeeplearning/benchmarking-gnns/blob/ef8bd8c7d2c87948bc1bdd44099a52036e715cd0/data/CSL.py#L144
new_data = []
for i, datum in enumerate(data):
edge_index = datum.edge_index
num_nodes = datum.num_nodes
# Make sure we have no self-loops in this dataset
edge_index, _ = remove_self_loops(edge_index)
num_edges = edge_index.size(1)
vx = torch.zeros((num_nodes, 1), dtype=torch.long)
edge_attr = torch.zeros(num_edges, dtype=torch.long)
setattr(datum, 'edge_index', edge_index)
setattr(datum, 'x', vx)
setattr(datum, 'edge_attr', edge_attr)
new_data.append(datum)
assert new_data[0].x is not None
assert new_data[0].edge_attr is not None
print("Converting the train dataset to a cell complex...")
complexes, _, _ = convert_graph_dataset_with_rings(
new_data,
max_ring_size=self._max_ring_size,
include_down_adj=False,
init_edges=True,
init_rings=False,
n_jobs=self._n_jobs)
path = self.processed_paths[0]
print(f'Saving processed dataset in {path}....')
torch.save(self.collate(complexes, 2), path)
@property
def processed_dir(self):
"""Overwrite to change name based on edges"""
directory = super(CSLDataset, self).processed_dir
suffix1 = f"_{self._max_ring_size}rings" if self._cellular else ""
return directory + suffix1
| 4,912 | 39.270492 | 127 | py |
cwn | cwn-main/data/datasets/flow_utils.py | import numpy as np
import random
import torch
import networkx as nx
import itertools
from scipy.spatial import Delaunay
from scipy import sparse
from data.complex import Cochain
from data.parallel import ProgressParallel
from joblib import delayed
def is_inside_rectangle(x, rect):
return rect[0, 0] <= x[0] <= rect[1, 0] and rect[0, 1] <= x[1] <= rect[1, 1]
def sample_point_from_rect(points, rect):
samples = []
for i in range(len(points)):
if is_inside_rectangle(points[i], rect):
samples.append(i)
return random.choice(samples)
def create_hole(points, triangles, hole):
kept_triangles = []
removed_vertices = set()
# Find the points and triangles to remove
for i in range(len(triangles)):
simplex = triangles[i]
assert len(simplex) == 3
xs = points[simplex]
remove_triangle = False
for j in range(3):
vertex = simplex[j]
if is_inside_rectangle(xs[j], hole):
remove_triangle = True
removed_vertices.add(vertex)
if not remove_triangle:
kept_triangles.append(i)
# Remove the triangles and points inside the holes
triangles = triangles[np.array(kept_triangles)]
# Remove the points that are not part of any triangles anymore.
# This can happen in some very rare cases
for i in range(len(points)):
if np.sum(triangles == i) == 0:
removed_vertices.add(i)
points = np.delete(points, list(removed_vertices), axis=0)
# Renumber the indices of the triangles' vertices
for vertex in sorted(removed_vertices, reverse=True):
triangles[triangles >= vertex] -= 1
return points, triangles
def create_graph_from_triangulation(points, triangles):
    # Create a graph from this triangulation containing only the non-removed triangles
G = nx.Graph()
edge_idx = 0
edge_to_tuple = {}
tuple_to_edge = {}
for i in range(len(triangles)):
vertices = triangles[i]
for j in range(3):
if vertices[j] not in G:
G.add_node(vertices[j], point=points[vertices[j]])
for v1, v2 in itertools.combinations(vertices, 2):
if not G.has_edge(v1, v2):
G.add_edge(v1, v2, index=edge_idx)
edge_to_tuple[edge_idx] = (min(v1, v2), max(v1, v2))
tuple_to_edge[(min(v1, v2), max(v1, v2))] = edge_idx
edge_idx += 1
assert G.has_edge(v2, v1)
G.graph['edge_to_tuple'] = edge_to_tuple
G.graph['tuple_to_edge'] = tuple_to_edge
G.graph['points'] = points
G.graph['triangles'] = triangles
return G
def extract_boundary_matrices(G: nx.Graph):
"""Compute the boundary and co-boundary matrices for the edges of the complex. """
edge_to_tuple = G.graph['edge_to_tuple']
tuple_to_edge = G.graph['tuple_to_edge']
triangles = G.graph['triangles']
B1 = np.zeros((G.number_of_nodes(), G.number_of_edges()), dtype=float)
for edge_id in range(G.number_of_edges()):
nodes = edge_to_tuple[edge_id]
min_node = min(nodes)
max_node = max(nodes)
B1[min_node, edge_id] = -1
B1[max_node, edge_id] = 1
assert np.all(np.sum(np.abs(B1), axis=-1) > 0)
assert np.all(np.sum(np.abs(B1), axis=0) == 2)
assert np.all(np.sum(B1, axis=0) == 0)
def extract_edge_and_orientation(triangle, i):
assert i <= 2
n1 = triangle[i]
if i < 2:
n2 = triangle[i + 1]
else:
n2 = triangle[0]
if n1 < n2:
orientation = 1
else:
orientation = -1
return tuple_to_edge[(min(n1, n2), max(n1, n2))], orientation
B2 = np.zeros((G.number_of_edges(), len(triangles)), dtype=float)
for i in range(len(triangles)):
edge1, orientation1 = extract_edge_and_orientation(triangles[i], 0)
edge2, orientation2 = extract_edge_and_orientation(triangles[i], 1)
edge3, orientation3 = extract_edge_and_orientation(triangles[i], 2)
assert edge1 != edge2 and edge1 != edge3 and edge2 != edge3
B2[edge1, i] = orientation1
B2[edge2, i] = orientation2
B2[edge3, i] = orientation3
assert np.all(np.sum(np.abs(B2), axis=0) == 3)
assert np.all(np.sum(np.abs(B2), axis=-1) > 0)
return B1, B2
def generate_trajectory(start_rect, end_rect, ckpt_rect, G: nx.Graph):
points = G.graph['points']
tuple_to_edge = G.graph['tuple_to_edge']
start_vertex = sample_point_from_rect(points, start_rect)
end_vertex = sample_point_from_rect(points, end_rect)
ckpt_vertex = sample_point_from_rect(points, ckpt_rect)
x = np.zeros((len(tuple_to_edge), 1))
vertex = start_vertex
end_point = points[end_vertex]
ckpt_point = points[ckpt_vertex]
path = [vertex]
explored = set()
ckpt_reached = False
while vertex != end_vertex:
explored.add(vertex)
if vertex == ckpt_vertex:
ckpt_reached = True
nv = np.array([nghb for nghb in G.neighbors(vertex)
if nghb not in explored])
if len(nv) == 0:
# If we get stuck because everything around was explored
# Then just try to generate another trajectory.
return generate_trajectory(start_rect, end_rect, ckpt_rect, G)
npoints = points[nv]
if ckpt_reached:
dist = np.sum((npoints - end_point[None, :]) ** 2, axis=-1)
else:
dist = np.sum((npoints - ckpt_point[None, :]) ** 2, axis=-1)
# prob = softmax(-dist**2)
# vertex = nv[np.random.choice(len(prob), p=prob)]
coin_toss = np.random.uniform()
if coin_toss < 0.1:
vertex = nv[np.random.choice(len(dist))]
else:
vertex = nv[np.argmin(dist)]
path.append(vertex)
# Set the flow value according to the orientation
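        # E.g. (illustrative indices) traversing edge (3, 7) from 3 to 7 writes +1 into
        # that edge's row of x, while traversing it from 7 to 3 writes -1, so the sign
        # encodes the flow direction relative to the canonical (low -> high) orientation.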
if path[-2] < path[-1]:
x[tuple_to_edge[(path[-2], path[-1])], 0] = 1
else:
x[tuple_to_edge[(path[-1], path[-2])], 0] = -1
return x, path
def extract_adj_from_boundary(B, G=None):
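    # Sketch of what is computed here: given a boundary matrix B, two cells indexed by
    # the columns of B are adjacent iff they share a cell indexed by the rows of B
    # (a common boundary or coboundary), i.e. iff the off-diagonal entry (B^T B)[i, j]
    # is non-zero; the sign of that entry gives the relative orientation returned
    # alongside the index pairs.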
A = sparse.csr_matrix(B.T).dot(sparse.csr_matrix(B))
n = A.shape[0]
if G is not None:
assert n == G.number_of_edges()
# Subtract self-loops, which we do not count.
connections = A.count_nonzero() - np.sum(A.diagonal() != 0)
index = torch.empty((2, connections), dtype=torch.long)
orient = torch.empty(connections)
connection = 0
cA = A.tocoo()
for i, j, v in zip(cA.row, cA.col, cA.data):
if j >= i:
continue
        assert v == 1 or v == -1, f'Unexpected adjacency value {v}'
index[0, connection] = i
index[1, connection] = j
orient[connection] = np.sign(v)
index[0, connection + 1] = j
index[1, connection + 1] = i
orient[connection + 1] = np.sign(v)
connection += 2
assert connection == connections
return index, orient
def build_cochain(B1, B2, T2, x, class_id, G=None):
# Change the orientation of the boundary matrices
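    # Under a change of edge orientations T2 (a diagonal +/-1 matrix), the boundary
    # operators transform as B1 <- B1 T2 and B2 <- T2 B2, and the edge signal as
    # x <- T2 x; the two lines below and the feature update further down implement
    # exactly these products.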
B1 = sparse.csr_matrix(B1).dot(sparse.csr_matrix(T2)).toarray()
B2 = sparse.csr_matrix(T2).dot(sparse.csr_matrix(B2)).toarray()
# Extract the adjacencies in pyG edge_index format.
lower_index, lower_orient = extract_adj_from_boundary(B1, G)
upper_index, upper_orient = extract_adj_from_boundary(B2.T, G)
index_dict = {
'lower_index': lower_index,
'lower_orient': lower_orient,
'upper_index': upper_index,
'upper_orient': upper_orient,
}
# Change the orientation of the features
x = sparse.csr_matrix(T2).dot(sparse.csr_matrix(x)).toarray()
x = torch.tensor(x, dtype=torch.float32)
return Cochain(dim=1, x=x, **index_dict, y=torch.tensor([class_id]))
def generate_flow_cochain(class_id, G, B1, B2, T2):
assert 0 <= class_id <= 1
# Define the start, midpoint and and stop regions for the trajectories.
start_rect = np.array([[0.0, 0.8], [0.2, 1.0]])
end_rect = np.array([[0.8, 0.0], [1.0, 0.2]])
bot_ckpt_rect = np.array([[0.0, 0.0], [0.2, 0.2]])
top_ckpt_rect = np.array([[0.8, 0.8], [1.0, 1.0]])
ckpts = [bot_ckpt_rect, top_ckpt_rect]
# Generate flow
x, _ = generate_trajectory(start_rect, end_rect, ckpts[class_id], G)
return build_cochain(B1, B2, T2, x, class_id, G)
def get_orient_matrix(size, orientation):
"""Creates a change of orientation operator of the specified size."""
if orientation == 'default':
return np.identity(size)
elif orientation == 'random':
diag = 2*np.random.randint(0, 2, size=size) - 1
        return np.diag(diag).astype(np.int64)
else:
raise ValueError(f'Unsupported orientation {orientation}')
def load_flow_dataset(num_points=1000, num_train=1000, num_test=200,
train_orientation='default', test_orientation='default', n_jobs=2):
points = np.random.uniform(low=-0.05, high=1.05, size=(num_points, 2))
tri = Delaunay(points)
# Double check each point appears in some triangle.
for i in range(len(points)):
assert np.sum(tri.simplices == i) > 0
hole1 = np.array([[0.2, 0.2], [0.4, 0.4]])
hole2 = np.array([[0.6, 0.6], [0.8, 0.8]])
points, triangles = create_hole(points, tri.simplices, hole1)
# Double check each point appears in some triangle.
for i in range(len(points)):
assert np.sum(triangles == i) > 0
points, triangles = create_hole(points, triangles, hole2)
# Double check each point appears in some triangle.
for i in range(len(points)):
assert np.sum(triangles == i) > 0
assert np.min(triangles) == 0
assert np.max(triangles) == len(points) - 1
G = create_graph_from_triangulation(points, triangles)
assert G.number_of_nodes() == len(points)
B1, B2 = extract_boundary_matrices(G)
classes = 2
assert B1.shape[1] == B2.shape[0]
num_edges = B1.shape[1]
# Process these in parallel because it's slow
samples_per_class = num_train // classes
parallel = ProgressParallel(n_jobs=n_jobs, use_tqdm=True, total=num_train)
train_samples = parallel(delayed(generate_flow_cochain)(
class_id=min(i // samples_per_class, 1), G=G, B1=B1, B2=B2,
T2=get_orient_matrix(num_edges, train_orientation)) for i in range(num_train))
samples_per_class = num_test // classes
parallel = ProgressParallel(n_jobs=n_jobs, use_tqdm=True, total=num_test)
test_samples = parallel(delayed(generate_flow_cochain)(
class_id=min(i // samples_per_class, 1), G=G, B1=B1, B2=B2,
T2=get_orient_matrix(num_edges, test_orientation)) for i in range(num_test))
return train_samples, test_samples, G
| 10,807 | 30.976331 | 89 | py |
cwn | cwn-main/data/datasets/sr.py | import os
import torch
import pickle
from data.sr_utils import load_sr_dataset
from data.utils import compute_clique_complex_with_gudhi, compute_ring_2complex
from data.utils import convert_graph_dataset_with_rings, convert_graph_dataset_with_gudhi
from data.datasets import InMemoryComplexDataset
from definitions import ROOT_DIR
from torch_geometric.data import Data
import os.path as osp
import errno
def makedirs(path):
try:
os.makedirs(osp.expanduser(osp.normpath(path)))
except OSError as e:
        if e.errno != errno.EEXIST or not osp.isdir(path):
raise e
def load_sr_graph_dataset(name, root=os.path.join(ROOT_DIR, 'datasets'), prefer_pkl=False):
raw_dir = os.path.join(root, 'SR_graphs', 'raw')
load_from = os.path.join(raw_dir, '{}.g6'.format(name))
load_from_pkl = os.path.join(raw_dir, '{}.pkl'.format(name))
if prefer_pkl and osp.exists(load_from_pkl):
print(f"Loading SR graph {name} from pickle dump...")
with open(load_from_pkl, 'rb') as handle:
data = pickle.load(handle)
else:
data = load_sr_dataset(load_from)
graphs = list()
for datum in data:
edge_index, num_nodes = datum
x = torch.ones(num_nodes, 1, dtype=torch.float32)
graph = Data(x=x, edge_index=edge_index, y=None, edge_attr=None, num_nodes=num_nodes)
graphs.append(graph)
train_ids = list(range(len(graphs)))
val_ids = list(range(len(graphs)))
test_ids = list(range(len(graphs)))
return graphs, train_ids, val_ids, test_ids
class SRDataset(InMemoryComplexDataset):
"""A dataset of complexes obtained by lifting Strongly Regular graphs."""
def __init__(self, root, name, max_dim=2, num_classes=16, train_ids=None, val_ids=None, test_ids=None,
include_down_adj=False, max_ring_size=None, n_jobs=2, init_method='sum'):
self.name = name
self._num_classes = num_classes
self._n_jobs = n_jobs
assert max_ring_size is None or max_ring_size > 3
self._max_ring_size = max_ring_size
cellular = (max_ring_size is not None)
if cellular:
assert max_dim == 2
super(SRDataset, self).__init__(root, max_dim=max_dim, num_classes=num_classes,
include_down_adj=include_down_adj, cellular=cellular, init_method=init_method)
self.data, self.slices = torch.load(self.processed_paths[0])
self.train_ids = list(range(self.len())) if train_ids is None else train_ids
self.val_ids = list(range(self.len())) if val_ids is None else val_ids
self.test_ids = list(range(self.len())) if test_ids is None else test_ids
@property
def processed_dir(self):
"""This is overwritten, so the cellular complex data is placed in another folder"""
directory = super(SRDataset, self).processed_dir
suffix = f"_{self._max_ring_size}rings" if self._cellular else ""
suffix += f"_down_adj" if self.include_down_adj else ""
return directory + suffix
@property
def processed_file_names(self):
return ['{}_complex_list.pt'.format(self.name)]
def process(self):
graphs, _, _, _ = load_sr_graph_dataset(self.name, prefer_pkl=True)
exp_dim = self.max_dim
if self._cellular:
print(f"Converting the {self.name} dataset to a cell complex...")
complexes, max_dim, num_features = convert_graph_dataset_with_rings(
graphs,
max_ring_size=self._max_ring_size,
include_down_adj=self.include_down_adj,
init_method=self._init_method,
init_edges=True,
init_rings=True,
n_jobs=self._n_jobs)
else:
print(f"Converting the {self.name} dataset with gudhi...")
complexes, max_dim, num_features = convert_graph_dataset_with_gudhi(
graphs,
expansion_dim=exp_dim,
include_down_adj=self.include_down_adj,
init_method=self._init_method)
if self._max_ring_size is not None:
assert max_dim <= 2
if max_dim != self.max_dim:
self.max_dim = max_dim
makedirs(self.processed_dir)
# Now we save in opt format.
path = self.processed_paths[0]
torch.save(self.collate(complexes, self.max_dim), path)
| 4,522 | 39.747748 | 107 | py |
cwn | cwn-main/data/datasets/ring_utils.py | import numpy as np
import torch
import random
from torch_geometric.data import Data
from sklearn.preprocessing import LabelBinarizer
# TODO: Add a graph dataset for ring lookup.
def generate_ring_lookup_graph(nodes):
"""This generates a dictionary lookup ring. No longer being used for now."""
# Assign all the other nodes in the ring a unique key and value
keys = np.arange(1, nodes)
vals = np.random.permutation(nodes - 1)
oh_keys = np.array(LabelBinarizer().fit_transform(keys))
oh_vals = np.array(LabelBinarizer().fit_transform(vals))
oh_all = np.concatenate((oh_keys, oh_vals), axis=-1)
x = np.empty((nodes, oh_all.shape[1]))
x[1:, :] = oh_all
    # Assign the source node one of these random keys and zero out its value part
key_idx = random.randint(0, nodes - 2)
val = vals[key_idx]
x[0, :] = 0
x[0, :oh_keys.shape[1]] = oh_keys[key_idx]
x = torch.tensor(x, dtype=torch.float32)
edge_index = []
for i in range(nodes-1):
edge_index.append([i, i + 1])
edge_index.append([i + 1, i])
# Add the edges that close the ring
edge_index.append([0, nodes - 1])
edge_index.append([nodes - 1, 0])
    edge_index = np.array(edge_index, dtype=np.int64).T
edge_index = torch.tensor(edge_index, dtype=torch.long)
# Create a mask for the target node of the graph
mask = torch.zeros(nodes, dtype=torch.bool)
mask[0] = 1
# Add the label of the graph as a graph label
y = torch.tensor([val], dtype=torch.long)
return Data(x=x, edge_index=edge_index, mask=mask, y=y)
def generate_ringlookup_graph_dataset(nodes, samples=10000):
# Generate the dataset
dataset = []
for i in range(samples):
graph = generate_ring_lookup_graph(nodes)
dataset.append(graph)
return dataset
def generate_ring_transfer_graph(nodes, target_label):
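    # Task sketch: the source node (index 0) has zeroed features and must predict the
    # one-hot class label stored on the node diametrically opposite to it, so the
    # relevant information has to travel nodes // 2 hops around the ring.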
opposite_node = nodes // 2
# Initialise the feature matrix with a constant feature vector
# TODO: Modify the experiment to use another random constant feature per graph
x = np.ones((nodes, len(target_label)))
x[0, :] = 0.0
x[opposite_node, :] = target_label
x = torch.tensor(x, dtype=torch.float32)
edge_index = []
for i in range(nodes-1):
edge_index.append([i, i + 1])
edge_index.append([i + 1, i])
# Add the edges that close the ring
edge_index.append([0, nodes - 1])
edge_index.append([nodes - 1, 0])
    edge_index = np.array(edge_index, dtype=np.int64).T
edge_index = torch.tensor(edge_index, dtype=torch.long)
# Create a mask for the target node of the graph
mask = torch.zeros(nodes, dtype=torch.bool)
mask[0] = 1
# Add the label of the graph as a graph label
y = torch.tensor([np.argmax(target_label)], dtype=torch.long)
return Data(x=x, edge_index=edge_index, mask=mask, y=y)
def generate_ring_transfer_graph_dataset(nodes, classes=5, samples=10000):
# Generate the dataset
dataset = []
samples_per_class = samples // classes
for i in range(samples):
label = i // samples_per_class
target_class = np.zeros(classes)
target_class[label] = 1.0
graph = generate_ring_transfer_graph(nodes, target_class)
dataset.append(graph)
return dataset
| 3,276 | 30.509615 | 82 | py |
cwn | cwn-main/data/datasets/zinc.py | import torch
import os.path as osp
from data.utils import convert_graph_dataset_with_rings
from data.datasets import InMemoryComplexDataset
from torch_geometric.datasets import ZINC
class ZincDataset(InMemoryComplexDataset):
"""This is ZINC from the Benchmarking GNNs paper. This is a graph regression task."""
def __init__(self, root, max_ring_size, use_edge_features=False, transform=None,
pre_transform=None, pre_filter=None, subset=True,
include_down_adj=False, n_jobs=2):
self.name = 'ZINC'
self._max_ring_size = max_ring_size
self._use_edge_features = use_edge_features
self._subset = subset
self._n_jobs = n_jobs
super(ZincDataset, self).__init__(root, transform, pre_transform, pre_filter,
max_dim=2, cellular=True,
include_down_adj=include_down_adj, num_classes=1)
self.data, self.slices, idx = self.load_dataset()
self.train_ids = idx[0]
self.val_ids = idx[1]
self.test_ids = idx[2]
self.num_node_type = 28
self.num_edge_type = 4
@property
def raw_file_names(self):
return ['train.pt', 'val.pt', 'test.pt']
@property
def processed_file_names(self):
name = self.name
return [f'{name}_complex.pt', f'{name}_idx.pt']
def download(self):
# Instantiating this will download and process the graph dataset.
ZINC(self.raw_dir, subset=self._subset)
def load_dataset(self):
"""Load the dataset from here and process it if it doesn't exist"""
print("Loading dataset from disk...")
data, slices = torch.load(self.processed_paths[0])
idx = torch.load(self.processed_paths[1])
return data, slices, idx
def process(self):
# At this stage, the graph dataset is already downloaded and processed
print(f"Processing cell complex dataset for {self.name}")
train_data = ZINC(self.raw_dir, subset=self._subset, split='train')
val_data = ZINC(self.raw_dir, subset=self._subset, split='val')
test_data = ZINC(self.raw_dir, subset=self._subset, split='test')
data_list = []
idx = []
start = 0
print("Converting the train dataset to a cell complex...")
train_complexes, _, _ = convert_graph_dataset_with_rings(
train_data,
max_ring_size=self._max_ring_size,
include_down_adj=self.include_down_adj,
init_edges=self._use_edge_features,
init_rings=False,
n_jobs=self._n_jobs)
data_list += train_complexes
idx.append(list(range(start, len(data_list))))
start = len(data_list)
print("Converting the validation dataset to a cell complex...")
val_complexes, _, _ = convert_graph_dataset_with_rings(
val_data,
max_ring_size=self._max_ring_size,
include_down_adj=self.include_down_adj,
init_edges=self._use_edge_features,
init_rings=False,
n_jobs=self._n_jobs)
data_list += val_complexes
idx.append(list(range(start, len(data_list))))
start = len(data_list)
print("Converting the test dataset to a cell complex...")
test_complexes, _, _ = convert_graph_dataset_with_rings(
test_data,
max_ring_size=self._max_ring_size,
include_down_adj=self.include_down_adj,
init_edges=self._use_edge_features,
init_rings=False,
n_jobs=self._n_jobs)
data_list += test_complexes
idx.append(list(range(start, len(data_list))))
path = self.processed_paths[0]
print(f'Saving processed dataset in {path}....')
torch.save(self.collate(data_list, 2), path)
path = self.processed_paths[1]
print(f'Saving idx in {path}....')
torch.save(idx, path)
@property
def processed_dir(self):
"""Overwrite to change name based on edges"""
directory = super(ZincDataset, self).processed_dir
suffix0 = "_full" if self._subset is False else ""
suffix1 = f"_{self._max_ring_size}rings" if self._cellular else ""
suffix2 = "-E" if self._use_edge_features else ""
return directory + suffix0 + suffix1 + suffix2
def load_zinc_graph_dataset(root, subset=True):
raw_dir = osp.join(root, 'ZINC', 'raw')
train_data = ZINC(raw_dir, subset=subset, split='train')
val_data = ZINC(raw_dir, subset=subset, split='val')
test_data = ZINC(raw_dir, subset=subset, split='test')
data = train_data + val_data + test_data
if subset:
assert len(train_data) == 10000
assert len(val_data) == 1000
assert len(test_data) == 1000
else:
assert len(train_data) == 220011
assert len(val_data) == 24445
assert len(test_data) == 5000
idx = []
start = 0
idx.append(list(range(start, len(train_data))))
start = len(train_data)
idx.append(list(range(start, start + len(val_data))))
start = len(train_data) + len(val_data)
idx.append(list(range(start, start + len(test_data))))
return data, idx[0], idx[1], idx[2]
| 5,282 | 36.468085 | 91 | py |
cwn | cwn-main/data/datasets/dataset.py | """
The code is based on https://github.com/rusty1s/pytorch_geometric/blob/76d61eaa9fc8702aa25f29dfaa5134a169d0f1f6/torch_geometric/data/dataset.py#L19
and https://github.com/rusty1s/pytorch_geometric/blob/master/torch_geometric/data/in_memory_dataset.py
Copyright (c) 2020 Matthias Fey <[email protected]>
Copyright (c) 2021 The CWN Project Authors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import copy
import re
from abc import ABC
import torch
import os.path as osp
from torch_geometric.data import Dataset
from itertools import repeat, product
from data.complex import Complex, Cochain
from torch import Tensor
def __repr__(obj):
if obj is None:
return 'None'
return re.sub('(<.*?)\\s.*(>)', r'\1\2', obj.__repr__())
class ComplexDataset(Dataset, ABC):
"""Base class for cochain complex datasets.
This class mirrors
https://github.com/rusty1s/pytorch_geometric/blob/76d61eaa9fc8702aa25f29dfaa5134a169d0f1f6/torch_geometric/data/dataset.py#L19
"""
def __init__(self, root=None, transform=None, pre_transform=None, pre_filter=None,
max_dim: int = None, num_classes: int = None, init_method: str = 'sum',
cellular: bool = False):
# These have to be initialised before calling the super class.
self._max_dim = max_dim
self._num_features = [None for _ in range(max_dim+1)]
self._init_method = init_method
self._cellular = cellular
super(ComplexDataset, self).__init__(root, transform, pre_transform, pre_filter)
self._num_classes = num_classes
self.train_ids = None
self.val_ids = None
self.test_ids = None
@property
def max_dim(self):
return self._max_dim
@max_dim.setter
def max_dim(self, value):
self._max_dim = value
@property
def num_classes(self):
return self._num_classes
@property
def processed_dir(self):
"""This is overwritten, so the cellular complex data is placed in another folder"""
prefix = "cell_" if self._cellular else ""
return osp.join(self.root, f'{prefix}complex_dim{self.max_dim}_{self._init_method}')
def num_features_in_dim(self, dim):
if dim > self.max_dim:
raise ValueError('`dim` {} larger than max allowed dimension {}.'.format(dim, self.max_dim))
if self._num_features[dim] is None:
self._look_up_num_features()
return self._num_features[dim]
def _look_up_num_features(self):
for complex in self:
for dim in range(complex.dimension + 1):
if self._num_features[dim] is None:
self._num_features[dim] = complex.cochains[dim].num_features
else:
assert self._num_features[dim] == complex.cochains[dim].num_features
def get_idx_split(self):
idx_split = {
'train': self.train_ids,
'valid': self.val_ids,
'test': self.test_ids}
return idx_split
class InMemoryComplexDataset(ComplexDataset):
"""Wrapper around ComplexDataset with functionality such as batching and storing the dataset.
This class mirrors
https://github.com/rusty1s/pytorch_geometric/blob/master/torch_geometric/data/in_memory_dataset.py
"""
@property
def raw_file_names(self):
r"""The name of the files to find in the :obj:`self.raw_dir` folder in
order to skip the download."""
raise NotImplementedError
@property
def processed_file_names(self):
r"""The name of the files to find in the :obj:`self.processed_dir`
folder in order to skip the processing."""
raise NotImplementedError
def download(self):
r"""Downloads the dataset to the :obj:`self.raw_dir` folder."""
raise NotImplementedError
def process(self):
r"""Processes the dataset to the :obj:`self.processed_dir` folder."""
raise NotImplementedError
def __init__(self, root=None, transform=None, pre_transform=None,
pre_filter=None, max_dim: int = None, num_classes: int = None,
include_down_adj=False, init_method=None, cellular: bool = False):
self.include_down_adj = include_down_adj
super(InMemoryComplexDataset, self).__init__(root, transform, pre_transform, pre_filter,
max_dim, num_classes, init_method=init_method,
cellular=cellular)
self.data, self.slices = None, None
self.__data_list__ = None
def len(self):
for dim in range(self.max_dim + 1):
for item in self.slices[dim].values():
return len(item) - 1
return 0
def get(self, idx):
if hasattr(self, '__data_list__'):
if self.__data_list__ is None:
self.__data_list__ = self.len() * [None]
else:
data = self.__data_list__[idx]
if data is not None:
return copy.copy(data)
retrieved = [self._get_cochain(dim, idx) for dim in range(0, self.max_dim + 1)]
cochains = [r[0] for r in retrieved if not r[1]]
targets = self.data['labels']
start, end = idx, idx + 1
if torch.is_tensor(targets):
s = list(repeat(slice(None), targets.dim()))
cat_dim = 0
s[cat_dim] = slice(start, end)
else:
# TODO: come up with a better method to handle this
assert targets[start] is None
s = start
target = targets[s]
dim = self.data['dims'][idx].item()
assert dim == len(cochains) - 1
data = Complex(*cochains, y=target)
if hasattr(self, '__data_list__'):
self.__data_list__[idx] = copy.copy(data)
return data
def _get_cochain(self, dim, idx) -> (Cochain, bool):
if dim < 0 or dim > self.max_dim:
raise ValueError(f'The current dataset does not have cochains at dimension {dim}.')
cochain_data = self.data[dim]
cochain_slices = self.slices[dim]
data = Cochain(dim)
if cochain_data.__num_cells__[idx] is not None:
data.num_cells = cochain_data.__num_cells__[idx]
if cochain_data.__num_cells_up__[idx] is not None:
data.num_cells_up = cochain_data.__num_cells_up__[idx]
if cochain_data.__num_cells_down__[idx] is not None:
data.num_cells_down = cochain_data.__num_cells_down__[idx]
elif dim == 0:
data.num_cells_down = None
for key in cochain_data.keys:
item, slices = cochain_data[key], cochain_slices[key]
start, end = slices[idx].item(), slices[idx + 1].item()
data[key] = None
if start != end:
if torch.is_tensor(item):
s = list(repeat(slice(None), item.dim()))
cat_dim = cochain_data.__cat_dim__(key, item)
if cat_dim is None:
cat_dim = 0
s[cat_dim] = slice(start, end)
elif start + 1 == end:
s = slices[start]
else:
s = slice(start, end)
data[key] = item[s]
empty = (data.num_cells is None)
return data, empty
@staticmethod
def collate(data_list, max_dim):
r"""Collates a python list of data objects to the internal storage
format of :class:`InMemoryComplexDataset`."""
def init_keys(dim, keys):
cochain = Cochain(dim)
for key in keys[dim]:
cochain[key] = []
cochain.__num_cells__ = []
cochain.__num_cells_up__ = []
cochain.__num_cells_down__ = []
slc = {key: [0] for key in keys[dim]}
return cochain, slc
def collect_keys(data_list, max_dim):
keys = {dim: set() for dim in range(0, max_dim + 1)}
for complex in data_list:
for dim in keys:
if dim not in complex.cochains:
continue
cochain = complex.cochains[dim]
keys[dim] |= set(cochain.keys)
return keys
keys = collect_keys(data_list, max_dim)
types = {}
cat_dims = {}
tensor_dims = {}
data = {'labels': [], 'dims': []}
slices = {}
for dim in range(0, max_dim + 1):
data[dim], slices[dim] = init_keys(dim, keys)
for complex in data_list:
# Collect cochain-wise items
for dim in range(0, max_dim + 1):
# Get cochain, if present
cochain = None
if dim in complex.cochains:
cochain = complex.cochains[dim]
# Iterate on keys
for key in keys[dim]:
if cochain is not None and hasattr(cochain, key) and cochain[key] is not None:
data[dim][key].append(cochain[key])
if isinstance(cochain[key], Tensor) and cochain[key].dim() > 0:
cat_dim = cochain.__cat_dim__(key, cochain[key])
cat_dim = 0 if cat_dim is None else cat_dim
s = slices[dim][key][-1] + cochain[key].size(cat_dim)
if key not in cat_dims:
cat_dims[key] = cat_dim
else:
assert cat_dim == cat_dims[key]
if key not in tensor_dims:
tensor_dims[key] = cochain[key].dim()
else:
assert cochain[key].dim() == tensor_dims[key]
else:
s = slices[dim][key][-1] + 1
if key not in types:
types[key] = type(cochain[key])
else:
assert type(cochain[key]) is types[key]
else:
s = slices[dim][key][-1] + 0
slices[dim][key].append(s)
# Handle non-keys
# TODO: could they be considered as keys as well?
num = None
num_up = None
num_down = None
if cochain is not None:
if hasattr(cochain, '__num_cells__'):
num = cochain.__num_cells__
if hasattr(cochain, '__num_cells_up__'):
num_up = cochain.__num_cells_up__
if hasattr(cochain, '__num_cells_down__'):
num_down = cochain.__num_cells_down__
data[dim].__num_cells__.append(num)
data[dim].__num_cells_up__.append(num_up)
data[dim].__num_cells_down__.append(num_down)
# Collect complex-wise label(s) and dims
if not hasattr(complex, 'y'):
complex.y = None
if isinstance(complex.y, Tensor):
assert complex.y.size(0) == 1
data['labels'].append(complex.y)
data['dims'].append(complex.dimension)
# Pack lists into tensors
# Cochains
for dim in range(0, max_dim + 1):
for key in keys[dim]:
if types[key] is Tensor and len(data_list) > 1:
if tensor_dims[key] > 0:
cat_dim = cat_dims[key]
data[dim][key] = torch.cat(data[dim][key], dim=cat_dim)
else:
data[dim][key] = torch.stack(data[dim][key])
elif types[key] is Tensor: # Don't duplicate attributes...
data[dim][key] = data[dim][key][0]
elif types[key] is int or types[key] is float:
data[dim][key] = torch.tensor(data[dim][key])
slices[dim][key] = torch.tensor(slices[dim][key], dtype=torch.long)
# Labels and dims
item = data['labels'][0]
if isinstance(item, Tensor) and len(data_list) > 1:
if item.dim() > 0:
cat_dim = 0
data['labels'] = torch.cat(data['labels'], dim=cat_dim)
else:
data['labels'] = torch.stack(data['labels'])
elif isinstance(item, Tensor):
data['labels'] = data['labels'][0]
elif isinstance(item, int) or isinstance(item, float):
data['labels'] = torch.tensor(data['labels'])
data['dims'] = torch.tensor(data['dims'])
return data, slices
def copy(self, idx=None):
if idx is None:
data_list = [self.get(i) for i in range(len(self))]
else:
data_list = [self.get(i) for i in idx]
dataset = copy.copy(self)
dataset.__indices__ = None
dataset.__data_list__ = data_list
        dataset.data, dataset.slices = self.collate(data_list, self.max_dim)
return dataset
def get_split(self, split):
if split not in ['train', 'valid', 'test']:
raise ValueError(f'Unknown split {split}.')
idx = self.get_idx_split()[split]
if idx is None:
raise AssertionError("No split information found.")
if self.__indices__ is not None:
raise AssertionError("Cannot get the split for a subset of the original dataset.")
return self[idx]
| 14,872 | 38.873995 | 147 | py |
cwn | cwn-main/data/datasets/test_zinc.py | import torch
import os.path as osp
import pytest
from data.data_loading import load_dataset
from data.helper_test import (check_edge_index_are_the_same,
check_edge_attr_are_the_same, get_rings,
get_complex_rings)
from torch_geometric.datasets import ZINC
@pytest.mark.slow
def test_zinc_splits_are_retained():
dataset1 = load_dataset("ZINC", max_ring_size=7, use_edge_features=True)
dataset1_train = dataset1.get_split('train')
dataset1_valid = dataset1.get_split('valid')
dataset1_test = dataset1.get_split('test')
raw_dir = osp.join(dataset1.root, 'raw')
dataset2_train = ZINC(raw_dir, subset=True, split='train')
dataset2_valid = ZINC(raw_dir, subset=True, split='val')
dataset2_test = ZINC(raw_dir, subset=True, split='test')
datasets1 = [dataset1_train, dataset1_valid, dataset1_test]
datasets2 = [dataset2_train, dataset2_valid, dataset2_test]
datasets = zip(datasets1, datasets2)
for datas1, datas2 in datasets:
for i, _ in enumerate(datas1):
data1, data2 = datas1[i], datas2[i]
assert torch.equal(data1.y, data2.y)
assert torch.equal(data1.cochains[0].x, data2.x)
assert data1.cochains[1].x.size(0) == (data2.edge_index.size(1) // 2)
check_edge_index_are_the_same(data1.cochains[0].upper_index, data2.edge_index)
check_edge_attr_are_the_same(data1.cochains[1].boundary_index,
data1.cochains[1].x, data2.edge_index, data2.edge_attr)
@pytest.mark.slow
def test_we_find_only_the_induced_cycles_on_zinc():
max_ring = 7
dataset = load_dataset("ZINC", max_ring_size=max_ring, use_edge_features=True)
    # Check only on validation to save time. I've also run it once on the whole dataset and it passes.
dataset = dataset.get_split('valid')
for complex in dataset:
nx_rings = get_rings(complex.nodes.num_cells, complex.nodes.upper_index,
max_ring=max_ring)
if 2 not in complex.cochains:
assert len(nx_rings) == 0
continue
complex_rings = get_complex_rings(complex.cochains[2].boundary_index, complex.edges.boundary_index)
assert len(complex_rings) > 0
assert len(nx_rings) == complex.cochains[2].num_cells
assert nx_rings == complex_rings
| 2,395 | 38.933333 | 107 | py |
cwn | cwn-main/data/datasets/tu.py | import os
import torch
import pickle
import numpy as np
from definitions import ROOT_DIR
from data.tu_utils import load_data, S2V_to_PyG, get_fold_indices
from data.utils import convert_graph_dataset_with_gudhi, convert_graph_dataset_with_rings
from data.datasets import InMemoryComplexDataset
def load_tu_graph_dataset(name, root=os.path.join(ROOT_DIR, 'datasets'), degree_as_tag=False, fold=0, seed=0):
raw_dir = os.path.join(root, name, 'raw')
load_from = os.path.join(raw_dir, '{}_graph_list_degree_as_tag_{}.pkl'.format(name, degree_as_tag))
if os.path.isfile(load_from):
with open(load_from, 'rb') as handle:
graph_list = pickle.load(handle)
else:
data, num_classes = load_data(raw_dir, name, degree_as_tag)
print('Converting graph data into PyG format...')
graph_list = [S2V_to_PyG(datum) for datum in data]
with open(load_from, 'wb') as handle:
pickle.dump(graph_list, handle)
train_filename = os.path.join(raw_dir, '10fold_idx', 'train_idx-{}.txt'.format(fold + 1))
test_filename = os.path.join(raw_dir, '10fold_idx', 'test_idx-{}.txt'.format(fold + 1))
if os.path.isfile(train_filename) and os.path.isfile(test_filename):
        # NB: we use the loaded test indices as val_ids and set test_ids to None
        # to make it more convenient to work with the training pipeline
train_ids = np.loadtxt(train_filename, dtype=int).tolist()
val_ids = np.loadtxt(test_filename, dtype=int).tolist()
else:
train_ids, val_ids = get_fold_indices(graph_list, seed, fold)
test_ids = None
return graph_list, train_ids, val_ids, test_ids
class TUDataset(InMemoryComplexDataset):
"""A dataset of complexes obtained by lifting graphs from TUDatasets."""
def __init__(self, root, name, max_dim=2, num_classes=2, degree_as_tag=False, fold=0,
init_method='sum', seed=0, include_down_adj=False, max_ring_size=None):
self.name = name
self.degree_as_tag = degree_as_tag
assert max_ring_size is None or max_ring_size > 3
self._max_ring_size = max_ring_size
cellular = (max_ring_size is not None)
if cellular:
assert max_dim == 2
super(TUDataset, self).__init__(root, max_dim=max_dim, num_classes=num_classes,
init_method=init_method, include_down_adj=include_down_adj, cellular=cellular)
self.data, self.slices = torch.load(self.processed_paths[0])
self.fold = fold
self.seed = seed
train_filename = os.path.join(self.raw_dir, '10fold_idx', 'train_idx-{}.txt'.format(fold + 1))
test_filename = os.path.join(self.raw_dir, '10fold_idx', 'test_idx-{}.txt'.format(fold + 1))
if os.path.isfile(train_filename) and os.path.isfile(test_filename):
            # NB: we use the loaded test indices as val_ids and set test_ids to None
            # to make it more convenient to work with the training pipeline
self.train_ids = np.loadtxt(train_filename, dtype=int).tolist()
self.val_ids = np.loadtxt(test_filename, dtype=int).tolist()
else:
train_ids, val_ids = get_fold_indices(self, self.seed, self.fold)
self.train_ids = train_ids
self.val_ids = val_ids
self.test_ids = None
# TODO: Add this later to our zip
# tune_train_filename = os.path.join(self.raw_dir, 'tests_train_split.txt'.format(fold + 1))
# self.tune_train_ids = np.loadtxt(tune_train_filename, dtype=int).tolist()
# tune_test_filename = os.path.join(self.raw_dir, 'tests_val_split.txt'.format(fold + 1))
# self.tune_val_ids = np.loadtxt(tune_test_filename, dtype=int).tolist()
# self.tune_test_ids = None
@property
def processed_dir(self):
"""This is overwritten, so the cellular complex data is placed in another folder"""
directory = super(TUDataset, self).processed_dir
suffix = f"_{self._max_ring_size}rings" if self._cellular else ""
suffix += f"_down_adj" if self.include_down_adj else ""
return directory + suffix
@property
def processed_file_names(self):
return ['{}_complex_list.pt'.format(self.name)]
@property
def raw_file_names(self):
# The processed graph files are our raw files.
# They are obtained when running the initial data conversion S2V_to_PyG.
return ['{}_graph_list_degree_as_tag_{}.pkl'.format(self.name, self.degree_as_tag)]
def download(self):
# This will process the raw data into a list of PyG Data objs.
data, num_classes = load_data(self.raw_dir, self.name, self.degree_as_tag)
self._num_classes = num_classes
print('Converting graph data into PyG format...')
graph_list = [S2V_to_PyG(datum) for datum in data]
with open(self.raw_paths[0], 'wb') as handle:
pickle.dump(graph_list, handle)
def process(self):
with open(self.raw_paths[0], 'rb') as handle:
graph_list = pickle.load(handle)
if self._cellular:
print("Converting the dataset accounting for rings...")
complexes, _, _ = convert_graph_dataset_with_rings(graph_list, max_ring_size=self._max_ring_size,
include_down_adj=self.include_down_adj,
init_method=self._init_method,
init_edges=True, init_rings=True)
else:
print("Converting the dataset with gudhi...")
# TODO: eventually remove the following comment
# What about the init_method here? Adding now, although I remember we had handled this
complexes, _, _ = convert_graph_dataset_with_gudhi(graph_list, expansion_dim=self.max_dim,
include_down_adj=self.include_down_adj,
init_method=self._init_method)
torch.save(self.collate(complexes, self.max_dim), self.processed_paths[0])
def get_tune_idx_split(self):
raise NotImplementedError('Not implemented yet')
# idx_split = {
# 'train': self.tune_train_ids,
# 'valid': self.tune_val_ids,
# 'test': self.tune_test_ids}
# return idx_split
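# A minimal usage sketch (the root path, dataset name and ring size below are
# illustrative assumptions, not values taken from the repository's scripts):
#
#   dataset = TUDataset(root='./datasets/PROTEINS', name='PROTEINS',
#                       fold=0, max_ring_size=6)
#   print(len(dataset.train_ids), len(dataset.val_ids))  # fold-specific split sizes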
| 6,539 | 49.307692 | 110 | py |
cwn | cwn-main/data/datasets/ogb.py | import torch
import os.path as osp
from data.utils import convert_graph_dataset_with_rings
from data.datasets import InMemoryComplexDataset
from ogb.graphproppred import PygGraphPropPredDataset
class OGBDataset(InMemoryComplexDataset):
"""This is OGB graph-property prediction. This are graph-wise classification tasks."""
def __init__(self, root, name, max_ring_size, use_edge_features=False, transform=None,
pre_transform=None, pre_filter=None, init_method='sum',
include_down_adj=False, simple=False, n_jobs=2):
self.name = name
self._max_ring_size = max_ring_size
self._use_edge_features = use_edge_features
self._simple = simple
self._n_jobs = n_jobs
super(OGBDataset, self).__init__(root, transform, pre_transform, pre_filter,
max_dim=2, init_method=init_method,
include_down_adj=include_down_adj, cellular=True)
self.data, self.slices, idx, self.num_tasks = self.load_dataset()
self.train_ids = idx['train']
self.val_ids = idx['valid']
self.test_ids = idx['test']
@property
def raw_file_names(self):
name = self.name.replace('-', '_') # Replacing is to follow OGB folder naming convention
# The processed graph files are our raw files.
return [f'{name}/processed/geometric_data_processed.pt']
@property
def processed_file_names(self):
return [f'{self.name}_complex.pt', f'{self.name}_idx.pt', f'{self.name}_tasks.pt']
@property
def processed_dir(self):
"""Overwrite to change name based on edge and simple feats"""
directory = super(OGBDataset, self).processed_dir
suffix1 = f"_{self._max_ring_size}rings" if self._cellular else ""
suffix2 = "-E" if self._use_edge_features else ""
suffix3 = "-S" if self._simple else ""
return directory + suffix1 + suffix2 + suffix3
def download(self):
# Instantiating this will download and process the graph dataset.
dataset = PygGraphPropPredDataset(self.name, self.raw_dir)
def load_dataset(self):
"""Load the dataset from here and process it if it doesn't exist"""
print("Loading dataset from disk...")
data, slices = torch.load(self.processed_paths[0])
idx = torch.load(self.processed_paths[1])
tasks = torch.load(self.processed_paths[2])
return data, slices, idx, tasks
def process(self):
# At this stage, the graph dataset is already downloaded and processed
dataset = PygGraphPropPredDataset(self.name, self.raw_dir)
split_idx = dataset.get_idx_split()
if self._simple: # Only retain the top two node/edge features
print('Using simple features')
dataset.data.x = dataset.data.x[:,:2]
dataset.data.edge_attr = dataset.data.edge_attr[:,:2]
# NB: the init method would basically have no effect if
# we use edge features and do not initialize rings.
print(f"Converting the {self.name} dataset to a cell complex...")
complexes, _, _ = convert_graph_dataset_with_rings(
dataset,
max_ring_size=self._max_ring_size,
include_down_adj=self.include_down_adj,
init_method=self._init_method,
init_edges=self._use_edge_features,
init_rings=False,
n_jobs=self._n_jobs)
print(f'Saving processed dataset in {self.processed_paths[0]}...')
torch.save(self.collate(complexes, self.max_dim), self.processed_paths[0])
print(f'Saving idx in {self.processed_paths[1]}...')
torch.save(split_idx, self.processed_paths[1])
print(f'Saving num_tasks in {self.processed_paths[2]}...')
torch.save(dataset.num_tasks, self.processed_paths[2])
def load_ogb_graph_dataset(root, name):
raw_dir = osp.join(root, 'raw')
dataset = PygGraphPropPredDataset(name, raw_dir)
idx = dataset.get_idx_split()
return dataset, idx['train'], idx['valid'], idx['test']
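# A minimal usage sketch for the complex-lifted OGB dataset (the root path and
# ring size are illustrative assumptions):
#
#   dataset = OGBDataset('./datasets/MOLHIV', 'ogbg-molhiv', max_ring_size=6,
#                        use_edge_features=True)
#   print(dataset.num_tasks, len(dataset.train_ids), len(dataset.test_ids))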
| 4,176 | 42.061856 | 97 | py |
cwn | cwn-main/data/datasets/ringlookup.py | import torch
import os.path as osp
from data.datasets import InMemoryComplexDataset
from data.datasets.ring_utils import generate_ringlookup_graph_dataset
from data.utils import convert_graph_dataset_with_rings
class RingLookupDataset(InMemoryComplexDataset):
"""A dataset where the task is to perform dictionary lookup on the features
of a set of nodes forming a ring. The feature of each node is composed of a key and a value
and one must assign to a target node the value of the key its feature encodes.
"""
def __init__(self, root, nodes=10):
self.name = 'RING-LOOKUP'
self._nodes = nodes
super(RingLookupDataset, self).__init__(
root, None, None, None, max_dim=2, cellular=True, num_classes=nodes-1)
self.data, self.slices = torch.load(self.processed_paths[0])
idx = torch.load(self.processed_paths[1])
self.train_ids = idx[0]
self.val_ids = idx[1]
self.test_ids = idx[2]
@property
def processed_dir(self):
"""This is overwritten, so the cellular complex data is placed in another folder"""
return osp.join(self.root, 'complex')
@property
def processed_file_names(self):
return [f'ringlookup-n{self._nodes}.pkl', f'idx-n{self._nodes}.pkl']
@property
def raw_file_names(self):
# No raw files, but must be implemented
return []
def download(self):
# Nothing to download, but must be implemented
pass
def process(self):
train = generate_ringlookup_graph_dataset(self._nodes, samples=10000)
val = generate_ringlookup_graph_dataset(self._nodes, samples=1000)
dataset = train + val
train_ids = list(range(len(train)))
val_ids = list(range(len(train), len(train) + len(val)))
print("Converting dataset to a cell complex...")
complexes, _, _ = convert_graph_dataset_with_rings(
dataset,
max_ring_size=self._nodes,
include_down_adj=False,
init_edges=True,
init_rings=True,
n_jobs=4)
for complex in complexes:
# Add mask for the target node.
mask = torch.zeros(complex.nodes.num_cells, dtype=torch.bool)
mask[0] = 1
setattr(complex.cochains[0], 'mask', mask)
            # Zero out the higher-order features (edge and two-cell features)
complex.edges.x = torch.zeros_like(complex.edges.x)
complex.two_cells.x = torch.zeros_like(complex.two_cells.x)
assert complex.two_cells.num_cells == 1
path = self.processed_paths[0]
print(f'Saving processed dataset in {path}....')
torch.save(self.collate(complexes, 2), path)
idx = [train_ids, val_ids, None]
path = self.processed_paths[1]
print(f'Saving idx in {path}....')
torch.save(idx, path)
def load_ring_lookup_dataset(nodes=10):
train = generate_ringlookup_graph_dataset(nodes, samples=10000)
val = generate_ringlookup_graph_dataset(nodes, samples=1000)
dataset = train + val
train_ids = list(range(len(train)))
val_ids = list(range(len(train), len(train) + len(val)))
return dataset, train_ids, val_ids, None
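# A minimal usage sketch (the root path is an illustrative assumption):
#
#   dataset = RingLookupDataset('./datasets/RING-LOOKUP', nodes=10)
#   print(len(dataset.train_ids), len(dataset.val_ids))
#
# or, to obtain the raw graphs without building cell complexes:
#
#   graphs, train_ids, val_ids, _ = load_ring_lookup_dataset(nodes=10)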
| 3,214 | 32.842105 | 98 | py |
cwn | cwn-main/data/datasets/test_ocean.py | import torch
import pytest
from data.datasets.ocean_utils import load_ocean_dataset
@pytest.mark.data
def test_ocean_dataset_generation():
train, test, _ = load_ocean_dataset()
assert len(train) == 160
assert len(test) == 40
for cochain in train + test:
# checks the upper/lower orientation features are consistent
# in shape with the upper/lower indices
assert len(cochain.upper_orient) == cochain.upper_index.size(1)
assert len(cochain.lower_orient) == cochain.lower_index.size(1)
# checks the upper and lower indices are consistent with the number of edges
assert cochain.upper_index.max() < cochain.x.size(0), print(cochain.upper_index.max(),
cochain.x.size(0))
assert cochain.lower_index.max() < cochain.x.size(0), print(cochain.lower_index.max(),
cochain.x.size(0))
# checks the values for orientations are either +1 (coherent) or -1 (not coherent)
assert (torch.sum(cochain.upper_orient == 1)
+ torch.sum(cochain.upper_orient == -1) == cochain.upper_orient.numel())
assert (torch.sum(cochain.lower_orient == 1)
+ torch.sum(cochain.lower_orient == -1) == cochain.lower_orient.numel())
| 1,247 | 43.571429 | 94 | py |
cwn | cwn-main/data/datasets/ringtransfer.py | import torch
import os.path as osp
from data.datasets import InMemoryComplexDataset
from data.datasets.ring_utils import generate_ring_transfer_graph_dataset
from data.utils import convert_graph_dataset_with_rings
class RingTransferDataset(InMemoryComplexDataset):
"""A dataset where the task is to transfer features from a source node to a target node
placed on the other side of a ring.
"""
def __init__(self, root, nodes=10, train=5000, test=500):
self.name = 'RING-TRANSFER'
self._nodes = nodes
self._num_classes = 5
self._train = train
self._test = test
super(RingTransferDataset, self).__init__(root, None, None, None,
max_dim=2, cellular=True, num_classes=self._num_classes)
self.data, self.slices = torch.load(self.processed_paths[0])
idx = torch.load(self.processed_paths[1])
self.train_ids = idx[0]
self.val_ids = idx[1]
self.test_ids = idx[2]
@property
def processed_dir(self):
"""This is overwritten, so the cellular complex data is placed in another folder"""
return osp.join(self.root, 'complex')
@property
def processed_file_names(self):
return [f'ringtree-n{self._nodes}.pkl', f'idx-n{self._nodes}.pkl']
@property
def raw_file_names(self):
# No raw files, but must be implemented
return []
def download(self):
# Nothing to download, but must be implemented
pass
def process(self):
train = generate_ring_transfer_graph_dataset(self._nodes, classes=self._num_classes,
samples=self._train)
val = generate_ring_transfer_graph_dataset(self._nodes, classes=self._num_classes,
samples=self._test)
dataset = train + val
train_ids = list(range(len(train)))
val_ids = list(range(len(train), len(train) + len(val)))
print("Converting dataset to a cell complex...")
complexes, _, _ = convert_graph_dataset_with_rings(
dataset,
max_ring_size=self._nodes,
include_down_adj=False,
init_edges=True,
init_rings=True,
n_jobs=4)
for complex in complexes:
# Add mask for the target node.
mask = torch.zeros(complex.nodes.num_cells, dtype=torch.bool)
mask[0] = 1
setattr(complex.cochains[0], 'mask', mask)
            # Zero out the higher-order features (edge and two-cell features)
complex.edges.x = torch.zeros_like(complex.edges.x)
complex.two_cells.x = torch.zeros_like(complex.two_cells.x)
path = self.processed_paths[0]
print(f'Saving processed dataset in {path}....')
torch.save(self.collate(complexes, 2), path)
idx = [train_ids, val_ids, None]
path = self.processed_paths[1]
print(f'Saving idx in {path}....')
torch.save(idx, path)
def load_ring_transfer_dataset(nodes=10, train=5000, test=500, classes=5):
train = generate_ring_transfer_graph_dataset(nodes, classes=classes, samples=train)
val = generate_ring_transfer_graph_dataset(nodes, classes=classes, samples=test)
dataset = train + val
train_ids = list(range(len(train)))
val_ids = list(range(len(train), len(train) + len(val)))
return dataset, train_ids, val_ids, None
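# A minimal usage sketch (the root path is an illustrative assumption):
#
#   dataset = RingTransferDataset('./datasets/RING-TRANSFER', nodes=10)
#
# or, to obtain the raw graphs without building cell complexes:
#
#   graphs, train_ids, val_ids, _ = load_ring_transfer_dataset(nodes=10, train=5000, test=500)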
| 3,322 | 32.908163 | 92 | py |
cwn | cwn-main/data/datasets/dummy.py | import torch
from data.datasets import InMemoryComplexDataset
from data.dummy_complexes import get_testing_complex_list, get_mol_testing_complex_list
class DummyDataset(InMemoryComplexDataset):
"""A dummy dataset using a list of hand-crafted cell complexes with many edge cases."""
def __init__(self, root):
self.name = 'DUMMY'
super(DummyDataset, self).__init__(root, max_dim=3, num_classes=2,
init_method=None, include_down_adj=True, cellular=False)
self.data, self.slices = torch.load(self.processed_paths[0])
self.train_ids = list(range(self.len()))
self.val_ids = list(range(self.len()))
self.test_ids = list(range(self.len()))
@property
def processed_file_names(self):
name = self.name
return [f'{name}_complex_list.pt']
@property
def raw_file_names(self):
# The processed graph files are our raw files.
# They are obtained when running the initial data conversion S2V_to_PyG.
return []
def download(self):
return
@staticmethod
def factory():
complexes = get_testing_complex_list()
for c, complex in enumerate(complexes):
complex.y = torch.LongTensor([c % 2])
return complexes
def process(self):
print("Instantiating complexes...")
complexes = self.factory()
torch.save(self.collate(complexes, self.max_dim), self.processed_paths[0])
class DummyMolecularDataset(InMemoryComplexDataset):
"""A dummy dataset using a list of hand-crafted molecular cell complexes with many edge cases."""
def __init__(self, root, remove_2feats=False):
self.name = 'DUMMYM'
self.remove_2feats = remove_2feats
super(DummyMolecularDataset, self).__init__(root, max_dim=2, num_classes=2,
init_method=None, include_down_adj=True, cellular=True)
self.data, self.slices = torch.load(self.processed_paths[0])
self.train_ids = list(range(self.len()))
self.val_ids = list(range(self.len()))
self.test_ids = list(range(self.len()))
@property
def processed_file_names(self):
name = self.name
remove_2feats = self.remove_2feats
fn = f'{name}_complex_list'
if remove_2feats:
fn += '_removed_2feats'
fn += '.pt'
return [fn]
@property
def raw_file_names(self):
# The processed graph files are our raw files.
# They are obtained when running the initial data conversion S2V_to_PyG.
return []
def download(self):
return
@staticmethod
def factory(remove_2feats=False):
complexes = get_mol_testing_complex_list()
for c, complex in enumerate(complexes):
if remove_2feats:
if 2 in complex.cochains:
complex.cochains[2].x = None
complex.y = torch.LongTensor([c % 2])
return complexes
def process(self):
print("Instantiating complexes...")
complexes = self.factory(self.remove_2feats)
torch.save(self.collate(complexes, self.max_dim), self.processed_paths[0])
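# A minimal usage sketch (the root paths are illustrative assumptions):
#
#   dummy = DummyDataset('./datasets/DUMMY')
#   dummy_mol = DummyMolecularDataset('./datasets/DUMMYM', remove_2feats=True)
#
# Both datasets reuse the full index list for the train, validation and test splits,
# which makes them convenient for smoke-testing the training pipeline end to end.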
| 3,221 | 34.021739 | 101 | py |
cwn | cwn-main/exp/parser.py | import os
import time
import argparse
from definitions import ROOT_DIR
def get_parser():
parser = argparse.ArgumentParser(description='CWN experiment.')
parser.add_argument('--seed', type=int, default=43,
                        help='random seed to set (default: 43, i.e. the non-meaning of life)')
parser.add_argument('--start_seed', type=int, default=0,
help='The initial seed when evaluating on multiple seeds.')
parser.add_argument('--stop_seed', type=int, default=9,
help='The final seed when evaluating on multiple seeds.')
parser.add_argument('--device', type=int, default=0,
help='which gpu to use if any (default: 0)')
parser.add_argument('--model', type=str, default='sparse_cin',
                        help='model, possible choices: cin, sparse_cin, cin++, dummy, ... (default: sparse_cin)')
parser.add_argument('--use_coboundaries', type=str, default='False',
help='whether to use coboundary features for up-messages in sparse_cin (default: False)')
parser.add_argument('--include_down_adj', action='store_true',
help='whether to use lower adjacencies (i.e. CIN++ networks) (default: False)')
    # NB: use_coboundaries above is explicitly passed as a string because that is easier to handle in tuning
parser.add_argument('--indrop_rate', type=float, default=0.0,
help='inputs dropout rate for molec models(default: 0.0)')
parser.add_argument('--drop_rate', type=float, default=0.0,
                        help='dropout rate (default: 0.0)')
parser.add_argument('--drop_position', type=str, default='lin2',
help='where to apply the final dropout (default: lin2, i.e. _before_ lin2)')
parser.add_argument('--nonlinearity', type=str, default='relu',
help='activation function (default: relu)')
parser.add_argument('--readout', type=str, default='sum',
help='readout function (default: sum)')
parser.add_argument('--final_readout', type=str, default='sum',
help='final readout function (default: sum)')
parser.add_argument('--readout_dims', type=int, nargs='+', default=(0, 1, 2),
help='dims at which to apply the final readout (default: 0 1 2, i.e. nodes, edges, 2-cells)')
parser.add_argument('--jump_mode', type=str, default=None,
help='Mode for JK (default: None, i.e. no JK)')
parser.add_argument('--graph_norm', type=str, default='bn', choices=['bn', 'ln', 'id'],
help='Normalization layer to use inside the model')
parser.add_argument('--lr', type=float, default=0.001,
help='learning rate (default: 0.001)')
parser.add_argument('--lr_scheduler', type=str, default='StepLR',
help='learning rate decay scheduler (default: StepLR)')
parser.add_argument('--lr_scheduler_decay_steps', type=int, default=50,
help='number of epochs between lr decay (default: 50)')
parser.add_argument('--lr_scheduler_decay_rate', type=float, default=0.5,
help='strength of lr decay (default: 0.5)')
parser.add_argument('--lr_scheduler_patience', type=float, default=10,
help='patience for `ReduceLROnPlateau` lr decay (default: 10)')
parser.add_argument('--lr_scheduler_min', type=float, default=0.00001,
help='min LR for `ReduceLROnPlateau` lr decay (default: 1e-5)')
parser.add_argument('--num_layers', type=int, default=5,
help='number of message passing layers (default: 5)')
parser.add_argument('--emb_dim', type=int, default=64,
                        help='dimensionality of hidden units in models (default: 64)')
parser.add_argument('--batch_size', type=int, default=32,
help='input batch size for training (default: 32)')
parser.add_argument('--epochs', type=int, default=100,
help='number of epochs to train (default: 100)')
parser.add_argument('--num_workers', type=int, default=0,
help='number of workers (default: 0)')
parser.add_argument('--dataset', type=str, default="PROTEINS",
help='dataset name (default: PROTEINS)')
parser.add_argument('--task_type', type=str, default='classification',
                        help='task type: classification, bin_classification, regression, mse_regression or isomorphism (default: classification)')
parser.add_argument('--eval_metric', type=str, default='accuracy',
help='evaluation metric (default: accuracy)')
    parser.add_argument('--iso_eps', type=float, default=0.01,
help='Threshold to define (non-)isomorphism')
parser.add_argument('--minimize', action='store_true',
help='whether to minimize evaluation metric or not')
    parser.add_argument('--max_dim', type=int, default=2,
help='maximum cellular dimension (default: 2, i.e. two_cells)')
parser.add_argument('--max_ring_size', type=int, default=None,
help='maximum ring size to look for (default: None, i.e. do not look for rings)')
parser.add_argument('--result_folder', type=str, default=os.path.join(ROOT_DIR, 'exp', 'results'),
                        help='folder to output results (default: `exp/results` under the project root)')
parser.add_argument('--exp_name', type=str, default=str(time.time()),
                        help='name for specific experiment; if not provided, a name based on the '+\
                             'unix timestamp at launch will be used')
parser.add_argument('--dump_curves', action='store_true',
help='whether to dump the training curves to disk')
parser.add_argument('--untrained', action='store_true',
help='whether to skip training')
parser.add_argument('--fold', type=int, default=None,
help='fold index for k-fold cross-validation experiments')
parser.add_argument('--folds', type=int, default=None,
help='The number of folds to run on in cross validation experiments')
parser.add_argument('--init_method', type=str, default='sum',
help='How to initialise features at higher levels (sum, mean)')
parser.add_argument('--train_eval_period', type=int, default=10,
help='How often to evaluate on train.')
parser.add_argument('--tune', action='store_true', help='Use the tuning indexes')
parser.add_argument('--flow_points', type=int, default=400,
help='Number of points to use for the flow experiment')
parser.add_argument('--flow_classes', type=int, default=3,
help='Number of classes for the flow experiment')
parser.add_argument('--train_orient', type=str, default='default',
help='What orientation to use for the training complexes')
parser.add_argument('--test_orient', type=str, default='default',
help='What orientation to use for the testing complexes')
parser.add_argument('--fully_orient_invar', action='store_true',
help='Whether to apply torch.abs from the first layer')
parser.add_argument('--use_edge_features', action='store_true',
help="Use edge features for molecular graphs")
parser.add_argument('--simple_features', action='store_true',
help="Whether to use only a subset of original features, specific to ogb-mol*")
parser.add_argument('--early_stop', action='store_true', help='Stop when minimum LR is reached.')
parser.add_argument('--paraid', type=int, default=0,
help='model id')
parser.add_argument('--preproc_jobs', type=int, default=2,
help='Jobs to use for the dataset preprocessing. For all jobs use "-1".'
'For sequential processing (no parallelism) use "1"')
return parser
def validate_args(args):
"""Performs dataset-dependent sanity checks on the supplied args."""
if args.dataset == 'CSL':
assert args.model == 'embed_sparse_cin'
assert args.task_type == 'classification'
assert not args.minimize
assert args.lr_scheduler == 'ReduceLROnPlateau'
assert args.eval_metric == 'accuracy'
assert args.fold is not None
assert not args.simple_features
assert args.graph_norm == 'ln'
elif args.dataset == 'RING-TRANSFER' or args.dataset == 'RING-LOOKUP':
assert args.model == 'ring_sparse_cin' or args.model == 'gin_ring'
assert args.task_type == 'classification'
assert not args.minimize
assert args.lr_scheduler == 'None'
assert args.eval_metric == 'accuracy'
assert args.fold is None
assert not args.simple_features
assert args.max_ring_size is not None and args.max_ring_size > 3
if args.model == 'ring_sparse_cin':
assert args.graph_norm == 'id'
if args.model == 'gin_ring':
assert args.graph_norm == 'bn'
elif args.dataset.startswith('ZINC'):
assert args.model.startswith('embed')
if args.model == 'embed_cin++':
assert args.include_down_adj is True
assert args.task_type == 'regression'
assert args.minimize
assert args.eval_metric == 'mae'
assert args.lr_scheduler == 'ReduceLROnPlateau'
assert not args.simple_features
elif args.dataset in ['MOLHIV', 'MOLPCBA', 'MOLTOX21', 'MOLTOXCAST', 'MOLMUV',
'MOLBACE', 'MOLBBBP', 'MOLCLINTOX', 'MOLSIDER', 'MOLESOL',
'MOLFREESOLV', 'MOLLIPO']:
assert args.model == 'ogb_embed_sparse_cin' or args.model == "ogb_embed_cin++"
if args.model == 'ogb_embed_cin++':
assert args.include_down_adj is True
assert args.eval_metric == 'ogbg-'+args.dataset.lower()
assert args.jump_mode is None
if args.dataset in ['MOLESOL', 'MOLFREESOLV', 'MOLLIPO']:
assert args.task_type == 'mse_regression'
assert args.minimize
else:
assert args.task_type == 'bin_classification'
assert not args.minimize
elif args.dataset.startswith('sr'):
assert args.model in ['sparse_cin', 'mp_agnostic']
assert args.eval_metric == 'isomorphism'
assert args.task_type == 'isomorphism'
assert args.jump_mode is None
assert args.drop_rate == 0.0
assert args.untrained
assert args.nonlinearity == 'elu'
assert args.readout == 'sum'
assert args.final_readout == 'sum'
assert not args.simple_features
elif args.dataset == 'FLOW' or args.dataset == 'OCEAN':
assert args.model == 'edge_orient' or args.model == 'edge_mpnn'
assert args.eval_metric == 'accuracy'
assert args.task_type == 'classification'
assert args.jump_mode is None
assert args.drop_rate == 0.0
assert not args.untrained
assert not args.simple_features
assert not args.minimize
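# A minimal sketch of how the parser and the checks above are typically combined
# (mirroring the entry point in exp/run_exp.py; the argument values are illustrative):
#
#   parser = get_parser()
#   args = parser.parse_args(['--dataset', 'PROTEINS', '--model', 'sparse_cin'])
#   validate_args(args)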
| 11,349 | 59.695187 | 126 | py |
cwn | cwn-main/exp/run_exp.py | import os
import numpy as np
import copy
import pickle
import torch
import torch.optim as optim
import random
from data.data_loading import DataLoader, load_dataset, load_graph_dataset
from torch_geometric.data import DataLoader as PyGDataLoader
from exp.train_utils import train, eval, Evaluator
from exp.parser import get_parser, validate_args
from mp.graph_models import GIN0, GINWithJK
from mp.models import CIN0, Dummy, SparseCIN, CINpp, EdgeOrient, EdgeMPNN, MessagePassingAgnostic
from mp.molec_models import EmbedSparseCIN, EmbedCINpp, OGBEmbedSparseCIN, OGBEmbedCINpp, EmbedSparseCINNoRings, EmbedGIN
from mp.ring_exp_models import RingSparseCIN, RingGIN
def main(args):
"""The common training and evaluation script used by all the experiments."""
# set device
device = torch.device(
"cuda:" + str(args.device)) if torch.cuda.is_available() else torch.device("cpu")
print("==========================================================")
print("Using device", str(device))
print(f"Fold: {args.fold}")
print(f"Seed: {args.seed}")
print("======================== Args ===========================")
print(args)
print("===================================================")
# Set the seed for everything
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
np.random.seed(args.seed)
random.seed(args.seed)
# Set double precision for SR experiments
if args.task_type == 'isomorphism':
assert args.dataset.startswith('sr')
torch.set_default_dtype(torch.float64)
# Create results folder
result_folder = os.path.join(
args.result_folder, f'{args.dataset}-{args.exp_name}', f'seed-{args.seed}')
if args.fold is not None:
result_folder = os.path.join(result_folder, f'fold-{args.fold}')
if not os.path.exists(result_folder):
os.makedirs(result_folder)
filename = os.path.join(result_folder, 'results.txt')
if args.model.startswith('gin'): # load graph dataset
graph_list, train_ids, val_ids, test_ids, num_classes = load_graph_dataset(
args.dataset, fold=args.fold, max_ring_size=args.max_ring_size)
train_graphs = [graph_list[i] for i in train_ids]
val_graphs = [graph_list[i] for i in val_ids]
train_loader = PyGDataLoader(train_graphs, batch_size=args.batch_size,
shuffle=True, num_workers=args.num_workers)
valid_loader = PyGDataLoader(val_graphs, batch_size=args.batch_size,
shuffle=False, num_workers=args.num_workers)
if test_ids is not None:
test_graphs = [graph_list[i] for i in test_ids]
test_loader = PyGDataLoader(test_graphs, batch_size=args.batch_size,
shuffle=False, num_workers=args.num_workers)
else:
test_loader = None
if args.dataset.startswith('sr'):
num_features = 1
num_classes = args.emb_dim
else:
num_features = graph_list[0].x.shape[1]
else:
# Data loading
dataset = load_dataset(args.dataset, max_dim=args.max_dim, fold=args.fold,
init_method=args.init_method, emb_dim=args.emb_dim,
flow_points=args.flow_points, flow_classes=args.flow_classes,
max_ring_size=args.max_ring_size,
use_edge_features=args.use_edge_features,
include_down_adj=args.include_down_adj,
simple_features=args.simple_features, n_jobs=args.preproc_jobs,
train_orient=args.train_orient, test_orient=args.test_orient)
if args.tune:
split_idx = dataset.get_tune_idx_split()
else:
split_idx = dataset.get_idx_split()
# Instantiate data loaders
train_loader = DataLoader(dataset.get_split('train'), batch_size=args.batch_size,
shuffle=True, num_workers=args.num_workers, max_dim=dataset.max_dim)
valid_loader = DataLoader(dataset.get_split('valid'), batch_size=args.batch_size,
shuffle=False, num_workers=args.num_workers, max_dim=dataset.max_dim)
test_split = split_idx.get("test", None)
test_loader = None
if test_split is not None:
test_loader = DataLoader(dataset.get_split('test'), batch_size=args.batch_size,
shuffle=False, num_workers=args.num_workers, max_dim=dataset.max_dim)
# Automatic evaluator, takes dataset name as input
evaluator = Evaluator(args.eval_metric, eps=args.iso_eps)
# Use coboundaries?
use_coboundaries = args.use_coboundaries.lower() == 'true'
# Readout dimensions
readout_dims = tuple(sorted(args.readout_dims))
# Instantiate model
# NB: here we assume to have the same number of features per dim
if args.model == 'cin':
model = CIN0(dataset.num_features_in_dim(0), # num_input_features
dataset.num_classes, # num_classes
args.num_layers, # num_layers
args.emb_dim, # hidden
dropout_rate=args.drop_rate, # dropout rate
max_dim=dataset.max_dim, # max_dim
jump_mode=args.jump_mode, # jump mode
nonlinearity=args.nonlinearity, # nonlinearity
readout=args.readout, # readout
).to(device)
elif args.model == 'sparse_cin':
model = SparseCIN(dataset.num_features_in_dim(0), # num_input_features
dataset.num_classes, # num_classes
args.num_layers, # num_layers
args.emb_dim, # hidden
dropout_rate=args.drop_rate, # dropout rate
max_dim=dataset.max_dim, # max_dim
jump_mode=args.jump_mode, # jump mode
nonlinearity=args.nonlinearity, # nonlinearity
readout=args.readout, # readout
final_readout=args.final_readout, # final readout
apply_dropout_before=args.drop_position, # where to apply dropout
use_coboundaries=use_coboundaries, # whether to use coboundaries in up-msg
graph_norm=args.graph_norm, # normalization layer
readout_dims=readout_dims # readout_dims
).to(device)
elif args.model == 'cin++':
model = CINpp(dataset.num_features_in_dim(0), # num_input_features
dataset.num_classes, # num_classes
args.num_layers, # num_layers
args.emb_dim, # hidden
dropout_rate=args.drop_rate, # dropout rate
max_dim=dataset.max_dim, # max_dim
jump_mode=args.jump_mode, # jump mode
nonlinearity=args.nonlinearity, # nonlinearity
readout=args.readout, # readout
final_readout=args.final_readout, # final readout
apply_dropout_before=args.drop_position, # where to apply dropout
use_coboundaries=use_coboundaries, # whether to use coboundaries in up-msg
graph_norm=args.graph_norm, # normalization layer
readout_dims=readout_dims # readout_dims
).to(device)
elif args.model == 'ring_sparse_cin':
model = RingSparseCIN(
dataset.num_features_in_dim(0), # num_input_features
dataset.num_classes, # num_classes
args.num_layers, # num_layers
args.emb_dim, # hidden
max_dim=dataset.max_dim, # max_dim
nonlinearity=args.nonlinearity, # nonlinearity
use_coboundaries=use_coboundaries, # whether to use coboundaries in up-msg
graph_norm=args.graph_norm, # normalization layer
).to(device)
elif args.model == 'gin':
model = GIN0(num_features, # num_input_features
args.num_layers, # num_layers
args.emb_dim, # hidden
num_classes, # num_classes
dropout_rate=args.drop_rate, # dropout rate
nonlinearity=args.nonlinearity, # nonlinearity
readout=args.readout, # readout
).to(device)
elif args.model == 'gin_ring':
model = RingGIN(num_features, # num_input_features
args.num_layers, # num_layers
args.emb_dim, # hidden
num_classes, # num_classes
nonlinearity=args.nonlinearity, # nonlinearity
graph_norm=args.graph_norm, # normalization layer
).to(device)
elif args.model == 'gin_jk':
model = GINWithJK(num_features, # num_input_features
args.num_layers, # num_layers
args.emb_dim, # hidden
num_classes, # num_classes
dropout_rate=args.drop_rate, # dropout rate
nonlinearity=args.nonlinearity, # nonlinearity
readout=args.readout, # readout
).to(device)
elif args.model == 'mp_agnostic':
model = MessagePassingAgnostic(
dataset.num_features_in_dim(0), # num_input_features
dataset.num_classes, # num_classes
args.emb_dim, # hidden
dropout_rate=args.drop_rate, # dropout rate
max_dim=dataset.max_dim, # max_dim
nonlinearity=args.nonlinearity, # nonlinearity
readout=args.readout, # readout
).to(device)
elif args.model == 'dummy':
model = Dummy(dataset.num_features_in_dim(0),
dataset.num_classes,
args.num_layers,
max_dim=dataset.max_dim,
readout=args.readout,
).to(device)
elif args.model == 'edge_orient':
model = EdgeOrient(1,
dataset.num_classes,
args.num_layers,
args.emb_dim, # hidden
readout=args.readout,
nonlinearity=args.nonlinearity, # nonlinearity
dropout_rate=args.drop_rate, # dropout rate
fully_invar=args.fully_orient_invar
).to(device)
elif args.model == 'edge_mpnn':
model = EdgeMPNN(1,
dataset.num_classes,
args.num_layers,
args.emb_dim, # hidden
readout=args.readout,
nonlinearity=args.nonlinearity, # nonlinearity
dropout_rate=args.drop_rate, # dropout rate
fully_invar=args.fully_orient_invar,
).to(device)
elif args.model == 'embed_sparse_cin':
model = EmbedSparseCIN(dataset.num_node_type, # The number of atomic types
dataset.num_edge_type, # The number of bond types
dataset.num_classes, # num_classes
args.num_layers, # num_layers
args.emb_dim, # hidden
dropout_rate=args.drop_rate, # dropout rate
max_dim=dataset.max_dim, # max_dim
jump_mode=args.jump_mode, # jump mode
nonlinearity=args.nonlinearity, # nonlinearity
readout=args.readout, # readout
final_readout=args.final_readout, # final readout
apply_dropout_before=args.drop_position, # where to apply dropout
use_coboundaries=use_coboundaries,
embed_edge=args.use_edge_features,
graph_norm=args.graph_norm, # normalization layer
readout_dims=readout_dims # readout_dims
).to(device)
elif args.model == 'embed_cin++':
model = EmbedCINpp(atom_types=dataset.num_node_type, # The number of atomic types
bond_types=dataset.num_edge_type, # The number of bond types
out_size=dataset.num_classes, # num_classes
num_layers=args.num_layers, # num_layers
hidden=args.emb_dim, # hidden
dropout_rate=args.drop_rate, # dropout rate
max_dim=dataset.max_dim, # max_dim
jump_mode=args.jump_mode, # jump mode
nonlinearity=args.nonlinearity, # nonlinearity
readout=args.readout, # readout
final_readout=args.final_readout, # final readout
apply_dropout_before=args.drop_position, # where to apply dropout
use_coboundaries=use_coboundaries,
embed_edge=args.use_edge_features,
graph_norm=args.graph_norm, # normalization layer
readout_dims=readout_dims # readout_dims
).to(device)
elif args.model == 'embed_sparse_cin_no_rings':
model = EmbedSparseCINNoRings(dataset.num_node_type, # The number of atomic types
dataset.num_edge_type, # The number of bond types
dataset.num_classes, # num_classes
args.num_layers, # num_layers
args.emb_dim, # hidden
dropout_rate=args.drop_rate, # dropout rate
nonlinearity=args.nonlinearity, # nonlinearity
readout=args.readout, # readout
final_readout=args.final_readout, # final readout
apply_dropout_before=args.drop_position, # where to apply dropout
use_coboundaries=use_coboundaries,
embed_edge=args.use_edge_features,
graph_norm=args.graph_norm, # normalization layer
).to(device)
elif args.model == 'embed_gin':
model = EmbedGIN(dataset.num_node_type, # The number of atomic types
dataset.num_edge_type, # The number of bond types
dataset.num_classes, # num_classes
args.num_layers, # num_layers
args.emb_dim, # hidden
dropout_rate=args.drop_rate, # dropout rate
nonlinearity=args.nonlinearity, # nonlinearity
readout=args.readout, # readout
apply_dropout_before=args.drop_position, # where to apply dropout
embed_edge=args.use_edge_features,
).to(device)
# TODO: handle this as above
elif args.model == 'ogb_embed_sparse_cin':
model = OGBEmbedSparseCIN(dataset.num_tasks, # out_size
args.num_layers, # num_layers
args.emb_dim, # hidden
dropout_rate=args.drop_rate, # dropout_rate
indropout_rate=args.indrop_rate, # in-dropout_rate
max_dim=dataset.max_dim, # max_dim
jump_mode=args.jump_mode, # jump_mode
nonlinearity=args.nonlinearity, # nonlinearity
readout=args.readout, # readout
final_readout=args.final_readout, # final readout
apply_dropout_before=args.drop_position, # where to apply dropout
use_coboundaries=use_coboundaries, # whether to use coboundaries
embed_edge=args.use_edge_features, # whether to use edge feats
graph_norm=args.graph_norm, # normalization layer
readout_dims=readout_dims # readout_dims
).to(device)
elif args.model == 'ogb_embed_cin++':
model = OGBEmbedCINpp(dataset.num_tasks, # out_size
args.num_layers, # num_layers
args.emb_dim, # hidden
dropout_rate=args.drop_rate, # dropout_rate
indropout_rate=args.indrop_rate, # in-dropout_rate
max_dim=dataset.max_dim, # max_dim
jump_mode=args.jump_mode, # jump_mode
nonlinearity=args.nonlinearity, # nonlinearity
readout=args.readout, # readout
final_readout=args.final_readout, # final readout
apply_dropout_before=args.drop_position, # where to apply dropout
use_coboundaries=use_coboundaries, # whether to use coboundaries
embed_edge=args.use_edge_features, # whether to use edge feats
graph_norm=args.graph_norm, # normalization layer
readout_dims=readout_dims # readout_dims
).to(device)
else:
raise ValueError('Invalid model type {}.'.format(args.model))
print("============= Model Parameters =================")
trainable_params = 0
total_params = 0
for name, param in model.named_parameters():
if param.requires_grad:
print(name, param.size())
trainable_params += param.numel()
total_params += param.numel()
print("============= Params stats ==================")
print(f"Trainable params: {trainable_params}")
print(f"Total params : {total_params}")
# instantiate optimiser
optimizer = optim.Adam(model.parameters(), lr=args.lr)
# instantiate learning rate decay
if args.lr_scheduler == 'ReduceLROnPlateau':
mode = 'min' if args.minimize else 'max'
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode=mode,
factor=args.lr_scheduler_decay_rate,
patience=args.lr_scheduler_patience,
verbose=True)
elif args.lr_scheduler == 'StepLR':
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.lr_scheduler_decay_steps,
gamma=args.lr_scheduler_decay_rate)
elif args.lr_scheduler == 'None':
scheduler = None
else:
raise NotImplementedError(f'Scheduler {args.lr_scheduler} is not currently supported.')
# (!) start training/evaluation
best_val_epoch = 0
valid_curve = []
test_curve = []
train_curve = []
train_loss_curve = []
params = []
if not args.untrained:
for epoch in range(1, args.epochs + 1):
# perform one epoch
print("=====Epoch {}".format(epoch))
print('Training...')
epoch_train_curve = train(model, device, train_loader, optimizer, args.task_type)
train_loss_curve += epoch_train_curve
epoch_train_loss = float(np.mean(epoch_train_curve))
# evaluate model
print('Evaluating...')
if epoch == 1 or epoch % args.train_eval_period == 0:
train_perf, _ = eval(model, device, train_loader, evaluator, args.task_type)
train_curve.append(train_perf)
valid_perf, epoch_val_loss = eval(model, device,
valid_loader, evaluator, args.task_type)#, dataset[split_idx["valid"]])
valid_curve.append(valid_perf)
if test_loader is not None:
test_perf, epoch_test_loss = eval(model, device, test_loader, evaluator,
args.task_type)
else:
test_perf = np.nan
epoch_test_loss = np.nan
test_curve.append(test_perf)
print(f'Train: {train_perf:.3f} | Validation: {valid_perf:.3f} | Test: {test_perf:.3f}'
f' | Train Loss {epoch_train_loss:.3f} | Val Loss {epoch_val_loss:.3f}'
f' | Test Loss {epoch_test_loss:.3f}')
# decay learning rate
if scheduler is not None:
if args.lr_scheduler == 'ReduceLROnPlateau':
scheduler.step(valid_perf)
# We use a strict inequality here like in the benchmarking GNNs paper code
# https://github.com/graphdeeplearning/benchmarking-gnns/blob/master/main_molecules_graph_regression.py#L217
if args.early_stop and optimizer.param_groups[0]['lr'] < args.lr_scheduler_min:
print("\n!! The minimum learning rate has been reached.")
break
else:
scheduler.step()
i = 0
new_params = []
if epoch % args.train_eval_period == 0:
print("====== Slowly changing params ======= ")
for name, param in model.named_parameters():
# print(f"Param {name}: {param.data.view(-1)[0]}")
# new_params.append(param.data.detach().clone().view(-1)[0])
new_params.append(param.data.detach().mean().item())
if len(params) > 0 and epoch % args.train_eval_period == 0:
if abs(params[i] - new_params[i]) < 1e-6:
print(f"Param {name}: {params[i] - new_params[i]}")
i += 1
params = copy.copy(new_params)
if not args.minimize:
best_val_epoch = np.argmax(np.array(valid_curve))
else:
best_val_epoch = np.argmin(np.array(valid_curve))
else:
train_loss_curve.append(np.nan)
train_curve.append(np.nan)
valid_curve.append(np.nan)
test_curve.append(np.nan)
print('Final Evaluation...')
final_train_perf = np.nan
final_val_perf = np.nan
final_test_perf = np.nan
if not args.dataset.startswith('sr'):
final_train_perf, _ = eval(model, device, train_loader, evaluator, args.task_type)
final_val_perf, _ = eval(model, device, valid_loader, evaluator, args.task_type)
if test_loader is not None:
final_test_perf, _ = eval(model, device, test_loader, evaluator, args.task_type)
# save results
curves = {
'train_loss': train_loss_curve,
'train': train_curve,
'val': valid_curve,
'test': test_curve,
'last_val': final_val_perf,
'last_test': final_test_perf,
'last_train': final_train_perf,
'best': best_val_epoch}
msg = (
f'========== Result ============\n'
f'Dataset: {args.dataset}\n'
f'------------ Best epoch -----------\n'
f'Train: {train_curve[best_val_epoch]}\n'
f'Validation: {valid_curve[best_val_epoch]}\n'
f'Test: {test_curve[best_val_epoch]}\n'
f'Best epoch: {best_val_epoch}\n'
'------------ Last epoch -----------\n'
f'Train: {final_train_perf}\n'
f'Validation: {final_val_perf}\n'
f'Test: {final_test_perf}\n'
'-------------------------------\n\n')
print(msg)
msg += str(args)
with open(filename, 'w') as handle:
handle.write(msg)
if args.dump_curves:
with open(os.path.join(result_folder, 'curves.pkl'), 'wb') as handle:
pickle.dump(curves, handle)
return curves
if __name__ == "__main__":
parser = get_parser()
args = parser.parse_args()
validate_args(args)
main(args)
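# Example invocation (an illustrative sketch, assuming the repository root is on
# PYTHONPATH; the flags shown are not the tuned configurations from the paper):
#
#   python exp/run_exp.py --dataset PROTEINS --model sparse_cin --num_layers 3 \
#       --emb_dim 32 --epochs 30 --lr_scheduler StepLR --fold 0 --dump_curves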
| 26,286 | 52.977413 | 128 | py |
cwn | cwn-main/exp/train_utils.py | import os
import torch
import numpy as np
import logging
from tqdm import tqdm
from sklearn import metrics as met
from data.complex import ComplexBatch
from ogb.graphproppred import Evaluator as OGBEvaluator
cls_criterion = torch.nn.CrossEntropyLoss()
bicls_criterion = torch.nn.BCEWithLogitsLoss()
reg_criterion = torch.nn.L1Loss()
msereg_criterion = torch.nn.MSELoss()
def train(model, device, loader, optimizer, task_type='classification', ignore_unlabeled=False):
"""
Performs one training epoch, i.e. one optimization pass over the batches of a data loader.
"""
if task_type == 'classification':
loss_fn = cls_criterion
elif task_type == 'bin_classification':
loss_fn = bicls_criterion
elif task_type == 'regression':
loss_fn = reg_criterion
elif task_type == 'mse_regression':
loss_fn = msereg_criterion
else:
raise NotImplementedError('Training on task type {} not yet supported.'.format(task_type))
curve = list()
model.train()
num_skips = 0
for step, batch in enumerate(tqdm(loader, desc="Training iteration")):
batch = batch.to(device)
if isinstance(batch, ComplexBatch):
num_samples = batch.cochains[0].x.size(0)
for dim in range(1, batch.dimension+1):
num_samples = min(num_samples, batch.cochains[dim].num_cells)
else:
# This is graph.
num_samples = batch.x.size(0)
if num_samples <= 1:
# Skip batch if it only comprises one sample (could cause problems with BN)
num_skips += 1
if float(num_skips) / len(loader) >= 0.25:
logging.warning("Warning! 25% of the batches were skipped this epoch")
continue
# (DEBUG)
if num_samples < 10:
logging.warning("Warning! BatchNorm applied on a batch "
"with only {} samples".format(num_samples))
optimizer.zero_grad()
pred = model(batch)
if isinstance(loss_fn, torch.nn.CrossEntropyLoss):
targets = batch.y.view(-1,)
else:
targets = batch.y.to(torch.float32).view(pred.shape)
# In some ogbg-mol* datasets we may have null targets.
# When the cross entropy loss is used and targets are of shape (N,)
        # the mask is broadcast automatically to the shape of the predictions.
mask = ~torch.isnan(targets)
loss = loss_fn(pred[mask], targets[mask])
loss.backward()
optimizer.step()
curve.append(loss.detach().cpu().item())
return curve
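# A minimal sketch of how one training epoch is typically run (mirroring
# exp/run_exp.py; `model`, `device`, `train_loader` and `optimizer` are assumed
# to be already constructed):
#
#   epoch_losses = train(model, device, train_loader, optimizer,
#                        task_type='classification')
#   print(float(np.mean(epoch_losses)))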
def infer(model, device, loader):
"""
Runs inference over all the batches of a data loader.
"""
model.eval()
y_pred = list()
for step, batch in enumerate(tqdm(loader, desc="Inference iteration")):
batch = batch.to(device)
with torch.no_grad():
pred = model(batch)
y_pred.append(pred.detach().cpu())
y_pred = torch.cat(y_pred, dim=0).numpy()
return y_pred
def eval(model, device, loader, evaluator, task_type):
"""
Evaluates a model over all the batches of a data loader.
"""
if task_type == 'classification':
loss_fn = cls_criterion
elif task_type == 'bin_classification':
loss_fn = bicls_criterion
elif task_type == 'regression':
loss_fn = reg_criterion
elif task_type == 'mse_regression':
loss_fn = msereg_criterion
else:
loss_fn = None
model.eval()
y_true = []
y_pred = []
losses = []
for step, batch in enumerate(tqdm(loader, desc="Eval iteration")):
# Cast features to double precision if that is used
if torch.get_default_dtype() == torch.float64:
for dim in range(batch.dimension + 1):
batch.cochains[dim].x = batch.cochains[dim].x.double()
assert batch.cochains[dim].x.dtype == torch.float64, batch.cochains[dim].x.dtype
batch = batch.to(device)
with torch.no_grad():
pred = model(batch)
if task_type != 'isomorphism':
if isinstance(loss_fn, torch.nn.CrossEntropyLoss):
targets = batch.y.view(-1,)
y_true.append(batch.y.detach().cpu())
else:
targets = batch.y.to(torch.float32).view(pred.shape)
y_true.append(batch.y.view(pred.shape).detach().cpu())
mask = ~torch.isnan(targets) # In some ogbg-mol* datasets we may have null targets.
loss = loss_fn(pred[mask], targets[mask])
losses.append(loss.detach().cpu().item())
else:
assert loss_fn is None
y_pred.append(pred.detach().cpu())
y_true = torch.cat(y_true, dim=0).numpy() if len(y_true) > 0 else None
y_pred = torch.cat(y_pred, dim=0).numpy()
input_dict = {'y_pred': y_pred, 'y_true': y_true}
mean_loss = float(np.mean(losses)) if len(losses) > 0 else np.nan
return evaluator.eval(input_dict), mean_loss
class Evaluator(object):
def __init__(self, metric, **kwargs):
if metric == 'isomorphism':
self.eval_fn = self._isomorphism
self.eps = kwargs.get('eps', 0.01)
self.p_norm = kwargs.get('p', 2)
elif metric == 'accuracy':
self.eval_fn = self._accuracy
elif metric == 'ap':
self.eval_fn = self._ap
elif metric == 'mae':
self.eval_fn = self._mae
elif metric.startswith('ogbg-mol'):
self._ogb_evaluator = OGBEvaluator(metric)
self._key = self._ogb_evaluator.eval_metric
self.eval_fn = self._ogb
else:
raise NotImplementedError('Metric {} is not yet supported.'.format(metric))
def eval(self, input_dict):
return self.eval_fn(input_dict)
def _isomorphism(self, input_dict):
# NB: here we return the failure percentage... the smaller the better!
preds = input_dict['y_pred']
assert preds is not None
assert preds.dtype == np.float64
preds = torch.tensor(preds, dtype=torch.float64)
mm = torch.pdist(preds, p=self.p_norm)
wrong = (mm < self.eps).sum().item()
metric = wrong / mm.shape[0]
return metric
def _accuracy(self, input_dict, **kwargs):
y_true = input_dict['y_true']
y_pred = np.argmax(input_dict['y_pred'], axis=1)
assert y_true is not None
assert y_pred is not None
metric = met.accuracy_score(y_true, y_pred)
return metric
def _ap(self, input_dict, **kwargs):
y_true = input_dict['y_true']
y_pred = input_dict['y_pred']
assert y_true is not None
assert y_pred is not None
metric = met.average_precision_score(y_true, y_pred)
return metric
def _mae(self, input_dict, **kwargs):
y_true = input_dict['y_true']
y_pred = input_dict['y_pred']
assert y_true is not None
assert y_pred is not None
metric = met.mean_absolute_error(y_true, y_pred)
return metric
def _ogb(self, input_dict, **kwargs):
assert 'y_true' in input_dict
assert input_dict['y_true'] is not None
assert 'y_pred' in input_dict
assert input_dict['y_pred'] is not None
return self._ogb_evaluator.eval(input_dict)[self._key]
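# A minimal usage sketch for the evaluator (the arrays below are illustrative):
#
#   evaluator = Evaluator('accuracy')
#   acc = evaluator.eval({'y_true': np.array([0, 1, 1]),
#                         'y_pred': np.array([[0.9, 0.1], [0.2, 0.8], [0.6, 0.4]])})
#   # acc == 2/3: the argmax predictions are [0, 1, 0] against targets [0, 1, 1]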
| 7,530 | 34.523585 | 100 | py |
cwn | cwn-main/exp/evaluate_sr_cwn_emb_mag.py | import os
import sys
import torch
import numpy as np
import random
from definitions import ROOT_DIR
from exp.prepare_sr_tests import prepare
from mp.models import MessagePassingAgnostic, SparseCIN
from data.data_loading import DataLoader, load_dataset
__families__ = [
'sr16622',
'sr251256',
'sr261034',
'sr281264',
'sr291467',
'sr351668',
'sr351899',
'sr361446',
'sr401224'
]
def compute_embeddings(family, baseline, seed):
# Set the seed for everything
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
# Perform the check in double precision
torch.set_default_dtype(torch.float64)
# Please set the parameters below to the ones used in SR experiments.
hidden = 16
num_layers = 3
max_ring_size = 6
use_coboundaries = True
nonlinearity = 'elu'
graph_norm = 'id'
readout = 'sum'
final_readout = 'sum'
readout_dims = (0,1,2)
init = 'sum'
jobs = 64
device = torch.device("cuda:" + str(0)) if torch.cuda.is_available() else torch.device("cpu")
# Build and dump dataset if needed
prepare(family, jobs, max_ring_size, False, init, None)
# Load reference dataset
complexes = load_dataset(family, max_dim=2, max_ring_size=max_ring_size, init_method=init)
data_loader = DataLoader(complexes, batch_size=8, shuffle=False, num_workers=16, max_dim=2)
# Instantiate model
if not baseline:
model = SparseCIN(num_input_features=1, num_classes=complexes.num_classes, num_layers=num_layers, hidden=hidden,
use_coboundaries=use_coboundaries, nonlinearity=nonlinearity, graph_norm=graph_norm,
readout=readout, final_readout=final_readout, readout_dims=readout_dims)
else:
hidden = 256
model = MessagePassingAgnostic(num_input_features=1, num_classes=complexes.num_classes, hidden=hidden,
nonlinearity=nonlinearity, readout=readout)
model = model.to(device)
model.eval()
# Compute complex embeddings
with torch.no_grad():
embeddings = list()
for batch in data_loader:
batch.nodes.x = batch.nodes.x.double()
batch.edges.x = batch.edges.x.double()
batch.two_cells.x = batch.two_cells.x.double()
out = model.forward(batch.to(device))
embeddings.append(out)
embeddings = torch.cat(embeddings, 0) # n x d
assert embeddings.size(1) == complexes.num_classes
return embeddings
if __name__ == "__main__":
# Standard args
passed_args = sys.argv[1:]
baseline = (passed_args[0].lower() == 'true')
max_ring_size = int(passed_args[1])
assert max_ring_size > 3
# Execute
msg = f'Model: {"CIN" if not baseline else "MLP-sum"}({max_ring_size})'
print(msg)
for family in __families__:
text = f'\n======================== {family}'
msg += text+'\n'
print(text)
for seed in range(5):
embeddings = compute_embeddings(family, baseline, seed)
text = f'seed {seed}: {torch.max(torch.abs(embeddings)):.2f}'
msg += text+'\n'
print(text)
path = os.path.join(ROOT_DIR, 'exp', 'results')
if baseline:
path = os.path.join(path, f'sr-base-{max_ring_size}.txt')
else:
path = os.path.join(path, f'sr-{max_ring_size}.txt')
with open(path, 'w') as handle:
handle.write(msg)
| 3,564 | 31.409091 | 121 | py |
cwn | cwn-main/exp/test_sr.py | import torch
import numpy as np
import random
import pytest
from data.data_loading import DataLoader, load_dataset
from exp.prepare_sr_tests import prepare
from mp.models import MessagePassingAgnostic, SparseCIN
def _get_cwn_sr_embeddings(family, seed, baseline=False):
# Set the seed for everything
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
# Please set the parameters below to the ones used in SR experiments.
    # If they match, then passing tests indicate the experiments are sound.
hidden = 16
num_layers = 3
max_ring_size = 6
use_coboundaries = True
nonlinearity = 'elu'
graph_norm = 'id'
readout = 'sum'
final_readout = 'sum'
readout_dims = (0,1,2)
init = 'sum'
jobs = 64
prepare_seed = 43
device = torch.device("cuda:" + str(0)) if torch.cuda.is_available() else torch.device("cpu")
# Build and dump dataset if needed
prepare(family, jobs, max_ring_size, True, init, prepare_seed)
# Load reference dataset
complexes = load_dataset(family, max_dim=2, max_ring_size=max_ring_size, init_method=init)
permuted_complexes = load_dataset(f'{family}p{prepare_seed}', max_dim=2, max_ring_size=max_ring_size, init_method=init)
# Instantiate model
if not baseline:
model = SparseCIN(num_input_features=1, num_classes=complexes.num_classes, num_layers=num_layers, hidden=hidden,
use_coboundaries=use_coboundaries, nonlinearity=nonlinearity, graph_norm=graph_norm,
readout=readout, final_readout=final_readout, readout_dims=readout_dims)
else:
hidden = 256
model = MessagePassingAgnostic(num_input_features=1, num_classes=complexes.num_classes, hidden=hidden,
nonlinearity=nonlinearity, readout=readout)
model = model.to(device)
model.eval()
# Compute reference complex embeddings
data_loader = DataLoader(complexes, batch_size=8, shuffle=False, num_workers=16, max_dim=2)
data_loader_perm = DataLoader(permuted_complexes, batch_size=8, shuffle=False, num_workers=16, max_dim=2)
with torch.no_grad():
embeddings = list()
perm_embeddings = list()
for batch in data_loader:
batch.nodes.x = batch.nodes.x.double()
batch.edges.x = batch.edges.x.double()
batch.two_cells.x = batch.two_cells.x.double()
out = model.forward(batch.to(device))
embeddings.append(out)
for batch in data_loader_perm:
batch.nodes.x = batch.nodes.x.double()
batch.edges.x = batch.edges.x.double()
batch.two_cells.x = batch.two_cells.x.double()
out = model.forward(batch.to(device))
perm_embeddings.append(out)
embeddings = torch.cat(embeddings, 0) # n x d
perm_embeddings = torch.cat(perm_embeddings, 0) # n x d
assert embeddings.size(0) == perm_embeddings.size(0)
assert embeddings.size(1) == perm_embeddings.size(1) == complexes.num_classes
return embeddings, perm_embeddings
def _validate_self_iso_on_sr(embeddings, perm_embeddings):
eps = 0.01
for i in range(embeddings.size(0)):
preds = torch.stack((embeddings[i], perm_embeddings[i]), 0)
assert preds.size(0) == 2
assert preds.size(1) == embeddings.size(1)
dist = torch.pdist(preds, p=2).item()
assert dist <= eps
def _validate_magnitude_embeddings(embeddings):
# At (5)e8, the fp64 granularity is still (2**29 - 2**28) / (2**52) ≈ 0.000000059604645
# The fact that we work in such a (safe) range can also be verified by running the following:
# a = torch.DoubleTensor([2.5e8])
# d = torch.DoubleTensor([5.0e8])
# b = torch.nextafter(a, d)
# print(b - a)
# >>> tensor([2.9802e-08], dtype=torch.float64)
thresh = torch.DoubleTensor([5.0*1e8])
apex = torch.max(torch.abs(embeddings)).cpu()
print(apex)
assert apex.dtype == torch.float64
assert torch.all(apex < thresh)
@pytest.mark.slow
@pytest.mark.parametrize("family", ['sr16622', 'sr251256', 'sr261034', 'sr281264', 'sr291467', 'sr351668', 'sr351899', 'sr361446', 'sr401224'])
def test_sparse_cin0_self_isomorphism(family):
# Perform the check in double precision
torch.set_default_dtype(torch.float64)
for seed in range(5):
embeddings, perm_embeddings = _get_cwn_sr_embeddings(family, seed)
_validate_magnitude_embeddings(embeddings)
_validate_magnitude_embeddings(perm_embeddings)
_validate_self_iso_on_sr(embeddings, perm_embeddings)
# Revert back to float32 for other tests
torch.set_default_dtype(torch.float32)
@pytest.mark.slow
@pytest.mark.parametrize("family", ['sr16622', 'sr251256', 'sr261034', 'sr281264', 'sr291467', 'sr351668', 'sr351899', 'sr361446', 'sr401224'])
def test_cwn_baseline_self_isomorphism(family):
# Perform the check in double precision
torch.set_default_dtype(torch.float64)
for seed in range(5):
embeddings, perm_embeddings = _get_cwn_sr_embeddings(family, seed, baseline=True)
_validate_magnitude_embeddings(embeddings)
_validate_magnitude_embeddings(perm_embeddings)
_validate_self_iso_on_sr(embeddings, perm_embeddings)
# Revert back to float32 for other tests
torch.set_default_dtype(torch.float32)
| 5,473 | 41.434109 | 143 | py |
TCDF | TCDF-master/runTCDF.py | import TCDF
import argparse
import torch
import pandas as pd
import numpy as np
import networkx as nx
import pylab
import copy
import matplotlib.pyplot as plt
import os
import sys
# os.chdir(os.path.dirname(sys.argv[0])) #uncomment this line to run in VSCode
def check_positive(value):
"""Checks if argument is positive integer (larger than zero)."""
ivalue = int(value)
if ivalue <= 0:
raise argparse.ArgumentTypeError("%s should be positive" % value)
return ivalue
def check_zero_or_positive(value):
"""Checks if argument is positive integer (larger than or equal to zero)."""
ivalue = int(value)
if ivalue < 0:
raise argparse.ArgumentTypeError("%s should be positive" % value)
return ivalue
class StoreDictKeyPair(argparse.Action):
"""Creates dictionary containing datasets as keys and ground truth files as values."""
def __call__(self, parser, namespace, values, option_string=None):
my_dict = {}
for kv in values.split(","):
k,v = kv.split("=")
my_dict[k] = v
setattr(namespace, self.dest, my_dict)
def getextendeddelays(gtfile, columns):
"""Collects the total delay of indirect causal relationships."""
gtdata = pd.read_csv(gtfile, header=None)
readgt=dict()
effects = gtdata[1]
causes = gtdata[0]
delays = gtdata[2]
gtnrrelations = 0
pairdelays = dict()
for k in range(len(columns)):
readgt[k]=[]
for i in range(len(effects)):
key=effects[i]
value=causes[i]
readgt[key].append(value)
pairdelays[(key, value)]=delays[i]
gtnrrelations+=1
g = nx.DiGraph()
g.add_nodes_from(readgt.keys())
for e in readgt:
cs = readgt[e]
for c in cs:
g.add_edge(c, e)
extendedreadgt = copy.deepcopy(readgt)
for c1 in range(len(columns)):
for c2 in range(len(columns)):
paths = list(nx.all_simple_paths(g, c1, c2, cutoff=2)) #indirect path max length 3, no cycles
if len(paths)>0:
for path in paths:
for p in path[:-1]:
if p not in extendedreadgt[path[-1]]:
extendedreadgt[path[-1]].append(p)
extendedgtdelays = dict()
for effect in extendedreadgt:
causes = extendedreadgt[effect]
for cause in causes:
if (effect, cause) in pairdelays:
delay = pairdelays[(effect, cause)]
extendedgtdelays[(effect, cause)]=[delay]
else:
#find extended delay
paths = list(nx.all_simple_paths(g, cause, effect, cutoff=2)) #indirect path max length 3, no cycles
extendedgtdelays[(effect, cause)]=[]
for p in paths:
delay=0
for i in range(len(p)-1):
delay+=pairdelays[(p[i+1], p[i])]
extendedgtdelays[(effect, cause)].append(delay)
return extendedgtdelays, readgt, extendedreadgt
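# The ground truth file read above is a header-less CSV where every row encodes one
# causal relationship as "cause,effect,delay". For instance (an illustrative example,
# not a file shipped with the repository):
#
#   0,1,2
#   3,1,0
#
# states that variable 0 causes variable 1 with a delay of 2 time steps, and that
# variable 3 causes variable 1 with zero delay.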
def evaluate(gtfile, validatedcauses, columns):
"""Evaluates the results of TCDF by comparing it to the ground truth graph, and calculating precision, recall and F1-score. F1'-score, precision' and recall' include indirect causal relationships."""
extendedgtdelays, readgt, extendedreadgt = getextendeddelays(gtfile, columns)
FP=0
FPdirect=0
TPdirect=0
TP=0
FN=0
FPs = []
FPsdirect = []
TPsdirect = []
TPs = []
FNs = []
for key in readgt:
for v in validatedcauses[key]:
if v not in extendedreadgt[key]:
FP+=1
FPs.append((key,v))
else:
TP+=1
TPs.append((key,v))
if v not in readgt[key]:
FPdirect+=1
FPsdirect.append((key,v))
else:
TPdirect+=1
TPsdirect.append((key,v))
for v in readgt[key]:
if v not in validatedcauses[key]:
FN+=1
FNs.append((key, v))
print("Total False Positives': ", FP)
print("Total True Positives': ", TP)
print("Total False Negatives: ", FN)
print("Total Direct False Positives: ", FPdirect)
print("Total Direct True Positives: ", TPdirect)
print("TPs': ", TPs)
print("FPs': ", FPs)
print("TPs direct: ", TPsdirect)
print("FPs direct: ", FPsdirect)
print("FNs: ", FNs)
precision = recall = 0.
if float(TP+FP)>0:
precision = TP / float(TP+FP)
print("Precision': ", precision)
if float(TP + FN)>0:
recall = TP / float(TP + FN)
print("Recall': ", recall)
if (precision + recall) > 0:
F1 = 2 * (precision * recall) / (precision + recall)
else:
F1 = 0.
print("F1' score: ", F1,"(includes direct and indirect causal relationships)")
precision = recall = 0.
if float(TPdirect+FPdirect)>0:
precision = TPdirect / float(TPdirect+FPdirect)
print("Precision: ", precision)
if float(TPdirect + FN)>0:
recall = TPdirect / float(TPdirect + FN)
print("Recall: ", recall)
if (precision + recall) > 0:
F1direct = 2 * (precision * recall) / (precision + recall)
else:
F1direct = 0.
print("F1 score: ", F1direct,"(includes only direct causal relationships)")
return FP, TP, FPdirect, TPdirect, FN, FPs, FPsdirect, TPs, TPsdirect, FNs, F1, F1direct
def evaluatedelay(extendedgtdelays, alldelays, TPs, receptivefield):
"""Evaluates the delay discovery of TCDF by comparing the discovered time delays with the ground truth."""
zeros = 0
total = 0.
for i in range(len(TPs)):
tp=TPs[i]
discovereddelay = alldelays[tp]
gtdelays = extendedgtdelays[tp]
for d in gtdelays:
if d <= receptivefield:
total+=1.
error = d - discovereddelay
if error == 0:
zeros+=1
            else:
                continue  # a delay beyond the receptive field cannot be discovered, so it is skipped
if zeros==0:
return 0.
else:
return zeros/float(total)
def runTCDF(datafile):
"""Loops through all variables in a dataset and return the discovered causes, time delays, losses, attention scores and variable names."""
df_data = pd.read_csv(datafile)
allcauses = dict()
alldelays = dict()
allreallosses=dict()
allscores=dict()
columns = list(df_data)
for c in columns:
idx = df_data.columns.get_loc(c)
causes, causeswithdelay, realloss, scores = TCDF.findcauses(c, cuda=cuda, epochs=nrepochs,
kernel_size=kernel_size, layers=levels, log_interval=loginterval,
lr=learningrate, optimizername=optimizername,
seed=seed, dilation_c=dilation_c, significance=significance, file=datafile)
allscores[idx]=scores
allcauses[idx]=causes
alldelays.update(causeswithdelay)
allreallosses[idx]=realloss
return allcauses, alldelays, allreallosses, allscores, columns
def plotgraph(stringdatafile,alldelays,columns):
"""Plots a temporal causal graph showing all discovered causal relationships annotated with the time delay between cause and effect."""
G = nx.DiGraph()
for c in columns:
G.add_node(c)
for pair in alldelays:
p1,p2 = pair
nodepair = (columns[p2], columns[p1])
G.add_edges_from([nodepair],weight=alldelays[pair])
edge_labels=dict([((u,v,),d['weight'])
for u,v,d in G.edges(data=True)])
pos=nx.circular_layout(G)
nx.draw_networkx_edge_labels(G,pos,edge_labels=edge_labels)
nx.draw(G,pos, node_color = 'white', edge_color='black',node_size=1000,with_labels = True)
ax = plt.gca()
ax.collections[0].set_edgecolor("#000000")
pylab.show()
def main(datafiles, evaluation):
if evaluation:
totalF1direct = [] #contains F1-scores of all datasets
totalF1 = [] #contains F1'-scores of all datasets
receptivefield=1
for l in range(0, levels):
receptivefield+=(kernel_size-1) * dilation_c**(l)
for datafile in datafiles.keys():
stringdatafile = str(datafile)
if '/' in stringdatafile:
stringdatafile = str(datafile).rsplit('/', 1)[1]
print("\n Dataset: ", stringdatafile)
# run TCDF
allcauses, alldelays, allreallosses, allscores, columns = runTCDF(datafile) #results of TCDF containing indices of causes and effects
print("\n===================Results for", stringdatafile,"==================================")
for pair in alldelays:
print(columns[pair[1]], "causes", columns[pair[0]],"with a delay of",alldelays[pair],"time steps.")
if evaluation:
# evaluate TCDF by comparing discovered causes with ground truth
print("\n===================Evaluation for", stringdatafile,"===============================")
FP, TP, FPdirect, TPdirect, FN, FPs, FPsdirect, TPs, TPsdirect, FNs, F1, F1direct = evaluate(datafiles[datafile], allcauses, columns)
totalF1.append(F1)
totalF1direct.append(F1direct)
# evaluate delay discovery
extendeddelays, readgt, extendedreadgt = getextendeddelays(datafiles[datafile], columns)
percentagecorrect = evaluatedelay(extendeddelays, alldelays, TPs, receptivefield)*100
print("Percentage of delays that are correctly discovered: ", percentagecorrect,"%")
print("==================================================================================")
if args.plot:
plotgraph(stringdatafile, alldelays, columns)
# In case of multiple datasets, calculate average F1-score over all datasets and standard deviation
if len(datafiles.keys())>1 and evaluation:
print("\nOverall Evaluation: \n")
print("F1' scores: ")
for f in totalF1:
print(f)
print("Average F1': ", np.mean(totalF1))
print("Standard Deviation F1': ", np.std(totalF1),"\n")
print("F1 scores: ")
for f in totalF1direct:
print(f)
print("Average F1: ", np.mean(totalF1direct))
print("Standard Deviation F1: ", np.std(totalF1direct))
parser = argparse.ArgumentParser(description='TCDF: Temporal Causal Discovery Framework')
parser.add_argument('--cuda', action="store_true", default=False, help='Use CUDA (GPU) (default: False)')
parser.add_argument('--epochs', type=check_positive, default=1000, help='Number of epochs (default: 1000)')
parser.add_argument('--kernel_size', type=check_positive, default=4, help='Size of kernel, i.e. window size. Maximum delay to be found is kernel size - 1. Recommended to be equal to dilation coefficient (default: 4)')
parser.add_argument('--hidden_layers', type=check_zero_or_positive, default=0, help='Number of hidden layers in the depthwise convolution (default: 0)')
parser.add_argument('--learning_rate', type=float, default=0.01, help='Learning rate (default: 0.01)')
parser.add_argument('--optimizer', type=str, default='Adam', choices=['Adam', 'RMSprop'], help='Optimizer to use (default: Adam)')
parser.add_argument('--log_interval', type=check_positive, default=500, help='Epoch interval to report loss (default: 500)')
parser.add_argument('--seed', type=check_positive, default=1111, help='Random seed (default: 1111)')
parser.add_argument('--dilation_coefficient', type=check_positive, default=4, help='Dilation coefficient, recommended to be equal to kernel size (default: 4)')
parser.add_argument('--significance', type=float, default=0.8, help="Significance number stating when an increase in loss is significant enough to label a potential cause as true (validated) cause. See paper for more details (default: 0.8)")
parser.add_argument('--plot', action="store_true", default=False, help='Show causal graph (default: False)')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--ground_truth',action=StoreDictKeyPair, help='Provide dataset(s) and the ground truth(s) to evaluate the results of TCDF. Argument format: DataFile1=GroundtruthFile1,Key2=Value2,... with a key for each dataset containing multivariate time series (required file format: csv, a column with header for each time series) and a value for the corresponding ground truth (required file format: csv, no header, index of cause in first column, index of effect in second column, time delay between cause and effect in third column)')
group.add_argument('--data', nargs='+', help='(Path to) one or more datasets to analyse by TCDF containing multiple time series. Required file format: csv with a column (incl. header) for each time series')
args = parser.parse_args()
print("Arguments:", args)
if torch.cuda.is_available():
if not args.cuda:
print("WARNING: You have a CUDA device, you should probably run with --cuda to speed up training.")
if args.kernel_size != args.dilation_coefficient:
print("WARNING: The dilation coefficient is not equal to the kernel size. Multiple paths can lead to the same delays. Set kernel_size equal to dilation_c to have exaxtly one path for each delay.")
kernel_size = args.kernel_size
levels = args.hidden_layers+1
nrepochs = args.epochs
learningrate = args.learning_rate
optimizername = args.optimizer
dilation_c = args.dilation_coefficient
loginterval = args.log_interval
seed=args.seed
cuda=args.cuda
significance=args.significance
if args.ground_truth is not None:
datafiles = args.ground_truth
main(datafiles, evaluation=True)
else:
datafiles = dict()
for dataset in args.data:
datafiles[dataset]=""
main(datafiles, evaluation=False)
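# Example invocations (sketch only; the csv paths below are placeholders, not files shipped with the repository):
#   python runTCDF.py --data data/my_timeseries.csv --plot
#   python runTCDF.py --ground_truth data/my_timeseries.csv=data/my_groundtruth.csv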
| 13,848 | 39.612903 | 544 | py |
TCDF | TCDF-master/TCDF.py | import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
from model import ADDSTCN
import random
import pandas as pd
import numpy as np
import heapq
import copy
import os
import sys
def preparedata(file, target):
"""Reads data from csv file and transforms it to two PyTorch tensors: dataset x and target time series y that has to be predicted."""
df_data = pd.read_csv(file)
df_y = df_data.copy(deep=True)[[target]]
df_x = df_data.copy(deep=True)
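    # shift the target's own history one step back in time so the network predicts y_t from y_{t-1}
    # (and from the other series) instead of simply copying its current value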
df_yshift = df_y.copy(deep=True).shift(periods=1, axis=0)
df_yshift[target]=df_yshift[target].fillna(0.)
df_x[target] = df_yshift
data_x = df_x.values.astype('float32').transpose()
data_y = df_y.values.astype('float32').transpose()
data_x = torch.from_numpy(data_x)
data_y = torch.from_numpy(data_y)
x, y = Variable(data_x), Variable(data_y)
return x, y
def train(epoch, traindata, traintarget, modelname, optimizer,log_interval,epochs):
"""Trains model by performing one epoch and returns attention scores and loss."""
modelname.train()
x, y = traindata[0:1], traintarget[0:1]
optimizer.zero_grad()
epochpercentage = (epoch/float(epochs))*100
output = modelname(x)
attentionscores = modelname.fs_attention
loss = F.mse_loss(output, y)
loss.backward()
optimizer.step()
if epoch % log_interval ==0 or epoch % epochs == 0 or epoch==1:
print('Epoch: {:2d} [{:.0f}%] \tLoss: {:.6f}'.format(epoch, epochpercentage, loss))
return attentionscores.data, loss
def findcauses(target, cuda, epochs, kernel_size, layers,
log_interval, lr, optimizername, seed, dilation_c, significance, file):
"""Discovers potential causes of one target time series, validates these potential causes with PIVM and discovers the corresponding time delays"""
print("\n", "Analysis started for target: ", target)
torch.manual_seed(seed)
X_train, Y_train = preparedata(file, target)
X_train = X_train.unsqueeze(0).contiguous()
Y_train = Y_train.unsqueeze(2).contiguous()
input_channels = X_train.size()[1]
targetidx = pd.read_csv(file).columns.get_loc(target)
model = ADDSTCN(targetidx, input_channels, layers, kernel_size=kernel_size, cuda=cuda, dilation_c=dilation_c)
if cuda:
model.cuda()
X_train = X_train.cuda()
Y_train = Y_train.cuda()
optimizer = getattr(optim, optimizername)(model.parameters(), lr=lr)
scores, firstloss = train(1, X_train, Y_train, model, optimizer,log_interval,epochs)
firstloss = firstloss.cpu().data.item()
for ep in range(2, epochs+1):
scores, realloss = train(ep, X_train, Y_train, model, optimizer,log_interval,epochs)
realloss = realloss.cpu().data.item()
s = sorted(scores.view(-1).cpu().detach().numpy(), reverse=True)
indices = np.argsort(-1 *scores.view(-1).cpu().detach().numpy())
#attention interpretation to find tau: the threshold that distinguishes potential causes from non-causal time series
if len(s)<=5:
potentials = []
for i in indices:
if scores[i]>1.:
potentials.append(i)
else:
potentials = []
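        # gap heuristic: among the scores >= 1, compute the gaps between consecutive sorted scores and use
        # the largest gap that lies in the first half of the ranking (and not at position 0) as threshold tau;
        # every series ranked above that gap is kept as a potential cause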
gaps = []
for i in range(len(s)-1):
if s[i]<1.: #tau should be greater or equal to 1, so only consider scores >= 1
break
gap = s[i]-s[i+1]
gaps.append(gap)
sortgaps = sorted(gaps, reverse=True)
for i in range(0, len(gaps)):
largestgap = sortgaps[i]
index = gaps.index(largestgap)
ind = -1
if index<((len(s)-1)/2): #gap should be in first half
if index>0:
ind=index #gap should have index > 0, except if second score <1
break
if ind<0:
ind = 0
potentials = indices[:ind+1].tolist()
print("Potential causes: ", potentials)
validated = copy.deepcopy(potentials)
#Apply PIVM (permutes the values) to check if potential cause is true cause
for idx in potentials:
random.seed(seed)
X_test2 = X_train.clone().cpu().numpy()
random.shuffle(X_test2[:,idx,:][0])
shuffled = torch.from_numpy(X_test2)
if cuda:
shuffled=shuffled.cuda()
model.eval()
output = model(shuffled)
testloss = F.mse_loss(output, Y_train)
testloss = testloss.cpu().data.item()
diff = firstloss-realloss
testdiff = firstloss-testloss
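        # permutation importance: keep idx only if shuffling its values destroys a substantial part of the
        # learned improvement; if the remaining improvement is still more than `significance` times the
        # original improvement, the loss barely increased and idx is removed from the validated causes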
if testdiff>(diff*significance):
validated.remove(idx)
weights = []
#Discover time delay between cause and effect by interpreting kernel weights
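    # for each validated cause, read the delay from the kernels: in every layer, if the largest absolute
    # weight is strictly larger than the runner-up, its position counted from the end of the kernel
    # contributes position * dilation_c**layer to the total delay (ties fall back to position 0);
    # a self-cause gets +1 because the target series was shifted by one step in preparedata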
for layer in range(layers):
weight = model.dwn.network[layer].net[0].weight.abs().view(model.dwn.network[layer].net[0].weight.size()[0], model.dwn.network[layer].net[0].weight.size()[2])
weights.append(weight)
causeswithdelay = dict()
for v in validated:
totaldelay=0
for k in range(len(weights)):
w=weights[k]
row = w[v]
twolargest = heapq.nlargest(2, row)
m = twolargest[0]
m2 = twolargest[1]
if m > m2:
index_max = len(row) - 1 - max(range(len(row)), key=row.__getitem__)
else:
#take first filter
index_max=0
delay = index_max *(dilation_c**k)
totaldelay+=delay
if targetidx != v:
causeswithdelay[(targetidx, v)]=totaldelay
else:
causeswithdelay[(targetidx, v)]=totaldelay+1
print("Validated causes: ", validated)
return validated, causeswithdelay, realloss, scores.view(-1).cpu().detach().numpy().tolist()
| 5,903 | 33.729412 | 166 | py |
TCDF | TCDF-master/depthwise.py | import torch
import torch.nn as nn
from torch.nn.utils import weight_norm
from torch.autograd import Variable
class Chomp1d(nn.Module):
"""PyTorch does not offer native support for causal convolutions, so it is implemented (with some inefficiency) by simply using a standard convolution with zero padding on both sides, and chopping off the end of the sequence."""
def __init__(self, chomp_size):
super(Chomp1d, self).__init__()
self.chomp_size = chomp_size
def forward(self, x):
return x[:, :, :-self.chomp_size].contiguous()
class FirstBlock(nn.Module):
def __init__(self, target, n_inputs, n_outputs, kernel_size, stride, dilation, padding):
super(FirstBlock, self).__init__()
self.target = target
self.conv1 = nn.Conv1d(n_inputs, n_outputs, kernel_size,
stride=stride, padding=padding, dilation=dilation, groups=n_outputs)
self.chomp1 = Chomp1d(padding)
self.net = nn.Sequential(self.conv1, self.chomp1)
self.relu = nn.PReLU(n_inputs)
self.init_weights()
def init_weights(self):
"""Initialize weights"""
self.conv1.weight.data.normal_(0, 0.1)
def forward(self, x):
out = self.net(x)
return self.relu(out)
class TemporalBlock(nn.Module):
def __init__(self, n_inputs, n_outputs, kernel_size, stride, dilation, padding):
super(TemporalBlock, self).__init__()
self.conv1 = nn.Conv1d(n_inputs, n_outputs, kernel_size,
stride=stride, padding=padding, dilation=dilation, groups=n_outputs)
self.chomp1 = Chomp1d(padding)
self.net = nn.Sequential(self.conv1, self.chomp1)
self.relu = nn.PReLU(n_inputs)
self.init_weights()
def init_weights(self):
"""Initialize weights"""
self.conv1.weight.data.normal_(0, 0.1)
def forward(self, x):
out = self.net(x)
return self.relu(out+x) #residual connection
class LastBlock(nn.Module):
def __init__(self, n_inputs, n_outputs, kernel_size, stride, dilation, padding):
super(LastBlock, self).__init__()
self.conv1 = nn.Conv1d(n_inputs, n_outputs, kernel_size,
stride=stride, padding=padding, dilation=dilation, groups=n_outputs)
self.chomp1 = Chomp1d(padding)
self.net = nn.Sequential(self.conv1, self.chomp1)
self.linear = nn.Linear(n_inputs, n_inputs)
self.init_weights()
def init_weights(self):
"""Initialize weights"""
self.linear.weight.data.normal_(0, 0.01)
def forward(self, x):
out = self.net(x)
return self.linear(out.transpose(1,2)+x.transpose(1,2)).transpose(1,2) #residual connection
class DepthwiseNet(nn.Module):
def __init__(self, target, num_inputs, num_levels, kernel_size=2, dilation_c=2):
super(DepthwiseNet, self).__init__()
layers = []
in_channels = num_inputs
out_channels = num_inputs
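        # each level l uses dilation dilation_c**l, so the receptive field of the stack is
        # 1 + (kernel_size - 1) * sum_l dilation_c**l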
for l in range(num_levels):
dilation_size = dilation_c ** l
if l==0:
layers += [FirstBlock(target, in_channels, out_channels, kernel_size, stride=1, dilation=dilation_size,
padding=(kernel_size-1) * dilation_size)]
elif l==num_levels-1:
layers+=[LastBlock(in_channels, out_channels, kernel_size, stride=1, dilation=dilation_size,
padding=(kernel_size-1) * dilation_size)]
else:
layers += [TemporalBlock(in_channels, out_channels, kernel_size, stride=1, dilation=dilation_size,
padding=(kernel_size-1) * dilation_size)]
self.network = nn.Sequential(*layers)
def forward(self, x):
return self.network(x)
| 3,952 | 39.752577 | 232 | py |
TCDF | TCDF-master/model.py | import torch as th
from torch import nn
import torch.nn.functional as F
from torch.autograd import Variable
from depthwise import DepthwiseNet
from torch.nn.utils import weight_norm
import numpy as np
class ADDSTCN(nn.Module):
def __init__(self, target, input_size, num_levels, kernel_size, cuda, dilation_c):
super(ADDSTCN, self).__init__()
self.target=target
self.dwn = DepthwiseNet(self.target, input_size, num_levels, kernel_size=kernel_size, dilation_c=dilation_c)
self.pointwise = nn.Conv1d(input_size, 1, 1)
self._attention = th.ones(input_size,1)
self._attention = Variable(self._attention, requires_grad=False)
self.fs_attention = th.nn.Parameter(self._attention.data)
if cuda:
self.dwn = self.dwn.cuda()
self.pointwise = self.pointwise.cuda()
self._attention = self._attention.cuda()
def init_weights(self):
self.pointwise.weight.data.normal_(0, 0.1)
def forward(self, x):
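        # scale every input series by its softmax-normalised attention score, run the depthwise dilated
        # temporal convolutions, then fuse the channels into a single prediction with the 1x1 pointwise conv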
y1=self.dwn(x*F.softmax(self.fs_attention, dim=0))
y1 = self.pointwise(y1)
return y1.transpose(1,2) | 1,175 | 34.636364 | 116 | py |
TCDF | TCDF-master/evaluate_predictions_TCDF.py | import TCDF
import argparse
import torch
import torch.optim as optim
from model import ADDSTCN
import pandas as pd
import numpy as np
import networkx as nx
import pylab
import copy
import matplotlib.pyplot as plt
import os
import sys
# os.chdir(os.path.dirname(sys.argv[0])) #uncomment this line to run in VSCode
def check_positive(value):
"""Checks if argument is positive integer (larger than zero)."""
ivalue = int(value)
if ivalue <= 0:
raise argparse.ArgumentTypeError("%s should be positive" % value)
return ivalue
def check_zero_or_positive(value):
"""Checks if argument is positive integer (larger than or equal to zero)."""
ivalue = int(value)
if ivalue < 0:
raise argparse.ArgumentTypeError("%s should be positive" % value)
return ivalue
class StoreDictKeyPair(argparse.Action):
"""Creates dictionary containing datasets as keys and ground truth files as values."""
def __call__(self, parser, namespace, values, option_string=None):
my_dict = {}
for kv in values.split(","):
k,v = kv.split("=")
my_dict[k] = v
setattr(namespace, self.dest, my_dict)
def check_between_zero_and_one(value):
"""Checks if argument is float between zero and 1."""
fvalue = float(value)
if fvalue < 0.0 or fvalue > 1.0:
raise argparse.ArgumentTypeError("%s should be a float between 0 and 1" % value)
return fvalue
def evaluate_prediction(target, cuda, epochs, kernel_size, layers,
loginterval, lr, optimizername, seed, dilation_c, split, file):
"""Runs first part of TCDF to predict one time series and evaluate its accuracy (MASE)."""
print("\n", "Analysis started for target: ", target)
torch.manual_seed(seed)
X, Y = TCDF.preparedata(file, target)
X = X.unsqueeze(0).contiguous()
Y = Y.unsqueeze(2).contiguous()
timesteps = X.size()[2]
if timesteps!=Y.size()[1]:
print("WARNING: Time series do not have the same length.")
X_train = X[:,:,:int(split*timesteps)]
Y_train = Y[:,:int(split*timesteps),:]
X_test = X[:,:,int(split*timesteps):]
Y_test = Y[:,int(split*timesteps):,:]
input_channels = X_train.size()[1]
targetidx = pd.read_csv(file).columns.get_loc(target)
model = ADDSTCN(targetidx, input_channels, levels, kernel_size=kernel_size, cuda=cuda, dilation_c=dilation_c)
if cuda:
model.cuda()
X_train = X_train.cuda()
Y_train = Y_train.cuda()
X_test = X_test.cuda()
Y_test = Y_test.cuda()
optimizer = getattr(optim, optimizername)(model.parameters(), lr=lr)
for ep in range(1, epochs+1):
scores, realloss = TCDF.train(ep, X_train, Y_train, model, optimizer,loginterval,epochs)
realloss = realloss.cpu().data.item()
model.eval()
output = model(X_test)
prediction=output.cpu().detach().numpy()[0,:,0]
T = output.size()[1]
total_e = 0.
for t in range(T):
real = Y_test[:,t,:]
predicted = output[:,t,:]
e = abs(real - predicted)
total_e+=e
total_e = total_e.cpu().data.item()
total = 0.
for t in range(1,T):
temp = abs(Y_test[:,t,:] - Y_test[:,t-1,:])
total+=temp
denom = (T/float(T-1))*total
denom = denom.cpu().data.item()
if denom!=0.:
MASE = total_e/float(denom)
else:
MASE = 0.
return MASE, prediction
def plot_predictions(predictions, file):
"""Plots the predicted values of all time series in the dataset"""
for c in predictions:
p = predictions[c]
plt.plot(p,label=c)
plt.xlabel('Time')
plt.ylabel('Predicted value')
plt.title('Dataset %s'%file)
plt.legend()
plt.show()
def evaluate(datafile):
"""Collects the predictions of all time series in a dataset and returns overall results."""
stringdatafile = str(datafile)
if '/' in stringdatafile:
stringdatafile = str(datafile).rsplit('/', 1)[1]
df_data = pd.read_csv(datafile)
columns = list(df_data)
MASEs = []
predictions = dict()
for c in columns:
MASE, prediction = evaluate_prediction(c, cuda=cuda, epochs=nrepochs,
kernel_size=kernel_size, layers=levels, loginterval=loginterval,
lr=learningrate, optimizername=optimizername,
seed=seed, dilation_c=dilation_c, split=split, file=datafile)
predictions[c]= prediction
MASEs.append(MASE)
allres.append(MASE)
avg = np.mean(MASEs)
std = np.std(MASEs)
return allres, avg, std, predictions
parser = argparse.ArgumentParser(description='TCDF: Temporal Causal Discovery Framework')
parser.add_argument('--cuda', action="store_true", default=False, help='Use CUDA (GPU) (default: False)')
parser.add_argument('--epochs', type=check_positive, default=1000, help='Number of epochs (default: 1000)')
parser.add_argument('--kernel_size', type=check_positive, default=4, help='Size of sliding kernel (default: 4)')
parser.add_argument('--hidden_layers', type=check_zero_or_positive, default=0, help='Number of hidden layers in the depthwise convolution (default: 0)')
parser.add_argument('--learning_rate', type=float, default=0.01, help='Learning rate (default: 0.01)')
parser.add_argument('--optimizer', type=str, default='Adam', choices=['Adam', 'RMSprop'], help='Optimizer to use: Adam or RMSprop (default: Adam)')
parser.add_argument('--log_interval', type=check_positive, default=500, help='Epoch interval to report loss (default: 500)')
parser.add_argument('--seed', type=check_positive, default=1111, help='Random seed (default: 1111)')
parser.add_argument('--dilation_coefficient', type=check_positive, default=4, help='Dilation coefficient, recommended to be equal to kernel size (default: 4)')
parser.add_argument('--plot', action="store_true", default=False, help='Plot predicted time series (default: False)')
parser.add_argument('--train_test_split', type=check_between_zero_and_one, default=0.8, help="Portion of dataset to use for training (default 0.8)")
parser.add_argument('--data', nargs='+', required=True, help='(Path to) Dataset(s) to predict by TCDF containing multiple time series. Required file format: csv with a column (incl. header) for each time series')
args = parser.parse_args()
print("Arguments:", args)
if torch.cuda.is_available():
if not args.cuda:
print("WARNING: You have a CUDA device, you should probably run with --cuda to speed up training.")
if args.kernel_size != args.dilation_coefficient:
print("WARNING: The dilation coefficient is not equal to the kernel size. Multiple paths can lead to the same delays. Set kernel_size equal to dilation_c to have exaxtly one path for each delay.")
kernel_size = args.kernel_size
levels = args.hidden_layers+1
nrepochs = args.epochs
learningrate = args.learning_rate
optimizername = args.optimizer
dilation_c = args.dilation_coefficient
loginterval = args.log_interval
seed=args.seed
cuda=args.cuda
split=args.train_test_split
plot = args.plot
datasets = args.data
evalresults = dict()
allres = []
for datafile in datasets:
allres,avg,std,predictions = evaluate(datafile)
evalresults[datafile]=(avg, std)
print("\nMean Absolute Scaled Error (MASE) averaged over all time series in", datafile,":",evalresults[datafile][0],"with standard deviation",evalresults[datafile][1])
if plot:
plot_predictions(predictions,datafile)
if len(datasets)>1:
overallavg = np.mean(allres)
overallstd = np.std(allres)
print("=========================Overall Evaluation====================================")
print("Average MASE over all datasets: ", overallavg)
print("Standard Deviation MASE over all datasets: ", overallstd)
| 7,764 | 38.820513 | 212 | py |
Discrete-Continuous-VLN | Discrete-Continuous-VLN-main/run.py | #!/usr/bin/env python3
import argparse
import random
import os
import numpy as np
import torch
from habitat import logger
from habitat_baselines.common.baseline_registry import baseline_registry
import habitat_extensions # noqa: F401
import vlnce_baselines # noqa: F401
from vlnce_baselines.config.default import get_config
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--exp_name",
type=str,
default="test",
required=True,
help="experiment id that matches to exp-id in Notion log",
)
parser.add_argument(
"--run-type",
choices=["train", "eval", "inference"],
required=True,
help="run type of the experiment (train, eval, inference)",
)
parser.add_argument(
"--exp-config",
type=str,
required=True,
help="path to config yaml containing info about experiment",
)
parser.add_argument(
"opts",
default=None,
nargs=argparse.REMAINDER,
help="Modify config options from command line",
)
parser.add_argument('--local_rank', type=int, default=0, help="local gpu id")
args = parser.parse_args()
run_exp(**vars(args))
def run_exp(exp_name: str, exp_config: str,
run_type: str, opts=None, local_rank=None) -> None:
r"""Runs experiment given mode and config
Args:
exp_config: path to config file.
run_type: "train" or "eval.
opts: list of strings of additional config options.
Returns:
None.
"""
config = get_config(exp_config, opts)
config.defrost()
config.TENSORBOARD_DIR += exp_name
config.CHECKPOINT_FOLDER += exp_name
if os.path.isdir(config.EVAL_CKPT_PATH_DIR):
config.EVAL_CKPT_PATH_DIR += exp_name
config.RESULTS_DIR += exp_name
config.LOG_FILE = exp_name + '_' + config.LOG_FILE
config.TASK_CONFIG.SEED = 0
config.local_rank = local_rank
config.freeze()
# logger.info(f"config: {config}") # print all configs
logger.add_filehandler('logs/running_log/'+config.LOG_FILE)
random.seed(config.TASK_CONFIG.SEED)
np.random.seed(config.TASK_CONFIG.SEED)
torch.manual_seed(config.TASK_CONFIG.SEED)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = False
if torch.cuda.is_available():
torch.set_num_threads(1)
trainer_init = baseline_registry.get_trainer(config.TRAINER_NAME)
assert trainer_init is not None, f"{config.TRAINER_NAME} is not supported"
trainer = trainer_init(config)
if run_type == "train":
trainer.train()
elif run_type == "eval":
trainer.eval()
elif run_type == "inference":
trainer.inference()
if __name__ == "__main__":
main()
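# Example invocation (sketch only; the experiment name and config path are placeholders):
#   python run.py --exp_name my_experiment --run-type train --exp-config path/to/experiment_config.yaml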
| 2,787 | 27.742268 | 81 | py |
Discrete-Continuous-VLN | Discrete-Continuous-VLN-main/vlnce_baselines/ss_trainer_CMA.py | import gc
import os
import random
import warnings
from collections import defaultdict
import lmdb
import msgpack_numpy
import numpy as np
import math
import time
import torch
import torch.nn.functional as F
from torch.autograd import Variable
import tqdm
from habitat import logger
from habitat_baselines.common.baseline_registry import baseline_registry
from habitat_baselines.common.environments import get_env_class
from habitat_baselines.common.obs_transformers import (
apply_obs_transforms_batch,
apply_obs_transforms_obs_space,
get_active_obs_transforms,
)
from habitat_baselines.common.tensorboard_utils import TensorboardWriter
from habitat_baselines.utils.common import batch_obs
from vlnce_baselines.common.aux_losses import AuxLosses
from vlnce_baselines.common.base_il_trainer import BaseVLNCETrainer
from vlnce_baselines.common.env_utils import construct_envs, construct_envs_for_rl, is_slurm_batch_job
from vlnce_baselines.common.utils import extract_instruction_tokens
from vlnce_baselines.utils import reduce_loss
from .utils import get_camera_orientations
from .models.utils import (
length2mask, dir_angle_feature,
)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
import tensorflow as tf # noqa: F401
import torch.distributed as distr
import gzip
import json
from copy import deepcopy
@baseline_registry.register_trainer(name="schedulesampler-CMA")
class SSTrainer(BaseVLNCETrainer):
def __init__(self, config=None):
super().__init__(config)
self.max_len = int(config.IL.max_traj_len)
def _make_dirs(self) -> None:
self._make_ckpt_dir()
# os.makedirs(self.lmdb_features_dir, exist_ok=True)
if self.config.EVAL.SAVE_RESULTS:
self._make_results_dir()
def save_checkpoint(self, epoch: int, step_id: int) -> None:
torch.save(
obj={
"state_dict": self.policy.state_dict(),
"config": self.config,
"optim_state": self.optimizer.state_dict(),
"epoch": epoch,
"step_id": step_id,
},
f=os.path.join(self.config.CHECKPOINT_FOLDER, f"ckpt.{epoch}.pth"),
)
def allocate_allowed_episode_by_scene(self):
        ''' discrete waypoint coordinates directly projected from MP3D '''
with gzip.open(
self.config.TASK_CONFIG.DATASET.DATA_PATH.format(
split=self.split)
) as f:
data = json.load(f) # dict_keys(['episodes', 'instruction_vocab'])
        ''' continuous waypoint coordinates from shortest paths in Habitat '''
with gzip.open(
self.config.TASK_CONFIG.TASK.NDTW.GT_PATH.format(
split=self.split)
) as f:
gt_data = json.load(f)
data = data['episodes']
# long_episode_ids = [int(k) for k,v in gt_data.items() if len(v['actions']) > self.config.IL.max_traj_len]
long_episode_ids = []
average_length = (len(data) - len(long_episode_ids))//self.world_size
episodes_by_scene = {}
for ep in data:
scan = ep['scene_id'].split('/')[1]
if scan not in episodes_by_scene.keys():
episodes_by_scene[scan] = []
if ep['episode_id'] not in long_episode_ids:
episodes_by_scene[scan].append(ep['episode_id'])
else:
continue
        ''' split the data of each environment evenly across the GPUs '''
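        # greedy load balancing: scenes are sorted by episode count (descending) and each scene is assigned
        # to the GPU group with the smallest running total; the largest scene is held back and used at the
        # end to top every group up to the per-GPU average length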
values_to_scenes = {}
values = []
for k,v in episodes_by_scene.items():
values.append(len(v))
if len(v) not in values_to_scenes.keys():
values_to_scenes[len(v)] = []
values_to_scenes[len(v)].append(k)
groups = self.world_size
values.sort(reverse=True)
last_scene_episodes = episodes_by_scene[values_to_scenes[values[0]].pop()]
values = values[1:]
load_balance_groups = [[] for grp in range(groups)]
scenes_groups = [[] for grp in range(groups)]
for v in values:
current_total = [sum(grp) for grp in load_balance_groups]
min_index = np.argmin(current_total)
load_balance_groups[min_index].append(v)
scenes_groups[min_index] += episodes_by_scene[values_to_scenes[v].pop()]
for grp in scenes_groups:
add_number = average_length - len(grp)
grp += last_scene_episodes[:add_number]
last_scene_episodes = last_scene_episodes[add_number:]
return scenes_groups[self.local_rank]
def train_ml(self, in_train=True, train_tf=False):
self.envs.resume_all()
observations = self.envs.reset()
shift_index = 0
for i, ep in enumerate(self.envs.current_episodes()):
if ep.episode_id in self.trained_episodes:
i = i - shift_index
observations.pop(i)
self.envs.pause_at(i)
shift_index += 1
if self.envs.num_envs == 0:
break
else:
self.trained_episodes.append(ep.episode_id)
if self.envs.num_envs == 0:
return -1
observations = extract_instruction_tokens(
observations, self.config.TASK_CONFIG.TASK.INSTRUCTION_SENSOR_UUID
)
batch = batch_obs(observations, self.device)
batch = apply_obs_transforms_batch(batch, self.obs_transforms)
ml_loss = 0.
total_weight = 0.
losses = []
not_done_index = list(range(self.envs.num_envs))
not_done_masks = torch.zeros(
self.envs.num_envs, 1, dtype=torch.bool, device=self.device
)
# encoding instructions
if 'CMA' in self.config.MODEL.policy_name:
rnn_states = torch.zeros(
self.envs.num_envs,
self.num_recurrent_layers,
self.config.MODEL.STATE_ENCODER.hidden_size,
device=self.device,
)
instruction_embedding, all_lang_masks = self.policy.net(
mode = "language",
observations = batch,
)
init_num_envs = self.envs.num_envs
il_loss = 0.0
for stepk in range(self.max_len):
language_features = instruction_embedding[not_done_index]
lang_masks = all_lang_masks[not_done_index]
# agent's current position and heading
positions = []; headings = []
for ob_i in range(len(observations)):
agent_state_i = self.envs.call_at(ob_i,
"get_agent_info", {})
positions.append(agent_state_i['position'])
headings.append(agent_state_i['heading'])
if 'CMA' in self.config.MODEL.policy_name:
# candidate waypoints prediction
cand_rgb, cand_depth, \
cand_direction, cand_mask, candidate_lengths, \
batch_angles, batch_distances = self.policy.net(
mode = "waypoint",
waypoint_predictor = self.waypoint_predictor,
observations = batch,
in_train = in_train,
)
# navigation action logits
logits, rnn_states = self.policy.net(
mode = 'navigation',
observations = batch,
instruction = language_features,
text_mask = lang_masks,
rnn_states = rnn_states,
headings = headings,
cand_rgb = cand_rgb,
cand_depth = cand_depth,
cand_direction = cand_direction,
cand_mask = cand_mask,
masks = not_done_masks,
)
logits = logits.masked_fill_(cand_mask, -float('inf'))
total_weight += len(candidate_lengths)
            # get the resulting distance to the goal of executing each candidate action,
            # plus the agent's current distance to the goal for the stop decision
if train_tf:
cand_dists_to_goal = [[] for _ in range(len(batch_angles))]
oracle_cand_idx = []
oracle_stop = []
for j in range(len(batch_angles)):
for k in range(len(batch_angles[j])):
angle_k = batch_angles[j][k]
forward_k = batch_distances[j][k]
dist_k = self.envs.call_at(j,
"cand_dist_to_goal", {
"angle": angle_k, "forward": forward_k,
})
cand_dists_to_goal[j].append(dist_k)
curr_dist_to_goal = self.envs.call_at(
j, "current_dist_to_goal")
                        # if the agent is already within stopping range of the goal (1.5 m), the oracle picks the stop action
if curr_dist_to_goal < 1.5:
oracle_cand_idx.append(candidate_lengths[j] - 1)
oracle_stop.append(True)
else:
oracle_cand_idx.append(np.argmin(cand_dists_to_goal[j]))
oracle_stop.append(False)
if train_tf:
oracle_actions = torch.tensor(oracle_cand_idx, device=self.device).unsqueeze(1)
actions = logits.argmax(dim=-1, keepdim=True)
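                # schedule sampling: with probability self.ratio follow the oracle action (teacher forcing),
                # otherwise keep the model's own greedy prediction; the ratio is annealed over epochs in train()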
actions = torch.where(
torch.rand_like(actions, dtype=torch.float) <= self.ratio,
oracle_actions, actions)
current_loss = F.cross_entropy(logits, oracle_actions.squeeze(1), reduction="none")
ml_loss += torch.sum(current_loss)
else:
actions = logits.argmax(dim=-1, keepdim=True)
env_actions = []
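            # the last candidate index (candidate_lengths[j] - 1) is the appended STOP option: it maps to
            # action 0, every other index maps to a HIGHTOLOW step towards the selected waypoint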
for j in range(logits.size(0)):
if actions[j].item() == candidate_lengths[j]-1:
env_actions.append({'action':
{'action': 0, 'action_args':{}}})
else:
env_actions.append({'action':
{'action': 4, # HIGHTOLOW
'action_args':{
'angle': batch_angles[j][actions[j].item()],
'distance': batch_distances[j][actions[j].item()],
}}})
outputs = self.envs.step(env_actions)
observations, _, dones, infos = [list(x) for x in
zip(*outputs)]
if sum(dones) > 0:
if 'CMA' in self.config.MODEL.policy_name:
rnn_states = rnn_states[np.array(dones)==False]
shift_index = 0
for i in range(self.envs.num_envs):
if dones[i]:
i = i - shift_index
not_done_index.pop(i)
self.envs.pause_at(i)
if self.envs.num_envs == 0:
break
observations.pop(i)
infos.pop(i)
shift_index += 1
if self.envs.num_envs == 0:
break
not_done_masks = torch.ones(
self.envs.num_envs, 1, dtype=torch.bool, device=self.device
)
observations = extract_instruction_tokens(
observations,
self.config.TASK_CONFIG.TASK.INSTRUCTION_SENSOR_UUID,
)
batch = batch_obs(observations, self.device)
batch = apply_obs_transforms_batch(batch, self.obs_transforms)
if train_tf:
il_loss = ml_loss / total_weight
return il_loss
def train(self) -> None:
split = self.config.TASK_CONFIG.DATASET.SPLIT
self.config.defrost()
self.config.TASK_CONFIG.TASK.NDTW.SPLIT = split
self.config.TASK_CONFIG.TASK.SDTW.SPLIT = split
self.config.TASK_CONFIG.ENVIRONMENT.MAX_EPISODE_STEPS = self.config.IL.max_traj_len
if (
self.config.IL.DAGGER.expert_policy_sensor
not in self.config.TASK_CONFIG.TASK.SENSORS
):
self.config.TASK_CONFIG.TASK.SENSORS.append(
self.config.IL.DAGGER.expert_policy_sensor
)
self.config.TASK_CONFIG.ENVIRONMENT.ITERATOR_OPTIONS.MAX_SCENE_REPEAT_STEPS = (
-1
)
self.config.NUM_ENVIRONMENTS = self.config.IL.batch_size // len(
self.config.SIMULATOR_GPU_IDS)
self.config.use_pbar = not is_slurm_batch_job()
        ''' if using image observations, register the panoramic RGB/DEPTH camera sensors '''
if self.config.MODEL.policy_name == 'PolicyViewSelectionCMA':
resize_config = self.config.RL.POLICY.OBS_TRANSFORMS.RESIZER_PER_SENSOR.SIZES
config = self.config.TASK_CONFIG
camera_orientations = get_camera_orientations(12)
for sensor_type in ["RGB", "DEPTH"]:
resizer_size = dict(resize_config)[sensor_type.lower()]
sensor = getattr(config.SIMULATOR, f"{sensor_type}_SENSOR")
for action, orient in camera_orientations.items():
camera_template = f"{sensor_type}_{action}"
camera_config = deepcopy(sensor)
camera_config.ORIENTATION = camera_orientations[action]
camera_config.UUID = camera_template.lower()
setattr(config.SIMULATOR, camera_template, camera_config)
config.SIMULATOR.AGENT_0.SENSORS.append(camera_template)
resize_config.append((camera_template.lower(), resizer_size))
self.config.RL.POLICY.OBS_TRANSFORMS.RESIZER_PER_SENSOR.SIZES = resize_config
self.config.TASK_CONFIG = config
self.config.SENSORS = config.SIMULATOR.AGENT_0.SENSORS
self.config.freeze()
self.world_size = self.config.GPU_NUMBERS
self.local_rank = self.config.local_rank
self.batch_size = self.config.IL.batch_size
torch.cuda.set_device(self.device)
if self.world_size > 1:
distr.init_process_group(backend='nccl', init_method='env://')
self.device = self.config.TORCH_GPU_IDS[self.local_rank]
torch.cuda.set_device(self.device)
self.split = split
episode_ids = self.allocate_allowed_episode_by_scene()
self.envs = construct_envs(
self.config, get_env_class(self.config.ENV_NAME),
episodes_allowed=episode_ids,
auto_reset_done=False
)
num_epoches_per_ratio = int(np.ceil(self.config.IL.epochs/self.config.IL.decay_time))
print('\nFinished constructing environments')
dataset_length = sum(self.envs.number_of_episodes)
print('local rank:', self.local_rank, '|', 'dataset length:', dataset_length)
observation_space = self.envs.observation_spaces[0]
action_space = self.envs.action_spaces[0]
self.obs_transforms = get_active_obs_transforms(self.config)
observation_space = apply_obs_transforms_obs_space(
observation_space, self.obs_transforms
)
print('\nInitializing policy network ...')
self._initialize_policy(
self.config,
self.config.IL.load_from_ckpt,
observation_space=observation_space,
action_space=action_space,
)
print('\nTraining starts ...')
with TensorboardWriter(
self.config.TENSORBOARD_DIR,
flush_secs=self.flush_secs,
purge_step=0,
) as writer:
AuxLosses.activate()
batches_per_epoch = int(np.ceil(dataset_length/self.batch_size))
for epoch in range(self.start_epoch, self.config.IL.epochs):
epoch_str = f"{epoch + 1}/{self.config.IL.epochs}"
t_ = (
tqdm.trange(
batches_per_epoch, leave=False, dynamic_ncols=True
)
if self.config.use_pbar & (self.local_rank < 1)
else range(batches_per_epoch)
)
self.ratio = np.power(self.config.IL.schedule_ratio, epoch//num_epoches_per_ratio + 1)
self.trained_episodes = []
                # reconstruct the envs every epoch to ensure the same data are loaded
if epoch != self.start_epoch:
self.envs = None
self.envs = construct_envs(
self.config, get_env_class(self.config.ENV_NAME),
episodes_allowed=episode_ids,
auto_reset_done=False
)
for batch_idx in t_:
loss = self.train_ml(
in_train=True, train_tf=True)
if loss == -1:
break
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
losses = [loss]
if self.world_size > 1:
for i in range(len(losses)):
reduce_loss(losses[i], self.local_rank, self.world_size)
losses[i] = losses[i].item()
else:
for i in range(len(losses)):
losses[i] = losses[i].item()
loss = losses[0]
if self.config.use_pbar:
if self.local_rank < 1: # seems can be removed
t_.set_postfix(
{
"epoch": epoch_str,
"loss": round(loss, 4),
}
)
writer.add_scalar("loss", loss, self.step_id)
self.step_id += 1 # noqa: SIM113
if self.local_rank < 1: # and epoch % 3 == 0:
self.save_checkpoint(epoch, self.step_id)
AuxLosses.deactivate()
| 18,334 | 39.474614 | 115 | py |
Discrete-Continuous-VLN | Discrete-Continuous-VLN-main/vlnce_baselines/utils.py | import torch
import torch.distributed as dist
import numpy as np
import math
import copy
class ARGS():
def __init__(self):
self.local_rank = 0
def reduce_loss(tensor, rank, world_size):
with torch.no_grad():
dist.reduce(tensor, dst=0)
if rank == 0:
tensor /= world_size
def gather_list_and_concat(list_of_nums,world_size):
if not torch.is_tensor(list_of_nums):
tensor = torch.Tensor(list_of_nums).cuda()
else:
if list_of_nums.is_cuda == False:
tensor = list_of_nums.cuda()
else:
tensor = list_of_nums
gather_t = [torch.ones_like(tensor) for _ in
range(world_size)]
dist.all_gather(gather_t, tensor)
return gather_t
def repeat_allocation(allocations, max_number):
if torch.is_tensor(max_number):
max_number = max_number.long().item()
else:
max_number = max_number.long()
allocation_number = len(allocations)
repeat_time, res = max_number // allocation_number, max_number % allocation_number
allocations_ = []
for i in range(repeat_time):
allocations_ += copy.deepcopy(allocations)
allocations_ += copy.deepcopy(allocations)[:res]
return allocations_
def allocate(number, ep_length, size_per_time):
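    # expand each episode length `number[i]` times, split the resulting workload into
    # ceil(total / size_per_time) groups whose summed lengths are balanced greedily (largest first),
    # then map the lengths in every group back to concrete episode indices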
length_to_indexes = {ep_length[i]: [] for i in
range(len(ep_length))}
for i in range(len(ep_length)):
length_to_indexes[ep_length[i]] += [i]*number[i]
values = []
for i in range(len(number)):
values += [ep_length[i]] * number[i]
groups = int((len(values) - 0.01) // size_per_time + 1)
values.sort(reverse=True)
load_balance_groups = [[] for grp in range(groups)]
for v in values:
load_balance_groups.sort(key=lambda x: sum(x))
load_balance_groups[0].append(v)
indexes = []
set_length = list(set(ep_length))
for i in range(groups):
index = np.zeros(len(load_balance_groups[i]),dtype=int)
for j in range(len(set_length)):
length_indexes = length_to_indexes[set_length[j]]
position = np.where(np.array(load_balance_groups[i]) ==
set_length[j])[0]
position_length = len(position)
index[position] = length_indexes[:position_length]
length_to_indexes[set_length[j]] = length_indexes[position_length:]
indexes.append((index).tolist())
return indexes
def allocate_instructions(instruction_lengths, allocations,ep_length, instruction_ids):
instruction_ids_copy = copy.deepcopy(instruction_ids)
allocations_copy = copy.deepcopy(allocations)
instruction_lengths_copy = copy.deepcopy(instruction_lengths)
values = []
value_indexes = []
weights = []
for i in range(len(instruction_lengths)):
instruction_length = instruction_lengths[i]
values += instruction_length
value_indexes += len(instruction_length)*[i]
weights += [ep_length[i]] * len(instruction_length)
values = np.array(values)
weights = np.array(weights)
value_indexes = np.array(value_indexes)
sorted_index = np.argsort(values*weights)[::-1]
values = values[sorted_index]
value_indexes = value_indexes[sorted_index]
weights = weights[sorted_index]
groups = len(allocations)
load_balance_groups = [[] for grp in range(groups)]
group_weights = [[] for grp in range(groups)]
instruction_allocations = [[] for grp in range(groups)]
for j in range(len(values)):
summation = np.array([np.sum(np.array(load_balance_groups[i])*np.array(group_weights[i])) for i in range(groups)])
sorted_index = np.argsort(summation)
for i in sorted_index:
index = value_indexes[j]
value = values[j]
if index in allocations_copy[i]:
allocations_copy[i].remove(index)
load_balance_groups[i].append(value)
group_weights[i].append(weights[j])
index_in_length = np.where(np.array(instruction_lengths_copy[index]) == value)[0][0]
instruction_lengths_copy[index].pop(index_in_length)
instruction_allocations[i].append(instruction_ids_copy[index].pop(index_in_length))
break
return instruction_allocations
def allocate_by_scene_for_ddp(number, ep_length, size_per_time):
length_to_indexes = {ep_length[i]: [] for i in
range(len(ep_length))}
for i in range(len(ep_length)):
length_to_indexes[ep_length[i]] += [i]*number[i]
values = []
for i in range(len(number)):
values += [ep_length[i]] * number[i]
groups = int((len(values) - 0.01) // size_per_time + 1)
values.sort(reverse=True)
load_balance_groups = [[] for grp in range(groups)]
for v in values:
load_balance_groups.sort(key=lambda x: sum(x))
load_balance_groups[0].append(v)
indexes = []
set_length = list(set(ep_length))
for i in range(groups):
index = np.zeros(len(load_balance_groups[i]),dtype=int)
for j in range(len(set_length)):
length_indexes = length_to_indexes[set_length[j]]
position = np.where(np.array(load_balance_groups[i]) ==
set_length[j])[0]
position_length = len(position)
index[position] = length_indexes[:position_length]
length_to_indexes[set_length[j]] = length_indexes[position_length:]
indexes.append((index).tolist())
return indexes
def get_camera_orientations(num_views):
assert isinstance(num_views, int)
base_angle_deg = 360 / num_views
    base_angle_rad = math.radians(base_angle_deg)  # the same angular step as base_angle_deg, in radians
orient_dict = {}
for k in range(1,num_views):
orient_dict[str(base_angle_deg*k)] = [0.0, base_angle_rad*k, 0.0]
return orient_dict
| 5,848 | 34.883436 | 122 | py |
Discrete-Continuous-VLN | Discrete-Continuous-VLN-main/vlnce_baselines/ss_trainer_VLNBERT.py | import gc
import os
import random
import warnings
from collections import defaultdict
import lmdb
import msgpack_numpy
import numpy as np
import math
import time
import torch
import torch.nn.functional as F
from torch.autograd import Variable
import tqdm
from habitat import logger
from habitat_baselines.common.baseline_registry import baseline_registry
from habitat_baselines.common.environments import get_env_class
from habitat_baselines.common.obs_transformers import (
apply_obs_transforms_batch,
apply_obs_transforms_obs_space,
get_active_obs_transforms,
)
from habitat_baselines.common.tensorboard_utils import TensorboardWriter
from habitat_baselines.utils.common import batch_obs
from vlnce_baselines.common.aux_losses import AuxLosses
from vlnce_baselines.common.base_il_trainer import BaseVLNCETrainer
from vlnce_baselines.common.env_utils import construct_envs, construct_envs_for_rl, is_slurm_batch_job
from vlnce_baselines.common.utils import extract_instruction_tokens
from vlnce_baselines.utils import reduce_loss
from .utils import get_camera_orientations
from .models.utils import (
length2mask, dir_angle_feature_with_ele,
)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
import tensorflow as tf # noqa: F401
import torch.distributed as distr
import gzip
import json
from copy import deepcopy
@baseline_registry.register_trainer(name="schedulesampler-VLNBERT")
class SSTrainer(BaseVLNCETrainer):
def __init__(self, config=None):
super().__init__(config)
        self.max_len = int(config.IL.max_traj_len) # * 0.97 transferred gt path got 0.96 spl
def _make_dirs(self) -> None:
self._make_ckpt_dir()
# os.makedirs(self.lmdb_features_dir, exist_ok=True)
if self.config.EVAL.SAVE_RESULTS:
self._make_results_dir()
def save_checkpoint(self, epoch: int, step_id: int) -> None:
torch.save(
obj={
"state_dict": self.policy.state_dict(),
"config": self.config,
"optim_state": self.optimizer.state_dict(),
"epoch": epoch,
"step_id": step_id,
},
f=os.path.join(self.config.CHECKPOINT_FOLDER, f"ckpt.{epoch}.pth"),
)
def allocate_allowed_episode_by_scene(self):
        ''' discrete waypoint coordinates directly projected from MP3D '''
with gzip.open(
self.config.TASK_CONFIG.DATASET.DATA_PATH.format(
split=self.split)
) as f:
data = json.load(f) # dict_keys(['episodes', 'instruction_vocab'])
        ''' continuous waypoint coordinates from shortest paths in Habitat '''
with gzip.open(
self.config.TASK_CONFIG.TASK.NDTW.GT_PATH.format(
split=self.split)
) as f:
gt_data = json.load(f)
data = data['episodes']
# long_episode_ids = [int(k) for k,v in gt_data.items() if len(v['actions']) > self.config.IL.max_traj_len]
long_episode_ids = []
average_length = (len(data) - len(long_episode_ids))//self.world_size
episodes_by_scene = {}
for ep in data:
scan = ep['scene_id'].split('/')[1]
if scan not in episodes_by_scene.keys():
episodes_by_scene[scan] = []
if ep['episode_id'] not in long_episode_ids:
episodes_by_scene[scan].append(ep['episode_id'])
else:
continue
        ''' split the data of each environment evenly across the GPUs ''' # averaging number set problem
values_to_scenes = {}
values = []
for k,v in episodes_by_scene.items():
values.append(len(v))
if len(v) not in values_to_scenes.keys():
values_to_scenes[len(v)] = []
values_to_scenes[len(v)].append(k)
groups = self.world_size
values.sort(reverse=True)
last_scene_episodes = episodes_by_scene[values_to_scenes[values[0]].pop()]
values = values[1:]
load_balance_groups = [[] for grp in range(groups)]
scenes_groups = [[] for grp in range(groups)]
for v in values:
current_total = [sum(grp) for grp in load_balance_groups]
min_index = np.argmin(current_total)
load_balance_groups[min_index].append(v)
scenes_groups[min_index] += episodes_by_scene[values_to_scenes[v].pop()]
for grp in scenes_groups:
add_number = average_length - len(grp)
grp += last_scene_episodes[:add_number]
last_scene_episodes = last_scene_episodes[add_number:]
# episode_ids = [ep['episode_id'] for ep in data if
# ep['episode_id'] not in long_episode_ids]
# scenes_groups[self.local_rank] = episode_ids[
# self.local_rank:self.world_size * average_length:self.world_size]
return scenes_groups[self.local_rank]
def train_ml(self, in_train=True, train_tf=False, train_rl=False):
self.envs.resume_all()
observations = self.envs.reset()
shift_index = 0
for i, ep in enumerate(self.envs.current_episodes()):
if ep.episode_id in self.trained_episodes:
i = i - shift_index
observations.pop(i)
self.envs.pause_at(i)
shift_index += 1
if self.envs.num_envs == 0:
break
else:
self.trained_episodes.append(ep.episode_id)
if self.envs.num_envs == 0:
return -1
observations = extract_instruction_tokens(
observations, self.config.TASK_CONFIG.TASK.INSTRUCTION_SENSOR_UUID
)
batch = batch_obs(observations, self.device)
batch = apply_obs_transforms_batch(batch, self.obs_transforms)
# expert_uuid = self.config.IL.DAGGER.expert_policy_sensor_uuid
not_done_masks = torch.zeros(
self.envs.num_envs, 1, dtype=torch.bool, device=self.device)
ml_loss = 0.
total_weight = 0.
losses = []
not_done_index = list(range(self.envs.num_envs))
# encoding instructions
if 'VLNBERT' in self.config.MODEL.policy_name:
lang_idx_tokens = batch['instruction']
padding_idx = 0
all_lang_masks = (lang_idx_tokens != padding_idx)
lang_lengths = all_lang_masks.sum(1)
lang_token_type_ids = torch.zeros_like(all_lang_masks,
dtype=torch.long, device=self.device)
h_t, all_language_features = self.policy.net(
mode='language',
lang_idx_tokens=lang_idx_tokens,
lang_masks=all_lang_masks,
)
init_num_envs = self.envs.num_envs
# Init the reward shaping
# last_dist = np.zeros(len(observations), np.float32)
# last_ndtw = np.zeros(len(observations), np.float32)
# for i in range(len(observations)):
# info = self.envs.call_at(i, "get_metrics", {})
# last_dist[i] = info['distance_to_goal']
# last_ndtw[i] = info['ndtw']
init_bs = len(observations)
state_not_dones = np.array([True] * init_bs)
# rewards = []
# hidden_states = []
# policy_log_probs = []
# critic_masks = []
# entropys = []
# # RL waypoint predictor
# way_log_probs = []
# way_rewards = []
# way_rl_masks = []
il_loss = 0.0
for stepk in range(self.max_len):
language_features = all_language_features[not_done_index]
lang_masks = all_lang_masks[not_done_index]
# instruction_embedding = all_instr_embed[not_done_index]
if 'VLNBERT' in self.config.MODEL.policy_name:
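                # recurrent state: the running hidden vector h_t replaces the first language token so the
                # transformer carries the navigation history from previous steps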
language_features = torch.cat(
(h_t.unsqueeze(1), language_features[:,1:,:]), dim=1)
# agent's current position and heading
positions = []; headings = []
for ob_i in range(len(observations)):
agent_state_i = self.envs.call_at(ob_i,
"get_agent_info", {})
positions.append(agent_state_i['position'])
headings.append(agent_state_i['heading'])
if 'VLNBERT' in self.config.MODEL.policy_name:
# candidate waypoints prediction
cand_rgb, cand_depth, \
cand_direction, cand_mask, candidate_lengths, \
batch_angles, batch_distances = self.policy.net(
mode = "waypoint",
waypoint_predictor = self.waypoint_predictor,
observations = batch,
in_train = in_train,
)
# navigation action logits
logits, h_t = self.policy.net(
mode = 'navigation',
observations=batch,
lang_masks=lang_masks,
lang_feats=language_features,
lang_token_type_ids=lang_token_type_ids,
headings=headings,
cand_rgb = cand_rgb,
cand_depth = cand_depth,
cand_direction = cand_direction,
cand_mask = cand_mask,
masks = not_done_masks,
)
# step_rnn_states = torch.zeros(init_bs, 768, device=self.device)
# step_rnn_states[state_not_dones] = h_t
# hidden_states.append(step_rnn_states)
logits = logits.masked_fill_(cand_mask, -float('inf'))
total_weight += len(candidate_lengths)
            # get the resulting distance to the goal of executing each candidate action,
            # plus the agent's current distance to the goal for the stop decision
if train_tf:
cand_dists_to_goal = [[] for _ in range(len(batch_angles))]
oracle_cand_idx = []
oracle_stop = []
for j in range(len(batch_angles)):
for k in range(len(batch_angles[j])):
angle_k = batch_angles[j][k]
forward_k = batch_distances[j][k]
dist_k = self.envs.call_at(j,
"cand_dist_to_goal", {
"angle": angle_k, "forward": forward_k,
})
cand_dists_to_goal[j].append(dist_k)
curr_dist_to_goal = self.envs.call_at(
j, "current_dist_to_goal")
                        # if the agent is already within stopping range of the goal (1.5 m), the oracle picks the stop action
if curr_dist_to_goal < 1.5:
oracle_cand_idx.append(candidate_lengths[j] - 1)
oracle_stop.append(True)
else:
oracle_cand_idx.append(np.argmin(cand_dists_to_goal[j]))
oracle_stop.append(False)
if train_rl:
probs = F.softmax(logits, 1) # sampling an action from model
c = torch.distributions.Categorical(probs)
actions = c.sample().detach()
rl_entropy = torch.zeros(init_bs, device=self.device)
rl_entropy[state_not_dones] = c.entropy()
entropys.append(rl_entropy)
rl_policy_log_probs = torch.zeros(init_bs, device=self.device)
rl_policy_log_probs[state_not_dones] = c.log_prob(actions)
policy_log_probs.append(rl_policy_log_probs)
elif train_tf:
oracle_actions = torch.tensor(oracle_cand_idx, device=self.device).unsqueeze(1)
actions = logits.argmax(dim=-1, keepdim=True)
actions = torch.where(
torch.rand_like(actions, dtype=torch.float) <= self.ratio,
oracle_actions, actions)
current_loss = F.cross_entropy(logits, oracle_actions.squeeze(1), reduction="none")
ml_loss += torch.sum(current_loss)
else:
actions = logits.argmax(dim=-1, keepdim=True)
# # REINFORCE waypoint predictor action
# way_step_mask = np.zeros(init_num_envs, np.float32)
# way_step_reward = np.zeros(init_num_envs, np.float32)
# way_step_logp = torch.zeros(init_num_envs, requires_grad=True).cuda()
# for j in range(logits.size(0)):
# perm_index = not_done_index[j]
# way_step_mask[perm_index] = 1.0
# if ( # for all the non-stopping cases
# actions[j].item() != candidate_lengths[j]-1
# ):
# way_step_logp[perm_index] = \
# batch_way_log_prob[j][actions[j].item()]
# # time penalty
# way_step_reward[perm_index] = -1.0
# else:
# if oracle_stop[j]:
# # nav success reward
# way_step_reward[perm_index] = 3.0
# else:
# way_step_reward[perm_index] = -3.0
# way_rl_masks.append(way_step_mask)
# way_rewards.append(way_step_reward)
# way_log_probs.append(way_step_logp)
# action_angles = []
# action_distances = []
env_actions = []
# rl_actions = np.array([-100] * init_bs)
for j in range(logits.size(0)):
if train_rl and (actions[j].item() == candidate_lengths[j]-1 or stepk == self.max_len-1):
# if RL, force stop at the max step
# action_angles.append(0)
# action_distances.append(0)
env_actions.append({'action':
{'action': 0, 'action_args':{}}})
elif actions[j].item() == candidate_lengths[j]-1:
# action_angles.append(0)
# action_distances.append(0)
env_actions.append({'action':
{'action': 0, 'action_args':{}}})
else:
# action_angles.append(batch_angles[j][actions[j].item()])
# action_distances.append(batch_distances[j][actions[j].item()])
env_actions.append({'action':
{'action': 4, # HIGHTOLOW
'action_args':{
'angle': batch_angles[j][actions[j].item()],
'distance': batch_distances[j][actions[j].item()],
}}})
# self.envs.step(env_actions)
outputs = self.envs.step(env_actions)
observations, _, dones, infos = [list(x) for x in
zip(*outputs)]
h_t = h_t[np.array(dones)==False]
# print('infos', infos)
# import pdb; pdb.set_trace()
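            # A2C reward shaping: +/-2 when the agent stops inside/outside the
            # 3.0m success radius, otherwise +/-1 depending on whether this step
            # reduced the distance to the goal.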
if train_rl:
rl_actions[state_not_dones] = np.array([sk['action']['action'] for sk in env_actions])
# Calculate the mask and reward
current_dist = np.zeros(init_bs, np.float32)
# ndtw_score = np.zeros(init_bs, np.float32)
reward = np.zeros(init_bs, np.float32)
ct_mask = np.ones(init_bs, np.float32)
sbi = 0
for si in range(init_bs):
if state_not_dones[si]:
info = self.envs.call_at(sbi, "get_metrics", {})
current_dist[si] = info['distance_to_goal']
# ndtw_score[si] = info['ndtw']
sbi += 1
if not state_not_dones[si]:
reward[si] = 0.0
ct_mask[si] = 0.0
else:
action_idx = rl_actions[si]
# Target reward
if action_idx == 0: # If the action now is end
if current_dist[si] < 3.0: # Correct
reward[si] = 2.0 # + ndtw_score[si] * 2.0
else: # Incorrect
reward[si] = -2.0
elif action_idx != -100: # The action is not end
# Path fidelity rewards (distance & nDTW)
reward[si] = - (current_dist[si] - last_dist[si])
# ndtw_reward = ndtw_score[si] - last_ndtw[si]
if reward[si] > 0.0: # Quantification
reward[si] = 1.0 # + ndtw_reward
else:
reward[si] = -1.0 # + ndtw_reward
# # Miss the target penalty
# if (last_dist[i] <= 1.0) and (current_dist[i]-last_dist[i] > 0.0):
# reward[i] -= (1.0 - last_dist[i]) * 2.0
rewards.append(reward)
critic_masks.append(ct_mask)
last_dist[:] = current_dist
# last_ndtw[:] = ndtw_score
state_not_dones[state_not_dones] = np.array(dones) == False
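            # pause environments that finished their episode; shift_index keeps
            # the remaining indices aligned with not_done_index and observations
            # as items are popped.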
if sum(dones) > 0:
shift_index = 0
for i in range(self.envs.num_envs):
if dones[i]:
# print(k, self.local_rank)
i = i - shift_index
not_done_index.pop(i)
self.envs.pause_at(i)
if self.envs.num_envs == 0:
break
# def pop_helper(data, index):
# dim = list(data.shape)
# data = data.tolist()
# data.pop(index)
# dim[0] -= 1
# return torch.tensor(data).view(dim).cuda()
# # prev_actions = pop_helper(prev_actions, i)
# # prev_oracle_actions = pop_helper(prev_oracle_actions, i)
# if 'CMA' in self.config.MODEL.policy_name:
# rnn_states = pop_helper(rnn_states, i)
observations.pop(i)
shift_index += 1
if self.envs.num_envs == 0:
break
not_done_masks = torch.ones(
self.envs.num_envs, 1, dtype=torch.bool, device=self.device
)
observations = extract_instruction_tokens(
observations,
self.config.TASK_CONFIG.TASK.INSTRUCTION_SENSOR_UUID,
)
batch = batch_obs(observations, self.device)
batch = apply_obs_transforms_batch(batch, self.obs_transforms)
# # REINFORCE waypoint prediction
# way_rl_loss = 0.0
# way_rl_total = 0.0
# way_rl_length = len(way_rewards)
# way_discount_reward = np.zeros(init_num_envs, np.float32)
# for t in range(way_rl_length-1, -1, -1):
# way_discount_reward = way_discount_reward * 0.90 + way_rewards[t]
# way_r_ = Variable(torch.from_numpy(way_discount_reward.copy()),
# requires_grad=False).cuda()
# way_mask_ = Variable(torch.from_numpy(way_rl_masks[t]),
# requires_grad=False).cuda()
# way_rl_loss += (-way_log_probs[t] * way_r_ * way_mask_).sum()
# way_rl_total = way_rl_total + np.sum(way_rl_masks[t])
# way_rl_loss /= way_rl_total
# A2C
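        # accumulate the discounted return backwards in time and combine the
        # policy-gradient term with a 0.5-weighted critic (value) loss and a
        # small entropy bonus, normalised by the number of active env steps.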
if train_rl:
rl_loss = 0.
length = len(rewards)
discount_reward = np.zeros(init_bs, np.float32)
rl_total = 0
for t in range(length-1, -1, -1):
discount_reward = discount_reward * 0.90 + rewards[t] # If it ended, the reward will be 0
mask_ = Variable(torch.from_numpy(critic_masks[t]), requires_grad=False).to(self.device)
clip_reward = discount_reward.copy()
r_ = Variable(torch.from_numpy(clip_reward), requires_grad=False).to(self.device)
v_ = self.policy.net(
mode = 'critic',
post_states = hidden_states[t])
a_ = (r_ - v_).detach()
rl_loss += (-policy_log_probs[t] * a_ * mask_).sum()
rl_loss += (((r_ - v_) ** 2) * mask_).sum() * 0.5 # 1/2 L2 loss
rl_loss += (- 0.01 * entropys[t] * mask_).sum()
rl_total = rl_total + np.sum(critic_masks[t])
rl_loss = rl_loss / rl_total
il_loss += rl_loss
elif train_tf:
il_loss = ml_loss / total_weight # 0.20 factor
return il_loss #, way_rl_loss
def train(self) -> None:
split = self.config.TASK_CONFIG.DATASET.SPLIT
self.config.defrost()
self.config.TASK_CONFIG.TASK.NDTW.SPLIT = split
self.config.TASK_CONFIG.TASK.SDTW.SPLIT = split
self.config.TASK_CONFIG.ENVIRONMENT.MAX_EPISODE_STEPS = self.config.IL.max_traj_len
if (
self.config.IL.DAGGER.expert_policy_sensor
not in self.config.TASK_CONFIG.TASK.SENSORS
):
self.config.TASK_CONFIG.TASK.SENSORS.append(
self.config.IL.DAGGER.expert_policy_sensor
)
self.config.TASK_CONFIG.ENVIRONMENT.ITERATOR_OPTIONS.MAX_SCENE_REPEAT_STEPS = (
-1
)
self.config.NUM_ENVIRONMENTS = self.config.IL.batch_size // len(
self.config.SIMULATOR_GPU_IDS)
self.config.use_pbar = not is_slurm_batch_job()
        ''' *** set up the 12-view panoramic cameras (RGB + DEPTH) '''
resize_config = self.config.RL.POLICY.OBS_TRANSFORMS.RESIZER_PER_SENSOR.SIZES
config = self.config.TASK_CONFIG
camera_orientations = get_camera_orientations(12)
# sensor_uuids = []
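        # register 12 copies of the RGB and DEPTH sensors, one per heading from
        # get_camera_orientations(12) (every 30 degrees), so that a full panorama
        # is available to the waypoint predictor.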
for sensor_type in ["RGB", "DEPTH"]:
resizer_size = dict(resize_config)[sensor_type.lower()]
sensor = getattr(config.SIMULATOR, f"{sensor_type}_SENSOR")
for action, orient in camera_orientations.items():
camera_template = f"{sensor_type}_{action}"
camera_config = deepcopy(sensor)
camera_config.ORIENTATION = camera_orientations[action]
camera_config.UUID = camera_template.lower()
# sensor_uuids.append(camera_config.UUID)
setattr(config.SIMULATOR, camera_template, camera_config)
config.SIMULATOR.AGENT_0.SENSORS.append(camera_template)
resize_config.append((camera_template.lower(), resizer_size))
self.config.RL.POLICY.OBS_TRANSFORMS.RESIZER_PER_SENSOR.SIZES = resize_config
self.config.TASK_CONFIG = config
self.config.SENSORS = config.SIMULATOR.AGENT_0.SENSORS
# print('deal with choosing images')
# import pdb; pdb.set_trace()
self.config.freeze()
self.world_size = self.config.GPU_NUMBERS
self.local_rank = self.config.local_rank
self.batch_size = self.config.IL.batch_size
torch.cuda.set_device(self.device)
if self.world_size > 1:
distr.init_process_group(backend='nccl', init_method='env://')
self.device = self.config.TORCH_GPU_IDS[self.local_rank]
self.config.defrost()
self.config.TORCH_GPU_ID = self.config.TORCH_GPU_IDS[self.local_rank]
self.config.freeze()
torch.cuda.set_device(self.device)
# print(self.local_rank,self.device)
self.split = split
episode_ids = self.allocate_allowed_episode_by_scene()
# self.temp_envs = get_env_class(self.config.ENV_NAME)(self.config)
# self.temp_envs.episodes contains all 10819 GT samples
# episodes_allowed is slightly smaller -- 10783 valid episodes
# check the usage of self.temp_envs._env.sim.is_navigable([0,0,0])
self.envs = construct_envs(
self.config, get_env_class(self.config.ENV_NAME),
episodes_allowed=episode_ids,
auto_reset_done=False
)
num_epoches_per_ratio = int(np.ceil(self.config.IL.epochs/self.config.IL.decay_time))
print('\nFinished constructing environments')
dataset_length = sum(self.envs.number_of_episodes)
print('local rank:', self.local_rank, '|', 'dataset length:', dataset_length)
observation_space = self.envs.observation_spaces[0]
action_space = self.envs.action_spaces[0]
self.obs_transforms = get_active_obs_transforms(self.config)
observation_space = apply_obs_transforms_obs_space(
observation_space, self.obs_transforms
)
# self.inflection_weight = torch.tensor([1.0,
# self.config.IL.inflection_weight_coef], device=self.device)
# import pdb; pdb.set_trace()
print('\nInitializing policy network ...')
self._initialize_policy(
self.config,
self.config.IL.load_from_ckpt,
observation_space=observation_space,
action_space=action_space,
)
# import pdb; pdb.set_trace()
print('\nTraining starts ...')
with TensorboardWriter(
self.config.TENSORBOARD_DIR,
flush_secs=self.flush_secs,
purge_step=0,
) as writer:
AuxLosses.activate()
batches_per_epoch = int(np.ceil(dataset_length/self.batch_size))
for epoch in range(self.start_epoch, self.config.IL.epochs):
epoch_str = f"{epoch + 1}/{self.config.IL.epochs}"
t_ = (
tqdm.trange(
batches_per_epoch, leave=False, dynamic_ncols=True
)
                    if self.config.use_pbar and (self.local_rank < 1)
else range(batches_per_epoch)
)
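                # scheduled sampling: self.ratio is the probability of taking the
                # oracle action and is decayed stage-wise (roughly IL.decay_time
                # stages) over training.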
self.ratio = np.power(self.config.IL.schedule_ratio, epoch//num_epoches_per_ratio + 1)
self.trained_episodes = []
                # reconstruct envs every epoch to ensure the same data is loaded
if epoch != self.start_epoch:
self.envs = None
self.envs = construct_envs(
self.config, get_env_class(self.config.ENV_NAME),
episodes_allowed=episode_ids,
auto_reset_done=False
)
for batch_idx in t_:
# if batch_idx % 2 == 0:
# loss = self.train_ml(train_rl=False)
# if batch_idx != len(t_)-1:
# continue
# else:
loss = self.train_ml( # way_rl_loss
in_train=True,
train_tf=True, train_rl=False)
# loss += self.train_ml(train_rl=False)
if loss == -1:
break
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
losses = [loss]
# self.way_rl_optimizer.zero_grad()
# way_rl_loss.backward()
# self.way_rl_optimizer.step()
if self.world_size > 1:
for i in range(len(losses)):
reduce_loss(losses[i], self.local_rank, self.world_size)
losses[i] = losses[i].item()
else:
for i in range(len(losses)):
losses[i] = losses[i].item()
loss = losses[0]
if self.config.use_pbar:
if self.local_rank < 1: # seems can be removed
t_.set_postfix(
{
"epoch": epoch_str,
"loss": round(loss, 4),
}
)
writer.add_scalar("loss", loss, self.step_id)
self.step_id += 1 # noqa: SIM113
if self.local_rank < 1: # and epoch % 3 == 0:
self.save_checkpoint(epoch, self.step_id)
AuxLosses.deactivate()
| 28,887 | 42.310345 | 116 | py |
Discrete-Continuous-VLN | Discrete-Continuous-VLN-main/vlnce_baselines/common/aux_losses.py | import torch
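# A small global registry for auxiliary losses: modules register named loss
# tensors with a weight during the forward pass, and reduce() combines them
# into a single scalar via a masked mean per loss, scaled by its weight.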
class _AuxLosses:
def __init__(self):
self._losses = {}
self._loss_alphas = {}
self._is_active = False
def clear(self):
self._losses.clear()
self._loss_alphas.clear()
def register_loss(self, name, loss, alpha=1.0):
assert self.is_active()
assert name not in self._losses
self._losses[name] = loss
self._loss_alphas[name] = alpha
def get_loss(self, name):
return self._losses[name]
def reduce(self, mask):
assert self.is_active()
total = torch.tensor(0.0).cuda()
for k in self._losses.keys():
k_loss = torch.masked_select(self._losses[k], mask).mean()
total = total + self._loss_alphas[k] * k_loss
return total
def is_active(self):
return self._is_active
def activate(self):
self._is_active = True
def deactivate(self):
self._is_active = False
AuxLosses = _AuxLosses()
| 987 | 20.955556 | 70 | py |
Discrete-Continuous-VLN | Discrete-Continuous-VLN-main/vlnce_baselines/common/recollection_dataset.py | import gzip
import json
from collections import defaultdict, deque
import numpy as np
import torch
import tqdm
from gym import Space
from habitat.config.default import Config
from habitat.sims.habitat_simulator.actions import HabitatSimActions
from habitat_baselines.common.environments import get_env_class
from habitat_baselines.common.obs_transformers import (
apply_obs_transforms_obs_space,
get_active_obs_transforms,
)
from habitat_extensions.task import ALL_ROLES_MASK, RxRVLNCEDatasetV1
from vlnce_baselines.common.env_utils import construct_envs
from vlnce_baselines.common.utils import extract_instruction_tokens
class TeacherRecollectionDataset(torch.utils.data.IterableDataset):
def __init__(self, config: Config):
super().__init__()
self.config = config
# self._preload = []
self._preload = deque()
self.world_size = self.config.GPU_NUMBERS
self.rank = self.config.local_rank
assert (
config.IL.RECOLLECT_TRAINER.preload_size >= config.IL.batch_size
), "preload size must be greater than batch size."
self.envs = None
self._env_observations = None
if config.IL.use_iw:
self.inflec_weights = torch.tensor(
[1.0, config.IL.inflection_weight_coef]
)
else:
self.inflec_weights = torch.tensor([1.0, 1.0])
if self.config.IL.RECOLLECT_TRAINER.preload_trajectories_file:
self.config.defrost()
self.config.IL.RECOLLECT_TRAINER.trajectories_file = \
self.config.IL.RECOLLECT_TRAINER.trajectories_file[
:-8] + '_w' + \
str(self.world_size) + '_r' + str(self.rank) + '.json.gz'
self.config.freeze()
with gzip.open(
config.IL.RECOLLECT_TRAINER.trajectories_file, "rt"
) as f:
self.trajectories = json.load(f)
else:
self.trajectories = self.collect_dataset()
self.initialize_sims()
def initialize_sims(self):
config = self.config.clone()
config.defrost()
config.TASK_CONFIG.MEASUREMENTS = []
config.freeze()
self.envs = construct_envs(
config,
get_env_class(config.ENV_NAME),
episodes_allowed=list(self.trajectories.keys()),
)
self.length = sum(self.envs.number_of_episodes)
self.obs_transforms = get_active_obs_transforms(self.config)
self._observation_space = apply_obs_transforms_obs_space(
self.envs.observation_spaces[0], self.obs_transforms
)
self.env_step = [0 for _ in range(self.envs.num_envs)]
self._env_observations = [[] for _ in range(self.envs.num_envs)]
observations = self.envs.reset()
observations = extract_instruction_tokens(
observations,
self.config.TASK_CONFIG.TASK.INSTRUCTION_SENSOR_UUID,
)
for i, ep in enumerate(self.envs.current_episodes()):
path_step = self.trajectories[str(ep.episode_id)][0]
self._env_observations[i].append(
(
observations[i],
path_step[0], # prev_action
path_step[2], # oracle_action
)
)
@property
def batch_size(self):
return self.config.IL.batch_size
@property
def observation_space(self) -> Space:
assert self.envs is not None, "Simulator must first be loaded."
assert self._observation_space is not None
return self._observation_space
@property
def action_space(self) -> Space:
assert self.envs is not None, "Simulator must first be loaded."
return self.envs.action_spaces[0]
def close_sims(self):
self.envs.close()
del self.envs
del self._env_observations
self.envs = None
self._env_observations = None
def collect_dataset(self):
r"""Uses the ground truth trajectories to create a teacher forcing
datset for a given split. Loads both guide and follower episodes.
"""
trajectories = defaultdict(list)
split = self.config.TASK_CONFIG.DATASET.SPLIT
if "{role}" in self.config.IL.RECOLLECT_TRAINER.gt_file:
gt_data = {}
for role in RxRVLNCEDatasetV1.annotation_roles:
if (
ALL_ROLES_MASK not in self.config.TASK_CONFIG.DATASET.ROLES
and role not in self.config.TASK_CONFIG.DATASET.ROLES
):
continue
with gzip.open(
self.config.IL.RECOLLECT_TRAINER.gt_file.format(
split=split, role=role
),
"rt",
) as f:
gt_data.update(json.load(f))
else:
with gzip.open(
self.config.IL.RECOLLECT_TRAINER.gt_path.format(split=split)
) as f:
gt_data = json.load(f)
t = (
tqdm.tqdm(gt_data.items(), "GT Collection")
if self.config.use_pbar
else gt_data.items()
)
for episode_id, trajectory in t:
if (
self.config.IL.RECOLLECT_TRAINER.max_traj_len != -1
and len(trajectory["actions"])
> self.config.IL.RECOLLECT_TRAINER.max_traj_len
) or (
self.config.IL.RECOLLECT_TRAINER.min_traj_len != -1
and len(trajectory["actions"])
< self.config.IL.RECOLLECT_TRAINER.min_traj_len
):
continue
for i, action in enumerate(trajectory["actions"]):
prev_action = (
trajectories[episode_id][i - 1][1]
if i
else HabitatSimActions.STOP
)
# [prev_action, action, oracle_action]
trajectories[episode_id].append([prev_action, action, action])
trajectories = dict(list(trajectories.items())[self.rank::self.world_size])
self.config.defrost()
self.config.IL.RECOLLECT_TRAINER.trajectories_file = \
self.config.IL.RECOLLECT_TRAINER.trajectories_file[:-8]+'_w'+ \
str(self.world_size)+'_r'+str(self.rank) + '.json.gz'
self.config.freeze()
with gzip.open(
self.config.IL.RECOLLECT_TRAINER.trajectories_file, "wt"
) as f:
f.write(json.dumps(trajectories))
return trajectories
def _load_next(self):
"""
Episode length is currently not considered. We were previously batching episodes
together with similar lengths. Not sure if we need to bring that back.
"""
# self.rank = 0
if len(self._preload):
# out = self._preload[self.rank]
# self._preload = self._preload[self.world_size:]
# return out
return self._preload.popleft()
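        # refill the preload queue: step every env along its ground-truth
        # trajectory and, whenever an env reports done, append that episode's
        # (observations, prev_actions, oracle_actions) tuple to the queue.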
while (
len(self._preload) < self.config.IL.RECOLLECT_TRAINER.preload_size
):
current_episodes = self.envs.current_episodes()
prev_eps = current_episodes
# get the next action for each env
actions = [
self.trajectories[str(ep.episode_id)][self.env_step[i]][1]
for i, ep in enumerate(current_episodes)
]
outputs = self.envs.step(actions)
observations, _, dones, _ = [list(x) for x in zip(*outputs)]
observations = extract_instruction_tokens(
observations,
self.config.TASK_CONFIG.TASK.INSTRUCTION_SENSOR_UUID,
)
current_episodes = self.envs.current_episodes()
for i in range(self.envs.num_envs):
self.env_step[i] += 1
if dones[i]:
assert len(self._env_observations[i]) == len(
self.trajectories[str(prev_eps[i].episode_id)]
), "Collected episode does not match the step count of trajectory"
self._preload.append(
(
[o[0] for o in self._env_observations[i]],
[o[1] for o in self._env_observations[i]],
[o[2] for o in self._env_observations[i]],
)
)
self._env_observations[i] = []
self.env_step[i] = 0
path_step = self.trajectories[
str(current_episodes[i].episode_id)
][self.env_step[i]]
self._env_observations[i].append(
(
observations[i],
path_step[0], # prev_action
path_step[2], # oracle_action
)
)
assert (
len(self._env_observations[i])
<= self.config.TASK_CONFIG.ENVIRONMENT.MAX_EPISODE_STEPS
), "Trajectories should be no more than the maximum episode steps."
# out = self._preload[self.rank]
# self._preload = self._preload[self.world_size:]
# return out
return self._preload.popleft()
    def __next__(self):
        """Takes about 1s once self._load_next() has finished with a batch
size of 5. For this reason, we probably don't need to use extra workers.
"""
x = self._load_next()
obs, prev_actions, oracle_actions = x
# transpose obs
obs_t = defaultdict(list)
for k in obs[0]:
for i in range(len(obs)):
obs_t[k].append(obs[i][k])
obs_t[k] = np.array(obs_t[k])
for k, v in obs_t.items():
obs_t[k] = torch.from_numpy(np.copy(v))
prev_actions = torch.from_numpy(np.copy(prev_actions))
oracle_actions = torch.from_numpy(np.copy(oracle_actions))
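        # inflection points are steps where the oracle action changes; when
        # IL.use_iw is enabled they are up-weighted by inflection_weight_coef.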
inflections = torch.cat(
[
torch.tensor([1], dtype=torch.long),
(oracle_actions[1:] != oracle_actions[:-1]).long(),
]
)
return (
obs_t,
prev_actions,
oracle_actions,
self.inflec_weights[inflections],
)
def __iter__(self):
worker_info = torch.utils.data.get_worker_info()
if worker_info is not None:
assert (
worker_info.num_workers == 1
), "multiple workers not supported."
return self
| 10,692 | 34.88255 | 88 | py |
Discrete-Continuous-VLN | Discrete-Continuous-VLN-main/vlnce_baselines/common/utils.py | from typing import Any, Dict, List
import torch
import torch.distributed as dist
import numpy as np
import copy
def extract_instruction_tokens(
observations: List[Dict],
instruction_sensor_uuid: str,
tokens_uuid: str = "tokens",
) -> Dict[str, Any]:
r"""Extracts instruction tokens from an instruction sensor if the tokens
exist and are in a dict structure.
"""
for i in range(len(observations)):
if (
isinstance(observations[i][instruction_sensor_uuid], dict)
and tokens_uuid in observations[i][instruction_sensor_uuid]
):
observations[i][instruction_sensor_uuid] = observations[i][
instruction_sensor_uuid
]["tokens"]
else:
break
return observations
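# Gathers a tensor (or a list of numbers) from every distributed rank with
# dist.all_gather and returns the list of per-rank tensors.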
def gather_list_and_concat(list_of_nums,world_size):
if not torch.is_tensor(list_of_nums):
tensor = torch.Tensor(list_of_nums).cuda()
else:
        if not list_of_nums.is_cuda:
tensor = list_of_nums.cuda()
else:
tensor = list_of_nums
gather_t = [torch.ones_like(tensor) for _ in
range(world_size)]
dist.all_gather(gather_t, tensor)
return gather_t
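# Converts a discrete waypoint path into a continuous one by inserting points
# every `amount` meters (measured in the horizontal x-z plane) along each
# segment. Illustrative example: two waypoints 1m apart with amount=0.25
# expand to five points spaced 0.25m apart.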
def dis_to_con(path, amount=0.25):
starts = path[:-1]
ends = path[1:]
new_path = [path[0]]
for s, e in zip(starts,ends):
vec = np.array(e) - np.array(s)
ratio = amount/np.linalg.norm(vec[[0,2]])
unit = vec*ratio
times = int(1/ratio)
for i in range(times):
if i != times - 1:
location = np.array(new_path[-1])+unit
new_path.append(location.tolist())
new_path.append(e)
return new_path | 1,716 | 30.218182 | 76 | py |
Discrete-Continuous-VLN | Discrete-Continuous-VLN-main/vlnce_baselines/common/base_il_trainer.py | import json
import jsonlines
import os
import time
import warnings
from collections import defaultdict
from typing import Dict, List
import torch
import torch.nn.functional as F
from torch.nn.parallel import DistributedDataParallel as DDP
import torch.distributed as distr
import torch.multiprocessing as mp
import gzip
import math
from copy import deepcopy
import tqdm
from gym import Space
from habitat import Config, logger
from habitat.utils.visualizations.utils import append_text_to_image
from habitat_baselines.common.base_il_trainer import BaseILTrainer
from habitat_baselines.common.baseline_registry import baseline_registry
from habitat_baselines.common.environments import get_env_class
from habitat_baselines.common.obs_transformers import (
apply_obs_transforms_batch,
apply_obs_transforms_obs_space,
get_active_obs_transforms,
)
from habitat_extensions.measures import Position
from habitat_baselines.common.tensorboard_utils import TensorboardWriter
from habitat_baselines.utils.common import batch_obs, generate_video
from habitat_baselines.utils.common import (
get_checkpoint_id,
poll_checkpoint_folder,
)
from habitat_extensions.utils import observations_to_image
from vlnce_baselines.common.aux_losses import AuxLosses
from vlnce_baselines.common.env_utils import (
construct_envs_auto_reset_false,
construct_envs,
is_slurm_batch_job,
)
from vlnce_baselines.common.utils import *
from habitat_extensions.measures import NDTW
from fastdtw import fastdtw
from ..utils import get_camera_orientations
from ..models.utils import (
length2mask, dir_angle_feature, dir_angle_feature_with_ele,
)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
import tensorflow as tf # noqa: F401
class BaseVLNCETrainer(BaseILTrainer):
r"""A base trainer for VLN-CE imitation learning."""
supported_tasks: List[str] = ["VLN-v0"]
def __init__(self, config=None):
super().__init__(config)
self.policy = None
self.device = (
torch.device("cuda", self.config.TORCH_GPU_ID)
if torch.cuda.is_available()
else torch.device("cpu")
)
self.obs_transforms = []
self.start_epoch = 0
self.step_id = 0
def _initialize_policy(
self,
config: Config,
load_from_ckpt: bool,
observation_space: Space,
action_space: Space,
) -> None:
policy = baseline_registry.get_policy(self.config.MODEL.policy_name)
self.policy = policy.from_config(
config=config,
observation_space=observation_space,
action_space=action_space,
)
''' initialize the waypoint predictor here '''
from waypoint_prediction.TRM_net import BinaryDistPredictor_TRM
self.waypoint_predictor = BinaryDistPredictor_TRM(device=self.device)
self.waypoint_predictor.load_state_dict(
torch.load(
'./waypoint_prediction/checkpoints/check_val_best_avg_wayscore',
map_location = torch.device('cpu'),
)['predictor']['state_dict']
)
for param in self.waypoint_predictor.parameters():
param.requires_grad = False
self.policy.to(self.device)
self.waypoint_predictor.to(self.device)
self.num_recurrent_layers = self.policy.net.num_recurrent_layers
if self.config.GPU_NUMBERS > 1:
print('Using', self.config.GPU_NUMBERS,'GPU!')
self.policy.net = DDP(self.policy.net.to(self.device), device_ids=[self.device],
output_device=self.device, find_unused_parameters=True, broadcast_buffers=False)
# self.waypoint_predictor = DDP(self.waypoint_predictor.to(self.device), device_ids=[self.device],
# output_device=self.device, find_unused_parameters=True, broadcast_buffers=False)
self.optimizer = torch.optim.AdamW(
self.policy.parameters(), lr=self.config.IL.lr,
)
if load_from_ckpt:
ckpt_path = config.IL.ckpt_to_load
ckpt_dict = self.load_checkpoint(ckpt_path, map_location="cpu")
if 'module' in list(ckpt_dict['state_dict'].keys())[0] and self.config.GPU_NUMBERS == 1:
self.policy.net = torch.nn.DataParallel(self.policy.net.to(self.device),
device_ids=[self.device], output_device=self.device)
self.policy.load_state_dict(ckpt_dict["state_dict"])
self.policy.net = self.policy.net.module
# self.waypoint_predictor = torch.nn.DataParallel(self.waypoint_predictor.to(self.device),
# device_ids=[self.device], output_device=self.device)
else:
self.policy.load_state_dict(ckpt_dict["state_dict"])
if config.IL.is_requeue:
self.optimizer.load_state_dict(ckpt_dict["optim_state"])
self.start_epoch = ckpt_dict["epoch"] + 1
self.step_id = ckpt_dict["step_id"]
logger.info(f"Loaded weights from checkpoint: {ckpt_path}")
params = sum(param.numel() for param in self.policy.parameters())
params_t = sum(
p.numel() for p in self.policy.parameters() if p.requires_grad
)
logger.info(f"Agent parameters: {params}. Trainable: {params_t}")
logger.info("Finished setting up policy.")
def load_checkpoint(self, checkpoint_path, *args, **kwargs) -> Dict:
return torch.load(checkpoint_path, *args, **kwargs)
@staticmethod
def _pause_envs(
envs_to_pause,
envs,
recurrent_hidden_states,
not_done_masks,
prev_actions,
batch,
rgb_frames=None,
):
# pausing envs with no new episode
if len(envs_to_pause) > 0:
state_index = list(range(envs.num_envs))
for idx in reversed(envs_to_pause):
state_index.pop(idx)
envs.pause_at(idx)
# indexing along the batch dimensions
recurrent_hidden_states = recurrent_hidden_states[state_index]
not_done_masks = not_done_masks[state_index]
prev_actions = prev_actions[state_index]
for k, v in batch.items():
batch[k] = v[state_index]
if rgb_frames is not None:
rgb_frames = [rgb_frames[i] for i in state_index]
return (
envs,
recurrent_hidden_states,
not_done_masks,
prev_actions,
batch,
rgb_frames,
)
def _eval_checkpoint(
self,
checkpoint_path: str,
writer: TensorboardWriter,
checkpoint_index: int = 0,
) -> None:
r"""Evaluates a single checkpoint.
Args:
checkpoint_path: path of checkpoint
writer: tensorboard writer object
checkpoint_index: index of the current checkpoint
Returns:
None
"""
if self.local_rank < 1:
logger.info(f"checkpoint_path: {checkpoint_path}")
if self.config.EVAL.USE_CKPT_CONFIG:
config = self._setup_eval_config(
self.load_checkpoint(checkpoint_path, map_location="cpu")[
"config"
]
)
else:
config = self.config.clone()
config.defrost()
# config.TASK_CONFIG.DATASET.SPLIT = config.EVAL.SPLIT
# config.TASK_CONFIG.DATASET.ROLES = ["guide"]
# config.TASK_CONFIG.DATASET.LANGUAGES = config.EVAL.LANGUAGES
# config.TASK_CONFIG.TASK.NDTW.SPLIT = config.EVAL.SPLIT
# config.TASK_CONFIG.TASK.SDTW.SPLIT = config.EVAL.SPLIT
config.TASK_CONFIG.ENVIRONMENT.ITERATOR_OPTIONS.SHUFFLE = False
config.TASK_CONFIG.ENVIRONMENT.ITERATOR_OPTIONS.MAX_SCENE_REPEAT_STEPS = (
-1
)
config.IL.ckpt_to_load = checkpoint_path
if len(config.VIDEO_OPTION) > 0:
config.defrost()
config.TASK_CONFIG.TASK.MEASUREMENTS.append("TOP_DOWN_MAP_VLNCE")
config.TASK_CONFIG.TASK.MEASUREMENTS.append("COLLISIONS")
config.freeze()
if config.EVAL.SAVE_RESULTS:
fname = os.path.join(
config.RESULTS_DIR,
f"stats_ckpt_{checkpoint_index}_{config.TASK_CONFIG.DATASET.SPLIT}.json",
)
if os.path.exists(fname):
print("skipping -- evaluation exists.")
return
envs = construct_envs(
config, get_env_class(config.ENV_NAME),
auto_reset_done=False,
episodes_allowed=self.traj
)
dataset_length = sum(envs.number_of_episodes)
print('local rank:', self.local_rank, '|', 'dataset length:', dataset_length)
obs_transforms = get_active_obs_transforms(config)
observation_space = apply_obs_transforms_obs_space(
envs.observation_spaces[0], obs_transforms
)
self._initialize_policy(
config,
load_from_ckpt=True,
observation_space=observation_space,
action_space=envs.action_spaces[0],
)
self.policy.eval()
self.waypoint_predictor.eval()
observations = envs.reset()
observations = extract_instruction_tokens(
observations, self.config.TASK_CONFIG.TASK.INSTRUCTION_SENSOR_UUID
)
batch = batch_obs(observations, self.device)
batch = apply_obs_transforms_batch(batch, obs_transforms)
if 'CMA' in self.config.MODEL.policy_name:
rnn_states = torch.zeros(
envs.num_envs,
self.num_recurrent_layers,
config.MODEL.STATE_ENCODER.hidden_size,
device=self.device,
)
elif 'VLNBERT' in self.config.MODEL.policy_name:
h_t = torch.zeros(
envs.num_envs, 768,
device=self.device,
)
language_features = torch.zeros(
envs.num_envs, 80, 768,
device=self.device,
)
# prev_actions = torch.zeros(
# envs.num_envs, 1, device=self.device, dtype=torch.long
# )
not_done_masks = torch.zeros(
envs.num_envs, 1, dtype=torch.uint8, device=self.device
)
stats_episodes = {}
rgb_frames = [[] for _ in range(envs.num_envs)]
if len(config.VIDEO_OPTION) > 0:
os.makedirs(config.VIDEO_DIR, exist_ok=True)
if config.EVAL.EPISODE_COUNT == -1:
episodes_to_eval = sum(envs.number_of_episodes)
else:
episodes_to_eval = min(
config.EVAL.EPISODE_COUNT, sum(envs.number_of_episodes)
)
pbar = tqdm.tqdm(total=episodes_to_eval) if config.use_pbar else None
log_str = (
f"[Ckpt: {checkpoint_index}]"
" [Episodes evaluated: {evaluated}/{total}]"
" [Time elapsed (s): {time}]"
)
start_time = time.time()
total_weight = 0.
ml_loss = 0.
while envs.num_envs > 0 and len(stats_episodes) < episodes_to_eval:
current_episodes = envs.current_episodes()
positions = []; headings = []
for ob_i in range(len(current_episodes)):
agent_state_i = envs.call_at(ob_i,
"get_agent_info", {})
positions.append(agent_state_i['position'])
headings.append(agent_state_i['heading'])
with torch.no_grad():
if 'CMA' in self.config.MODEL.policy_name:
# instructions
instruction_embedding, all_lang_masks = self.policy.net(
mode = "language",
observations = batch,
)
# candidate waypoints prediction
cand_rgb, cand_depth, \
cand_direction, cand_mask, candidate_lengths, \
batch_angles, batch_distances = self.policy.net(
mode = "waypoint",
waypoint_predictor = self.waypoint_predictor,
observations = batch,
in_train = False,
)
# navigation action logits
logits, rnn_states = self.policy.net(
mode = 'navigation',
observations = batch,
instruction = instruction_embedding,
text_mask = all_lang_masks,
rnn_states = rnn_states,
headings = headings,
cand_rgb = cand_rgb,
cand_depth = cand_depth,
cand_direction = cand_direction,
cand_mask = cand_mask,
masks = not_done_masks,
)
logits = logits.masked_fill_(cand_mask, -float('inf'))
elif 'VLNBERT' in self.config.MODEL.policy_name:
# instruction
lang_idx_tokens = batch['instruction']
padding_idx = 0
lang_masks = (lang_idx_tokens != padding_idx)
lang_token_type_ids = torch.zeros_like(lang_masks,
dtype=torch.long, device=self.device)
h_t_flag = h_t.sum(1)==0.0
h_t_init, language_features = self.policy.net(
mode='language',
lang_idx_tokens=lang_idx_tokens,
lang_masks=lang_masks)
h_t[h_t_flag] = h_t_init[h_t_flag]
language_features = torch.cat(
(h_t.unsqueeze(1), language_features[:,1:,:]), dim=1)
# candidate waypoints prediction
cand_rgb, cand_depth, \
cand_direction, cand_mask, candidate_lengths, \
batch_angles, batch_distances = self.policy.net(
mode = "waypoint",
waypoint_predictor = self.waypoint_predictor,
observations = batch,
in_train = False,
)
# navigation action logits
logits, h_t = self.policy.net(
mode = 'navigation',
observations=batch,
lang_masks=lang_masks,
lang_feats=language_features,
lang_token_type_ids=lang_token_type_ids,
headings=headings,
cand_rgb = cand_rgb,
cand_depth = cand_depth,
cand_direction = cand_direction,
cand_mask = cand_mask,
masks = not_done_masks,
)
logits = logits.masked_fill_(cand_mask, -float('inf'))
# high-to-low actions in environments
actions = logits.argmax(dim=-1, keepdim=True)
env_actions = []
for j in range(logits.size(0)):
if actions[j].item() == candidate_lengths[j]-1:
env_actions.append({'action':
{'action': 0, 'action_args':{}}})
else:
env_actions.append({'action':
{'action': 4, # HIGHTOLOW
'action_args':{
'angle': batch_angles[j][actions[j].item()],
'distance': batch_distances[j][actions[j].item()],
}}})
outputs = envs.step(env_actions)
observations, _, dones, infos = [list(x) for x in zip(*outputs)]
for j, ob in enumerate(observations):
if env_actions[j]['action']['action'] == 0:
continue
else:
envs.call_at(j,
'change_current_path',
{'new_path': ob.pop('positions'),
'collisions': ob.pop('collisions')}
)
not_done_masks = torch.tensor(
[[0] if done else [1] for done in dones],
dtype=torch.uint8, device=self.device)
# reset envs and observations if necessary
for i in range(envs.num_envs):
if len(config.VIDEO_OPTION) > 0:
frame = observations_to_image(observations[i], infos[i])
frame = append_text_to_image(
frame, current_episodes[i].instruction.instruction_text
)
rgb_frames[i].append(frame)
if not dones[i]:
continue
info = infos[i]
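                # the episode just finished: compute evaluation metrics (distance
                # to goal, success within 3m after stopping, oracle success, path
                # length, collision rate, SPL and nDTW against the ground truth).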
metric = {}
metric['steps_taken'] = info['steps_taken']
ep_id = str(envs.current_episodes()[i].episode_id)
gt_path = np.array(self.gt_data[ep_id]['locations']).astype(np.float)
if 'current_path' in envs.current_episodes()[i].info.keys():
positions_ = np.array(envs.current_episodes()[i].info['current_path']).astype(np.float)
collisions_ = np.array(envs.current_episodes()[i].info['collisions'])
assert collisions_.shape[0] == positions_.shape[0] - 1
else:
positions_ = np.array(dis_to_con(np.array(info['position']['position']))).astype(np.float)
distance = np.array(info['position']['distance']).astype(np.float)
metric['distance_to_goal'] = distance[-1]
metric['success'] = 1. if distance[-1] <= 3. and env_actions[i]['action']['action'] == 0 else 0.
metric['oracle_success'] = 1. if (distance <= 3.).any() else 0.
metric['path_length'] = np.linalg.norm(positions_[1:] - positions_[:-1],axis=1).sum()
metric['collisions'] = collisions_.mean()
gt_length = distance[0]
metric['spl'] = metric['success']*gt_length/max(gt_length,metric['path_length'])
act_con_path = positions_
gt_con_path = np.array(gt_path).astype(np.float)
dtw_distance = fastdtw(act_con_path, gt_con_path, dist=NDTW.euclidean_distance)[0]
nDTW = np.exp(-dtw_distance / (len(gt_con_path) * config.TASK_CONFIG.TASK.SUCCESS_DISTANCE))
metric['ndtw'] = nDTW
stats_episodes[current_episodes[i].episode_id] = metric
observations[i] = envs.reset_at(i)[0]
if 'CMA' in self.config.MODEL.policy_name:
rnn_states[i] *= 0.
elif 'VLNBERT' in self.config.MODEL.policy_name:
h_t[i] *= 0.
if config.use_pbar:
pbar.update()
else:
logger.info(
log_str.format(
evaluated=len(stats_episodes),
total=episodes_to_eval,
time=round(time.time() - start_time),
)
)
if len(config.VIDEO_OPTION) > 0:
generate_video(
video_option=config.VIDEO_OPTION,
video_dir=config.VIDEO_DIR,
images=rgb_frames[i],
episode_id=current_episodes[i].episode_id,
checkpoint_idx=checkpoint_index,
metrics={
"spl": stats_episodes[
current_episodes[i].episode_id
]["spl"]
},
tb_writer=writer,
)
del stats_episodes[current_episodes[i].episode_id][
"top_down_map_vlnce"
]
del stats_episodes[current_episodes[i].episode_id][
"collisions"
]
rgb_frames[i] = []
observations = extract_instruction_tokens(
observations,
self.config.TASK_CONFIG.TASK.INSTRUCTION_SENSOR_UUID,
)
batch = batch_obs(observations, self.device)
batch = apply_obs_transforms_batch(batch, obs_transforms)
envs_to_pause = []
next_episodes = envs.current_episodes()
for i in range(envs.num_envs):
if next_episodes[i].episode_id in stats_episodes:
envs_to_pause.append(i)
if 'VLNBERT' in self.config.MODEL.policy_name:
rnn_states = h_t
headings = torch.tensor(headings)
(
envs,
rnn_states,
not_done_masks,
headings, # prev_actions
batch,
rgb_frames,
) = self._pause_envs(
envs_to_pause,
envs,
rnn_states,
not_done_masks,
headings,
batch,
rgb_frames,
)
headings = headings.tolist()
if 'VLNBERT' in self.config.MODEL.policy_name:
h_t = rnn_states
envs.close()
if config.use_pbar:
pbar.close()
if self.world_size > 1:
distr.barrier()
aggregated_stats = {}
num_episodes = len(stats_episodes)
for stat_key in next(iter(stats_episodes.values())).keys():
aggregated_stats[stat_key] = (
sum(v[stat_key] for v in stats_episodes.values())
/ num_episodes
)
total = torch.tensor(num_episodes).cuda()
if self.world_size > 1:
dist.reduce(total,dst=0)
total = total.item()
if self.world_size > 1:
logger.info(
f"rank {self.local_rank}'s {num_episodes}-episode results: {aggregated_stats}")
for k,v in aggregated_stats.items():
v = torch.tensor(v*num_episodes).cuda()
cat_v = gather_list_and_concat(v,self.world_size)
v = (sum(cat_v)/total).item()
aggregated_stats[k] = v
split = config.TASK_CONFIG.DATASET.SPLIT
fname = os.path.join(
config.RESULTS_DIR,
f"stats_ep_ckpt_{checkpoint_index}_{split}_r{self.local_rank}_w{self.world_size}.json",
)
with open(fname, "w") as f:
json.dump(stats_episodes, f, indent=4)
if self.local_rank < 1:
if config.EVAL.SAVE_RESULTS:
fname = os.path.join(
config.RESULTS_DIR,
f"stats_ckpt_{checkpoint_index}_{split}.json",
)
with open(fname, "w") as f:
json.dump(aggregated_stats, f, indent=4)
logger.info(f"Episodes evaluated: {total}")
checkpoint_num = checkpoint_index + 1
for k, v in aggregated_stats.items():
logger.info(f"Average episode {k}: {v:.6f}")
writer.add_scalar(f"eval_{split}_{k}", v, checkpoint_num)
def collect_val_traj(self):
from habitat_extensions.task import ALL_ROLES_MASK, RxRVLNCEDatasetV1
trajectories = defaultdict(list)
split = self.config.TASK_CONFIG.DATASET.SPLIT
if 'rxr' in self.config.BASE_TASK_CONFIG_PATH:
if "{role}" in self.config.IL.RECOLLECT_TRAINER.gt_file:
gt_data = {}
for role in RxRVLNCEDatasetV1.annotation_roles:
if (
ALL_ROLES_MASK not in self.config.TASK_CONFIG.DATASET.ROLES
and role not in self.config.TASK_CONFIG.DATASET.ROLES
):
continue
with gzip.open(
self.config.IL.RECOLLECT_TRAINER.gt_file.format(
split=split, role=role
),
"rt",
) as f:
gt_data.update(json.load(f))
else:
with gzip.open(
self.config.IL.RECOLLECT_TRAINER.gt_path.format(
split=split)
) as f:
gt_data = json.load(f)
else:
with gzip.open(
self.config.TASK_CONFIG.TASK.NDTW.GT_PATH.format(
split=split)
) as f:
gt_data = json.load(f)
self.gt_data = gt_data
trajectories = gt_data
self.trajectories = gt_data
trajectories = list(trajectories.keys())[self.config.local_rank::self.config.GPU_NUMBERS]
return trajectories
    def eval(self) -> None:
        r"""Main method of trainer evaluation. Calls _eval_checkpoint(), which
        is specified in the Trainer class that inherits from BaseRLTrainer
        or BaseILTrainer.
Returns:
None
"""
self.device = (
torch.device("cuda", self.config.TORCH_GPU_ID)
if torch.cuda.is_available()
else torch.device("cpu")
)
if "tensorboard" in self.config.VIDEO_OPTION:
assert (
len(self.config.TENSORBOARD_DIR) > 0
), "Must specify a tensorboard directory for video display"
os.makedirs(self.config.TENSORBOARD_DIR, exist_ok=True)
if "disk" in self.config.VIDEO_OPTION:
assert (
len(self.config.VIDEO_DIR) > 0
), "Must specify a directory for storing videos on disk"
world_size = self.config.GPU_NUMBERS
self.world_size = world_size
self.local_rank = self.config.local_rank
self.config.defrost()
# split = self.config.TASK_CONFIG.DATASET.SPLIT
# self.config.TASK_CONFIG.TASK.NDTW.SPLIT = split
# self.config.TASK_CONFIG.TASK.SDTW.SPLIT = split
self.config.TASK_CONFIG.DATASET.ROLES = ["guide"]
self.config.TASK_CONFIG.TASK.MEASUREMENTS = ['POSITION',
'STEPS_TAKEN',
]
if 'HIGHTOLOW' in self.config.TASK_CONFIG.TASK.POSSIBLE_ACTIONS:
idx = self.config.TASK_CONFIG.TASK.POSSIBLE_ACTIONS.index('HIGHTOLOW')
self.config.TASK_CONFIG.TASK.POSSIBLE_ACTIONS[idx] = 'HIGHTOLOWEVAL'
self.config.TASK_CONFIG.DATASET.LANGUAGES = self.config.EVAL.LANGUAGES
self.config.TASK_CONFIG.DATASET.SPLIT = self.config.EVAL.SPLIT
self.config.TASK_CONFIG.TASK.NDTW.SPLIT = self.config.EVAL.SPLIT
self.config.TASK_CONFIG.TASK.SDTW.SPLIT = self.config.EVAL.SPLIT
self.config.use_pbar = not is_slurm_batch_job()
if 'rxr' in self.config.BASE_TASK_CONFIG_PATH:
self.config.EVAL.trajectories_file = \
self.config.EVAL.trajectories_file[:-8] + '_w' + \
str(self.world_size) + '_r' + str(self.local_rank) + '.json.gz'
        # set up the 12-view panoramic cameras (RGB + DEPTH), as in training
resize_config = self.config.RL.POLICY.OBS_TRANSFORMS.RESIZER_PER_SENSOR.SIZES
config = self.config.TASK_CONFIG
camera_orientations = get_camera_orientations(12)
# sensor_uuids = []
for sensor_type in ["RGB", "DEPTH"]:
resizer_size = dict(resize_config)[sensor_type.lower()]
sensor = getattr(config.SIMULATOR, f"{sensor_type}_SENSOR")
for action, orient in camera_orientations.items():
camera_template = f"{sensor_type}_{action}"
camera_config = deepcopy(sensor)
camera_config.ORIENTATION = camera_orientations[action]
camera_config.UUID = camera_template.lower()
# sensor_uuids.append(camera_config.UUID)
setattr(config.SIMULATOR, camera_template, camera_config)
config.SIMULATOR.AGENT_0.SENSORS.append(camera_template)
resize_config.append((camera_template.lower(), resizer_size))
self.config.RL.POLICY.OBS_TRANSFORMS.RESIZER_PER_SENSOR.SIZES = resize_config
self.config.TASK_CONFIG = config
self.config.SENSORS = config.SIMULATOR.AGENT_0.SENSORS
self.config.freeze()
# self.config.TASK_CONFIG.ENVIRONMENT.ITERATOR_OPTIONS.SHUFFLE = False
# self.config.TASK_CONFIG.ENVIRONMENT.ITERATOR_OPTIONS.MAX_SCENE_REPEAT_STEPS = (
# -1
# )
torch.cuda.set_device(self.device)
if world_size > 1:
distr.init_process_group(backend='nccl', init_method='env://')
self.device = self.config.TORCH_GPU_IDS[self.local_rank]
torch.cuda.set_device(self.device)
self.config.defrost()
self.config.TORCH_GPU_ID = self.config.TORCH_GPU_IDS[self.local_rank]
self.config.freeze()
#
# if 'rxr' in self.config.BASE_TASK_CONFIG_PATH:
self.traj = self.collect_val_traj()
with TensorboardWriter(
self.config.TENSORBOARD_DIR, flush_secs=self.flush_secs
) as writer:
if os.path.isfile(self.config.EVAL_CKPT_PATH_DIR):
# evaluate singe checkpoint
proposed_index = get_checkpoint_id(
self.config.EVAL_CKPT_PATH_DIR
)
if proposed_index is not None:
ckpt_idx = proposed_index
else:
ckpt_idx = 0
self._eval_checkpoint(
self.config.EVAL_CKPT_PATH_DIR,
writer,
checkpoint_index=ckpt_idx,
)
else:
# evaluate multiple checkpoints in order
prev_ckpt_ind = -1
while True:
current_ckpt = None
while current_ckpt is None:
current_ckpt = poll_checkpoint_folder(
self.config.EVAL_CKPT_PATH_DIR, prev_ckpt_ind
)
time.sleep(2) # sleep for 2 secs before polling again
if self.local_rank < 1:
logger.info(f"=======current_ckpt: {current_ckpt}=======")
prev_ckpt_ind += 1
self._eval_checkpoint(
checkpoint_path=current_ckpt,
writer=writer,
checkpoint_index=prev_ckpt_ind,
)
def inference(self) -> None:
r"""Runs inference on a single checkpoint, creating a path predictions file."""
checkpoint_path = self.config.INFERENCE.CKPT_PATH
logger.info(f"checkpoint_path: {checkpoint_path}")
if self.config.INFERENCE.USE_CKPT_CONFIG:
config = self._setup_eval_config(
self.load_checkpoint(checkpoint_path, map_location="cpu")[
"config"
]
)
else:
config = self.config.clone()
config.defrost()
config.TASK_CONFIG.DATASET.SPLIT = self.config.INFERENCE.SPLIT
# config.TASK_CONFIG.DATASET.SPLIT = 'val_unseen'
config.TASK_CONFIG.DATASET.ROLES = ["guide"]
config.TASK_CONFIG.DATASET.LANGUAGES = config.INFERENCE.LANGUAGES
config.TASK_CONFIG.ENVIRONMENT.ITERATOR_OPTIONS.SHUFFLE = False
config.TASK_CONFIG.ENVIRONMENT.ITERATOR_OPTIONS.MAX_SCENE_REPEAT_STEPS = (
-1
)
config.IL.ckpt_to_load = config.INFERENCE.CKPT_PATH
config.TASK_CONFIG.TASK.MEASUREMENTS = []
config.TASK_CONFIG.TASK.SENSORS = [
s for s in config.TASK_CONFIG.TASK.SENSORS if "INSTRUCTION" in s
]
if 'HIGHTOLOW' in config.TASK_CONFIG.TASK.POSSIBLE_ACTIONS:
idx = config.TASK_CONFIG.TASK.POSSIBLE_ACTIONS.index('HIGHTOLOW')
config.TASK_CONFIG.TASK.POSSIBLE_ACTIONS[idx] = 'HIGHTOLOWINFER'
        # set up the 12-view panoramic cameras (RGB + DEPTH), as in training
resize_config = config.RL.POLICY.OBS_TRANSFORMS.RESIZER_PER_SENSOR.SIZES
task_config = config.TASK_CONFIG
camera_orientations = get_camera_orientations(12)
# sensor_uuids = []
for sensor_type in ["RGB", "DEPTH"]:
resizer_size = dict(resize_config)[sensor_type.lower()]
sensor = getattr(task_config.SIMULATOR, f"{sensor_type}_SENSOR")
for action, orient in camera_orientations.items():
camera_template = f"{sensor_type}_{action}"
camera_config = deepcopy(sensor)
camera_config.ORIENTATION = camera_orientations[action]
camera_config.UUID = camera_template.lower()
# sensor_uuids.append(camera_config.UUID)
setattr(task_config.SIMULATOR, camera_template, camera_config)
task_config.SIMULATOR.AGENT_0.SENSORS.append(camera_template)
resize_config.append((camera_template.lower(), resizer_size))
config.RL.POLICY.OBS_TRANSFORMS.RESIZER_PER_SENSOR.SIZES = resize_config
config.TASK_CONFIG = task_config
config.SENSORS = task_config.SIMULATOR.AGENT_0.SENSORS
config.ENV_NAME = "VLNCEInferenceEnv"
config.freeze()
# envs = construct_envs_auto_reset_false(
# config, get_env_class(config.ENV_NAME),
# self.traj
# )
envs = construct_envs(
config, get_env_class(config.ENV_NAME),
auto_reset_done=False,
episodes_allowed=None,
)
obs_transforms = get_active_obs_transforms(config)
observation_space = apply_obs_transforms_obs_space(
envs.observation_spaces[0], obs_transforms
)
self._initialize_policy(
config,
load_from_ckpt=True,
observation_space=observation_space,
action_space=envs.action_spaces[0],
)
self.policy.eval()
self.waypoint_predictor.eval()
observations = envs.reset()
observations = extract_instruction_tokens(
observations, self.config.TASK_CONFIG.TASK.INSTRUCTION_SENSOR_UUID
)
batch = batch_obs(observations, self.device)
batch = apply_obs_transforms_batch(batch, obs_transforms)
if 'CMA' in self.config.MODEL.policy_name:
rnn_states = torch.zeros(
envs.num_envs,
self.num_recurrent_layers,
config.MODEL.STATE_ENCODER.hidden_size,
device=self.device,
)
elif 'VLNBERT' in self.config.MODEL.policy_name:
h_t = torch.zeros(
envs.num_envs, 768,
device=self.device,
)
language_features = torch.zeros(
envs.num_envs, 80, 768,
device=self.device,
)
not_done_masks = torch.zeros(
envs.num_envs, 1, dtype=torch.uint8, device=self.device
)
episode_predictions = defaultdict(list)
# episode ID --> instruction ID for rxr predictions format
instruction_ids: Dict[str, int] = {}
# populate episode_predictions with the starting state
current_episodes = envs.current_episodes()
for i in range(envs.num_envs):
episode_predictions[current_episodes[i].episode_id].append(
envs.call_at(i, "get_info", {"observations": {}})
)
if config.INFERENCE.FORMAT == "rxr":
ep_id = current_episodes[i].episode_id
k = current_episodes[i].instruction.instruction_id
instruction_ids[ep_id] = int(k)
with tqdm.tqdm(
total=sum(envs.count_episodes()),
desc=f"[inference:{self.config.INFERENCE.SPLIT}]",
) as pbar:
while envs.num_envs > 0:
current_episodes = envs.current_episodes()
positions = []; headings = []
for ob_i in range(len(current_episodes)):
agent_state_i = envs.call_at(ob_i,
"get_info", {"observations": {}})
positions.append(agent_state_i['position'])
headings.append(agent_state_i['heading'])
with torch.no_grad():
if 'CMA' in self.config.MODEL.policy_name:
# instructions
instruction_embedding, all_lang_masks = self.policy.net(
mode = "language",
observations = batch,
)
# candidate waypoints prediction
cand_rgb, cand_depth, \
cand_direction, cand_mask, candidate_lengths, \
batch_angles, batch_distances = self.policy.net(
mode = "waypoint",
waypoint_predictor = self.waypoint_predictor,
observations = batch,
in_train = False,
)
# navigation action logits
logits, rnn_states = self.policy.net(
mode = 'navigation',
observations = batch,
instruction = instruction_embedding,
text_mask = all_lang_masks,
rnn_states = rnn_states,
headings = headings,
cand_rgb = cand_rgb,
cand_depth = cand_depth,
cand_direction = cand_direction,
cand_mask = cand_mask,
masks = not_done_masks,
)
logits = logits.masked_fill_(cand_mask, -float('inf'))
elif 'VLNBERT' in self.config.MODEL.policy_name:
# instruction
lang_idx_tokens = batch['instruction']
padding_idx = 0
lang_masks = (lang_idx_tokens != padding_idx)
lang_token_type_ids = torch.zeros_like(lang_masks,
dtype=torch.long, device=self.device)
h_t_flag = h_t.sum(1)==0.0
h_t_init, language_features = self.policy.net(
mode='language',
lang_idx_tokens=lang_idx_tokens,
lang_masks=lang_masks)
h_t[h_t_flag] = h_t_init[h_t_flag]
language_features = torch.cat(
(h_t.unsqueeze(1), language_features[:,1:,:]), dim=1)
# candidate waypoints prediction
cand_rgb, cand_depth, \
cand_direction, cand_mask, candidate_lengths, \
batch_angles, batch_distances = self.policy.net(
mode = "waypoint",
waypoint_predictor = self.waypoint_predictor,
observations = batch,
in_train = False,
)
# navigation action logits
logits, h_t = self.policy.net(
mode = 'navigation',
observations=batch,
lang_masks=lang_masks,
lang_feats=language_features,
lang_token_type_ids=lang_token_type_ids,
headings=headings,
cand_rgb = cand_rgb,
cand_depth = cand_depth,
cand_direction = cand_direction,
cand_mask = cand_mask,
masks = not_done_masks,
)
logits = logits.masked_fill_(cand_mask, -float('inf'))
# high-to-low actions in environments
actions = logits.argmax(dim=-1, keepdim=True)
env_actions = []
for j in range(logits.size(0)):
if actions[j].item() == candidate_lengths[j]-1:
env_actions.append({'action':
{'action': 0, 'action_args':{}}})
else:
env_actions.append({'action':
{'action': 4, # HIGHTOLOW
'action_args':{
'angle': batch_angles[j][actions[j].item()],
'distance': batch_distances[j][actions[j].item()],
}}})
outputs = envs.step(env_actions)
observations, _, dones, infos = [list(x) for x in zip(*outputs)]
not_done_masks = torch.tensor(
[[0] if done else [1] for done in dones],
dtype=torch.uint8,
device=self.device,
)
# reset envs and observations if necessary
for i in range(envs.num_envs):
if 'infos' in observations[i].keys():
episode_predictions[current_episodes[i].episode_id] += observations[i].pop('infos')
else:
episode_predictions[current_episodes[i].episode_id].append(
envs.call_at(i, "get_info", {"observations": {}}))
if not dones[i]:
continue
if 'CMA' in self.config.MODEL.policy_name:
rnn_states[i] *= 0.
elif 'VLNBERT' in self.config.MODEL.policy_name:
h_t[i] *= 0.
observations[i] = envs.reset_at(i)[0]
pbar.update()
observations = extract_instruction_tokens(
observations,
self.config.TASK_CONFIG.TASK.INSTRUCTION_SENSOR_UUID,
)
batch = batch_obs(observations, self.device)
batch = apply_obs_transforms_batch(batch, obs_transforms)
envs_to_pause = []
next_episodes = envs.current_episodes()
for i in range(envs.num_envs):
if not dones[i]:
continue
if next_episodes[i].episode_id in episode_predictions:
envs_to_pause.append(i)
else:
episode_predictions[next_episodes[i].episode_id].append(
envs.call_at(i, "get_info", {"observations": {}}))
if config.INFERENCE.FORMAT == "rxr":
ep_id = next_episodes[i].episode_id
k = next_episodes[i].instruction.instruction_id
instruction_ids[ep_id] = int(k)
if 'VLNBERT' in self.config.MODEL.policy_name:
rnn_states = h_t
headings = torch.tensor(headings)
(
envs,
rnn_states,
not_done_masks,
headings,
batch,
rgb_frames,
) = self._pause_envs(
envs_to_pause,
envs,
rnn_states,
not_done_masks,
headings,
batch,
)
headings = headings.tolist()
if 'VLNBERT' in self.config.MODEL.policy_name:
h_t = rnn_states
envs.close()
if config.INFERENCE.FORMAT == "r2r":
with open(config.INFERENCE.PREDICTIONS_FILE, "w") as f:
json.dump(episode_predictions, f, indent=2)
logger.info(
f"Predictions saved to: {config.INFERENCE.PREDICTIONS_FILE}"
)
else: # use 'rxr' format for rxr-habitat leaderboard
predictions_out = []
for k,v in episode_predictions.items():
# save only positions that changed
path = [v[0]["position"]]
for p in v[1:]:
if path[-1] != p["position"]:
path.append(p["position"])
predictions_out.append(
{
"instruction_id": instruction_ids[k],
"path": path,
}
)
predictions_out.sort(key=lambda x: x["instruction_id"])
with jsonlines.open(
config.INFERENCE.PREDICTIONS_FILE, mode="w"
) as writer:
writer.write_all(predictions_out)
logger.info(
f"Predictions saved to: {config.INFERENCE.PREDICTIONS_FILE}"
)
| 45,832 | 40.971612 | 112 | py |
Discrete-Continuous-VLN | Discrete-Continuous-VLN-main/vlnce_baselines/models/Policy_ViewSelection_CMA.py | import numpy as np
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
from gym import Space
from habitat import Config
from habitat_baselines.common.baseline_registry import baseline_registry
from habitat_baselines.rl.models.rnn_state_encoder import (
build_rnn_state_encoder,
)
from habitat_baselines.rl.ppo.policy import Net
from habitat_baselines.utils.common import CustomFixedCategorical
from vlnce_baselines.common.aux_losses import AuxLosses
from vlnce_baselines.models.encoders.instruction_encoder import (
InstructionEncoder,
)
from vlnce_baselines.models.encoders.resnet_encoders import (
TorchVisionResNet50,
VlnResnetDepthEncoder
)
from vlnce_baselines.models.policy import ILPolicy
from waypoint_prediction.utils import nms
from vlnce_baselines.models.utils import (
length2mask, angle_feature, dir_angle_feature)
import math
@baseline_registry.register_policy
class PolicyViewSelectionCMA(ILPolicy):
def __init__(
self,
observation_space: Space,
action_space: Space,
model_config: Config,
):
super().__init__(
CMANet(
observation_space=observation_space,
model_config=model_config,
num_actions=action_space.n,
),
action_space.n,
)
@classmethod
def from_config(
cls, config: Config, observation_space: Space, action_space: Space
):
config.defrost()
config.MODEL.TORCH_GPU_ID = config.TORCH_GPU_IDS[config.local_rank]
config.freeze()
return cls(
observation_space=observation_space,
action_space=action_space,
model_config=config.MODEL,
)
class CMANet(Net):
r"""A cross-modal attention (CMA) network that contains:
Instruction encoder
Depth encoder
RGB encoder
CMA state encoder
"""
def __init__(
self, observation_space: Space, model_config: Config, num_actions
):
super().__init__()
self.model_config = model_config
model_config.defrost()
model_config.INSTRUCTION_ENCODER.final_state_only = False
model_config.freeze()
device = (
torch.device("cuda", model_config.TORCH_GPU_ID)
if torch.cuda.is_available()
else torch.device("cpu")
)
self.device = device
# Init the instruction encoder
self.instruction_encoder = InstructionEncoder(
model_config.INSTRUCTION_ENCODER
)
# Init the depth encoder
assert model_config.DEPTH_ENCODER.cnn_type in [
"VlnResnetDepthEncoder"
], "DEPTH_ENCODER.cnn_type must be VlnResnetDepthEncoder"
self.depth_encoder = VlnResnetDepthEncoder(
observation_space,
output_size=model_config.DEPTH_ENCODER.output_size,
checkpoint=model_config.DEPTH_ENCODER.ddppo_checkpoint,
backbone=model_config.DEPTH_ENCODER.backbone,
spatial_output=model_config.spatial_output,
)
# Init the RGB encoder
assert model_config.RGB_ENCODER.cnn_type in [
"TorchVisionResNet152", "TorchVisionResNet50"
], "RGB_ENCODER.cnn_type must be TorchVisionResNet152 or TorchVisionResNet50"
if model_config.RGB_ENCODER.cnn_type == "TorchVisionResNet50":
self.rgb_encoder = TorchVisionResNet50(
observation_space,
model_config.RGB_ENCODER.output_size,
device,
spatial_output=model_config.spatial_output,
)
hidden_size = model_config.STATE_ENCODER.hidden_size
self._hidden_size = hidden_size
# merging visual inputs
self.rgb_linear = nn.Sequential(
nn.Linear(
model_config.RGB_ENCODER.encode_size,
model_config.RGB_ENCODER.output_size,
),
nn.ReLU(True),
)
if self.depth_encoder.spatial_output:
None
else:
self.depth_linear = nn.Sequential(
nn.Linear(
model_config.DEPTH_ENCODER.encode_size,
model_config.DEPTH_ENCODER.output_size,
),
nn.ReLU(True),
)
self.vismerge_linear = nn.Sequential(
nn.Linear(
model_config.DEPTH_ENCODER.output_size + model_config.RGB_ENCODER.output_size + model_config.VISUAL_DIM.directional,
model_config.VISUAL_DIM.vis_hidden,
),
nn.ReLU(True),
)
self.enc_prev_act = nn.Sequential(
nn.Linear(model_config.VISUAL_DIM.directional, model_config.VISUAL_DIM.directional),
nn.Tanh(),
)
# Init the RNN state decoder
self.state_encoder = build_rnn_state_encoder(
input_size=model_config.VISUAL_DIM.vis_hidden + model_config.VISUAL_DIM.directional,
hidden_size=model_config.STATE_ENCODER.hidden_size,
rnn_type=model_config.STATE_ENCODER.rnn_type,
num_layers=1,
)
self.prev_state_vis_attn = SoftDotAttention(
model_config.STATE_ENCODER.hidden_size,
model_config.VISUAL_DIM.vis_hidden,
model_config.VISUAL_DIM.vis_hidden,
output_tilde=False
)
self.text_vis_attn = SoftDotAttention(
self.instruction_encoder.output_size,
model_config.VISUAL_DIM.vis_hidden,
model_config.VISUAL_DIM.vis_hidden,
output_tilde=False
)
self.state_text_attn = SoftDotAttention(
model_config.STATE_ENCODER.hidden_size,
self.instruction_encoder.output_size,
self.instruction_encoder.output_size,
output_tilde=False
)
self.state_vis_logits = SoftDotAttention(
model_config.STATE_ENCODER.hidden_size+model_config.VISUAL_DIM.vis_hidden+self.instruction_encoder.output_size,
model_config.VISUAL_DIM.vis_hidden,
model_config.STATE_ENCODER.hidden_size,
output_tilde=False
)
self.register_buffer(
"_scale", torch.tensor(1.0 / ((hidden_size // 2) ** 0.5))
)
self.space_pool = nn.Sequential(
nn.AdaptiveAvgPool2d((1,1)),
nn.Flatten(start_dim=2),
)
self.train()
@property
def is_blind(self):
return self.rgb_encoder.is_blind or self.depth_encoder.is_blind
@property # trivial argument, just for init with habitat
def output_size(self):
return 1
@property
def num_recurrent_layers(self):
return self.state_encoder.num_recurrent_layers
def forward(self, mode=None,
waypoint_predictor=None,
observations=None,
instruction=None, text_mask=None,
rnn_states=None,
cand_rgb=None, cand_depth=None,
cand_direction=None, cand_mask=None,
headings=None, masks=None,
post_states=None, in_train=True):
r"""
instruction_embedding: [batch_size x INSTRUCTION_ENCODER.output_size]
depth_embedding: [batch_size x DEPTH_ENCODER.output_size]
rgb_embedding: [batch_size x RGB_ENCODER.output_size]
"""
if mode == 'language':
ctx, all_lang_masks = self.instruction_encoder(observations)
return ctx, all_lang_masks
elif mode == 'waypoint':
batch_size = observations['instruction'].size(0)
''' encoding rgb/depth at all directions ----------------------------- '''
NUM_ANGLES = 120 # 120 angles 3 degrees each
NUM_IMGS = 12
NUM_CLASSES = 12 # 12 distances at each sector
depth_batch = torch.zeros_like(observations['depth']).repeat(NUM_IMGS, 1, 1, 1)
rgb_batch = torch.zeros_like(observations['rgb']).repeat(NUM_IMGS, 1, 1, 1)
            # reverse the order of the input images so they run clockwise
            # (single-view images in clockwise order match the panoramic image)
a_count = 0
for i, (k, v) in enumerate(observations.items()):
if 'depth' in k:
for bi in range(v.size(0)):
ra_count = (NUM_IMGS - a_count)%NUM_IMGS
depth_batch[ra_count+bi*NUM_IMGS] = v[bi]
rgb_batch[ra_count+bi*NUM_IMGS] = observations[k.replace('depth','rgb')][bi]
a_count += 1
obs_view12 = {}
obs_view12['depth'] = depth_batch
obs_view12['rgb'] = rgb_batch
depth_embedding = self.depth_encoder(obs_view12)
rgb_embedding = self.rgb_encoder(obs_view12)
''' waypoint prediction ----------------------------- '''
waypoint_heatmap_logits = waypoint_predictor(
rgb_embedding, depth_embedding)
# reverse the order of images back to counter-clockwise
rgb_embed_reshape = rgb_embedding.reshape(
batch_size, NUM_IMGS, 2048, 7, 7)
depth_embed_reshape = depth_embedding.reshape(
batch_size, NUM_IMGS, 128, 4, 4)
rgb_feats = torch.cat((
rgb_embed_reshape[:,0:1,:],
torch.flip(rgb_embed_reshape[:,1:,:], [1]),
), dim=1)
depth_feats = torch.cat((
depth_embed_reshape[:,0:1,:],
torch.flip(depth_embed_reshape[:,1:,:], [1]),
), dim=1)
# from heatmap to points
batch_x_norm = torch.softmax(
waypoint_heatmap_logits.reshape(
batch_size, NUM_ANGLES*NUM_CLASSES,
), dim=1
)
batch_x_norm = batch_x_norm.reshape(
batch_size, NUM_ANGLES, NUM_CLASSES,
)
batch_x_norm_wrap = torch.cat((
batch_x_norm[:,-1:,:],
batch_x_norm,
batch_x_norm[:,:1,:]),
dim=1)
batch_output_map = nms(
batch_x_norm_wrap.unsqueeze(1),
max_predictions=5,
sigma=(7.0,5.0))
# predicted waypoints before sampling
batch_output_map = batch_output_map.squeeze(1)[:,1:-1,:]
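            # count the non-zero heatmap peaks per sample; +1 adds the stop candidate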
candidate_lengths = ((batch_output_map!=0).sum(-1).sum(-1) + 1).tolist()
if isinstance(candidate_lengths, int):
candidate_lengths = [candidate_lengths]
max_candidate = max(candidate_lengths) # including stop
cand_mask = length2mask(candidate_lengths, device=self.device)
if in_train:
# Augment waypoint prediction
# parts of heatmap for sampling (fix offset first)
batch_way_heats_regional = torch.cat(
(waypoint_heatmap_logits[:,-waypoint_predictor.HEATMAP_OFFSET:,:],
waypoint_heatmap_logits[:,:-waypoint_predictor.HEATMAP_OFFSET,:],
), dim=1)
batch_way_heats_regional = batch_way_heats_regional.reshape(batch_size, 12, 10, 12)
batch_sample_angle_idxes = []
batch_sample_distance_idxes = []
batch_way_log_prob = []
for j in range(batch_size):
# angle indexes with candidates
angle_idxes = batch_output_map[j].nonzero()[:, 0]
# clockwise image indexes (same as batch_x_norm)
img_idxes = ((angle_idxes.cpu().numpy()+5) // 10)
img_idxes[img_idxes==12] = 0
# heatmap regions for sampling
way_heats_regional = batch_way_heats_regional[j][img_idxes].view(img_idxes.size, -1)
way_heats_probs = F.softmax(way_heats_regional, 1)
probs_c = torch.distributions.Categorical(way_heats_probs)
way_heats_act = probs_c.sample().detach()
sample_angle_idxes = []
sample_distance_idxes = []
for k, way_act in enumerate(way_heats_act):
if img_idxes[k] != 0:
angle_pointer = (img_idxes[k] - 1) * 10 + 5
else:
angle_pointer = 0
sample_angle_idxes.append(way_act//12+angle_pointer)
sample_distance_idxes.append(way_act%12)
batch_sample_angle_idxes.append(sample_angle_idxes)
batch_sample_distance_idxes.append(sample_distance_idxes)
batch_way_log_prob.append(
probs_c.log_prob(way_heats_act))
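            # gather the spatial RGB-D features of each candidate direction;
            # unused slots (padding and the stop candidate) remain zero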
cand_rgb = torch.zeros(
(batch_size, max_candidate, 2048, 7, 7),
dtype=torch.float32, device=self.device)
cand_depth = torch.zeros(
(batch_size, max_candidate, 128, 4, 4),
dtype=torch.float32, device=self.device)
batch_angles = []
batch_distances = []
batch_img_idxes = []
for j in range(batch_size):
if in_train:
angle_idxes = torch.tensor(batch_sample_angle_idxes[j])
distance_idxes = torch.tensor(batch_sample_distance_idxes[j])
else:
# angle indexes with candidates
angle_idxes = batch_output_map[j].nonzero()[:, 0]
# distance indexes for candidates
distance_idxes = batch_output_map[j].nonzero()[:, 1]
                # 2*pi minus angle because counter-clockwise is the positive direction
angle_rad = 2*math.pi-angle_idxes.float()/120*2*math.pi
batch_angles.append(angle_rad.tolist())
batch_distances.append(
((distance_idxes + 1)*0.25).tolist())
# counter-clockwise image indexes
img_idxes = 12 - ((angle_idxes.cpu().numpy()+5) // 10)
img_idxes[img_idxes==12] = 0
batch_img_idxes.append(img_idxes)
for k in range(len(img_idxes)):
cand_rgb[j][k] = rgb_feats[j][img_idxes[k]]
cand_depth[j][k] = depth_feats[j][img_idxes[k]]
cand_direction = dir_angle_feature(batch_angles).to(self.device)
if in_train:
return cand_rgb, cand_depth, cand_direction, cand_mask, candidate_lengths, batch_angles, batch_distances #, batch_way_log_prob
else:
return cand_rgb, cand_depth, cand_direction, cand_mask, candidate_lengths, batch_angles, batch_distances
elif mode == 'navigation':
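            # pool the spatial candidate features, project RGB and depth, and fuse
            # them with the directional encoding of each candidate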
cand_rgb_feats_pool = self.space_pool(cand_rgb)
rgb_in = self.rgb_linear(cand_rgb_feats_pool)
cand_depth_feats_pool = self.space_pool(cand_depth)
depth_in = self.depth_linear(cand_depth_feats_pool)
vis_in = self.vismerge_linear(
torch.cat((rgb_in, depth_in, cand_direction), dim=2),)
''' aggregate visual features by agent's previous state -------------- '''
prev_state = rnn_states[:, 0:self.state_encoder.num_recurrent_layers].squeeze(1)
vis_prev_state, _ = self.prev_state_vis_attn(
prev_state, vis_in, cand_mask)
''' first state encoder for new visual features '''
prev_actions = angle_feature(headings, device=self.device)
prev_actions = self.enc_prev_act(prev_actions)
state_in = torch.cat([vis_prev_state, prev_actions], dim=1)
rnn_states_out = rnn_states.detach().clone()
(
state,
rnn_states_out[:, 0 : self.state_encoder.num_recurrent_layers],
) = self.state_encoder(
state_in,
rnn_states[:, 0 : self.state_encoder.num_recurrent_layers],
masks,
)
''' language attention using state '''
text_state, _ = self.state_text_attn(
state, instruction, text_mask)
''' visual attention using attended language '''
vis_text_feats, _ = self.text_vis_attn(
text_state, vis_in, cand_mask)
x = torch.cat((state, vis_text_feats, text_state), dim=1)
_, logits = self.state_vis_logits(
x, vis_in, cand_mask, output_prob=False)
return logits, rnn_states_out
class SoftDotAttention(nn.Module):
def __init__(self, q_dim, kv_dim, hidden_dim, output_tilde=False):
'''Initialize layer.'''
super(SoftDotAttention, self).__init__()
self.linear_q = nn.Linear(q_dim, hidden_dim, bias=True)
self.linear_kv = nn.Linear(kv_dim, hidden_dim, bias=True)
self.sm = nn.Softmax(dim=1)
self.output_tilde = output_tilde
if output_tilde:
self.linear_out = nn.Linear(q_dim + hidden_dim, hidden_dim, bias=False)
self.tanh = nn.Tanh()
def forward(self, q, kv, mask=None, output_prob=True):
'''Propagate h through the network.
q: (query) batch x dim
kv: (keys and values) batch x seq_len x dim
mask: batch x seq_len indices to be masked
'''
x_q = self.linear_q(q).unsqueeze(2)
x_kv = self.linear_kv(kv)
attn = torch.bmm(x_kv, x_q).squeeze(2)
logit = attn
if mask is not None:
attn.masked_fill_(mask, -float('inf'))
attn = self.sm(attn)
attn3 = attn.view(attn.size(0), 1, attn.size(1))
weighted_x_kv = torch.bmm(attn3, x_kv).squeeze(1)
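        # when output_prob is False, return the (masked) pre-softmax logits
        # instead of the attention probabilities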
if not output_prob:
attn = logit
if self.output_tilde:
h_tilde = torch.cat((weighted_x_kv, q), 1)
h_tilde = self.tanh(self.linear_out(h_tilde))
return h_tilde, attn
else:
return weighted_x_kv, attn
| 18,135 | 38.598253 | 142 | py |
Discrete-Continuous-VLN | Discrete-Continuous-VLN-main/vlnce_baselines/models/Policy_ViewSelection_VLNBERT.py | import numpy as np
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
from gym import Space
from habitat import Config
from habitat_baselines.common.baseline_registry import baseline_registry
from habitat_baselines.rl.models.rnn_state_encoder import (
build_rnn_state_encoder,
)
from habitat_baselines.rl.ppo.policy import Net
from vlnce_baselines.models.vlnbert.vlnbert_init import get_vlnbert_models
from vlnce_baselines.common.aux_losses import AuxLosses
from vlnce_baselines.models.encoders.instruction_encoder import (
InstructionEncoder,
)
from vlnce_baselines.models.encoders.resnet_encoders import (
TorchVisionResNet50,
VlnResnetDepthEncoder,
)
from vlnce_baselines.models.policy import ILPolicy
from waypoint_prediction.utils import nms
from vlnce_baselines.models.utils import (
angle_feature_with_ele, dir_angle_feature_with_ele, length2mask)
import math
@baseline_registry.register_policy
class PolicyViewSelectionVLNBERT(ILPolicy):
def __init__(
self,
observation_space: Space,
action_space: Space,
model_config: Config,
):
super().__init__(
VLNBERT(
observation_space=observation_space,
model_config=model_config,
num_actions=action_space.n,
),
action_space.n,
)
@classmethod
def from_config(
cls, config: Config, observation_space: Space, action_space: Space
):
config.defrost()
config.MODEL.TORCH_GPU_ID = config.TORCH_GPU_IDS[config.local_rank]
config.freeze()
return cls(
observation_space=observation_space,
action_space=action_space,
model_config=config.MODEL,
)
class VLNBERT(Net):
def __init__(
self, observation_space: Space, model_config: Config, num_actions,
):
super().__init__()
device = (
torch.device("cuda", model_config.TORCH_GPU_ID)
if torch.cuda.is_available()
else torch.device("cpu")
)
self.device = device
        print('\nInitializing the VLN-BERT model ...')
self.vln_bert = get_vlnbert_models(config=None)
self.vln_bert.config.directions = 1 # a trivial number, change during nav
layer_norm_eps = self.vln_bert.config.layer_norm_eps
# Init the depth encoder
assert model_config.DEPTH_ENCODER.cnn_type in [
"VlnResnetDepthEncoder"
], "DEPTH_ENCODER.cnn_type must be VlnResnetDepthEncoder"
self.depth_encoder = VlnResnetDepthEncoder(
observation_space,
output_size=model_config.DEPTH_ENCODER.output_size,
checkpoint=model_config.DEPTH_ENCODER.ddppo_checkpoint,
backbone=model_config.DEPTH_ENCODER.backbone,
spatial_output=model_config.spatial_output,
)
# Init the RGB encoder
assert model_config.RGB_ENCODER.cnn_type in [
"TorchVisionResNet152", "TorchVisionResNet50"
], "RGB_ENCODER.cnn_type must be TorchVisionResNet152 or TorchVisionResNet50"
if model_config.RGB_ENCODER.cnn_type == "TorchVisionResNet50":
self.rgb_encoder = TorchVisionResNet50(
observation_space,
model_config.RGB_ENCODER.output_size,
device,
spatial_output=model_config.spatial_output,
)
# merging visual inputs
self.space_pool = nn.Sequential(
nn.AdaptiveAvgPool2d((1,1)),
nn.Flatten(start_dim=2),)
self.rgb_linear = nn.Sequential(
nn.Linear(
model_config.RGB_ENCODER.encode_size,
model_config.RGB_ENCODER.output_size,
),
nn.ReLU(True),
)
self.depth_linear = nn.Sequential(
nn.Linear(
model_config.DEPTH_ENCODER.encode_size,
model_config.DEPTH_ENCODER.output_size,
),
nn.ReLU(True),
)
self.vismerge_linear = nn.Sequential(
nn.Linear(
model_config.DEPTH_ENCODER.output_size + model_config.RGB_ENCODER.output_size + model_config.VISUAL_DIM.directional,
model_config.VISUAL_DIM.vis_hidden,
),
nn.ReLU(True),
)
self.action_state_project = nn.Sequential(
nn.Linear(model_config.VISUAL_DIM.vis_hidden+model_config.VISUAL_DIM.directional,
model_config.VISUAL_DIM.vis_hidden),
nn.Tanh())
self.action_LayerNorm = BertLayerNorm(
model_config.VISUAL_DIM.vis_hidden, eps=layer_norm_eps)
self.drop_env = nn.Dropout(p=0.4)
self.train()
@property # trivial argument, just for init with habitat
def output_size(self):
return 1
@property
def is_blind(self):
return self.rgb_encoder.is_blind or self.depth_encoder.is_blind
@property
def num_recurrent_layers(self):
return 1
def forward(self, mode=None,
waypoint_predictor=None,
observations=None,
lang_idx_tokens=None, lang_masks=None,
lang_feats=None, lang_token_type_ids=None,
headings=None,
cand_rgb=None, cand_depth=None,
cand_direction=None, cand_mask=None,
masks=None,
post_states=None, in_train=True):
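        # forward() is multiplexed by `mode`:
        #   'language'   - run the VLN-BERT language branch over the instruction
        #   'waypoint'   - predict candidate waypoints from panoramic RGB-D
        #   'navigation' - cross-modal VLN-BERT step that scores the candidates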
if mode == 'language':
h_t, language_features = self.vln_bert(
'language', lang_idx_tokens,
attention_mask=lang_masks, lang_mask=lang_masks,)
return h_t, language_features
elif mode == 'waypoint':
batch_size = observations['instruction'].size(0)
''' encoding rgb/depth at all directions ----------------------------- '''
NUM_ANGLES = 120 # 120 angles 3 degrees each
NUM_IMGS = 12
NUM_CLASSES = 12 # 12 distances at each sector
depth_batch = torch.zeros_like(observations['depth']).repeat(NUM_IMGS, 1, 1, 1)
rgb_batch = torch.zeros_like(observations['rgb']).repeat(NUM_IMGS, 1, 1, 1)
            # reverse the order of the input images so they run clockwise
            # (single-view images in clockwise order match the panoramic image)
a_count = 0
for i, (k, v) in enumerate(observations.items()):
if 'depth' in k:
for bi in range(v.size(0)):
ra_count = (NUM_IMGS - a_count)%NUM_IMGS
depth_batch[ra_count+bi*NUM_IMGS] = v[bi]
rgb_batch[ra_count+bi*NUM_IMGS] = observations[k.replace('depth','rgb')][bi]
a_count += 1
obs_view12 = {}
obs_view12['depth'] = depth_batch
obs_view12['rgb'] = rgb_batch
depth_embedding = self.depth_encoder(obs_view12)
rgb_embedding = self.rgb_encoder(obs_view12)
''' waypoint prediction ----------------------------- '''
waypoint_heatmap_logits = waypoint_predictor(
rgb_embedding, depth_embedding)
# reverse the order of images back to counter-clockwise
rgb_embed_reshape = rgb_embedding.reshape(
batch_size, NUM_IMGS, 2048, 7, 7)
depth_embed_reshape = depth_embedding.reshape(
batch_size, NUM_IMGS, 128, 4, 4)
rgb_feats = torch.cat((
rgb_embed_reshape[:,0:1,:],
torch.flip(rgb_embed_reshape[:,1:,:], [1]),
), dim=1)
depth_feats = torch.cat((
depth_embed_reshape[:,0:1,:],
torch.flip(depth_embed_reshape[:,1:,:], [1]),
), dim=1)
# from heatmap to points
batch_x_norm = torch.softmax(
waypoint_heatmap_logits.reshape(
batch_size, NUM_ANGLES*NUM_CLASSES,
), dim=1
)
batch_x_norm = batch_x_norm.reshape(
batch_size, NUM_ANGLES, NUM_CLASSES,
)
batch_x_norm_wrap = torch.cat((
batch_x_norm[:,-1:,:],
batch_x_norm,
batch_x_norm[:,:1,:]),
dim=1)
batch_output_map = nms(
batch_x_norm_wrap.unsqueeze(1),
max_predictions=5,
sigma=(7.0,5.0))
# predicted waypoints before sampling
batch_output_map = batch_output_map.squeeze(1)[:,1:-1,:]
candidate_lengths = ((batch_output_map!=0).sum(-1).sum(-1) + 1).tolist()
if isinstance(candidate_lengths, int):
candidate_lengths = [candidate_lengths]
max_candidate = max(candidate_lengths) # including stop
cand_mask = length2mask(candidate_lengths, device=self.device)
if in_train:
# Augment waypoint prediction
# parts of heatmap for sampling (fix offset first)
HEATMAP_OFFSET = 5
batch_way_heats_regional = torch.cat(
(waypoint_heatmap_logits[:,-HEATMAP_OFFSET:,:],
waypoint_heatmap_logits[:,:-HEATMAP_OFFSET,:],
), dim=1)
batch_way_heats_regional = batch_way_heats_regional.reshape(batch_size, 12, 10, 12)
batch_sample_angle_idxes = []
batch_sample_distance_idxes = []
for j in range(batch_size):
# angle indexes with candidates
angle_idxes = batch_output_map[j].nonzero()[:, 0]
# clockwise image indexes (same as batch_x_norm)
img_idxes = ((angle_idxes.cpu().numpy()+5) // 10)
img_idxes[img_idxes==12] = 0
# heatmap regions for sampling
way_heats_regional = batch_way_heats_regional[j][img_idxes].view(img_idxes.size, -1)
way_heats_probs = F.softmax(way_heats_regional, 1)
probs_c = torch.distributions.Categorical(way_heats_probs)
way_heats_act = probs_c.sample().detach()
sample_angle_idxes = []
sample_distance_idxes = []
for k, way_act in enumerate(way_heats_act):
if img_idxes[k] != 0:
angle_pointer = (img_idxes[k] - 1) * 10 + 5
else:
angle_pointer = 0
sample_angle_idxes.append(way_act//12+angle_pointer)
sample_distance_idxes.append(way_act%12)
batch_sample_angle_idxes.append(sample_angle_idxes)
batch_sample_distance_idxes.append(sample_distance_idxes)
cand_rgb = torch.zeros(
(batch_size, max_candidate, 2048, 7, 7),
dtype=torch.float32, device=self.device)
cand_depth = torch.zeros(
(batch_size, max_candidate, 128, 4, 4),
dtype=torch.float32, device=self.device)
batch_angles = []; batch_angles_c = []
batch_distances = []
batch_img_idxes = []
for j in range(batch_size):
if in_train:
angle_idxes = torch.tensor(batch_sample_angle_idxes[j])
distance_idxes = torch.tensor(batch_sample_distance_idxes[j])
else:
# angle indexes with candidates
angle_idxes = batch_output_map[j].nonzero()[:, 0]
# distance indexes for candidates
distance_idxes = batch_output_map[j].nonzero()[:, 1]
                # 2*pi minus angle because counter-clockwise is the positive direction
angle_rad_cc = 2*math.pi-angle_idxes.float()/120*2*math.pi
batch_angles.append(angle_rad_cc.tolist())
angle_rad_c = angle_idxes.float()/120*2*math.pi
batch_angles_c.append(angle_rad_c.tolist())
batch_distances.append(
((distance_idxes + 1)*0.25).tolist())
# counter-clockwise image indexes
img_idxes = 12 - (angle_idxes.cpu().numpy()+5) // 10
img_idxes[img_idxes==12] = 0
batch_img_idxes.append(img_idxes)
for k in range(len(img_idxes)):
cand_rgb[j][k] = rgb_feats[j][img_idxes[k]]
cand_depth[j][k] = depth_feats[j][img_idxes[k]]
# use clockwise angles because of vlnbert pretraining
cand_direction = dir_angle_feature_with_ele(batch_angles_c).to(self.device)
if in_train:
return cand_rgb, cand_depth, cand_direction, cand_mask, candidate_lengths, batch_angles, batch_distances #, batch_way_log_prob
else:
return cand_rgb, cand_depth, cand_direction, cand_mask, candidate_lengths, batch_angles, batch_distances
elif mode == 'navigation':
# use clockwise angles because of vlnbert pretraining
headings = [2*np.pi - k for k in headings]
prev_actions = angle_feature_with_ele(headings, device=self.device)
cand_rgb_feats_pool = self.space_pool(cand_rgb)
cand_rgb_feats_pool = self.drop_env(cand_rgb_feats_pool)
cand_depth_feats_pool = self.space_pool(cand_depth)
rgb_in = self.rgb_linear(cand_rgb_feats_pool)
depth_in = self.depth_linear(cand_depth_feats_pool)
vis_in = self.vismerge_linear(
torch.cat((rgb_in, depth_in, cand_direction), dim=2),
)
''' vln-bert processing ------------------------------------- '''
state_action_embed = torch.cat(
(lang_feats[:,0,:], prev_actions), dim=1)
state_with_action = self.action_state_project(state_action_embed)
state_with_action = self.action_LayerNorm(state_with_action)
self.vln_bert.config.directions = cand_rgb.size(1)
state_feats = torch.cat((
state_with_action.unsqueeze(1), lang_feats[:,1:,:]), dim=1)
bert_candidate_mask = (cand_mask == 0)
attention_mask = torch.cat((
lang_masks, bert_candidate_mask), dim=-1)
h_t, logits = self.vln_bert('visual',
state_feats,
attention_mask=attention_mask,
lang_mask=lang_masks, vis_mask=bert_candidate_mask,
img_feats=vis_in)
return logits, h_t
class BertLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-12):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
super(BertLayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
| 15,286 | 40.204852 | 142 | py |
Discrete-Continuous-VLN | Discrete-Continuous-VLN-main/vlnce_baselines/models/utils.py | import math
import torch
def angle_feature(headings, device=None):
heading_enc = torch.zeros(len(headings), 64, dtype=torch.float32)
for i, head in enumerate(headings):
heading_enc[i] = torch.tensor(
[math.sin(head), math.cos(head)] * (64 // 2))
return heading_enc.to(device)
def dir_angle_feature(angle_list, device=None):
feature_dim = 64
batch_size = len(angle_list)
max_leng = max([len(k) for k in angle_list]) + 1 # +1 for stop
heading_enc = torch.zeros(
batch_size, max_leng, feature_dim, dtype=torch.float32)
for i in range(batch_size):
for j, angle_rad in enumerate(angle_list[i]):
heading_enc[i][j] = torch.tensor(
[math.sin(angle_rad),
math.cos(angle_rad)] * (feature_dim // 2))
return heading_enc
def angle_feature_with_ele(headings, device=None):
heading_enc = torch.zeros(len(headings), 128, dtype=torch.float32)
for i, head in enumerate(headings):
heading_enc[i] = torch.tensor(
[
math.sin(head), math.cos(head),
math.sin(0.0), math.cos(0.0), # elevation
] * (128 // 4))
return heading_enc.to(device)
def dir_angle_feature_with_ele(angle_list, device=None):
feature_dim = 128
batch_size = len(angle_list)
max_leng = max([len(k) for k in angle_list]) + 1 # +1 for stop
heading_enc = torch.zeros(
batch_size, max_leng, feature_dim, dtype=torch.float32)
for i in range(batch_size):
for j, angle_rad in enumerate(angle_list[i]):
heading_enc[i][j] = torch.tensor(
[
math.sin(angle_rad), math.cos(angle_rad),
math.sin(0.0), math.cos(0.0), # elevation
] * (128 // 4))
return heading_enc
def length2mask(length, size=None, device=None):
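    # boolean mask that is True at padded positions (index >= sequence length),
    # e.g. length=[2, 4], size=4 -> [[False, False, True, True],
    #                                [False, False, False, False]]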
batch_size = len(length)
size = int(max(length)) if size is None else size
mask = (torch.arange(size, dtype=torch.int64).unsqueeze(0).repeat(batch_size, 1)
> (torch.LongTensor(length) - 1).unsqueeze(1)).to(device)
return mask | 2,129 | 31.769231 | 84 | py |
Discrete-Continuous-VLN | Discrete-Continuous-VLN-main/vlnce_baselines/models/policy.py | import abc
from typing import Any
from habitat_baselines.rl.ppo.policy import Policy
from habitat_baselines.utils.common import (
CategoricalNet,
CustomFixedCategorical,
)
from torch.distributions import Categorical
class ILPolicy(Policy, metaclass=abc.ABCMeta):
def __init__(self, net, dim_actions):
r"""Defines an imitation learning policy as having functions act() and
build_distribution().
"""
super(Policy, self).__init__()
self.net = net
self.dim_actions = dim_actions
# self.action_distribution = CategoricalNet(
# self.net.output_size, self.dim_actions
# )
def forward(self, *x):
raise NotImplementedError
def act(
self,
observations,
rnn_hidden_states,
prev_actions,
masks,
deterministic=False,
):
print('need to revise for CMA and VLNBERT')
import pdb; pdb.set_trace()
features, rnn_hidden_states = self.net(
observations, rnn_hidden_states, prev_actions, masks
)
distribution = self.action_distribution(features)
# if distribution.logit
if deterministic:
action = distribution.mode()
else:
action = distribution.sample()
return action, rnn_hidden_states
def get_value(self, *args: Any, **kwargs: Any):
raise NotImplementedError
def evaluate_actions(self, *args: Any, **kwargs: Any):
raise NotImplementedError
def build_distribution(
self, observations, rnn_hidden_states, prev_actions, masks
) -> CustomFixedCategorical:
features, rnn_hidden_states = self.net(
observations, rnn_hidden_states, prev_actions, masks
)
return self.action_distribution(features)
def act2(
self,
observations,
rnn_hidden_states,
prev_actions,
masks,
deterministic=False,
):
print('need to revise for CMA and VLNBERT')
import pdb; pdb.set_trace()
feature_rgb, feature_depth, rnn_hidden_states = self.net(
observations, rnn_hidden_states, prev_actions, masks
)
distribution_rgb = self.action_distribution(feature_rgb)
distribution_depth = self.action_distribution(feature_depth)
probs = (distribution_rgb.probs + distribution_depth.probs)/2
# if distribution.logit
if deterministic:
action = probs.argmax(dim=-1, keepdim=True)
else:
action = Categorical(probs).sample().unsqueeze(-1)
return action, rnn_hidden_states
| 2,642 | 27.419355 | 78 | py |
Discrete-Continuous-VLN | Discrete-Continuous-VLN-main/vlnce_baselines/models/encoders/resnet_encoders.py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
from gym import spaces
from habitat import logger
from habitat_baselines.rl.ddppo.policy import resnet
from habitat_baselines.rl.ddppo.policy.resnet_policy import ResNetEncoder
import torchvision
class VlnResnetDepthEncoder(nn.Module):
def __init__(
self,
observation_space,
output_size=128,
checkpoint="NONE",
backbone="resnet50",
resnet_baseplanes=32,
normalize_visual_inputs=False,
trainable=False,
spatial_output: bool = False,
):
super().__init__()
self.visual_encoder = ResNetEncoder(
spaces.Dict({"depth": observation_space.spaces["depth"]}),
baseplanes=resnet_baseplanes,
ngroups=resnet_baseplanes // 2,
make_backbone=getattr(resnet, backbone),
normalize_visual_inputs=normalize_visual_inputs,
)
for param in self.visual_encoder.parameters():
param.requires_grad_(trainable)
if checkpoint != "NONE":
ddppo_weights = torch.load(checkpoint)
weights_dict = {}
for k, v in ddppo_weights["state_dict"].items():
split_layer_name = k.split(".")[2:]
if split_layer_name[0] != "visual_encoder":
continue
layer_name = ".".join(split_layer_name[1:])
weights_dict[layer_name] = v
del ddppo_weights
self.visual_encoder.load_state_dict(weights_dict, strict=True)
self.spatial_output = spatial_output
if not self.spatial_output:
self.output_shape = (output_size,)
# self.visual_fc = nn.Sequential(
# nn.Flatten(),
# nn.Linear(
# np.prod(self.visual_encoder.output_shape), output_size
# ),
# nn.ReLU(True),
# )
            pass
else:
self.spatial_embeddings = nn.Embedding(
self.visual_encoder.output_shape[1]
* self.visual_encoder.output_shape[2],
64,
)
self.output_shape = list(self.visual_encoder.output_shape)
self.output_shape[0] += self.spatial_embeddings.embedding_dim
self.output_shape = tuple(self.output_shape)
def forward(self, observations):
"""
Args:
observations: [BATCH, HEIGHT, WIDTH, CHANNEL]
Returns:
[BATCH, OUTPUT_SIZE]
"""
if "depth_features" in observations:
x = observations["depth_features"]
else:
x = self.visual_encoder(observations)
if self.spatial_output:
b, c, h, w = x.size()
spatial_features = (
self.spatial_embeddings(
torch.arange(
0,
self.spatial_embeddings.num_embeddings,
device=x.device,
dtype=torch.long,
)
)
.view(1, -1, h, w)
.expand(b, self.spatial_embeddings.embedding_dim, h, w)
)
return torch.cat([x, spatial_features], dim=1)
else:
# return self.visual_fc(x)
return x
class TorchVisionResNet50(nn.Module):
r"""
Takes in observations and produces an embedding of the rgb component.
Args:
observation_space: The observation_space of the agent
output_size: The size of the embedding vector
device: torch.device
"""
def __init__(
self,
observation_space,
output_size,
device,
spatial_output: bool = False,
):
super().__init__()
self.device = device
self.resnet_layer_size = 2048
linear_layer_input_size = 0
if "rgb" in observation_space.spaces:
self._n_input_rgb = observation_space.spaces["rgb"].shape[2]
obs_size_0 = observation_space.spaces["rgb"].shape[0]
obs_size_1 = observation_space.spaces["rgb"].shape[1]
if obs_size_0 != 224 or obs_size_1 != 224:
logger.warn(
"TorchVisionResNet50: observation size is not conformant to expected ResNet input size [3x224x224]"
)
linear_layer_input_size += self.resnet_layer_size
else:
self._n_input_rgb = 0
if self.is_blind:
self.cnn = nn.Sequential()
return
rgb_resnet = models.resnet50(pretrained=True)
rgb_modules = list(rgb_resnet.children())[:-2]
self.cnn = torch.nn.Sequential(*rgb_modules)
# disable gradients for resnet, params frozen
for param in self.cnn.parameters():
param.requires_grad = False
self.cnn.eval()
self.spatial_output = spatial_output
if not self.spatial_output:
self.output_shape = (output_size,)
# self.fc = nn.Linear(linear_layer_input_size, output_size)
# self.activation = nn.ReLU()
            pass
else:
class SpatialAvgPool(nn.Module):
def forward(self, x):
x = F.adaptive_avg_pool2d(x, (4, 4))
return x
self.cnn.avgpool = SpatialAvgPool()
self.cnn.fc = nn.Sequential()
self.spatial_embeddings = nn.Embedding(4 * 4, 64)
self.output_shape = (
self.resnet_layer_size + self.spatial_embeddings.embedding_dim,
4,
4,
)
# self.layer_extract = self.cnn._modules.get("avgpool")
from torchvision import transforms
self.rgb_transform = torch.nn.Sequential(
transforms.ConvertImageDtype(torch.float),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
)
@property
def is_blind(self):
return self._n_input_rgb == 0
def forward(self, observations):
r"""Sends RGB observation through the TorchVision ResNet50 pre-trained
on ImageNet. Sends through fully connected layer, activates, and
returns final embedding.
"""
def resnet_forward(observation):
# resnet_output = torch.zeros(
# 1, dtype=torch.float32, device=observation.device
# )
# def hook(m, i, o):
# resnet_output.set_(o)
# output: [BATCH x RESNET_DIM]
# h = self.layer_extract.register_forward_hook(hook)
resnet_output = self.cnn(observation)
# h.remove()
return resnet_output
if "rgb_features" in observations:
resnet_output = observations["rgb_features"]
else:
# permute tensor to dimension [BATCH x CHANNEL x HEIGHT x WIDTH]
rgb_observations = observations["rgb"].permute(0, 3, 1, 2)
rgb_observations = self.rgb_transform(rgb_observations)
# rgb_observations = rgb_observations / 255.0 # normalize RGB
resnet_output = resnet_forward(rgb_observations.contiguous())
if self.spatial_output:
b, c, h, w = resnet_output.size()
spatial_features = (
self.spatial_embeddings(
torch.arange(
0,
self.spatial_embeddings.num_embeddings,
device=resnet_output.device,
dtype=torch.long,
)
)
.view(1, -1, h, w)
.expand(b, self.spatial_embeddings.embedding_dim, h, w)
)
return torch.cat([resnet_output, spatial_features], dim=1)#.to(self.device)
else:
# return self.activation(
# self.fc(torch.flatten(resnet_output, 1))
# ) # [BATCH x OUTPUT_DIM]
return resnet_output
| 8,103 | 32.626556 | 119 | py |
Discrete-Continuous-VLN | Discrete-Continuous-VLN-main/vlnce_baselines/models/encoders/instruction_encoder.py | import gzip
import json
import torch
import torch.nn as nn
from habitat import Config
class InstructionEncoder(nn.Module):
def __init__(self, config: Config):
r"""An encoder that uses RNN to encode an instruction. Returns
the final hidden state after processing the instruction sequence.
Args:
config: must have
embedding_size: The dimension of each embedding vector
hidden_size: The hidden (output) size
rnn_type: The RNN cell type. Must be GRU or LSTM
final_state_only: Whether or not to return just the final state
"""
super().__init__()
self.config = config
# lang_drop_ratio = 0.50
# self.drop = nn.Dropout(p=lang_drop_ratio)
rnn = nn.GRU if self.config.rnn_type == "GRU" else nn.LSTM
self.encoder_rnn = rnn(
input_size=config.embedding_size,
hidden_size=config.hidden_size,
bidirectional=config.bidirectional,
)
if config.sensor_uuid == "instruction":
if self.config.use_pretrained_embeddings:
self.embedding_layer = nn.Embedding.from_pretrained(
embeddings=self._load_embeddings(),
freeze=not self.config.fine_tune_embeddings,
)
else: # each embedding initialized to sampled Gaussian
self.embedding_layer = nn.Embedding(
num_embeddings=config.vocab_size,
embedding_dim=config.embedding_size,
padding_idx=0,
)
@property
def output_size(self):
return self.config.hidden_size * (1 + int(self.config.bidirectional))
def _load_embeddings(self):
"""Loads word embeddings from a pretrained embeddings file.
PAD: index 0. [0.0, ... 0.0]
UNK: index 1. mean of all R2R word embeddings: [mean_0, ..., mean_n]
why UNK is averaged: https://bit.ly/3u3hkYg
Returns:
embeddings tensor of size [num_words x embedding_dim]
"""
with gzip.open(self.config.embedding_file, "rt") as f:
embeddings = torch.tensor(json.load(f))
return embeddings
def forward(self, observations):
"""
Tensor sizes after computation:
instruction: [batch_size x seq_length]
lengths: [batch_size]
hidden_state: [batch_size x hidden_size]
"""
if self.config.sensor_uuid == "instruction":
instruction = observations["instruction"].long()
lengths = (instruction != 0.0).long().sum(dim=1)
instruction = self.embedding_layer(instruction)
# instruction = self.drop(instruction)
else:
instruction = observations["rxr_instruction"]
lengths = (instruction != 0.0).long().sum(dim=2)
lengths = (lengths != 0.0).long().sum(dim=1)
packed_seq = nn.utils.rnn.pack_padded_sequence(
instruction, lengths.cpu(), batch_first=True, enforce_sorted=False
)
output, final_state = self.encoder_rnn(packed_seq)
if self.config.rnn_type == "LSTM":
final_state = final_state[0]
if self.config.final_state_only: # default False
return final_state.squeeze(0)
else:
ctx = nn.utils.rnn.pad_packed_sequence(output,
batch_first=True)[0].permute(0, 2, 1)
all_lang_masks = (ctx == 0.0).all(dim=1)
ctx = ctx.permute(0, 2, 1)
# ctx = self.drop(ctx)
return ctx, all_lang_masks
| 3,647 | 34.764706 | 79 | py |
Discrete-Continuous-VLN | Discrete-Continuous-VLN-main/vlnce_baselines/models/vlnbert/vlnbert_PREVALENT.py | # PREVALENT, 2020, [email protected]
# Modified in Recurrent VLN-BERT, 2020, [email protected]
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import logging
import math
import os
import sys
from io import open
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import BertPreTrainedModel, BertConfig
import pdb
logger = logging.getLogger(__name__)
def gelu(x):
"""Implementation of the gelu activation function.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
Also see https://arxiv.org/abs/1606.08415
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
def swish(x):
return x * torch.sigmoid(x)
ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish}
try:
from apex.normalization.fused_layer_norm import FusedLayerNorm as BertLayerNorm
except (ImportError, AttributeError) as e:
logger.info("Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex .")
BertLayerNorm = torch.nn.LayerNorm
class BertEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config):
super(BertEmbeddings, self).__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_ids, token_type_ids=None, position_ids=None):
seq_length = input_ids.size(1)
if position_ids is None:
position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
words_embeddings = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = words_embeddings + position_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class BertSelfAttention(nn.Module):
def __init__(self, config):
super(BertSelfAttention, self).__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads))
self.output_attentions = True
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, attention_mask, head_mask=None):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
# Apply the attention mask is (precomputed for all layers in BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_scores) if self.output_attentions else (context_layer,)
return outputs
class BertSelfOutput(nn.Module):
def __init__(self, config):
super(BertSelfOutput, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertAttention(nn.Module):
def __init__(self, config):
super(BertAttention, self).__init__()
self.self = BertSelfAttention(config)
self.output = BertSelfOutput(config)
def forward(self, input_tensor, attention_mask, head_mask=None):
self_outputs = self.self(input_tensor, attention_mask, head_mask)
attention_output = self.output(self_outputs[0], input_tensor)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
class BertIntermediate(nn.Module):
def __init__(self, config):
super(BertIntermediate, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class BertOutput(nn.Module):
def __init__(self, config):
super(BertOutput, self).__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertLayer(nn.Module):
def __init__(self, config):
super(BertLayer, self).__init__()
self.attention = BertAttention(config)
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)
def forward(self, hidden_states, attention_mask, head_mask=None):
attention_outputs = self.attention(hidden_states, attention_mask, head_mask)
attention_output = attention_outputs[0]
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them
return outputs
class BertPooler(nn.Module):
def __init__(self, config):
super(BertPooler, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class BertXAttention(nn.Module):
def __init__(self, config, ctx_dim=None):
super().__init__()
self.att = BertOutAttention(config, ctx_dim=ctx_dim)
self.output = BertSelfOutput(config)
def forward(self, input_tensor, ctx_tensor, ctx_att_mask=None):
output, attention_scores = self.att(input_tensor, ctx_tensor, ctx_att_mask)
attention_output = self.output(output, input_tensor)
return attention_output, attention_scores
class BertOutAttention(nn.Module):
def __init__(self, config, ctx_dim=None):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads))
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
# visual_dim = 2048
if ctx_dim is None:
ctx_dim =config.hidden_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(ctx_dim, self.all_head_size)
self.value = nn.Linear(ctx_dim, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, context, attention_mask=None):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(context)
mixed_value_layer = self.value(context)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
# Apply the attention mask is (precomputed for all layers in BertModel forward() function)
if attention_mask is not None:
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
return context_layer, attention_scores
class LXRTXLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
# Lang self-att and FFN layer
self.lang_self_att = BertAttention(config)
self.lang_inter = BertIntermediate(config)
self.lang_output = BertOutput(config)
# Visn self-att and FFN layer
self.visn_self_att = BertAttention(config)
self.visn_inter = BertIntermediate(config)
self.visn_output = BertOutput(config)
# The cross attention layer
self.visual_attention = BertXAttention(config)
def cross_att(self, lang_input, lang_attention_mask, visn_input, visn_attention_mask):
''' Cross Attention -- cross for vision not for language '''
visn_att_output, attention_scores = self.visual_attention(visn_input, lang_input, ctx_att_mask=lang_attention_mask)
return visn_att_output, attention_scores
def self_att(self, visn_input, visn_attention_mask):
''' Self Attention -- on visual features with language clues '''
visn_att_output = self.visn_self_att(visn_input, visn_attention_mask)
return visn_att_output
def output_fc(self, visn_input):
''' Feed forward '''
visn_inter_output = self.visn_inter(visn_input)
visn_output = self.visn_output(visn_inter_output, visn_input)
return visn_output
def forward(self, lang_feats, lang_attention_mask,
visn_feats, visn_attention_mask, tdx):
''' visual self-attention with state '''
visn_att_output = torch.cat((lang_feats[:, 0:1, :], visn_feats), dim=1)
state_vis_mask = torch.cat((lang_attention_mask[:,:,:,0:1], visn_attention_mask), dim=-1)
''' state and vision attend to language '''
visn_att_output, cross_attention_scores = self.cross_att(lang_feats[:, 1:, :], lang_attention_mask[:, :, :, 1:], visn_att_output, state_vis_mask)
language_attention_scores = cross_attention_scores[:, :, 0, :]
state_visn_att_output = self.self_att(visn_att_output, state_vis_mask)
state_visn_output = self.output_fc(state_visn_att_output[0])
visn_att_output = state_visn_output[:, 1:, :]
lang_att_output = torch.cat((state_visn_output[:, 0:1, :], lang_feats[:,1:,:]), dim=1)
visual_attention_scores = state_visn_att_output[1][:, :, 0, 1:]
return lang_att_output, visn_att_output, language_attention_scores, visual_attention_scores
class VisionEncoder(nn.Module):
def __init__(self, vision_size, config):
super().__init__()
feat_dim = vision_size
# Object feature encoding
self.visn_fc = nn.Linear(feat_dim, config.hidden_size)
self.visn_layer_norm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, visn_input):
feats = visn_input
x = self.visn_fc(feats)
x = self.visn_layer_norm(x)
output = self.dropout(x)
return output
class VLNBert(BertPreTrainedModel):
def __init__(self, config):
super(VLNBert, self).__init__(config)
self.embeddings = BertEmbeddings(config)
self.pooler = BertPooler(config)
self.img_dim = config.img_feature_dim # 2176
logger.info('VLNBert Image Dimension: {}'.format(self.img_dim))
self.img_feature_type = config.img_feature_type # ''
self.vl_layers = config.vl_layers # 4
self.la_layers = config.la_layers # 9
self.lalayer = nn.ModuleList(
[BertLayer(config) for _ in range(self.la_layers)])
self.addlayer = nn.ModuleList(
[LXRTXLayer(config) for _ in range(self.vl_layers)])
# self.vision_encoder = VisionEncoder(self.config.img_feature_dim, self.config)
# self.apply(self.init_weights)
self.init_weights()
# del self.img_dim
# del self.vision_encoder
# del self.embeddings
def forward(self, mode, input_ids, token_type_ids=None,
attention_mask=None, lang_mask=None, vis_mask=None,
position_ids=None, head_mask=None, img_feats=None):
attention_mask = lang_mask
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
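        # expand the 0/1 language mask to (batch, 1, 1, seq_len) and turn it into
        # an additive bias: 0 for valid tokens, -10000 for padding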
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
head_mask = [None] * self.config.num_hidden_layers
if mode == 'language':
''' LXMERT language branch (in VLN only perform this at initialization) '''
embedding_output = self.embeddings(input_ids,
position_ids=position_ids, token_type_ids=token_type_ids)
text_embeds = embedding_output
for layer_module in self.lalayer:
temp_output = layer_module(text_embeds, extended_attention_mask)
text_embeds = temp_output[0]
sequence_output = text_embeds
pooled_output = self.pooler(sequence_output)
return pooled_output, sequence_output
elif mode == 'visual':
''' LXMERT visual branch (no language processing during navigation) '''
text_embeds = input_ids
text_mask = extended_attention_mask
# img_embedding_output = self.vision_encoder(img_feats)
img_embedding_output = img_feats
img_seq_len = img_feats.shape[1]
batch_size = text_embeds.size(0)
img_seq_mask = vis_mask
extended_img_mask = img_seq_mask.unsqueeze(1).unsqueeze(2)
extended_img_mask = extended_img_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_img_mask = (1.0 - extended_img_mask) * -10000.0
img_mask = extended_img_mask
lang_output = text_embeds
visn_output = img_embedding_output
for tdx, layer_module in enumerate(self.addlayer):
lang_output, visn_output, language_attention_scores, visual_attention_scores = layer_module(lang_output, text_mask, visn_output, img_mask, tdx)
sequence_output = lang_output
pooled_output = self.pooler(sequence_output)
visual_action_scores = visual_attention_scores.mean(dim=1)
return pooled_output, visual_action_scores
| 19,050 | 41.811236 | 159 | py |
Discrete-Continuous-VLN | Discrete-Continuous-VLN-main/habitat_extensions/obs_transformers.py | import copy
import numbers
from typing import Dict, List, Tuple, Union
import torch
from gym import spaces
from habitat.config import Config
from habitat.core.logging import logger
from habitat_baselines.common.baseline_registry import baseline_registry
from habitat_baselines.common.obs_transformers import ObservationTransformer
from habitat_baselines.utils.common import (
center_crop,
get_image_height_width,
overwrite_gym_box_shape,
)
from torch import Tensor
@baseline_registry.register_obs_transformer()
class CenterCropperPerSensor(ObservationTransformer):
"""An observation transformer that center crops your input on a per-sensor basis."""
sensor_crops: Dict[str, Union[int, Tuple[int, int]]]
channels_last: bool
def __init__(
self,
sensor_crops: List[Tuple[str, Union[int, Tuple[int, int]]]],
channels_last: bool = True,
):
"""Args:
size: A sequence (h, w) or int of the size you wish to resize/center_crop.
If int, assumes square crop
channels_list: indicates if channels is the last dimension
trans_keys: The list of sensors it will try to centercrop.
"""
super().__init__()
self.sensor_crops = dict(sensor_crops)
for k in self.sensor_crops:
            size = self.sensor_crops[k]
            if isinstance(size, numbers.Number):
                size = (int(size), int(size))
                self.sensor_crops[k] = size
            assert len(size) == 2, "forced input size must be len of 2 (h, w)"
self.channels_last = channels_last
def transform_observation_space(
self,
observation_space: spaces.Dict,
):
observation_space = copy.deepcopy(observation_space)
for key in observation_space.spaces:
if (
key in self.sensor_crops
and observation_space.spaces[key].shape[-3:-1]
!= self.sensor_crops[key]
):
h, w = get_image_height_width(
observation_space.spaces[key], channels_last=True
)
logger.info(
"Center cropping observation size of %s from %s to %s"
% (key, (h, w), self.sensor_crops[key])
)
observation_space.spaces[key] = overwrite_gym_box_shape(
observation_space.spaces[key], self.sensor_crops[key]
)
return observation_space
@torch.no_grad()
def forward(self, observations: Dict[str, Tensor]) -> Dict[str, Tensor]:
observations.update(
{
sensor: center_crop(
observations[sensor],
self.sensor_crops[sensor],
channels_last=self.channels_last,
)
for sensor in self.sensor_crops
if sensor in observations
}
)
return observations
@classmethod
def from_config(cls, config: Config):
cc_config = config.RL.POLICY.OBS_TRANSFORMS.CENTER_CROPPER_PER_SENSOR
return cls(cc_config.SENSOR_CROPS)
@baseline_registry.register_obs_transformer()
class ResizerPerSensor(ObservationTransformer):
r"""An nn module the resizes images to any aspect ratio.
This module assumes that all images in the batch are of the same size.
"""
def __init__(
self,
sizes: int,
channels_last: bool = True,
trans_keys: Tuple[str] = ("rgb", "depth", "semantic"),
):
super().__init__()
"""Args:
size: The size you want to resize
channels_last: indicates if channels is the last dimension
"""
self.sensor_resizes = dict(sizes)
for k in self.sensor_resizes:
            size = self.sensor_resizes[k]
            if isinstance(size, numbers.Number):
                size = (int(size), int(size))
                self.sensor_resizes[k] = size
            assert len(size) == 2, "forced input size must be len of 2 (h, w)"
self.channels_last = channels_last
def transform_observation_space(
self,
observation_space: spaces.Dict,
):
for key in observation_space.spaces:
if (
key in self.sensor_resizes
and observation_space.spaces[key].shape[-3:-1]
!= self.sensor_resizes[key]
):
h, w = get_image_height_width(
observation_space.spaces[key], channels_last=True
)
logger.info(
"Resizing observation size of %s from %s to %s"
% (key, (h, w), self.sensor_resizes[key])
)
observation_space.spaces[key] = overwrite_gym_box_shape(
observation_space.spaces[key], self.sensor_resizes[key]
)
return observation_space
def _transform_obs(self, obs: torch.Tensor, size) -> torch.Tensor:
img = torch.as_tensor(obs)
no_batch_dim = len(img.shape) == 3
if len(img.shape) < 3 or len(img.shape) > 5:
raise NotImplementedError()
if no_batch_dim:
img = img.unsqueeze(0) # Adds a batch dimension
h, w = get_image_height_width(img, channels_last=self.channels_last)
if self.channels_last:
if len(img.shape) == 4:
# NHWC -> NCHW
img = img.permute(0, 3, 1, 2)
else:
# NDHWC -> NDCHW
img = img.permute(0, 1, 4, 2, 3)
h, w = size
img = torch.nn.functional.interpolate(
img.float(), size=(h, w), mode="area"
).to(dtype=img.dtype)
if self.channels_last:
if len(img.shape) == 4:
# NCHW -> NHWC
img = img.permute(0, 2, 3, 1)
else:
# NDCHW -> NDHWC
img = img.permute(0, 1, 3, 4, 2)
if no_batch_dim:
img = img.squeeze(dim=0) # Removes the batch dimension
return img
@torch.no_grad()
def forward(
self, observations: Dict[str, torch.Tensor]
) -> Dict[str, torch.Tensor]:
observations.update(
{
sensor: self._transform_obs(
observations[sensor], self.sensor_resizes[sensor])
for sensor in self.sensor_resizes
if sensor in observations
}
)
return observations
@classmethod
def from_config(cls, config: Config):
r_config = config.RL.POLICY.OBS_TRANSFORMS.RESIZER_PER_SENSOR
return cls(r_config.SIZES)
| 6,642 | 33.598958 | 88 | py |
Discrete-Continuous-VLN | Discrete-Continuous-VLN-main/habitat_extensions/habitat_simulator.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import (
TYPE_CHECKING,
Any,
Dict,
List,
Optional,
Sequence,
Set,
Union,
cast,
)
import numpy as np
from gym import spaces
from gym.spaces.box import Box
from numpy import ndarray
if TYPE_CHECKING:
from torch import Tensor
import habitat_sim
from habitat_sim.simulator import MutableMapping, MutableMapping_T
from habitat.sims.habitat_simulator.habitat_simulator import HabitatSim
from habitat.core.dataset import Episode
from habitat.core.registry import registry
from habitat.core.simulator import (
AgentState,
Config,
DepthSensor,
Observations,
RGBSensor,
SemanticSensor,
Sensor,
SensorSuite,
ShortestPathPoint,
Simulator,
VisualObservation,
)
from habitat.core.spaces import Space
# inherit habitat-lab/habitat/sims/habitat_simulator/habitat_simulator.py
@registry.register_simulator(name="Sim-v1")
class Simulator(HabitatSim):
r"""Simulator wrapper over habitat-sim
habitat-sim repo: https://github.com/facebookresearch/habitat-sim
Args:
config: configuration for initializing the simulator.
"""
def __init__(self, config: Config) -> None:
super().__init__(config)
def step_without_obs(self,
action: Union[str, int, MutableMapping_T[int, Union[str, int]]],
dt: float = 1.0 / 60.0,):
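        # performs the same agent action and collision bookkeeping as
        # habitat_sim.Simulator.step(), but without rendering sensor observations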
self._num_total_frames += 1
if isinstance(action, MutableMapping):
return_single = False
else:
action = cast(Dict[int, Union[str, int]], {self._default_agent_id: action})
return_single = True
collided_dict: Dict[int, bool] = {}
for agent_id, agent_act in action.items():
agent = self.get_agent(agent_id)
collided_dict[agent_id] = agent.act(agent_act)
self.__last_state[agent_id] = agent.get_state()
# # step physics by dt
# step_start_Time = time.time()
# super().step_world(dt)
# self._previous_step_time = time.time() - step_start_Time
multi_observations = {}
for agent_id in action.keys():
agent_observation = {}
agent_observation["collided"] = collided_dict[agent_id]
multi_observations[agent_id] = agent_observation
if return_single:
sim_obs = multi_observations[self._default_agent_id]
else:
sim_obs = multi_observations
self._prev_sim_obs = sim_obs
| 2,654 | 27.244681 | 87 | py |
Discrete-Continuous-VLN | Discrete-Continuous-VLN-main/waypoint_prediction/TRM_net.py | import torch
import torch.nn as nn
import numpy as np
from .utils import get_attention_mask
from .transformer.waypoint_bert import WaypointBert
from pytorch_transformers import BertConfig
class BinaryDistPredictor_TRM(nn.Module):
def __init__(self, hidden_dim=768, n_classes=12, device=None):
super(BinaryDistPredictor_TRM, self).__init__()
self.device = device
self.num_angles = 120
self.num_imgs = 12
        self.n_classes = n_classes  # number of distance classes (12 by default)
self.TRM_LAYER = 2
self.TRM_NEIGHBOR = 1
self.HEATMAP_OFFSET = 5
self.visual_fc_rgb = nn.Sequential(
nn.Flatten(),
nn.Linear(np.prod([2048,7,7]), hidden_dim),
nn.ReLU(True),
)
self.visual_fc_depth = nn.Sequential(
nn.Flatten(),
nn.Linear(np.prod([128,4,4]), hidden_dim),
nn.ReLU(True),
)
self.visual_merge = nn.Sequential(
nn.Linear(hidden_dim*2, hidden_dim),
nn.ReLU(True),
)
config = BertConfig()
config.model_type = 'visual'
config.finetuning_task = 'waypoint_predictor'
config.hidden_dropout_prob = 0.3
config.hidden_size = 768
config.num_attention_heads = 12
config.num_hidden_layers = self.TRM_LAYER
self.waypoint_TRM = WaypointBert(config=config)
self.mask = get_attention_mask(
num_imgs=self.num_imgs,
neighbor=self.TRM_NEIGHBOR).to(self.device)
self.vis_classifier = nn.Sequential(
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim,
int(n_classes*(self.num_angles/self.num_imgs))),
)
def forward(self, rgb_feats, depth_feats):
bsi = rgb_feats.size(0) // self.num_imgs
rgb_x = self.visual_fc_rgb(rgb_feats).reshape(
bsi, self.num_imgs, -1)
depth_x = self.visual_fc_depth(depth_feats).reshape(
bsi, self.num_imgs, -1)
vis_x = self.visual_merge(
torch.cat((rgb_x, depth_x), dim=-1)
)
attention_mask = self.mask.repeat(bsi,1,1,1)
vis_rel_x = self.waypoint_TRM(
vis_x, attention_mask=attention_mask
)
vis_logits = self.vis_classifier(vis_rel_x)
vis_logits = vis_logits.reshape(
bsi, self.num_angles, self.n_classes)
# heatmap offset (each image is pointing at the agent's heading)
vis_logits = torch.cat(
(vis_logits[:,self.HEATMAP_OFFSET:,:], vis_logits[:,:self.HEATMAP_OFFSET,:]),
dim=1)
return vis_logits #, vis_rel_x
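# Illustrative sketch (not part of the original repo): the HEATMAP_OFFSET concatenation in
# BinaryDistPredictor_TRM.forward() is a circular shift along the angle dimension; torch.roll
# produces the same result, which is a convenient way to sanity-check the offset logic.
def _heatmap_offset_demo(offset=5):
    logits = torch.arange(2 * 120 * 12, dtype=torch.float).reshape(2, 120, 12)
    shifted = torch.cat((logits[:, offset:, :], logits[:, :offset, :]), dim=1)
    assert torch.equal(shifted, torch.roll(logits, shifts=-offset, dims=1))
    return shifted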
class BertLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-12):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
super(BertLayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
| 3,269 | 32.030303 | 89 | py |
Discrete-Continuous-VLN | Discrete-Continuous-VLN-main/waypoint_prediction/utils.py |
import torch
import numpy as np
import sys
import glob
import json
def neighborhoods(mu, x_range, y_range, sigma, circular_x=True, gaussian=False):
""" Generate masks centered at mu of the given x and y range with the
origin in the centre of the output
Inputs:
mu: tensor (N, 2)
Outputs:
        tensor (N, y_range, x_range)
"""
x_mu = mu[:,0].unsqueeze(1).unsqueeze(1)
y_mu = mu[:,1].unsqueeze(1).unsqueeze(1)
# Generate bivariate Gaussians centered at position mu
x = torch.arange(start=0,end=x_range, device=mu.device, dtype=mu.dtype).unsqueeze(0).unsqueeze(0)
y = torch.arange(start=0,end=y_range, device=mu.device, dtype=mu.dtype).unsqueeze(1).unsqueeze(0)
y_diff = y - y_mu
x_diff = x - x_mu
if circular_x:
x_diff = torch.min(torch.abs(x_diff), torch.abs(x_diff + x_range))
if gaussian:
output = torch.exp(-0.5 * ((x_diff/sigma[0])**2 + (y_diff/sigma[1])**2 ))
else:
output = torch.logical_and(
torch.abs(x_diff) <= sigma[0], torch.abs(y_diff) <= sigma[1]
).type(mu.dtype)
return output
def nms(pred, max_predictions=10, sigma=(1.0,1.0), gaussian=False):
''' Input (batch_size, 1, height, width) '''
shape = pred.shape
output = torch.zeros_like(pred)
flat_pred = pred.reshape((shape[0],-1)) # (BATCH_SIZE, 24*48)
supp_pred = pred.clone()
flat_output = output.reshape((shape[0],-1)) # (BATCH_SIZE, 24*48)
for i in range(max_predictions):
# Find and save max over the entire map
flat_supp_pred = supp_pred.reshape((shape[0],-1))
val, ix = torch.max(flat_supp_pred, dim=1)
indices = torch.arange(0,shape[0])
flat_output[indices,ix] = flat_pred[indices,ix]
# Suppression
        y = torch.div(ix, shape[-1], rounding_mode='floor')  # integer row index (floor division)
x = ix % shape[-1]
mu = torch.stack([x,y], dim=1).float()
g = neighborhoods(mu, shape[-1], shape[-2], sigma, gaussian=gaussian)
supp_pred *= (1-g.unsqueeze(1))
output[output < 0] = 0
return output
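# Illustrative sketch (not part of the original repo): running nms() above on a random
# single-channel heatmap (in the waypoint predictor the two spatial axes are presumably
# angles x distances). Only the top-k peaks survive; everything else is zeroed out.
def _nms_demo():
    heatmap = torch.rand(2, 1, 24, 48)
    peaks = nms(heatmap, max_predictions=3, sigma=(4.0, 4.0))
    kept_per_sample = (peaks > 0).flatten(1).sum(dim=1)   # at most 3 non-zero entries each
    return peaks, kept_per_sample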
def print_progress(iteration, total, prefix='', suffix='', decimals=1, bar_length=50):
"""
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
bar_length - Optional : character length of bar (Int)
"""
str_format = "{0:." + str(decimals) + "f}"
percents = str_format.format(100 * (iteration / float(total)))
filled_length = int(round(bar_length * iteration / float(total)))
bar = '█' * filled_length + '-' * (bar_length - filled_length)
sys.stdout.write('\r%s |%s| %s%s %s' % (prefix, bar, percents, '%', suffix)),
if iteration == total:
sys.stdout.write('\n')
sys.stdout.flush()
def get_attention_mask(num_imgs=12, neighbor=1):
assert neighbor <= 5
mask = np.zeros((num_imgs,num_imgs))
t = np.zeros(num_imgs)
t[:neighbor+1] = np.ones(neighbor+1)
if neighbor != 0:
t[-neighbor:] = np.ones(neighbor)
for ri in range(num_imgs):
mask[ri] = t
t = np.roll(t, 1)
    return torch.from_numpy(mask).reshape(1,1,num_imgs,num_imgs).long()
 | 3,409 | 32.431373 | 101 | py |
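# --- Illustrative sketch (not part of the dataset entries; added for clarity) ---
# The banded, circular attention mask produced by get_attention_mask(num_imgs=4, neighbor=1)
# lets each view attend to itself and its immediate left/right neighbours, with wrap-around:
#   [[1, 1, 0, 1],
#    [1, 1, 1, 0],
#    [0, 1, 1, 1],
#    [1, 0, 1, 1]]
# The same mask can be built directly from the circular distance between view indices:
import torch

def circular_neighbor_mask(num_imgs: int, neighbor: int) -> torch.Tensor:
    idx = torch.arange(num_imgs)
    dist = (idx[None, :] - idx[:, None]).abs()
    dist = torch.minimum(dist, num_imgs - dist)      # wrap-around (circular) distance
    return (dist <= neighbor).long().reshape(1, 1, num_imgs, num_imgs)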
Discrete-Continuous-VLN | Discrete-Continuous-VLN-main/waypoint_prediction/transformer/waypoint_bert.py | # Copyright (c) 2020 Microsoft Corporation. Licensed under the MIT license.
# Modified in Recurrent VLN-BERT, 2020, [email protected]
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import math
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn import CrossEntropyLoss, MSELoss
from .pytorch_transformer.modeling_bert import (BertEmbeddings,
BertSelfAttention, BertAttention, BertEncoder, BertLayer,
BertSelfOutput, BertIntermediate, BertOutput,
BertPooler, BertLayerNorm, BertPreTrainedModel,
BertPredictionHeadTransform)
logger = logging.getLogger(__name__)
class VisPosEmbeddings(nn.Module):
def __init__(self, config):
super(VisPosEmbeddings, self).__init__()
self.position_embeddings = nn.Embedding(24, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
# self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_vis_feats, position_ids=None):
seq_length = input_vis_feats.size(1)
if position_ids is None:
position_ids = torch.arange(seq_length, dtype=torch.long, device=input_vis_feats.device)
position_ids = position_ids.unsqueeze(0).repeat(input_vis_feats.size(0), 1)
vis_embeddings = input_vis_feats
position_embeddings = self.position_embeddings(position_ids)
embeddings = vis_embeddings + position_embeddings
embeddings = self.LayerNorm(embeddings)
# embeddings = self.dropout(embeddings)
return embeddings
class CaptionBertSelfAttention(BertSelfAttention):
"""
Modified from BertSelfAttention to add support for output_hidden_states.
"""
def __init__(self, config):
super(CaptionBertSelfAttention, self).__init__(config)
self.config = config
def forward(self, hidden_states, attention_mask, head_mask=None,
history_state=None):
if history_state is not None:
x_states = torch.cat([history_state, hidden_states], dim=1)
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(x_states)
mixed_value_layer = self.value(x_states)
else:
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
        ''' language features only provide Keys and Values '''
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        # Apply the attention mask (precomputed for all layers in BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_scores)
return outputs
class CaptionBertAttention(BertAttention):
"""
Modified from BertAttention to add support for output_hidden_states.
"""
def __init__(self, config):
super(CaptionBertAttention, self).__init__(config)
self.self = CaptionBertSelfAttention(config)
self.output = BertSelfOutput(config)
self.config = config
def forward(self, input_tensor, attention_mask, head_mask=None,
history_state=None):
''' transformer processing '''
self_outputs = self.self(input_tensor, attention_mask, head_mask, history_state)
        ''' feed-forward network with residual '''
attention_output = self.output(self_outputs[0], input_tensor)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
class CaptionBertLayer(BertLayer):
"""
Modified from BertLayer to add support for output_hidden_states.
"""
def __init__(self, config):
super(CaptionBertLayer, self).__init__(config)
self.attention = CaptionBertAttention(config)
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)
def forward(self, hidden_states, attention_mask, head_mask=None,
history_state=None):
attention_outputs = self.attention(hidden_states, attention_mask,
head_mask, history_state)
        ''' feed-forward network with residual '''
attention_output = attention_outputs[0]
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
outputs = (layer_output,) + attention_outputs[1:]
return outputs
class CaptionBertEncoder(BertEncoder):
"""
Modified from BertEncoder to add support for output_hidden_states.
"""
def __init__(self, config):
super(CaptionBertEncoder, self).__init__(config)
self.output_attentions = config.output_attentions
self.output_hidden_states = config.output_hidden_states
# 12 Bert layers
self.layer = nn.ModuleList([CaptionBertLayer(config) for _ in range(config.num_hidden_layers)])
self.config = config
def forward(self, hidden_states, attention_mask, head_mask=None,
encoder_history_states=None):
for i, layer_module in enumerate(self.layer):
history_state = None if encoder_history_states is None else encoder_history_states[i] # default None
layer_outputs = layer_module(
hidden_states, attention_mask, head_mask[i],
history_state)
hidden_states = layer_outputs[0]
if i == self.config.num_hidden_layers - 1:
slang_attention_score = layer_outputs[1]
outputs = (hidden_states, slang_attention_score)
return outputs
class BertImgModel(nn.Module):
""" Expand from BertModel to handle image region features as input
"""
def __init__(self, config):
super(BertImgModel, self).__init__()
self.config = config
# self.vis_pos_embeds = VisPosEmbeddings(config)
self.encoder = CaptionBertEncoder(config)
def forward(self, input_x, attention_mask=None):
extended_attention_mask = attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
head_mask = [None] * self.config.num_hidden_layers
''' positional encodings '''
# input_x = self.vis_pos_embeds(input_x)
''' pass to the Transformer layers '''
encoder_outputs = self.encoder(input_x,
extended_attention_mask, head_mask=head_mask)
outputs = (encoder_outputs[0],) + encoder_outputs[1:]
return outputs
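# Illustrative sketch (not part of the original repo): how the {0,1} attention mask is turned
# into an additive bias in BertImgModel.forward() above — adding -10000 to blocked positions
# makes their softmax weight effectively zero.
def _extended_mask_demo():
    mask = torch.tensor([[1., 1., 0., 1.]])      # 1 = attend, 0 = block
    bias = (1.0 - mask) * -10000.0
    scores = torch.zeros(1, 4) + bias            # pretend all raw attention scores are equal
    probs = torch.softmax(scores, dim=-1)        # ~[1/3, 1/3, 0, 1/3]
    return probs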
class WaypointBert(nn.Module):
"""
Modified from BertForMultipleChoice to support oscar training.
"""
def __init__(self, config=None):
super(WaypointBert, self).__init__()
self.config = config
self.bert = BertImgModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_x, attention_mask=None):
outputs = self.bert(input_x, attention_mask=attention_mask)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
        return sequence_output
 | 8,306 | 37.281106 | 112 | py |
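# --- Illustrative usage sketch (not part of the dataset entries; added for clarity) ---
# Mirrors how TRM_net.py drives the waypoint transformer. The import paths assume the repo
# layout implied by the file names above and that the `pytorch_transformers` package is installed.
import torch
from pytorch_transformers import BertConfig
from waypoint_prediction.transformer.waypoint_bert import WaypointBert
from waypoint_prediction.utils import get_attention_mask

config = BertConfig()
config.hidden_size = 768
config.num_attention_heads = 12
config.num_hidden_layers = 2
config.hidden_dropout_prob = 0.3

model = WaypointBert(config=config)
vis_x = torch.randn(4, 12, 768)                                    # (batch, 12 views, hidden)
mask = get_attention_mask(num_imgs=12, neighbor=1).repeat(4, 1, 1, 1)
out = model(vis_x, attention_mask=mask)                            # (4, 12, 768)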
Discrete-Continuous-VLN | Discrete-Continuous-VLN-main/waypoint_prediction/transformer/pytorch_transformer/modeling_utils.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model."""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import copy
import json
import logging
import os
from io import open
import six
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from torch.nn import functional as F
from .file_utils import cached_path
logger = logging.getLogger(__name__)
CONFIG_NAME = "config.json"
WEIGHTS_NAME = "pytorch_model.bin"
TF_WEIGHTS_NAME = 'model.ckpt'
try:
from torch.nn import Identity
except ImportError:
# Older PyTorch compatibility
class Identity(nn.Module):
r"""A placeholder identity operator that is argument-insensitive.
"""
def __init__(self, *args, **kwargs):
super(Identity, self).__init__()
def forward(self, input):
return input
if not six.PY2:
def add_start_docstrings(*docstr):
def docstring_decorator(fn):
fn.__doc__ = ''.join(docstr) + fn.__doc__
return fn
return docstring_decorator
else:
# Not possible to update class docstrings on python2
def add_start_docstrings(*docstr):
def docstring_decorator(fn):
return fn
return docstring_decorator
class PretrainedConfig(object):
""" Base class for all configuration classes.
Handle a few common parameters and methods for loading/downloading/saving configurations.
"""
pretrained_config_archive_map = {}
def __init__(self, **kwargs):
self.finetuning_task = kwargs.pop('finetuning_task', None)
self.num_labels = kwargs.pop('num_labels', 2)
self.output_attentions = kwargs.pop('output_attentions', False)
self.output_hidden_states = kwargs.pop('output_hidden_states', False)
self.torchscript = kwargs.pop('torchscript', False)
def save_pretrained(self, save_directory):
""" Save a configuration object to a directory, so that it
can be re-loaded using the `from_pretrained(save_directory)` class method.
"""
assert os.path.isdir(save_directory), "Saving path should be a directory where the model and configuration can be saved"
# If we save using the predefined names, we can load using `from_pretrained`
output_config_file = os.path.join(save_directory, CONFIG_NAME)
self.to_json_file(output_config_file)
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
r""" Instantiate a PretrainedConfig from a pre-trained model configuration.
Params:
**pretrained_model_name_or_path**: either:
- a string with the `shortcut name` of a pre-trained model configuration to load from cache
or download and cache if not already stored in cache (e.g. 'bert-base-uncased').
- a path to a `directory` containing a configuration file saved
using the `save_pretrained(save_directory)` method.
- a path or url to a saved configuration `file`.
**cache_dir**: (`optional`) string:
Path to a directory in which a downloaded pre-trained model
configuration should be cached if the standard cache should not be used.
**return_unused_kwargs**: (`optional`) bool:
- If False, then this function returns just the final configuration object.
- If True, then this functions returns a tuple `(config, unused_kwargs)` where `unused_kwargs`
is a dictionary consisting of the key/value pairs whose keys are not configuration attributes:
ie the part of kwargs which has not been used to update `config` and is otherwise ignored.
**kwargs**: (`optional`) dict:
Dictionary of key/value pairs with which to update the configuration object after loading.
- The values in kwargs of any keys which are configuration attributes will be used
to override the loaded values.
- Behavior concerning key/value pairs whose keys are *not* configuration attributes is controlled
by the `return_unused_kwargs` keyword parameter.
Examples::
>>> config = BertConfig.from_pretrained('bert-base-uncased') # Download configuration from S3 and cache.
>>> config = BertConfig.from_pretrained('./test/saved_model/') # E.g. config (or model) was saved using `save_pretrained('./test/saved_model/')`
>>> config = BertConfig.from_pretrained('./test/saved_model/my_configuration.json')
>>> config = BertConfig.from_pretrained('bert-base-uncased', output_attention=True, foo=False)
>>> assert config.output_attention == True
>>> config, unused_kwargs = BertConfig.from_pretrained('bert-base-uncased', output_attention=True,
>>> foo=False, return_unused_kwargs=True)
>>> assert config.output_attention == True
>>> assert unused_kwargs == {'foo': False}
"""
cache_dir = kwargs.pop('cache_dir', None)
return_unused_kwargs = kwargs.pop('return_unused_kwargs', False)
if pretrained_model_name_or_path in cls.pretrained_config_archive_map:
config_file = cls.pretrained_config_archive_map[pretrained_model_name_or_path]
elif os.path.isdir(pretrained_model_name_or_path):
config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
else:
config_file = pretrained_model_name_or_path
# redirect to the cache, if necessary
try:
resolved_config_file = cached_path(config_file, cache_dir=cache_dir)
except EnvironmentError:
if pretrained_model_name_or_path in cls.pretrained_config_archive_map:
logger.error(
"Couldn't reach server at '{}' to download pretrained model configuration file.".format(
config_file))
else:
logger.error(
"Model name '{}' was not found in model name list ({}). "
"We assumed '{}' was a path or url but couldn't find any file "
"associated to this path or url.".format(
pretrained_model_name_or_path,
', '.join(cls.pretrained_config_archive_map.keys()),
config_file))
return None
if resolved_config_file == config_file:
logger.info("loading configuration file {}".format(config_file))
else:
logger.info("loading configuration file {} from cache at {}".format(
config_file, resolved_config_file))
# Load config
config = cls.from_json_file(resolved_config_file)
# Update config with kwargs if needed
to_remove = []
for key, value in kwargs.items():
if hasattr(config, key):
setattr(config, key, value)
to_remove.append(key)
for key in to_remove:
kwargs.pop(key, None)
logger.info("Model config %s", config)
if return_unused_kwargs:
return config, kwargs
else:
return config
@classmethod
def from_dict(cls, json_object):
"""Constructs a `Config` from a Python dictionary of parameters."""
config = cls(vocab_size_or_config_json_file=-1)
for key, value in json_object.items():
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
"""Constructs a `BertConfig` from a json file of parameters."""
with open(json_file, "r", encoding='utf-8') as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
def to_json_file(self, json_file_path):
""" Save this instance to a json file."""
with open(json_file_path, "w", encoding='utf-8') as writer:
writer.write(self.to_json_string())
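# Illustrative sketch (not part of the original library code): PretrainedConfig round-trips
# through plain dicts / JSON strings, which is what save_pretrained()/from_pretrained() rely on.
def _config_roundtrip_demo():
    config = PretrainedConfig(num_labels=3, output_attentions=True)
    restored = PretrainedConfig.from_dict(json.loads(config.to_json_string()))
    assert restored.num_labels == 3 and restored.output_attentions is True
    return restored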
class PreTrainedModel(nn.Module):
""" Base class for all models. Handle loading/storing model config and
        a simple interface for downloading and loading pretrained models.
"""
config_class = PretrainedConfig
pretrained_model_archive_map = {}
load_tf_weights = lambda model, config, path: None
base_model_prefix = ""
input_embeddings = None
def __init__(self, config, *inputs, **kwargs):
super(PreTrainedModel, self).__init__()
if not isinstance(config, PretrainedConfig):
raise ValueError(
"Parameter config in `{}(config)` should be an instance of class `PretrainedConfig`. "
"To create a model from a pretrained model use "
"`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
self.__class__.__name__, self.__class__.__name__
))
# Save config in model
self.config = config
def _get_resized_embeddings(self, old_embeddings, new_num_tokens=None):
""" Build a resized Embedding Module from a provided token Embedding Module.
Increasing the size will add newly initialized vectors at the end
Reducing the size will remove vectors from the end
Args:
new_num_tokens: (`optional`) int
New number of tokens in the embedding matrix.
Increasing the size will add newly initialized vectors at the end
Reducing the size will remove vectors from the end
If not provided or None: return the provided token Embedding Module.
Return: ``torch.nn.Embeddings``
Pointer to the resized Embedding Module or the old Embedding Module if new_num_tokens is None
"""
if new_num_tokens is None:
return old_embeddings
old_num_tokens, old_embedding_dim = old_embeddings.weight.size()
if old_num_tokens == new_num_tokens:
return old_embeddings
# Build new embeddings
new_embeddings = nn.Embedding(new_num_tokens, old_embedding_dim)
new_embeddings.to(old_embeddings.weight.device)
# initialize all new embeddings (in particular added tokens)
self.init_weights(new_embeddings)
# Copy word embeddings from the previous weights
num_tokens_to_copy = min(old_num_tokens, new_num_tokens)
new_embeddings.weight.data[:num_tokens_to_copy, :] = old_embeddings.weight.data[:num_tokens_to_copy, :]
return new_embeddings
def _tie_or_clone_weights(self, first_module, second_module):
""" Tie or clone module weights depending of weither we are using TorchScript or not
"""
if self.config.torchscript:
first_module.weight = nn.Parameter(second_module.weight.clone())
else:
first_module.weight = second_module.weight
def resize_token_embeddings(self, new_num_tokens=None):
""" Resize input token embeddings matrix of the model if new_num_tokens != config.vocab_size.
Take care of tying weights embeddings afterwards if the model class has a `tie_weights()` method.
Args:
new_num_tokens: (`optional`) int
New number of tokens in the embedding matrix.
Increasing the size will add newly initialized vectors at the end
Reducing the size will remove vectors from the end
If not provided or None: does nothing and just returns a pointer to the input tokens Embedding Module of the model.
Return: ``torch.nn.Embeddings``
Pointer to the input tokens Embedding Module of the model
"""
base_model = getattr(self, self.base_model_prefix, self) # get the base model if needed
model_embeds = base_model._resize_token_embeddings(new_num_tokens)
if new_num_tokens is None:
return model_embeds
# Update base model and current model config
self.config.vocab_size = new_num_tokens
base_model.vocab_size = new_num_tokens
# Tie weights again if needed
if hasattr(self, 'tie_weights'):
self.tie_weights()
return model_embeds
def prune_heads(self, heads_to_prune):
""" Prunes heads of the base model.
Args:
heads_to_prune: dict of {layer_num (int): list of heads to prune in this layer (list of int)}
"""
base_model = getattr(self, self.base_model_prefix, self) # get the base model if needed
base_model._prune_heads(heads_to_prune)
def save_pretrained(self, save_directory):
""" Save a model with its configuration file to a directory, so that it
can be re-loaded using the `from_pretrained(save_directory)` class method.
"""
assert os.path.isdir(save_directory), "Saving path should be a directory where the model and configuration can be saved"
# Only save the model it-self if we are using distributed training
model_to_save = self.module if hasattr(self, 'module') else self
# Save configuration file
model_to_save.config.save_pretrained(save_directory)
# If we save using the predefined names, we can load using `from_pretrained`
output_model_file = os.path.join(save_directory, WEIGHTS_NAME)
torch.save(model_to_save.state_dict(), output_model_file)
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r"""Instantiate a pretrained pytorch model from a pre-trained model configuration.
        The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated)
To train the model, you should first set it back in training mode with `model.train()`
Params:
**pretrained_model_name_or_path**: either:
- a string with the `shortcut name` of a pre-trained model to load from cache
or download and cache if not already stored in cache (e.g. 'bert-base-uncased').
- a path to a `directory` containing a configuration file saved
using the `save_pretrained(save_directory)` method.
- a path or url to a tensorflow index checkpoint `file` (e.g. `./tf_model/model.ckpt.index`).
In this case, ``from_tf`` should be set to True and a configuration object should be
provided as `config` argument. This loading option is slower than converting the TensorFlow
checkpoint in a PyTorch model using the provided conversion scripts and loading
the PyTorch model afterwards.
**model_args**: (`optional`) Sequence:
                All remaining positional arguments will be passed to the underlying model's __init__ function
            **config**: an optional configuration for the model to use instead of an automatically loaded configuration.
                Configuration can be automatically loaded when:
                - the model is a model provided by the library (loaded with a `shortcut name` of a pre-trained model), or
                - the model was saved using the `save_pretrained(save_directory)` (loaded by supplying the save directory).
            **state_dict**: an optional state dictionary for the model to use instead of a state dictionary loaded
                from saved weights file.
                This option can be used if you want to create a model from a pretrained configuration but load your own weights.
In this case though, you should check if using `save_pretrained(dir)` and `from_pretrained(save_directory)` is not
a simpler option.
**cache_dir**: (`optional`) string:
Path to a directory in which a downloaded pre-trained model
configuration should be cached if the standard cache should not be used.
**output_loading_info**: (`optional`) boolean:
                Set to ``True`` to also return a dictionary containing missing keys, unexpected keys and error messages.
**kwargs**: (`optional`) dict:
Dictionary of key, values to update the configuration object after loading.
Can be used to override selected configuration parameters. E.g. ``output_attention=True``.
- If a configuration is provided with `config`, **kwargs will be directly passed
to the underlying model's __init__ method.
- If a configuration is not provided, **kwargs will be first passed to the pretrained
model configuration class loading function (`PretrainedConfig.from_pretrained`).
Each key of **kwargs that corresponds to a configuration attribute
will be used to override said attribute with the supplied **kwargs value.
Remaining keys that do not correspond to any configuration attribute will
be passed to the underlying model's __init__ function.
Examples::
>>> model = BertModel.from_pretrained('bert-base-uncased') # Download model and configuration from S3 and cache.
>>> model = BertModel.from_pretrained('./test/saved_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')`
>>> model = BertModel.from_pretrained('bert-base-uncased', output_attention=True) # Update configuration during loading
>>> assert model.config.output_attention == True
>>> # Loading from a TF checkpoint file instead of a PyTorch model (slower)
>>> config = BertConfig.from_json_file('./tf_model/my_tf_model_config.json')
>>> model = BertModel.from_pretrained('./tf_model/my_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
config = kwargs.pop('config', None)
state_dict = kwargs.pop('state_dict', None)
cache_dir = kwargs.pop('cache_dir', None)
from_tf = kwargs.pop('from_tf', False)
output_loading_info = kwargs.pop('output_loading_info', False)
# Load config
if config is None:
config, model_kwargs = cls.config_class.from_pretrained(
pretrained_model_name_or_path, *model_args,
cache_dir=cache_dir, return_unused_kwargs=True,
**kwargs
)
else:
model_kwargs = kwargs
# Load model
if pretrained_model_name_or_path in cls.pretrained_model_archive_map:
archive_file = cls.pretrained_model_archive_map[pretrained_model_name_or_path]
elif os.path.isdir(pretrained_model_name_or_path):
if from_tf:
# Directly load from a TensorFlow checkpoint
archive_file = os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + ".index")
else:
archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
else:
if from_tf:
# Directly load from a TensorFlow checkpoint
archive_file = pretrained_model_name_or_path + ".index"
else:
archive_file = pretrained_model_name_or_path
# redirect to the cache, if necessary
try:
resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)
except EnvironmentError:
if pretrained_model_name_or_path in cls.pretrained_model_archive_map:
logger.error(
"Couldn't reach server at '{}' to download pretrained weights.".format(
archive_file))
else:
logger.error(
"Model name '{}' was not found in model name list ({}). "
"We assumed '{}' was a path or url but couldn't find any file "
"associated to this path or url.".format(
pretrained_model_name_or_path,
', '.join(cls.pretrained_model_archive_map.keys()),
archive_file))
return None
if resolved_archive_file == archive_file:
logger.info("loading weights file {}".format(archive_file))
else:
logger.info("loading weights file {} from cache at {}".format(
archive_file, resolved_archive_file))
# Instantiate model.
model = cls(config, *model_args, **model_kwargs)
if state_dict is None and not from_tf:
state_dict = torch.load(resolved_archive_file, map_location='cpu')
if from_tf:
# Directly load from a TensorFlow checkpoint
return cls.load_tf_weights(model, config, resolved_archive_file[:-6]) # Remove the '.index'
# Convert old format to new format if needed from a PyTorch state_dict
old_keys = []
new_keys = []
for key in state_dict.keys():
new_key = None
if 'gamma' in key:
new_key = key.replace('gamma', 'weight')
if 'beta' in key:
new_key = key.replace('beta', 'bias')
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
state_dict[new_key] = state_dict.pop(old_key)
# Load from a PyTorch state_dict
missing_keys = []
unexpected_keys = []
error_msgs = []
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, '_metadata', None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
def load(module, prefix=''):
local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
module._load_from_state_dict(
state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + '.')
# Make sure we are able to load base models as well as derived models (with heads)
start_prefix = ''
model_to_load = model
if not hasattr(model, cls.base_model_prefix) and any(s.startswith(cls.base_model_prefix) for s in state_dict.keys()):
start_prefix = cls.base_model_prefix + '.'
if hasattr(model, cls.base_model_prefix) and not any(s.startswith(cls.base_model_prefix) for s in state_dict.keys()):
model_to_load = getattr(model, cls.base_model_prefix)
load(model_to_load, prefix=start_prefix)
if len(missing_keys) > 0:
logger.info("Weights of {} not initialized from pretrained model: {}".format(
model.__class__.__name__, missing_keys))
print(" Weights of {} not initialized from pretrained model: {}".format(
model.__class__.__name__, missing_keys))
if len(unexpected_keys) > 0:
logger.info("Weights from pretrained model not used in {}: {}".format(
model.__class__.__name__, unexpected_keys))
print(" Weights from pretrained model not used in {}: {}".format(
model.__class__.__name__, unexpected_keys))
if len(error_msgs) > 0:
raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(
model.__class__.__name__, "\n\t".join(error_msgs)))
if hasattr(model, 'tie_weights'):
model.tie_weights() # make sure word embedding weights are still tied
        # Set model in evaluation mode to deactivate DropOut modules by default
model.eval()
if output_loading_info:
loading_info = {"missing_keys": missing_keys, "unexpected_keys": unexpected_keys, "error_msgs": error_msgs}
return model, loading_info
return model
class Conv1D(nn.Module):
def __init__(self, nf, nx):
""" Conv1D layer as defined by Radford et al. for OpenAI GPT (and also used in GPT-2)
Basically works like a Linear layer but the weights are transposed
"""
super(Conv1D, self).__init__()
self.nf = nf
w = torch.empty(nx, nf)
nn.init.normal_(w, std=0.02)
self.weight = nn.Parameter(w)
self.bias = nn.Parameter(torch.zeros(nf))
def forward(self, x):
size_out = x.size()[:-1] + (self.nf,)
x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight)
x = x.view(*size_out)
return x
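# Illustrative sketch (not part of the original library code): Conv1D acts like a Linear layer
# with transposed weights — it maps the last dimension from nx to nf.
def _conv1d_demo():
    layer = Conv1D(nf=8, nx=4)                  # weight shape (nx, nf) = (4, 8)
    x = torch.randn(2, 3, 4)
    y = layer(x)                                # -> (2, 3, 8)
    assert torch.allclose(y, x @ layer.weight + layer.bias, atol=1e-6)
    return y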
class PoolerStartLogits(nn.Module):
""" Compute SQuAD start_logits from sequence hidden states. """
def __init__(self, config):
super(PoolerStartLogits, self).__init__()
self.dense = nn.Linear(config.hidden_size, 1)
def forward(self, hidden_states, p_mask=None):
""" Args:
**p_mask**: (`optional`) ``torch.FloatTensor`` of shape `(batch_size, seq_len)`
invalid position mask such as query and special symbols (PAD, SEP, CLS)
1.0 means token should be masked.
"""
x = self.dense(hidden_states).squeeze(-1)
if p_mask is not None:
x = x * (1 - p_mask) - 1e30 * p_mask
return x
class PoolerEndLogits(nn.Module):
""" Compute SQuAD end_logits from sequence hidden states and start token hidden state.
"""
def __init__(self, config):
super(PoolerEndLogits, self).__init__()
self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size)
self.activation = nn.Tanh()
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dense_1 = nn.Linear(config.hidden_size, 1)
def forward(self, hidden_states, start_states=None, start_positions=None, p_mask=None):
""" Args:
One of ``start_states``, ``start_positions`` should be not None.
If both are set, ``start_positions`` overrides ``start_states``.
**start_states**: ``torch.LongTensor`` of shape identical to hidden_states
hidden states of the first tokens for the labeled span.
**start_positions**: ``torch.LongTensor`` of shape ``(batch_size,)``
position of the first token for the labeled span:
**p_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, seq_len)``
Mask of invalid position such as query and special symbols (PAD, SEP, CLS)
1.0 means token should be masked.
"""
assert start_states is not None or start_positions is not None, "One of start_states, start_positions should be not None"
if start_positions is not None:
slen, hsz = hidden_states.shape[-2:]
start_positions = start_positions[:, None, None].expand(-1, -1, hsz) # shape (bsz, 1, hsz)
start_states = hidden_states.gather(-2, start_positions) # shape (bsz, 1, hsz)
start_states = start_states.expand(-1, slen, -1) # shape (bsz, slen, hsz)
x = self.dense_0(torch.cat([hidden_states, start_states], dim=-1))
x = self.activation(x)
x = self.LayerNorm(x)
x = self.dense_1(x).squeeze(-1)
if p_mask is not None:
x = x * (1 - p_mask) - 1e30 * p_mask
return x
class PoolerAnswerClass(nn.Module):
""" Compute SQuAD 2.0 answer class from classification and start tokens hidden states. """
def __init__(self, config):
super(PoolerAnswerClass, self).__init__()
self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size)
self.activation = nn.Tanh()
self.dense_1 = nn.Linear(config.hidden_size, 1, bias=False)
def forward(self, hidden_states, start_states=None, start_positions=None, cls_index=None):
"""
Args:
One of ``start_states``, ``start_positions`` should be not None.
If both are set, ``start_positions`` overrides ``start_states``.
**start_states**: ``torch.LongTensor`` of shape identical to ``hidden_states``.
hidden states of the first tokens for the labeled span.
**start_positions**: ``torch.LongTensor`` of shape ``(batch_size,)``
position of the first token for the labeled span.
**cls_index**: torch.LongTensor of shape ``(batch_size,)``
position of the CLS token. If None, take the last token.
note(Original repo):
no dependency on end_feature so that we can obtain one single `cls_logits`
for each sample
"""
hsz = hidden_states.shape[-1]
assert start_states is not None or start_positions is not None, "One of start_states, start_positions should be not None"
if start_positions is not None:
start_positions = start_positions[:, None, None].expand(-1, -1, hsz) # shape (bsz, 1, hsz)
start_states = hidden_states.gather(-2, start_positions).squeeze(-2) # shape (bsz, hsz)
if cls_index is not None:
cls_index = cls_index[:, None, None].expand(-1, -1, hsz) # shape (bsz, 1, hsz)
cls_token_state = hidden_states.gather(-2, cls_index).squeeze(-2) # shape (bsz, hsz)
else:
cls_token_state = hidden_states[:, -1, :] # shape (bsz, hsz)
x = self.dense_0(torch.cat([start_states, cls_token_state], dim=-1))
x = self.activation(x)
x = self.dense_1(x).squeeze(-1)
return x
class SQuADHead(nn.Module):
r""" A SQuAD head inspired by XLNet.
Parameters:
config (:class:`~pytorch_transformers.XLNetConfig`): Model configuration class with all the parameters of the model.
Inputs:
**hidden_states**: ``torch.FloatTensor`` of shape ``(batch_size, seq_len, hidden_size)``
hidden states of sequence tokens
**start_positions**: ``torch.LongTensor`` of shape ``(batch_size,)``
position of the first token for the labeled span.
**end_positions**: ``torch.LongTensor`` of shape ``(batch_size,)``
position of the last token for the labeled span.
**cls_index**: torch.LongTensor of shape ``(batch_size,)``
position of the CLS token. If None, take the last token.
**is_impossible**: ``torch.LongTensor`` of shape ``(batch_size,)``
Whether the question has a possible answer in the paragraph or not.
**p_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, seq_len)``
Mask of invalid position such as query and special symbols (PAD, SEP, CLS)
1.0 means token should be masked.
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**loss**: (`optional`, returned if both ``start_positions`` and ``end_positions`` are provided) ``torch.FloatTensor`` of shape ``(1,)``:
Classification loss as the sum of start token, end token (and is_impossible if provided) classification losses.
**start_top_log_probs**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top)``
Log probabilities for the top config.start_n_top start token possibilities (beam-search).
**start_top_index**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
``torch.LongTensor`` of shape ``(batch_size, config.start_n_top)``
Indices for the top config.start_n_top start token possibilities (beam-search).
**end_top_log_probs**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``
Log probabilities for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search).
**end_top_index**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
``torch.LongTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``
Indices for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search).
**cls_logits**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
``torch.FloatTensor`` of shape ``(batch_size,)``
Log probabilities for the ``is_impossible`` label of the answers.
"""
def __init__(self, config):
super(SQuADHead, self).__init__()
self.start_n_top = config.start_n_top
self.end_n_top = config.end_n_top
self.start_logits = PoolerStartLogits(config)
self.end_logits = PoolerEndLogits(config)
self.answer_class = PoolerAnswerClass(config)
def forward(self, hidden_states, start_positions=None, end_positions=None,
cls_index=None, is_impossible=None, p_mask=None):
outputs = ()
start_logits = self.start_logits(hidden_states, p_mask=p_mask)
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, let's remove the dimension added by batch splitting
for x in (start_positions, end_positions, cls_index, is_impossible):
if x is not None and x.dim() > 1:
x.squeeze_(-1)
# during training, compute the end logits based on the ground truth of the start position
end_logits = self.end_logits(hidden_states, start_positions=start_positions, p_mask=p_mask)
loss_fct = CrossEntropyLoss()
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if cls_index is not None and is_impossible is not None:
# Predict answerability from the representation of CLS and START
cls_logits = self.answer_class(hidden_states, start_positions=start_positions, cls_index=cls_index)
loss_fct_cls = nn.BCEWithLogitsLoss()
cls_loss = loss_fct_cls(cls_logits, is_impossible)
# note(zhiliny): by default multiply the loss by 0.5 so that the scale is comparable to start_loss and end_loss
total_loss += cls_loss * 0.5
outputs = (total_loss,) + outputs
else:
# during inference, compute the end logits based on beam search
bsz, slen, hsz = hidden_states.size()
start_log_probs = F.softmax(start_logits, dim=-1) # shape (bsz, slen)
start_top_log_probs, start_top_index = torch.topk(start_log_probs, self.start_n_top, dim=-1) # shape (bsz, start_n_top)
start_top_index_exp = start_top_index.unsqueeze(-1).expand(-1, -1, hsz) # shape (bsz, start_n_top, hsz)
start_states = torch.gather(hidden_states, -2, start_top_index_exp) # shape (bsz, start_n_top, hsz)
start_states = start_states.unsqueeze(1).expand(-1, slen, -1, -1) # shape (bsz, slen, start_n_top, hsz)
hidden_states_expanded = hidden_states.unsqueeze(2).expand_as(start_states) # shape (bsz, slen, start_n_top, hsz)
p_mask = p_mask.unsqueeze(-1) if p_mask is not None else None
end_logits = self.end_logits(hidden_states_expanded, start_states=start_states, p_mask=p_mask)
end_log_probs = F.softmax(end_logits, dim=1) # shape (bsz, slen, start_n_top)
end_top_log_probs, end_top_index = torch.topk(end_log_probs, self.end_n_top, dim=1) # shape (bsz, end_n_top, start_n_top)
end_top_log_probs = end_top_log_probs.view(-1, self.start_n_top * self.end_n_top)
end_top_index = end_top_index.view(-1, self.start_n_top * self.end_n_top)
start_states = torch.einsum("blh,bl->bh", hidden_states, start_log_probs)
cls_logits = self.answer_class(hidden_states, start_states=start_states, cls_index=cls_index)
outputs = (start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits) + outputs
# return start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits
# or (if labels are provided) (total_loss,)
return outputs
class SequenceSummary(nn.Module):
r""" Compute a single vector summary of a sequence hidden states according to various possibilities:
Args of the config class:
summary_type:
- 'last' => [default] take the last token hidden state (like XLNet)
- 'first' => take the first token hidden state (like Bert)
- 'mean' => take the mean of all tokens hidden states
- 'token_ids' => supply a Tensor of classification token indices (GPT/GPT-2)
- 'attn' => Not implemented now, use multi-head attention
summary_use_proj: Add a projection after the vector extraction
summary_proj_to_labels: If True, the projection outputs to config.num_labels classes (otherwise to hidden_size). Default: False.
            summary_activation: 'tanh' => add a tanh activation to the output, Other => no activation (default)
summary_first_dropout: Add a dropout before the projection and activation
summary_last_dropout: Add a dropout after the projection and activation
"""
def __init__(self, config):
super(SequenceSummary, self).__init__()
        self.summary_type = config.summary_type if hasattr(config, 'summary_type') else 'last'
if config.summary_type == 'attn':
# We should use a standard multi-head attention module with absolute positional embedding for that.
# Cf. https://github.com/zihangdai/xlnet/blob/master/modeling.py#L253-L276
# We can probably just use the multi-head attention module of PyTorch >=1.1.0
raise NotImplementedError
self.summary = Identity()
if hasattr(config, 'summary_use_proj') and config.summary_use_proj:
if hasattr(config, 'summary_proj_to_labels') and config.summary_proj_to_labels and config.num_labels > 0:
num_classes = config.num_labels
else:
num_classes = config.hidden_size
self.summary = nn.Linear(config.hidden_size, num_classes)
self.activation = Identity()
if hasattr(config, 'summary_activation') and config.summary_activation == 'tanh':
self.activation = nn.Tanh()
self.first_dropout = Identity()
if hasattr(config, 'summary_first_dropout') and config.summary_first_dropout > 0:
self.first_dropout = nn.Dropout(config.summary_first_dropout)
self.last_dropout = Identity()
if hasattr(config, 'summary_last_dropout') and config.summary_last_dropout > 0:
self.last_dropout = nn.Dropout(config.summary_last_dropout)
def forward(self, hidden_states, token_ids=None):
""" hidden_states: float Tensor in shape [bsz, seq_len, hidden_size], the hidden-states of the last layer.
token_ids: [optional] index of the classification token if summary_type == 'token_ids',
shape (bsz,) or more generally (bsz, ...) where ... are optional leading dimensions of hidden_states.
if summary_type == 'token_ids' and token_ids is None:
we take the last token of the sequence as classification token
"""
if self.summary_type == 'last':
output = hidden_states[:, -1]
elif self.summary_type == 'first':
output = hidden_states[:, 0]
elif self.summary_type == 'mean':
output = hidden_states.mean(dim=1)
elif self.summary_type == 'token_ids':
if token_ids is None:
token_ids = torch.full_like(hidden_states[..., :1, :], hidden_states.shape[-2]-1, dtype=torch.long)
else:
token_ids = token_ids.unsqueeze(-1).unsqueeze(-1)
token_ids = token_ids.expand((-1,) * (token_ids.dim()-1) + (hidden_states.size(-1),))
# shape of token_ids: (bsz, XX, 1, hidden_size) where XX are optional leading dim of hidden_states
output = hidden_states.gather(-2, token_ids).squeeze(-2) # shape (bsz, XX, hidden_size)
elif self.summary_type == 'attn':
raise NotImplementedError
output = self.first_dropout(output)
output = self.summary(output)
output = self.activation(output)
output = self.last_dropout(output)
return output
def prune_linear_layer(layer, index, dim=0):
""" Prune a linear layer (a model parameters) to keep only entries in index.
Return the pruned layer as a new layer with requires_grad=True.
Used to remove heads.
"""
index = index.to(layer.weight.device)
W = layer.weight.index_select(dim, index).clone().detach()
if layer.bias is not None:
if dim == 1:
b = layer.bias.clone().detach()
else:
b = layer.bias[index].clone().detach()
new_size = list(layer.weight.size())
new_size[dim] = len(index)
new_layer = nn.Linear(new_size[1], new_size[0], bias=layer.bias is not None).to(layer.weight.device)
new_layer.weight.requires_grad = False
new_layer.weight.copy_(W.contiguous())
new_layer.weight.requires_grad = True
if layer.bias is not None:
new_layer.bias.requires_grad = False
new_layer.bias.copy_(b.contiguous())
new_layer.bias.requires_grad = True
return new_layer
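# Illustrative sketch (not part of the original library code): pruning a Linear layer along
# dim=0 keeps only the selected output units; the pruned layer reproduces exactly those rows.
def _prune_linear_demo():
    layer = nn.Linear(6, 4)
    keep = torch.tensor([0, 2, 3])
    pruned = prune_linear_layer(layer, keep, dim=0)
    x = torch.randn(5, 6)
    assert torch.allclose(pruned(x), layer(x)[:, keep], atol=1e-6)
    return pruned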
def prune_conv1d_layer(layer, index, dim=1):
""" Prune a Conv1D layer (a model parameters) to keep only entries in index.
A Conv1D work as a Linear layer (see e.g. BERT) but the weights are transposed.
Return the pruned layer as a new layer with requires_grad=True.
Used to remove heads.
"""
index = index.to(layer.weight.device)
W = layer.weight.index_select(dim, index).clone().detach()
if dim == 0:
b = layer.bias.clone().detach()
else:
b = layer.bias[index].clone().detach()
new_size = list(layer.weight.size())
new_size[dim] = len(index)
new_layer = Conv1D(new_size[1], new_size[0]).to(layer.weight.device)
new_layer.weight.requires_grad = False
new_layer.weight.copy_(W.contiguous())
new_layer.weight.requires_grad = True
new_layer.bias.requires_grad = False
new_layer.bias.copy_(b.contiguous())
new_layer.bias.requires_grad = True
return new_layer
def prune_layer(layer, index, dim=None):
""" Prune a Conv1D or nn.Linear layer (a model parameters) to keep only entries in index.
Return the pruned layer as a new layer with requires_grad=True.
Used to remove heads.
"""
if isinstance(layer, nn.Linear):
return prune_linear_layer(layer, index, dim=0 if dim is None else dim)
elif isinstance(layer, Conv1D):
return prune_conv1d_layer(layer, index, dim=1 if dim is None else dim)
else:
raise ValueError("Can't prune layer of class {}".format(layer.__class__))
| 44,611 | 48.513873 | 157 | py |
Discrete-Continuous-VLN | Discrete-Continuous-VLN-main/waypoint_prediction/transformer/pytorch_transformer/modeling_bert.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model. """
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import logging
import math
import os
import sys
from io import open
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from .modeling_utils import (WEIGHTS_NAME, CONFIG_NAME, PretrainedConfig, PreTrainedModel,
prune_linear_layer, add_start_docstrings)
logger = logging.getLogger(__name__)
BERT_PRETRAINED_MODEL_ARCHIVE_MAP = {
'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-pytorch_model.bin",
'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-pytorch_model.bin",
'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-pytorch_model.bin",
'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-pytorch_model.bin",
'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-pytorch_model.bin",
'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-pytorch_model.bin",
'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-pytorch_model.bin",
'bert-base-german-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-cased-pytorch_model.bin",
'bert-large-uncased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-pytorch_model.bin",
'bert-large-cased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-pytorch_model.bin",
'bert-large-uncased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-pytorch_model.bin",
'bert-large-cased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-pytorch_model.bin",
'bert-base-cased-finetuned-mrpc': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-pytorch_model.bin",
}
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-config.json",
'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-config.json",
'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-config.json",
'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-config.json",
'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-config.json",
'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-config.json",
'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-config.json",
'bert-base-german-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-cased-config.json",
'bert-large-uncased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-config.json",
'bert-large-cased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-config.json",
'bert-large-uncased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-config.json",
'bert-large-cased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-config.json",
'bert-base-cased-finetuned-mrpc': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-config.json",
}
def load_tf_weights_in_bert(model, config, tf_checkpoint_path):
""" Load tf checkpoints in a pytorch model.
"""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
logger.error("Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions.")
raise
tf_path = os.path.abspath(tf_checkpoint_path)
logger.info("Converting TensorFlow checkpoint from {}".format(tf_path))
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
logger.info("Loading TF weight {} with shape {}".format(name, shape))
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array)
for name, array in zip(names, arrays):
name = name.split('/')
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v
# which are not required for using pretrained model
if any(n in ["adam_v", "adam_m", "global_step"] for n in name):
logger.info("Skipping {}".format("/".join(name)))
continue
pointer = model
for m_name in name:
if re.fullmatch(r'[A-Za-z]+_\d+', m_name):
l = re.split(r'_(\d+)', m_name)
else:
l = [m_name]
if l[0] == 'kernel' or l[0] == 'gamma':
pointer = getattr(pointer, 'weight')
elif l[0] == 'output_bias' or l[0] == 'beta':
pointer = getattr(pointer, 'bias')
elif l[0] == 'output_weights':
pointer = getattr(pointer, 'weight')
elif l[0] == 'squad':
pointer = getattr(pointer, 'classifier')
else:
try:
pointer = getattr(pointer, l[0])
except AttributeError:
logger.info("Skipping {}".format("/".join(name)))
continue
if len(l) >= 2:
num = int(l[1])
pointer = pointer[num]
if m_name[-11:] == '_embeddings':
pointer = getattr(pointer, 'weight')
elif m_name == 'kernel':
array = np.transpose(array)
try:
assert pointer.shape == array.shape
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info("Initialize PyTorch weight {}".format(name))
pointer.data = torch.from_numpy(array)
return model
def gelu(x):
"""Implementation of the gelu activation function.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
Also see https://arxiv.org/abs/1606.08415
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
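# Illustrative sketch (not part of the original library code): the erf-based gelu above and the
# tanh approximation quoted in its docstring agree closely over a typical activation range.
def _gelu_approx_demo():
    x = torch.linspace(-5.0, 5.0, steps=101)
    exact = gelu(x)
    approx = 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
    assert torch.max(torch.abs(exact - approx)) < 1e-2
    return exact, approx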
def swish(x):
return x * torch.sigmoid(x)
ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish}
class BertConfig(PretrainedConfig):
r"""
:class:`~pytorch_transformers.BertConfig` is the configuration class to store the configuration of a
`BertModel`.
Arguments:
vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer encoder.
num_attention_heads: Number of attention heads for each attention layer in
the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
        hidden_dropout_prob: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
type_vocab_size: The vocabulary size of the `token_type_ids` passed into
`BertModel`.
        initializer_range: The stddev of the truncated_normal_initializer for
initializing all weight matrices.
layer_norm_eps: The epsilon used by LayerNorm.
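        Example (illustrative, relying on the default hyper-parameters below)::
            >>> config = BertConfig(vocab_size_or_config_json_file=30522)
            >>> config.num_attention_heads
            12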
"""
pretrained_config_archive_map = BERT_PRETRAINED_CONFIG_ARCHIVE_MAP
def __init__(self,
vocab_size_or_config_json_file=30522,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02,
layer_norm_eps=1e-12,
**kwargs):
super(BertConfig, self).__init__(**kwargs)
if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2
and isinstance(vocab_size_or_config_json_file, unicode)):
with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
json_config = json.loads(reader.read())
for key, value in json_config.items():
self.__dict__[key] = value
elif isinstance(vocab_size_or_config_json_file, int):
self.vocab_size = vocab_size_or_config_json_file
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
else:
raise ValueError("First argument must be either a vocabulary size (int)"
"or the path to a pretrained model config file (str)")
try:
from apex.normalization.fused_layer_norm import FusedLayerNorm as BertLayerNorm
except ImportError:
logger.info("Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex .")
class BertLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-12):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
super(BertLayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
class BertEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config):
super(BertEmbeddings, self).__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_ids, token_type_ids=None, position_ids=None):
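        # position_ids default to [0, ..., seq_length - 1] and token_type_ids default to all zeros
        # when they are not provided by the caller.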
seq_length = input_ids.size(1)
if position_ids is None:
position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
words_embeddings = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = words_embeddings + position_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class BertSelfAttention(nn.Module):
def __init__(self, config):
super(BertSelfAttention, self).__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads))
self.output_attentions = config.output_attentions
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size) # [768 * 768]
self.key = nn.Linear(config.hidden_size, self.all_head_size) # [768 * 768]
self.value = nn.Linear(config.hidden_size, self.all_head_size) # [768 * 768]
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
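        # Reshape (batch, seq_len, all_head_size) into (batch, num_heads, seq_len, head_size)
        # so that attention can be computed independently per head.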
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, attention_mask, head_mask=None):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        # Apply the attention mask (precomputed for all layers in the BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if self.output_attentions else (context_layer,)
return outputs
class BertSelfOutput(nn.Module):
def __init__(self, config):
super(BertSelfOutput, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertAttention(nn.Module):
def __init__(self, config):
super(BertAttention, self).__init__()
self.self = BertSelfAttention(config)
self.output = BertSelfOutput(config)
def prune_heads(self, heads):
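        # Build a keep-mask over the flattened (num_heads * head_size) dimension, then shrink the
        # query/key/value projections (and the output projection along its input dim) to the surviving indices.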
if len(heads) == 0:
return
mask = torch.ones(self.self.num_attention_heads, self.self.attention_head_size)
for head in heads:
mask[head] = 0
mask = mask.view(-1).contiguous().eq(1)
index = torch.arange(len(mask))[mask].long()
# Prune linear layers
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
# Update hyper params
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
def forward(self, input_tensor, attention_mask, head_mask=None):
self_outputs = self.self(input_tensor, attention_mask, head_mask)
attention_output = self.output(self_outputs[0], input_tensor)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
class BertIntermediate(nn.Module):
def __init__(self, config):
super(BertIntermediate, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class BertOutput(nn.Module):
def __init__(self, config):
super(BertOutput, self).__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertLayer(nn.Module):
def __init__(self, config):
super(BertLayer, self).__init__()
self.attention = BertAttention(config)
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)
def forward(self, hidden_states, attention_mask, head_mask=None):
attention_outputs = self.attention(hidden_states, attention_mask, head_mask)
attention_output = attention_outputs[0]
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them
return outputs
class BertEncoder(nn.Module):
def __init__(self, config):
super(BertEncoder, self).__init__()
self.output_attentions = config.output_attentions
self.output_hidden_states = config.output_hidden_states
self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
def forward(self, hidden_states, attention_mask, head_mask=None):
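        # Optionally collect every layer's hidden states and attention maps so they can be
        # returned alongside the final hidden states.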
all_hidden_states = ()
all_attentions = ()
for i, layer_module in enumerate(self.layer):
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_outputs = layer_module(hidden_states, attention_mask, head_mask[i])
hidden_states = layer_outputs[0]
if self.output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
# Add last layer
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
outputs = (hidden_states,)
if self.output_hidden_states:
outputs = outputs + (all_hidden_states,)
if self.output_attentions:
outputs = outputs + (all_attentions,)
return outputs # outputs, (hidden states), (attentions)
class BertPooler(nn.Module):
def __init__(self, config):
super(BertPooler, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class BertPredictionHeadTransform(nn.Module):
def __init__(self, config):
super(BertPredictionHeadTransform, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)):
self.transform_act_fn = ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class BertLMPredictionHead(nn.Module):
def __init__(self, config):
super(BertLMPredictionHead, self).__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size,
config.vocab_size,
bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states) + self.bias
return hidden_states
class BertOnlyMLMHead(nn.Module):
def __init__(self, config):
super(BertOnlyMLMHead, self).__init__()
self.predictions = BertLMPredictionHead(config)
def forward(self, sequence_output):
prediction_scores = self.predictions(sequence_output)
return prediction_scores
class BertOnlyNSPHead(nn.Module):
def __init__(self, config):
super(BertOnlyNSPHead, self).__init__()
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, pooled_output):
seq_relationship_score = self.seq_relationship(pooled_output)
return seq_relationship_score
class BertPreTrainingHeads(nn.Module):
def __init__(self, config):
super(BertPreTrainingHeads, self).__init__()
self.predictions = BertLMPredictionHead(config)
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, sequence_output, pooled_output):
prediction_scores = self.predictions(sequence_output)
seq_relationship_score = self.seq_relationship(pooled_output)
return prediction_scores, seq_relationship_score
class BertPreTrainedModel(PreTrainedModel):
""" An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
"""
config_class = BertConfig
pretrained_model_archive_map = BERT_PRETRAINED_MODEL_ARCHIVE_MAP
load_tf_weights = load_tf_weights_in_bert
base_model_prefix = "bert"
def __init__(self, *inputs, **kwargs):
super(BertPreTrainedModel, self).__init__(*inputs, **kwargs)
def init_weights(self, module):
""" Initialize the weights.
"""
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, BertLayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
BERT_START_DOCSTRING = r""" The BERT model was proposed in
`BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding`_
by Jacob Devlin, Ming-Wei Chang, Kenton Lee and Kristina Toutanova. It's a bidirectional transformer
pre-trained using a combination of masked language modeling objective and next sentence prediction
on a large corpus comprising the Toronto Book Corpus and Wikipedia.
This model is a PyTorch `torch.nn.Module`_ sub-class. Use it as a regular PyTorch Module and
refer to the PyTorch documentation for all matter related to general usage and behavior.
.. _`BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding`:
https://arxiv.org/abs/1810.04805
.. _`torch.nn.Module`:
https://pytorch.org/docs/stable/nn.html#module
Parameters:
config (:class:`~pytorch_transformers.BertConfig`): Model configuration class with all the parameters of the model.
"""
BERT_INPUTS_DOCSTRING = r"""
Inputs:
**input_ids**: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Indices of input sequence tokens in the vocabulary.
To match pre-training, BERT input sequence should be formatted with [CLS] and [SEP] tokens as follows:
(a) For sequence pairs:
``tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]``
``token_type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1``
(b) For single sequences:
``tokens: [CLS] the dog is hairy . [SEP]``
``token_type_ids: 0 0 0 0 0 0 0``
Indices can be obtained using :class:`pytorch_transformers.BertTokenizer`.
See :func:`pytorch_transformers.PreTrainedTokenizer.encode` and
:func:`pytorch_transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details.
**position_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Indices of positions of each input sequence tokens in the position embeddings.
            Selected in the range ``[0, config.max_position_embeddings - 1]``.
**token_type_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Segment token indices to indicate first and second portions of the inputs.
Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``
corresponds to a `sentence B` token
(see `BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding`_ for more details).
**attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``:
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
**head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``:
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.
"""
@add_start_docstrings("The bare Bert Model transformer outputing raw hidden-states without any specific head on top.",
BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)
class BertModel(BertPreTrainedModel):
r"""
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)``
Sequence of hidden-states at the output of the last layer of the model.
**pooler_output**: ``torch.FloatTensor`` of shape ``(batch_size, hidden_size)``
Last layer hidden-state of the first token of the sequence (classification token)
further processed by a Linear layer and a Tanh activation function. The Linear
layer weights are trained from the next sentence prediction (classification)
objective during Bert pretraining. This output is usually *not* a good summary
            of the semantic content of the input; you're often better off averaging or pooling
the sequence of hidden-states for the whole input sequence.
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
>>> config = BertConfig.from_pretrained('bert-base-uncased')
>>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
>>> model = BertModel(config)
>>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1
>>> outputs = model(input_ids)
>>> last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple
"""
def __init__(self, config):
super(BertModel, self).__init__(config)
self.embeddings = BertEmbeddings(config)
self.encoder = BertEncoder(config)
self.pooler = BertPooler(config)
self.apply(self.init_weights)
def _resize_token_embeddings(self, new_num_tokens):
old_embeddings = self.embeddings.word_embeddings
new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens)
self.embeddings.word_embeddings = new_embeddings
return self.embeddings.word_embeddings
def _prune_heads(self, heads_to_prune):
""" Prunes heads of the model.
heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
See base class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, position_ids=None, head_mask=None):
if attention_mask is None:
attention_mask = torch.ones_like(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
if head_mask is not None:
if head_mask.dim() == 1:
head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1)
elif head_mask.dim() == 2:
head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1) # We can specify head_mask for each layer
                head_mask = head_mask.to(dtype=next(self.parameters()).dtype) # switch to float if needed + fp16 compatibility
else:
head_mask = [None] * self.config.num_hidden_layers
embedding_output = self.embeddings(input_ids, position_ids=position_ids, token_type_ids=token_type_ids)
encoder_outputs = self.encoder(embedding_output,
extended_attention_mask,
head_mask=head_mask)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output)
outputs = (sequence_output, pooled_output,) + encoder_outputs[1:] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions)
@add_start_docstrings("""Bert Model with two heads on top as done during the pre-training:
a `masked language modeling` head and a `next sentence prediction (classification)` head. """,
BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)
class BertForPreTraining(BertPreTrainedModel):
r"""
**masked_lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Labels for computing the masked language modeling loss.
Indices should be in ``[-1, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)
Tokens with indices set to ``-1`` are ignored (masked), the loss is only computed for the tokens with labels
in ``[0, ..., config.vocab_size]``
**next_sentence_label**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair (see ``input_ids`` docstring)
Indices should be in ``[0, 1]``.
``0`` indicates sequence B is a continuation of sequence A,
``1`` indicates sequence B is a random sequence.
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**loss**: (`optional`, returned when both ``masked_lm_labels`` and ``next_sentence_label`` are provided) ``torch.FloatTensor`` of shape ``(1,)``:
Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss.
**prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)``
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        **seq_relationship_scores**: ``torch.FloatTensor`` of shape ``(batch_size, 2)``
Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax).
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
>>> config = BertConfig.from_pretrained('bert-base-uncased')
>>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
>>>
>>> model = BertForPreTraining(config)
>>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1
>>> outputs = model(input_ids)
>>> prediction_scores, seq_relationship_scores = outputs[:2]
"""
def __init__(self, config):
super(BertForPreTraining, self).__init__(config)
self.bert = BertModel(config)
self.cls = BertPreTrainingHeads(config)
self.apply(self.init_weights)
self.tie_weights()
def tie_weights(self):
""" Make sure we are sharing the input and output embeddings.
Export to TorchScript can't handle parameter sharing so we are cloning them instead.
"""
self._tie_or_clone_weights(self.cls.predictions.decoder,
self.bert.embeddings.word_embeddings)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None,
next_sentence_label=None, position_ids=None, head_mask=None):
outputs = self.bert(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,
attention_mask=attention_mask, head_mask=head_mask)
sequence_output, pooled_output = outputs[:2]
prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
outputs = (prediction_scores, seq_relationship_score,) + outputs[2:] # add hidden states and attention if they are here
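        # When both label sets are provided, the pre-training loss is the sum of the
        # masked-LM and next-sentence cross-entropies.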
if masked_lm_labels is not None and next_sentence_label is not None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
total_loss = masked_lm_loss + next_sentence_loss
outputs = (total_loss,) + outputs
return outputs # (loss), prediction_scores, seq_relationship_score, (hidden_states), (attentions)
@add_start_docstrings("""Bert Model with a `language modeling` head on top. """,
BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)
class BertForMaskedLM(BertPreTrainedModel):
r"""
**masked_lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Labels for computing the masked language modeling loss.
Indices should be in ``[-1, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)
Tokens with indices set to ``-1`` are ignored (masked), the loss is only computed for the tokens with labels
in ``[0, ..., config.vocab_size]``
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**loss**: (`optional`, returned when ``masked_lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Masked language modeling loss.
**prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)``
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
>>> config = BertConfig.from_pretrained('bert-base-uncased')
>>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
>>>
>>> model = BertForMaskedLM(config)
>>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1
>>> outputs = model(input_ids, masked_lm_labels=input_ids)
>>> loss, prediction_scores = outputs[:2]
"""
def __init__(self, config):
super(BertForMaskedLM, self).__init__(config)
self.bert = BertModel(config)
self.cls = BertOnlyMLMHead(config)
self.apply(self.init_weights)
self.tie_weights()
def tie_weights(self):
""" Make sure we are sharing the input and output embeddings.
Export to TorchScript can't handle parameter sharing so we are cloning them instead.
"""
self._tie_or_clone_weights(self.cls.predictions.decoder,
self.bert.embeddings.word_embeddings)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None,
position_ids=None, head_mask=None):
outputs = self.bert(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,
attention_mask=attention_mask, head_mask=head_mask)
sequence_output = outputs[0]
prediction_scores = self.cls(sequence_output)
        outputs = (prediction_scores,) + outputs[2:] # Add hidden states and attention if they are here
if masked_lm_labels is not None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
outputs = (masked_lm_loss,) + outputs
return outputs # (masked_lm_loss), prediction_scores, (hidden_states), (attentions)
@add_start_docstrings("""Bert Model with a `next sentence prediction (classification)` head on top. """,
BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)
class BertForNextSentencePrediction(BertPreTrainedModel):
r"""
**next_sentence_label**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair (see ``input_ids`` docstring)
Indices should be in ``[0, 1]``.
``0`` indicates sequence B is a continuation of sequence A,
``1`` indicates sequence B is a random sequence.
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**loss**: (`optional`, returned when ``next_sentence_label`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Next sequence prediction (classification) loss.
        **seq_relationship_scores**: ``torch.FloatTensor`` of shape ``(batch_size, 2)``
Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax).
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
>>> config = BertConfig.from_pretrained('bert-base-uncased')
>>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
>>>
>>> model = BertForNextSentencePrediction(config)
>>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1
>>> outputs = model(input_ids)
>>> seq_relationship_scores = outputs[0]
"""
def __init__(self, config):
super(BertForNextSentencePrediction, self).__init__(config)
self.bert = BertModel(config)
self.cls = BertOnlyNSPHead(config)
self.apply(self.init_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, next_sentence_label=None,
position_ids=None, head_mask=None):
outputs = self.bert(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,
attention_mask=attention_mask, head_mask=head_mask)
pooled_output = outputs[1]
seq_relationship_score = self.cls(pooled_output)
outputs = (seq_relationship_score,) + outputs[2:] # add hidden states and attention if they are here
if next_sentence_label is not None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
outputs = (next_sentence_loss,) + outputs
return outputs # (next_sentence_loss), seq_relationship_score, (hidden_states), (attentions)
@add_start_docstrings("""Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of
the pooled output) e.g. for GLUE tasks. """,
BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)
class BertForSequenceClassification(BertPreTrainedModel):
r"""
**labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
Labels for computing the sequence classification/regression loss.
Indices should be in ``[0, ..., config.num_labels]``.
If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss),
If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy).
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Classification (or regression if config.num_labels==1) loss.
**logits**: ``torch.FloatTensor`` of shape ``(batch_size, config.num_labels)``
Classification (or regression if config.num_labels==1) scores (before SoftMax).
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
>>> config = BertConfig.from_pretrained('bert-base-uncased')
>>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
>>>
>>> model = BertForSequenceClassification(config)
>>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1
>>> labels = torch.tensor([1]).unsqueeze(0) # Batch size 1
>>> outputs = model(input_ids, labels=labels)
>>> loss, logits = outputs[:2]
"""
def __init__(self, config):
super(BertForSequenceClassification, self).__init__(config)
self.num_labels = config.num_labels
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
self.apply(self.init_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None,
position_ids=None, head_mask=None):
outputs = self.bert(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,
attention_mask=attention_mask, head_mask=head_mask)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here
if labels is not None:
if self.num_labels == 1:
# We are doing regression
loss_fct = MSELoss()
loss = loss_fct(logits.view(-1), labels.view(-1))
else:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
outputs = (loss,) + outputs
return outputs # (loss), logits, (hidden_states), (attentions)
@add_start_docstrings("""Bert Model with a multiple choice classification head on top (a linear layer on top of
the pooled output and a softmax) e.g. for RocStories/SWAG tasks. """,
BERT_START_DOCSTRING)
class BertForMultipleChoice(BertPreTrainedModel):
r"""
Inputs:
**input_ids**: ``torch.LongTensor`` of shape ``(batch_size, num_choices, sequence_length)``:
Indices of input sequence tokens in the vocabulary.
The second dimension of the input (`num_choices`) indicates the number of choices to score.
To match pre-training, BERT input sequence should be formatted with [CLS] and [SEP] tokens as follows:
(a) For sequence pairs:
``tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]``
``token_type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1``
(b) For single sequences:
``tokens: [CLS] the dog is hairy . [SEP]``
``token_type_ids: 0 0 0 0 0 0 0``
Indices can be obtained using :class:`pytorch_transformers.BertTokenizer`.
See :func:`pytorch_transformers.PreTrainedTokenizer.encode` and
:func:`pytorch_transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details.
**token_type_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, num_choices, sequence_length)``:
Segment token indices to indicate first and second portions of the inputs.
The second dimension of the input (`num_choices`) indicates the number of choices to score.
Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``
corresponds to a `sentence B` token
(see `BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding`_ for more details).
**attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, num_choices, sequence_length)``:
Mask to avoid performing attention on padding token indices.
The second dimension of the input (`num_choices`) indicates the number of choices to score.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
**head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``:
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.
**labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
Labels for computing the multiple choice classification loss.
Indices should be in ``[0, ..., num_choices]`` where `num_choices` is the size of the second dimension
of the input tensors. (see `input_ids` above)
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Classification loss.
**classification_scores**: ``torch.FloatTensor`` of shape ``(batch_size, num_choices)`` where `num_choices` is the size of the second dimension
of the input tensors. (see `input_ids` above).
Classification scores (before SoftMax).
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
>>> config = BertConfig.from_pretrained('bert-base-uncased')
>>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
>>>
>>> model = BertForMultipleChoice(config)
>>> choices = ["Hello, my dog is cute", "Hello, my cat is amazing"]
>>> input_ids = torch.tensor([tokenizer.encode(s) for s in choices]).unsqueeze(0) # Batch size 1, 2 choices
>>> labels = torch.tensor(1).unsqueeze(0) # Batch size 1
>>> outputs = model(input_ids, labels=labels)
>>> loss, classification_scores = outputs[:2]
"""
def __init__(self, config):
super(BertForMultipleChoice, self).__init__(config)
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, 1)
self.apply(self.init_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None,
position_ids=None, head_mask=None):
num_choices = input_ids.shape[1]
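        # Flatten (batch_size, num_choices, seq_len) into (batch_size * num_choices, seq_len) so BERT
        # scores each choice independently; the logits are reshaped back to (batch_size, num_choices) below.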
flat_input_ids = input_ids.view(-1, input_ids.size(-1))
flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
outputs = self.bert(flat_input_ids, position_ids=flat_position_ids, token_type_ids=flat_token_type_ids,
attention_mask=flat_attention_mask, head_mask=head_mask)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, num_choices)
outputs = (reshaped_logits,) + outputs[2:] # add hidden states and attention if they are here
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
outputs = (loss,) + outputs
return outputs # (loss), reshaped_logits, (hidden_states), (attentions)
@add_start_docstrings("""Bert Model with a token classification head on top (a linear layer on top of
the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """,
BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)
class BertForTokenClassification(BertPreTrainedModel):
r"""
**labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Labels for computing the token classification loss.
Indices should be in ``[0, ..., config.num_labels]``.
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Classification loss.
**scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.num_labels)``
Classification scores (before SoftMax).
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
>>> config = BertConfig.from_pretrained('bert-base-uncased')
>>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
>>>
>>> model = BertForTokenClassification(config)
>>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1
>>> labels = torch.tensor([1] * input_ids.size(1)).unsqueeze(0) # Batch size 1
>>> outputs = model(input_ids, labels=labels)
>>> loss, scores = outputs[:2]
"""
def __init__(self, config):
super(BertForTokenClassification, self).__init__(config)
self.num_labels = config.num_labels
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.apply(self.init_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None,
position_ids=None, head_mask=None):
outputs = self.bert(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,
attention_mask=attention_mask, head_mask=head_mask)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here
if labels is not None:
loss_fct = CrossEntropyLoss()
# Only keep active parts of the loss
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)[active_loss]
active_labels = labels.view(-1)[active_loss]
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
outputs = (loss,) + outputs
return outputs # (loss), scores, (hidden_states), (attentions)
@add_start_docstrings("""Bert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of
the hidden-states output to compute `span start logits` and `span end logits`). """,
BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)
class BertForQuestionAnswering(BertPreTrainedModel):
r"""
**start_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
Labels for position (index) of the start of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`).
            Positions outside of the sequence are not taken into account for computing the loss.
**end_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
Labels for position (index) of the end of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`).
            Positions outside of the sequence are not taken into account for computing the loss.
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.
**start_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)``
Span-start scores (before SoftMax).
**end_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)``
Span-end scores (before SoftMax).
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
>>> config = BertConfig.from_pretrained('bert-base-uncased')
>>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
>>>
>>> model = BertForQuestionAnswering(config)
>>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1
>>> start_positions = torch.tensor([1])
>>> end_positions = torch.tensor([3])
>>> outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions)
        >>> loss, start_scores, end_scores = outputs[:3]
"""
def __init__(self, config):
super(BertForQuestionAnswering, self).__init__(config)
self.num_labels = config.num_labels
self.bert = BertModel(config)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
self.apply(self.init_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, start_positions=None,
end_positions=None, position_ids=None, head_mask=None):
outputs = self.bert(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,
attention_mask=attention_mask, head_mask=head_mask)
sequence_output = outputs[0]
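        # qa_outputs maps each token's hidden state to start/end logits (num_labels is expected to be 2
        # for span extraction); splitting on the last dim gives separate start- and end-position scores.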
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
outputs = (start_logits, end_logits,) + outputs[2:]
if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split adds a dimension
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs; we ignore these terms
ignored_index = start_logits.size(1)
start_positions.clamp_(0, ignored_index)
end_positions.clamp_(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
outputs = (total_loss,) + outputs
return outputs # (loss), start_logits, end_logits, (hidden_states), (attentions)
| 67,047 | 52.382166 | 187 | py |
Discrete-Continuous-VLN | Discrete-Continuous-VLN-main/waypoint_prediction/transformer/pytorch_transformer/file_utils.py | """
Utilities for working with the local dataset cache.
This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp
Copyright by the AllenNLP authors.
"""
from __future__ import (absolute_import, division, print_function, unicode_literals)
import sys
import json
import logging
import os
import shutil
import tempfile
import fnmatch
from functools import wraps
from hashlib import sha256
from io import open
import boto3
import requests
from botocore.exceptions import ClientError
from tqdm import tqdm
try:
from torch.hub import _get_torch_home
torch_cache_home = _get_torch_home()
except ImportError:
torch_cache_home = os.path.expanduser(
os.getenv('TORCH_HOME', os.path.join(
os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch')))
default_cache_path = os.path.join(torch_cache_home, 'pytorch_transformers')
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
try:
from pathlib import Path
PYTORCH_PRETRAINED_BERT_CACHE = Path(
os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', default_cache_path))
except (AttributeError, ImportError):
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv('PYTORCH_PRETRAINED_BERT_CACHE',
default_cache_path)
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
def url_to_filename(url, etag=None):
"""
Convert `url` into a hashed filename in a repeatable way.
If `etag` is specified, append its hash to the url's, delimited
by a period.
"""
url_bytes = url.encode('utf-8')
url_hash = sha256(url_bytes)
filename = url_hash.hexdigest()
if etag:
etag_bytes = etag.encode('utf-8')
etag_hash = sha256(etag_bytes)
filename += '.' + etag_hash.hexdigest()
return filename
def filename_to_url(filename, cache_dir=None):
"""
Return the url and etag (which may be ``None``) stored for `filename`.
Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
cache_path = os.path.join(cache_dir, filename)
if not os.path.exists(cache_path):
raise EnvironmentError("file {} not found".format(cache_path))
meta_path = cache_path + '.json'
if not os.path.exists(meta_path):
raise EnvironmentError("file {} not found".format(meta_path))
with open(meta_path, encoding="utf-8") as meta_file:
metadata = json.load(meta_file)
url = metadata['url']
etag = metadata['etag']
return url, etag
def cached_path(url_or_filename, cache_dir=None):
"""
Given something that might be a URL (or might be a local path),
determine which. If it's a URL, download the file and cache it, and
return the path to the cached file. If it's already a local path,
make sure the file exists and then return the path.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
if sys.version_info[0] == 3 and isinstance(url_or_filename, Path):
url_or_filename = str(url_or_filename)
if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
parsed = urlparse(url_or_filename)
if parsed.scheme in ('http', 'https', 's3'):
# URL, so get it from the cache (downloading if necessary)
return get_from_cache(url_or_filename, cache_dir)
elif os.path.exists(url_or_filename):
# File, and it exists.
return url_or_filename
elif parsed.scheme == '':
# File, but it doesn't exist.
raise EnvironmentError("file {} not found".format(url_or_filename))
else:
# Something unknown
raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
def split_s3_path(url):
"""Split a full s3 path into the bucket name and path."""
parsed = urlparse(url)
if not parsed.netloc or not parsed.path:
raise ValueError("bad s3 path {}".format(url))
bucket_name = parsed.netloc
s3_path = parsed.path
# Remove '/' at beginning of path.
if s3_path.startswith("/"):
s3_path = s3_path[1:]
return bucket_name, s3_path
def s3_request(func):
"""
Wrapper function for s3 requests in order to create more helpful error
messages.
"""
@wraps(func)
def wrapper(url, *args, **kwargs):
try:
return func(url, *args, **kwargs)
except ClientError as exc:
if int(exc.response["Error"]["Code"]) == 404:
raise EnvironmentError("file {} not found".format(url))
else:
raise
return wrapper
@s3_request
def s3_etag(url):
"""Check ETag on S3 object."""
s3_resource = boto3.resource("s3")
bucket_name, s3_path = split_s3_path(url)
s3_object = s3_resource.Object(bucket_name, s3_path)
return s3_object.e_tag
@s3_request
def s3_get(url, temp_file):
"""Pull a file directly from S3."""
s3_resource = boto3.resource("s3")
bucket_name, s3_path = split_s3_path(url)
s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)
def http_get(url, temp_file):
req = requests.get(url, stream=True)
content_length = req.headers.get('Content-Length')
total = int(content_length) if content_length is not None else None
progress = tqdm(unit="B", total=total)
for chunk in req.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
progress.update(len(chunk))
temp_file.write(chunk)
progress.close()
def get_from_cache(url, cache_dir=None):
"""
Given a URL, look for the corresponding dataset in the local cache.
If it's not there, download it. Then return the path to the cached file.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
if sys.version_info[0] == 2 and not isinstance(cache_dir, str):
cache_dir = str(cache_dir)
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
# Get eTag to add to filename, if it exists.
if url.startswith("s3://"):
etag = s3_etag(url)
else:
try:
response = requests.head(url, allow_redirects=True)
if response.status_code != 200:
etag = None
else:
etag = response.headers.get("ETag")
except EnvironmentError:
etag = None
if sys.version_info[0] == 2 and etag is not None:
etag = etag.decode('utf-8')
filename = url_to_filename(url, etag)
# get cache path to put the file
cache_path = os.path.join(cache_dir, filename)
# If we don't have a connection (etag is None) and can't identify the file
# try to get the last downloaded one
if not os.path.exists(cache_path) and etag is None:
matching_files = fnmatch.filter(os.listdir(cache_dir), filename + '.*')
matching_files = list(filter(lambda s: not s.endswith('.json'), matching_files))
if matching_files:
cache_path = os.path.join(cache_dir, matching_files[-1])
if not os.path.exists(cache_path):
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with tempfile.NamedTemporaryFile() as temp_file:
logger.info("%s not found in cache, downloading to %s", url, temp_file.name)
# GET file object
if url.startswith("s3://"):
s3_get(url, temp_file)
else:
http_get(url, temp_file)
# we are copying the file before closing it, so flush to avoid truncation
temp_file.flush()
# shutil.copyfileobj() starts at the current position, so go to the start
temp_file.seek(0)
logger.info("copying %s to cache at %s", temp_file.name, cache_path)
with open(cache_path, 'wb') as cache_file:
shutil.copyfileobj(temp_file, cache_file)
logger.info("creating metadata file for %s", cache_path)
meta = {'url': url, 'etag': etag}
meta_path = cache_path + '.json'
with open(meta_path, 'w') as meta_file:
output_string = json.dumps(meta)
if sys.version_info[0] == 2 and isinstance(output_string, str):
output_string = unicode(output_string, 'utf-8') # The beauty of python 2
meta_file.write(output_string)
logger.info("removing temp file %s", temp_file.name)
return cache_path
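# Cache layout (for reference): each cached file is stored under a name derived from
# url_to_filename(url, etag), next to a '<name>.json' sidecar holding {'url', 'etag'};
# the offline fallback above reuses the most recent matching entry when no ETag can be fetched.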
| 8,876 | 33.142308 | 98 | py |
Synthetic2Realistic | Synthetic2Realistic-master/options/base_options.py | import argparse
import os
from util import util
import torch
class BaseOptions():
def __init__(self):
self.parser = argparse.ArgumentParser()
self.initialized = False
def initialize(self):
# basic define
self.parser.add_argument('--name', type=str, default='experiment_name',
help='name of the experiment. It decides where to store samples and models')
self.parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints',
                                 help='models are saved here')
self.parser.add_argument('--which_epoch', type=str, default='latest',
help='which epoch to load')
self.parser.add_argument('--gpu_ids', type=str, default='0',
help='gpu ids: e.g. 0, 1, 2 use -1 for CPU')
self.parser.add_argument('--model', type=str, default='wsupervised',
help='choose which model to use, [supervised] | [wsupervised]')
# data pattern define
self.parser.add_argument('--img_source_file', type=str, default='/data/dataset/Image2Depth_SUN_NYU/trainA_SYN10.txt',
help='training and testing dataset for source domain')
self.parser.add_argument('--img_target_file', type=str, default='/data/dataset/Image2Depth_SUN_NYU/trainA.txt',
                                 help='training and testing dataset for target domain')
self.parser.add_argument('--lab_source_file', type=str, default='/data/dataset/Image2Depth_SUN_NYU/trainC_SYN10.txt',
help='training label for source domain')
self.parser.add_argument('--lab_target_file', type=str, default='/data/dataset/Image2Depth_SUN_NYU/trainC.txt',
help='training label for target domain')
self.parser.add_argument('--dataset_mode', type=str, default='paired',
help='chooses how datasets are loaded. [paired| unpaired]')
self.parser.add_argument('--loadSize', type=list, default=[640, 192],
help='load image into same size [256, 192]|[640, 192]')
self.parser.add_argument('--flip', action='store_true',
help='if specified, do flip the image for data augmentation')
self.parser.add_argument('--scale_rate', type=float, default=0,
help='scale images with same rate')
self.parser.add_argument('--rotation', action='store_true',
help='if specified, rotate the images for data augmentation')
self.parser.add_argument('--crop', action='store_true',
help='if specified, crop the images for data augmentation')
self.parser.add_argument('--batchSize', type=int, default=6,
help='input batch size')
self.parser.add_argument('--nThreads', type=int, default=2,
help='# threads for loading data')
self.parser.add_argument('--shuffle', action='store_true',
help='if true, takes images randomly')
# network structure define
self.parser.add_argument('--image_nc', type=int, default=3,
help='# of input image channels')
self.parser.add_argument('--label_nc', type=int, default=1,
help='# of output label channels')
self.parser.add_argument('--ngf', type=int, default=64,
help='# of encoder filters in first conv layer')
self.parser.add_argument('--ndf', type=int, default=64,
help='# of discriminator filter in first conv layer')
self.parser.add_argument('--image_feature', type=int, default=512,
help='the max channels for image features')
self.parser.add_argument('--num_D', type=int, default=1,
help='# of number of the discriminator')
self.parser.add_argument('--transform_layers', type=int, default=9,
help='# of number of the down sample layers for transform network')
self.parser.add_argument('--task_layers', type=int, default=4,
help='# of number of the down sample layers for task network')
self.parser.add_argument('--image_D_layers', type=int, default=3,
help='# of number of the down layers for image discriminator')
self.parser.add_argument('--feature_D_layers', type=int, default=2,
help='# of number of the layers for features discriminator')
self.parser.add_argument('--task_model_type', type=str, default='UNet',
help='select model for task network [UNet] |[ResNet]')
self.parser.add_argument('--trans_model_type', type=str, default='ResNet',
help='select model for transform network [UNet] |[ResNet]')
self.parser.add_argument('--norm', type=str, default='batch',
help='batch normalization or instance normalization')
self.parser.add_argument('--activation', type=str, default='PReLU',
help='ReLu, LeakyReLU, PReLU, or SELU')
self.parser.add_argument('--init_type', type=str, default='kaiming',
help='network initialization [normal|xavier|kaiming]')
self.parser.add_argument('--drop_rate', type=float, default=0,
help='# of drop rate')
self.parser.add_argument('--U_weight', type=float, default=0.1,
help='weight for Unet')
# display parameter define
self.parser.add_argument('--display_winsize', type=int, default=256,
help='display window size')
self.parser.add_argument('--display_id', type=int, default=1,
help='display id of the web')
        self.parser.add_argument('--display_port', type=int, default=8097,
                                 help='visdom port of the web display')
        self.parser.add_argument('--display_single_pane_ncols', type=int, default=0,
                                 help='if positive, display all images in a single visdom web panel')
        self.initialized = True
def parse(self):
if not self.initialized:
self.initialize()
self.opt=self.parser.parse_args()
self.opt.isTrain = self.isTrain
str_ids = self.opt.gpu_ids.split(',')
self.opt.gpu_ids = []
for str_id in str_ids:
id = int(str_id)
if id >=0:
self.opt.gpu_ids.append(id)
# set gpu ids
if len(self.opt.gpu_ids):
torch.cuda.set_device(self.opt.gpu_ids[0])
args = vars(self.opt)
print('--------------Options--------------')
for k, v in sorted(args.items()):
print('%s: %s' % (str(k), str(v)))
print('----------------End----------------')
# save to the disk
expr_dir = os.path.join(self.opt.checkpoints_dir, self.opt.name)
util.mkdirs(expr_dir)
if self.opt.isTrain:
file_name = os.path.join(expr_dir, 'train_opt.txt')
else:
file_name = os.path.join(expr_dir, 'test_opt.txt')
with open(file_name, 'wt') as opt_file:
opt_file.write('--------------Options--------------\n')
for k, v in sorted(args.items()):
opt_file.write('%s: %s\n' % (str(k), str(v)))
opt_file.write('----------------End----------------\n')
return self.opt | 7,866 | 57.708955 | 125 | py |
Synthetic2Realistic | Synthetic2Realistic-master/util/image_pool.py | import random
import torch
from torch.autograd import Variable
class ImagePool():
def __init__(self, pool_size):
self.pool_size = pool_size
if self.pool_size > 0:
self.num_imgs = 0
self.images = []
def query(self, images):
if self.pool_size == 0:
return Variable(images)
return_images = []
for image in images:
image = torch.unsqueeze(image, 0)
if self.num_imgs < self.pool_size:
self.num_imgs += 1
self.images.append(image)
return_images.append(image)
else:
p = random.uniform(0,1)
if p > 0.5:
random_id = random.randint(0, self.pool_size-1)
tmp = self.images[random_id].clone()
self.images[random_id] = image
return_images.append(tmp)
else:
return_images.append(image)
return_images = Variable(torch.cat(return_images, 0))
return return_images
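# Illustrative behaviour (not part of the original file): with pool_size > 0 the pool
# returns a roughly 50/50 mix of the incoming fakes and previously stored ones, e.g.
#
#     pool = ImagePool(50)
#     history_mix = pool.query(fake_images)   # fake_images: (N, C, H, W) tensor
#
# a standard trick for showing the discriminator a history of generated samples.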
| 1,083 | 29.111111 | 67 | py |
Synthetic2Realistic | Synthetic2Realistic-master/util/task.py | import torch
import torch.nn.functional as F
###################################################################
# depth function
###################################################################
# calculate the loss
def rec_loss(pred, truth):
mask = truth == -1
mask = mask.float()
errors = torch.abs(pred - truth) * (1.0-mask)
# batch_max = 0.2 * torch.max(errors).data[0]
batch_max = 0.0 * torch.max(errors).item()
if batch_max == 0:
return torch.mean(errors)
errors_mask = errors < batch_max
errors_mask = errors_mask.float()
sqerrors = (errors ** 2 + batch_max*batch_max) / (2*batch_max)
return torch.mean(errors*errors_mask + sqerrors*(1-errors_mask))
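# Note: the 0.0 factor above forces batch_max to 0, so rec_loss currently reduces to a
# masked mean absolute error; the Huber-style branch after the early return only becomes
# active with a non-zero threshold such as the commented-out 0.2 * max(errors).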
def scale_pyramid(img, num_scales):
scaled_imgs = [img]
s = img.size()
h = s[2]
w = s[3]
for i in range(1, num_scales):
ratio = 2**i
nh = h // ratio
nw = w // ratio
        # F.upsample is deprecated; F.interpolate is the drop-in replacement
        scaled_img = F.interpolate(img, size=(nh, nw), mode='bilinear', align_corners=True)
scaled_imgs.append(scaled_img)
scaled_imgs.reverse()
return scaled_imgs
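# Illustrative shapes (assuming a 1x3x192x640 input and num_scales=4): scale_pyramid
# returns [1x3x24x80, 1x3x48x160, 1x3x96x320, 1x3x192x640], i.e. coarsest first after
# the reverse() above.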
def gradient_x(img):
gx = img[:, :, :-1, :] - img[:, :, 1:, :]
return gx
def gradient_y(img):
gy = img[:, :, :, :-1] - img[:, :, :, 1:]
return gy
# calculate the gradient loss
def get_smooth_weight(depths, Images, num_scales):
depth_gradient_x = [gradient_x(d) for d in depths]
depth_gradient_y = [gradient_y(d) for d in depths]
Image_gradient_x = [gradient_x(img) for img in Images]
Image_gradient_y = [gradient_y(img) for img in Images]
weight_x = [torch.exp(-torch.mean(torch.abs(g), 1, keepdim=True)) for g in Image_gradient_x]
weight_y = [torch.exp(-torch.mean(torch.abs(g), 1, keepdim=True)) for g in Image_gradient_y]
smoothness_x = [depth_gradient_x[i] * weight_x[i] for i in range(num_scales)]
smoothness_y = [depth_gradient_y[i] * weight_y[i] for i in range(num_scales)]
loss_x = [torch.mean(torch.abs(smoothness_x[i]))/2**i for i in range(num_scales)]
loss_y = [torch.mean(torch.abs(smoothness_y[i]))/2**i for i in range(num_scales)]
return sum(loss_x+loss_y) | 2,150 | 27.302632 | 96 | py |
Synthetic2Realistic | Synthetic2Realistic-master/model/base_model.py | import os
import torch
from collections import OrderedDict
from util import util
class BaseModel():
def name(self):
return 'BaseModel'
def initialize(self, opt):
self.opt = opt
self.gpu_ids = opt.gpu_ids
self.isTrain = opt.isTrain
self.save_dir = os.path.join(opt.checkpoints_dir, opt.name)
self.loss_names = []
self.model_names = []
self.visual_names = []
self.image_paths = []
def set_input(self, input):
self.input = input
# update learning rate
def update_learning_rate(self):
for scheduler in self.schedulers:
scheduler.step()
lr = self.optimizers[0].param_groups[0]['lr']
print('learning rate = %.7f' % lr)
# return training loss
def get_current_errors(self):
errors_ret = OrderedDict()
for name in self.loss_names:
if isinstance(name, str):
errors_ret[name] = getattr(self, 'loss_' + name).item()
return errors_ret
# return visualization images
def get_current_visuals(self):
visual_ret = OrderedDict()
for name in self.visual_names:
if isinstance(name, str):
value = getattr(self, name)
if isinstance(value, list):
visual_ret[name] = util.tensor2im(value[-1].data)
else:
visual_ret[name] = util.tensor2im(value.data)
return visual_ret
# save models
def save_networks(self, which_epoch):
for name in self.model_names:
if isinstance(name, str):
save_filename = '%s_net_%s.pth' % (which_epoch, name)
save_path = os.path.join(self.save_dir, save_filename)
net = getattr(self, 'net_' + name)
torch.save(net.cpu().state_dict(), save_path)
if len(self.gpu_ids) > 0 and torch.cuda.is_available():
net.cuda()
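    # Checkpoints are written as '<which_epoch>_net_<name>.pth' under
    # opt.checkpoints_dir/opt.name (e.g. 'latest_net_s2t.pth'); load_networks below
    # expects exactly the same naming scheme.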
# load models
def load_networks(self, which_epoch):
for name in self.model_names:
if isinstance(name, str):
save_filename = '%s_net_%s.pth' % (which_epoch, name)
save_path = os.path.join(self.save_dir, save_filename)
net = getattr(self, 'net_'+name)
net.load_state_dict(torch.load(save_path))
if not self.isTrain:
net.eval() | 2,424 | 33.642857 | 71 | py |
Synthetic2Realistic | Synthetic2Realistic-master/model/network.py | import torch
import torch.nn as nn
from torch.nn import init
import functools
from torch.autograd import Variable
from torchvision import models
import torch.nn.functional as F
from torch.optim import lr_scheduler
######################################################################################
# Functions
######################################################################################
def get_norm_layer(norm_type='batch'):
if norm_type == 'batch':
norm_layer = functools.partial(nn.BatchNorm2d, affine=True)
elif norm_type == 'instance':
norm_layer = functools.partial(nn.InstanceNorm2d, affine=False)
elif norm_type == 'none':
norm_layer = None
else:
raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
return norm_layer
def get_nonlinearity_layer(activation_type='PReLU'):
if activation_type == 'ReLU':
nonlinearity_layer = nn.ReLU(True)
elif activation_type == 'SELU':
nonlinearity_layer = nn.SELU(True)
elif activation_type == 'LeakyReLU':
nonlinearity_layer = nn.LeakyReLU(0.1, True)
elif activation_type == 'PReLU':
nonlinearity_layer = nn.PReLU()
else:
raise NotImplementedError('activation layer [%s] is not found' % activation_type)
return nonlinearity_layer
def get_scheduler(optimizer, opt):
if opt.lr_policy == 'lambda':
def lambda_rule(epoch):
lr_l = 1.0 - max(0, epoch+1+1+opt.epoch_count-opt.niter) / float(opt.niter_decay+1)
return lr_l
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
elif opt.lr_policy == 'step':
scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
elif opt.lr_policy == 'exponent':
scheduler = lr_scheduler.ExponentialLR(optimizer, gamma=0.95)
else:
raise NotImplementedError('learning rate policy [%s] is not implemented', opt.lr_policy)
return scheduler
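# Illustrative wiring (a sketch; opt must also provide lr_policy and the niter/epoch_count
# fields referenced above, presumably defined in the training options):
#
#     optimizer = torch.optim.Adam(net.parameters(), lr=2e-4)
#     scheduler = get_scheduler(optimizer, opt)   # opt.lr_policy in {'lambda', 'step', 'exponent'}
#     scheduler.step()                            # called once per epoch by BaseModel.update_learning_rate()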
def init_weights(net, init_type='normal', gain=0.02):
def init_func(m):
classname = m.__class__.__name__
if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
if init_type == 'normal':
init.normal_(m.weight.data, 0.0, gain)
elif init_type == 'xavier':
init.xavier_normal_(m.weight.data, gain=gain)
elif init_type == 'kaiming':
init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif init_type == 'orthogonal':
init.orthogonal_(m.weight.data, gain=gain)
else:
raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
if hasattr(m, 'bias') and m.bias is not None:
init.constant_(m.bias.data, 0.0)
elif classname.find('BatchNorm2d') != -1:
init.uniform_(m.weight.data, 1.0, gain)
init.constant_(m.bias.data, 0.0)
print('initialize network with %s' % init_type)
net.apply(init_func)
def print_network(net):
num_params = 0
for param in net.parameters():
num_params += param.numel()
print(net)
print('total number of parameters: %.3f M' % (num_params / 1e6))
def init_net(net, init_type='normal', gpu_ids=[]):
print_network(net)
if len(gpu_ids) > 0:
assert(torch.cuda.is_available())
net = torch.nn.DataParallel(net, gpu_ids)
net.cuda()
init_weights(net, init_type)
return net
def _freeze(*args):
for module in args:
if module:
for p in module.parameters():
p.requires_grad = False
def _unfreeze(*args):
for module in args:
if module:
for p in module.parameters():
p.requires_grad = True
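# _freeze/_unfreeze flip requires_grad on whole sub-networks; the T2Net model calls them
# to alternate optimisation, e.g. freezing the discriminators during the generator step
# and unfreezing them again for the discriminator step.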
# define the generator(transform, task) network
def define_G(input_nc, output_nc, ngf=64, layers=4, norm='batch', activation='PReLU', model_type='UNet',
init_type='xavier', drop_rate=0, add_noise=False, gpu_ids=[], weight=0.1):
if model_type == 'ResNet':
net = _ResGenerator(input_nc, output_nc, ngf, layers, norm, activation, drop_rate, add_noise, gpu_ids)
elif model_type == 'UNet':
net = _UNetGenerator(input_nc, output_nc, ngf, layers, norm, activation, drop_rate, add_noise, gpu_ids, weight)
# net = _PreUNet16(input_nc, output_nc, ngf, layers, True, norm, activation, drop_rate, gpu_ids)
else:
raise NotImplementedError('model type [%s] is not implemented', model_type)
return init_net(net, init_type, gpu_ids)
# define the discriminator network
def define_D(input_nc, ndf = 64, n_layers = 3, num_D = 1, norm = 'batch', activation = 'PReLU', init_type='xavier', gpu_ids = []):
net = _MultiscaleDiscriminator(input_nc, ndf, n_layers, num_D, norm, activation, gpu_ids)
return init_net(net, init_type, gpu_ids)
# define the feature discriminator network
def define_featureD(input_nc, n_layers=2, norm='batch', activation='PReLU', init_type='xavier', gpu_ids=[]):
net = _FeatureDiscriminator(input_nc, n_layers, norm, activation, gpu_ids)
return init_net(net, init_type, gpu_ids)
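# Illustrative construction (a sketch; values mirror the defaults in options/base_options.py):
#
#     net_s2t = define_G(3, 3, ngf=64, layers=9, norm='batch', activation='PReLU',
#                        model_type='ResNet', init_type='kaiming', gpu_ids=[0])
#     net_task = define_G(3, 1, ngf=64, layers=4, model_type='UNet', weight=0.1)
#     net_img_D = define_D(3, ndf=64, n_layers=3, num_D=1)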
######################################################################################
# Basic Operation
######################################################################################
class GaussianNoiseLayer(nn.Module):
def __init__(self):
super(GaussianNoiseLayer, self).__init__()
def forward(self, x):
        if not self.training:
return x
noise = Variable((torch.randn(x.size()).cuda(x.data.get_device()) - 0.5) / 10.0)
return x+noise
class _InceptionBlock(nn.Module):
def __init__(self, input_nc, output_nc, norm_layer=nn.BatchNorm2d, nonlinearity=nn.PReLU(), width=1, drop_rate=0, use_bias=False):
super(_InceptionBlock, self).__init__()
self.width = width
self.drop_rate = drop_rate
for i in range(width):
layer = nn.Sequential(
nn.ReflectionPad2d(i*2+1),
nn.Conv2d(input_nc, output_nc, kernel_size=3, padding=0, dilation=i*2+1, bias=use_bias)
)
setattr(self, 'layer'+str(i), layer)
self.norm1 = norm_layer(output_nc * width)
self.norm2 = norm_layer(output_nc)
self.nonlinearity = nonlinearity
self.branch1x1 = nn.Sequential(
nn.ReflectionPad2d(1),
nn.Conv2d(output_nc * width, output_nc, kernel_size=3, padding=0, bias=use_bias)
)
def forward(self, x):
result = []
for i in range(self.width):
layer = getattr(self, 'layer'+str(i))
result.append(layer(x))
output = torch.cat(result, 1)
output = self.nonlinearity(self.norm1(output))
output = self.norm2(self.branch1x1(output))
if self.drop_rate > 0:
output = F.dropout(output, p=self.drop_rate, training=self.training)
return self.nonlinearity(output+x)
class _EncoderBlock(nn.Module):
def __init__(self, input_nc, middle_nc, output_nc, norm_layer=nn.BatchNorm2d, nonlinearity=nn.PReLU(), use_bias=False):
super(_EncoderBlock, self).__init__()
model = [
nn.Conv2d(input_nc, middle_nc, kernel_size=3, stride=1, padding=1, bias=use_bias),
norm_layer(middle_nc),
nonlinearity,
nn.Conv2d(middle_nc, output_nc, kernel_size=3, stride=1, padding=1, bias=use_bias),
norm_layer(output_nc),
nonlinearity
]
self.model = nn.Sequential(*model)
def forward(self, x):
return self.model(x)
class _DownBlock(nn.Module):
def __init__(self, input_nc, output_nc, norm_layer=nn.BatchNorm2d, nonlinearity=nn.PReLU(), use_bias=False):
super(_DownBlock, self).__init__()
model = [
nn.Conv2d(input_nc, output_nc, kernel_size=3, stride=1, padding=1, bias=use_bias),
norm_layer(output_nc),
nonlinearity,
nn.MaxPool2d(kernel_size=2, stride=2),
]
self.model = nn.Sequential(*model)
def forward(self, x):
return self.model(x)
class _ShuffleUpBlock(nn.Module):
def __init__(self, input_nc, up_scale, output_nc, norm_layer=nn.BatchNorm2d, nonlinearity=nn.PReLU(), use_bias=False):
super(_ShuffleUpBlock, self).__init__()
model = [
nn.Conv2d(input_nc, input_nc*up_scale**2, kernel_size=3, stride=1, padding=1, bias=use_bias),
nn.PixelShuffle(up_scale),
nonlinearity,
nn.Conv2d(input_nc, output_nc, kernel_size=3, stride=1, padding=1, bias=use_bias),
norm_layer(output_nc),
nonlinearity
]
self.model = nn.Sequential(*model)
def forward(self, x):
return self.model(x)
class _DecoderUpBlock(nn.Module):
def __init__(self, input_nc, middle_nc, output_nc, norm_layer=nn.BatchNorm2d, nonlinearity=nn.PReLU(), use_bias=False):
super(_DecoderUpBlock, self).__init__()
model = [
nn.ReflectionPad2d(1),
nn.Conv2d(input_nc, middle_nc, kernel_size=3, stride=1, padding=0, bias=use_bias),
norm_layer(middle_nc),
nonlinearity,
nn.ConvTranspose2d(middle_nc, output_nc, kernel_size=3, stride=2, padding=1, output_padding=1),
norm_layer(output_nc),
nonlinearity
]
self.model = nn.Sequential(*model)
def forward(self, x):
return self.model(x)
class _OutputBlock(nn.Module):
def __init__(self, input_nc, output_nc, kernel_size=3, use_bias=False):
super(_OutputBlock, self).__init__()
model = [
nn.ReflectionPad2d(int(kernel_size/2)),
nn.Conv2d(input_nc, output_nc, kernel_size=kernel_size, padding=0, bias=use_bias),
nn.Tanh()
]
self.model = nn.Sequential(*model)
def forward(self, x):
return self.model(x)
######################################################################################
# Network structure
######################################################################################
class _ResGenerator(nn.Module):
def __init__(self, input_nc, output_nc, ngf=64, n_blocks=6, norm='batch', activation='PReLU', drop_rate=0, add_noise=False, gpu_ids=[]):
super(_ResGenerator, self).__init__()
self.gpu_ids = gpu_ids
norm_layer = get_norm_layer(norm_type=norm)
nonlinearity = get_nonlinearity_layer(activation_type=activation)
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
encoder = [
nn.ReflectionPad2d(3),
nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
norm_layer(ngf),
nonlinearity
]
n_downsampling = 2
mult = 1
for i in range(n_downsampling):
mult_prev = mult
mult = min(2 ** (i+1), 2)
encoder += [
_EncoderBlock(ngf * mult_prev, ngf*mult, ngf*mult, norm_layer, nonlinearity, use_bias),
nn.AvgPool2d(kernel_size=2, stride=2)
]
mult = min(2 ** n_downsampling, 2)
for i in range(n_blocks-n_downsampling):
encoder +=[
_InceptionBlock(ngf*mult, ngf*mult, norm_layer=norm_layer, nonlinearity=nonlinearity, width=1,
drop_rate=drop_rate, use_bias=use_bias)
]
decoder = []
if add_noise:
decoder += [GaussianNoiseLayer()]
for i in range(n_downsampling):
mult_prev = mult
mult = min(2 ** (n_downsampling - i -1), 2)
decoder +=[
_DecoderUpBlock(ngf*mult_prev, ngf*mult_prev, ngf*mult, norm_layer, nonlinearity, use_bias),
]
decoder +=[
nn.ReflectionPad2d(3),
nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0),
nn.Tanh()
]
self.encoder = nn.Sequential(*encoder)
self.decoder = nn.Sequential(*decoder)
def forward(self, input):
feature = self.encoder(input)
result = [feature]
output = self.decoder(feature)
result.append(output)
return result
class _PreUNet16(nn.Module):
def __init__(self, input_nc, output_nc, ngf=64, layers=5, pretrained=False, norm ='batch', activation='PReLu',
drop_rate=0, gpu_ids=[]):
super(_PreUNet16, self).__init__()
self.gpu_ids = gpu_ids
self.layers = layers
norm_layer = get_norm_layer(norm_type=norm)
nonlinearity = get_nonlinearity_layer(activation_type=activation)
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
encoder = models.vgg16(pretrained=pretrained).features
self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
self.relu = nn.ReLU(inplace=True)
self.conv1 = nn.Sequential(encoder[0], self.relu, encoder[2], self.relu)
self.conv2 = nn.Sequential(encoder[5], self.relu, encoder[7], self.relu)
self.conv3 = nn.Sequential(encoder[10], self.relu, encoder[12], self.relu, encoder[14], self.relu)
self.conv4 = nn.Sequential(encoder[17], self.relu, encoder[19], self.relu, encoder[21], self.relu)
for i in range(layers - 4):
conv = _EncoderBlock(ngf * 8, ngf * 8, ngf * 8, norm_layer, nonlinearity, use_bias)
setattr(self, 'down' + str(i), conv.model)
center = []
for i in range(7 - layers):
center += [
_InceptionBlock(ngf * 8, ngf * 8, norm_layer, nonlinearity, 7 - layers, drop_rate, use_bias)
]
center += [_DecoderUpBlock(ngf * 8, ngf * 8, ngf * 4, norm_layer, nonlinearity, use_bias)]
for i in range(layers - 4):
upconv = _DecoderUpBlock(ngf * (8 + 4), ngf * 8, ngf * 4, norm_layer, nonlinearity, use_bias)
setattr(self, 'up' + str(i), upconv.model)
self.deconv4 = _DecoderUpBlock(ngf * (4 + 4), ngf * 8, ngf * 2, norm_layer, nonlinearity, use_bias)
self.deconv3 = _DecoderUpBlock(ngf * (2 + 2) + output_nc, ngf * 4, ngf, norm_layer, nonlinearity, use_bias)
self.deconv2 = _DecoderUpBlock(ngf * (1 + 1) + output_nc, ngf * 2, int(ngf / 2), norm_layer, nonlinearity, use_bias)
self.deconv1 = _OutputBlock(int(ngf / 2) + output_nc, output_nc, kernel_size=7, use_bias=use_bias)
self.output4 = _OutputBlock(ngf * (4 + 4), output_nc, kernel_size=3, use_bias=use_bias)
self.output3 = _OutputBlock(ngf * (2 + 2) + output_nc, output_nc, kernel_size=3, use_bias=use_bias)
self.output2 = _OutputBlock(ngf * (1 + 1) + output_nc, output_nc, kernel_size=3, use_bias=use_bias)
self.center = nn.Sequential(*center)
self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
def forward(self, input):
conv1 = self.pool(self.conv1(input))
conv2 = self.pool(self.conv2(conv1))
conv3 = self.pool(self.conv3(conv2))
center_in = self.pool(self.conv4(conv3))
middle = [center_in]
for i in range(self.layers - 4):
model = getattr(self, 'down' + str(i))
center_in = self.pool(model(center_in))
middle.append(center_in)
result = [center_in]
center_out = self.center(center_in)
for i in range(self.layers - 4):
model = getattr(self, 'up' + str(i))
center_out = model(torch.cat([center_out, middle[self.layers - 4 - i]], 1))
deconv4 = self.deconv4.forward(torch.cat([center_out, conv3 * 0.1], 1))
output4 = self.output4.forward(torch.cat([center_out, conv3 * 0.1], 1))
result.append(output4)
deconv3 = self.deconv3.forward(torch.cat([deconv4, conv2 * 0.05, self.upsample(output4)], 1))
output3 = self.output3.forward(torch.cat([deconv4, conv2 * 0.05, self.upsample(output4)], 1))
result.append(output3)
deconv2 = self.deconv2.forward(torch.cat([deconv3, conv1 * 0.01, self.upsample(output3)], 1))
output2 = self.output2.forward(torch.cat([deconv3, conv1 * 0.01, self.upsample(output3)], 1))
result.append(output2)
output1 = self.deconv1.forward(torch.cat([deconv2, self.upsample(output2)], 1))
result.append(output1)
return result
class _UNetGenerator(nn.Module):
def __init__(self, input_nc, output_nc, ngf=64, layers=4, norm='batch', activation='PReLU', drop_rate=0, add_noise=False, gpu_ids=[],
weight=0.1):
super(_UNetGenerator, self).__init__()
self.gpu_ids = gpu_ids
self.layers = layers
self.weight = weight
norm_layer = get_norm_layer(norm_type=norm)
nonlinearity = get_nonlinearity_layer(activation_type=activation)
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
# encoder part
self.pool = nn.AvgPool2d(kernel_size=2, stride=2)
self.conv1 = nn.Sequential(
nn.ReflectionPad2d(3),
nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
norm_layer(ngf),
nonlinearity
)
self.conv2 = _EncoderBlock(ngf, ngf*2, ngf*2, norm_layer, nonlinearity, use_bias)
self.conv3 = _EncoderBlock(ngf*2, ngf*4, ngf*4, norm_layer, nonlinearity, use_bias)
self.conv4 = _EncoderBlock(ngf*4, ngf*8, ngf*8, norm_layer, nonlinearity, use_bias)
for i in range(layers-4):
conv = _EncoderBlock(ngf*8, ngf*8, ngf*8, norm_layer, nonlinearity, use_bias)
setattr(self, 'down'+str(i), conv.model)
center=[]
for i in range(7-layers):
center +=[
_InceptionBlock(ngf*8, ngf*8, norm_layer, nonlinearity, 7-layers, drop_rate, use_bias)
]
center += [
_DecoderUpBlock(ngf*8, ngf*8, ngf*4, norm_layer, nonlinearity, use_bias)
]
if add_noise:
center += [GaussianNoiseLayer()]
self.center = nn.Sequential(*center)
for i in range(layers-4):
upconv = _DecoderUpBlock(ngf*(8+4), ngf*8, ngf*4, norm_layer, nonlinearity, use_bias)
setattr(self, 'up' + str(i), upconv.model)
self.deconv4 = _DecoderUpBlock(ngf*(4+4), ngf*8, ngf*2, norm_layer, nonlinearity, use_bias)
self.deconv3 = _DecoderUpBlock(ngf*(2+2)+output_nc, ngf*4, ngf, norm_layer, nonlinearity, use_bias)
self.deconv2 = _DecoderUpBlock(ngf*(1+1)+output_nc, ngf*2, int(ngf/2), norm_layer, nonlinearity, use_bias)
self.output4 = _OutputBlock(ngf*(4+4), output_nc, 3, use_bias)
self.output3 = _OutputBlock(ngf*(2+2)+output_nc, output_nc, 3, use_bias)
self.output2 = _OutputBlock(ngf*(1+1)+output_nc, output_nc, 3, use_bias)
self.output1 = _OutputBlock(int(ngf/2)+output_nc, output_nc, 7, use_bias)
self.upsample = nn.Upsample(scale_factor=2, mode='nearest')
def forward(self, input):
conv1 = self.pool(self.conv1(input))
conv2 = self.pool(self.conv2.forward(conv1))
conv3 = self.pool(self.conv3.forward(conv2))
center_in = self.pool(self.conv4.forward(conv3))
middle = [center_in]
for i in range(self.layers-4):
model = getattr(self, 'down'+str(i))
center_in = self.pool(model.forward(center_in))
middle.append(center_in)
center_out = self.center.forward(center_in)
result = [center_in]
for i in range(self.layers-4):
model = getattr(self, 'up'+str(i))
center_out = model.forward(torch.cat([center_out, middle[self.layers-5-i]], 1))
deconv4 = self.deconv4.forward(torch.cat([center_out, conv3 * self.weight], 1))
output4 = self.output4.forward(torch.cat([center_out, conv3 * self.weight], 1))
result.append(output4)
deconv3 = self.deconv3.forward(torch.cat([deconv4, conv2 * self.weight * 0.5, self.upsample(output4)], 1))
output3 = self.output3.forward(torch.cat([deconv4, conv2 * self.weight * 0.5, self.upsample(output4)], 1))
result.append(output3)
deconv2 = self.deconv2.forward(torch.cat([deconv3, conv1 * self.weight * 0.1, self.upsample(output3)], 1))
output2 = self.output2.forward(torch.cat([deconv3, conv1 * self.weight * 0.1, self.upsample(output3)], 1))
result.append(output2)
output1 = self.output1.forward(torch.cat([deconv2, self.upsample(output2)], 1))
result.append(output1)
return result
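    # forward() returns [bottleneck_feature, output4, output3, output2, output1]: the
    # encoder feature (used e.g. by the feature discriminator in the T2Net model), then
    # predictions ordered coarse-to-fine, with the last entry at the input resolution.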
class _MultiscaleDiscriminator(nn.Module):
def __init__(self, input_nc, ndf=64, n_layers=3, num_D=1, norm='batch', activation='PReLU', gpu_ids=[]):
super(_MultiscaleDiscriminator, self).__init__()
self.num_D = num_D
self.gpu_ids = gpu_ids
for i in range(num_D):
netD = _Discriminator(input_nc, ndf, n_layers, norm, activation, gpu_ids)
setattr(self, 'scale'+str(i), netD)
self.downsample = nn.AvgPool2d(kernel_size=3, stride=2, padding=[1, 1], count_include_pad=False)
def forward(self, input):
result = []
for i in range(self.num_D):
netD = getattr(self, 'scale'+str(i))
output = netD.forward(input)
result.append(output)
if i != (self.num_D-1):
input = self.downsample(input)
return result
class _Discriminator(nn.Module):
def __init__(self, input_nc, ndf=64, n_layers=3, norm='batch', activation='PReLU', gpu_ids=[]):
super(_Discriminator, self).__init__()
self.gpu_ids = gpu_ids
norm_layer = get_norm_layer(norm_type=norm)
nonlinearity = get_nonlinearity_layer(activation_type=activation)
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
model = [
nn.Conv2d(input_nc, ndf, kernel_size=4, stride=2, padding=1, bias=use_bias),
nonlinearity,
]
nf_mult=1
for i in range(1, n_layers):
nf_mult_prev = nf_mult
nf_mult = min(2**i, 8)
model += [
nn.Conv2d(ndf*nf_mult_prev, ndf*nf_mult, kernel_size=4, stride=2, padding=1, bias=use_bias),
norm_layer(ndf*nf_mult),
nonlinearity,
]
nf_mult_prev = nf_mult
nf_mult = min(2 ** n_layers, 8)
model += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=4, stride=1, padding=1, bias=use_bias),
norm_layer(ndf * 8),
nonlinearity,
nn.Conv2d(ndf*nf_mult, 1, kernel_size=4, stride=1, padding=1)
]
self.model = nn.Sequential(*model)
def forward(self, input):
return self.model(input)
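# _Discriminator follows the PatchGAN layout: its final 1-channel map scores local
# patches rather than the whole image, and _MultiscaleDiscriminator above can apply
# several copies to progressively downsampled inputs when num_D > 1.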
class _FeatureDiscriminator(nn.Module):
def __init__(self, input_nc, n_layers=2, norm='batch', activation='PReLU', gpu_ids=[]):
super(_FeatureDiscriminator, self).__init__()
self.gpu_ids = gpu_ids
norm_layer = get_norm_layer(norm_type=norm)
nonlinearity = get_nonlinearity_layer(activation_type=activation)
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
model = [
nn.Linear(input_nc * 40 * 12, input_nc),
nonlinearity,
]
for i in range(1, n_layers):
model +=[
nn.Linear(input_nc, input_nc),
nonlinearity
]
model +=[nn.Linear(input_nc, 1)]
self.model = nn.Sequential(*model)
def forward(self, input):
result = []
        input = input.view(-1, 512 * 40 * 12)  # assumes input_nc == 512 and a 40x12 feature map (fixed input resolution)
output = self.model(input)
result.append(output)
return result | 24,337 | 37.028125 | 140 | py |
Synthetic2Realistic | Synthetic2Realistic-master/model/TaskModel.py | import torch
from torch.autograd import Variable
import util.task as task
from .base_model import BaseModel
from . import network
class TNetModel(BaseModel):
def name(self):
return 'TNet Model'
def initialize(self, opt):
BaseModel.initialize(self, opt)
self.loss_names = ['lab_s', 'lab_t', 'lab_smooth']
self.visual_names = ['img_s', 'lab_s', 'lab_s_g', 'img_t', 'lab_t', 'lab_t_g']
self.model_names = ['img2task']
# define the task network
self.net_img2task = network.define_G(opt.image_nc, opt.label_nc, opt.ngf, opt.task_layers, opt.norm,
opt.activation, opt.task_model_type, opt.init_type, opt.drop_rate,
False, opt.gpu_ids, opt.U_weight)
if self.isTrain:
# define the loss function
self.l1loss = torch.nn.L1Loss()
self.l2loss = torch.nn.MSELoss()
self.optimizer_img2task = torch.optim.Adam(self.net_img2task.parameters(), lr=opt.lr_task, betas=(0.9, 0.999))
self.optimizers = []
self.schedulers = []
self.optimizers.append(self.optimizer_img2task)
for optimizer in self.optimizers:
self.schedulers.append(network.get_scheduler(optimizer, opt))
if not self.isTrain or opt.continue_train:
self.load_networks(opt.which_epoch)
def set_input(self, input):
self.input = input
self.img_source = input['img_source']
self.img_target = input['img_target']
if self.isTrain:
self.lab_source = input['lab_source']
self.lab_target = input['lab_target']
if len(self.gpu_ids) > 0:
            # `async` is a reserved keyword since Python 3.7; non_blocking is the equivalent flag
            self.img_source = self.img_source.cuda(self.gpu_ids[0], non_blocking=True)
            self.img_target = self.img_target.cuda(self.gpu_ids[0], non_blocking=True)
            if self.isTrain:
                self.lab_source = self.lab_source.cuda(self.gpu_ids[0], non_blocking=True)
                self.lab_target = self.lab_target.cuda(self.gpu_ids[0], non_blocking=True)
def forward(self):
self.img_s = Variable(self.img_source)
self.img_t = Variable(self.img_target)
self.lab_s = Variable(self.lab_source)
self.lab_t = Variable(self.lab_target)
def foreward_G_basic(self, net_G, img_s, img_t):
img = torch.cat([img_s, img_t], 0)
fake = net_G(img)
size = len(fake)
f_s, f_t = fake[0].chunk(2)
img_fake = fake[1:]
img_s_fake = []
img_t_fake = []
for img_fake_i in img_fake:
img_s, img_t = img_fake_i.chunk(2)
img_s_fake.append(img_s)
img_t_fake.append(img_t)
return img_s_fake, img_t_fake, f_s, f_t, size
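    # foreward_G_basic pushes the source and target batches through the network in one
    # concatenated pass; fake[0] carries the shared encoder features and the remaining
    # multi-scale outputs are split back into their source/target halves.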
def backward_task(self):
self.lab_s_g, self.lab_t_g, self.lab_f_s, self.lab_f_t, size = \
self.foreward_G_basic(self.net_img2task, self.img_s, self.img_t)
lab_real = task.scale_pyramid(self.lab_s, size-1)
task_loss = 0
for (lab_fake_i, lab_real_i) in zip(self.lab_s_g, lab_real):
task_loss += self.l1loss(lab_fake_i, lab_real_i)
self.loss_lab_s = task_loss * self.opt.lambda_rec_lab
img_real = task.scale_pyramid(self.img_t, size - 1)
self.loss_lab_smooth = task.get_smooth_weight(self.lab_t_g, img_real, size - 1) * self.opt.lambda_smooth
total_loss = self.loss_lab_s + self.loss_lab_smooth
total_loss.backward()
def optimize_parameters(self, epoch_iter):
self.forward()
# task network
self.optimizer_img2task.zero_grad()
self.backward_task()
self.optimizer_img2task.step()
def validation_target(self):
lab_real = task.scale_pyramid(self.lab_t, len(self.lab_t_g))
task_loss = 0
for (lab_fake_i, lab_real_i) in zip(self.lab_t_g, lab_real):
task_loss += self.l1loss(lab_fake_i, lab_real_i)
self.loss_lab_t = task_loss * self.opt.lambda_rec_lab | 4,032 | 33.767241 | 122 | py |
Synthetic2Realistic | Synthetic2Realistic-master/model/T2model.py | import torch
from torch.autograd import Variable
import itertools
from util.image_pool import ImagePool
import util.task as task
from .base_model import BaseModel
from . import network
class T2NetModel(BaseModel):
def name(self):
return 'T2Net model'
def initialize(self, opt):
BaseModel.initialize(self, opt)
self.loss_names = ['img_rec', 'img_G', 'img_D', 'lab_s', 'lab_t', 'f_G', 'f_D', 'lab_smooth']
self.visual_names = ['img_s', 'img_t', 'lab_s', 'lab_t', 'img_s2t', 'img_t2t', 'lab_s_g', 'lab_t_g']
if self.isTrain:
self.model_names = ['img2task', 's2t', 'img_D', 'f_D']
else:
self.model_names = ['img2task', 's2t']
# define the transform network
self.net_s2t = network.define_G(opt.image_nc, opt.image_nc, opt.ngf, opt.transform_layers, opt.norm,
opt.activation, opt.trans_model_type, opt.init_type, opt.drop_rate,
False, opt.gpu_ids, opt.U_weight)
# define the task network
self.net_img2task = network.define_G(opt.image_nc, opt.label_nc, opt.ngf, opt.task_layers, opt.norm,
opt.activation, opt.task_model_type, opt.init_type, opt.drop_rate,
False, opt.gpu_ids, opt.U_weight)
# define the discriminator
if self.isTrain:
self.net_img_D = network.define_D(opt.image_nc, opt.ndf, opt.image_D_layers, opt.num_D, opt.norm,
opt.activation, opt.init_type, opt.gpu_ids)
self.net_f_D = network.define_featureD(opt.image_feature, opt.feature_D_layers, opt.norm,
opt.activation, opt.init_type, opt.gpu_ids)
if self.isTrain:
self.fake_img_pool = ImagePool(opt.pool_size)
# define loss functions
self.l1loss = torch.nn.L1Loss()
self.nonlinearity = torch.nn.ReLU()
# initialize optimizers
self.optimizer_T2Net = torch.optim.Adam([{'params': filter(lambda p: p.requires_grad, self.net_s2t.parameters())},
{'params': filter(lambda p: p.requires_grad, self.net_img2task.parameters()),
'lr': opt.lr_task, 'betas': (0.95, 0.999)}],
lr=opt.lr_trans, betas=(0.5, 0.9))
self.optimizer_D = torch.optim.Adam(itertools.chain(filter(lambda p: p.requires_grad, self.net_img_D.parameters()),
filter(lambda p: p.requires_grad, self.net_f_D.parameters())),
lr=opt.lr_trans, betas=(0.5, 0.9))
self.optimizers = []
self.schedulers = []
self.optimizers.append(self.optimizer_T2Net)
self.optimizers.append(self.optimizer_D)
for optimizer in self.optimizers:
self.schedulers.append(network.get_scheduler(optimizer, opt))
if not self.isTrain or opt.continue_train:
self.load_networks(opt.which_epoch)
def set_input(self, input):
self.input = input
self.img_source = input['img_source']
self.img_target = input['img_target']
if self.isTrain:
self.lab_source = input['lab_source']
self.lab_target = input['lab_target']
if len(self.gpu_ids) > 0:
            # `async` is a reserved keyword since Python 3.7; non_blocking is the equivalent flag
            self.img_source = self.img_source.cuda(self.gpu_ids[0], non_blocking=True)
            self.img_target = self.img_target.cuda(self.gpu_ids[0], non_blocking=True)
            if self.isTrain:
                self.lab_source = self.lab_source.cuda(self.gpu_ids[0], non_blocking=True)
                self.lab_target = self.lab_target.cuda(self.gpu_ids[0], non_blocking=True)
def forward(self):
self.img_s = Variable(self.img_source)
self.img_t = Variable(self.img_target)
self.lab_s = Variable(self.lab_source)
self.lab_t = Variable(self.lab_target)
def backward_D_basic(self, netD, real, fake):
D_loss = 0
for (real_i, fake_i) in zip(real, fake):
# Real
D_real = netD(real_i.detach())
# fake
D_fake = netD(fake_i.detach())
for (D_real_i, D_fake_i) in zip(D_real, D_fake):
D_loss += (torch.mean((D_real_i-1.0)**2) + torch.mean((D_fake_i -0.0)**2))*0.5
D_loss.backward()
return D_loss
def backward_D_image(self):
network._freeze(self.net_s2t, self.net_img2task, self.net_f_D)
network._unfreeze(self.net_img_D)
size = len(self.img_s2t)
fake = []
for i in range(size):
fake.append(self.fake_img_pool.query(self.img_s2t[i]))
real = task.scale_pyramid(self.img_t, size)
self.loss_img_D = self.backward_D_basic(self.net_img_D, real, fake)
def backward_D_feature(self):
network._freeze(self.net_s2t, self.net_img2task, self.net_img_D)
network._unfreeze(self.net_f_D)
self.loss_f_D = self.backward_D_basic(self.net_f_D, [self.lab_f_t], [self.lab_f_s])
def foreward_G_basic(self, net_G, img_s, img_t):
img = torch.cat([img_s, img_t], 0)
fake = net_G(img)
size = len(fake)
f_s, f_t = fake[0].chunk(2)
img_fake = fake[1:]
img_s_fake = []
img_t_fake = []
for img_fake_i in img_fake:
img_s, img_t = img_fake_i.chunk(2)
img_s_fake.append(img_s)
img_t_fake.append(img_t)
return img_s_fake, img_t_fake, f_s, f_t, size
def backward_synthesis2real(self):
# image to image transform
network._freeze(self.net_img2task, self.net_img_D, self.net_f_D)
network._unfreeze(self.net_s2t)
self.img_s2t, self.img_t2t, self.img_f_s, self.img_f_t, size = \
self.foreward_G_basic(self.net_s2t, self.img_s, self.img_t)
# image GAN loss and reconstruction loss
img_real = task.scale_pyramid(self.img_t, size - 1)
G_loss = 0
rec_loss = 0
for i in range(size - 1):
rec_loss += self.l1loss(self.img_t2t[i], img_real[i])
D_fake = self.net_img_D(self.img_s2t[i])
for D_fake_i in D_fake:
G_loss += torch.mean((D_fake_i - 1.0) ** 2)
self.loss_img_G = G_loss * self.opt.lambda_gan_img
self.loss_img_rec = rec_loss * self.opt.lambda_rec_img
total_loss = self.loss_img_G + self.loss_img_rec
total_loss.backward(retain_graph=True)
def backward_translated2depth(self):
# task network
network._freeze(self.net_img_D, self.net_f_D)
network._unfreeze(self.net_s2t, self.net_img2task)
fake = self.net_img2task.forward(self.img_s2t[-1])
size=len(fake)
self.lab_f_s = fake[0]
self.lab_s_g = fake[1:]
#feature GAN loss
D_fake = self.net_f_D(self.lab_f_s)
G_loss = 0
for D_fake_i in D_fake:
G_loss += torch.mean((D_fake_i - 1.0) ** 2)
self.loss_f_G = G_loss * self.opt.lambda_gan_feature
# task loss
lab_real = task.scale_pyramid(self.lab_s, size-1)
task_loss = 0
for (lab_fake_i, lab_real_i) in zip(self.lab_s_g, lab_real):
task_loss += self.l1loss(lab_fake_i, lab_real_i)
self.loss_lab_s = task_loss * self.opt.lambda_rec_lab
total_loss = self.loss_f_G + self.loss_lab_s
total_loss.backward()
def backward_real2depth(self):
# image2depth
network._freeze(self.net_s2t, self.net_img_D, self.net_f_D)
network._unfreeze(self.net_img2task)
fake = self.net_img2task.forward(self.img_t)
size = len(fake)
# Gan depth
self.lab_f_t = fake[0]
self.lab_t_g = fake[1:]
img_real = task.scale_pyramid(self.img_t, size - 1)
self.loss_lab_smooth = task.get_smooth_weight(self.lab_t_g, img_real, size-1) * self.opt.lambda_smooth
total_loss = self.loss_lab_smooth
total_loss.backward()
def optimize_parameters(self, epoch_iter):
self.forward()
# T2Net
self.optimizer_T2Net.zero_grad()
self.backward_synthesis2real()
self.backward_translated2depth()
self.backward_real2depth()
self.optimizer_T2Net.step()
# Discriminator
self.optimizer_D.zero_grad()
self.backward_D_feature()
self.backward_D_image()
if epoch_iter % 5 == 0:
self.optimizer_D.step()
for p in self.net_f_D.parameters():
p.data.clamp_(-0.01,0.01)
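    # Schedule: the transform + task networks are updated every iteration (translation,
    # translated-to-depth, and real-image smoothness terms), while the two discriminators
    # only step every 5th iteration, with the feature discriminator weights clamped to
    # [-0.01, 0.01] (WGAN-style clipping).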
def validation_target(self):
lab_real = task.scale_pyramid(self.lab_t, len(self.lab_t_g))
task_loss = 0
for (lab_fake_i, lab_real_i) in zip(self.lab_t_g, lab_real):
task_loss += task.rec_loss(lab_fake_i, lab_real_i)
self.loss_lab_t = task_loss * self.opt.lambda_rec_lab
| 9,119 | 37.808511 | 130 | py |
Synthetic2Realistic | Synthetic2Realistic-master/model/test_model.py | import torch
from torch.autograd import Variable
from .base_model import BaseModel
from . import network
from util import util
from collections import OrderedDict
class TestModel(BaseModel):
def name(self):
return 'TestModel'
def initialize(self, opt):
assert (not opt.isTrain)
BaseModel.initialize(self, opt)
self.loss_names = []
self.visual_names =['img_s', 'img_t', 'img_s2t', 'lab_t_g']
self.model_names = ['img2task', 's2t']
#self.model_names = ['img2task']
# define the transform network
self.net_s2t = network.define_G(opt.image_nc, opt.image_nc, opt.ngf, opt.transform_layers, opt.norm,
opt.activation, opt.trans_model_type, opt.init_type, opt.drop_rate,
False, opt.gpu_ids, opt.U_weight)
# define the task network
self.net_img2task = network.define_G(opt.image_nc, opt.label_nc, opt.ngf, opt.task_layers, opt.norm,
opt.activation, opt.task_model_type, opt.init_type, opt.drop_rate,
False, opt.gpu_ids, opt.U_weight)
self.load_networks(opt.which_epoch)
def set_input(self, input):
self.input = input
self.img_source = input['img_source']
self.img_target = input['img_target']
if len(self.gpu_ids) > 0:
self.img_source = self.img_source.cuda()
self.img_target = self.img_target.cuda()
self.image_paths = input['img_target_paths']
def test(self):
self.img_s = Variable(self.img_source)
self.img_t = Variable(self.img_target)
with torch.no_grad():
self.img_s2t = self.net_s2t.forward(self.img_s)
self.lab_t_g = self.net_img2task.forward(self.img_t)
# save_results
def save_results(self, visualizer, wed_page):
img_source_paths = self.input['img_source_paths']
img_target_paths = self.input['img_target_paths']
for i in range(self.img_s.size(0)):
img_source = util.tensor2im(self.img_s.data[i])
img_target = util.tensor2im(self.img_t.data[i])
img_source2target = util.tensor2im(self.img_s2t[-1].data[i])
lab_fake_target = util.tensor2im(self.lab_t_g[-1].data[i])
visuals = OrderedDict([('img_s', img_source), ('img_s2t', img_source2target)])
print('process image ......%s' % img_source_paths[0])
visualizer.save_images(wed_page, visuals, img_source_paths)
img_source_paths.pop(0)
visuals = OrderedDict([('img_t', img_target), ('lab_t_g', lab_fake_target)])
print('process image ......%s' % img_target_paths[0])
visualizer.save_images(wed_page, visuals, img_target_paths)
img_target_paths.pop(0) | 2,883 | 39.619718 | 111 | py |
Synthetic2Realistic | Synthetic2Realistic-master/dataloader/data_loader.py | import random
from PIL import Image
import torchvision.transforms as transforms
import torch.utils.data as data
from .image_folder import make_dataset
import torchvision.transforms.functional as F
class CreateDataset(data.Dataset):
def initialize(self, opt):
self.opt = opt
self.img_source_paths, self.img_source_size = make_dataset(opt.img_source_file)
self.img_target_paths, self.img_target_size = make_dataset(opt.img_target_file)
if self.opt.isTrain:
self.lab_source_paths, self.lab_source_size = make_dataset(opt.lab_source_file)
# for visual results, not for training
self.lab_target_paths, self.lab_target_size = make_dataset(opt.lab_target_file)
self.transform_augment = get_transform(opt, True)
self.transform_no_augment = get_transform(opt, False)
def __getitem__(self, item):
index = random.randint(0, self.img_target_size - 1)
img_source_path = self.img_source_paths[item % self.img_source_size]
if self.opt.dataset_mode == 'paired':
img_target_path = self.img_target_paths[item % self.img_target_size]
elif self.opt.dataset_mode == 'unpaired':
img_target_path = self.img_target_paths[index]
else:
raise ValueError('Data mode [%s] is not recognized' % self.opt.dataset_mode)
img_source = Image.open(img_source_path).convert('RGB')
img_target = Image.open(img_target_path).convert('RGB')
img_source = img_source.resize([self.opt.loadSize[0], self.opt.loadSize[1]], Image.BICUBIC)
img_target = img_target.resize([self.opt.loadSize[0], self.opt.loadSize[1]], Image.BICUBIC)
if self.opt.isTrain:
lab_source_path = self.lab_source_paths[item % self.lab_source_size]
if self.opt.dataset_mode == 'paired':
lab_target_path = self.lab_target_paths[item % self.img_target_size]
elif self.opt.dataset_mode == 'unpaired':
lab_target_path = self.lab_target_paths[index]
else:
raise ValueError('Data mode [%s] is not recognized' % self.opt.dataset_mode)
lab_source = Image.open(lab_source_path)#.convert('RGB')
lab_target = Image.open(lab_target_path)#.convert('RGB')
lab_source = lab_source.resize([self.opt.loadSize[0], self.opt.loadSize[1]], Image.BICUBIC)
lab_target = lab_target.resize([self.opt.loadSize[0], self.opt.loadSize[1]], Image.BICUBIC)
img_source, lab_source, scale = paired_transform(self.opt, img_source, lab_source)
img_source = self.transform_augment(img_source)
lab_source = self.transform_no_augment(lab_source)
img_target, lab_target, scale = paired_transform(self.opt, img_target, lab_target)
img_target = self.transform_no_augment(img_target)
lab_target = self.transform_no_augment(lab_target)
return {'img_source': img_source, 'img_target': img_target,
'lab_source': lab_source, 'lab_target': lab_target,
'img_source_paths': img_source_path, 'img_target_paths': img_target_path,
'lab_source_paths': lab_source_path, 'lab_target_paths': lab_target_path
}
else:
img_source = self.transform_augment(img_source)
img_target = self.transform_no_augment(img_target)
return {'img_source': img_source, 'img_target': img_target,
'img_source_paths': img_source_path, 'img_target_paths': img_target_path,
}
def __len__(self):
return max(self.img_source_size, self.img_target_size)
def name(self):
return 'T^2Dataset'
def dataloader(opt):
datasets = CreateDataset()
datasets.initialize(opt)
dataset = data.DataLoader(datasets, batch_size=opt.batchSize, shuffle=opt.shuffle, num_workers=int(opt.nThreads))
return dataset
def paired_transform(opt, image, depth):
scale_rate = 1.0
if opt.flip:
n_flip = random.random()
if n_flip > 0.5:
image = F.hflip(image)
depth = F.hflip(depth)
if opt.rotation:
n_rotation = random.random()
if n_rotation > 0.5:
degree = random.randrange(-500, 500)/100
image = F.rotate(image, degree, Image.BICUBIC)
depth = F.rotate(depth, degree, Image.BILINEAR)
return image, depth, scale_rate
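# paired_transform applies the same random flip/rotation to an RGB image and its depth
# map so pixel correspondence is preserved; scale_rate is currently a fixed 1.0 placeholder.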
def get_transform(opt, augment):
transforms_list = []
if augment:
if opt.isTrain:
transforms_list.append(transforms.ColorJitter(brightness=0.0, contrast=0.0, saturation=0.0, hue=0.0))
transforms_list += [
transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
]
return transforms.Compose(transforms_list)
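# Illustrative use from a training script (a sketch; import path assumed from this file's location):
#
#     from dataloader.data_loader import dataloader
#     dataset = dataloader(opt)
#     for batch in dataset:
#         img_s, img_t = batch['img_source'], batch['img_target']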
| 4,873 | 41.017241 | 117 | py |
TEC-reduced-model | TEC-reduced-model-main/setup.py | from setuptools import setup, find_packages
install_requires = [
"pybamm == 0.4.0",
"matplotlib",
"prettytable",
"jax",
"jaxlib",
"SciencePlots",
]
setup(
name="tec_reduced_model",
version="0.2",
author="Ferran Brosa Planella",
author_email="[email protected]",
packages=find_packages(),
license="LICENSE",
description='Code and data for the paper "Systematic derivation and validation of'
" a reduced thermal-electrochemical model for lithium-ion batteries using"
' asymptotic methods" by Ferran Brosa Planella, Muhammad Sheikh and W. Dhammika'
" Widanage (2020).",
install_requires=install_requires,
)
| 692 | 26.72 | 86 | py |
FishFSRNet | FishFSRNet-main/parsing/test_parsingnet.py | from option import args
import os
os.environ['CUDA_VISIBLE_DEVICES'] = args.cuda_name
import torch
import dataset_parsingnet
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import util
import torchvision
import parsingnet
# ParsingNet takes the parsed options (it reads args.n_colors and args.res_scale)
net = parsingnet.ParsingNet(args)
net = util.prepare(net)
print(util.get_parameter_number(net))
writer = SummaryWriter('./logs/{}'.format(args.writer_name))
testdata = dataset_parsingnet.Data(root=os.path.join(args.dir_data, args.data_test), args=args, train=False)
testset = DataLoader(testdata, batch_size=1, shuffle=False, num_workers=1)
pretrained_dict = torch.load('./epoch.pth', map_location='cuda:0')
net.load_state_dict(pretrained_dict)
net = util.prepare(net)
net.eval()
val_psnr = 0
val_ssim = 0
with torch.no_grad():
os.makedirs(os.path.join(args.save_path, args.writer_name, 'result-test'), exist_ok=True)
net.eval()
timer_test = util.timer()
for batch, (lr, _, filename) in enumerate(testset):
lr = util.prepare(lr)
p = net(lr)
torchvision.utils.save_image(p[0],
os.path.join(args.save_path, args.writer_name, 'result-test',
'{}'.format(str(filename[0])[:-4] + ".png")))
print("Tesing over.")
| 1,315 | 36.6 | 108 | py |
FishFSRNet | FishFSRNet-main/parsing/parsingnet.py | import common
import torch.nn as nn
class ParsingNet(nn.Module):
def __init__(self, args, conv=common.default_conv):
super(ParsingNet, self).__init__()
n_resblocks = 8
n_feats = 64
kernel_size = 3
act = nn.ReLU(True)
self.args = args
m_head = [conv(args.n_colors, n_feats, kernel_size)]
# define body module
m_body = [
common.ResBlock(
conv, n_feats, kernel_size, act=act, res_scale=args.res_scale
) for _ in range(n_resblocks)
]
m_feature = [
conv(n_feats, 3, kernel_size)
]
self.head = nn.Sequential(*m_head)
self.body = nn.Sequential(*m_body)
self.feature = nn.Sequential(*m_feature)
def forward(self, x):
x = self.head(x)
res = self.body(x)
feature = self.feature(res)
return feature
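# Illustrative use (a sketch; assumes an options object like the one produced by option.py):
#
#     net = ParsingNet(args)
#     parsing_map = net(lr_face)   # (N, args.n_colors, H, W) -> (N, 3, H, W)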
| 906 | 24.914286 | 77 | py |
FishFSRNet | FishFSRNet-main/parsing/cbam.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
class Flatten(nn.Module):
def forward(self, x):
return x.view(x.size(0), -1)
class ChannelGate(nn.Module):
def __init__(self, gate_channels, reduction_ratio=16, pool_types=None):
super(ChannelGate, self).__init__()
if pool_types is None:
pool_types = ['avg', 'max']
self.gate_channels = gate_channels
self.mlp = nn.Sequential(
Flatten(),
nn.Linear(gate_channels, int(gate_channels // reduction_ratio)),
nn.ReLU(),
nn.Linear(int(gate_channels // reduction_ratio), gate_channels)
)
self.pool_types = pool_types
def forward(self, x):
channel_att_sum = None
for pool_type in self.pool_types:
if pool_type == 'avg':
avg_pool = F.avg_pool2d(x, (x.size(2), x.size(3)), stride=(x.size(2), x.size(3)))
channel_att_raw = self.mlp(avg_pool)
elif pool_type == 'max':
max_pool = F.max_pool2d(x, (x.size(2), x.size(3)), stride=(x.size(2), x.size(3)))
channel_att_raw = self.mlp(max_pool)
elif pool_type == 'lp':
lp_pool = F.lp_pool2d(x, 2, (x.size(2), x.size(3)), stride=(x.size(2), x.size(3)))
channel_att_raw = self.mlp(lp_pool)
elif pool_type == 'var':
var_pool = variance_pool(x)
channel_att_raw = self.mlp(var_pool)
elif pool_type == 'lse':
# LSE pool only
lse_pool = logsumexp_2d(x)
channel_att_raw = self.mlp(lse_pool)
if channel_att_sum is None:
channel_att_sum = channel_att_raw
else:
channel_att_sum = channel_att_sum + channel_att_raw
scale = torch.sigmoid(channel_att_sum).unsqueeze(2).unsqueeze(3).expand_as(x)
return x * scale
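# ChannelGate above implements CBAM-style channel attention: each pooling statistic
# ('avg', 'max', 'var', 'lp' or 'lse') goes through the shared MLP, the results are
# summed, and the sigmoid of that sum rescales every channel of the input.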
def logsumexp_2d(tensor):
tensor_flatten = tensor.view(tensor.size(0), tensor.size(1), -1)
s, _ = torch.max(tensor_flatten, dim=2, keepdim=True)
outputs = s + (tensor_flatten - s).exp().sum(dim=2, keepdim=True).log()
return outputs
class ChannelPool(nn.Module):
def forward(self, x):
return torch.cat((torch.max(x, 1)[0].unsqueeze(1), torch.mean(x, 1).unsqueeze(1)), dim=1)
class CSAR_SpatialGate(nn.Module):
def __init__(self, n_feats, gama=2):
super(CSAR_SpatialGate, self).__init__()
self.spatial_layer1 = nn.Conv2d(in_channels=n_feats, out_channels=gama * n_feats, kernel_size=1, stride=1)
self.spatial_layer2 = nn.ReLU()
self.spatial_layer3 = nn.Conv2d(in_channels=gama * n_feats, out_channels=n_feats, kernel_size=1, stride=1)
def forward(self, x):
x_compress = self.spatial_layer1(x)
x_out = self.spatial_layer2(x_compress)
x_out = self.spatial_layer3(x_out)
scale = torch.sigmoid(x_out) # broadcasting
return x * scale
def variance_pool(x):
my_mean = x.mean(dim=3, keepdim=True).mean(dim=2, keepdim=True)
return (x - my_mean).pow(2).mean(dim=3, keepdim=False).mean(dim=2, keepdim=False).view(x.size()[0], x.size()[1], 1,
1)
| 3,309 | 34.978261 | 119 | py |
FishFSRNet | FishFSRNet-main/parsing/common.py | import torch.nn as nn
import torch
import cbam
import math
def batched_index_select(values, indices):
last_dim = values.shape[-1]
return values.gather(1, indices[:, :, None].expand(-1, -1, last_dim))
class BasicBlock(nn.Sequential):
def __init__(
self, conv, in_channels, out_channels, kernel_size, stride=1, bias=True,
bn=False, act=nn.PReLU()):
m = [conv(in_channels, out_channels, kernel_size, bias=bias)]
if bn:
m.append(nn.BatchNorm2d(out_channels))
if act is not None:
m.append(act)
super(BasicBlock, self).__init__(*m)
def get_parameters(model, bias):
for m in model.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
if bias:
yield m.bias
else:
yield m.weight
elif isinstance(m, nn.BatchNorm2d):
if bias:
yield m.bias
else:
yield m.weight
def default_conv(in_channels, out_channels, kernel_size, stride=1, bias=True):
return nn.Conv2d(
in_channels, out_channels, kernel_size,
stride=stride, padding=(kernel_size // 2), bias=bias)
class ResBlock(nn.Module):
def __init__(
self, conv, n_feats, kernel_size,
bias=True, bn=False, act=nn.ReLU(True), res_scale=1):
super(ResBlock, self).__init__()
m = []
for i in range(2):
m.append(conv(n_feats, n_feats, kernel_size, bias=bias))
if bn:
m.append(nn.BatchNorm2d(n_feats))
if i == 0:
m.append(act)
self.body = nn.Sequential(*m)
self.res_scale = res_scale
def forward(self, x):
res = self.body(x).mul(self.res_scale)
res += x
return res
class Upsampler(nn.Sequential):
def __init__(self, conv, scale, n_feats, bn=False, act=False, bias=True):
m = []
if (scale & (scale - 1)) == 0: # Is scale = 2^n?
for _ in range(int(math.log(scale, 2))):
m.append(conv(n_feats, 4 * n_feats, 3, bias))
m.append(nn.PixelShuffle(2))
if bn:
m.append(nn.BatchNorm2d(n_feats))
if act == 'relu':
m.append(nn.ReLU(True))
elif act == 'prelu':
m.append(nn.PReLU(n_feats))
elif scale == 3:
m.append(conv(n_feats, 9 * n_feats, 3, bias))
m.append(nn.PixelShuffle(3))
if bn:
m.append(nn.BatchNorm2d(n_feats))
if act == 'relu':
m.append(nn.ReLU(True))
elif act == 'prelu':
m.append(nn.PReLU(n_feats))
else:
raise NotImplementedError
super(Upsampler, self).__init__(*m)
class invPixelShuffle(nn.Module):
def __init__(self, ratio=2):
super(invPixelShuffle, self).__init__()
self.ratio = ratio
def forward(self, tensor):
ratio = self.ratio
b = tensor.size(0)
ch = tensor.size(1)
y = tensor.size(2)
x = tensor.size(3)
assert x % ratio == 0 and y % ratio == 0, 'x, y, ratio : {}, {}, {}'.format(x, y, ratio)
return tensor.view(b, ch, y // ratio, ratio, x // ratio, ratio).permute(0, 1, 3, 5, 2, 4).contiguous().view(b,
-1,
y // ratio,
x // ratio)
class invUpsampler(nn.Sequential):
def __init__(self, conv, scale, n_feat, bn=False, act=False, bias=True):
m = []
if (scale & (scale - 1)) == 0: # Is scale = 2^n?
for _ in range(int(math.log(scale, 2))):
m.append(invPixelShuffle(2))
m.append(conv(n_feat * 4, n_feat, 3, bias))
if bn: m.append(nn.BatchNorm2d(n_feat))
if act: m.append(act())
elif scale == 3:
m.append(invPixelShuffle(3))
m.append(conv(n_feat * 9, n_feat, 3, bias))
if bn: m.append(nn.BatchNorm2d(n_feat))
if act: m.append(act())
else:
raise NotImplementedError
super(invUpsampler, self).__init__(*m)
class Refine(nn.Module):
def __init__(self, n_feats, conv=default_conv):
super(Refine, self).__init__()
kernel_size = 3
act = nn.ReLU(True)
self.conv = nn.Sequential(*[ResBlock(conv, n_feats, kernel_size, act=act),
ResBlock(conv, n_feats, kernel_size, act=act)])
def forward(self, first, second):
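        # Refine 'second' (the current-scale feature) with its residual against
        # 'first' (a feature brought in from another scale), then add it back.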
        residual = second - first
        res = self.conv(residual)
res = res + second
return res
class Multi_scale_fusion_block(nn.Module):
def __init__(self, n_feats, scale):
super(Multi_scale_fusion_block, self).__init__()
self.scale = scale
if scale == 2:
self.down1 = nn.Conv2d(in_channels=n_feats, out_channels=n_feats, kernel_size=3, stride=2, padding=1)
self.down2 = nn.Sequential(
*[nn.Conv2d(in_channels=n_feats, out_channels=n_feats, kernel_size=3, stride=2, padding=1),
nn.ReLU(True),
nn.Conv2d(in_channels=n_feats, out_channels=n_feats, kernel_size=3, stride=2, padding=1)])
elif scale == 4:
self.down1 = nn.Conv2d(in_channels=n_feats, out_channels=n_feats, kernel_size=3, stride=2, padding=1)
self.up1 = nn.UpsamplingNearest2d(scale_factor=2)
elif scale == 8:
self.up1 = nn.UpsamplingNearest2d(scale_factor=2)
self.up2 = nn.UpsamplingNearest2d(scale_factor=4)
self.refine2 = Refine(n_feats)
self.refine4 = Refine(n_feats)
self.refine8 = Refine(n_feats)
# self.attention = CA(conv=default_conv, n_feats=n_feats, kernel_size=1)
self.attention = cbam.ChannelGate(n_feats, reduction_ratio=4, pool_types=['avg', 'max', 'var'])
self.conv = nn.Conv2d(in_channels=n_feats * 3, out_channels=n_feats, kernel_size=1)
def forward(self, scale2, scale4, scale8, now):
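        # Resample the x2/x4/x8 stage features to the resolution handled by this block,
        # refine each against the current feature 'now', then fuse them with a 1x1 conv
        # and channel attention before the residual connection.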
if self.scale == 2:
scale4 = self.down1(scale4)
scale8 = self.down2(scale8)
elif self.scale == 4:
scale8 = self.down1(scale8)
scale2 = self.up1(scale2)
elif self.scale == 8:
scale4 = self.up1(scale4)
scale2 = self.up2(scale2)
feature1 = self.refine2(scale2, now)
feature2 = self.refine4(scale4, now)
feature3 = self.refine8(scale8, now)
fea = torch.cat((feature1, feature2, feature3), 1)
fea = self.conv(fea)
fea = self.attention(fea)
fea = fea + now
return fea
class PCSR1(nn.Module):
def __init__(self, conv, n_feats, kernel_size, bias=True, act=nn.ReLU(True), res_scale=1, gama=2, lamb=4):
super(PCSR1, self).__init__()
# First branch
m = []
for i in range(2):
if i == 0:
m.append(conv(n_feats, n_feats, kernel_size, bias=bias))
m.append(act)
if i == 1:
m.append(conv(n_feats, n_feats, kernel_size, bias=bias))
self.body = nn.Sequential(*m)
self.attention_layer1 = cbam.CSAR_SpatialGate(n_feats, gama=gama)
self.attention_layer2 = cbam.ChannelGate(n_feats, reduction_ratio=lamb, pool_types=['avg', 'max', 'var'])
self.conv = conv(2 * n_feats, n_feats, 1, bias=bias)
self.res_scale = res_scale
# Second branch
self.conv_feature = nn.Sequential(
*[nn.Conv2d(in_channels=n_feats, out_channels=n_feats, kernel_size=3, stride=1, padding=1),
act])
self.conv_parsing = nn.Sequential(
*[nn.Conv2d(in_channels=3, out_channels=n_feats, kernel_size=3, stride=1, padding=1),
act])
self.conv_fusion = nn.Conv2d(in_channels=n_feats * 2, out_channels=n_feats, kernel_size=3, stride=1,
padding=1)
self.attention_fusion = nn.Conv2d(in_channels=n_feats * 2, out_channels=n_feats, kernel_size=1, stride=1)
def forward(self, x, p):
# First branch
res = self.body(x)
res1 = self.attention_layer1(res)
res2 = self.attention_layer2(res)
res = torch.cat((res1, res2), 1)
res = self.conv(res)
# Second branch
fea = self.conv_feature(x)
par = self.conv_parsing(p)
fea = torch.cat((fea, par), 1)
fea = self.conv_fusion(fea)
fea_fusion = torch.cat((fea, res), 1)
res = self.attention_fusion(fea_fusion)
res += x
return res
| 8,996 | 34.007782 | 127 | py |
FishFSRNet | FishFSRNet-main/parsing/util.py |
import torch
import numpy as np
import math
import cv2
def prepare(arg):
if torch.cuda.is_available():
# print(1)
arg = arg.cuda()
return arg
def rgb2ycbcr(img, only_y=True):
'''same as matlab rgb2ycbcr
only_y: only return Y channel
Input:
uint8, [0, 255]
float, [0, 1]
'''
in_img_type = img.dtype
img.astype(np.float32)
if in_img_type != np.uint8:
img *= 255.
# convert
if only_y:
rlt = np.dot(img, [65.481, 128.553, 24.966]) / 255.0 + 16.0
else:
rlt = np.matmul(img, [[65.481, -37.797, 112.0], [128.553, -74.203, -93.786],
[24.966, 112.0, -18.214]]) / 255.0 + [16, 128, 128]
if in_img_type == np.uint8:
rlt = rlt.round()
else:
rlt /= 255.
return rlt.astype(in_img_type)
def calc_metrics(img1, img2, crop_border=8, test_Y=True):
#
# print(img1.shape, img1.shape[2])
img1 = np.transpose(img1, (1, 2, 0))
img2 = np.transpose(img2, (1, 2, 0))
img1 = np.array(img1)
img2 = np.array(img2)
# print(img1.shape, img1.shape[2])
if test_Y and img1.shape[2] == 3: # evaluate on Y channel in YCbCr color space
im1_in = rgb2ycbcr(img1)
im2_in = rgb2ycbcr(img2)
else:
im1_in = img1
im2_in = img2
# print("img1_in.ndim: ", im1_in.ndim)
if im1_in.ndim == 3:
# cropped_im1 = im1_in[crop_border:-crop_border, crop_border:-crop_border, :]
# cropped_im2 = im2_in[crop_border:-crop_border, crop_border:-crop_border, :]
cropped_im1 = im1_in[:, crop_border:-crop_border, crop_border:-crop_border]
cropped_im2 = im2_in[:, crop_border:-crop_border, crop_border:-crop_border]
elif im1_in.ndim == 2:
cropped_im1 = im1_in[crop_border:-crop_border, crop_border:-crop_border]
cropped_im2 = im2_in[crop_border:-crop_border, crop_border:-crop_border]
else:
raise ValueError('Wrong image dimension: {}. Should be 2 or 3.'.format(im1_in.ndim))
# print("cropped: ", cropped_im1.shape, cropped_im2.shape)
psnr = calc_psnr(cropped_im1 * 255, cropped_im2 * 255)
ssim = calc_ssim(cropped_im1 * 255, cropped_im2 * 255)
# print(type(ssim))
return psnr, ssim
def calc_psnr(img1, img2):
# img1 and img2 have range [0, 255]
#
img1 = img1.astype(np.float64)
img2 = img2.astype(np.float64)
img1_np = np.array(img1)
img2_np = np.array(img2)
mse = np.mean((img1_np - img2_np)**2)
if mse == 0:
return float('inf')
return 20 * math.log10(255.0 / math.sqrt(mse))
def ssim(img1, img2):
C1 = (0.01 * 255)**2
C2 = (0.03 * 255)**2
# img1 = img1.astype(np.float64)
# img2 = img2.astype(np.float64)
img1_np = np.array(img1)
img2_np = np.array(img2)
kernel = cv2.getGaussianKernel(11, 1.5)
window = np.outer(kernel, kernel.transpose())
mu1 = cv2.filter2D(img1_np, -1, window)[5:-5, 5:-5] # valid
mu2 = cv2.filter2D(img2_np, -1, window)[5:-5, 5:-5]
mu1_sq = mu1**2
mu2_sq = mu2**2
mu1_mu2 = mu1 * mu2
sigma1_sq = cv2.filter2D(img1_np**2, -1, window)[5:-5, 5:-5] - mu1_sq
sigma2_sq = cv2.filter2D(img2_np**2, -1, window)[5:-5, 5:-5] - mu2_sq
sigma12 = cv2.filter2D(img1_np * img2_np, -1, window)[5:-5, 5:-5] - mu1_mu2
ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) *
(sigma1_sq + sigma2_sq + C2))
return ssim_map.mean()
def calc_ssim(img1, img2):
'''calculate SSIM
the same outputs as MATLAB's
img1, img2: [0, 255]
'''
# print("img2: ", img2.shape)
# img1 = np.transpose(img1, (1, 2, 0))
# img2 = np.transpose(img2, (1, 2, 0))
# print("img2_np_trans", img2.shape)
if not img1.shape == img2.shape:
raise ValueError('Input images must have the same dimensions.')
# print(img1.shape)
if img1.ndim == 2:
return ssim(img1, img2)
elif img1.ndim == 3:
# print(img1.shape[2])
if img1.shape[2] == 3:
ssims = []
for i in range(3):
ssims.append(ssim(img1, img2))
return np.array(ssims).mean()
elif img1.shape[2] == 1:
return ssim(np.squeeze(img1), np.squeeze(img2))
else:
raise ValueError('Wrong input image dimensions.')
| 4,380 | 28.601351 | 92 | py |
FishFSRNet | FishFSRNet-main/parsing/main_parsingnet.py | import torch
import torch.optim as optim
from option import args
import os
os.environ['CUDA_VISIBLE_DEVICES'] = args.cuda_name
import torch.nn as nn
import dataset_parsingnet
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import util
import torchvision
from parsingnet import ParsingNet
net = ParsingNet()
net = util.prepare(net)
writer = SummaryWriter('./logs/{}'.format(args.writer_name))
traindata = dataset_parsingnet.Data(root=os.path.join(args.dir_data, args.data_train), args=args, train=True)
trainset = DataLoader(traindata, batch_size=args.batch_size, shuffle=True, num_workers=16)
valdata = dataset_parsingnet.Data(root=os.path.join(args.dir_data, args.data_val), args=args, train=False)
valset = DataLoader(valdata, batch_size=1, shuffle=False, num_workers=1)
criterion1 = nn.L1Loss()
optimizer = optim.Adam(params=net.parameters(), lr=args.lr, betas=(0.9, 0.99), eps=1e-8)
for i in range(args.epochs):
net.train()
train_loss = 0
bum = len(trainset)
for batch, (lr, hr, _) in enumerate(trainset):
lr, hr = util.prepare(lr), util.prepare(hr)
sr = net(lr)
loss = criterion1(sr, hr)
train_loss = train_loss + loss.item()
optimizer.zero_grad()
loss.backward()
optimizer.step()
print("Epoch:{} loss: {:.3f}".format(i + 1, train_loss / (len(trainset)) * 255))
writer.add_scalar('train_loss', train_loss / (len(trainset)) * 255, i + 1)
os.makedirs(os.path.join(args.save_path, args.writer_name), exist_ok=True)
os.makedirs(os.path.join(args.save_path, args.writer_name, 'model'), exist_ok=True)
torch.save(net.state_dict(),
os.path.join(args.save_path, args.writer_name, 'model', 'epoch{}.pth'.format(i + 1)))
net.eval()
val_psnr_my = 0
os.makedirs(os.path.join(args.save_path, args.writer_name, 'result'), exist_ok=True)
for batch, (lr, hr, filename) in enumerate(valset):
lr, hr = util.prepare(lr), util.prepare(hr)
sr = net(lr)
        val_psnr_my = val_psnr_my + util.calc_psnr(hr[0].data.cpu().numpy() * 255, sr[0].data.cpu().numpy() * 255)
print("Epoch:{} val psnr: {:.3f}".format(i + 1, val_psnr_my / (len(valset))))
writer.add_scalar("val_psnr_my", val_psnr_my / len(valset), i + 1)
| 2,264 | 40.181818 | 109 | py |
FishFSRNet | FishFSRNet-main/parsing/dataset_parsingnet.py | from torch.utils import data
import os
from PIL import Image
from torchvision import transforms
from torchvision.transforms import ToTensor
import numpy
import glob
class Data(data.Dataset):
def __init__(self, root, args, train=False):
        # Return the list of files and folders under the specified path.
self.args = args
if args.scale == 4:
self.imgs_LR_path = os.path.join(root, 'LR_x4')
self.imgs_parsing_path = os.path.join(root, 'global_2_LR_x4')
elif args.scale == 8:
self.imgs_LR_path = os.path.join(root, 'LR')
self.imgs_parsing_path = os.path.join(root, 'global_2_LR')
elif args.scale == 16:
self.imgs_LR_path = os.path.join(root, 'LR_x16')
self.imgs_parsing_path = os.path.join(root, 'global_2_LR_x16')
self.imgs_LR = sorted(
glob.glob(os.path.join(self.imgs_LR_path, '*.png'))
)
self.imgs_parsing = sorted(
glob.glob(os.path.join(self.imgs_parsing_path, '*.png'))
)
self.transform = transforms.ToTensor()
self.train = train
def __getitem__(self, item):
img_path_LR = os.path.join(self.imgs_LR_path, self.imgs_LR[item])
img_path_parsing = os.path.join(self.imgs_parsing_path, self.imgs_parsing[item])
LR = Image.open(img_path_LR)
parsing = Image.open(img_path_parsing)
LR = numpy.array(LR)
parsing = numpy.array(parsing)
LR = ToTensor()(LR)
parsing = ToTensor()(parsing)
filename = os.path.basename(img_path_LR)
return LR, parsing, filename
def __len__(self):
return len(self.imgs_LR)
| 1,638 | 31.78 | 88 | py |
FishFSRNet | FishFSRNet-main/fsr/fishfsrnet.py | import common
import torch.nn.functional as F
import torch.nn as nn
import torch
def fish_block(args, conv=common.default_conv, n_feats=64, PCSR1=False):
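    # A fish stage is a pair of residual units: parsing-conditioned PCSR1 blocks
    # (whose forward takes (x, parsing)) when PCSR1 is enabled, plain ResBlocks otherwise.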
kernel_size = 3
res = []
act = nn.ReLU(True)
if PCSR1:
res.append(common.PCSR1(
conv, n_feats, kernel_size, act=act, res_scale=args.res_scale
))
res.append(common.PCSR1(
conv, n_feats, kernel_size, act=act, res_scale=args.res_scale
))
else:
res.append(common.ResBlock(conv, n_feats, kernel_size, act=act, res_scale=args.res_scale))
res.append(common.ResBlock(conv, n_feats, kernel_size, act=act, res_scale=args.res_scale))
return res
class FISHNET(nn.Module):
def __init__(self, args, conv=common.default_conv):
super(FISHNET, self).__init__()
n_resblocks = 8
n_feats = 64
kernel_size = 3
scale = 8
act = nn.ReLU(True)
self.args = args
# define head module
m_head = [conv(args.n_colors, n_feats, kernel_size)]
if args.refine2:
self.refine2 = nn.Sequential(*[common.Multi_scale_fusion_block(n_feats, scale=8),
common.Multi_scale_fusion_block(n_feats, scale=4),
common.Multi_scale_fusion_block(n_feats, scale=2),
common.Multi_scale_fusion_block(n_feats, scale=2),
common.Multi_scale_fusion_block(n_feats, scale=4),
common.Multi_scale_fusion_block(n_feats, scale=8),
])
# define body module
self.up1 = nn.Sequential(*common.Upsampler(conv, 2, n_feats, act=False))
self.up_stage1 = nn.Sequential(
*fish_block(args, n_feats=args.n_feats, PCSR1=args.PCSR1))
self.up2 = nn.Sequential(*common.Upsampler(conv, 2, n_feats, act=False))
self.up_stage2 = nn.Sequential(
*fish_block(args, n_feats=args.n_feats, PCSR1=args.PCSR1))
self.up3 = nn.Sequential(*common.Upsampler(conv, 2, n_feats, act=False))
self.up_stage3 = nn.Sequential(
*fish_block(args, n_feats=args.n_feats, PCSR1=args.PCSR1))
self.down1 = nn.Sequential(*common.invUpsampler(conv, 2, n_feats, act=False))
self.down_stage1 = nn.Sequential(
*fish_block(args, n_feats=args.n_feats, PCSR1=args.PCSR1))
self.down2 = nn.Sequential(*common.invUpsampler(conv, 2, n_feats, act=False))
self.down_stage2 = nn.Sequential(
*fish_block(args, n_feats=args.n_feats, PCSR1=args.PCSR1))
self.down3 = nn.Sequential(*common.invUpsampler(conv, 2, n_feats, act=False))
self.down_stage3 = nn.Sequential(
*fish_block(args, n_feats=args.n_feats, PCSR1=args.PCSR1))
self.conv_tail1 = nn.Conv2d(in_channels=n_feats * 2, out_channels=n_feats, kernel_size=1, stride=1)
self.conv = conv(n_feats, n_feats, 3)
self.up21 = nn.Sequential(*common.Upsampler(conv, 2, n_feats, act=False))
self.conv_tail2 = nn.Conv2d(in_channels=n_feats * 2, out_channels=n_feats, kernel_size=1, stride=1)
self.up2_stage1 = nn.Sequential(
*fish_block(args, n_feats=args.n_feats, PCSR1=args.PCSR1))
self.up22 = nn.Sequential(*common.Upsampler(conv, 2, n_feats, act=False)) # n_feats*3
self.up2_stage2 = nn.Sequential(
*fish_block(args, n_feats=args.n_feats, PCSR1=args.PCSR1))
self.up23 = nn.Sequential(*common.Upsampler(conv, 2, n_feats, act=False))
self.conv_tail3 = nn.Conv2d(in_channels=n_feats * 2, out_channels=n_feats, kernel_size=1, stride=1)
self.up2_stage3 = nn.Sequential(
*fish_block(args, n_feats=args.n_feats, PCSR1=args.PCSR1))
# define tail module
m_tail = [
conv(n_feats, args.n_colors, kernel_size)
]
self.reduc = common.channelReduction()
self.head = nn.Sequential(*m_head)
self.tail = nn.Sequential(*m_tail)
def forward(self, x, parsing=None):
intp = x
# print(parsing.shape)
if parsing is not None:
p2 = F.interpolate(parsing, scale_factor=2, mode='nearest')
p4 = F.interpolate(parsing, scale_factor=4, mode='nearest')
p8 = F.interpolate(parsing, scale_factor=8, mode='nearest')
# for i in range(len(parsing_list)):
# print(i, parsing_list[i].shape)
x = self.head(intp)
# print(x.shape)
x1 = self.up1(x)
if self.args.PCSR1:
x = self.up_stage1[0](x1, p2)
x = self.up_stage1[1](x, p2)
else:
x = self.up_stage1(x1)
x2 = self.up2(x)
if self.args.PCSR1:
x = self.up_stage2[0](x2, p4)
x = self.up_stage2[1](x, p4)
else:
x = self.up_stage2(x2)
x3 = self.up3(x)
if self.args.PCSR1:
res1 = self.up_stage3[0](x3, p8)
res1 = self.up_stage3[1](res1, p8)
else:
res1 = self.up_stage3(x3)
# if self.args.shift_mean:
# res1 = self.add_mean(res1)
if self.args.refine2:
inp = self.refine2[0](x1, x2, x3, res1)
else:
inp = torch.cat((x3, res1), 1)
inp = self.reduc(inp)
x4 = self.down1(inp)
if self.args.PCSR1:
x = self.down_stage1[0](x4, p4)
x = self.down_stage1[1](x, p4)
else:
x = self.down_stage1(x4)
if self.args.refine2:
inp1 = self.refine2[1](x1, x2, x3, x)
else:
inp1 = torch.cat((x, x2), 1)
inp1 = self.reduc(inp1)
x5 = self.down2(inp1)
if self.args.PCSR1:
x = self.down_stage2[0](x5, p2)
x = self.down_stage2[1](x, p2)
else:
x = self.down_stage2(x5)
if self.args.refine2:
inp2 = self.refine2[2](x1, x2, x3, x)
else:
inp2 = torch.cat((x, x1), 1)
inp2 = self.reduc(inp2)
x6 = self.down3(inp2)
if self.args.PCSR1:
x = self.down_stage3[0](x6, parsing)
x = self.down_stage3[1](x, parsing)
else:
x = self.down_stage3(x6)
if self.args.refine2:
inp3 = self.refine2[3](x6, x5, x4, x)
else:
inp3 = torch.cat((x, x6), 1)
inp3 = self.conv_tail1(inp3)
inp3 = self.conv(inp3)
x = self.up21(inp3)
if self.args.PCSR1:
x = self.up2_stage1[0](x, p2)
x = self.up2_stage1[1](x, p2)
else:
x = self.up2_stage1(x)
if self.args.refine2:
inp4 = self.refine2[4](x6, x5, x4, x)
else:
inp4 = torch.cat((x, x5), 1)
inp4 = self.conv_tail2(inp4)
x = self.up22(inp4)
if self.args.PCSR1:
x = self.up2_stage2[0](x, p4)
x = self.up2_stage2[1](x, p4)
else:
x = self.up2_stage2(x)
if self.args.refine2:
inp5 = self.refine2[5](x6, x5, x4, x)
else:
inp5 = torch.cat((x, x4), 1)
inp5 = self.conv_tail3(inp5)
x = self.up23(inp5)
if self.args.PCSR1:
            res = self.up2_stage3[0](x, p8)
            res = self.up2_stage3[1](res, p8)
else:
res = self.up2_stage3(x)
x = self.tail(res)
return x
| 7,540 | 32.665179 | 107 | py |
FishFSRNet | FishFSRNet-main/fsr/test.py | from option import args
import os
os.environ['CUDA_VISIBLE_DEVICES'] = args.cuda_name
import torch
import dataset_parsing
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import os
import util
import torchvision
from fishfsrnet import FISHNET
net = FISHNET(args)
net = util.prepare(net)
writer = SummaryWriter('./logs/{}'.format(args.writer_name))
testdata = dataset_parsing.Data(root=os.path.join(args.dir_data, args.data_test), args=args, train=False)
testset = DataLoader(testdata, batch_size=1, shuffle=False, num_workers=1)
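# NOTE: '/epoch.pth' below is a placeholder; point it at a trained FishFSRNet checkpoint before running.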
pretrained_dict = torch.load('/epoch.pth', map_location='cuda:0')
net.load_state_dict(pretrained_dict)
net = util.prepare(net)
net.eval()
val_psnr = 0
val_ssim = 0
with torch.no_grad():
os.makedirs(os.path.join(args.save_path, args.writer_name, 'result-test'), exist_ok=True)
net.eval()
for batch, (lr, hr, parsing, filename) in enumerate(testset):
lr, hr, parsing = util.prepare(lr), util.prepare(hr), util.prepare(parsing)
sr = net(lr, parsing)
        psnr1, ssim1 = util.calc_metrics(hr[0].data.cpu(), sr[0].data.cpu(), crop_border=8)
        val_psnr = val_psnr + psnr1
        val_ssim = val_ssim + ssim1
torchvision.utils.save_image(sr[0],
os.path.join(args.save_path, args.writer_name, 'result-test',
'{}'.format(str(filename[0])[:-4] + ".png")))
print("Test psnr: {:.3f}".format(val_psnr / (len(testset))))
    print("Test ssim: {:.4f}".format(val_ssim / (len(testset))))
| 1,520 | 39.026316 | 105 | py |
FishFSRNet | FishFSRNet-main/fsr/cbam.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
class Flatten(nn.Module):
def forward(self, x):
return x.view(x.size(0), -1)
class ChannelGate(nn.Module):
def __init__(self, gate_channels, reduction_ratio=16, pool_types=None):
super(ChannelGate, self).__init__()
if pool_types is None:
pool_types = ['avg', 'max']
self.gate_channels = gate_channels
self.mlp = nn.Sequential(
Flatten(),
nn.Linear(gate_channels, int(gate_channels // reduction_ratio)),
nn.ReLU(),
nn.Linear(int(gate_channels // reduction_ratio), gate_channels)
)
self.pool_types = pool_types
def forward(self, x):
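        # Each pooled descriptor (B, C, 1, 1) passes through the shared MLP; the
        # per-pool logits are summed and a sigmoid yields per-channel gates for x.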
channel_att_sum = None
for pool_type in self.pool_types:
if pool_type == 'avg':
avg_pool = F.avg_pool2d(x, (x.size(2), x.size(3)), stride=(x.size(2), x.size(3)))
channel_att_raw = self.mlp(avg_pool)
elif pool_type == 'max':
max_pool = F.max_pool2d(x, (x.size(2), x.size(3)), stride=(x.size(2), x.size(3)))
channel_att_raw = self.mlp(max_pool)
elif pool_type == 'lp':
lp_pool = F.lp_pool2d(x, 2, (x.size(2), x.size(3)), stride=(x.size(2), x.size(3)))
channel_att_raw = self.mlp(lp_pool)
elif pool_type == 'var':
var_pool = variance_pool(x)
channel_att_raw = self.mlp(var_pool)
elif pool_type == 'lse':
# LSE pool only
lse_pool = logsumexp_2d(x)
channel_att_raw = self.mlp(lse_pool)
if channel_att_sum is None:
channel_att_sum = channel_att_raw
else:
channel_att_sum = channel_att_sum + channel_att_raw
scale = torch.sigmoid(channel_att_sum).unsqueeze(2).unsqueeze(3).expand_as(x)
return x * scale
def logsumexp_2d(tensor):
tensor_flatten = tensor.view(tensor.size(0), tensor.size(1), -1)
s, _ = torch.max(tensor_flatten, dim=2, keepdim=True)
outputs = s + (tensor_flatten - s).exp().sum(dim=2, keepdim=True).log()
return outputs
class ChannelPool(nn.Module):
def forward(self, x):
return torch.cat((torch.max(x, 1)[0].unsqueeze(1), torch.mean(x, 1).unsqueeze(1)), dim=1)
class CSAR_SpatialGate(nn.Module):
def __init__(self, n_feats, gama=2):
super(CSAR_SpatialGate, self).__init__()
self.spatial_layer1 = nn.Conv2d(in_channels=n_feats, out_channels=gama * n_feats, kernel_size=1, stride=1)
self.spatial_layer2 = nn.ReLU()
self.spatial_layer3 = nn.Conv2d(in_channels=gama * n_feats, out_channels=n_feats, kernel_size=1, stride=1)
def forward(self, x):
x_compress = self.spatial_layer1(x)
x_out = self.spatial_layer2(x_compress)
x_out = self.spatial_layer3(x_out)
scale = torch.sigmoid(x_out) # broadcasting
return x * scale
def variance_pool(x):
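    # 'var' pooling for ChannelGate: the variance of each channel over its spatial
    # positions, returned as a (B, C, 1, 1) descriptor like the avg/max poolings.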
my_mean = x.mean(dim=3, keepdim=True).mean(dim=2, keepdim=True)
return (x - my_mean).pow(2).mean(dim=3, keepdim=False).mean(dim=2, keepdim=False).view(x.size()[0], x.size()[1], 1,
1)
| 3,309 | 34.978261 | 119 | py |
FishFSRNet | FishFSRNet-main/fsr/common.py | import torch.nn as nn
import torch
import cbam
import math
def batched_index_select(values, indices):
last_dim = values.shape[-1]
return values.gather(1, indices[:, :, None].expand(-1, -1, last_dim))
class BasicBlock(nn.Sequential):
def __init__(
self, conv, in_channels, out_channels, kernel_size, stride=1, bias=True,
bn=False, act=nn.PReLU()):
m = [conv(in_channels, out_channels, kernel_size, bias=bias)]
if bn:
m.append(nn.BatchNorm2d(out_channels))
if act is not None:
m.append(act)
super(BasicBlock, self).__init__(*m)
def get_parameters(model, bias):
for m in model.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
if bias:
yield m.bias
else:
yield m.weight
elif isinstance(m, nn.BatchNorm2d):
if bias:
yield m.bias
else:
yield m.weight
def default_conv(in_channels, out_channels, kernel_size, stride=1, bias=True):
return nn.Conv2d(
in_channels, out_channels, kernel_size,
stride=stride, padding=(kernel_size // 2), bias=bias)
class ResBlock(nn.Module):
def __init__(
self, conv, n_feats, kernel_size,
bias=True, bn=False, act=nn.ReLU(True), res_scale=1):
super(ResBlock, self).__init__()
m = []
for i in range(2):
m.append(conv(n_feats, n_feats, kernel_size, bias=bias))
if bn:
m.append(nn.BatchNorm2d(n_feats))
if i == 0:
m.append(act)
self.body = nn.Sequential(*m)
self.res_scale = res_scale
def forward(self, x):
res = self.body(x).mul(self.res_scale)
res += x
return res
class Upsampler(nn.Sequential):
def __init__(self, conv, scale, n_feats, bn=False, act=False, bias=True):
m = []
if (scale & (scale - 1)) == 0: # Is scale = 2^n?
for _ in range(int(math.log(scale, 2))):
m.append(conv(n_feats, 4 * n_feats, 3, bias))
m.append(nn.PixelShuffle(2))
if bn:
m.append(nn.BatchNorm2d(n_feats))
if act == 'relu':
m.append(nn.ReLU(True))
elif act == 'prelu':
m.append(nn.PReLU(n_feats))
elif scale == 3:
m.append(conv(n_feats, 9 * n_feats, 3, bias))
m.append(nn.PixelShuffle(3))
if bn:
m.append(nn.BatchNorm2d(n_feats))
if act == 'relu':
m.append(nn.ReLU(True))
elif act == 'prelu':
m.append(nn.PReLU(n_feats))
else:
raise NotImplementedError
super(Upsampler, self).__init__(*m)
class invPixelShuffle(nn.Module):
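    # Inverse of nn.PixelShuffle (space-to-depth): (B, C, H, W) -> (B, C*r*r, H//r, W//r),
    # used by invUpsampler to move back down the resolution ladder.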
def __init__(self, ratio=2):
super(invPixelShuffle, self).__init__()
self.ratio = ratio
def forward(self, tensor):
ratio = self.ratio
b = tensor.size(0)
ch = tensor.size(1)
y = tensor.size(2)
x = tensor.size(3)
assert x % ratio == 0 and y % ratio == 0, 'x, y, ratio : {}, {}, {}'.format(x, y, ratio)
return tensor.view(b, ch, y // ratio, ratio, x // ratio, ratio).permute(0, 1, 3, 5, 2, 4).contiguous().view(b,
-1,
y // ratio,
x // ratio)
class invUpsampler(nn.Sequential):
def __init__(self, conv, scale, n_feat, bn=False, act=False, bias=True):
m = []
if (scale & (scale - 1)) == 0: # Is scale = 2^n?
for _ in range(int(math.log(scale, 2))):
m.append(invPixelShuffle(2))
m.append(conv(n_feat * 4, n_feat, 3, bias))
if bn: m.append(nn.BatchNorm2d(n_feat))
if act: m.append(act())
elif scale == 3:
m.append(invPixelShuffle(3))
m.append(conv(n_feat * 9, n_feat, 3, bias))
if bn: m.append(nn.BatchNorm2d(n_feat))
if act: m.append(act())
else:
raise NotImplementedError
super(invUpsampler, self).__init__(*m)
class Refine(nn.Module):
def __init__(self, n_feats, conv=default_conv):
super(Refine, self).__init__()
kernel_size = 3
act = nn.ReLU(True)
self.conv = nn.Sequential(*[ResBlock(conv, n_feats, kernel_size, act=act),
ResBlock(conv, n_feats, kernel_size, act=act)])
def forward(self, first, second):
        residual = second - first
        res = self.conv(residual)
res = res + second
return res
class Multi_scale_fusion_block(nn.Module):
def __init__(self, n_feats, scale):
super(Multi_scale_fusion_block, self).__init__()
self.scale = scale
if scale == 2:
self.down1 = nn.Conv2d(in_channels=n_feats, out_channels=n_feats, kernel_size=3, stride=2, padding=1)
self.down2 = nn.Sequential(
*[nn.Conv2d(in_channels=n_feats, out_channels=n_feats, kernel_size=3, stride=2, padding=1),
nn.ReLU(True),
nn.Conv2d(in_channels=n_feats, out_channels=n_feats, kernel_size=3, stride=2, padding=1)])
elif scale == 4:
self.down1 = nn.Conv2d(in_channels=n_feats, out_channels=n_feats, kernel_size=3, stride=2, padding=1)
self.up1 = nn.UpsamplingNearest2d(scale_factor=2)
elif scale == 8:
self.up1 = nn.UpsamplingNearest2d(scale_factor=2)
self.up2 = nn.UpsamplingNearest2d(scale_factor=4)
self.refine2 = Refine(n_feats)
self.refine4 = Refine(n_feats)
self.refine8 = Refine(n_feats)
# self.attention = CA(conv=default_conv, n_feats=n_feats, kernel_size=1)
self.attention = cbam.ChannelGate(n_feats, reduction_ratio=4, pool_types=['avg', 'max', 'var'])
self.conv = nn.Conv2d(in_channels=n_feats * 3, out_channels=n_feats, kernel_size=1)
def forward(self, scale2, scale4, scale8, now):
if self.scale == 2:
scale4 = self.down1(scale4)
scale8 = self.down2(scale8)
elif self.scale == 4:
scale8 = self.down1(scale8)
scale2 = self.up1(scale2)
elif self.scale == 8:
scale4 = self.up1(scale4)
scale2 = self.up2(scale2)
feature1 = self.refine2(scale2, now)
feature2 = self.refine4(scale4, now)
feature3 = self.refine8(scale8, now)
fea = torch.cat((feature1, feature2, feature3), 1)
fea = self.conv(fea)
fea = self.attention(fea)
fea = fea + now
return fea
class PCSR1(nn.Module):
def __init__(self, conv, n_feats, kernel_size, bias=True, act=nn.ReLU(True), res_scale=1, gama=2, lamb=4):
super(PCSR1, self).__init__()
# First branch
m = []
for i in range(2):
if i == 0:
m.append(conv(n_feats, n_feats, kernel_size, bias=bias))
m.append(act)
if i == 1:
m.append(conv(n_feats, n_feats, kernel_size, bias=bias))
self.body = nn.Sequential(*m)
self.attention_layer1 = cbam.CSAR_SpatialGate(n_feats, gama=gama)
self.attention_layer2 = cbam.ChannelGate(n_feats, reduction_ratio=lamb, pool_types=['avg', 'max', 'var'])
self.conv = conv(2 * n_feats, n_feats, 1, bias=bias)
self.res_scale = res_scale
# Second branch
self.conv_feature = nn.Sequential(
*[nn.Conv2d(in_channels=n_feats, out_channels=n_feats, kernel_size=3, stride=1, padding=1),
act])
self.conv_parsing = nn.Sequential(
*[nn.Conv2d(in_channels=3, out_channels=n_feats, kernel_size=3, stride=1, padding=1),
act])
self.conv_fusion = nn.Conv2d(in_channels=n_feats * 2, out_channels=n_feats, kernel_size=3, stride=1,
padding=1)
self.attention_fusion = nn.Conv2d(in_channels=n_feats * 2, out_channels=n_feats, kernel_size=1, stride=1)
def forward(self, x, p):
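        # x: feature map (B, n_feats, H, W); p: 3-channel parsing map at the same size.
        # Branch one applies spatial and channel attention to conv features, branch two
        # injects the parsing prior, and a 1x1 fusion precedes the residual connection.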
# First branch
res = self.body(x)
res1 = self.attention_layer1(res)
res2 = self.attention_layer2(res)
res = torch.cat((res1, res2), 1)
res = self.conv(res)
# Second branch
fea = self.conv_feature(x)
par = self.conv_parsing(p)
fea = torch.cat((fea, par), 1)
fea = self.conv_fusion(fea)
fea_fusion = torch.cat((fea, res), 1)
res = self.attention_fusion(fea_fusion)
res += x
return res
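class channelReduction(nn.Module):
    # Assumed helper, not present in the original file: FISHNET (fishfsrnet.py) calls
    # common.channelReduction() to squeeze the concatenation of two 64-channel feature
    # maps back to 64 channels. A minimal 1x1-conv sketch is provided here under that
    # assumption; the authors' actual implementation may differ.
    def __init__(self, in_channels=128, out_channels=64):
        super(channelReduction, self).__init__()
        self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=1)
    def forward(self, x):
        return self.conv(x)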
| 8,996 | 34.007782 | 127 | py |
FishFSRNet | FishFSRNet-main/fsr/util.py |
import torch
import numpy as np
import math
import cv2
def prepare(arg):
if torch.cuda.is_available():
# print(1)
arg = arg.cuda()
return arg
def rgb2ycbcr(img, only_y=True):
'''same as matlab rgb2ycbcr
only_y: only return Y channel
Input:
uint8, [0, 255]
float, [0, 1]
'''
in_img_type = img.dtype
img.astype(np.float32)
if in_img_type != np.uint8:
img *= 255.
# convert
if only_y:
rlt = np.dot(img, [65.481, 128.553, 24.966]) / 255.0 + 16.0
else:
rlt = np.matmul(img, [[65.481, -37.797, 112.0], [128.553, -74.203, -93.786],
[24.966, 112.0, -18.214]]) / 255.0 + [16, 128, 128]
if in_img_type == np.uint8:
rlt = rlt.round()
else:
rlt /= 255.
return rlt.astype(in_img_type)
def calc_metrics(img1, img2, crop_border=8, test_Y=True):
#
# print(img1.shape, img1.shape[2])
img1 = np.transpose(img1, (1, 2, 0))
img2 = np.transpose(img2, (1, 2, 0))
img1 = np.array(img1)
img2 = np.array(img2)
# print(img1.shape, img1.shape[2])
if test_Y and img1.shape[2] == 3: # evaluate on Y channel in YCbCr color space
im1_in = rgb2ycbcr(img1)
im2_in = rgb2ycbcr(img2)
else:
im1_in = img1
im2_in = img2
# print("img1_in.ndim: ", im1_in.ndim)
if im1_in.ndim == 3:
# cropped_im1 = im1_in[crop_border:-crop_border, crop_border:-crop_border, :]
# cropped_im2 = im2_in[crop_border:-crop_border, crop_border:-crop_border, :]
cropped_im1 = im1_in[:, crop_border:-crop_border, crop_border:-crop_border]
cropped_im2 = im2_in[:, crop_border:-crop_border, crop_border:-crop_border]
elif im1_in.ndim == 2:
cropped_im1 = im1_in[crop_border:-crop_border, crop_border:-crop_border]
cropped_im2 = im2_in[crop_border:-crop_border, crop_border:-crop_border]
else:
raise ValueError('Wrong image dimension: {}. Should be 2 or 3.'.format(im1_in.ndim))
# print("cropped: ", cropped_im1.shape, cropped_im2.shape)
psnr = calc_psnr(cropped_im1 * 255, cropped_im2 * 255)
ssim = calc_ssim(cropped_im1 * 255, cropped_im2 * 255)
# print(type(ssim))
return psnr, ssim
def calc_psnr(img1, img2):
# img1 and img2 have range [0, 255]
#
img1 = img1.astype(np.float64)
img2 = img2.astype(np.float64)
img1_np = np.array(img1)
img2_np = np.array(img2)
mse = np.mean((img1_np - img2_np)**2)
if mse == 0:
return float('inf')
return 20 * math.log10(255.0 / math.sqrt(mse))
def ssim(img1, img2):
C1 = (0.01 * 255)**2
C2 = (0.03 * 255)**2
# img1 = img1.astype(np.float64)
# img2 = img2.astype(np.float64)
img1_np = np.array(img1)
img2_np = np.array(img2)
kernel = cv2.getGaussianKernel(11, 1.5)
window = np.outer(kernel, kernel.transpose())
mu1 = cv2.filter2D(img1_np, -1, window)[5:-5, 5:-5] # valid
mu2 = cv2.filter2D(img2_np, -1, window)[5:-5, 5:-5]
mu1_sq = mu1**2
mu2_sq = mu2**2
mu1_mu2 = mu1 * mu2
sigma1_sq = cv2.filter2D(img1_np**2, -1, window)[5:-5, 5:-5] - mu1_sq
sigma2_sq = cv2.filter2D(img2_np**2, -1, window)[5:-5, 5:-5] - mu2_sq
sigma12 = cv2.filter2D(img1_np * img2_np, -1, window)[5:-5, 5:-5] - mu1_mu2
ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) *
(sigma1_sq + sigma2_sq + C2))
return ssim_map.mean()
def calc_ssim(img1, img2):
'''calculate SSIM
the same outputs as MATLAB's
img1, img2: [0, 255]
'''
# print("img2: ", img2.shape)
# img1 = np.transpose(img1, (1, 2, 0))
# img2 = np.transpose(img2, (1, 2, 0))
# print("img2_np_trans", img2.shape)
if not img1.shape == img2.shape:
raise ValueError('Input images must have the same dimensions.')
# print(img1.shape)
if img1.ndim == 2:
return ssim(img1, img2)
elif img1.ndim == 3:
# print(img1.shape[2])
if img1.shape[2] == 3:
ssims = []
for i in range(3):
ssims.append(ssim(img1, img2))
return np.array(ssims).mean()
elif img1.shape[2] == 1:
return ssim(np.squeeze(img1), np.squeeze(img2))
else:
raise ValueError('Wrong input image dimensions.')
| 4,380 | 28.601351 | 92 | py |
FishFSRNet | FishFSRNet-main/fsr/dataset_parsing.py | from torch.utils import data
import os
from PIL import Image
from torchvision.transforms import ToTensor
import numpy
import glob
import random
import numpy as np
def augment(lr, hr, p, hflip=True, rot=True):
# def _augment(img):
# if hflip: img = img[:, ::-1, :]
# if vflip: img = img[::-1, :, :]
# if rot90: img = img.transpose(1, 0, 2)
# return img
if random.random() > 0.5 and hflip:
lr = lr[:, ::-1, :]
hr = hr[:, ::-1, :]
p = p[:, ::-1, :]
# print("hflip")
if rot:
rot_rand = random.random()
if rot_rand > 0.75:
lr = np.rot90(lr, k=1, axes=(0, 1))
hr = np.rot90(hr, k=1, axes=(0, 1))
p = np.rot90(p, k=1, axes=(0, 1))
elif rot_rand > 0.5:
lr = np.rot90(lr, k=2, axes=(0, 1))
hr = np.rot90(hr, k=2, axes=(0, 1))
p = np.rot90(p, k=2, axes=(0, 1))
elif rot_rand > 0.25:
lr = np.rot90(lr, k=3, axes=(0, 1))
hr = np.rot90(hr, k=3, axes=(0, 1))
p = np.rot90(p, k=3, axes=(0, 1))
# print("rot")
return lr, hr, p
class Data(data.Dataset):
def __init__(self, root, args, train=False):
        # Return the list of files and folders under the specified path.
self.args = args
self.imgs_HR_path = os.path.join(root, 'HR')
self.imgs_HR = sorted(
glob.glob(os.path.join(self.imgs_HR_path, '*.png'))
)
if self.args.scale == 8:
self.imgs_LR_path = os.path.join(root, 'LR')
elif self.args.scale == 16:
self.imgs_LR_path = os.path.join(root, 'LR_x16')
elif self.args.scale == 4:
self.imgs_LR_path = os.path.join(root, 'LR_x4')
self.imgs_LR = sorted(
glob.glob(os.path.join(self.imgs_LR_path, '*.png'))
)
if self.args.scale == 8:
self.imgs_parsing_path = os.path.join(root, 'Es_parsing')
elif self.args.scale == 16:
self.imgs_parsing_path = os.path.join(root, 'Es_parsing_x16')
elif self.args.scale == 4:
self.imgs_parsing_path = os.path.join(root, 'Es_parsing_x4')
self.imgs_parsing = sorted(
glob.glob(os.path.join(self.imgs_parsing_path, '*.png'))
)
self.train = train
def __getitem__(self, item):
img_path_LR = os.path.join(self.imgs_LR_path, self.imgs_LR[item])
img_path_HR = os.path.join(self.imgs_HR_path, self.imgs_HR[item])
img_path_parsing = os.path.join(self.imgs_parsing_path, self.imgs_parsing[item])
LR = Image.open(img_path_LR)
HR = Image.open(img_path_HR)
parsing = Image.open(img_path_parsing)
HR = numpy.array(HR)
LR = numpy.array(LR)
parsing = numpy.array(parsing)
if self.args.augment and self.train:
LR, HR, parsing = augment(LR, HR, parsing)
LR = np.ascontiguousarray(LR)
HR = np.ascontiguousarray(HR)
parsing = np.ascontiguousarray(parsing)
HR = ToTensor()(HR)
LR = ToTensor()(LR)
res = ToTensor()(parsing)
filename = os.path.basename(img_path_HR)
return LR, HR, res, filename
def __len__(self):
return len(self.imgs_HR)
| 3,235 | 31.36 | 88 | py |
FishFSRNet | FishFSRNet-main/fsr/main_parsing.py | from option import args
import os
os.environ['CUDA_VISIBLE_DEVICES'] = args.cuda_name
import torch
import torch.optim as optim
import torch.nn as nn
import dataset_parsing
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import util
from fishfsrnet import FISHNET
net = FISHNET(args)
net = util.prepare(net)
# print(net)
writer = SummaryWriter('./logs/{}'.format(args.writer_name))
traindata = dataset_parsing.Data(root=os.path.join(args.dir_data, args.data_train), args=args, train=True)
trainset = DataLoader(traindata, batch_size=args.batch_size, shuffle=True, num_workers=16)
valdata = dataset_parsing.Data(root=os.path.join(args.dir_data, args.data_val), args=args, train=False)
valset = DataLoader(valdata, batch_size=1, shuffle=False, num_workers=1)
testdata = dataset_parsing.Data(root=os.path.join(args.dir_data, args.data_test), args=args, train=False)
testset = DataLoader(testdata, batch_size=1, shuffle=False, num_workers=1)
criterion1 = nn.L1Loss()
optimizer = optim.Adam(params=net.parameters(), lr=args.lr, betas=(0.9, 0.99), eps=1e-8)
for i in range(args.epochs):
net.train()
train_loss = 0
bum = len(trainset)
for batch, (lr, hr, parsing, _) in enumerate(trainset):
lr, hr, parsing = util.prepare(lr), util.prepare(hr), util.prepare(parsing)
sr = net(lr, parsing)
loss = criterion1(sr, hr)
train_loss = train_loss + loss.item()
optimizer.zero_grad()
loss.backward()
optimizer.step()
print("Epoch:{} loss: {:.3f}".format(i + 1, train_loss / (len(trainset)) * 255))
writer.add_scalar('train_loss', train_loss / (len(trainset)) * 255, i + 1)
os.makedirs(os.path.join(args.save_path, args.writer_name), exist_ok=True)
os.makedirs(os.path.join(args.save_path, args.writer_name, 'model'), exist_ok=True)
torch.save(net.state_dict(),
os.path.join(args.save_path, args.writer_name, 'model', 'epoch{}.pth'.format(i + 1)))
net.eval()
val_psnr = 0
val_ssim = 0
os.makedirs(os.path.join(args.save_path, args.writer_name, 'result'), exist_ok=True)
for batch, (lr, hr, parsing, filename) in enumerate(valset):
lr, hr, parsing = util.prepare(lr), util.prepare(hr), util.prepare(parsing)
sr = net(lr, parsing)
psnr_c, ssim_c = util.calc_metrics(hr[0].data.cpu(), sr[0].data.cpu())
val_psnr = val_psnr + psnr_c
val_ssim = val_ssim + ssim_c
print("Epoch:{} val psnr: {:.3f}".format(i + 1, val_psnr / (len(valset))))
writer.add_scalar("val_psnr_DIC", val_psnr / len(valset), i + 1)
writer.add_scalar("val_ssim_DIC", val_ssim / len(valset), i + 1)
| 2,671 | 40.75 | 106 | py |
omni3d | omni3d-main/tools/train_net.py | # Copyright (c) Meta Platforms, Inc. and affiliates
import logging
import os
import sys
import numpy as np
import copy
from collections import OrderedDict
import torch
from torch.nn.parallel import DistributedDataParallel
import torch.distributed as dist
import detectron2.utils.comm as comm
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.engine import (
default_argument_parser,
default_setup,
default_writers,
launch
)
from detectron2.solver import build_lr_scheduler
from detectron2.utils.events import EventStorage
from detectron2.utils.logger import setup_logger
logger = logging.getLogger("cubercnn")
sys.dont_write_bytecode = True
sys.path.append(os.getcwd())
np.set_printoptions(suppress=True)
from cubercnn.solver import build_optimizer, freeze_bn, PeriodicCheckpointerOnlyOne
from cubercnn.config import get_cfg_defaults
from cubercnn.data import (
load_omni3d_json,
DatasetMapper3D,
build_detection_train_loader,
build_detection_test_loader,
get_omni3d_categories,
simple_register
)
from cubercnn.evaluation import (
Omni3DEvaluator, Omni3Deval,
Omni3DEvaluationHelper,
inference_on_dataset
)
from cubercnn.modeling.proposal_generator import RPNWithIgnore
from cubercnn.modeling.roi_heads import ROIHeads3D
from cubercnn.modeling.meta_arch import RCNN3D, build_model
from cubercnn.modeling.backbone import build_dla_from_vision_fpn_backbone
from cubercnn import util, vis, data
import cubercnn.vis.logperf as utils_logperf
MAX_TRAINING_ATTEMPTS = 10
def do_test(cfg, model, iteration='final', storage=None):
filter_settings = data.get_filter_settings_from_cfg(cfg)
filter_settings['visibility_thres'] = cfg.TEST.VISIBILITY_THRES
filter_settings['truncation_thres'] = cfg.TEST.TRUNCATION_THRES
filter_settings['min_height_thres'] = 0.0625
filter_settings['max_depth'] = 1e8
dataset_names_test = cfg.DATASETS.TEST
only_2d = cfg.MODEL.ROI_CUBE_HEAD.LOSS_W_3D == 0.0
output_folder = os.path.join(cfg.OUTPUT_DIR, "inference", 'iter_{}'.format(iteration))
eval_helper = Omni3DEvaluationHelper(
dataset_names_test,
filter_settings,
output_folder,
iter_label=iteration,
only_2d=only_2d,
)
for dataset_name in dataset_names_test:
"""
Cycle through each dataset and test them individually.
This loop keeps track of each per-image evaluation result,
so that it doesn't need to be re-computed for the collective.
"""
'''
Distributed Cube R-CNN inference
'''
data_loader = build_detection_test_loader(cfg, dataset_name)
results_json = inference_on_dataset(model, data_loader)
if comm.is_main_process():
'''
Individual dataset evaluation
'''
eval_helper.add_predictions(dataset_name, results_json)
eval_helper.save_predictions(dataset_name)
eval_helper.evaluate(dataset_name)
'''
Optionally, visualize some instances
'''
instances = torch.load(os.path.join(output_folder, dataset_name, 'instances_predictions.pth'))
log_str = vis.visualize_from_instances(
instances, data_loader.dataset, dataset_name,
cfg.INPUT.MIN_SIZE_TEST, os.path.join(output_folder, dataset_name),
MetadataCatalog.get('omni3d_model').thing_classes, iteration
)
logger.info(log_str)
if comm.is_main_process():
'''
Summarize each Omni3D Evaluation metric
'''
eval_helper.summarize_all()
def do_train(cfg, model, dataset_id_to_unknown_cats, dataset_id_to_src, resume=False):
max_iter = cfg.SOLVER.MAX_ITER
do_eval = cfg.TEST.EVAL_PERIOD > 0
model.train()
optimizer = build_optimizer(cfg, model)
scheduler = build_lr_scheduler(cfg, optimizer)
# bookkeeping
checkpointer = DetectionCheckpointer(model, cfg.OUTPUT_DIR, optimizer=optimizer, scheduler=scheduler)
periodic_checkpointer = PeriodicCheckpointerOnlyOne(checkpointer, cfg.SOLVER.CHECKPOINT_PERIOD, max_iter=max_iter)
writers = default_writers(cfg.OUTPUT_DIR, max_iter) if comm.is_main_process() else []
# create the dataloader
data_mapper = DatasetMapper3D(cfg, is_train=True)
data_loader = build_detection_train_loader(cfg, mapper=data_mapper, dataset_id_to_src=dataset_id_to_src)
# give the mapper access to dataset_ids
data_mapper.dataset_id_to_unknown_cats = dataset_id_to_unknown_cats
if cfg.MODEL.WEIGHTS_PRETRAIN != '':
# load ONLY the model, no checkpointables.
checkpointer.load(cfg.MODEL.WEIGHTS_PRETRAIN, checkpointables=[])
# determine the starting iteration, if resuming
start_iter = (checkpointer.resume_or_load(cfg.MODEL.WEIGHTS, resume=resume).get("iteration", -1) + 1)
iteration = start_iter
logger.info("Starting training from iteration {}".format(start_iter))
if not cfg.MODEL.USE_BN:
freeze_bn(model)
world_size = comm.get_world_size()
# if the loss diverges for more than the below TOLERANCE
# as a percent of the iterations, the training will stop.
# This is only enabled if "STABILIZE" is on, which
# prevents a single example from exploding the training.
iterations_success = 0
iterations_explode = 0
# when loss > recent_loss * TOLERANCE, then it could be a
# diverging/failing model, which we should skip all updates for.
TOLERANCE = 4.0
GAMMA = 0.02 # rolling average weight gain
recent_loss = None # stores the most recent loss magnitude
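    # With GAMMA = 0.02 the rolling mean moves by only ~2% per batch, so a single
    # outlier cannot raise it much; a batch whose loss exceeds TOLERANCE x the mean
    # is clipped and its gradient update skipped instead of polluting the weights.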
data_iter = iter(data_loader)
# model.parameters() is surprisingly expensive at 150ms, so cache it
named_params = list(model.named_parameters())
with EventStorage(start_iter) as storage:
while True:
data = next(data_iter)
storage.iter = iteration
# forward
loss_dict = model(data)
losses = sum(loss_dict.values())
# reduce
loss_dict_reduced = {k: v.item() for k, v in allreduce_dict(loss_dict).items()}
losses_reduced = sum(loss for loss in loss_dict_reduced.values())
# sync up
comm.synchronize()
if recent_loss is None:
# init recent loss fairly high
recent_loss = losses_reduced*2.0
# Is stabilization enabled, and loss high or NaN?
diverging_model = cfg.MODEL.STABILIZE > 0 and \
(losses_reduced > recent_loss*TOLERANCE or \
not (np.isfinite(losses_reduced)) or np.isnan(losses_reduced))
if diverging_model:
# clip and warn the user.
losses = losses.clip(0, 1)
logger.warning('Skipping gradient update due to higher than normal loss {:.2f} vs. rolling mean {:.2f}, Dict-> {}'.format(
losses_reduced, recent_loss, loss_dict_reduced
))
else:
# compute rolling average of loss
recent_loss = recent_loss * (1-GAMMA) + losses_reduced*GAMMA
if comm.is_main_process():
# send loss scalars to tensorboard.
storage.put_scalars(total_loss=losses_reduced, **loss_dict_reduced)
# backward and step
optimizer.zero_grad()
losses.backward()
# if the loss is not too high,
# we still want to check gradients.
if not diverging_model:
if cfg.MODEL.STABILIZE > 0:
for name, param in named_params:
if param.grad is not None:
diverging_model = torch.isnan(param.grad).any() or torch.isinf(param.grad).any()
if diverging_model:
logger.warning('Skipping gradient update due to inf/nan detection, loss is {}'.format(loss_dict_reduced))
break
# convert exploded to a float, then allreduce it,
# if any process gradients have exploded then we skip together.
diverging_model = torch.tensor(float(diverging_model)).cuda()
if world_size > 1:
dist.all_reduce(diverging_model)
# sync up
comm.synchronize()
if diverging_model > 0:
optimizer.zero_grad()
iterations_explode += 1
else:
optimizer.step()
storage.put_scalar("lr", optimizer.param_groups[0]["lr"], smoothing_hint=False)
iterations_success += 1
total_iterations = iterations_success + iterations_explode
# Only retry if we have trained sufficiently long relative
# to the latest checkpoint, which we would otherwise revert back to.
retry = (iterations_explode / total_iterations) >= cfg.MODEL.STABILIZE \
and (total_iterations > cfg.SOLVER.CHECKPOINT_PERIOD*1/2)
# Important for dist training. Convert to a float, then allreduce it,
# if any process gradients have exploded then we must skip together.
retry = torch.tensor(float(retry)).cuda()
if world_size > 1:
dist.all_reduce(retry)
# sync up
comm.synchronize()
# any processes need to retry
if retry > 0:
# instead of failing, try to resume the iteration instead.
logger.warning('!! Restarting training at {} iters. Exploding loss {:d}% of iters !!'.format(
iteration, int(100*(iterations_explode / (iterations_success + iterations_explode)))
))
# send these to garbage, for ideally a cleaner restart.
del data_mapper
del data_loader
del optimizer
del checkpointer
del periodic_checkpointer
return False
scheduler.step()
# Evaluate only when the loss is not diverging.
if not (diverging_model > 0) and \
(do_eval and ((iteration + 1) % cfg.TEST.EVAL_PERIOD) == 0 and iteration != (max_iter - 1)):
logger.info('Starting test for iteration {}'.format(iteration+1))
do_test(cfg, model, iteration=iteration+1, storage=storage)
comm.synchronize()
if not cfg.MODEL.USE_BN:
freeze_bn(model)
# Flush events
if iteration - start_iter > 5 and ((iteration + 1) % 20 == 0 or iteration == max_iter - 1):
for writer in writers:
writer.write()
# Do not bother checkpointing if there is potential for a diverging model.
if not (diverging_model > 0) and \
(iterations_explode / total_iterations) < 0.5*cfg.MODEL.STABILIZE:
periodic_checkpointer.step(iteration)
iteration += 1
if iteration >= max_iter:
break
# success
return True
def setup(args):
"""
Create configs and perform basic setups.
"""
cfg = get_cfg()
get_cfg_defaults(cfg)
config_file = args.config_file
# store locally if needed
if config_file.startswith(util.CubeRCNNHandler.PREFIX):
config_file = util.CubeRCNNHandler._get_local_path(util.CubeRCNNHandler, config_file)
cfg.merge_from_file(config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
default_setup(cfg, args)
setup_logger(output=cfg.OUTPUT_DIR, distributed_rank=comm.get_rank(), name="cubercnn")
filter_settings = data.get_filter_settings_from_cfg(cfg)
for dataset_name in cfg.DATASETS.TRAIN:
simple_register(dataset_name, filter_settings, filter_empty=True)
dataset_names_test = cfg.DATASETS.TEST
for dataset_name in dataset_names_test:
if not(dataset_name in cfg.DATASETS.TRAIN):
simple_register(dataset_name, filter_settings, filter_empty=False)
return cfg
def main(args):
cfg = setup(args)
logger.info('Preprocessing Training Datasets')
filter_settings = data.get_filter_settings_from_cfg(cfg)
priors = None
if args.eval_only:
category_path = os.path.join(util.file_parts(args.config_file)[0], 'category_meta.json')
# store locally if needed
if category_path.startswith(util.CubeRCNNHandler.PREFIX):
category_path = util.CubeRCNNHandler._get_local_path(util.CubeRCNNHandler, category_path)
metadata = util.load_json(category_path)
# register the categories
thing_classes = metadata['thing_classes']
id_map = {int(key):val for key, val in metadata['thing_dataset_id_to_contiguous_id'].items()}
MetadataCatalog.get('omni3d_model').thing_classes = thing_classes
MetadataCatalog.get('omni3d_model').thing_dataset_id_to_contiguous_id = id_map
else:
# setup and join the data.
dataset_paths = [os.path.join('datasets', 'Omni3D', name + '.json') for name in cfg.DATASETS.TRAIN]
datasets = data.Omni3D(dataset_paths, filter_settings=filter_settings)
# determine the meta data given the datasets used.
data.register_and_store_model_metadata(datasets, cfg.OUTPUT_DIR, filter_settings)
thing_classes = MetadataCatalog.get('omni3d_model').thing_classes
dataset_id_to_contiguous_id = MetadataCatalog.get('omni3d_model').thing_dataset_id_to_contiguous_id
'''
It may be useful to keep track of which categories are annotated/known
for each dataset in use, in case a method wants to use this information.
'''
infos = datasets.dataset['info']
if type(infos) == dict:
infos = [datasets.dataset['info']]
dataset_id_to_unknown_cats = {}
possible_categories = set(i for i in range(cfg.MODEL.ROI_HEADS.NUM_CLASSES + 1))
dataset_id_to_src = {}
for info in infos:
dataset_id = info['id']
known_category_training_ids = set()
if not dataset_id in dataset_id_to_src:
dataset_id_to_src[dataset_id] = info['source']
for id in info['known_category_ids']:
if id in dataset_id_to_contiguous_id:
known_category_training_ids.add(dataset_id_to_contiguous_id[id])
# determine and store the unknown categories.
unknown_categories = possible_categories - known_category_training_ids
dataset_id_to_unknown_cats[dataset_id] = unknown_categories
# log the per-dataset categories
logger.info('Available categories for {}'.format(info['name']))
logger.info([thing_classes[i] for i in (possible_categories & known_category_training_ids)])
# compute priors given the training data.
priors = util.compute_priors(cfg, datasets)
'''
The training loops can attempt to train for N times.
This catches a divergence or other failure modes.
'''
remaining_attempts = MAX_TRAINING_ATTEMPTS
while remaining_attempts > 0:
# build the training model.
model = build_model(cfg, priors=priors)
if remaining_attempts == MAX_TRAINING_ATTEMPTS:
# log the first attempt's settings.
logger.info("Model:\n{}".format(model))
if args.eval_only:
# skip straight to eval mode
DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
cfg.MODEL.WEIGHTS, resume=args.resume
)
return do_test(cfg, model)
# setup distributed training.
distributed = comm.get_world_size() > 1
if distributed:
model = DistributedDataParallel(
model, device_ids=[comm.get_local_rank()],
broadcast_buffers=False, find_unused_parameters=True
)
# train full model, potentially with resume.
if do_train(cfg, model, dataset_id_to_unknown_cats, dataset_id_to_src, resume=args.resume):
break
else:
# allow restart when a model fails to train.
remaining_attempts -= 1
del model
if remaining_attempts == 0:
# Exit if the model could not finish without diverging.
raise ValueError('Training failed')
return do_test(cfg, model)
def allreduce_dict(input_dict, average=True):
"""
Reduce the values in the dictionary from all processes so that process with rank
0 has the reduced results.
Args:
input_dict (dict): inputs to be reduced. All the values must be scalar CUDA Tensor.
average (bool): whether to do average or sum
Returns:
a dict with the same keys as input_dict, after reduction.
"""
world_size = comm.get_world_size()
if world_size < 2:
return input_dict
with torch.no_grad():
names = []
values = []
# sort the keys so that they are consistent across processes
for k in sorted(input_dict.keys()):
names.append(k)
values.append(input_dict[k])
values = torch.stack(values, dim=0)
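        # stack all loss terms into one tensor so a single all_reduce call syncs them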
dist.all_reduce(values)
if average:
# only main process gets accumulated, so only divide by
# world_size in this case
values /= world_size
reduced_dict = {k: v for k, v in zip(names, values)}
return reduced_dict
if __name__ == "__main__":
args = default_argument_parser().parse_args()
print("Command Line Args:", args)
launch(
main,
args.num_gpus,
num_machines=args.num_machines,
machine_rank=args.machine_rank,
dist_url=args.dist_url,
args=(args,),
) | 18,388 | 35.056863 | 138 | py |
omni3d | omni3d-main/demo/demo.py | # Copyright (c) Meta Platforms, Inc. and affiliates
import logging
import os
import argparse
import sys
import numpy as np
from collections import OrderedDict
import torch
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.engine import default_argument_parser, default_setup, launch
from detectron2.data import transforms as T
logger = logging.getLogger("detectron2")
sys.dont_write_bytecode = True
sys.path.append(os.getcwd())
np.set_printoptions(suppress=True)
from cubercnn.config import get_cfg_defaults
from cubercnn.modeling.proposal_generator import RPNWithIgnore
from cubercnn.modeling.roi_heads import ROIHeads3D
from cubercnn.modeling.meta_arch import RCNN3D, build_model
from cubercnn.modeling.backbone import build_dla_from_vision_fpn_backbone
from cubercnn import util, vis
def do_test(args, cfg, model):
list_of_ims = util.list_files(os.path.join(args.input_folder, ''), '*')
model.eval()
focal_length = args.focal_length
principal_point = args.principal_point
thres = args.threshold
output_dir = cfg.OUTPUT_DIR
min_size = cfg.INPUT.MIN_SIZE_TEST
max_size = cfg.INPUT.MAX_SIZE_TEST
augmentations = T.AugmentationList([T.ResizeShortestEdge(min_size, max_size, "choice")])
util.mkdir_if_missing(output_dir)
category_path = os.path.join(util.file_parts(args.config_file)[0], 'category_meta.json')
# store locally if needed
if category_path.startswith(util.CubeRCNNHandler.PREFIX):
category_path = util.CubeRCNNHandler._get_local_path(util.CubeRCNNHandler, category_path)
metadata = util.load_json(category_path)
cats = metadata['thing_classes']
for path in list_of_ims:
im_name = util.file_parts(path)[1]
im = util.imread(path)
if im is None:
continue
image_shape = im.shape[:2] # h, w
h, w = image_shape
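        # no focal length given: assume a default of 4.0 in NDC units and convert
        # to pixels for this image, f_px = f_ndc * h / 2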
        if args.focal_length == 0:
focal_length_ndc = 4.0
focal_length = focal_length_ndc * h / 2
if len(principal_point) == 0:
px, py = w/2, h/2
else:
px, py = principal_point
K = np.array([
[focal_length, 0.0, px],
[0.0, focal_length, py],
[0.0, 0.0, 1.0]
])
aug_input = T.AugInput(im)
_ = augmentations(aug_input)
image = aug_input.image
batched = [{
'image': torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1))).cuda(),
'height': image_shape[0], 'width': image_shape[1], 'K': K
}]
dets = model(batched)[0]['instances']
n_det = len(dets)
meshes = []
meshes_text = []
if n_det > 0:
for idx, (corners3D, center_cam, center_2D, dimensions, pose, score, cat_idx) in enumerate(zip(
dets.pred_bbox3D, dets.pred_center_cam, dets.pred_center_2D, dets.pred_dimensions,
dets.pred_pose, dets.scores, dets.pred_classes
)):
# skip
if score < thres:
continue
cat = cats[cat_idx]
bbox3D = center_cam.tolist() + dimensions.tolist()
meshes_text.append('{} {:.2f}'.format(cat, score))
color = [c/255.0 for c in util.get_color(idx)]
box_mesh = util.mesh_cuboid(bbox3D, pose.tolist(), color=color)
meshes.append(box_mesh)
print('File: {} with {} dets'.format(im_name, len(meshes)))
if len(meshes) > 0:
im_drawn_rgb, im_topdown, _ = vis.draw_scene_view(im, K, meshes, text=meshes_text, scale=im.shape[0], blend_weight=0.5, blend_weight_overlay=0.85)
if args.display:
im_concat = np.concatenate((im_drawn_rgb, im_topdown), axis=1)
vis.imshow(im_concat)
util.imwrite(im_drawn_rgb, os.path.join(output_dir, im_name+'_boxes.jpg'))
util.imwrite(im_topdown, os.path.join(output_dir, im_name+'_novel.jpg'))
else:
util.imwrite(im, os.path.join(output_dir, im_name+'_boxes.jpg'))
def setup(args):
"""
Create configs and perform basic setups.
"""
cfg = get_cfg()
get_cfg_defaults(cfg)
config_file = args.config_file
# store locally if needed
if config_file.startswith(util.CubeRCNNHandler.PREFIX):
config_file = util.CubeRCNNHandler._get_local_path(util.CubeRCNNHandler, config_file)
cfg.merge_from_file(config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
default_setup(cfg, args)
return cfg
def main(args):
cfg = setup(args)
model = build_model(cfg)
logger.info("Model:\n{}".format(model))
DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
cfg.MODEL.WEIGHTS, resume=True
)
with torch.no_grad():
do_test(args, cfg, model)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
epilog=None, formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument("--config-file", default="", metavar="FILE", help="path to config file")
parser.add_argument('--input-folder', type=str, help='list of image folders to process', required=True)
parser.add_argument("--focal-length", type=float, default=0, help="focal length for image inputs (in px)")
parser.add_argument("--principal-point", type=float, default=[], nargs=2, help="principal point for image inputs (in px)")
parser.add_argument("--threshold", type=float, default=0.25, help="threshold on score for visualizing")
parser.add_argument("--display", default=False, action="store_true", help="Whether to show the images in matplotlib",)
parser.add_argument("--eval-only", default=True, action="store_true", help="perform evaluation only")
parser.add_argument("--num-gpus", type=int, default=1, help="number of gpus *per machine*")
parser.add_argument("--num-machines", type=int, default=1, help="total number of machines")
parser.add_argument(
"--machine-rank", type=int, default=0, help="the rank of this machine (unique per machine)"
)
port = 2 ** 15 + 2 ** 14 + hash(os.getuid() if sys.platform != "win32" else 1) % 2 ** 14
parser.add_argument(
"--dist-url",
default="tcp://127.0.0.1:{}".format(port),
help="initialization URL for pytorch distributed backend. See "
"https://pytorch.org/docs/stable/distributed.html for details.",
)
parser.add_argument(
"opts",
help="Modify config options by adding 'KEY VALUE' pairs at the end of the command. "
"See config references at "
"https://detectron2.readthedocs.io/modules/config.html#config-references",
default=None,
nargs=argparse.REMAINDER,
)
args = parser.parse_args()
print("Command Line Args:", args)
launch(
main,
args.num_gpus,
num_machines=args.num_machines,
machine_rank=args.machine_rank,
dist_url=args.dist_url,
args=(args,),
) | 7,175 | 34.349754 | 158 | py |
omni3d | omni3d-main/cubercnn/solver/build.py | # Copyright (c) Meta Platforms, Inc. and affiliates
import torch
from typing import Any, Dict, List, Set
from detectron2.solver.build import maybe_add_gradient_clipping
def build_optimizer(cfg, model):
norm_module_types = (
torch.nn.BatchNorm1d,
torch.nn.BatchNorm2d,
torch.nn.BatchNorm3d,
torch.nn.SyncBatchNorm,
torch.nn.GroupNorm,
torch.nn.InstanceNorm1d,
torch.nn.InstanceNorm2d,
torch.nn.InstanceNorm3d,
torch.nn.LayerNorm,
torch.nn.LocalResponseNorm,
)
params: List[Dict[str, Any]] = []
memo: Set[torch.nn.parameter.Parameter] = set()
for module in model.modules():
for key, value in module.named_parameters(recurse=False):
if not value.requires_grad:
continue
# Avoid duplicating parameters
if value in memo:
continue
memo.add(value)
lr = cfg.SOLVER.BASE_LR
weight_decay = cfg.SOLVER.WEIGHT_DECAY
if isinstance(module, norm_module_types) and (cfg.SOLVER.WEIGHT_DECAY_NORM is not None):
weight_decay = cfg.SOLVER.WEIGHT_DECAY_NORM
elif key == "bias":
if (cfg.SOLVER.BIAS_LR_FACTOR is not None):
lr = cfg.SOLVER.BASE_LR * cfg.SOLVER.BIAS_LR_FACTOR
if (cfg.SOLVER.WEIGHT_DECAY_BIAS is not None):
weight_decay = cfg.SOLVER.WEIGHT_DECAY_BIAS
# these params do not need weight decay at all
# TODO parameterize these in configs instead.
if key in ['priors_dims_per_cat', 'priors_z_scales', 'priors_z_stats']:
weight_decay = 0.0
params += [{"params": [value], "lr": lr, "weight_decay": weight_decay}]
if cfg.SOLVER.TYPE == 'sgd':
optimizer = torch.optim.SGD(
params,
cfg.SOLVER.BASE_LR,
momentum=cfg.SOLVER.MOMENTUM,
nesterov=cfg.SOLVER.NESTEROV,
weight_decay=cfg.SOLVER.WEIGHT_DECAY
)
elif cfg.SOLVER.TYPE == 'adam':
optimizer = torch.optim.Adam(params, cfg.SOLVER.BASE_LR, eps=1e-02)
elif cfg.SOLVER.TYPE == 'adam+amsgrad':
optimizer = torch.optim.Adam(params, cfg.SOLVER.BASE_LR, amsgrad=True, eps=1e-02)
elif cfg.SOLVER.TYPE == 'adamw':
optimizer = torch.optim.AdamW(params, cfg.SOLVER.BASE_LR, eps=1e-02)
elif cfg.SOLVER.TYPE == 'adamw+amsgrad':
optimizer = torch.optim.AdamW(params, cfg.SOLVER.BASE_LR, amsgrad=True, eps=1e-02)
else:
raise ValueError('{} is not supported as an optimizer.'.format(cfg.SOLVER.TYPE))
optimizer = maybe_add_gradient_clipping(cfg, optimizer)
return optimizer
def freeze_bn(network):
for _, module in network.named_modules():
if isinstance(module, torch.nn.BatchNorm2d):
module.eval()
module.track_running_stats = False
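# Illustrative sketch (not part of the original file): typical wiring of the two
# helpers above inside a training script. `cfg` is assumed to be a detectron2
# config that already carries the SOLVER.* keys referenced in build_optimizer,
# and `model` a built Cube R-CNN model.
def _example_solver_setup(cfg, model):
    optimizer = build_optimizer(cfg, model)
    # optionally keep BatchNorm layers fixed, e.g. for small-batch fine-tuning
    freeze_bn(model)
    return optimizer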
| 2,963 | 37.493506 | 100 | py |
omni3d | omni3d-main/cubercnn/evaluation/omni3d_evaluation.py | # Copyright (c) Meta Platforms, Inc. and affiliates
import contextlib
import copy
import datetime
import io
import itertools
import json
import logging
import os
import time
from collections import defaultdict
from typing import List, Union
from typing import Tuple
import numpy as np
import pycocotools.mask as maskUtils
import torch
from detectron2.utils.memory import retry_if_cuda_oom
from detectron2.data import MetadataCatalog, DatasetCatalog
from detectron2.evaluation.coco_evaluation import COCOEvaluator
from detectron2.structures import BoxMode
from detectron2.utils.file_io import PathManager
from detectron2.utils.logger import create_small_table, log_every_n_seconds
from pycocotools.cocoeval import COCOeval
from tabulate import tabulate
from detectron2.utils.comm import get_world_size, is_main_process
import detectron2.utils.comm as comm
from detectron2.evaluation import (
DatasetEvaluators, inference_context, DatasetEvaluator
)
from collections import OrderedDict, abc
from contextlib import ExitStack, contextmanager
from torch import nn
import logging
from cubercnn.data import Omni3D
from pytorch3d import _C
import torch.nn.functional as F
from pytorch3d.ops.iou_box3d import _box_planes, _box_triangles
import cubercnn.vis.logperf as utils_logperf
from cubercnn.data import (
get_omni3d_categories,
simple_register
)
"""
This file contains
* Omni3DEvaluationHelper: a helper object to accumulate and summarize evaluation results
* Omni3DEval: a wrapper around COCOeval to perform 3D bounding evaluation in the detection setting
* Omni3DEvaluator: a wrapper around COCOEvaluator to collect results on each dataset
* Omni3DParams: parameters for the evaluation API
"""
logger = logging.getLogger(__name__)
# Defines the max cross of len(dts) * len(gts)
# which we will attempt to compute on a GPU.
# Fallback is safer computation on a CPU.
# 0 is disabled on GPU.
MAX_DTS_CROSS_GTS_FOR_IOU3D = 0
def _check_coplanar(boxes: torch.Tensor, eps: float = 1e-4) -> torch.BoolTensor:
"""
Checks that plane vertices are coplanar.
Returns a bool tensor of size B, where True indicates a box is coplanar.
"""
faces = torch.tensor(_box_planes, dtype=torch.int64, device=boxes.device)
verts = boxes.index_select(index=faces.view(-1), dim=1)
B = boxes.shape[0]
P, V = faces.shape
# (B, P, 4, 3) -> (B, P, 3)
v0, v1, v2, v3 = verts.reshape(B, P, V, 3).unbind(2)
# Compute the normal
e0 = F.normalize(v1 - v0, dim=-1)
e1 = F.normalize(v2 - v0, dim=-1)
normal = F.normalize(torch.cross(e0, e1, dim=-1), dim=-1)
# Check the fourth vertex is also on the same plane
mat1 = (v3 - v0).view(B, 1, -1) # (B, 1, P*3)
mat2 = normal.view(B, -1, 1) # (B, P*3, 1)
return (mat1.bmm(mat2).abs() < eps).view(B)
def _check_nonzero(boxes: torch.Tensor, eps: float = 1e-8) -> torch.BoolTensor:
"""
Checks that the sides of the box have a non zero area.
Returns a bool tensor of size B, where True indicates a box is nonzero.
"""
faces = torch.tensor(_box_triangles, dtype=torch.int64, device=boxes.device)
verts = boxes.index_select(index=faces.view(-1), dim=1)
B = boxes.shape[0]
T, V = faces.shape
# (B, T, 3, 3) -> (B, T, 3)
v0, v1, v2 = verts.reshape(B, T, V, 3).unbind(2)
normals = torch.cross(v1 - v0, v2 - v0, dim=-1) # (B, T, 3)
face_areas = normals.norm(dim=-1) / 2
return (face_areas > eps).all(1).view(B)
def box3d_overlap(
boxes_dt: torch.Tensor, boxes_gt: torch.Tensor,
eps_coplanar: float = 1e-4, eps_nonzero: float = 1e-8
) -> torch.Tensor:
"""
Computes the intersection of 3D boxes_dt and boxes_gt.
Inputs boxes_dt, boxes_gt are tensors of shape (B, 8, 3)
(where B doesn't have to be the same for boxes_dt and boxes_gt),
containing the 8 corners of the boxes, as follows:
(4) +---------+. (5)
| ` . | ` .
| (0) +---+-----+ (1)
| | | |
(7) +-----+---+. (6)|
` . | ` . |
(3) ` +---------+ (2)
NOTE: Throughout this implementation, we assume that boxes
are defined by their 8 corners exactly in the order specified in the
diagram above for the function to give correct results. In addition
the vertices on each plane must be coplanar.
As an alternative to the diagram, this is a unit bounding
box which has the correct vertex ordering:
box_corner_vertices = [
[0, 0, 0],
[1, 0, 0],
[1, 1, 0],
[0, 1, 0],
[0, 0, 1],
[1, 0, 1],
[1, 1, 1],
[0, 1, 1],
]
Args:
boxes_dt: tensor of shape (N, 8, 3) of the coordinates of the 1st boxes
boxes_gt: tensor of shape (M, 8, 3) of the coordinates of the 2nd boxes
Returns:
iou: (N, M) tensor of the intersection over union which is
defined as: `iou = vol / (vol1 + vol2 - vol)`
"""
# Make sure predictions are coplanar and nonzero
invalid_coplanar = ~_check_coplanar(boxes_dt, eps=eps_coplanar)
invalid_nonzero = ~_check_nonzero(boxes_dt, eps=eps_nonzero)
ious = _C.iou_box3d(boxes_dt, boxes_gt)[1]
# Offending boxes are set to zero IoU
if invalid_coplanar.any():
ious[invalid_coplanar] = 0
print('Warning: skipping {:d} non-coplanar boxes at eval.'.format(int(invalid_coplanar.float().sum())))
if invalid_nonzero.any():
ious[invalid_nonzero] = 0
print('Warning: skipping {:d} zero volume boxes at eval.'.format(int(invalid_nonzero.float().sum())))
return ious
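# Illustrative sketch (not part of the original file): box3d_overlap expects
# (N, 8, 3) corner tensors in the vertex order documented above. Two unit cubes
# offset by half a unit along x intersect in a volume of 0.5 and have a union
# of 1.5, so the returned IoU should be close to 1/3 (values are an assumption
# for this sanity check, not taken from the original code).
def _example_box3d_overlap_usage():
    unit_cube = torch.tensor([
        [0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 1.0, 0.0], [0.0, 1.0, 0.0],
        [0.0, 0.0, 1.0], [1.0, 0.0, 1.0], [1.0, 1.0, 1.0], [0.0, 1.0, 1.0],
    ])
    boxes_dt = unit_cube.unsqueeze(0)                                    # (1, 8, 3)
    boxes_gt = (unit_cube + torch.tensor([0.5, 0.0, 0.0])).unsqueeze(0)  # (1, 8, 3)
    return box3d_overlap(boxes_dt, boxes_gt)                             # (1, 1) IoU matrix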
class Omni3DEvaluationHelper:
def __init__(self,
dataset_names,
filter_settings,
output_folder,
iter_label='-',
only_2d=False,
):
"""
A helper class to initialize, evaluate and summarize Omni3D metrics.
The evaluator relies on the detectron2 MetadataCatalog for keeping track
of category names and contiguous IDs. Hence, it is important to set
these variables appropriately.
# (list[str]) the category names in their contiguous order
MetadataCatalog.get('omni3d_model').thing_classes = ...
# (dict[int: int]) the mapping from Omni3D category IDs to the contiguous order
MetadataCatalog.get('omni3d_model').thing_dataset_id_to_contiguous_id
Args:
dataset_names (list[str]): the individual dataset splits for evaluation
filter_settings (dict): the filter settings used for evaluation, see
cubercnn/data/datasets.py get_filter_settings_from_cfg
output_folder (str): the output folder where results can be stored to disk.
iter_label (str): an optional iteration/label used within the summary
only_2d (bool): whether the evaluation mode should be 2D or 2D and 3D.
"""
self.dataset_names = dataset_names
self.filter_settings = filter_settings
self.output_folder = output_folder
self.iter_label = iter_label
self.only_2d = only_2d
# Each dataset evaluator is stored here
self.evaluators = OrderedDict()
# These are the main evaluation results
self.results = OrderedDict()
        # These store per-dataset results to be printed
self.results_analysis = OrderedDict()
self.results_omni3d = OrderedDict()
self.overall_imgIds = set()
self.overall_catIds = set()
# These store the evaluations for each category and area,
# concatenated from ALL evaluated datasets. Doing so avoids
# the need to re-compute them when accumulating results.
self.evals_per_cat_area2D = {}
self.evals_per_cat_area3D = {}
self.output_folders = {
dataset_name: os.path.join(self.output_folder, dataset_name)
for dataset_name in dataset_names
}
for dataset_name in self.dataset_names:
# register any datasets that need it
if MetadataCatalog.get(dataset_name).get('json_file') is None:
simple_register(dataset_name, filter_settings, filter_empty=False)
# create an individual dataset evaluator
self.evaluators[dataset_name] = Omni3DEvaluator(
dataset_name, output_dir=self.output_folders[dataset_name],
filter_settings=self.filter_settings, only_2d=self.only_2d,
eval_prox=('Objectron' in dataset_name or 'SUNRGBD' in dataset_name),
distributed=False, # actual evaluation should be single process
)
self.evaluators[dataset_name].reset()
self.overall_imgIds.update(set(self.evaluators[dataset_name]._omni_api.getImgIds()))
self.overall_catIds.update(set(self.evaluators[dataset_name]._omni_api.getCatIds()))
def add_predictions(self, dataset_name, predictions):
"""
Adds predictions to the evaluator for dataset_name. This can be any number of
predictions, including all predictions passed in at once or in batches.
Args:
dataset_name (str): the dataset split name which the predictions belong to
predictions (list[dict]): each item in the list is a dict as follows:
{
"image_id": <int> the unique image identifier from Omni3D,
"K": <np.array> 3x3 intrinsics matrix for the image,
"width": <int> image width,
"height": <int> image height,
"instances": [
{
"image_id": <int> the unique image identifier from Omni3D,
"category_id": <int> the contiguous category prediction IDs,
which can be mapped from Omni3D's category ID's using
MetadataCatalog.get('omni3d_model').thing_dataset_id_to_contiguous_id
"bbox": [float] 2D box as [x1, y1, x2, y2] used for IoU2D,
"score": <float> the confidence score for the object,
"depth": <float> the depth of the center of the object,
"bbox3D": list[list[float]] 8x3 corner vertices used for IoU3D,
}
...
]
}
"""
# concatenate incoming predictions
self.evaluators[dataset_name]._predictions += predictions
def save_predictions(self, dataset_name):
"""
Saves the predictions from dataset_name to disk, in a self.output_folder.
Args:
dataset_name (str): the dataset split name which should be saved.
"""
# save predictions to disk
output_folder_dataset = self.output_folders[dataset_name]
PathManager.mkdirs(output_folder_dataset)
file_path = os.path.join(output_folder_dataset, "instances_predictions.pth")
with PathManager.open(file_path, "wb") as f:
torch.save(self.evaluators[dataset_name]._predictions, f)
def evaluate(self, dataset_name):
"""
Runs the evaluation for an individual dataset split, assuming all
predictions have been passed in.
Args:
            dataset_name (str): the dataset split name which should be evaluated.
"""
if not dataset_name in self.results:
# run evaluation and cache
self.results[dataset_name] = self.evaluators[dataset_name].evaluate()
results = self.results[dataset_name]
logger.info('\n'+results['log_str_2D'].replace('mode=2D', '{} iter={} mode=2D'.format(dataset_name, self.iter_label)))
# store the partially accumulated evaluations per category per area
for key, item in results['bbox_2D_evals_per_cat_area'].items():
if not key in self.evals_per_cat_area2D:
self.evals_per_cat_area2D[key] = []
self.evals_per_cat_area2D[key] += item
if not self.only_2d:
# store the partially accumulated evaluations per category per area
for key, item in results['bbox_3D_evals_per_cat_area'].items():
if not key in self.evals_per_cat_area3D:
self.evals_per_cat_area3D[key] = []
self.evals_per_cat_area3D[key] += item
logger.info('\n'+results['log_str_3D'].replace('mode=3D', '{} iter={} mode=3D'.format(dataset_name, self.iter_label)))
# full model category names
category_names = self.filter_settings['category_names']
# The set of categories present in the dataset; there should be no duplicates
categories = {cat for cat in category_names if 'AP-{}'.format(cat) in results['bbox_2D']}
assert len(categories) == len(set(categories))
# default are all NaN
general_2D, general_3D, omni_2D, omni_3D = (np.nan,) * 4
# 2D and 3D performance for categories in dataset; and log
general_2D = np.mean([results['bbox_2D']['AP-{}'.format(cat)] for cat in categories])
if not self.only_2d:
general_3D = np.mean([results['bbox_3D']['AP-{}'.format(cat)] for cat in categories])
# 2D and 3D performance on Omni3D categories
omni3d_dataset_categories = get_omni3d_categories(dataset_name) # dataset-specific categories
if len(omni3d_dataset_categories - categories) == 0: # omni3d_dataset_categories is a subset of categories
omni_2D = np.mean([results['bbox_2D']['AP-{}'.format(cat)] for cat in omni3d_dataset_categories])
if not self.only_2d:
omni_3D = np.mean([results['bbox_3D']['AP-{}'.format(cat)] for cat in omni3d_dataset_categories])
self.results_omni3d[dataset_name] = {"iters": self.iter_label, "AP2D": omni_2D, "AP3D": omni_3D}
# Performance analysis
extras_AP15, extras_AP25, extras_AP50, extras_APn, extras_APm, extras_APf = (np.nan,)*6
if not self.only_2d:
extras_AP15 = results['bbox_3D']['AP15']
extras_AP25 = results['bbox_3D']['AP25']
extras_AP50 = results['bbox_3D']['AP50']
extras_APn = results['bbox_3D']['APn']
extras_APm = results['bbox_3D']['APm']
extras_APf = results['bbox_3D']['APf']
self.results_analysis[dataset_name] = {
"iters": self.iter_label,
"AP2D": general_2D, "AP3D": general_3D,
"AP3D@15": extras_AP15, "AP3D@25": extras_AP25, "AP3D@50": extras_AP50,
"AP3D-N": extras_APn, "AP3D-M": extras_APm, "AP3D-F": extras_APf
}
# Performance per category
results_cat = OrderedDict()
for cat in category_names:
cat_2D, cat_3D = (np.nan,) * 2
if 'AP-{}'.format(cat) in results['bbox_2D']:
cat_2D = results['bbox_2D']['AP-{}'.format(cat)]
if not self.only_2d:
cat_3D = results['bbox_3D']['AP-{}'.format(cat)]
if not np.isnan(cat_2D) or not np.isnan(cat_3D):
results_cat[cat] = {"AP2D": cat_2D, "AP3D": cat_3D}
utils_logperf.print_ap_category_histogram(dataset_name, results_cat)
def summarize_all(self,):
'''
        Report collective metrics when possible for the Omni3D dataset.
        This uses pre-computed evaluation results from each dataset,
        which were aggregated and cached while evaluating individually.
        This process simply re-accumulates and summarizes them.
'''
# First, double check that we have all the evaluations
for dataset_name in self.dataset_names:
if not dataset_name in self.results:
self.evaluate(dataset_name)
thing_classes = MetadataCatalog.get('omni3d_model').thing_classes
catId2contiguous = MetadataCatalog.get('omni3d_model').thing_dataset_id_to_contiguous_id
ordered_things = [thing_classes[catId2contiguous[cid]] for cid in self.overall_catIds]
categories = set(ordered_things)
evaluator2D = Omni3Deval(mode='2D')
evaluator2D.params.catIds = list(self.overall_catIds)
evaluator2D.params.imgIds = list(self.overall_imgIds)
evaluator2D.evalImgs = True
evaluator2D.evals_per_cat_area = self.evals_per_cat_area2D
evaluator2D._paramsEval = copy.deepcopy(evaluator2D.params)
evaluator2D.accumulate()
summarize_str2D = evaluator2D.summarize()
precisions = evaluator2D.eval['precision']
metrics = ["AP", "AP50", "AP75", "AP95", "APs", "APm", "APl"]
results2D = {
metric: float(
evaluator2D.stats[idx] * 100 if evaluator2D.stats[idx] >= 0 else "nan"
)
for idx, metric in enumerate(metrics)
}
for idx, name in enumerate(ordered_things):
precision = precisions[:, :, idx, 0, -1]
precision = precision[precision > -1]
ap = np.mean(precision) if precision.size else float("nan")
results2D.update({"AP-" + "{}".format(name): float(ap * 100)})
evaluator3D = Omni3Deval(mode='3D')
evaluator3D.params.catIds = list(self.overall_catIds)
evaluator3D.params.imgIds = list(self.overall_imgIds)
evaluator3D.evalImgs = True
evaluator3D.evals_per_cat_area = self.evals_per_cat_area3D
evaluator3D._paramsEval = copy.deepcopy(evaluator3D.params)
evaluator3D.accumulate()
summarize_str3D = evaluator3D.summarize()
precisions = evaluator3D.eval['precision']
metrics = ["AP", "AP15", "AP25", "AP50", "APn", "APm", "APf"]
results3D = {
metric: float(
evaluator3D.stats[idx] * 100 if evaluator3D.stats[idx] >= 0 else "nan"
)
for idx, metric in enumerate(metrics)
}
for idx, name in enumerate(ordered_things):
precision = precisions[:, :, idx, 0, -1]
precision = precision[precision > -1]
ap = np.mean(precision) if precision.size else float("nan")
results3D.update({"AP-" + "{}".format(name): float(ap * 100)})
# All concat categories
general_2D, general_3D = (np.nan,) * 2
general_2D = np.mean([results2D['AP-{}'.format(cat)] for cat in categories])
if not self.only_2d:
general_3D = np.mean([results3D['AP-{}'.format(cat)] for cat in categories])
# Analysis performance
extras_AP15, extras_AP25, extras_AP50, extras_APn, extras_APm, extras_APf = (np.nan,) * 6
if not self.only_2d:
extras_AP15 = results3D['AP15']
extras_AP25 = results3D['AP25']
extras_AP50 = results3D['AP50']
extras_APn = results3D['APn']
extras_APm = results3D['APm']
extras_APf = results3D['APf']
self.results_analysis["<Concat>"] = {
"iters": self.iter_label,
"AP2D": general_2D, "AP3D": general_3D,
"AP3D@15": extras_AP15, "AP3D@25": extras_AP25, "AP3D@50": extras_AP50,
"AP3D-N": extras_APn, "AP3D-M": extras_APm, "AP3D-F": extras_APf
}
# Omni3D Outdoor performance
omni_2D, omni_3D = (np.nan,) * 2
omni3d_outdoor_categories = get_omni3d_categories("omni3d_out")
if len(omni3d_outdoor_categories - categories) == 0:
omni_2D = np.mean([results2D['AP-{}'.format(cat)] for cat in omni3d_outdoor_categories])
if not self.only_2d:
omni_3D = np.mean([results3D['AP-{}'.format(cat)] for cat in omni3d_outdoor_categories])
self.results_omni3d["Omni3D_Out"] = {"iters": self.iter_label, "AP2D": omni_2D, "AP3D": omni_3D}
# Omni3D Indoor performance
omni_2D, omni_3D = (np.nan,) * 2
omni3d_indoor_categories = get_omni3d_categories("omni3d_in")
if len(omni3d_indoor_categories - categories) == 0:
omni_2D = np.mean([results2D['AP-{}'.format(cat)] for cat in omni3d_indoor_categories])
if not self.only_2d:
omni_3D = np.mean([results3D['AP-{}'.format(cat)] for cat in omni3d_indoor_categories])
self.results_omni3d["Omni3D_In"] = {"iters": self.iter_label, "AP2D": omni_2D, "AP3D": omni_3D}
# Omni3D performance
omni_2D, omni_3D = (np.nan,) * 2
omni3d_categories = get_omni3d_categories("omni3d")
if len(omni3d_categories - categories) == 0:
omni_2D = np.mean([results2D['AP-{}'.format(cat)] for cat in omni3d_categories])
if not self.only_2d:
omni_3D = np.mean([results3D['AP-{}'.format(cat)] for cat in omni3d_categories])
self.results_omni3d["Omni3D"] = {"iters": self.iter_label, "AP2D": omni_2D, "AP3D": omni_3D}
# Per-category performance for the cumulative datasets
results_cat = OrderedDict()
for cat in self.filter_settings['category_names']:
cat_2D, cat_3D = (np.nan,) * 2
if 'AP-{}'.format(cat) in results2D:
cat_2D = results2D['AP-{}'.format(cat)]
if not self.only_2d:
cat_3D = results3D['AP-{}'.format(cat)]
if not np.isnan(cat_2D) or not np.isnan(cat_3D):
results_cat[cat] = {"AP2D": cat_2D, "AP3D": cat_3D}
utils_logperf.print_ap_category_histogram("<Concat>", results_cat)
utils_logperf.print_ap_analysis_histogram(self.results_analysis)
utils_logperf.print_ap_omni_histogram(self.results_omni3d)
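# Illustrative sketch (not part of the original file): end-to-end use of the
# helper above. `dataset_names`, `filter_settings` and `predictions_per_dataset`
# are assumed to exist; each prediction dict follows the format documented in
# `add_predictions` (image_id, K, width, height and a list of instances).
def _example_evaluation_helper_flow(dataset_names, filter_settings, predictions_per_dataset):
    helper = Omni3DEvaluationHelper(
        dataset_names, filter_settings, output_folder='./output/eval', iter_label='final',
    )
    for dataset_name in dataset_names:
        helper.add_predictions(dataset_name, predictions_per_dataset[dataset_name])
        helper.save_predictions(dataset_name)
        helper.evaluate(dataset_name)
    # cross-dataset Omni3D summary, re-using the cached per-dataset evaluations
    helper.summarize_all()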
def inference_on_dataset(model, data_loader):
"""
Run model on the data_loader.
Also benchmark the inference speed of `model.__call__` accurately.
The model will be used in eval mode.
Args:
model (callable): a callable which takes an object from
`data_loader` and returns some outputs.
If it's an nn.Module, it will be temporarily set to `eval` mode.
If you wish to evaluate a model in `training` mode instead, you can
wrap the given model and override its behavior of `.eval()` and `.train()`.
data_loader: an iterable object with a length.
The elements it generates will be the inputs to the model.
    Returns:
        list[dict]: the accumulated per-image predictions in COCO-style json
        format, gathered onto the main process (other processes return []).
    """
num_devices = get_world_size()
distributed = num_devices > 1
logger.info("Start inference on {} batches".format(len(data_loader)))
total = len(data_loader) # inference data loader must have a fixed length
num_warmup = min(5, total - 1)
start_time = time.perf_counter()
total_data_time = 0
total_compute_time = 0
total_eval_time = 0
inference_json = []
with ExitStack() as stack:
if isinstance(model, nn.Module):
stack.enter_context(inference_context(model))
stack.enter_context(torch.no_grad())
start_data_time = time.perf_counter()
for idx, inputs in enumerate(data_loader):
total_data_time += time.perf_counter() - start_data_time
if idx == num_warmup:
start_time = time.perf_counter()
total_data_time = 0
total_compute_time = 0
total_eval_time = 0
start_compute_time = time.perf_counter()
outputs = model(inputs)
if torch.cuda.is_available():
torch.cuda.synchronize()
total_compute_time += time.perf_counter() - start_compute_time
start_eval_time = time.perf_counter()
for input, output in zip(inputs, outputs):
prediction = {
"image_id": input["image_id"],
"K": input["K"],
"width": input["width"],
"height": input["height"],
}
# convert to json format
instances = output["instances"].to('cpu')
prediction["instances"] = instances_to_coco_json(instances, input["image_id"])
# store in overall predictions
inference_json.append(prediction)
total_eval_time += time.perf_counter() - start_eval_time
iters_after_start = idx + 1 - num_warmup * int(idx >= num_warmup)
data_seconds_per_iter = total_data_time / iters_after_start
compute_seconds_per_iter = total_compute_time / iters_after_start
eval_seconds_per_iter = total_eval_time / iters_after_start
total_seconds_per_iter = (time.perf_counter() - start_time) / iters_after_start
if idx >= num_warmup * 2 or compute_seconds_per_iter > 5:
eta = datetime.timedelta(seconds=int(total_seconds_per_iter * (total - idx - 1)))
log_every_n_seconds(
logging.INFO,
(
f"Inference done {idx + 1}/{total}. "
f"Dataloading: {data_seconds_per_iter:.4f} s/iter. "
f"Inference: {compute_seconds_per_iter:.4f} s/iter. "
f"Eval: {eval_seconds_per_iter:.4f} s/iter. "
f"Total: {total_seconds_per_iter:.4f} s/iter. "
f"ETA={eta}"
),
n=5,
)
start_data_time = time.perf_counter()
# Measure the time only for this worker (before the synchronization barrier)
total_time = time.perf_counter() - start_time
total_time_str = str(datetime.timedelta(seconds=total_time))
# NOTE this format is parsed by grep
logger.info(
"Total inference time: {} ({:.6f} s / iter per device, on {} devices)".format(
total_time_str, total_time / (total - num_warmup), num_devices
)
)
total_compute_time_str = str(datetime.timedelta(seconds=int(total_compute_time)))
logger.info(
"Total inference pure compute time: {} ({:.6f} s / iter per device, on {} devices)".format(
total_compute_time_str, total_compute_time / (total - num_warmup), num_devices
)
)
if distributed:
comm.synchronize()
inference_json = comm.gather(inference_json, dst=0)
inference_json = list(itertools.chain(*inference_json))
if not comm.is_main_process():
return []
return inference_json
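# Illustrative sketch (not part of the original file): the list returned by
# inference_on_dataset can be fed directly to the helper defined above.
# `model`, `data_loader`, `dataset_name` and a constructed `helper` are assumed.
def _example_inference_then_evaluate(model, data_loader, dataset_name, helper):
    predictions = inference_on_dataset(model, data_loader)
    helper.add_predictions(dataset_name, predictions)
    helper.evaluate(dataset_name)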
class Omni3DEvaluator(COCOEvaluator):
def __init__(
self,
dataset_name,
tasks=None,
distributed=True,
output_dir=None,
*,
max_dets_per_image=None,
use_fast_impl=False,
eval_prox=False,
only_2d=False,
filter_settings={},
):
"""
Args:
dataset_name (str): name of the dataset to be evaluated.
It must have either the following corresponding metadata:
"json_file": the path to the COCO format annotation
Or it must be in detectron2's standard dataset format
so it can be converted to COCO format automatically.
tasks (tuple[str]): tasks that can be evaluated under the given
configuration. For now, support only for "bbox".
distributed (True): if True, will collect results from all ranks and run evaluation
in the main process.
Otherwise, will only evaluate the results in the current process.
output_dir (str): optional, an output directory to dump all
results predicted on the dataset. The dump contains two files:
1. "instances_predictions.pth" a file that can be loaded with `torch.load` and
contains all the results in the format they are produced by the model.
2. "coco_instances_results.json" a json file in COCO's result format.
max_dets_per_image (int): limit on the maximum number of detections per image.
                By default in COCO, this limit is 100, but it can be customized
to be greater, as is needed in evaluation metrics AP fixed and AP pool
(see https://arxiv.org/pdf/2102.01066.pdf)
This doesn't affect keypoint evaluation.
use_fast_impl (bool): use a fast but **unofficial** implementation to compute AP.
Although the results should be very close to the official implementation in COCO
API, it is still recommended to compute results with the official API for use in
papers. The faster implementation also uses more RAM.
eval_prox (bool): whether to perform proximity evaluation. For datasets that are not
exhaustively annotated.
only_2d (bool): evaluates only 2D performance if set to True
            filter_settings (dict): settings used to filter the dataset annotations.
"""
self._logger = logging.getLogger(__name__)
self._distributed = distributed
self._output_dir = output_dir
self._use_fast_impl = use_fast_impl
self._eval_prox = eval_prox
self._only_2d = only_2d
self._filter_settings = filter_settings
# COCOeval requires the limit on the number of detections per image (maxDets) to be a list
# with at least 3 elements. The default maxDets in COCOeval is [1, 10, 100], in which the
# 3rd element (100) is used as the limit on the number of detections per image when
# evaluating AP. COCOEvaluator expects an integer for max_dets_per_image, so for COCOeval,
# we reformat max_dets_per_image into [1, 10, max_dets_per_image], based on the defaults.
if max_dets_per_image is None:
max_dets_per_image = [1, 10, 100]
else:
max_dets_per_image = [1, 10, max_dets_per_image]
self._max_dets_per_image = max_dets_per_image
self._tasks = tasks
self._cpu_device = torch.device("cpu")
self._metadata = MetadataCatalog.get(dataset_name)
json_file = PathManager.get_local_path(self._metadata.json_file)
with contextlib.redirect_stdout(io.StringIO()):
self._omni_api = Omni3D([json_file], filter_settings)
# Test set json files do not contain annotations (evaluation must be
# performed using the COCO evaluation server).
self._do_evaluation = "annotations" in self._omni_api.dataset
def process(self, inputs, outputs):
"""
Args:
inputs: the inputs to a model (e.g., GeneralizedRCNN).
It is a list of dict. Each dict corresponds to an image and
contains keys like "height", "width", "file_name", "image_id".
outputs: the outputs of a model. It is a list of dicts with key
"instances" that contains :class:`Instances`.
"""
# Optional image keys to keep when available
img_keys_optional = ["p2"]
for input, output in zip(inputs, outputs):
prediction = {
"image_id": input["image_id"],
"K": input["K"],
"width": input["width"],
"height": input["height"],
}
# store optional keys when available
for img_key in img_keys_optional:
if img_key in input:
prediction.update({img_key: input[img_key]})
# already in COCO format
if type(output["instances"]) == list:
prediction["instances"] = output["instances"]
# tensor instances format
else:
instances = output["instances"].to(self._cpu_device)
prediction["instances"] = instances_to_coco_json(
instances, input["image_id"]
)
if len(prediction) > 1:
self._predictions.append(prediction)
def _derive_omni_results(self, omni_eval, iou_type, mode, class_names=None):
"""
Derive the desired score numbers from summarized COCOeval.
Args:
omni_eval (None or Omni3Deval): None represents no predictions from model.
iou_type (str):
mode (str): either "2D" or "3D"
            class_names (None or list[str]): if provided, will use it to compute
per-category AP.
Returns:
a dict of {metric name: score}
"""
assert mode in ["2D", "3D"]
metrics = {
"2D": ["AP", "AP50", "AP75", "AP95", "APs", "APm", "APl"],
"3D": ["AP", "AP15", "AP25", "AP50", "APn", "APm", "APf"],
}[mode]
if iou_type != "bbox":
raise ValueError("Support only for bbox evaluation.")
if omni_eval is None:
            self._logger.warning("No predictions from the model!")
return {metric: float("nan") for metric in metrics}
# the standard metrics
results = {
metric: float(
omni_eval.stats[idx] * 100 if omni_eval.stats[idx] >= 0 else "nan"
)
for idx, metric in enumerate(metrics)
}
self._logger.info(
"Evaluation results for {} in {} mode: \n".format(iou_type, mode)
+ create_small_table(results)
)
if not np.isfinite(sum(results.values())):
self._logger.info("Some metrics cannot be computed and is shown as NaN.")
if class_names is None or len(class_names) <= 1:
return results
# Compute per-category AP
# from https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L222-L252 # noqa
precisions = omni_eval.eval["precision"]
# precision has dims (iou, recall, cls, area range, max dets)
assert len(class_names) == precisions.shape[2]
results_per_category = []
for idx, name in enumerate(class_names):
# area range index 0: all area ranges
# max dets index -1: typically 100 per image
precision = precisions[:, :, idx, 0, -1]
precision = precision[precision > -1]
ap = np.mean(precision) if precision.size else float("nan")
results_per_category.append(("{}".format(name), float(ap * 100)))
# tabulate it
N_COLS = min(6, len(results_per_category) * 2)
results_flatten = list(itertools.chain(*results_per_category))
results_table = itertools.zip_longest(
*[results_flatten[i::N_COLS] for i in range(N_COLS)]
)
table = tabulate(
results_table,
tablefmt="pipe",
floatfmt=".3f",
headers=["category", "AP"] * (N_COLS // 2),
numalign="left",
)
self._logger.info(
"Per-category {} AP in {} mode: \n".format(iou_type, mode) + table
)
results.update({"AP-" + name: ap for name, ap in results_per_category})
return results
def _eval_predictions(self, predictions, img_ids=None):
"""
Evaluate predictions. Fill self._results with the metrics of the tasks.
"""
self._logger.info("Preparing results for COCO format ...")
omni_results = list(itertools.chain(*[x["instances"] for x in predictions]))
tasks = self._tasks or self._tasks_from_predictions(omni_results)
omni3d_global_categories = MetadataCatalog.get('omni3d_model').thing_classes
# the dataset results will store only the categories that are present
# in the corresponding dataset, all others will be dropped.
dataset_results = []
# unmap the category ids for COCO
if hasattr(self._metadata, "thing_dataset_id_to_contiguous_id"):
dataset_id_to_contiguous_id = (
self._metadata.thing_dataset_id_to_contiguous_id
)
all_contiguous_ids = list(dataset_id_to_contiguous_id.values())
num_classes = len(all_contiguous_ids)
assert (
min(all_contiguous_ids) == 0
and max(all_contiguous_ids) == num_classes - 1
)
reverse_id_mapping = {v: k for k, v in dataset_id_to_contiguous_id.items()}
for result in omni_results:
category_id = result["category_id"]
assert category_id < num_classes, (
f"A prediction has class={category_id}, "
f"but the dataset only has {num_classes} classes and "
f"predicted class id should be in [0, {num_classes - 1}]."
)
result["category_id"] = reverse_id_mapping[category_id]
cat_name = omni3d_global_categories[category_id]
if cat_name in self._metadata.thing_classes:
dataset_results.append(result)
# replace the results with the filtered
# instances that are in vocabulary.
omni_results = dataset_results
if self._output_dir:
file_path = os.path.join(self._output_dir, "omni_instances_results.json")
self._logger.info("Saving results to {}".format(file_path))
with PathManager.open(file_path, "w") as f:
f.write(json.dumps(omni_results))
f.flush()
if not self._do_evaluation:
self._logger.info("Annotations are not available for evaluation.")
return
self._logger.info(
"Evaluating predictions with {} COCO API...".format(
"unofficial" if self._use_fast_impl else "official"
)
)
for task in sorted(tasks):
assert task in {"bbox"}, f"Got unknown task: {task}!"
evals, log_strs = (
_evaluate_predictions_on_omni(
self._omni_api,
omni_results,
task,
img_ids=img_ids,
only_2d=self._only_2d,
eval_prox=self._eval_prox,
)
if len(omni_results) > 0
else None # cocoapi does not handle empty results very well
)
modes = evals.keys()
for mode in modes:
res = self._derive_omni_results(
evals[mode],
task,
mode,
class_names=self._metadata.get("thing_classes"),
)
self._results[task + "_" + format(mode)] = res
self._results[task + "_" + format(mode) + '_evalImgs'] = evals[mode].evalImgs
self._results[task + "_" + format(mode) + '_evals_per_cat_area'] = evals[mode].evals_per_cat_area
self._results["log_str_2D"] = log_strs["2D"]
if "3D" in log_strs:
self._results["log_str_3D"] = log_strs["3D"]
def _evaluate_predictions_on_omni(
omni_gt,
omni_results,
iou_type,
img_ids=None,
only_2d=False,
eval_prox=False,
):
"""
Evaluate the coco results using COCOEval API.
"""
assert len(omni_results) > 0
log_strs, evals = {}, {}
omni_dt = omni_gt.loadRes(omni_results)
modes = ["2D"] if only_2d else ["2D", "3D"]
for mode in modes:
omni_eval = Omni3Deval(
omni_gt, omni_dt, iouType=iou_type, mode=mode, eval_prox=eval_prox
)
if img_ids is not None:
omni_eval.params.imgIds = img_ids
omni_eval.evaluate()
omni_eval.accumulate()
log_str = omni_eval.summarize()
log_strs[mode] = log_str
evals[mode] = omni_eval
return evals, log_strs
def instances_to_coco_json(instances, img_id):
num_instances = len(instances)
if num_instances == 0:
return []
boxes = BoxMode.convert(
instances.pred_boxes.tensor.numpy(), BoxMode.XYXY_ABS, BoxMode.XYWH_ABS
).tolist()
scores = instances.scores.tolist()
classes = instances.pred_classes.tolist()
if hasattr(instances, "pred_bbox3D"):
bbox3D = instances.pred_bbox3D.tolist()
center_cam = instances.pred_center_cam.tolist()
center_2D = instances.pred_center_2D.tolist()
dimensions = instances.pred_dimensions.tolist()
pose = instances.pred_pose.tolist()
else:
# dummy
bbox3D = np.ones([num_instances, 8, 3]).tolist()
center_cam = np.ones([num_instances, 3]).tolist()
center_2D = np.ones([num_instances, 2]).tolist()
dimensions = np.ones([num_instances, 3]).tolist()
pose = np.ones([num_instances, 3, 3]).tolist()
results = []
for k in range(num_instances):
result = {
"image_id": img_id,
"category_id": classes[k],
"bbox": boxes[k],
"score": scores[k],
"depth": np.array(bbox3D[k])[:, 2].mean(),
"bbox3D": bbox3D[k],
"center_cam": center_cam[k],
"center_2D": center_2D[k],
"dimensions": dimensions[k],
"pose": pose[k],
}
results.append(result)
return results
# ---------------------------------------------------------------------
# Omni3DParams
# ---------------------------------------------------------------------
class Omni3DParams:
"""
Params for the Omni evaluation API
"""
def setDet2DParams(self):
self.imgIds = []
self.catIds = []
# np.arange causes trouble. the data point on arange is slightly larger than the true value
self.iouThrs = np.linspace(
0.5, 0.95, int(np.round((0.95 - 0.5) / 0.05)) + 1, endpoint=True
)
self.recThrs = np.linspace(
0.0, 1.00, int(np.round((1.00 - 0.0) / 0.01)) + 1, endpoint=True
)
self.maxDets = [1, 10, 100]
self.areaRng = [
[0 ** 2, 1e5 ** 2],
[0 ** 2, 32 ** 2],
[32 ** 2, 96 ** 2],
[96 ** 2, 1e5 ** 2],
]
self.areaRngLbl = ["all", "small", "medium", "large"]
self.useCats = 1
def setDet3DParams(self):
self.imgIds = []
self.catIds = []
# np.arange causes trouble. the data point on arange is slightly larger than the true value
self.iouThrs = np.linspace(
0.05, 0.5, int(np.round((0.5 - 0.05) / 0.05)) + 1, endpoint=True
)
self.recThrs = np.linspace(
0.0, 1.00, int(np.round((1.00 - 0.0) / 0.01)) + 1, endpoint=True
)
self.maxDets = [1, 10, 100]
self.areaRng = [[0, 1e5], [0, 10], [10, 35], [35, 1e5]]
self.areaRngLbl = ["all", "near", "medium", "far"]
self.useCats = 1
def __init__(self, mode="2D"):
"""
Args:
iouType (str): defines 2D or 3D evaluation parameters.
One of {"2D", "3D"}
"""
if mode == "2D":
self.setDet2DParams()
elif mode == "3D":
self.setDet3DParams()
else:
raise Exception("mode %s not supported" % (mode))
self.iouType = "bbox"
self.mode = mode
# the proximity threshold defines the neighborhood
# when evaluating on non-exhaustively annotated datasets
self.proximity_thresh = 0.3
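# Illustrative sketch (not part of the original file): the presets above yield
# evenly spaced IoU thresholds (an assumption read off the np.linspace calls):
# 0.50, 0.55, ..., 0.95 in 2D mode and 0.05, 0.10, ..., 0.50 in 3D mode.
def _example_omni3d_params_thresholds():
    return Omni3DParams(mode="2D").iouThrs, Omni3DParams(mode="3D").iouThrs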
# ---------------------------------------------------------------------
# Omni3Deval
# ---------------------------------------------------------------------
class Omni3Deval(COCOeval):
"""
Wraps COCOeval for 2D or 3D box evaluation depending on mode
"""
def __init__(
self, cocoGt=None, cocoDt=None, iouType="bbox", mode="2D", eval_prox=False
):
"""
Initialize COCOeval using coco APIs for Gt and Dt
Args:
cocoGt: COCO object with ground truth annotations
cocoDt: COCO object with detection results
iouType: (str) defines the evaluation type. Supports only "bbox" now.
mode: (str) defines whether to evaluate 2D or 3D performance.
One of {"2D", "3D"}
eval_prox: (bool) if True, performs "Proximity Evaluation", i.e.
evaluates detections in the proximity of the ground truth2D boxes.
This is used for datasets which are not exhaustively annotated.
"""
if not iouType:
print("iouType not specified. use default iouType bbox")
elif iouType != "bbox":
print("no support for %s iouType" % (iouType))
self.mode = mode
if mode not in ["2D", "3D"]:
raise Exception("mode %s not supported" % (mode))
self.eval_prox = eval_prox
self.cocoGt = cocoGt # ground truth COCO API
self.cocoDt = cocoDt # detections COCO API
# per-image per-category evaluation results [KxAxI] elements
self.evalImgs = defaultdict(list)
self.eval = {} # accumulated evaluation results
self._gts = defaultdict(list) # gt for evaluation
self._dts = defaultdict(list) # dt for evaluation
self.params = Omni3DParams(mode) # parameters
self._paramsEval = {} # parameters for evaluation
self.stats = [] # result summarization
self.ious = {} # ious between all gts and dts
if cocoGt is not None:
self.params.imgIds = sorted(cocoGt.getImgIds())
self.params.catIds = sorted(cocoGt.getCatIds())
self.evals_per_cat_area = None
def _prepare(self):
"""
Prepare ._gts and ._dts for evaluation based on params
"""
p = self.params
if p.useCats:
gts = self.cocoGt.loadAnns(self.cocoGt.getAnnIds(imgIds=p.imgIds, catIds=p.catIds))
dts = self.cocoDt.loadAnns(self.cocoDt.getAnnIds(imgIds=p.imgIds, catIds=p.catIds))
else:
gts = self.cocoGt.loadAnns(self.cocoGt.getAnnIds(imgIds=p.imgIds))
dts = self.cocoDt.loadAnns(self.cocoDt.getAnnIds(imgIds=p.imgIds))
# set ignore flag
ignore_flag = "ignore2D" if self.mode == "2D" else "ignore3D"
for gt in gts:
gt[ignore_flag] = gt[ignore_flag] if ignore_flag in gt else 0
self._gts = defaultdict(list) # gt for evaluation
self._dts = defaultdict(list) # dt for evaluation
for gt in gts:
self._gts[gt["image_id"], gt["category_id"]].append(gt)
for dt in dts:
self._dts[dt["image_id"], dt["category_id"]].append(dt)
self.evalImgs = defaultdict(list) # per-image per-category evaluation results
self.eval = {} # accumulated evaluation results
def accumulate(self, p = None):
'''
Accumulate per image evaluation results and store the result in self.eval
:param p: input params for evaluation
:return: None
'''
print('Accumulating evaluation results...')
assert self.evalImgs, 'Please run evaluate() first'
tic = time.time()
# allows input customized parameters
if p is None:
p = self.params
p.catIds = p.catIds if p.useCats == 1 else [-1]
T = len(p.iouThrs)
R = len(p.recThrs)
K = len(p.catIds) if p.useCats else 1
A = len(p.areaRng)
M = len(p.maxDets)
precision = -np.ones((T,R,K,A,M)) # -1 for the precision of absent categories
recall = -np.ones((T,K,A,M))
scores = -np.ones((T,R,K,A,M))
# create dictionary for future indexing
_pe = self._paramsEval
catIds = _pe.catIds if _pe.useCats else [-1]
setK = set(catIds)
setA = set(map(tuple, _pe.areaRng))
setM = set(_pe.maxDets)
setI = set(_pe.imgIds)
# get inds to evaluate
catid_list = [k for n, k in enumerate(p.catIds) if k in setK]
k_list = [n for n, k in enumerate(p.catIds) if k in setK]
m_list = [m for n, m in enumerate(p.maxDets) if m in setM]
a_list = [n for n, a in enumerate(map(lambda x: tuple(x), p.areaRng)) if a in setA]
i_list = [n for n, i in enumerate(p.imgIds) if i in setI]
I0 = len(_pe.imgIds)
A0 = len(_pe.areaRng)
has_precomputed_evals = not (self.evals_per_cat_area is None)
if has_precomputed_evals:
evals_per_cat_area = self.evals_per_cat_area
else:
evals_per_cat_area = {}
# retrieve E at each category, area range, and max number of detections
for k, (k0, catId) in enumerate(zip(k_list, catid_list)):
Nk = k0*A0*I0
for a, a0 in enumerate(a_list):
Na = a0*I0
if has_precomputed_evals:
E = evals_per_cat_area[(catId, a)]
else:
E = [self.evalImgs[Nk + Na + i] for i in i_list]
E = [e for e in E if not e is None]
evals_per_cat_area[(catId, a)] = E
if len(E) == 0:
continue
for m, maxDet in enumerate(m_list):
dtScores = np.concatenate([e['dtScores'][0:maxDet] for e in E])
                    # different sorting methods generate slightly different results;
                    # mergesort is used to stay consistent with the Matlab implementation.
inds = np.argsort(-dtScores, kind='mergesort')
dtScoresSorted = dtScores[inds]
dtm = np.concatenate([e['dtMatches'][:,0:maxDet] for e in E], axis=1)[:,inds]
dtIg = np.concatenate([e['dtIgnore'][:,0:maxDet] for e in E], axis=1)[:,inds]
gtIg = np.concatenate([e['gtIgnore'] for e in E])
npig = np.count_nonzero(gtIg==0)
if npig == 0:
continue
tps = np.logical_and( dtm, np.logical_not(dtIg) )
fps = np.logical_and(np.logical_not(dtm), np.logical_not(dtIg) )
                    # np.float was removed in recent numpy versions; use np.float64
                    tp_sum = np.cumsum(tps, axis=1).astype(dtype=np.float64)
                    fp_sum = np.cumsum(fps, axis=1).astype(dtype=np.float64)
for t, (tp, fp) in enumerate(zip(tp_sum, fp_sum)):
tp = np.array(tp)
fp = np.array(fp)
nd = len(tp)
rc = tp / npig
pr = tp / (fp+tp+np.spacing(1))
q = np.zeros((R,))
ss = np.zeros((R,))
if nd:
recall[t,k,a,m] = rc[-1]
else:
recall[t,k,a,m] = 0
                        # accessing numpy elements is slow without cython optimization;
                        # converting to python lists gives a significant speed improvement
pr = pr.tolist(); q = q.tolist()
for i in range(nd-1, 0, -1):
if pr[i] > pr[i-1]:
pr[i-1] = pr[i]
inds = np.searchsorted(rc, p.recThrs, side='left')
try:
for ri, pi in enumerate(inds):
q[ri] = pr[pi]
ss[ri] = dtScoresSorted[pi]
except:
pass
precision[t,:,k,a,m] = np.array(q)
scores[t,:,k,a,m] = np.array(ss)
self.evals_per_cat_area = evals_per_cat_area
self.eval = {
'params': p,
'counts': [T, R, K, A, M],
'date': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
'precision': precision,
'recall': recall,
'scores': scores,
}
toc = time.time()
print('DONE (t={:0.2f}s).'.format( toc-tic))
def evaluate(self):
"""
Run per image evaluation on given images and store results (a list of dict) in self.evalImgs
"""
print("Running per image evaluation...")
p = self.params
print("Evaluate annotation type *{}*".format(p.iouType))
tic = time.time()
p.imgIds = list(np.unique(p.imgIds))
if p.useCats:
p.catIds = list(np.unique(p.catIds))
p.maxDets = sorted(p.maxDets)
self.params = p
self._prepare()
catIds = p.catIds if p.useCats else [-1]
# loop through images, area range, max detection number
self.ious = {
(imgId, catId): self.computeIoU(imgId, catId)
for imgId in p.imgIds
for catId in catIds
}
maxDet = p.maxDets[-1]
self.evalImgs = [
self.evaluateImg(imgId, catId, areaRng, maxDet)
for catId in catIds
for areaRng in p.areaRng
for imgId in p.imgIds
]
self._paramsEval = copy.deepcopy(self.params)
toc = time.time()
print("DONE (t={:0.2f}s).".format(toc - tic))
def computeIoU(self, imgId, catId):
"""
        Computes IoUs between detections (sorted by descending score) and ground
        truth, using 2D boxes in 2D mode or 3D boxes in 3D mode.
"""
device = (torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu"))
p = self.params
if p.useCats:
gt = self._gts[imgId, catId]
dt = self._dts[imgId, catId]
else:
gt = [_ for cId in p.catIds for _ in self._gts[imgId, cId]]
dt = [_ for cId in p.catIds for _ in self._dts[imgId, cId]]
if len(gt) == 0 and len(dt) == 0:
return []
inds = np.argsort([-d["score"] for d in dt], kind="mergesort")
dt = [dt[i] for i in inds]
if len(dt) > p.maxDets[-1]:
dt = dt[0 : p.maxDets[-1]]
if p.iouType == "bbox":
if self.mode == "2D":
g = [g["bbox"] for g in gt]
d = [d["bbox"] for d in dt]
elif self.mode == "3D":
g = [g["bbox3D"] for g in gt]
d = [d["bbox3D"] for d in dt]
else:
raise Exception("unknown iouType for iou computation")
# compute iou between each dt and gt region
# iscrowd is required in builtin maskUtils so we
# use a dummy buffer for it
iscrowd = [0 for o in gt]
if self.mode == "2D":
ious = maskUtils.iou(d, g, iscrowd)
elif len(d) > 0 and len(g) > 0:
# For 3D eval, we want to run IoU in CUDA if available
if torch.cuda.is_available() and len(d) * len(g) < MAX_DTS_CROSS_GTS_FOR_IOU3D:
device = torch.device("cuda:0")
else:
device = torch.device("cpu")
dd = torch.tensor(d, device=device, dtype=torch.float32)
gg = torch.tensor(g, device=device, dtype=torch.float32)
ious = box3d_overlap(dd, gg).cpu().numpy()
else:
ious = []
in_prox = None
if self.eval_prox:
g = [g["bbox"] for g in gt]
d = [d["bbox"] for d in dt]
iscrowd = [0 for o in gt]
ious2d = maskUtils.iou(d, g, iscrowd)
if type(ious2d) == list:
in_prox = []
else:
in_prox = ious2d > p.proximity_thresh
return ious, in_prox
def evaluateImg(self, imgId, catId, aRng, maxDet):
"""
        Perform evaluation for a single category and image.
Returns:
dict (single image results)
"""
p = self.params
if p.useCats:
gt = self._gts[imgId, catId]
dt = self._dts[imgId, catId]
else:
gt = [_ for cId in p.catIds for _ in self._gts[imgId, cId]]
dt = [_ for cId in p.catIds for _ in self._dts[imgId, cId]]
if len(gt) == 0 and len(dt) == 0:
return None
flag_range = "area" if self.mode == "2D" else "depth"
flag_ignore = "ignore2D" if self.mode == "2D" else "ignore3D"
for g in gt:
if g[flag_ignore] or (g[flag_range] < aRng[0] or g[flag_range] > aRng[1]):
g["_ignore"] = 1
else:
g["_ignore"] = 0
# sort dt highest score first, sort gt ignore last
gtind = np.argsort([g["_ignore"] for g in gt], kind="mergesort")
gt = [gt[i] for i in gtind]
dtind = np.argsort([-d["score"] for d in dt], kind="mergesort")
dt = [dt[i] for i in dtind[0:maxDet]]
# load computed ious
ious = (
self.ious[imgId, catId][0][:, gtind]
if len(self.ious[imgId, catId][0]) > 0
else self.ious[imgId, catId][0]
)
if self.eval_prox:
in_prox = (
self.ious[imgId, catId][1][:, gtind]
if len(self.ious[imgId, catId][1]) > 0
else self.ious[imgId, catId][1]
)
T = len(p.iouThrs)
G = len(gt)
D = len(dt)
gtm = np.zeros((T, G))
dtm = np.zeros((T, D))
gtIg = np.array([g["_ignore"] for g in gt])
dtIg = np.zeros((T, D))
if not len(ious) == 0:
for tind, t in enumerate(p.iouThrs):
for dind, d in enumerate(dt):
# information about best match so far (m=-1 -> unmatched)
iou = min([t, 1 - 1e-10])
m = -1
for gind, g in enumerate(gt):
# in case of proximity evaluation, if not in proximity continue
if self.eval_prox and not in_prox[dind, gind]:
continue
# if this gt already matched, continue
if gtm[tind, gind] > 0:
continue
# if dt matched to reg gt, and on ignore gt, stop
if m > -1 and gtIg[m] == 0 and gtIg[gind] == 1:
break
# continue to next gt unless better match made
if ious[dind, gind] < iou:
continue
# if match successful and best so far, store appropriately
iou = ious[dind, gind]
m = gind
# if match made store id of match for both dt and gt
if m == -1:
continue
dtIg[tind, dind] = gtIg[m]
dtm[tind, dind] = gt[m]["id"]
gtm[tind, m] = d["id"]
# set unmatched detections outside of area range to ignore
a = np.array(
[d[flag_range] < aRng[0] or d[flag_range] > aRng[1] for d in dt]
).reshape((1, len(dt)))
dtIg = np.logical_or(dtIg, np.logical_and(dtm == 0, np.repeat(a, T, 0)))
# in case of proximity evaluation, ignore detections which are far from gt regions
if self.eval_prox and len(in_prox) > 0:
dt_far = in_prox.any(1) == 0
dtIg = np.logical_or(dtIg, np.repeat(dt_far.reshape((1, len(dt))), T, 0))
# store results for given image and category
return {
"image_id": imgId,
"category_id": catId,
"aRng": aRng,
"maxDet": maxDet,
"dtIds": [d["id"] for d in dt],
"gtIds": [g["id"] for g in gt],
"dtMatches": dtm,
"gtMatches": gtm,
"dtScores": [d["score"] for d in dt],
"gtIgnore": gtIg,
"dtIgnore": dtIg,
}
def summarize(self):
"""
Compute and display summary metrics for evaluation results.
        Note this function can *only* be applied with the default parameter setting
"""
def _summarize(mode, ap=1, iouThr=None, areaRng="all", maxDets=100, log_str=""):
p = self.params
eval = self.eval
if mode == "2D":
iStr = (" {:<18} {} @[ IoU={:<9} | area={:>6s} | maxDets={:>3d} ] = {:0.3f}")
elif mode == "3D":
iStr = " {:<18} {} @[ IoU={:<9} | depth={:>6s} | maxDets={:>3d} ] = {:0.3f}"
titleStr = "Average Precision" if ap == 1 else "Average Recall"
typeStr = "(AP)" if ap == 1 else "(AR)"
iouStr = (
"{:0.2f}:{:0.2f}".format(p.iouThrs[0], p.iouThrs[-1])
if iouThr is None
else "{:0.2f}".format(iouThr)
)
aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng]
mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets]
if ap == 1:
# dimension of precision: [TxRxKxAxM]
s = eval["precision"]
# IoU
if iouThr is not None:
t = np.where(np.isclose(iouThr, p.iouThrs.astype(float)))[0]
s = s[t]
s = s[:, :, :, aind, mind]
else:
# dimension of recall: [TxKxAxM]
s = eval["recall"]
if iouThr is not None:
t = np.where(iouThr == p.iouThrs)[0]
s = s[t]
s = s[:, :, aind, mind]
if len(s[s > -1]) == 0:
mean_s = -1
else:
mean_s = np.mean(s[s > -1])
if log_str != "":
log_str += "\n"
log_str += "mode={} ".format(mode) + \
iStr.format(titleStr, typeStr, iouStr, areaRng, maxDets, mean_s)
return mean_s, log_str
def _summarizeDets(mode):
params = self.params
            # the thresholds here define the thresholds printed in `_derive_omni_results`
thres = [0.5, 0.75, 0.95] if mode == "2D" else [0.15, 0.25, 0.50]
stats = np.zeros((13,))
stats[0], log_str = _summarize(mode, 1)
stats[1], log_str = _summarize(
mode, 1, iouThr=thres[0], maxDets=params.maxDets[2], log_str=log_str
)
stats[2], log_str = _summarize(
mode, 1, iouThr=thres[1], maxDets=params.maxDets[2], log_str=log_str
)
stats[3], log_str = _summarize(
mode, 1, iouThr=thres[2], maxDets=params.maxDets[2], log_str=log_str
)
stats[4], log_str = _summarize(
mode,
1,
areaRng=params.areaRngLbl[1],
maxDets=params.maxDets[2],
log_str=log_str,
)
stats[5], log_str = _summarize(
mode,
1,
areaRng=params.areaRngLbl[2],
maxDets=params.maxDets[2],
log_str=log_str,
)
stats[6], log_str = _summarize(
mode,
1,
areaRng=params.areaRngLbl[3],
maxDets=params.maxDets[2],
log_str=log_str,
)
stats[7], log_str = _summarize(
mode, 0, maxDets=params.maxDets[0], log_str=log_str
)
stats[8], log_str = _summarize(
mode, 0, maxDets=params.maxDets[1], log_str=log_str
)
stats[9], log_str = _summarize(
mode, 0, maxDets=params.maxDets[2], log_str=log_str
)
stats[10], log_str = _summarize(
mode,
0,
areaRng=params.areaRngLbl[1],
maxDets=params.maxDets[2],
log_str=log_str,
)
stats[11], log_str = _summarize(
mode,
0,
areaRng=params.areaRngLbl[2],
maxDets=params.maxDets[2],
log_str=log_str,
)
stats[12], log_str = _summarize(
mode,
0,
areaRng=params.areaRngLbl[3],
maxDets=params.maxDets[2],
log_str=log_str,
)
return stats, log_str
if not self.eval:
raise Exception("Please run accumulate() first")
stats, log_str = _summarizeDets(self.mode)
self.stats = stats
return log_str | 65,081 | 37.171261 | 168 | py |
omni3d | omni3d-main/cubercnn/vis/vis.py | # Copyright (c) Meta Platforms, Inc. and affiliates
import cv2
import numpy as np
import matplotlib.pyplot as plt
import os
import math
import torch
from copy import deepcopy
from pytorch3d.structures.meshes import join_meshes_as_scene
from pytorch3d.transforms.so3 import (
so3_relative_angle,
)
from matplotlib.path import Path
from cubercnn import util
def interp_color(dist, bounds=[0, 1], color_lo=(0,0, 250), color_hi=(0, 250, 250)):
percent = (dist - bounds[0]) / (bounds[1] - bounds[0])
b = color_lo[0] * (1 - percent) + color_hi[0] * percent
g = color_lo[1] * (1 - percent) + color_hi[1] * percent
r = color_lo[2] * (1 - percent) + color_hi[2] * percent
return (b, g, r)
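# Illustrative sketch (not part of the original file): interp_color linearly
# blends two BGR colors over the given bounds; halfway between the default
# endpoints (0, 0, 250) and (0, 250, 250) this returns (0.0, 125.0, 250.0).
def _example_interp_color_usage():
    return interp_color(0.5, bounds=[0, 1])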
def draw_bev(canvas_bev, z3d, l3d, w3d, x3d, ry3d, color=(0, 200, 200), scale=1, thickness=2):
w = l3d * scale
l = w3d * scale
x = x3d * scale
z = z3d * scale
r = ry3d*-1
corners1 = np.array([
[-w / 2, -l / 2, 1],
[+w / 2, -l / 2, 1],
[+w / 2, +l / 2, 1],
[-w / 2, +l / 2, 1]
])
ry = np.array([
[+math.cos(r), -math.sin(r), 0],
[+math.sin(r), math.cos(r), 0],
[0, 0, 1],
])
corners2 = ry.dot(corners1.T).T
corners2[:, 0] += w/2 + x + canvas_bev.shape[1] / 2
corners2[:, 1] += l/2 + z
draw_line(canvas_bev, corners2[0], corners2[1], color=color, thickness=thickness)
draw_line(canvas_bev, corners2[1], corners2[2], color=color, thickness=thickness)
draw_line(canvas_bev, corners2[2], corners2[3], color=color, thickness=thickness)
draw_line(canvas_bev, corners2[3], corners2[0], color=color, thickness=thickness)
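# Illustrative sketch (not part of the original file): drawing a single box,
# roughly 4m long and 2m wide, 10m ahead of the camera, onto a blank
# bird's-eye-view canvas. All sizes and the 20 px/m scale are placeholders.
def _example_draw_bev_usage():
    canvas_bev = np.zeros((600, 600, 3), dtype=np.uint8)
    draw_bev(canvas_bev, z3d=10.0, l3d=4.0, w3d=2.0, x3d=0.0, ry3d=math.pi / 6, scale=20)
    return canvas_bev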
def draw_line(im, v0, v1, color=(0, 200, 200), thickness=1):
cv2.line(im, (int(v0[0]), int(v0[1])), (int(v1[0]), int(v1[1])), color, thickness)
def create_colorbar(height, width, color_lo=(0,0, 250), color_hi=(0, 250, 250)):
im = np.zeros([height, width, 3])
for h in range(0, height):
color = interp_color(h + 0.5, [0, height], color_hi, color_lo)
im[h, :, 0] = (color[0])
im[h, :, 1] = (color[1])
im[h, :, 2] = (color[2])
return im.astype(np.uint8)
def visualize_from_instances(detections, dataset, dataset_name, min_size_test, output_folder, category_names_official, iteration=''):
vis_folder = os.path.join(output_folder, 'vis')
util.mkdir_if_missing(vis_folder)
log_str = ''
xy_errors = []
z_errors = []
w3d_errors = []
h3d_errors = []
l3d_errors = []
dim_errors = []
ry_errors = []
n_cats = len(category_names_official)
thres = np.sqrt(1/n_cats)
for imind, im_obj in enumerate(detections):
write_sample = ((imind % 50) == 0)
annos = dataset._dataset[imind]['annotations']
gt_boxes_2d = np.array([anno['bbox'] for anno in annos])
if len(gt_boxes_2d)==0:
continue
gt_boxes_2d[:, 2] += gt_boxes_2d[:, 0]
gt_boxes_2d[:, 3] += gt_boxes_2d[:, 1]
gt_boxes_cat = np.array([anno['category_id'] for anno in annos])
if write_sample:
data_obj = dataset[imind]
assert(data_obj['image_id'] == im_obj['image_id'])
im = util.imread(data_obj['file_name'])
K = np.array(im_obj['K'])
K_inv = np.linalg.inv(K)
sf = im_obj['height'] / min_size_test
for instance in im_obj['instances']:
cat = category_names_official[instance['category_id']]
score = instance['score']
x1, y1, w, h = instance['bbox']
x2 = x1 + w
y2 = y1 + h
alpha, h3d, w3d, l3d, x3d, y3d, z3d, ry3d = (-1,)*8
w3d, h3d, l3d = instance['dimensions']
# unproject
cen_2d = np.array(instance['center_2D'] + [1])
z3d = instance['center_cam'][2]
# get rotation (y-axis only)
ry3d = np.array(instance['pose'])
valid_gt_inds = np.flatnonzero(instance['category_id'] == gt_boxes_cat)
if len(valid_gt_inds) > 0:
quality_matrix = util.iou(np.array([[x1, y1, x2, y2]]), gt_boxes_2d[valid_gt_inds])
nearest_gt = quality_matrix.argmax(axis=1)[0]
nearest_gt_iou = quality_matrix.max(axis=1)[0]
valid_match = nearest_gt_iou >= 0.5
else:
valid_match = False
if valid_match:
gt_x1, gt_y1, gt_w, gt_h = annos[valid_gt_inds[nearest_gt]]['bbox']
gt_x3d, gt_y3d, gt_z3d = annos[valid_gt_inds[nearest_gt]]['center_cam']
gt_w3d, gt_h3d, gt_l3d = annos[valid_gt_inds[nearest_gt]]['dimensions']
gt_cen_2d = K @ np.array([gt_x3d, gt_y3d, gt_z3d])
gt_cen_2d /= gt_cen_2d[2]
gt_pose = annos[valid_gt_inds[nearest_gt]]['pose']
gt_ry3d = np.array(gt_pose)
if valid_match:
# compute errors
xy_errors.append(np.sqrt(((cen_2d[:2] - gt_cen_2d[:2])**2).sum()))
z_errors.append(np.abs(z3d - gt_z3d))
w3d_errors.append(np.abs(w3d - gt_w3d))
h3d_errors.append(np.abs(h3d - gt_h3d))
l3d_errors.append(np.abs(l3d - gt_l3d))
dim_errors.append(np.sqrt((w3d - gt_w3d)**2 + (h3d - gt_h3d)**2 + (l3d - gt_l3d)**2))
try:
ry_errors.append(so3_relative_angle(torch.from_numpy(ry3d).unsqueeze(0), torch.from_numpy(gt_ry3d).unsqueeze(0), cos_bound=1).item())
except:
pass
# unproject point to 3D
x3d, y3d, z3d = (K_inv @ (z3d*cen_2d))
# let us visualize the detections now
if write_sample and score > thres:
color = util.get_color(instance['category_id'])
draw_3d_box(im, K, [x3d, y3d, z3d, w3d, h3d, l3d], ry3d, color=color, thickness=int(np.round(3*im.shape[0]/500)), draw_back=False)
draw_text(im, '{}, z={:.1f}, s={:.2f}'.format(cat, z3d, score), [x1, y1, w, h], scale=0.50*im.shape[0]/500, bg_color=color)
if write_sample:
util.imwrite(im, os.path.join(vis_folder, '{:06d}.jpg'.format(imind)))
# safety in case all rotation matrices failed.
if len(ry_errors) == 0:
ry_errors = [1000, 1000]
    log_str += dataset_name + ' iter={}, xy({:.2f}), z({:.2f}), whl({:.2f}, {:.2f}, {:.2f}), ry({:.2f})\n'.format(
iteration,
np.mean(xy_errors), np.mean(z_errors),
np.mean(w3d_errors), np.mean(h3d_errors), np.mean(l3d_errors),
np.mean(ry_errors),
)
return log_str
def imshow(im, fig_num=None):
if fig_num is not None: plt.figure(fig_num)
if len(im.shape) == 2:
im = np.tile(im, [3, 1, 1]).transpose([1, 2, 0])
plt.imshow(cv2.cvtColor(im.astype(np.uint8), cv2.COLOR_RGB2BGR))
plt.show()
def draw_scene_view(im, K, meshes, text=None, scale=1000, R=None, T=None, zoom_factor=1.0, mode='front_and_novel', blend_weight=0.80, blend_weight_overlay=1.0, ground_bounds=None, canvas=None, zplane=0.05):
"""
Draws a scene from multiple different modes.
Args:
im (array): the image to draw onto
K (array): the 3x3 matrix for projection to camera to screen
meshes ([Mesh]): a list of meshes to draw into the scene
text ([str]): optional strings to draw per mesh
scale (int): the size of the square novel view canvas (pixels)
R (array): a single 3x3 matrix defining the novel view
T (array): a 3x vector defining the position of the novel view
zoom_factor (float): an optional amount to zoom out (>1) or in (<1)
mode (str): supports ['2D_only', 'front', 'novel', 'front_and_novel'] where
front implies the front-facing camera view and novel is based on R,T
blend_weight (float): blend factor for box edges over the RGB
blend_weight_overlay (float): blends the RGB image with the rendered meshes
        ground_bounds (tuple): max_y3d, x3d_start, x3d_end, z3d_start, z3d_end for the ground plane, or
            None to let the renderer estimate the ground bounds in the novel view itself.
canvas (array): if the canvas doesn't change it can be faster to re-use it. Optional.
zplane (float): a plane of depth to solve intersection when
vertex points project behind the camera plane.
"""
if R is None:
R = util.euler2mat([np.pi/3, 0, 0])
if mode == '2D_only':
im_drawn_rgb = deepcopy(im)
# go in order of reverse depth
for mesh_idx in reversed(np.argsort([mesh.verts_padded().cpu().mean(1)[0, 1] for mesh in meshes])):
mesh = meshes[mesh_idx]
verts3D = mesh.verts_padded()[0].numpy()
verts2D = (K @ verts3D.T) / verts3D[:, -1]
color = [min(255, c*255*1.25) for c in mesh.textures.verts_features_padded()[0,0].tolist()]
x1 = verts2D[0, :].min()
y1 = verts2D[1, :].min()
x2 = verts2D[0, :].max()
y2 = verts2D[1, :].max()
draw_2d_box(im_drawn_rgb, [x1, y1, x2-x1, y2-y1], color=color, thickness=max(2, int(np.round(3*im_drawn_rgb.shape[0]/1250))))
if text is not None:
draw_text(im_drawn_rgb, '{}'.format(text[mesh_idx]), [x1, y1], scale=0.50*im_drawn_rgb.shape[0]/500, bg_color=color)
return im_drawn_rgb
else:
meshes_scene = join_meshes_as_scene(meshes).cuda()
device = meshes_scene.device
meshes_scene.textures = meshes_scene.textures.to(device)
cameras = util.get_camera(K, im.shape[1], im.shape[0]).to(device)
renderer = util.get_basic_renderer(cameras, im.shape[1], im.shape[0], use_color=True).to(device)
if mode in ['front_and_novel', 'front']:
'''
Render full scene from image view
'''
im_drawn_rgb = deepcopy(im)
# save memory if not blending the render
if blend_weight > 0:
rendered_img, _ = renderer(meshes_scene)
sil_mask = rendered_img[0, :, :, 3].cpu().numpy() > 0.1
rendered_img = (rendered_img[0, :, :, :3].cpu().numpy() * 255).astype(np.uint8)
im_drawn_rgb[sil_mask] = rendered_img[sil_mask] * blend_weight + im_drawn_rgb[sil_mask] * (1 - blend_weight)
'''
Draw edges for image view
'''
# go in order of reverse depth
for mesh_idx in reversed(np.argsort([mesh.verts_padded().cpu().mean(1)[0, 1] for mesh in meshes])):
mesh = meshes[mesh_idx]
verts3D = mesh.verts_padded()[0].cpu().numpy()
verts2D = (K @ verts3D.T) / verts3D[:, -1]
color = [min(255, c*255*1.25) for c in mesh.textures.verts_features_padded()[0,0].tolist()]
draw_3d_box_from_verts(
im_drawn_rgb, K, verts3D, color=color,
thickness=max(2, int(np.round(3*im_drawn_rgb.shape[0]/1250))),
draw_back=False, draw_top=False, zplane=zplane
)
x1 = verts2D[0, :].min() #min(verts2D[0, (verts2D[0, :] > 0) & (verts2D[0, :] < im_drawn_rgb.shape[1])])
y1 = verts2D[1, :].min() #min(verts2D[1, (verts2D[1, :] > 0) & (verts2D[1, :] < im_drawn_rgb.shape[0])])
if text is not None:
draw_text(im_drawn_rgb, '{}'.format(text[mesh_idx]), [x1, y1], scale=0.50*im_drawn_rgb.shape[0]/500, bg_color=color)
if blend_weight_overlay < 1.0 and blend_weight_overlay > 0.0:
im_drawn_rgb = im_drawn_rgb * blend_weight_overlay + deepcopy(im) * (1 - blend_weight_overlay)
if mode == 'front':
return im_drawn_rgb
elif mode in ['front_and_novel', 'novel']:
'''
Render from a new view
'''
has_canvas_already = canvas is not None
if not has_canvas_already:
canvas = np.ones((scale, scale, 3))
view_R = torch.from_numpy(R).float().to(device)
if T is None:
center = (meshes_scene.verts_padded().min(1).values + meshes_scene.verts_padded().max(1).values).unsqueeze(0)/2
else:
center = torch.from_numpy(T).float().to(device).view(1, 1, 3)
verts_rotated = meshes_scene.verts_padded().clone()
verts_rotated -= center
verts_rotated = (view_R @ verts_rotated[0].T).T.unsqueeze(0)
K_novelview = deepcopy(K)
K_novelview[0, -1] *= scale / im.shape[1]
K_novelview[1, -1] *= scale / im.shape[0]
cameras = util.get_camera(K_novelview, scale, scale).to(device)
renderer = util.get_basic_renderer(cameras, scale, scale, use_color=True).to(device)
margin = 0.01
if T is None:
max_trials = 10000
zoom_factor = 100.0
zoom_factor_in = zoom_factor
while max_trials:
zoom_factor_in = zoom_factor_in*0.95
verts = verts_rotated.clone()
verts[:, :, -1] += center[:, :, -1]*zoom_factor_in
verts_np = verts.cpu().numpy()
proj = ((K_novelview @ verts_np[0].T) / verts_np[:, :, -1])
# some vertices are extremely close or negative...
# this implies we have zoomed in too much
if (verts[0, :, -1] < 0.25).any():
break
# left or above image
elif (proj[:2, :] < scale*margin).any():
break
# right or below borders
elif (proj[:2, :] > scale*(1 - margin)).any():
break
# everything is in view.
zoom_factor = zoom_factor_in
max_trials -= 1
zoom_out_bias = center[:, :, -1].item()
else:
zoom_out_bias = 1.0
verts_rotated[:, :, -1] += zoom_out_bias*zoom_factor
meshes_novel_view = meshes_scene.clone().update_padded(verts_rotated)
rendered_img, _ = renderer(meshes_novel_view)
im_novel_view = (rendered_img[0, :, :, :3].cpu().numpy() * 255).astype(np.uint8)
sil_mask = rendered_img[0, :, :, 3].cpu().numpy() > 0.1
center_np = center.cpu().numpy()
view_R_np = view_R.cpu().numpy()
if not has_canvas_already:
if ground_bounds is None:
min_x3d, _, min_z3d = meshes_scene.verts_padded().min(1).values[0, :].tolist()
max_x3d, max_y3d, max_z3d = meshes_scene.verts_padded().max(1).values[0, :].tolist()
# go for grid projection, but with extremely bad guess at bounds
x3d_start = np.round(min_x3d - (max_x3d - min_x3d)*50)
x3d_end = np.round(max_x3d + (max_x3d - min_x3d)*50)
z3d_start = np.round(min_z3d - (max_z3d - min_z3d)*50)
z3d_end = np.round(max_z3d + (max_z3d - min_z3d)*50)
grid_xs = np.arange(x3d_start, x3d_end)
grid_zs = np.arange(z3d_start, z3d_end)
xs_mesh, zs_mesh = np.meshgrid(grid_xs, grid_zs)
ys_mesh = np.ones_like(xs_mesh)*max_y3d
point_mesh = np.concatenate((xs_mesh[:, :, np.newaxis], ys_mesh[:, :, np.newaxis], zs_mesh[:, :, np.newaxis]), axis=2)
point_mesh_orig = deepcopy(point_mesh)
mesh_shape = point_mesh.shape
point_mesh = view_R_np @ (point_mesh - center_np).transpose(2, 0, 1).reshape(3, -1)
point_mesh[-1] += zoom_out_bias*zoom_factor
point_mesh[-1, :] = point_mesh[-1, :].clip(0.25)
point_mesh_2D = (K_novelview @ point_mesh) / point_mesh[-1]
point_mesh_2D[-1] = point_mesh[-1]
point_mesh = point_mesh.reshape(3, mesh_shape[0], mesh_shape[1]).transpose(1, 2, 0)
point_mesh_2D = point_mesh_2D.reshape(3, mesh_shape[0], mesh_shape[1]).transpose(1, 2, 0)
maskx = (point_mesh_2D[:, :, 0].T >= -50) & (point_mesh_2D[:, :, 0].T < scale+50) & (point_mesh_2D[:, :, 2].T > 0)
maskz = (point_mesh_2D[:, :, 1].T >= -50) & (point_mesh_2D[:, :, 1].T < scale+50) & (point_mesh_2D[:, :, 2].T > 0)
# invalid scene?
if (not maskz.any()) or (not maskx.any()):
return im, im, canvas
# go for grid projection again!! but with sensible bounds
x3d_start = np.round(point_mesh[:, :, 0].T[maskx].min() - 10)
x3d_end = np.round(point_mesh[:, :, 0].T[maskx].max() + 10)
z3d_start = np.round(point_mesh_orig[:, :, 2].T[maskz].min() - 10)
z3d_end = np.round(point_mesh_orig[:, :, 2].T[maskz].max() + 10)
else:
max_y3d, x3d_start, x3d_end, z3d_start, z3d_end = ground_bounds
grid_xs = np.arange(x3d_start, x3d_end)
grid_zs = np.arange(z3d_start, z3d_end)
xs_mesh, zs_mesh = np.meshgrid(grid_xs, grid_zs)
ys_mesh = np.ones_like(xs_mesh)*max_y3d
point_mesh = np.concatenate((xs_mesh[:, :, np.newaxis], ys_mesh[:, :, np.newaxis], zs_mesh[:, :, np.newaxis]), axis=2)
mesh_shape = point_mesh.shape
point_mesh = view_R_np @ (point_mesh - center_np).transpose(2, 0, 1).reshape(3, -1)
point_mesh[-1] += zoom_out_bias*zoom_factor
point_mesh[-1, :] = point_mesh[-1, :].clip(0.25)
point_mesh_2D = (K_novelview @ point_mesh) / point_mesh[-1]
point_mesh_2D[-1] = point_mesh[-1]
point_mesh = point_mesh.reshape(3, mesh_shape[0], mesh_shape[1]).transpose(1, 2, 0)
point_mesh_2D = point_mesh_2D.reshape(3, mesh_shape[0], mesh_shape[1]).transpose(1, 2, 0)
bg_color = (225,)*3
line_color = (175,)*3
canvas[:, :, 0] = bg_color[0]
canvas[:, :, 1] = bg_color[1]
canvas[:, :, 2] = bg_color[2]
lines_to_draw = set()
for grid_row_idx in range(1, len(grid_zs)):
pre_z = grid_zs[grid_row_idx-1]
cur_z = grid_zs[grid_row_idx]
for grid_col_idx in range(1, len(grid_xs)):
pre_x = grid_xs[grid_col_idx-1]
cur_x = grid_xs[grid_col_idx]
p1 = point_mesh_2D[grid_row_idx-1, grid_col_idx-1]
valid1 = p1[-1] > 0
p2 = point_mesh_2D[grid_row_idx-1, grid_col_idx]
valid2 = p2[-1] > 0
if valid1 and valid2:
line = (tuple(p1[:2].astype(int).tolist()), tuple(p2[:2].astype(int).tolist()))
lines_to_draw.add(line)
# draw vertical line from the previous row
p1 = point_mesh_2D[grid_row_idx-1, grid_col_idx-1]
valid1 = p1[-1] > 0
p2 = point_mesh_2D[grid_row_idx, grid_col_idx-1]
valid2 = p2[-1] > 0
if valid1 and valid2:
line = (tuple(p1[:2].astype(int).tolist()), tuple(p2[:2].astype(int).tolist()))
lines_to_draw.add(line)
for line in lines_to_draw:
draw_line(canvas, line[0], line[1], color=line_color, thickness=max(1, int(np.round(3*scale/1250))))
im_novel_view[~sil_mask] = canvas[~sil_mask]
'''
Draw edges for novel view
'''
# apply novel view to meshes
meshes_novel = []
for mesh in meshes:
mesh_novel = mesh.clone().to(device)
verts_rotated = mesh_novel.verts_padded()
verts_rotated -= center
verts_rotated = (view_R @ verts_rotated[0].T).T.unsqueeze(0)
verts_rotated[:, :, -1] += zoom_out_bias*zoom_factor
mesh_novel = mesh_novel.update_padded(verts_rotated)
meshes_novel.append(mesh_novel)
# go in order of reverse depth
for mesh_idx in reversed(np.argsort([mesh.verts_padded().cpu().mean(1)[0, 1] for mesh in meshes_novel])):
mesh = meshes_novel[mesh_idx]
verts3D = mesh.verts_padded()[0].cpu().numpy()
verts2D = (K_novelview @ verts3D.T) / verts3D[:, -1]
color = [min(255, c*255*1.25) for c in mesh.textures.verts_features_padded()[0,0].tolist()]
draw_3d_box_from_verts(
im_novel_view, K_novelview, verts3D, color=color,
thickness=max(2, int(np.round(3*im_novel_view.shape[0]/1250))),
draw_back=False, draw_top=False, zplane=zplane
)
x1 = verts2D[0, :].min()
y1 = verts2D[1, :].min()
if text is not None:
draw_text(im_novel_view, '{}'.format(text[mesh_idx]), [x1, y1], scale=0.50*im_novel_view.shape[0]/500, bg_color=color)
if mode == 'front_and_novel':
return im_drawn_rgb, im_novel_view, canvas
else:
return im_novel_view, canvas
else:
raise ValueError('No visualization written for {}'.format(mode))
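# Example usage of draw_scene_view (illustrative sketch, not part of the original
# file): render only the camera-view overlay for cuboid meshes built with
# util.mesh_cuboid; `im`, `K`, `meshes` and `labels` below are placeholders, and the
# non-'2D_only' modes require a CUDA device as written.
# im_front = draw_scene_view(im, K, meshes, text=labels, mode='front')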
def get_polygon_grid(im, poly_verts):
nx = im.shape[1]
ny = im.shape[0]
x, y = np.meshgrid(np.arange(nx), np.arange(ny))
x, y = x.flatten(), y.flatten()
points = np.vstack((x, y)).T
path = Path(poly_verts)
grid = path.contains_points(points)
grid = grid.reshape((ny, nx))
return grid
def draw_circle(im, pos, radius=5, thickness=1, color=(250, 100, 100), fill=True):
if fill: thickness = -1
cv2.circle(im, (int(pos[0]), int(pos[1])), radius, color=color, thickness=thickness)
def draw_transparent_polygon(im, verts, blend=0.5, color=(0, 255, 255)):
mask = get_polygon_grid(im, verts[:4, :])
im[mask, 0] = im[mask, 0] * blend + (1 - blend) * color[0]
im[mask, 1] = im[mask, 1] * blend + (1 - blend) * color[1]
im[mask, 2] = im[mask, 2] * blend + (1 - blend) * color[2]
def draw_3d_box_from_verts(im, K, verts3d, color=(0, 200, 200), thickness=1, draw_back=False, draw_top=False, zplane=0.05, eps=1e-4):
"""
    Draws a 3D cuboid onto an image given its 8 vertices in camera space.
Args:
im (array): the image to draw onto
K (array): the 3x3 matrix for projection to camera to screen
verts3d (array): the 8x3 matrix of vertices in camera space
color (tuple): color in RGB scaled [0, 255)
thickness (float): the line thickness for opencv lines
draw_back (bool): whether a backface should be highlighted
draw_top (bool): whether the top face should be highlighted
zplane (float): a plane of depth to solve intersection when
vertex points project behind the camera plane.
"""
if isinstance(K, torch.Tensor):
K = K.detach().cpu().numpy()
if isinstance(verts3d, torch.Tensor):
verts3d = verts3d.detach().cpu().numpy()
# reorder
bb3d_lines_verts = [[0, 1], [1, 2], [2, 3], [3, 0], [1, 5], [5, 6], [6, 2], [4, 5], [4, 7], [6, 7], [0, 4], [3, 7]]
    # define back and top vertex planes
back_idxs = [4, 0, 3, 7]
top_idxs = [4, 0, 1, 5]
for (i, j) in bb3d_lines_verts:
v0 = verts3d[i]
v1 = verts3d[j]
z0, z1 = v0[-1], v1[-1]
if (z0 >= zplane or z1 >= zplane):
            # compute the intersection of v0, v1 and zplane
s = (zplane - z0) / max((z1 - z0), eps)
new_v = v0 + s * (v1 - v0)
if (z0 < zplane) and (z1 >= zplane):
# i0 vertex is behind the plane
v0 = new_v
elif (z0 >= zplane) and (z1 < zplane):
# i1 vertex is behind the plane
v1 = new_v
v0_proj = (K @ v0)/max(v0[-1], eps)
v1_proj = (K @ v1)/max(v1[-1], eps)
# project vertices
cv2.line(im,
(int(v0_proj[0]), int(v0_proj[1])),
(int(v1_proj[0]), int(v1_proj[1])),
color, thickness
)
    # don't draw the back/top faces if any of their vertices fall behind the z-plane
draw_back &= np.all(verts3d[back_idxs, -1] >= zplane)
draw_top &= np.all(verts3d[top_idxs, -1] >= zplane)
if draw_back or draw_top:
# project to image
verts2d = (K @ verts3d.T).T
verts2d /= verts2d[:, -1][:, np.newaxis]
if type(verts2d) == torch.Tensor:
verts2d = verts2d.detach().cpu().numpy()
if draw_back:
draw_transparent_polygon(im, verts2d[back_idxs, :2], blend=0.5, color=color)
if draw_top:
draw_transparent_polygon(im, verts2d[top_idxs, :2], blend=0.5, color=color)
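# Example usage of draw_3d_box_from_verts (illustrative sketch, not part of the
# original file): the vertices are the 8 camera-space cuboid corners, e.g. obtained
# via util.get_cuboid_verts as in draw_3d_box below; variable names are placeholders.
# _, verts3d = util.get_cuboid_verts(K, box3d, R)
# draw_3d_box_from_verts(im, K, verts3d, color=(0, 200, 200), thickness=2)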
def draw_3d_box(im, K, box3d, R, color=(0, 200, 200), thickness=1, draw_back=False, draw_top=False, view_R=None, view_T=None):
verts2d, verts3d = util.get_cuboid_verts(K, box3d, R, view_R=view_R, view_T=view_T)
draw_3d_box_from_verts(im, K, verts3d, color=color, thickness=thickness, draw_back=draw_back, draw_top=draw_top)
def draw_text(im, text, pos, scale=0.4, color='auto', font=cv2.FONT_HERSHEY_SIMPLEX, bg_color=(0, 255, 255),
blend=0.33, lineType=1):
text = str(text)
pos = [int(pos[0]), int(pos[1])]
if color == 'auto':
if bg_color is not None:
color = (0, 0, 0) if ((bg_color[0] + bg_color[1] + bg_color[2])/3) > 127.5 else (255, 255, 255)
else:
color = (0, 0, 0)
if bg_color is not None:
text_size, _ = cv2.getTextSize(text, font, scale, lineType)
x_s = int(np.clip(pos[0], a_min=0, a_max=im.shape[1]))
x_e = int(np.clip(x_s + text_size[0] - 1 + 4, a_min=0, a_max=im.shape[1]))
y_s = int(np.clip(pos[1] - text_size[1] - 2, a_min=0, a_max=im.shape[0]))
y_e = int(np.clip(pos[1] + 1 - 2, a_min=0, a_max=im.shape[0]))
im[y_s:y_e + 1, x_s:x_e + 1, 0] = im[y_s:y_e + 1, x_s:x_e + 1, 0]*blend + bg_color[0] * (1 - blend)
im[y_s:y_e + 1, x_s:x_e + 1, 1] = im[y_s:y_e + 1, x_s:x_e + 1, 1]*blend + bg_color[1] * (1 - blend)
im[y_s:y_e + 1, x_s:x_e + 1, 2] = im[y_s:y_e + 1, x_s:x_e + 1, 2]*blend + bg_color[2] * (1 - blend)
pos[0] = int(np.clip(pos[0] + 2, a_min=0, a_max=im.shape[1]))
pos[1] = int(np.clip(pos[1] - 2, a_min=0, a_max=im.shape[0]))
cv2.putText(im, text, tuple(pos), font, scale, color, lineType)
def draw_transparent_square(im, pos, alpha=1, radius=5, color=(250, 100, 100)):
l = pos[1] - radius
r = pos[1] + radius
t = pos[0] - radius
b = pos[0] + radius
if (np.array([l, r, t, b]) >= 0).any():
l = np.clip(np.floor(l), 0, im.shape[0]).astype(int)
r = np.clip(np.floor(r), 0, im.shape[0]).astype(int)
t = np.clip(np.floor(t), 0, im.shape[1]).astype(int)
b = np.clip(np.floor(b), 0, im.shape[1]).astype(int)
# blend
im[l:r + 1, t:b + 1, 0] = im[l:r + 1, t:b + 1, 0] * alpha + color[0] * (1 - alpha)
im[l:r + 1, t:b + 1, 1] = im[l:r + 1, t:b + 1, 1] * alpha + color[1] * (1 - alpha)
im[l:r + 1, t:b + 1, 2] = im[l:r + 1, t:b + 1, 2] * alpha + color[2] * (1 - alpha)
def draw_2d_box(im, box, color=(0, 200, 200), thickness=1):
x = box[0]
y = box[1]
w = box[2]
h = box[3]
x2 = (x + w) - 1
y2 = (y + h) - 1
cv2.rectangle(im, (int(x), int(y)), (int(x2), int(y2)), color, thickness)
def imhstack(im1, im2):
sf = im1.shape[0] / im2.shape[0]
if sf > 1:
im2 = cv2.resize(im2, (int(im2.shape[1] / sf), im1.shape[0]))
elif sf < 1:
im1 = cv2.resize(im1, (int(im1.shape[1] / sf), im2.shape[0]))
im_concat = np.hstack((im1, im2))
return im_concat
def imvstack(im1, im2):
sf = im1.shape[1] / im2.shape[1]
if sf > 1:
im2 = cv2.resize(im2, (int(im2.shape[0] / sf), im1.shape[1]))
elif sf < 1:
im1 = cv2.resize(im1, (int(im1.shape[0] / sf), im2.shape[1]))
im_concat = np.vstack((im1, im2))
return im_concat | 29,091 | 38.154778 | 206 | py |
omni3d | omni3d-main/cubercnn/util/math_util.py | # Copyright (c) Meta Platforms, Inc. and affiliates
import math
import numpy as np
import pandas as pd
from typing import Tuple, List
from copy import copy
from pytorch3d.renderer.lighting import PointLights
from pytorch3d.renderer.mesh.renderer import MeshRenderer
from pytorch3d.renderer.mesh.shader import SoftPhongShader
import cv2
import torch
from pytorch3d.structures import Meshes
from detectron2.structures import BoxMode
from pytorch3d.renderer import TexturesVertex
from pytorch3d.structures.meshes import (
Meshes,
)
from pytorch3d.renderer import (
PerspectiveCameras,
RasterizationSettings,
MeshRasterizer
)
from pytorch3d.renderer import (
PerspectiveCameras,
SoftSilhouetteShader,
RasterizationSettings,
MeshRasterizer
)
from detectron2.data import (
MetadataCatalog,
)
from pytorch3d.transforms import axis_angle_to_matrix
from pytorch3d.renderer import MeshRenderer as MR
UNIT_CUBE = np.array([
[-0.5, -0.5, -0.5],
[ 0.5, -0.5, -0.5],
[ 0.5, 0.5, -0.5],
[-0.5, 0.5, -0.5],
[-0.5, -0.5, 0.5],
[ 0.5, -0.5, 0.5],
[ 0.5, 0.5, 0.5],
[-0.5, 0.5, 0.5]
])
def upto_2Pi(val):
out = val
# constrain between [0, 2pi)
while out >= 2*math.pi: out -= math.pi * 2
while out < 0: out += math.pi * 2
return out
def upto_Pi(val):
out = val
# constrain between [0, pi)
while out >= math.pi: out -= math.pi
while out < 0: out += math.pi
return out
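# Examples for the two wrappers above (illustrative): upto_2Pi(-0.5) ~= 5.783
# (i.e. -0.5 + 2*pi) and upto_Pi(3.5) ~= 0.358 (i.e. 3.5 - pi); they wrap angles
# into [0, 2*pi) and [0, pi) respectively.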
# Calculates rotation matrix to euler angles
# The result is the same as MATLAB except the order
# of the euler angles ( x and z are swapped ).
# adopted from https://www.learnopencv.com/rotation-matrix-to-euler-angles/
def mat2euler(R):
sy = math.sqrt(R[0, 0] * R[0, 0] + R[1, 0] * R[1, 0])
#singular = sy < 1e-6
x = math.atan2(R[2, 1], R[2, 2])
y = math.atan2(-R[2, 0], sy)
z = math.atan2(R[1, 0], R[0, 0])
return np.array([x, y, z])
# Calculates Rotation Matrix given euler angles.
# adopted from https://www.learnopencv.com/rotation-matrix-to-euler-angles/
def euler2mat(euler):
R_x = np.array([[1, 0, 0],
[0, math.cos(euler[0]), -math.sin(euler[0])],
[0, math.sin(euler[0]), math.cos(euler[0])]
])
R_y = np.array([[math.cos(euler[1]), 0, math.sin(euler[1])],
[0, 1, 0],
[-math.sin(euler[1]), 0, math.cos(euler[1])]
])
R_z = np.array([[math.cos(euler[2]), -math.sin(euler[2]), 0],
[math.sin(euler[2]), math.cos(euler[2]), 0],
[0, 0, 1]
])
R = np.dot(R_z, np.dot(R_y, R_x))
return R
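# Round-trip example for the two conversions above (illustrative): away from the
# gimbal-lock singularity, mat2euler(euler2mat([0.1, 0.2, 0.3])) recovers
# approximately [0.1, 0.2, 0.3], since euler2mat composes R = R_z @ R_y @ R_x.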
def to_float_tensor(input):
data_type = type(input)
if data_type != torch.Tensor:
input = torch.tensor(input)
return input.float()
def get_cuboid_verts_faces(box3d=None, R=None):
"""
Computes vertices and faces from a 3D cuboid representation.
Args:
        box3d (flexible): [[X Y Z W H L]]
R (flexible): [np.array(3x3)]
Returns:
verts: the 3D vertices of the cuboid in camera space
faces: the vertex indices per face
"""
if box3d is None:
box3d = [0, 0, 0, 1, 1, 1]
# make sure types are correct
box3d = to_float_tensor(box3d)
if R is not None:
R = to_float_tensor(R)
squeeze = len(box3d.shape) == 1
if squeeze:
box3d = box3d.unsqueeze(0)
if R is not None:
R = R.unsqueeze(0)
n = len(box3d)
x3d = box3d[:, 0].unsqueeze(1)
y3d = box3d[:, 1].unsqueeze(1)
z3d = box3d[:, 2].unsqueeze(1)
w3d = box3d[:, 3].unsqueeze(1)
h3d = box3d[:, 4].unsqueeze(1)
l3d = box3d[:, 5].unsqueeze(1)
'''
v4_____________________v5
/| /|
/ | / |
/ | / |
/___|_________________/ |
v0| | |v1 |
| | | |
| | | |
| | | |
| |_________________|___|
| / v7 | /v6
| / | /
| / | /
|/_____________________|/
v3 v2
'''
verts = to_float_tensor(torch.zeros([n, 3, 8], device=box3d.device))
# setup X
verts[:, 0, [0, 3, 4, 7]] = -l3d / 2
verts[:, 0, [1, 2, 5, 6]] = l3d / 2
# setup Y
verts[:, 1, [0, 1, 4, 5]] = -h3d / 2
verts[:, 1, [2, 3, 6, 7]] = h3d / 2
# setup Z
verts[:, 2, [0, 1, 2, 3]] = -w3d / 2
verts[:, 2, [4, 5, 6, 7]] = w3d / 2
if R is not None:
# rotate
verts = R @ verts
# translate
verts[:, 0, :] += x3d
verts[:, 1, :] += y3d
verts[:, 2, :] += z3d
verts = verts.transpose(1, 2)
faces = torch.tensor([
[0, 1, 2], # front TR
[2, 3, 0], # front BL
[1, 5, 6], # right TR
[6, 2, 1], # right BL
[4, 0, 3], # left TR
[3, 7, 4], # left BL
[5, 4, 7], # back TR
[7, 6, 5], # back BL
[4, 5, 1], # top TR
[1, 0, 4], # top BL
[3, 2, 6], # bottom TR
[6, 7, 3], # bottom BL
]).float().unsqueeze(0).repeat([n, 1, 1])
if squeeze:
verts = verts.squeeze()
faces = faces.squeeze()
return verts, faces.to(verts.device)
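# Example usage of get_cuboid_verts_faces (illustrative): called with no arguments it
# returns the vertices/faces of a unit cube at the origin, i.e. an (8, 3) verts tensor
# and a (12, 3) faces tensor; a batch of N boxes yields (N, 8, 3) and (N, 12, 3).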
def get_cuboid_verts(K, box3d, R=None, view_R=None, view_T=None):
# make sure types are correct
K = to_float_tensor(K)
box3d = to_float_tensor(box3d)
if R is not None:
R = to_float_tensor(R)
squeeze = len(box3d.shape) == 1
if squeeze:
box3d = box3d.unsqueeze(0)
if R is not None:
R = R.unsqueeze(0)
n = len(box3d)
if len(K.shape) == 2:
K = K.unsqueeze(0).repeat([n, 1, 1])
corners_3d, _ = get_cuboid_verts_faces(box3d, R)
if view_T is not None:
corners_3d -= view_T.view(1, 1, 3)
if view_R is not None:
corners_3d = (view_R @ corners_3d[0].T).T.unsqueeze(0)
if view_T is not None:
corners_3d[:, :, -1] += view_T.view(1, 1, 3)[:, :, -1]*1.25
# project to 2D
corners_2d = K @ corners_3d.transpose(1, 2)
corners_2d[:, :2, :] = corners_2d[:, :2, :] / corners_2d[:, 2, :].unsqueeze(1)
corners_2d = corners_2d.transpose(1, 2)
if squeeze:
corners_3d = corners_3d.squeeze()
corners_2d = corners_2d.squeeze()
return corners_2d, corners_3d
def approx_eval_resolution(h, w, scale_min=0, scale_max=1e10):
"""
    Approximates the resolution that an image of h x w resolution would be
    resized to when run through a model that constrains its scale to a min and max.
Args:
h (int): input resolution height
w (int): input resolution width
        scale_min (int): minimum scale the image is allowed to be resized to
        scale_max (int): maximum scale the image is allowed to be resized to
Returns:
h (int): output resolution height
w (int): output resolution width
sf (float): scaling factor that was applied
which can convert from original --> network resolution.
"""
orig_h = h
# first resize to min
sf = scale_min / min(h, w)
h *= sf
w *= sf
# next resize to max
sf = min(scale_max / max(h, w), 1.0)
h *= sf
w *= sf
return h, w, h/orig_h
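# Worked example for approx_eval_resolution (illustrative values): with h=480, w=640,
# scale_min=512 and scale_max=1333, the short side is first scaled to 512
# (sf = 512/480), giving roughly 512 x 682.7; that already respects the 1333 max, so
# the function returns (512.0, ~682.7, ~1.067).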
def compute_priors(cfg, datasets, max_cluster_rounds=1000, min_points_for_std=5):
"""
Computes priors via simple averaging or a custom K-Means clustering.
"""
annIds = datasets.getAnnIds()
anns = datasets.loadAnns(annIds)
data_raw = []
category_names = MetadataCatalog.get('omni3d_model').thing_classes
virtual_depth = cfg.MODEL.ROI_CUBE_HEAD.VIRTUAL_DEPTH
virtual_focal = cfg.MODEL.ROI_CUBE_HEAD.VIRTUAL_FOCAL
test_scale_min = cfg.INPUT.MIN_SIZE_TEST
test_scale_max = cfg.INPUT.MAX_SIZE_TEST
'''
Accumulate the annotations while discarding the 2D center information
(hence, keeping only the 2D and 3D scale information, and properties.)
'''
for ann_idx, ann in enumerate(anns):
category_name = ann['category_name'].lower()
ignore = ann['ignore']
dataset_id = ann['dataset_id']
image_id = ann['image_id']
fy = datasets.imgs[image_id]['K'][1][1]
im_h = datasets.imgs[image_id]['height']
im_w = datasets.imgs[image_id]['width']
f = 2 * fy / im_h
if cfg.DATASETS.MODAL_2D_BOXES and 'bbox2D_tight' in ann and ann['bbox2D_tight'][0] != -1:
x, y, w, h = BoxMode.convert(ann['bbox2D_tight'], BoxMode.XYXY_ABS, BoxMode.XYWH_ABS)
elif cfg.DATASETS.TRUNC_2D_BOXES and 'bbox2D_trunc' in ann and not np.all([val==-1 for val in ann['bbox2D_trunc']]):
x, y, w, h = BoxMode.convert(ann['bbox2D_trunc'], BoxMode.XYXY_ABS, BoxMode.XYWH_ABS)
elif 'bbox2D_proj' in ann:
x, y, w, h = BoxMode.convert(ann['bbox2D_proj'], BoxMode.XYXY_ABS, BoxMode.XYWH_ABS)
else:
continue
x3d, y3d, z3d = ann['center_cam']
w3d, h3d, l3d = ann['dimensions']
test_h, test_w, sf = approx_eval_resolution(im_h, im_w, test_scale_min, test_scale_max)
# scale everything to test resolution
h *= sf
w *= sf
if virtual_depth:
virtual_to_real = compute_virtual_scale_from_focal_spaces(fy, im_h, virtual_focal, test_h)
real_to_virtual = 1/virtual_to_real
z3d *= real_to_virtual
scale = np.sqrt(h**2 + w**2)
if (not ignore) and category_name in category_names:
data_raw.append([category_name, w, h, x3d, y3d, z3d, w3d, h3d, l3d, w3d*h3d*l3d, dataset_id, image_id, fy, f, scale])
# TODO pandas is fairly inefficient to rely on for large scale.
df_raw = pd.DataFrame(data_raw, columns=[
'name',
'w', 'h', 'x3d', 'y3d', 'z3d',
'w3d', 'h3d', 'l3d', 'volume',
'dataset', 'image',
'fy', 'f', 'scale'
])
priors_bins = []
priors_dims_per_cat = []
priors_z3d_per_cat = []
priors_y3d_per_cat = []
# compute priors for z and y globally
priors_z3d = [df_raw.z3d.mean(), df_raw.z3d.std()]
priors_y3d = [df_raw.y3d.mean(), df_raw.y3d.std()]
n_bins = cfg.MODEL.ROI_CUBE_HEAD.CLUSTER_BINS
# Each prior is pre-computed per category
for cat in category_names:
df_cat = df_raw[df_raw.name == cat]
'''
First compute static variable statistics
'''
scales = torch.FloatTensor(np.array(df_cat.scale))
n = len(scales)
if n > 0:
priors_dims_per_cat.append([[df_cat.w3d.mean(), df_cat.h3d.mean(), df_cat.l3d.mean()], [df_cat.w3d.std(), df_cat.h3d.std(), df_cat.l3d.std()]])
priors_z3d_per_cat.append([df_cat.z3d.mean(), df_cat.z3d.std()])
priors_y3d_per_cat.append([df_cat.y3d.mean(), df_cat.y3d.std()])
else:
# dummy data.
priors_dims_per_cat.append([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]])
priors_z3d_per_cat.append([50, 50])
priors_y3d_per_cat.append([1, 10])
'''
Next compute Z cluster statistics based on y and area
'''
def compute_cluster_scale_mean(scales, assignments, n_bins, match_quality):
cluster_scales = []
for bin in range(n_bins):
in_cluster = assignments==bin
if in_cluster.sum() < min_points_for_std:
in_cluster[match_quality[:, bin].topk(min_points_for_std)[1]] = True
scale = scales[in_cluster].mean()
cluster_scales.append(scale.item())
return torch.FloatTensor(cluster_scales)
if n_bins > 1:
if n < min_points_for_std:
print('Warning {} category has only {} valid samples...'.format(cat, n))
# dummy data since category doesn't have available samples.
max_scale = cfg.MODEL.ANCHOR_GENERATOR.SIZES[-1][-1]
min_scale = cfg.MODEL.ANCHOR_GENERATOR.SIZES[0][0]
base = (max_scale / min_scale) ** (1 / (n_bins - 1))
cluster_scales = np.array([min_scale * (base ** i) for i in range(0, n_bins)])
                # default values are unused anyway in training, but range linearly
                # from 100 to 1, descending as the 2D scale ascends.
bin_priors_z = [[b, 15] for b in np.arange(100, 1, -(100-1)/n_bins)]
priors_bins.append((cat, cluster_scales.tolist(), bin_priors_z))
assert len(bin_priors_z) == n_bins, 'Broken default bin scaling.'
else:
max_scale = scales.max()
min_scale = scales.min()
base = (max_scale / min_scale) ** (1 / (n_bins - 1))
cluster_scales = torch.FloatTensor([min_scale * (base ** i) for i in range(0, n_bins)])
best_score = -np.inf
for round in range(max_cluster_rounds):
# quality scores for gts and clusters (n x n_bins)
match_quality = -(cluster_scales.unsqueeze(0) - scales.unsqueeze(1)).abs()
# assign to best clusters
scores, assignments_round = match_quality.max(1)
round_score = scores.mean().item()
if np.round(round_score, 5) > best_score:
best_score = round_score
assignments = assignments_round
# make new clusters
cluster_scales = compute_cluster_scale_mean(scales, assignments, n_bins, match_quality)
else:
break
bin_priors_z = []
for bin in range(n_bins):
in_cluster = assignments == bin
# not enough in the cluster to compute reliable stats?
# fill it with the topk others
if in_cluster.sum() < min_points_for_std:
in_cluster[match_quality[:, bin].topk(min_points_for_std)[1]] = True
# move to numpy for indexing pandas
in_cluster = in_cluster.numpy()
z3d_mean = df_cat.z3d[in_cluster].mean()
z3d_std = df_cat.z3d[in_cluster].std()
bin_priors_z.append([z3d_mean, z3d_std])
priors_bins.append((cat, cluster_scales.numpy().tolist(), bin_priors_z))
priors = {
'priors_dims_per_cat': priors_dims_per_cat,
'priors_z3d_per_cat': priors_z3d_per_cat,
'priors_y3d_per_cat': priors_y3d_per_cat,
'priors_bins': priors_bins,
'priors_y3d': priors_y3d,
'priors_z3d': priors_z3d,
}
return priors
def convert_3d_box_to_2d(K, box3d, R=None, clipw=0, cliph=0, XYWH=True, min_z=0.20):
"""
Converts a 3D box to a 2D box via projection.
Args:
K (np.array): intrinsics matrix 3x3
        box3d (flexible): [[X Y Z W H L]]
R (flexible): [np.array(3x3)]
clipw (int): clip invalid X to the image bounds. Image width is usually used here.
cliph (int): clip invalid Y to the image bounds. Image height is usually used here.
XYWH (bool): returns in XYWH if true, otherwise XYXY format.
min_z: the threshold for how close a vertex is allowed to be before being
considered as invalid for projection purposes.
Returns:
box2d (flexible): the 2D box results.
behind_camera (bool): whether the projection has any points behind the camera plane.
fully_behind (bool): all points are behind the camera plane.
"""
# bounds used for vertices behind image plane
topL_bound = torch.tensor([[0, 0, 0]]).float()
topR_bound = torch.tensor([[clipw-1, 0, 0]]).float()
botL_bound = torch.tensor([[0, cliph-1, 0]]).float()
botR_bound = torch.tensor([[clipw-1, cliph-1, 0]]).float()
# make sure types are correct
K = to_float_tensor(K)
box3d = to_float_tensor(box3d)
if R is not None:
R = to_float_tensor(R)
squeeze = len(box3d.shape) == 1
if squeeze:
box3d = box3d.unsqueeze(0)
if R is not None:
R = R.unsqueeze(0)
n = len(box3d)
verts2d, verts3d = get_cuboid_verts(K, box3d, R)
# any boxes behind camera plane?
verts_behind = verts2d[:, :, 2] <= min_z
behind_camera = verts_behind.any(1)
verts_signs = torch.sign(verts3d)
# check for any boxes projected behind image plane corners
topL = verts_behind & (verts_signs[:, :, 0] < 0) & (verts_signs[:, :, 1] < 0)
topR = verts_behind & (verts_signs[:, :, 0] > 0) & (verts_signs[:, :, 1] < 0)
botL = verts_behind & (verts_signs[:, :, 0] < 0) & (verts_signs[:, :, 1] > 0)
botR = verts_behind & (verts_signs[:, :, 0] > 0) & (verts_signs[:, :, 1] > 0)
# clip values to be in bounds for invalid points
verts2d[topL] = topL_bound
verts2d[topR] = topR_bound
verts2d[botL] = botL_bound
verts2d[botR] = botR_bound
x, xi = verts2d[:, :, 0].min(1)
y, yi = verts2d[:, :, 1].min(1)
x2, x2i = verts2d[:, :, 0].max(1)
y2, y2i = verts2d[:, :, 1].max(1)
fully_behind = verts_behind.all(1)
width = x2 - x
height = y2 - y
if XYWH:
box2d = torch.cat((x.unsqueeze(1), y.unsqueeze(1), width.unsqueeze(1), height.unsqueeze(1)), dim=1)
else:
box2d = torch.cat((x.unsqueeze(1), y.unsqueeze(1), x2.unsqueeze(1), y2.unsqueeze(1)), dim=1)
if squeeze:
box2d = box2d.squeeze()
behind_camera = behind_camera.squeeze()
fully_behind = fully_behind.squeeze()
return box2d, behind_camera, fully_behind
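# Example usage of convert_3d_box_to_2d (illustrative values, not from the original
# file): with K = [[500, 0, 320], [0, 500, 240], [0, 0, 1]], an axis-aligned 2x2x2 box
# centered at (0, 0, 10) projects to an XYWH box of roughly (264, 184, 111, 111), with
# behind_camera and fully_behind both False.
# box2d, behind, fully_behind = convert_3d_box_to_2d(K, [0, 0, 10, 2, 2, 2], R=np.eye(3))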
def compute_virtual_scale_from_focal_spaces(f, H, f0, H0):
"""
Computes the scaling factor of depth from f0, H0 to f, H
Args:
f (float): the desired [virtual] focal length (px)
H (float): the desired [virtual] height (px)
f0 (float): the initial [real] focal length (px)
H0 (float): the initial [real] height (px)
Returns:
        the scaling factor float to convert from (f0, H0) --> (f, H)
"""
return (H0 * f) / (f0 * H)
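# Worked example (illustrative numbers): with f=1000 px, H=500 px, f0=500 px and
# H0=500 px the factor is (500 * 1000) / (500 * 500) = 2.0. In compute_priors above,
# this is called as compute_virtual_scale_from_focal_spaces(fy, im_h, virtual_focal,
# test_h) to obtain a virtual-to-real depth factor, whose reciprocal maps real depths
# into the virtual space.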
def R_to_allocentric(K, R, u=None, v=None):
"""
Convert a rotation matrix or series of rotation matrices to allocentric
representation given a 2D location (u, v) in pixels.
When u or v are not available, we fall back on the principal point of K.
"""
if type(K) == torch.Tensor:
fx = K[:, 0, 0]
fy = K[:, 1, 1]
sx = K[:, 0, 2]
sy = K[:, 1, 2]
n = len(K)
oray = torch.stack(((u - sx)/fx, (v - sy)/fy, torch.ones_like(u))).T
oray = oray / torch.linalg.norm(oray, dim=1).unsqueeze(1)
angle = torch.acos(oray[:, -1])
axis = torch.zeros_like(oray)
axis[:, 0] = axis[:, 0] - oray[:, 1]
axis[:, 1] = axis[:, 1] + oray[:, 0]
norms = torch.linalg.norm(axis, dim=1)
valid_angle = angle > 0
M = axis_angle_to_matrix(angle.unsqueeze(1)*axis/norms.unsqueeze(1))
R_view = R.clone()
R_view[valid_angle] = torch.bmm(M[valid_angle].transpose(2, 1), R[valid_angle])
else:
fx = K[0][0]
fy = K[1][1]
sx = K[0][2]
sy = K[1][2]
if u is None:
u = sx
if v is None:
v = sy
oray = np.array([(u - sx)/fx, (v - sy)/fy, 1])
oray = oray / np.linalg.norm(oray)
cray = np.array([0, 0, 1])
angle = math.acos(cray.dot(oray))
if angle != 0:
axis = np.cross(cray, oray)
axis_torch = torch.from_numpy(angle*axis/np.linalg.norm(axis)).float()
R_view = np.dot(axis_angle_to_matrix(axis_torch).numpy().T, R)
else:
R_view = R
return R_view
def R_from_allocentric(K, R_view, u=None, v=None):
"""
Convert a rotation matrix or series of rotation matrices to egocentric
representation given a 2D location (u, v) in pixels.
When u or v are not available, we fall back on the principal point of K.
"""
if type(K) == torch.Tensor:
fx = K[:, 0, 0]
fy = K[:, 1, 1]
sx = K[:, 0, 2]
sy = K[:, 1, 2]
n = len(K)
oray = torch.stack(((u - sx)/fx, (v - sy)/fy, torch.ones_like(u))).T
oray = oray / torch.linalg.norm(oray, dim=1).unsqueeze(1)
angle = torch.acos(oray[:, -1])
axis = torch.zeros_like(oray)
axis[:, 0] = axis[:, 0] - oray[:, 1]
axis[:, 1] = axis[:, 1] + oray[:, 0]
norms = torch.linalg.norm(axis, dim=1)
valid_angle = angle > 0
M = axis_angle_to_matrix(angle.unsqueeze(1)*axis/norms.unsqueeze(1))
R = R_view.clone()
R[valid_angle] = torch.bmm(M[valid_angle], R_view[valid_angle])
else:
fx = K[0][0]
fy = K[1][1]
sx = K[0][2]
sy = K[1][2]
if u is None:
u = sx
if v is None:
v = sy
oray = np.array([(u - sx)/fx, (v - sy)/fy, 1])
oray = oray / np.linalg.norm(oray)
cray = np.array([0, 0, 1])
angle = math.acos(cray.dot(oray))
if angle != 0:
#axis = np.cross(cray, oray)
axis = np.array([-oray[1], oray[0], 0])
axis_torch = torch.from_numpy(angle*axis/np.linalg.norm(axis)).float()
R = np.dot(axis_angle_to_matrix(axis_torch).numpy(), R_view)
else:
R = R_view
return R
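# Note on the two conversions above (illustrative): R_from_allocentric inverts
# R_to_allocentric for the same K and pixel location, i.e.
# R ~= R_from_allocentric(K, R_to_allocentric(K, R, u, v), u, v). At the principal
# point the viewing ray coincides with the optical axis, the correction angle is zero,
# and the rotation is returned unchanged.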
def render_depth_map(K, box3d, pose, width, height, device=None):
cameras = get_camera(K, width, height)
renderer = get_basic_renderer(cameras, width, height)
mesh = mesh_cuboid(box3d, pose)
if device is not None:
cameras = cameras.to(device)
renderer = renderer.to(device)
mesh = mesh.to(device)
im_rendered, fragment = renderer(mesh)
silhouettes = im_rendered[:, :, :, -1] > 0
zbuf = fragment.zbuf[:, :, :, 0]
zbuf[zbuf==-1] = math.inf
depth_map, depth_map_inds = zbuf.min(dim=0)
return silhouettes, depth_map, depth_map_inds
def estimate_visibility(K, box3d, pose, width, height, device=None):
silhouettes, depth_map, depth_map_inds = render_depth_map(K, box3d, pose, width, height, device=device)
n = silhouettes.shape[0]
visibilies = []
for annidx in range(n):
area = silhouettes[annidx].sum()
visible = (depth_map_inds[silhouettes[annidx]] == annidx).sum()
visibilies.append((visible / area).item())
return visibilies
def estimate_truncation(K, box3d, R, imW, imH):
box2d, out_of_bounds, fully_behind = convert_3d_box_to_2d(K, box3d, R, imW, imH)
if fully_behind:
return 1.0
box2d = box2d.detach().cpu().numpy().tolist()
box2d_XYXY = BoxMode.convert(box2d, BoxMode.XYWH_ABS, BoxMode.XYXY_ABS)
image_box = np.array([0, 0, imW-1, imH-1])
truncation = 1 - iou(np.array(box2d_XYXY)[np.newaxis], image_box[np.newaxis], ign_area_b=True)
return truncation.item()
def mesh_cuboid(box3d=None, R=None, color=None):
verts, faces = get_cuboid_verts_faces(box3d, R)
if verts.ndim == 2:
verts = to_float_tensor(verts).unsqueeze(0)
faces = to_float_tensor(faces).unsqueeze(0)
ninstances = len(verts)
if (isinstance(color, Tuple) or isinstance(color, List)) and len(color) == 3:
color = torch.tensor(color).view(1, 1, 3).expand(ninstances, 8, 3).float()
# pass in a tensor of colors per box
    elif color is not None and color.ndim == 2:
color = to_float_tensor(color).unsqueeze(1).expand(ninstances, 8, 3).float()
device = verts.device
mesh = Meshes(verts=verts, faces=faces, textures=None if color is None else TexturesVertex(verts_features=color).to(device))
return mesh
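# Example usage of mesh_cuboid (illustrative sketch; the box values are placeholders):
# mesh_cuboid([0, 0, 10, 2, 1, 3], R=np.eye(3), color=(0.5, 0.5, 0.5)) returns a
# pytorch3d Meshes object with 8 vertices and 12 triangular faces for the single box.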
def get_camera(K, width, height, switch_hands=True, R=None, T=None):
K = to_float_tensor(K)
if switch_hands:
K = K @ torch.tensor([
[-1, 0, 0],
[0, -1, 0],
[0, 0, 1]
]).float()
fx = K[0, 0]
fy = K[1, 1]
px = K[0, 2]
py = K[1, 2]
if R is None:
camera = PerspectiveCameras(
focal_length=((fx, fy),), principal_point=((px, py),),
image_size=((height, width),), in_ndc=False
)
else:
camera = PerspectiveCameras(
focal_length=((fx, fy),), principal_point=((px, py),),
image_size=((height, width),), in_ndc=False, R=R, T=T
)
return camera
def get_basic_renderer(cameras, width, height, use_color=False):
raster_settings = RasterizationSettings(
image_size=(height, width),
blur_radius=0 if use_color else np.log(1. / 1e-4 - 1.) * 1e-4,
faces_per_pixel=1,
perspective_correct=False,
)
if use_color:
# SoftPhongShader, HardPhongShader, HardFlatShader, SoftGouraudShader
lights = PointLights(location=[[0.0, 0.0, 0.0]])
shader = SoftPhongShader(cameras=cameras, lights=lights)
else:
shader = SoftSilhouetteShader()
renderer = MeshRenderer(
rasterizer=MeshRasterizer(
cameras=cameras,
raster_settings=raster_settings,
),
shader=shader
)
return renderer
class MeshRenderer(MR):
def __init__(self, rasterizer, shader):
super().__init__(rasterizer, shader)
def forward(self, meshes_world, **kwargs) -> torch.Tensor:
fragments = self.rasterizer(meshes_world, **kwargs)
images = self.shader(fragments, meshes_world, **kwargs)
return images, fragments
def iou(box_a, box_b, mode='cross', ign_area_b=False):
"""
Computes the amount of Intersection over Union (IoU) between two different sets of boxes.
Args:
box_a (array or tensor): Mx4 boxes, defined by [x1, y1, x2, y2]
        box_b (array or tensor): Nx4 boxes, defined by [x1, y1, x2, y2]
mode (str): either 'cross' or 'list', where cross will check all combinations of box_a and
box_b hence MxN array, and list expects the same size list M == N, hence returns Mx1 array.
ign_area_b (bool): if true then we ignore area of b. e.g., checking % box a is inside b
"""
data_type = type(box_a)
# this mode computes the IoU in the sense of cross.
# i.e., box_a = M x 4, box_b = N x 4 then the output is M x N
if mode == 'cross':
inter = intersect(box_a, box_b, mode=mode)
area_a = ((box_a[:, 2] - box_a[:, 0]) *
(box_a[:, 3] - box_a[:, 1]))
area_b = ((box_b[:, 2] - box_b[:, 0]) *
(box_b[:, 3] - box_b[:, 1]))
# torch.Tensor
if data_type == torch.Tensor:
union = area_a.unsqueeze(0)
if not ign_area_b:
union = union + area_b.unsqueeze(1) - inter
return (inter / union).permute(1, 0)
# np.ndarray
elif data_type == np.ndarray:
union = np.expand_dims(area_a, 0)
if not ign_area_b:
union = union + np.expand_dims(area_b, 1) - inter
return (inter / union).T
# unknown type
else:
raise ValueError('unknown data type {}'.format(data_type))
# this mode compares every box in box_a with target in box_b
# i.e., box_a = M x 4 and box_b = M x 4 then output is M x 1
elif mode == 'list':
inter = intersect(box_a, box_b, mode=mode)
area_a = (box_a[:, 2] - box_a[:, 0]) * (box_a[:, 3] - box_a[:, 1])
area_b = (box_b[:, 2] - box_b[:, 0]) * (box_b[:, 3] - box_b[:, 1])
union = area_a + area_b - inter
return inter / union
else:
raise ValueError('unknown mode {}'.format(mode))
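# Example usage of iou (illustrative values): in 'cross' mode,
# iou(np.array([[0., 0., 10., 10.]]),
#     np.array([[0., 0., 10., 10.], [5., 5., 15., 15.]]))
# returns a 1x2 array of roughly [[1.00, 0.14]].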
def intersect(box_a, box_b, mode='cross'):
"""
Computes the amount of intersect between two different sets of boxes.
Args:
box_a (nparray): Mx4 boxes, defined by [x1, y1, x2, y2]
        box_b (nparray): Nx4 boxes, defined by [x1, y1, x2, y2]
mode (str): either 'cross' or 'list', where cross will check all combinations of box_a and
box_b hence MxN array, and list expects the same size list M == N, hence returns Mx1 array.
        Note: the data type (torch.Tensor or np.ndarray) is determined automatically from the inputs.
"""
# determine type
data_type = type(box_a)
# this mode computes the intersect in the sense of cross.
# i.e., box_a = M x 4, box_b = N x 4 then the output is M x N
if mode == 'cross':
# np.ndarray
if data_type == np.ndarray:
max_xy = np.minimum(box_a[:, 2:4], np.expand_dims(box_b[:, 2:4], axis=1))
min_xy = np.maximum(box_a[:, 0:2], np.expand_dims(box_b[:, 0:2], axis=1))
inter = np.clip((max_xy - min_xy), a_min=0, a_max=None)
elif data_type == torch.Tensor:
max_xy = torch.min(box_a[:, 2:4], box_b[:, 2:4].unsqueeze(1))
min_xy = torch.max(box_a[:, 0:2], box_b[:, 0:2].unsqueeze(1))
inter = torch.clamp((max_xy - min_xy), 0)
# unknown type
else:
raise ValueError('type {} is not implemented'.format(data_type))
return inter[:, :, 0] * inter[:, :, 1]
# this mode computes the intersect in the sense of list_a vs. list_b.
# i.e., box_a = M x 4, box_b = M x 4 then the output is Mx1
elif mode == 'list':
        # torch.Tensor
if data_type == torch.Tensor:
max_xy = torch.min(box_a[:, 2:], box_b[:, 2:])
min_xy = torch.max(box_a[:, :2], box_b[:, :2])
inter = torch.clamp((max_xy - min_xy), 0)
# np.ndarray
elif data_type == np.ndarray:
max_xy = np.min(box_a[:, 2:], box_b[:, 2:])
min_xy = np.max(box_a[:, :2], box_b[:, :2])
inter = np.clip((max_xy - min_xy), a_min=0, a_max=None)
# unknown type
else:
raise ValueError('unknown data type {}'.format(data_type))
return inter[:, 0] * inter[:, 1]
else:
raise ValueError('unknown mode {}'.format(mode))
def scaled_sigmoid(vals, min=0.0, max=1.0):
"""
Simple helper function for a scaled sigmoid.
The output is bounded by (min, max)
Args:
vals (Tensor): input logits to scale
min (Tensor or float): the minimum value to scale to.
max (Tensor or float): the maximum value to scale to.
"""
return min + (max-min)*torch.sigmoid(vals) | 31,079 | 30.779141 | 167 | py |
omni3d | omni3d-main/cubercnn/data/dataset_mapper.py | # Copyright (c) Meta Platforms, Inc. and affiliates
import copy
import torch
import numpy as np
from detectron2.structures import BoxMode, Keypoints
from detectron2.data import detection_utils
from detectron2.data import transforms as T
from detectron2.data import (
DatasetMapper
)
from detectron2.structures import (
Boxes,
BoxMode,
Instances,
)
class DatasetMapper3D(DatasetMapper):
def __call__(self, dataset_dict):
dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below
image = detection_utils.read_image(dataset_dict["file_name"], format=self.image_format)
detection_utils.check_image_size(dataset_dict, image)
aug_input = T.AugInput(image)
transforms = self.augmentations(aug_input)
image = aug_input.image
image_shape = image.shape[:2] # h, w
# Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,
# but not efficient on large generic data structures due to the use of pickle & mp.Queue.
# Therefore it's important to use torch.Tensor.
dataset_dict["image"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))
        # no need for additional processing at inference
if not self.is_train:
return dataset_dict
if "annotations" in dataset_dict:
dataset_id = dataset_dict['dataset_id']
K = np.array(dataset_dict['K'])
unknown_categories = self.dataset_id_to_unknown_cats[dataset_id]
# transform and pop off annotations
annos = [
transform_instance_annotations(obj, transforms, K=K)
for obj in dataset_dict.pop("annotations") if obj.get("iscrowd", 0) == 0
]
# convert to instance format
instances = annotations_to_instances(annos, image_shape, unknown_categories)
dataset_dict["instances"] = detection_utils.filter_empty_instances(instances)
return dataset_dict
'''
Cached for mirroring annotations
'''
_M1 = np.array([
[1, 0, 0],
[0, -1, 0],
[0, 0, -1]
])
_M2 = np.array([
[-1., 0., 0.],
[ 0., -1., 0.],
[ 0., 0., 1.]
])
def transform_instance_annotations(annotation, transforms, *, K):
if isinstance(transforms, (tuple, list)):
transforms = T.TransformList(transforms)
# bbox is 1d (per-instance bounding box)
bbox = BoxMode.convert(annotation["bbox"], annotation["bbox_mode"], BoxMode.XYXY_ABS)
bbox = transforms.apply_box(np.array([bbox]))[0]
annotation["bbox"] = bbox
annotation["bbox_mode"] = BoxMode.XYXY_ABS
if annotation['center_cam'][2] != 0:
# project the 3D box annotation XYZ_3D to screen
point3D = annotation['center_cam']
point2D = K @ np.array(point3D)
point2D[:2] = point2D[:2] / point2D[-1]
annotation["center_cam_proj"] = point2D.tolist()
# apply coords transforms to 2D box
annotation["center_cam_proj"][0:2] = transforms.apply_coords(
point2D[np.newaxis][:, :2]
)[0].tolist()
keypoints = (K @ np.array(annotation["bbox3D_cam"]).T).T
keypoints[:, 0] /= keypoints[:, -1]
keypoints[:, 1] /= keypoints[:, -1]
if annotation['ignore']:
# all keypoints marked as not visible
# 0 - unknown, 1 - not visible, 2 visible
keypoints[:, 2] = 1
else:
valid_keypoints = keypoints[:, 2] > 0
# 0 - unknown, 1 - not visible, 2 visible
keypoints[:, 2] = 2
keypoints[valid_keypoints, 2] = 2
# in place
transforms.apply_coords(keypoints[:, :2])
annotation["keypoints"] = keypoints.tolist()
# manually apply mirror for pose
for transform in transforms:
        # horizontal flip?
if isinstance(transform, T.HFlipTransform):
pose = _M1 @ np.array(annotation["pose"]) @ _M2
annotation["pose"] = pose.tolist()
annotation["R_cam"] = pose.tolist()
return annotation
def annotations_to_instances(annos, image_size, unknown_categories):
# init
target = Instances(image_size)
# add classes, 2D boxes, 3D boxes and poses
target.gt_classes = torch.tensor([int(obj["category_id"]) for obj in annos], dtype=torch.int64)
target.gt_boxes = Boxes([BoxMode.convert(obj["bbox"], obj["bbox_mode"], BoxMode.XYXY_ABS) for obj in annos])
target.gt_boxes3D = torch.FloatTensor([anno['center_cam_proj'] + anno['dimensions'] + anno['center_cam'] for anno in annos])
target.gt_poses = torch.FloatTensor([anno['pose'] for anno in annos])
n = len(target.gt_classes)
# do keypoints?
target.gt_keypoints = Keypoints(torch.FloatTensor([anno['keypoints'] for anno in annos]))
gt_unknown_category_mask = torch.zeros(max(unknown_categories)+1, dtype=bool)
gt_unknown_category_mask[torch.tensor(list(unknown_categories))] = True
# include available category indices as tensor with GTs
target.gt_unknown_category_mask = gt_unknown_category_mask.unsqueeze(0).repeat([n, 1])
return target
| 5,231 | 32.538462 | 128 | py |
omni3d | omni3d-main/cubercnn/data/build.py | # Copyright (c) Meta Platforms, Inc. and affiliates
import itertools
import logging
import numpy as np
import math
from collections import defaultdict
import torch.utils.data
from detectron2.config import configurable
from detectron2.utils.logger import _log_api_usage
from detectron2.data.catalog import DatasetCatalog
from detectron2.data.common import DatasetFromList, MapDataset
from detectron2.data.dataset_mapper import DatasetMapper
from detectron2.data.samplers import (
InferenceSampler,
RepeatFactorTrainingSampler,
TrainingSampler
)
from detectron2.data.build import (
filter_images_with_only_crowd_annotations,
build_batch_data_loader,
trivial_batch_collator
)
def get_detection_dataset_dicts(names, filter_empty=True, **kwargs):
if isinstance(names, str):
names = [names]
assert len(names), names
dataset_dicts = [DatasetCatalog.get(dataset_name) for dataset_name in names]
for dataset_name, dicts in zip(names, dataset_dicts):
assert len(dicts), "Dataset '{}' is empty!".format(dataset_name)
dataset_dicts = list(itertools.chain.from_iterable(dataset_dicts))
has_instances = "annotations" in dataset_dicts[0]
if filter_empty and has_instances:
dataset_dicts = filter_images_with_only_crowd_annotations(dataset_dicts)
assert len(dataset_dicts), "No valid data found in {}.".format(",".join(names))
return dataset_dicts
def _train_loader_from_config(cfg, mapper=None, *, dataset=None, sampler=None, dataset_id_to_src=None):
if dataset is None:
dataset = get_detection_dataset_dicts(
cfg.DATASETS.TRAIN,
filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS,
min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE
if cfg.MODEL.KEYPOINT_ON
else 0,
proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None,
)
_log_api_usage("dataset." + cfg.DATASETS.TRAIN[0])
if mapper is None:
mapper = DatasetMapper(cfg, True)
if sampler is None:
sampler_name = cfg.DATALOADER.SAMPLER_TRAIN
balance_datasets = cfg.DATALOADER.BALANCE_DATASETS
logger = logging.getLogger(__name__)
logger.info("Using training sampler {}".format(sampler_name))
if balance_datasets:
assert dataset_id_to_src is not None, 'Need dataset sources.'
dataset_source_to_int = {val:i for i, val in enumerate(set(dataset_id_to_src.values()))}
dataset_ids_per_img = [dataset_source_to_int[dataset_id_to_src[img['dataset_id']]] for img in dataset]
dataset_ids = np.unique(dataset_ids_per_img)
# only one source? don't re-weight then.
if len(dataset_ids) == 1:
weights_per_img = torch.ones(len(dataset_ids_per_img)).float()
# compute per-dataset weights.
else:
counts = np.bincount(dataset_ids_per_img)
counts = [counts[id] for id in dataset_ids]
weights = [1 - count/np.sum(counts) for count in counts]
weights = [weight/np.min(weights) for weight in weights]
weights_per_img = torch.zeros(len(dataset_ids_per_img)).float()
dataset_ids_per_img = torch.FloatTensor(dataset_ids_per_img).long()
# copy weights
for dataset_id, weight in zip(dataset_ids, weights):
weights_per_img[dataset_ids_per_img == dataset_id] = weight
# no special sampling whatsoever
if sampler_name == "TrainingSampler" and not balance_datasets:
sampler = TrainingSampler(len(dataset))
# balance the weight sampling by datasets
elif sampler_name == "TrainingSampler" and balance_datasets:
sampler = RepeatFactorTrainingSampler(weights_per_img)
# balance the weight sampling by categories
elif sampler_name == "RepeatFactorTrainingSampler" and not balance_datasets:
repeat_factors = repeat_factors_from_category_frequency(
dataset, cfg.DATALOADER.REPEAT_THRESHOLD
)
sampler = RepeatFactorTrainingSampler(repeat_factors)
# balance the weight sampling by categories AND by dataset frequency
elif sampler_name == "RepeatFactorTrainingSampler" and balance_datasets:
repeat_factors = repeat_factors_from_category_frequency(
dataset, cfg.DATALOADER.REPEAT_THRESHOLD
)
repeat_factors *= weights_per_img
repeat_factors /= repeat_factors.min().item()
sampler = RepeatFactorTrainingSampler(repeat_factors)
else:
raise ValueError("Unknown training sampler: {}".format(sampler_name))
return {
"dataset": dataset,
"sampler": sampler,
"mapper": mapper,
"total_batch_size": cfg.SOLVER.IMS_PER_BATCH,
"aspect_ratio_grouping": cfg.DATALOADER.ASPECT_RATIO_GROUPING,
"num_workers": cfg.DATALOADER.NUM_WORKERS,
}
def repeat_factors_from_category_frequency(dataset_dicts, repeat_thresh):
"""
Compute (fractional) per-image repeat factors based on category frequency.
The repeat factor for an image is a function of the frequency of the rarest
category labeled in that image. The "frequency of category c" in [0, 1] is defined
as the fraction of images in the training set (without repeats) in which category c
appears.
See :paper:`lvis` (>= v2) Appendix B.2.
Args:
dataset_dicts (list[dict]): annotations in Detectron2 dataset format.
repeat_thresh (float): frequency threshold below which data is repeated.
If the frequency is half of `repeat_thresh`, the image will be
repeated twice.
Returns:
torch.Tensor:
the i-th element is the repeat factor for the dataset image at index i.
"""
# 1. For each category c, compute the fraction of images that contain it: f(c)
category_freq = defaultdict(int)
for dataset_dict in dataset_dicts: # For each image (without repeats)
cat_ids = {ann["category_id"] for ann in dataset_dict["annotations"]}
for cat_id in cat_ids:
if cat_id < 0: continue
category_freq[cat_id] += 1
num_images = len(dataset_dicts)
for k, v in category_freq.items():
category_freq[k] = v / num_images
# 2. For each category c, compute the category-level repeat factor:
# r(c) = max(1, sqrt(t / f(c)))
category_rep = {
cat_id: max(1.0, math.sqrt(repeat_thresh / cat_freq))
for cat_id, cat_freq in category_freq.items()
}
# 3. For each image I, compute the image-level repeat factor:
# r(I) = max_{c in I} r(c)
rep_factors = []
for dataset_dict in dataset_dicts:
cat_ids = {ann["category_id"] for ann in dataset_dict["annotations"]}
rep_factor = max({category_rep[cat_id] for cat_id in cat_ids if cat_id >= 0}, default=1.0)
rep_factors.append(rep_factor)
return torch.tensor(rep_factors, dtype=torch.float32)
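# Worked example for the repeat factors above (illustrative numbers): with
# repeat_thresh t = 0.04, a category present in 1% of images has f(c) = 0.01 and
# r(c) = max(1, sqrt(0.04 / 0.01)) = 2.0, so an image whose rarest category is that one
# gets a repeat factor of 2; categories with f(c) >= t keep a factor of 1.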
@configurable(from_config=_train_loader_from_config)
def build_detection_train_loader(dataset, *, mapper, sampler=None, total_batch_size, aspect_ratio_grouping=True, num_workers=0):
if isinstance(dataset, list):
dataset = DatasetFromList(dataset, copy=False)
if mapper is not None:
dataset = MapDataset(dataset, mapper)
if sampler is None:
sampler = TrainingSampler(len(dataset))
assert isinstance(sampler, torch.utils.data.sampler.Sampler)
return build_batch_data_loader(
dataset,
sampler,
total_batch_size,
aspect_ratio_grouping=aspect_ratio_grouping,
num_workers=num_workers
)
def _test_loader_from_config(cfg, dataset_name, mapper=None):
if isinstance(dataset_name, str):
dataset_name = [dataset_name]
dataset = get_detection_dataset_dicts(
dataset_name,
filter_empty=False,
proposal_files=[
cfg.DATASETS.PROPOSAL_FILES_TEST[list(cfg.DATASETS.TEST).index(x)] for x in dataset_name
]
if cfg.MODEL.LOAD_PROPOSALS
else None,
)
if mapper is None:
mapper = DatasetMapper(cfg, False)
return {"dataset": dataset, "mapper": mapper, "num_workers": cfg.DATALOADER.NUM_WORKERS}
@configurable(from_config=_test_loader_from_config)
def build_detection_test_loader(dataset, *, mapper, sampler=None, num_workers=0):
if isinstance(dataset, list):
dataset = DatasetFromList(dataset, copy=False)
if mapper is not None:
dataset = MapDataset(dataset, mapper)
if sampler is None:
sampler = InferenceSampler(len(dataset))
# Always use 1 image per worker during inference since this is the
# standard when reporting inference time in papers.
batch_sampler = torch.utils.data.sampler.BatchSampler(sampler, 1, drop_last=False)
data_loader = torch.utils.data.DataLoader(
dataset,
num_workers=num_workers,
batch_sampler=batch_sampler,
collate_fn=trivial_batch_collator,
)
return data_loader
| 9,407 | 39.551724 | 128 | py |
omni3d | omni3d-main/cubercnn/modeling/backbone/dla.py | # Copyright (c) Meta Platforms, Inc. and affiliates
import os
import math
import numpy as np
import torch
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
import torch.nn.functional as F
import detectron2.utils.comm as comm
from detectron2.layers import ShapeSpec
from detectron2.modeling.backbone import Backbone
from detectron2.modeling.backbone.build import BACKBONE_REGISTRY
from detectron2.modeling.backbone.fpn import FPN
BatchNorm = nn.BatchNorm2d
"""
Adapted models from repositories
Deep Layer Aggregation CVPR 2018
https://github.com/ucbdrive/dla
BSD-3 Licence https://github.com/ucbdrive/dla/blob/master/LICENSE
Geometry Uncertainty Projection Network for Monocular 3D Object Detection, ICCV 2021
https://github.com/SuperMHP/GUPNet/blob/main/code/lib/backbones/dla.py
MIT Licence https://github.com/SuperMHP/GUPNet/blob/main/LICENSE
"""
def get_model_url(data='imagenet', name='dla34', hash='ba72cf86'):
return os.path.join('http://dl.yf.io/dla/models', data, '{}-{}.pth'.format(name, hash))
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
def __init__(self, inplanes, planes, stride=1, dilation=1):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3,
stride=stride, padding=dilation,
bias=False, dilation=dilation)
self.bn1 = BatchNorm(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
stride=1, padding=dilation,
bias=False, dilation=dilation)
self.bn2 = BatchNorm(planes)
self.stride = stride
def forward(self, x, residual=None):
if residual is None:
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 2
def __init__(self, inplanes, planes, stride=1, dilation=1):
super(Bottleneck, self).__init__()
expansion = Bottleneck.expansion
bottle_planes = planes // expansion
self.conv1 = nn.Conv2d(inplanes, bottle_planes,
kernel_size=1, bias=False)
self.bn1 = BatchNorm(bottle_planes)
self.conv2 = nn.Conv2d(bottle_planes, bottle_planes, kernel_size=3,
stride=stride, padding=dilation,
bias=False, dilation=dilation)
self.bn2 = BatchNorm(bottle_planes)
self.conv3 = nn.Conv2d(bottle_planes, planes,
kernel_size=1, bias=False)
self.bn3 = BatchNorm(planes)
self.relu = nn.ReLU(inplace=True)
self.stride = stride
def forward(self, x, residual=None):
if residual is None:
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
out += residual
out = self.relu(out)
return out
class BottleneckX(nn.Module):
expansion = 2
cardinality = 32
def __init__(self, inplanes, planes, stride=1, dilation=1):
super(BottleneckX, self).__init__()
cardinality = BottleneckX.cardinality
# dim = int(math.floor(planes * (BottleneckV5.expansion / 64.0)))
# bottle_planes = dim * cardinality
bottle_planes = planes * cardinality // 32
self.conv1 = nn.Conv2d(inplanes, bottle_planes,
kernel_size=1, bias=False)
self.bn1 = BatchNorm(bottle_planes)
self.conv2 = nn.Conv2d(bottle_planes, bottle_planes, kernel_size=3,
stride=stride, padding=dilation, bias=False,
dilation=dilation, groups=cardinality)
self.bn2 = BatchNorm(bottle_planes)
self.conv3 = nn.Conv2d(bottle_planes, planes,
kernel_size=1, bias=False)
self.bn3 = BatchNorm(planes)
self.relu = nn.ReLU(inplace=True)
self.stride = stride
def forward(self, x, residual=None):
if residual is None:
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
out += residual
out = self.relu(out)
return out
class Root(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, residual):
super(Root, self).__init__()
self.conv = nn.Conv2d(
in_channels, out_channels, 1,
stride=1, bias=False, padding=(kernel_size - 1) // 2)
self.bn = BatchNorm(out_channels)
self.relu = nn.ReLU(inplace=True)
self.residual = residual
def forward(self, *x):
children = x
x = self.conv(torch.cat(x, 1))
x = self.bn(x)
if self.residual:
x += children[0]
x = self.relu(x)
return x
class Tree(nn.Module):
def __init__(self, levels, block, in_channels, out_channels, stride=1,
level_root=False, root_dim=0, root_kernel_size=1,
dilation=1, root_residual=False):
super(Tree, self).__init__()
if root_dim == 0:
root_dim = 2 * out_channels
if level_root:
root_dim += in_channels
if levels == 1:
self.tree1 = block(in_channels, out_channels, stride,
dilation=dilation)
self.tree2 = block(out_channels, out_channels, 1,
dilation=dilation)
else:
self.tree1 = Tree(levels - 1, block, in_channels, out_channels,
stride, root_dim=0,
root_kernel_size=root_kernel_size,
dilation=dilation, root_residual=root_residual)
self.tree2 = Tree(levels - 1, block, out_channels, out_channels,
root_dim=root_dim + out_channels,
root_kernel_size=root_kernel_size,
dilation=dilation, root_residual=root_residual)
if levels == 1:
self.root = Root(root_dim, out_channels, root_kernel_size,
root_residual)
self.level_root = level_root
self.root_dim = root_dim
self.downsample = None
self.project = None
self.levels = levels
if stride > 1:
self.downsample = nn.MaxPool2d(stride, stride=stride)
if in_channels != out_channels:
self.project = nn.Sequential(
nn.Conv2d(in_channels, out_channels,
kernel_size=1, stride=1, bias=False),
BatchNorm(out_channels)
)
def forward(self, x, residual=None, children=None):
children = [] if children is None else children
bottom = self.downsample(x) if self.downsample else x
residual = self.project(bottom) if self.project else bottom
if self.level_root:
children.append(bottom)
x1 = self.tree1(x, residual)
if self.levels == 1:
x2 = self.tree2(x1)
x = self.root(x2, x1, *children)
else:
children.append(x1)
x = self.tree2(x1, children=children)
return x
class DLA(nn.Module):
def __init__(self, levels, channels, num_classes=1000,
block=BasicBlock, residual_root=False, return_levels=False,
pool_size=7, linear_root=False):
super(DLA, self).__init__()
self.channels = channels
self.return_levels = return_levels
self.num_classes = num_classes
self.base_layer = nn.Sequential(
nn.Conv2d(3, channels[0], kernel_size=7, stride=1,
padding=3, bias=False),
BatchNorm(channels[0]),
nn.ReLU(inplace=True))
self.level0 = self._make_conv_level(
channels[0], channels[0], levels[0])
self.level1 = self._make_conv_level(
channels[0], channels[1], levels[1], stride=2)
self.level2 = Tree(levels[2], block, channels[1], channels[2], 2,
level_root=False,
root_residual=residual_root)
self.level3 = Tree(levels[3], block, channels[2], channels[3], 2,
level_root=True, root_residual=residual_root)
self.level4 = Tree(levels[4], block, channels[3], channels[4], 2,
level_root=True, root_residual=residual_root)
self.level5 = Tree(levels[5], block, channels[4], channels[5], 2,
level_root=True, root_residual=residual_root)
self.avgpool = nn.AvgPool2d(pool_size)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, BatchNorm):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_level(self, block, inplanes, planes, blocks, stride=1):
downsample = None
if stride != 1 or inplanes != planes:
downsample = nn.Sequential(
nn.MaxPool2d(stride, stride=stride),
nn.Conv2d(inplanes, planes,
kernel_size=1, stride=1, bias=False),
BatchNorm(planes),
)
layers = []
layers.append(block(inplanes, planes, stride, downsample=downsample))
for i in range(1, blocks):
layers.append(block(inplanes, planes))
return nn.Sequential(*layers)
def _make_conv_level(self, inplanes, planes, convs, stride=1, dilation=1):
modules = []
for i in range(convs):
modules.extend([
nn.Conv2d(inplanes, planes, kernel_size=3,
stride=stride if i == 0 else 1,
padding=dilation, bias=False, dilation=dilation),
BatchNorm(planes),
nn.ReLU(inplace=True)])
inplanes = planes
return nn.Sequential(*modules)
def load_pretrained_model(self, data='imagenet', name='dla34', hash='ba72cf86'):
        # load model only on the main process
        # to prevent redundant model caching
if comm.is_main_process():
model_url = get_model_url(data, name, hash)
model_weights = model_zoo.load_url(model_url)
del model_weights['fc.weight']
del model_weights['fc.bias']
self.load_state_dict(model_weights)
def dla34(pretrained=False, tricks=False, **kwargs): # DLA-34
model = DLA([1, 1, 1, 2, 2, 1],
[16, 32, 64, 128, 256, 512],
block=BasicBlock, **kwargs)
if pretrained:
if tricks:
model.load_pretrained_model(data='imagenet', name='dla34+tricks', hash='24a49e58')
else:
model.load_pretrained_model(data='imagenet', name='dla34', hash='ba72cf86')
return model
def dla46_c(pretrained=False, **kwargs): # DLA-46-C
Bottleneck.expansion = 2
model = DLA([1, 1, 1, 2, 2, 1],
[16, 32, 64, 64, 128, 256],
block=Bottleneck, **kwargs)
if pretrained:
model.load_pretrained_model(data='imagenet', name='dla46_c', hash='2bfd52c3')
return model
def dla46x_c(pretrained=False, **kwargs): # DLA-X-46-C
BottleneckX.expansion = 2
model = DLA([1, 1, 1, 2, 2, 1],
[16, 32, 64, 64, 128, 256],
block=BottleneckX, **kwargs)
if pretrained:
model.load_pretrained_model(data='imagenet', name='dla46x_c', hash='d761bae7')
return model
def dla60x_c(pretrained=False, **kwargs): # DLA-X-60-C
BottleneckX.expansion = 2
model = DLA([1, 1, 1, 2, 3, 1],
[16, 32, 64, 64, 128, 256],
block=BottleneckX, **kwargs)
if pretrained:
model.load_pretrained_model(data='imagenet', name='dla60x_c', hash='b870c45c')
return model
def dla60(pretrained=False, tricks=False, **kwargs): # DLA-60
Bottleneck.expansion = 2
model = DLA([1, 1, 1, 2, 3, 1],
[16, 32, 128, 256, 512, 1024],
block=Bottleneck, **kwargs)
if pretrained:
if tricks:
model.load_pretrained_model(data='imagenet', name='dla60+tricks', hash='14488826')
else:
model.load_pretrained_model(data='imagenet', name='dla60', hash='24839fc4')
return model
def dla60x(pretrained=False, **kwargs): # DLA-X-60
BottleneckX.expansion = 2
model = DLA([1, 1, 1, 2, 3, 1],
[16, 32, 128, 256, 512, 1024],
block=BottleneckX, **kwargs)
if pretrained:
model.load_pretrained_model(data='imagenet', name='dla60x', hash='d15cacda')
return model
def dla102(pretrained=False, tricks=False, **kwargs): # DLA-102
Bottleneck.expansion = 2
model = DLA([1, 1, 1, 3, 4, 1], [16, 32, 128, 256, 512, 1024],
block=Bottleneck, residual_root=True, **kwargs)
if pretrained:
if tricks:
model.load_pretrained_model(data='imagenet', name='dla102+tricks', hash='27a30eac')
else:
model.load_pretrained_model(data='imagenet', name='dla102', hash='d94d9790')
return model
def dla102x(pretrained=False, **kwargs): # DLA-X-102
BottleneckX.expansion = 2
model = DLA([1, 1, 1, 3, 4, 1], [16, 32, 128, 256, 512, 1024],
block=BottleneckX, residual_root=True, **kwargs)
if pretrained:
model.load_pretrained_model(data='imagenet', name='dla102x', hash='ad62be81')
return model
def dla102x2(pretrained=False, **kwargs): # DLA-X-102 64
BottleneckX.cardinality = 64
model = DLA([1, 1, 1, 3, 4, 1], [16, 32, 128, 256, 512, 1024],
block=BottleneckX, residual_root=True, **kwargs)
if pretrained:
model.load_pretrained_model(data='imagenet', name='dla102x2', hash='262837b6')
return model
def dla169(pretrained=False, **kwargs): # DLA-169
Bottleneck.expansion = 2
model = DLA([1, 1, 2, 3, 5, 1], [16, 32, 128, 256, 512, 1024],
block=Bottleneck, residual_root=True, **kwargs)
if pretrained:
model.load_pretrained_model(data='imagenet', name='dla169', hash='0914e092')
return model
class DLABackbone(Backbone):
def __init__(self, cfg, input_shape, pretrained=True):
super().__init__()
if cfg.MODEL.DLA.TYPE == 'dla34':
base = dla34(pretrained=pretrained, tricks=cfg.MODEL.DLA.TRICKS)
self._out_feature_channels = {'p2': 64, 'p3': 128, 'p4': 256, 'p5': 512, 'p6': 512}
elif cfg.MODEL.DLA.TYPE == 'dla46_c':
base = dla46_c(pretrained=pretrained)
self._out_feature_channels = {'p2': 64, 'p3': 64, 'p4': 128, 'p5': 256, 'p6': 256}
elif cfg.MODEL.DLA.TYPE == 'dla46x_c':
base = dla46x_c(pretrained=pretrained)
self._out_feature_channels = {'p2': 64, 'p3': 64, 'p4': 128, 'p5': 256, 'p6': 256}
elif cfg.MODEL.DLA.TYPE == 'dla60x_c':
base = dla60x_c(pretrained=pretrained)
self._out_feature_channels = {'p2': 64, 'p3': 64, 'p4': 128, 'p5': 256, 'p6': 256}
elif cfg.MODEL.DLA.TYPE == 'dla60':
base = dla60(pretrained=pretrained, tricks=cfg.MODEL.DLA.TRICKS)
self._out_feature_channels = {'p2': 128, 'p3': 256, 'p4': 512, 'p5': 1024, 'p6': 1024}
elif cfg.MODEL.DLA.TYPE == 'dla60x':
base = dla60x(pretrained=pretrained)
self._out_feature_channels = {'p2': 128, 'p3': 256, 'p4': 512, 'p5': 1024, 'p6': 1024}
elif cfg.MODEL.DLA.TYPE == 'dla102':
base = dla102(pretrained=pretrained, tricks=cfg.MODEL.DLA.TRICKS)
self._out_feature_channels = {'p2': 128, 'p3': 256, 'p4': 512, 'p5': 1024, 'p6': 1024}
elif cfg.MODEL.DLA.TYPE == 'dla102x':
base = dla102x(pretrained=pretrained)
self._out_feature_channels = {'p2': 128, 'p3': 256, 'p4': 512, 'p5': 1024, 'p6': 1024}
elif cfg.MODEL.DLA.TYPE == 'dla102x2':
base = dla102x2(pretrained=pretrained)
self._out_feature_channels = {'p2': 128, 'p3': 256, 'p4': 512, 'p5': 1024, 'p6': 1024}
elif cfg.MODEL.DLA.TYPE == 'dla169':
base = dla169(pretrained=pretrained)
self._out_feature_channels = {'p2': 128, 'p3': 256, 'p4': 512, 'p5': 1024, 'p6': 1024}
self.base_layer = base.base_layer
self.level0 = base.level0
self.level1 = base.level1
self.level2 = base.level2
self.level3 = base.level3
self.level4 = base.level4
self.level5 = base.level5
        self._out_feature_strides = {'p2': 4, 'p3': 8, 'p4': 16, 'p5': 32, 'p6': 64}
self._out_features = ['p2', 'p3', 'p4', 'p5', 'p6']
def forward(self, x):
outputs = {}
base_layer = self.base_layer(x)
level0 = self.level0(base_layer)
level1 = self.level1(level0)
level2 = self.level2(level1)
level3 = self.level3(level2)
level4 = self.level4(level3)
level5 = self.level5(level4)
level6 = F.max_pool2d(level5, kernel_size=1, stride=2, padding=0)
outputs['p2'] = level2
outputs['p3'] = level3
outputs['p4'] = level4
outputs['p5'] = level5
outputs['p6'] = level6
return outputs
@BACKBONE_REGISTRY.register()
def build_dla_from_vision_fpn_backbone(cfg, input_shape: ShapeSpec, priors=None):
"""
Args:
cfg: a detectron2 CfgNode
Returns:
backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`.
"""
    imagenet_pretrain = (cfg.MODEL.WEIGHTS_PRETRAIN + cfg.MODEL.WEIGHTS) == ''
bottom_up = DLABackbone(cfg, input_shape, pretrained=imagenet_pretrain)
in_features = cfg.MODEL.FPN.IN_FEATURES
out_channels = cfg.MODEL.FPN.OUT_CHANNELS
backbone = FPN(
bottom_up=bottom_up,
in_features=in_features,
out_channels=out_channels,
norm=cfg.MODEL.FPN.NORM,
fuse_type=cfg.MODEL.FPN.FUSE_TYPE,
)
return backbone | 18,904 | 36.287968 | 98 | py |
omni3d | omni3d-main/cubercnn/modeling/backbone/resnet.py | # Copyright (c) Meta Platforms, Inc. and affiliates
from torchvision import models
from detectron2.layers import ShapeSpec
from detectron2.modeling.backbone import Backbone
from detectron2.modeling.backbone.fpn import LastLevelMaxPool
from detectron2.modeling.backbone.resnet import build_resnet_backbone
from detectron2.modeling.backbone.build import BACKBONE_REGISTRY
import torch.nn.functional as F
from detectron2.modeling.backbone.fpn import FPN
class ResNet(Backbone):
def __init__(self, cfg, input_shape, pretrained=True):
super().__init__()
if cfg.MODEL.RESNETS.DEPTH == 18:
base = models.resnet18(pretrained)
self._out_feature_channels = {'p2': 64, 'p3': 128, 'p4': 256, 'p5': 512, 'p6': 512}
elif cfg.MODEL.RESNETS.DEPTH == 34:
base = models.resnet34(pretrained)
self._out_feature_channels = {'p2': 64, 'p3': 128, 'p4': 256, 'p5': 512, 'p6': 512}
elif cfg.MODEL.RESNETS.DEPTH == 50:
base = models.resnet50(pretrained)
self._out_feature_channels = {'p2': 256, 'p3': 512, 'p4': 1024, 'p5': 2048, 'p6': 2048}
elif cfg.MODEL.RESNETS.DEPTH == 101:
base = models.resnet101(pretrained)
self._out_feature_channels = {'p2': 256, 'p3': 512, 'p4': 1024, 'p5': 2048, 'p6': 2048}
else:
raise ValueError('No configuration currently supporting depth of {}'.format(cfg.MODEL.RESNETS.DEPTH))
self.conv1 = base.conv1
self.bn1 = base.bn1
self.relu = base.relu
self.maxpool = base.maxpool
self.layer1 = base.layer1
self.layer2 = base.layer2
self.layer3 = base.layer3
self.layer4 = base.layer4
        self._out_feature_strides = {'p2': 4, 'p3': 8, 'p4': 16, 'p5': 32, 'p6': 64}
self._out_features = ['p2', 'p3', 'p4', 'p5', 'p6']
def forward(self, x):
outputs = {}
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
p2 = self.layer1(x)
p3 = self.layer2(p2)
p4 = self.layer3(p3)
p5 = self.layer4(p4)
p6 = F.max_pool2d(p5, kernel_size=1, stride=2, padding=0)
outputs['p2'] = p2
outputs['p3'] = p3
outputs['p4'] = p4
outputs['p5'] = p5
outputs['p6'] = p6
return outputs
@BACKBONE_REGISTRY.register()
def build_resnet_from_vision_fpn_backbone(cfg, input_shape: ShapeSpec, priors=None):
"""
Args:
cfg: a detectron2 CfgNode
Returns:
backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`.
"""
    imagenet_pretrain = (cfg.MODEL.WEIGHTS_PRETRAIN + cfg.MODEL.WEIGHTS) == ''
if cfg.MODEL.RESNETS.TORCHVISION:
bottom_up = ResNet(cfg, input_shape, pretrained=imagenet_pretrain)
else:
# use the MSRA modeling logic to build the backbone.
bottom_up = build_resnet_backbone(cfg, input_shape)
in_features = cfg.MODEL.FPN.IN_FEATURES
out_channels = cfg.MODEL.FPN.OUT_CHANNELS
backbone = FPN(
bottom_up=bottom_up,
in_features=in_features,
out_channels=out_channels,
norm=cfg.MODEL.FPN.NORM,
top_block=LastLevelMaxPool(),
fuse_type=cfg.MODEL.FPN.FUSE_TYPE,
)
return backbone
| 3,333 | 33.371134 | 113 | py |
omni3d | omni3d-main/cubercnn/modeling/backbone/mnasnet.py | # Copyright (c) Meta Platforms, Inc. and affiliates
from torchvision import models
from detectron2.layers import ShapeSpec
from detectron2.modeling.backbone import Backbone
from detectron2.modeling.backbone.build import BACKBONE_REGISTRY
import torch.nn.functional as F
from detectron2.modeling.backbone.fpn import FPN
class MNASNetBackbone(Backbone):
def __init__(self, cfg, input_shape, pretrained=True):
super().__init__()
base = models.mnasnet1_0(pretrained)
base = base.layers
self.base = base
self._out_feature_channels = {'p2': 24, 'p3': 40, 'p4': 96, 'p5': 320, 'p6': 320}
        self._out_feature_strides = {'p2': 4, 'p3': 8, 'p4': 16, 'p5': 32, 'p6': 64}
self._out_features = ['p2', 'p3', 'p4', 'p5', 'p6']
def forward(self, x):
outputs = {}
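        # torchvision's mnasnet1_0 exposes a flat `layers` Sequential; the
        # slices below are assumed to split it into stages at strides
        # 4, 8, 16 and 32, matching the p2-p5 feature maps declared above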
p2 = self.base[0:9](x)
p3 = self.base[9](p2)
p4 = self.base[10:12](p3)
p5 = self.base[12:14](p4)
p6 = F.max_pool2d(p5, kernel_size=1, stride=2, padding=0)
outputs['p2'] = p2
outputs['p3'] = p3
outputs['p4'] = p4
outputs['p5'] = p5
outputs['p6'] = p6
return outputs
@BACKBONE_REGISTRY.register()
def build_mnasnet_fpn_backbone(cfg, input_shape: ShapeSpec, priors=None):
"""
Args:
cfg: a detectron2 CfgNode
Returns:
backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`.
"""
    imagenet_pretrain = (cfg.MODEL.WEIGHTS_PRETRAIN + cfg.MODEL.WEIGHTS) == ''
bottom_up = MNASNetBackbone(cfg, input_shape, pretrained=imagenet_pretrain)
in_features = cfg.MODEL.FPN.IN_FEATURES
out_channels = cfg.MODEL.FPN.OUT_CHANNELS
backbone = FPN(
bottom_up=bottom_up,
in_features=in_features,
out_channels=out_channels,
norm=cfg.MODEL.FPN.NORM,
fuse_type=cfg.MODEL.FPN.FUSE_TYPE,
)
return backbone
| 1,936 | 29.265625 | 89 | py |
omni3d | omni3d-main/cubercnn/modeling/backbone/densenet.py | # Copyright (c) Meta Platforms, Inc. and affiliates
from torchvision import models
from detectron2.layers import ShapeSpec
from detectron2.modeling.backbone import Backbone
from detectron2.modeling.backbone.build import BACKBONE_REGISTRY
import torch.nn.functional as F
from detectron2.modeling.backbone.fpn import FPN
class DenseNetBackbone(Backbone):
def __init__(self, cfg, input_shape, pretrained=True):
super().__init__()
base = models.densenet121(pretrained)
base = base.features
self.base = base
self._out_feature_channels = {'p2': 256, 'p3': 512, 'p4': 1024, 'p5': 1024, 'p6': 1024}
        self._out_feature_strides = {'p2': 4, 'p3': 8, 'p4': 16, 'p5': 32, 'p6': 64}
self._out_features = ['p2', 'p3', 'p4', 'p5', 'p6']
def forward(self, x):
outputs = {}
db1 = self.base[0:5](x)
db2 = self.base[5:7](db1)
db3 = self.base[7:9](db2)
p5 = self.base[9:](db3)
p6 = F.max_pool2d(p5, kernel_size=1, stride=2, padding=0)
outputs['p2'] = db1
outputs['p3'] = db2
outputs['p4'] = db3
outputs['p5'] = p5
outputs['p6'] = p6
return outputs
@BACKBONE_REGISTRY.register()
def build_densenet_fpn_backbone(cfg, input_shape: ShapeSpec, priors=None):
"""
Args:
cfg: a detectron2 CfgNode
Returns:
backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`.
"""
    imagenet_pretrain = (cfg.MODEL.WEIGHTS_PRETRAIN + cfg.MODEL.WEIGHTS) == ''
bottom_up = DenseNetBackbone(cfg, input_shape, pretrained=imagenet_pretrain)
in_features = cfg.MODEL.FPN.IN_FEATURES
out_channels = cfg.MODEL.FPN.OUT_CHANNELS
backbone = FPN(
bottom_up=bottom_up,
in_features=in_features,
out_channels=out_channels,
norm=cfg.MODEL.FPN.NORM,
fuse_type=cfg.MODEL.FPN.FUSE_TYPE
)
return backbone | 1,952 | 29.515625 | 95 | py |
omni3d | omni3d-main/cubercnn/modeling/backbone/shufflenet.py | # Copyright (c) Meta Platforms, Inc. and affiliates
from torchvision import models
from detectron2.layers import ShapeSpec
from detectron2.modeling.backbone import Backbone
from detectron2.modeling.backbone.build import BACKBONE_REGISTRY
import torch.nn.functional as F
from detectron2.modeling.backbone.fpn import FPN
class ShufflenetBackbone(Backbone):
def __init__(self, cfg, input_shape, pretrained=True):
super().__init__()
base = models.shufflenet_v2_x1_0(pretrained)
self.conv1 = base.conv1
self.maxpool = base.maxpool
self.stage2 = base.stage2
self.stage3 = base.stage3
self.stage4 = base.stage4
self.conv5 = base.conv5
self._out_feature_channels = {'p2': 24, 'p3': 116, 'p4': 232, 'p5': 464, 'p6': 464}
        self._out_feature_strides = {'p2': 4, 'p3': 8, 'p4': 16, 'p5': 32, 'p6': 64}
self._out_features = ['p2', 'p3', 'p4', 'p5', 'p6']
def forward(self, x):
outputs = {}
x = self.conv1(x)
p2 = self.maxpool(x)
p3 = self.stage2(p2)
p4 = self.stage3(p3)
p5 = self.stage4(p4)
p6 = F.max_pool2d(p5, kernel_size=1, stride=2, padding=0)
outputs['p2'] = p2
outputs['p3'] = p3
outputs['p4'] = p4
outputs['p5'] = p5
outputs['p6'] = p6
return outputs
@BACKBONE_REGISTRY.register()
def build_shufflenet_fpn_backbone(cfg, input_shape: ShapeSpec, priors=None):
"""
Args:
cfg: a detectron2 CfgNode
Returns:
backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`.
"""
    imagenet_pretrain = (cfg.MODEL.WEIGHTS_PRETRAIN + cfg.MODEL.WEIGHTS) == ''
bottom_up = ShufflenetBackbone(cfg, input_shape, pretrained=imagenet_pretrain)
in_features = cfg.MODEL.FPN.IN_FEATURES
out_channels = cfg.MODEL.FPN.OUT_CHANNELS
backbone = FPN(
bottom_up=bottom_up,
in_features=in_features,
out_channels=out_channels,
norm=cfg.MODEL.FPN.NORM,
fuse_type=cfg.MODEL.FPN.FUSE_TYPE,
)
return backbone
| 2,113 | 29.2 | 91 | py |
omni3d | omni3d-main/cubercnn/modeling/meta_arch/rcnn3d.py | # Copyright (c) Meta Platforms, Inc. and affiliates
from typing import Dict, List, Optional
import torch
import numpy as np
from detectron2.layers import ShapeSpec, batched_nms
from detectron2.utils.visualizer import Visualizer
from detectron2.data.detection_utils import convert_image_to_rgb
from detectron2.structures import Instances
from detectron2.utils.events import get_event_storage
from detectron2.data import MetadataCatalog
from detectron2.modeling.backbone import Backbone, BACKBONE_REGISTRY
from detectron2.modeling.proposal_generator import build_proposal_generator
from detectron2.utils.logger import _log_api_usage
from detectron2.modeling.meta_arch import (
META_ARCH_REGISTRY, GeneralizedRCNN
)
from cubercnn.modeling.roi_heads import build_roi_heads
from pytorch3d.transforms import rotation_6d_to_matrix
from cubercnn import util, vis
@META_ARCH_REGISTRY.register()
class RCNN3D(GeneralizedRCNN):
@classmethod
def from_config(cls, cfg, priors=None):
backbone = build_backbone(cfg, priors=priors)
return {
"backbone": backbone,
"proposal_generator": build_proposal_generator(cfg, backbone.output_shape()),
"roi_heads": build_roi_heads(cfg, backbone.output_shape(), priors=priors),
"input_format": cfg.INPUT.FORMAT,
"vis_period": cfg.VIS_PERIOD,
"pixel_mean": cfg.MODEL.PIXEL_MEAN,
"pixel_std": cfg.MODEL.PIXEL_STD,
}
def forward(self, batched_inputs: List[Dict[str, torch.Tensor]]):
if not self.training:
return self.inference(batched_inputs)
images = self.preprocess_image(batched_inputs)
# scaling factor for the sample relative to its original scale
# e.g., how much has the image been upsampled by? or downsampled?
im_scales_ratio = [info['height'] / im.shape[1] for (info, im) in zip(batched_inputs, images)]
# The unmodified intrinsics for the image
Ks = [torch.FloatTensor(info['K']) for info in batched_inputs]
if "instances" in batched_inputs[0]:
gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
else:
gt_instances = None
features = self.backbone(images.tensor)
proposals, proposal_losses = self.proposal_generator(images, features, gt_instances)
instances, detector_losses = self.roi_heads(
images, features, proposals,
Ks, im_scales_ratio,
gt_instances
)
if self.vis_period > 0:
storage = get_event_storage()
if storage.iter % self.vis_period == 0 and storage.iter > 0:
self.visualize_training(batched_inputs, proposals, instances)
losses = {}
losses.update(detector_losses)
losses.update(proposal_losses)
return losses
def inference(
self,
batched_inputs: List[Dict[str, torch.Tensor]],
detected_instances: Optional[List[Instances]] = None,
do_postprocess: bool = True,
):
assert not self.training
images = self.preprocess_image(batched_inputs)
# scaling factor for the sample relative to its original scale
# e.g., how much has the image been upsampled by? or downsampled?
im_scales_ratio = [info['height'] / im.shape[1] for (info, im) in zip(batched_inputs, images)]
# The unmodified intrinsics for the image
Ks = [torch.FloatTensor(info['K']) for info in batched_inputs]
features = self.backbone(images.tensor)
# Pass oracle 2D boxes into the RoI heads
        if isinstance(batched_inputs, list) and np.any(['oracle2D' in b for b in batched_inputs]):
oracles = [b['oracle2D'] for b in batched_inputs]
results, _ = self.roi_heads(images, features, oracles, Ks, im_scales_ratio, None)
# normal inference
else:
proposals, _ = self.proposal_generator(images, features, None)
results, _ = self.roi_heads(images, features, proposals, Ks, im_scales_ratio, None)
if do_postprocess:
assert not torch.jit.is_scripting(), "Scripting is not supported for postprocess."
return GeneralizedRCNN._postprocess(results, batched_inputs, images.image_sizes)
else:
return results
def visualize_training(self, batched_inputs, proposals, instances):
"""
A function used to visualize images and proposals. It shows ground truth
bounding boxes on the original image and up to 20 top-scoring predicted
object proposals on the original image. Users can implement different
visualization functions for different models.
Args:
batched_inputs (list): a list that contains input to the model.
proposals (list): a list that contains predicted proposals. Both
batched_inputs and proposals should have the same length.
instances (list): a list that contains predicted RoIhead instances. Both
batched_inputs and proposals should have the same length.
"""
storage = get_event_storage()
# minimum number of boxes to try to visualize per image
max_vis_prop = 20
if not hasattr(self, 'thing_classes'):
self.thing_classes = MetadataCatalog.get('omni3d_model').thing_classes
self.num_classes = len(self.thing_classes)
for input, prop, instances_i in zip(batched_inputs, proposals, instances):
img = input["image"]
img = convert_image_to_rgb(img.permute(1, 2, 0), self.input_format)
            img_3DGT = np.ascontiguousarray(img.copy()[:, :, [2, 1, 0]]) # BGR
            img_3DPR = np.ascontiguousarray(img.copy()[:, :, [2, 1, 0]]) # BGR
'''
Visualize the 2D GT and proposal predictions
'''
v_gt = Visualizer(img, None)
v_gt = v_gt.overlay_instances(boxes=input["instances"].gt_boxes)
anno_img = v_gt.get_image()
box_size = min(len(prop.proposal_boxes), max_vis_prop)
v_pred = Visualizer(img, None)
v_pred = v_pred.overlay_instances(
boxes=prop.proposal_boxes[0:box_size].tensor.cpu().numpy()
)
prop_img = v_pred.get_image()
vis_img_rpn = np.concatenate((anno_img, prop_img), axis=1)
vis_img_rpn = vis_img_rpn.transpose(2, 0, 1)
storage.put_image("Left: GT 2D bounding boxes; Right: Predicted 2D proposals", vis_img_rpn)
'''
Visualize the 3D GT and predictions
'''
K = torch.tensor(input['K'], device=self.device)
scale = input['height']/img.shape[0]
fx, sx = (val.item()/scale for val in K[0, [0, 2]])
fy, sy = (val.item()/scale for val in K[1, [1, 2]])
K_scaled = torch.tensor(
[[1/scale, 0 , 0], [0, 1/scale, 0], [0, 0, 1.0]],
dtype=torch.float32, device=self.device
) @ K
gts_per_image = input["instances"]
gt_classes = gts_per_image.gt_classes
# Filter out irrelevant groundtruth
fg_selection_mask = (gt_classes != -1) & (gt_classes < self.num_classes)
gt_classes = gt_classes[fg_selection_mask]
gt_class_names = [self.thing_classes[cls_idx] for cls_idx in gt_classes]
gt_boxes = gts_per_image.gt_boxes.tensor[fg_selection_mask] # 2D boxes
gt_poses = gts_per_image.gt_poses[fg_selection_mask] # GT poses
# projected 2D center, depth, w, h, l, 3D center
gt_boxes3D = gts_per_image.gt_boxes3D[fg_selection_mask]
# this box may have been mirrored and scaled so
# we need to recompute XYZ in 3D by backprojecting.
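            # pinhole back-projection: X = Z * (u - c_x) / f_x (and likewise for Y)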
gt_z = gt_boxes3D[:, 2]
gt_x3D = gt_z * (gt_boxes3D[:, 0] - sx)/fx
gt_y3D = gt_z * (gt_boxes3D[:, 1] - sy)/fy
# put together the GT boxes
gt_center_3D = torch.stack((gt_x3D, gt_y3D, gt_z)).T
gt_boxes3D_XYZ_WHL = torch.cat((gt_center_3D, gt_boxes3D[:, 3:6]), dim=1)
gt_colors = torch.tensor(
[util.get_color(i) for i in range(len(gt_boxes3D_XYZ_WHL))],
device=self.device
)/255.0
gt_meshes = util.mesh_cuboid(gt_boxes3D_XYZ_WHL, gt_poses, gt_colors)
# perform a simple NMS, which is not cls dependent.
keep = batched_nms(
instances_i.pred_boxes.tensor,
instances_i.scores,
torch.zeros(len(instances_i.scores), dtype=torch.long, device=instances_i.scores.device),
self.roi_heads.box_predictor.test_nms_thresh
)
keep = keep[:max_vis_prop]
num_to_visualize = len(keep)
pred_xyzwhl = torch.cat((instances_i.pred_center_cam[keep], instances_i.pred_dimensions[keep]), dim=1)
pred_pose = instances_i.pred_pose[keep]
pred_colors = torch.tensor(
[util.get_color(i) for i in range(num_to_visualize)],
device=self.device
)/255.0
pred_boxes = instances_i.pred_boxes[keep]
pred_scores = instances_i.scores[keep]
pred_classes = instances_i.pred_classes[keep]
pred_class_names = ['{} {:.2f}'.format(self.thing_classes[cls_idx], score) for cls_idx, score in zip(pred_classes, pred_scores)]
pred_meshes = util.mesh_cuboid(pred_xyzwhl, pred_pose, pred_colors)
# convert to lists
pred_meshes = [pred_meshes.__getitem__(i).detach() for i in range(len(pred_meshes))]
gt_meshes = [gt_meshes.__getitem__(i) for i in range(len(gt_meshes))]
img_3DPR = vis.draw_scene_view(img_3DPR, K_scaled.cpu().numpy(), pred_meshes, text=pred_class_names, mode='front', blend_weight=0.0, blend_weight_overlay=0.85)
img_3DGT = vis.draw_scene_view(img_3DGT, K_scaled.cpu().numpy(), gt_meshes, text=gt_class_names, mode='front', blend_weight=0.0, blend_weight_overlay=0.85)
# horizontal stack 3D GT and pred left/right
vis_img_3d = np.concatenate((img_3DGT, img_3DPR), axis=1)
vis_img_3d = vis_img_3d[:, :, [2, 1, 0]] # RGB
vis_img_3d = vis_img_3d.astype(np.uint8).transpose(2, 0, 1)
storage.put_image("Left: GT 3D cuboids; Right: Predicted 3D cuboids", vis_img_3d)
break # only visualize one image in a batch
def build_model(cfg, priors=None):
"""
Build the whole model architecture, defined by ``cfg.MODEL.META_ARCHITECTURE``.
Note that it does not load any weights from ``cfg``.
"""
meta_arch = cfg.MODEL.META_ARCHITECTURE
model = META_ARCH_REGISTRY.get(meta_arch)(cfg, priors=priors)
model.to(torch.device(cfg.MODEL.DEVICE))
_log_api_usage("modeling.meta_arch." + meta_arch)
return model
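# Illustrative usage sketch (hypothetical config values): the meta-architecture
# registered above is selected by name, e.g.
#   cfg.MODEL.META_ARCHITECTURE = "RCNN3D"
#   model = build_model(cfg, priors=None)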
def build_backbone(cfg, input_shape=None, priors=None):
"""
Build a backbone from `cfg.MODEL.BACKBONE.NAME`.
Returns:
an instance of :class:`Backbone`
"""
if input_shape is None:
input_shape = ShapeSpec(channels=len(cfg.MODEL.PIXEL_MEAN))
backbone_name = cfg.MODEL.BACKBONE.NAME
backbone = BACKBONE_REGISTRY.get(backbone_name)(cfg, input_shape, priors)
assert isinstance(backbone, Backbone)
return backbone | 11,688 | 41.974265 | 171 | py |
omni3d | omni3d-main/cubercnn/modeling/roi_heads/fast_rcnn.py | # Copyright (c) Meta Platforms, Inc. and affiliates
import torch
from torch.nn import functional as F
from typing import List, Tuple
from fvcore.nn import giou_loss, smooth_l1_loss
from detectron2.utils.events import get_event_storage
from detectron2.layers import cat, cross_entropy, nonzero_tuple, batched_nms
from detectron2.structures import Instances, Boxes
from detectron2.modeling.roi_heads.fast_rcnn import (
FastRCNNOutputLayers, _log_classification_stats
)
from cubercnn.modeling.proposal_generator.rpn import matched_pairwise_iou
def fast_rcnn_inference(
boxes: List[torch.Tensor],
scores: List[torch.Tensor],
image_shapes: List[Tuple[int, int]],
score_thresh: float,
nms_thresh: float,
topk_per_image: int,
):
"""
Call `fast_rcnn_inference_single_image` for all images.
Args:
boxes (list[Tensor]): A list of Tensors of predicted class-specific or class-agnostic
boxes for each image. Element i has shape (Ri, K * 4) if doing
class-specific regression, or (Ri, 4) if doing class-agnostic
regression, where Ri is the number of predicted objects for image i.
This is compatible with the output of :meth:`FastRCNNOutputLayers.predict_boxes`.
scores (list[Tensor]): A list of Tensors of predicted class scores for each image.
Element i has shape (Ri, K + 1), where Ri is the number of predicted objects
for image i. Compatible with the output of :meth:`FastRCNNOutputLayers.predict_probs`.
image_shapes (list[tuple]): A list of (width, height) tuples for each image in the batch.
score_thresh (float): Only return detections with a confidence score exceeding this
threshold.
nms_thresh (float): The threshold to use for box non-maximum suppression. Value in [0, 1].
topk_per_image (int): The number of top scoring detections to return. Set < 0 to return
all detections.
Returns:
instances: (list[Instances]): A list of N instances, one for each image in the batch,
            that stores the top-k most confident detections.
kept_indices: (list[Tensor]): A list of 1D tensor of length of N, each element indicates
the corresponding boxes/scores index in [0, Ri) from the input, for image i.
"""
result_per_image = [
fast_rcnn_inference_single_image(
boxes_per_image, scores_per_image, image_shape, score_thresh, nms_thresh, topk_per_image
)
for scores_per_image, boxes_per_image, image_shape in zip(scores, boxes, image_shapes)
]
return [x[0] for x in result_per_image], [x[1] for x in result_per_image]
def fast_rcnn_inference_single_image(
boxes,
scores,
image_shape: Tuple[int, int],
score_thresh: float,
nms_thresh: float,
topk_per_image: int,
):
"""
Single-image inference. Return bounding-box detection results by thresholding
on scores and applying non-maximum suppression (NMS).
Args:
Same as `fast_rcnn_inference`, but with boxes, scores, and image shapes
per image.
Returns:
Same as `fast_rcnn_inference`, but for only one image.
"""
valid_mask = torch.isfinite(boxes).all(dim=1) & torch.isfinite(scores).all(dim=1)
if not valid_mask.all():
boxes = boxes[valid_mask]
scores = scores[valid_mask]
scores = scores[:, :-1]
num_bbox_reg_classes = boxes.shape[1] // 4
# Convert to Boxes to use the `clip` function ...
boxes = Boxes(boxes.reshape(-1, 4))
boxes.clip(image_shape)
boxes = boxes.tensor.view(-1, num_bbox_reg_classes, 4) # R x C x 4
# 1. Filter results based on detection scores. It can make NMS more efficient
# by filtering out low-confidence detections.
filter_mask = scores > score_thresh # R x K
# R' x 2. First column contains indices of the R predictions;
# Second column contains indices of classes.
filter_inds = filter_mask.nonzero()
if num_bbox_reg_classes == 1:
boxes = boxes[filter_inds[:, 0], 0]
else:
boxes = boxes[filter_mask]
scores_full = scores[filter_inds[:, 0]]
scores = scores[filter_mask]
# 2. Apply NMS for each class independently.
keep = batched_nms(boxes, scores, filter_inds[:, 1], nms_thresh)
if topk_per_image >= 0:
keep = keep[:topk_per_image]
boxes, scores, filter_inds, scores_full = boxes[keep], scores[keep], filter_inds[keep], scores_full[keep]
result = Instances(image_shape)
result.pred_boxes = Boxes(boxes)
result.scores = scores
result.scores_full = scores_full
result.pred_classes = filter_inds[:, 1]
return result, filter_inds[:, 0]
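# Illustrative sketch (hypothetical dummy inputs): 5 class-agnostic proposal
# boxes, 3 foreground classes plus one background column, and a single image
# of size (480, 640). The thresholds are arbitrary example values.
def _example_fast_rcnn_inference_sketch():
    boxes = [torch.tensor([[10.0, 10.0, 50.0, 50.0]]).repeat(5, 1)]  # (R, 4)
    scores = [torch.rand(5, 4).softmax(dim=1)]                       # (R, K + 1)
    image_shapes = [(480, 640)]
    instances, kept_indices = fast_rcnn_inference(
        boxes, scores, image_shapes,
        score_thresh=0.05, nms_thresh=0.5, topk_per_image=100,
    )
    return instances[0], kept_indices[0]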
class FastRCNNOutputs(FastRCNNOutputLayers):
def inference(self, predictions: Tuple[torch.Tensor, torch.Tensor], proposals: List[Instances]):
"""
Args:
predictions: return values of :meth:`forward()`.
proposals (list[Instances]): proposals that match the features that were
used to compute predictions. The ``proposal_boxes`` field is expected.
Returns:
list[Instances]: same as `fast_rcnn_inference`.
list[Tensor]: same as `fast_rcnn_inference`.
"""
boxes = self.predict_boxes(predictions, proposals)
scores = self.predict_probs(predictions, proposals)
image_shapes = [x.image_size for x in proposals]
return fast_rcnn_inference(
boxes,
scores,
image_shapes,
self.test_score_thresh,
self.test_nms_thresh,
self.test_topk_per_image,
)
def losses(self, predictions, proposals):
"""
Args:
predictions: return values of :meth:`forward()`.
proposals (list[Instances]): proposals that match the features that were used
to compute predictions. The fields ``proposal_boxes``, ``gt_boxes``,
``gt_classes`` are expected.
Returns:
Dict[str, Tensor]: dict of losses
"""
scores, proposal_deltas = predictions
# parse classification outputs
gt_classes = (
cat([p.gt_classes for p in proposals], dim=0) if len(proposals) else torch.empty(0)
)
# parse box regression outputs
if len(proposals):
proposal_boxes = cat([p.proposal_boxes.tensor for p in proposals], dim=0) # Nx4
assert not proposal_boxes.requires_grad, "Proposals should not require gradients!"
# If "gt_boxes" does not exist, the proposals must be all negative and
# should not be included in regression loss computation.
# Here we just use proposal_boxes as an arbitrary placeholder because its
# value won't be used in self.box_reg_loss().
gt_boxes = cat(
[(p.gt_boxes if p.has("gt_boxes") else p.proposal_boxes).tensor for p in proposals],
dim=0,
)
else:
proposal_boxes = gt_boxes = torch.empty((0, 4), device=proposal_deltas.device)
normalize_factor = max(gt_classes.numel(), 1.0)
'''
Standard Faster R-CNN losses
'''
_log_classification_stats(scores, gt_classes)
loss_cls = cross_entropy(scores, gt_classes, reduction="mean")
loss_box_reg = self.box_reg_loss(proposal_boxes, gt_boxes, proposal_deltas, gt_classes, reduction="none")
loss_box_reg = (loss_box_reg).sum() / normalize_factor
losses = {
"BoxHead/loss_cls": loss_cls,
"BoxHead/loss_box_reg": loss_box_reg,
}
return {k: v * self.loss_weight.get(k, 1.0) for k, v in losses.items()}
def box_reg_loss(self, proposal_boxes, gt_boxes, pred_deltas, gt_classes, reduction='mean'):
"""
Args:
All boxes are tensors with the same shape Rx(4 or 5).
gt_classes is a long tensor of shape R, the gt class label of each proposal.
R shall be the number of proposals.
"""
box_dim = proposal_boxes.shape[1] # 4 or 5
# Regression loss is only computed for foreground proposals (those matched to a GT)
fg_inds = nonzero_tuple((gt_classes >= 0) & (gt_classes < self.num_classes))[0]
if pred_deltas.shape[1] == box_dim: # cls-agnostic regression
fg_pred_deltas = pred_deltas[fg_inds]
else:
fg_pred_deltas = pred_deltas.view(-1, self.num_classes, box_dim)[
fg_inds, gt_classes[fg_inds]
]
if reduction == 'mean':
if self.box_reg_loss_type == "smooth_l1":
gt_pred_deltas = self.box2box_transform.get_deltas(
proposal_boxes[fg_inds],
gt_boxes[fg_inds],
)
loss_box_reg = smooth_l1_loss(
fg_pred_deltas, gt_pred_deltas, self.smooth_l1_beta, reduction="sum"
)
elif self.box_reg_loss_type == "giou":
fg_pred_boxes = self.box2box_transform.apply_deltas(
fg_pred_deltas, proposal_boxes[fg_inds]
)
loss_box_reg = giou_loss(fg_pred_boxes, gt_boxes[fg_inds], reduction="sum")
else:
raise ValueError(f"Invalid bbox reg loss type '{self.box_reg_loss_type}'")
# The reg loss is normalized using the total number of regions (R), not the number
# of foreground regions even though the box regression loss is only defined on
# foreground regions. Why? Because doing so gives equal training influence to
# each foreground example. To see how, consider two different minibatches:
# (1) Contains a single foreground region
# (2) Contains 100 foreground regions
# If we normalize by the number of foreground regions, the single example in
# minibatch (1) will be given 100 times as much influence as each foreground
# example in minibatch (2). Normalizing by the total number of regions, R,
# means that the single example in minibatch (1) and each of the 100 examples
# in minibatch (2) are given equal influence.
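            # e.g., with (say) R = 512 sampled proposals, each foreground box
            # contributes loss / 512 whether the minibatch holds 1 or 100
            # foreground boxes.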
return loss_box_reg / max(gt_classes.numel(), 1.0) # return 0 if empty
elif reduction == 'none':
if self.box_reg_loss_type == "smooth_l1":
gt_pred_deltas = self.box2box_transform.get_deltas(
proposal_boxes[fg_inds],
gt_boxes[fg_inds],
)
loss_box_reg = smooth_l1_loss(
fg_pred_deltas, gt_pred_deltas, self.smooth_l1_beta, reduction="none"
)
else:
raise ValueError(f"Invalid bbox reg loss type '{self.box_reg_loss_type}'")
# return non-reduced type
return loss_box_reg
else:
raise ValueError(f"Invalid bbox reg reduction type '{reduction}'")
| 11,154 | 41.576336 | 113 | py |
omni3d | omni3d-main/cubercnn/modeling/roi_heads/cube_head.py | # Copyright (c) Meta Platforms, Inc. and affiliates
from detectron2.utils.registry import Registry
from typing import Dict
from detectron2.layers import ShapeSpec
from torch import nn
import torch
import numpy as np
import fvcore.nn.weight_init as weight_init
from pytorch3d.transforms.rotation_conversions import _copysign
from pytorch3d.transforms import (
rotation_6d_to_matrix,
euler_angles_to_matrix,
quaternion_to_matrix
)
ROI_CUBE_HEAD_REGISTRY = Registry("ROI_CUBE_HEAD")
@ROI_CUBE_HEAD_REGISTRY.register()
class CubeHead(nn.Module):
def __init__(self, cfg, input_shape: Dict[str, ShapeSpec]):
super().__init__()
#-------------------------------------------
# Settings
#-------------------------------------------
self.num_classes = cfg.MODEL.ROI_HEADS.NUM_CLASSES
self.use_conf = cfg.MODEL.ROI_CUBE_HEAD.USE_CONFIDENCE
self.z_type = cfg.MODEL.ROI_CUBE_HEAD.Z_TYPE
self.pose_type = cfg.MODEL.ROI_CUBE_HEAD.POSE_TYPE
self.cluster_bins = cfg.MODEL.ROI_CUBE_HEAD.CLUSTER_BINS
self.shared_fc = cfg.MODEL.ROI_CUBE_HEAD.SHARED_FC
#-------------------------------------------
# Feature generator
#-------------------------------------------
num_conv = cfg.MODEL.ROI_CUBE_HEAD.NUM_CONV
conv_dim = cfg.MODEL.ROI_CUBE_HEAD.CONV_DIM
num_fc = cfg.MODEL.ROI_CUBE_HEAD.NUM_FC
fc_dim = cfg.MODEL.ROI_CUBE_HEAD.FC_DIM
conv_dims = [conv_dim] * num_conv
fc_dims = [fc_dim] * num_fc
assert len(conv_dims) + len(fc_dims) > 0
self._output_size = (input_shape.channels, input_shape.height, input_shape.width)
if self.shared_fc:
self.feature_generator = nn.Sequential()
else:
self.feature_generator_XY = nn.Sequential()
self.feature_generator_dims = nn.Sequential()
self.feature_generator_pose = nn.Sequential()
self.feature_generator_Z = nn.Sequential()
if self.use_conf:
self.feature_generator_conf = nn.Sequential()
# create fully connected layers for Cube Head
for k, fc_dim in enumerate(fc_dims):
fc_dim_in = int(np.prod(self._output_size))
self._output_size = fc_dim
if self.shared_fc:
fc = nn.Linear(fc_dim_in, fc_dim)
weight_init.c2_xavier_fill(fc)
self.feature_generator.add_module("fc{}".format(k + 1), fc)
self.feature_generator.add_module("fc_relu{}".format(k + 1), nn.ReLU())
else:
fc = nn.Linear(fc_dim_in, fc_dim)
weight_init.c2_xavier_fill(fc)
self.feature_generator_dims.add_module("fc{}".format(k + 1), fc)
self.feature_generator_dims.add_module("fc_relu{}".format(k + 1), nn.ReLU())
fc = nn.Linear(fc_dim_in, fc_dim)
weight_init.c2_xavier_fill(fc)
self.feature_generator_XY.add_module("fc{}".format(k + 1), fc)
self.feature_generator_XY.add_module("fc_relu{}".format(k + 1), nn.ReLU())
fc = nn.Linear(fc_dim_in, fc_dim)
weight_init.c2_xavier_fill(fc)
self.feature_generator_pose.add_module("fc{}".format(k + 1), fc)
self.feature_generator_pose.add_module("fc_relu{}".format(k + 1), nn.ReLU())
fc = nn.Linear(fc_dim_in, fc_dim)
weight_init.c2_xavier_fill(fc)
self.feature_generator_Z.add_module("fc{}".format(k + 1), fc)
self.feature_generator_Z.add_module("fc_relu{}".format(k + 1), nn.ReLU())
if self.use_conf:
fc = nn.Linear(fc_dim_in, fc_dim)
weight_init.c2_xavier_fill(fc)
self.feature_generator_conf.add_module("fc{}".format(k + 1), fc)
self.feature_generator_conf.add_module("fc_relu{}".format(k + 1), nn.ReLU())
#-------------------------------------------
# 3D outputs
#-------------------------------------------
# Dimensions in meters (width, height, length)
self.bbox_3D_dims = nn.Linear(self._output_size, self.num_classes*3)
nn.init.normal_(self.bbox_3D_dims.weight, std=0.001)
nn.init.constant_(self.bbox_3D_dims.bias, 0)
cluster_bins = self.cluster_bins if self.cluster_bins > 1 else 1
# XY
self.bbox_3D_center_deltas = nn.Linear(self._output_size, self.num_classes*2)
nn.init.normal_(self.bbox_3D_center_deltas.weight, std=0.001)
nn.init.constant_(self.bbox_3D_center_deltas.bias, 0)
# Pose
if self.pose_type == '6d':
self.bbox_3D_pose = nn.Linear(self._output_size, self.num_classes*6)
elif self.pose_type == 'quaternion':
self.bbox_3D_pose = nn.Linear(self._output_size, self.num_classes*4)
elif self.pose_type == 'euler':
self.bbox_3D_pose = nn.Linear(self._output_size, self.num_classes*3)
else:
raise ValueError('Cuboid pose type {} is not recognized'.format(self.pose_type))
nn.init.normal_(self.bbox_3D_pose.weight, std=0.001)
nn.init.constant_(self.bbox_3D_pose.bias, 0)
# Z
self.bbox_3D_center_depth = nn.Linear(self._output_size, self.num_classes*cluster_bins)
nn.init.normal_(self.bbox_3D_center_depth.weight, std=0.001)
nn.init.constant_(self.bbox_3D_center_depth.bias, 0)
# Optionally, box confidence
if self.use_conf:
self.bbox_3D_uncertainty = nn.Linear(self._output_size, self.num_classes*1)
nn.init.normal_(self.bbox_3D_uncertainty.weight, std=0.001)
nn.init.constant_(self.bbox_3D_uncertainty.bias, 5)
def forward(self, x):
n = x.shape[0]
box_z = None
box_uncert = None
box_2d_deltas = None
if self.shared_fc:
features = self.feature_generator(x)
box_2d_deltas = self.bbox_3D_center_deltas(features)
box_dims = self.bbox_3D_dims(features)
box_pose = self.bbox_3D_pose(features)
box_z = self.bbox_3D_center_depth(features)
if self.use_conf:
box_uncert = self.bbox_3D_uncertainty(features).clip(0.01)
else:
box_2d_deltas = self.bbox_3D_center_deltas(self.feature_generator_XY(x))
box_dims = self.bbox_3D_dims(self.feature_generator_dims(x))
box_pose = self.bbox_3D_pose(self.feature_generator_pose(x))
box_z = self.bbox_3D_center_depth(self.feature_generator_Z(x))
if self.use_conf:
box_uncert = self.bbox_3D_uncertainty(self.feature_generator_conf(x)).clip(0.01)
# Pose
if self.pose_type == '6d':
box_pose = rotation_6d_to_matrix(box_pose.view(-1, 6))
elif self.pose_type == 'quaternion':
quats = box_pose.view(-1, 4)
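            # normalize to unit quaternions; _copysign flips the sign so the
            # scalar (w) component is non-negative before converting to a
            # rotation matrix, since q and -q encode the same rotation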
quats_scales = (quats * quats).sum(1)
quats = quats / _copysign(torch.sqrt(quats_scales), quats[:, 0])[:, None]
box_pose = quaternion_to_matrix(quats)
elif self.pose_type == 'euler':
box_pose = euler_angles_to_matrix(box_pose.view(-1, 3), 'XYZ')
box_2d_deltas = box_2d_deltas.view(n, self.num_classes, 2)
box_dims = box_dims.view(n, self.num_classes, 3)
box_pose = box_pose.view(n, self.num_classes, 3, 3)
if self.cluster_bins > 1:
box_z = box_z.view(n, self.cluster_bins, self.num_classes, -1)
else:
box_z = box_z.view(n, self.num_classes, -1)
return box_2d_deltas, box_z, box_dims, box_pose, box_uncert
def build_cube_head(cfg, input_shape: Dict[str, ShapeSpec]):
name = cfg.MODEL.ROI_CUBE_HEAD.NAME
return ROI_CUBE_HEAD_REGISTRY.get(name)(cfg, input_shape) | 8,064 | 38.925743 | 96 | py |
omni3d | omni3d-main/cubercnn/modeling/roi_heads/roi_heads.py | # Copyright (c) Meta Platforms, Inc. and affiliates
import logging
import numpy as np
import cv2
from typing import Dict, List, Tuple
import torch
from torch import nn
import torch.nn.functional as F
from pytorch3d.transforms.so3 import (
so3_relative_angle
)
from detectron2.config import configurable
from detectron2.structures import Instances, Boxes, pairwise_iou, pairwise_ioa
from detectron2.layers import ShapeSpec, nonzero_tuple
from detectron2.modeling.proposal_generator.proposal_utils import add_ground_truth_to_proposals
from detectron2.utils.events import get_event_storage
from detectron2.modeling.roi_heads import (
StandardROIHeads, ROI_HEADS_REGISTRY, select_foreground_proposals,
)
from detectron2.modeling.poolers import ROIPooler
from cubercnn.modeling.roi_heads.cube_head import build_cube_head
from cubercnn.modeling.proposal_generator.rpn import subsample_labels
from cubercnn.modeling.roi_heads.fast_rcnn import FastRCNNOutputs
from cubercnn import util
logger = logging.getLogger(__name__)
E_CONSTANT = 2.71828183
SQRT_2_CONSTANT = 1.41421356
def build_roi_heads(cfg, input_shape, priors=None):
"""
Build ROIHeads defined by `cfg.MODEL.ROI_HEADS.NAME`.
"""
name = cfg.MODEL.ROI_HEADS.NAME
return ROI_HEADS_REGISTRY.get(name)(cfg, input_shape, priors=priors)
@ROI_HEADS_REGISTRY.register()
class ROIHeads3D(StandardROIHeads):
@configurable
def __init__(
self,
*,
ignore_thresh: float,
cube_head: nn.Module,
cube_pooler: nn.Module,
loss_w_3d: float,
loss_w_xy: float,
loss_w_z: float,
loss_w_dims: float,
loss_w_pose: float,
loss_w_joint: float,
use_confidence: float,
inverse_z_weight: bool,
z_type: str,
pose_type: str,
cluster_bins: int,
priors = None,
dims_priors_enabled = None,
dims_priors_func = None,
disentangled_loss=None,
virtual_depth=None,
virtual_focal=None,
test_scale=None,
allocentric_pose=None,
chamfer_pose=None,
scale_roi_boxes=None,
**kwargs,
):
super().__init__(**kwargs)
self.scale_roi_boxes = scale_roi_boxes
# rotation settings
self.allocentric_pose = allocentric_pose
self.chamfer_pose = chamfer_pose
# virtual settings
self.virtual_depth = virtual_depth
self.virtual_focal = virtual_focal
# loss weights, <=0 is off
self.loss_w_3d = loss_w_3d
self.loss_w_xy = loss_w_xy
self.loss_w_z = loss_w_z
self.loss_w_dims = loss_w_dims
self.loss_w_pose = loss_w_pose
self.loss_w_joint = loss_w_joint
# loss modes
self.disentangled_loss = disentangled_loss
self.inverse_z_weight = inverse_z_weight
# misc
self.test_scale = test_scale
self.ignore_thresh = ignore_thresh
# related to network outputs
self.z_type = z_type
self.pose_type = pose_type
self.use_confidence = use_confidence
# related to priors
self.cluster_bins = cluster_bins
self.dims_priors_enabled = dims_priors_enabled
self.dims_priors_func = dims_priors_func
# if there is no 3D loss, then we don't need any heads.
if loss_w_3d > 0:
self.cube_head = cube_head
self.cube_pooler = cube_pooler
# the dimensions could rely on pre-computed priors
if self.dims_priors_enabled and priors is not None:
self.priors_dims_per_cat = nn.Parameter(torch.FloatTensor(priors['priors_dims_per_cat']).unsqueeze(0))
else:
self.priors_dims_per_cat = nn.Parameter(torch.ones(1, self.num_classes, 2, 3))
# Optionally, refactor priors and store them in the network params
if self.cluster_bins > 1 and priors is not None:
# the depth could have been clustered based on 2D scales
priors_z_scales = torch.stack([torch.FloatTensor(prior[1]) for prior in priors['priors_bins']])
self.priors_z_scales = nn.Parameter(priors_z_scales)
else:
self.priors_z_scales = nn.Parameter(torch.ones(self.num_classes, self.cluster_bins))
# the depth can be based on priors
if self.z_type == 'clusters':
assert self.cluster_bins > 1, 'To use z_type of priors, there must be more than 1 cluster bin'
if priors is None:
self.priors_z_stats = nn.Parameter(torch.ones(self.num_classes, self.cluster_bins, 2).float())
else:
# stats
priors_z_stats = torch.cat([torch.FloatTensor(prior[2]).unsqueeze(0) for prior in priors['priors_bins']])
self.priors_z_stats = nn.Parameter(priors_z_stats)
@classmethod
def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec], priors=None):
ret = super().from_config(cfg, input_shape)
# pass along priors
ret["box_predictor"] = FastRCNNOutputs(cfg, ret['box_head'].output_shape)
ret.update(cls._init_cube_head(cfg, input_shape))
ret["priors"] = priors
return ret
@classmethod
def _init_cube_head(self, cfg, input_shape: Dict[str, ShapeSpec]):
in_features = cfg.MODEL.ROI_HEADS.IN_FEATURES
pooler_scales = tuple(1.0 / input_shape[k].stride for k in in_features)
pooler_resolution = cfg.MODEL.ROI_CUBE_HEAD.POOLER_RESOLUTION
pooler_sampling_ratio = cfg.MODEL.ROI_CUBE_HEAD.POOLER_SAMPLING_RATIO
pooler_type = cfg.MODEL.ROI_CUBE_HEAD.POOLER_TYPE
cube_pooler = ROIPooler(
output_size=pooler_resolution,
scales=pooler_scales,
sampling_ratio=pooler_sampling_ratio,
pooler_type=pooler_type,
)
in_channels = [input_shape[f].channels for f in in_features][0]
shape = ShapeSpec(
channels=in_channels, width=pooler_resolution, height=pooler_resolution
)
cube_head = build_cube_head(cfg, shape)
return {
'cube_head': cube_head,
'cube_pooler': cube_pooler,
'use_confidence': cfg.MODEL.ROI_CUBE_HEAD.USE_CONFIDENCE,
'inverse_z_weight': cfg.MODEL.ROI_CUBE_HEAD.INVERSE_Z_WEIGHT,
'loss_w_3d': cfg.MODEL.ROI_CUBE_HEAD.LOSS_W_3D,
'loss_w_xy': cfg.MODEL.ROI_CUBE_HEAD.LOSS_W_XY,
'loss_w_z': cfg.MODEL.ROI_CUBE_HEAD.LOSS_W_Z,
'loss_w_dims': cfg.MODEL.ROI_CUBE_HEAD.LOSS_W_DIMS,
'loss_w_pose': cfg.MODEL.ROI_CUBE_HEAD.LOSS_W_POSE,
'loss_w_joint': cfg.MODEL.ROI_CUBE_HEAD.LOSS_W_JOINT,
'z_type': cfg.MODEL.ROI_CUBE_HEAD.Z_TYPE,
'pose_type': cfg.MODEL.ROI_CUBE_HEAD.POSE_TYPE,
'dims_priors_enabled': cfg.MODEL.ROI_CUBE_HEAD.DIMS_PRIORS_ENABLED,
'dims_priors_func': cfg.MODEL.ROI_CUBE_HEAD.DIMS_PRIORS_FUNC,
'disentangled_loss': cfg.MODEL.ROI_CUBE_HEAD.DISENTANGLED_LOSS,
'virtual_depth': cfg.MODEL.ROI_CUBE_HEAD.VIRTUAL_DEPTH,
'virtual_focal': cfg.MODEL.ROI_CUBE_HEAD.VIRTUAL_FOCAL,
'test_scale': cfg.INPUT.MIN_SIZE_TEST,
'chamfer_pose': cfg.MODEL.ROI_CUBE_HEAD.CHAMFER_POSE,
'allocentric_pose': cfg.MODEL.ROI_CUBE_HEAD.ALLOCENTRIC_POSE,
'cluster_bins': cfg.MODEL.ROI_CUBE_HEAD.CLUSTER_BINS,
'ignore_thresh': cfg.MODEL.RPN.IGNORE_THRESHOLD,
'scale_roi_boxes': cfg.MODEL.ROI_CUBE_HEAD.SCALE_ROI_BOXES,
}
def forward(self, images, features, proposals, Ks, im_scales_ratio, targets=None):
im_dims = [image.shape[1:] for image in images]
del images
if self.training:
proposals = self.label_and_sample_proposals(proposals, targets)
del targets
if self.training:
losses = self._forward_box(features, proposals)
if self.loss_w_3d > 0:
instances_3d, losses_cube = self._forward_cube(features, proposals, Ks, im_dims, im_scales_ratio)
losses.update(losses_cube)
return instances_3d, losses
else:
            # when an oracle is available, bypass the box forward.
            # simulate the predicted instances by creating a new
            # instance for each passed-in image.
            if isinstance(proposals, list) and not np.any([isinstance(p, Instances) for p in proposals]):
pred_instances = []
for proposal, im_dim in zip(proposals, im_dims):
pred_instances_i = Instances(im_dim)
pred_instances_i.pred_boxes = Boxes(proposal['gt_bbox2D'])
pred_instances_i.pred_classes = proposal['gt_classes']
pred_instances_i.scores = torch.ones_like(proposal['gt_classes']).float()
pred_instances.append(pred_instances_i)
else:
pred_instances = self._forward_box(features, proposals)
if self.loss_w_3d > 0:
pred_instances = self._forward_cube(features, pred_instances, Ks, im_dims, im_scales_ratio)
return pred_instances, {}
def _forward_box(self, features: Dict[str, torch.Tensor], proposals: List[Instances]):
"""
Forward logic of the box prediction branch. If `self.train_on_pred_boxes is True`,
the function puts predicted boxes in the `proposal_boxes` field of `proposals` argument.
Args:
features (dict[str, Tensor]): mapping from feature map names to tensor.
Same as in :meth:`ROIHeads.forward`.
proposals (list[Instances]): the per-image object proposals with
their matching ground truth.
Each has fields "proposal_boxes", and "objectness_logits",
"gt_classes", "gt_boxes".
Returns:
In training, a dict of losses.
In inference, a list of `Instances`, the predicted instances.
"""
features = [features[f] for f in self.box_in_features]
box_features = self.box_pooler(features, [x.proposal_boxes for x in proposals])
box_features = self.box_head(box_features)
predictions = self.box_predictor(box_features)
del box_features
if self.training:
losses = self.box_predictor.losses(
predictions, proposals,
)
pred_boxes = self.box_predictor.predict_boxes_for_gt_classes(
predictions, proposals
)
for proposals_per_image, pred_boxes_per_image in zip(proposals, pred_boxes):
proposals_per_image.pred_boxes = Boxes(pred_boxes_per_image)
# proposals is modified in-place below, so losses must be computed first.
if self.train_on_pred_boxes:
with torch.no_grad():
pred_boxes = self.box_predictor.predict_boxes_for_gt_classes(
predictions, proposals
)
for proposals_per_image, pred_boxes_per_image in zip(proposals, pred_boxes):
proposals_per_image.proposal_boxes = Boxes(pred_boxes_per_image)
return losses
else:
pred_instances, _ = self.box_predictor.inference(predictions, proposals, )
return pred_instances
def l1_loss(self, vals, target):
return F.smooth_l1_loss(vals, target, reduction='none', beta=0.0)
def chamfer_loss(self, vals, target):
B = vals.shape[0]
xx = vals.view(B, 8, 1, 3)
yy = target.view(B, 1, 8, 3)
l1_dist = (xx - yy).abs().sum(-1)
l1 = (l1_dist.min(1).values.mean(-1) + l1_dist.min(2).values.mean(-1))
return l1
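    # chamfer_loss is a symmetric nearest-corner L1 distance over the 8 cuboid vertices:
    # every predicted corner is matched to its closest GT corner and vice versa, so the
    # loss is insensitive to corner ordering (useful for symmetric boxes/rotations).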
    # optionally, scale proposals to zoom RoI in (<1.0) or out (>1.0)
def scale_proposals(self, proposal_boxes):
if self.scale_roi_boxes > 0:
proposal_boxes_scaled = []
for boxes in proposal_boxes:
centers = boxes.get_centers()
widths = boxes.tensor[:, 2] - boxes.tensor[:, 0]
                heights = boxes.tensor[:, 3] - boxes.tensor[:, 1]
x1 = centers[:, 0] - 0.5*widths*self.scale_roi_boxes
x2 = centers[:, 0] + 0.5*widths*self.scale_roi_boxes
y1 = centers[:, 1] - 0.5*heights*self.scale_roi_boxes
y2 = centers[:, 1] + 0.5*heights*self.scale_roi_boxes
boxes_scaled = Boxes(torch.stack([x1, y1, x2, y2], dim=1))
proposal_boxes_scaled.append(boxes_scaled)
else:
proposal_boxes_scaled = proposal_boxes
return proposal_boxes_scaled
def _forward_cube(self, features, instances, Ks, im_current_dims, im_scales_ratio):
features = [features[f] for f in self.in_features]
# training on foreground
if self.training:
losses = {}
# add up the amount we should normalize the losses by.
# this follows the same logic as the BoxHead, where each FG proposal
# is able to contribute the same amount of supervision. Technically,
# this value doesn't change during training unless the batch size is dynamic.
self.normalize_factor = max(sum([i.gt_classes.numel() for i in instances]), 1.0)
# The loss is only defined on positive proposals
proposals, _ = select_foreground_proposals(instances, self.num_classes)
proposal_boxes = [x.proposal_boxes for x in proposals]
pred_boxes = [x.pred_boxes for x in proposals]
box_classes = (torch.cat([p.gt_classes for p in proposals], dim=0) if len(proposals) else torch.empty(0))
gt_boxes3D = torch.cat([p.gt_boxes3D for p in proposals], dim=0,)
gt_poses = torch.cat([p.gt_poses for p in proposals], dim=0,)
assert len(gt_poses) == len(gt_boxes3D) == len(box_classes)
# eval on all instances
else:
proposals = instances
pred_boxes = [x.pred_boxes for x in instances]
proposal_boxes = pred_boxes
box_classes = torch.cat([x.pred_classes for x in instances])
proposal_boxes_scaled = self.scale_proposals(proposal_boxes)
# forward features
cube_features = self.cube_pooler(features, proposal_boxes_scaled).flatten(1)
n = cube_features.shape[0]
# nothing to do..
if n == 0:
return instances if not self.training else (instances, {})
num_boxes_per_image = [len(i) for i in proposals]
# scale the intrinsics according to the ratio the image has been scaled.
# this means the projections at the current scale are in sync.
Ks_scaled_per_box = torch.cat([
(Ks[i]/im_scales_ratio[i]).unsqueeze(0).repeat([num, 1, 1])
for (i, num) in enumerate(num_boxes_per_image)
]).to(cube_features.device)
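        # keep the homogeneous entry K[2, 2] at exactly 1; only focal lengths and the
        # principal point should be rescaled with the image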
Ks_scaled_per_box[:, -1, -1] = 1
focal_lengths_per_box = torch.cat([
(Ks[i][1, 1]).unsqueeze(0).repeat([num])
for (i, num) in enumerate(num_boxes_per_image)
]).to(cube_features.device)
im_ratios_per_box = torch.cat([
torch.FloatTensor([im_scales_ratio[i]]).repeat(num)
for (i, num) in enumerate(num_boxes_per_image)
]).to(cube_features.device)
# scaling factor for Network resolution -> Original
im_scales_per_box = torch.cat([
torch.FloatTensor([im_current_dims[i][0]]).repeat(num)
for (i, num) in enumerate(num_boxes_per_image)
]).to(cube_features.device)
im_scales_original_per_box = im_scales_per_box * im_ratios_per_box
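        # virtual depth (sketch of the intent): depth is predicted as if every image used a
        # canonical "virtual" focal length, making depth targets comparable across cameras;
        # virtual_to_real below converts such predictions back to metric depth for the true intrinsics.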
if self.virtual_depth:
virtual_to_real = util.compute_virtual_scale_from_focal_spaces(
focal_lengths_per_box, im_scales_original_per_box,
self.virtual_focal, im_scales_per_box
)
real_to_virtual = 1 / virtual_to_real
else:
real_to_virtual = virtual_to_real = 1.0
# 2D boxes are needed to apply deltas
src_boxes = torch.cat([box_per_im.tensor for box_per_im in proposal_boxes], dim=0)
src_widths = src_boxes[:, 2] - src_boxes[:, 0]
src_heights = src_boxes[:, 3] - src_boxes[:, 1]
src_scales = (src_heights**2 + src_widths**2).sqrt()
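        # src_scales is the 2D box diagonal; it is used below to pick the nearest depth
        # cluster per category when cluster_bins > 1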
src_ctr_x = src_boxes[:, 0] + 0.5 * src_widths
src_ctr_y = src_boxes[:, 1] + 0.5 * src_heights
# For some methods, we need the predicted 2D box,
# e.g., the differentiable tensors from the 2D box head.
pred_src_boxes = torch.cat([box_per_im.tensor for box_per_im in pred_boxes], dim=0)
pred_widths = pred_src_boxes[:, 2] - pred_src_boxes[:, 0]
pred_heights = pred_src_boxes[:, 3] - pred_src_boxes[:, 1]
pred_src_x = (pred_src_boxes[:, 2] + pred_src_boxes[:, 0]) * 0.5
pred_src_y = (pred_src_boxes[:, 3] + pred_src_boxes[:, 1]) * 0.5
# forward predictions
cube_2d_deltas, cube_z, cube_dims, cube_pose, cube_uncert = self.cube_head(cube_features)
# simple indexing re-used commonly for selection purposes
fg_inds = torch.arange(n)
# Z when clusters are used
if cube_z is not None and self.cluster_bins > 1:
# compute closest bin assignments per batch per category (batch x n_category)
scales_diff = (self.priors_z_scales.detach().T.unsqueeze(0) - src_scales.unsqueeze(1).unsqueeze(2)).abs()
# assign the correct scale prediction.
# (the others are not used / thrown away)
assignments = scales_diff.argmin(1)
# select FG, category, and correct cluster
cube_z = cube_z[fg_inds, :, box_classes, :][fg_inds, assignments[fg_inds, box_classes]]
elif cube_z is not None:
# if z is available, collect the per-category predictions.
cube_z = cube_z[fg_inds, box_classes, :]
cube_dims = cube_dims[fg_inds, box_classes, :]
cube_pose = cube_pose[fg_inds, box_classes, :, :]
if self.use_confidence:
# if uncertainty is available, collect the per-category predictions.
cube_uncert = cube_uncert[fg_inds, box_classes]
cube_2d_deltas = cube_2d_deltas[fg_inds, box_classes, :]
# apply our predicted deltas based on src boxes.
cube_x = src_ctr_x + src_widths * cube_2d_deltas[:, 0]
cube_y = src_ctr_y + src_heights * cube_2d_deltas[:, 1]
cube_xy = torch.cat((cube_x.unsqueeze(1), cube_y.unsqueeze(1)), dim=1)
cube_dims_norm = cube_dims
if self.dims_priors_enabled:
# gather prior dimensions
prior_dims = self.priors_dims_per_cat.detach().repeat([n, 1, 1, 1])[fg_inds, box_classes]
prior_dims_mean = prior_dims[:, 0, :]
prior_dims_std = prior_dims[:, 1, :]
if self.dims_priors_func == 'sigmoid':
prior_dims_min = (prior_dims_mean - 3*prior_dims_std).clip(0.0)
prior_dims_max = (prior_dims_mean + 3*prior_dims_std)
cube_dims = util.scaled_sigmoid(cube_dims_norm, min=prior_dims_min, max=prior_dims_max)
elif self.dims_priors_func == 'exp':
cube_dims = torch.exp(cube_dims_norm.clip(max=5)) * prior_dims_mean
else:
# no priors are used
cube_dims = torch.exp(cube_dims_norm.clip(max=5))
if self.allocentric_pose:
# To compare with GTs, we need the pose to be egocentric, not allocentric
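            # (allocentric: rotation expressed relative to the viewing ray through the object
            #  center; egocentric: rotation in camera coordinates, which is what the GT poses use)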
cube_pose_allocentric = cube_pose
cube_pose = util.R_from_allocentric(Ks_scaled_per_box, cube_pose, u=cube_x.detach(), v=cube_y.detach())
cube_z = cube_z.squeeze()
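        # decode depth according to the configured parameterization:
        # 'direct', 'sigmoid' (scaled to [0, 100]), 'log', or residuals w.r.t. per-cluster statistics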
if self.z_type =='sigmoid':
cube_z_norm = torch.sigmoid(cube_z)
cube_z = cube_z_norm * 100
elif self.z_type == 'log':
cube_z_norm = cube_z
cube_z = torch.exp(cube_z)
elif self.z_type == 'clusters':
# gather the mean depth, same operation as above, for a n x c result
z_means = self.priors_z_stats[:, :, 0].T.unsqueeze(0).repeat([n, 1, 1])
z_means = torch.gather(z_means, 1, assignments.unsqueeze(1)).squeeze(1)
# gather the std depth, same operation as above, for a n x c result
z_stds = self.priors_z_stats[:, :, 1].T.unsqueeze(0).repeat([n, 1, 1])
z_stds = torch.gather(z_stds, 1, assignments.unsqueeze(1)).squeeze(1)
# do not learn these, they are static
z_means = z_means.detach()
z_stds = z_stds.detach()
z_means = z_means[fg_inds, box_classes]
z_stds = z_stds[fg_inds, box_classes]
z_mins = (z_means - 3*z_stds).clip(0)
z_maxs = (z_means + 3*z_stds)
cube_z_norm = cube_z
cube_z = util.scaled_sigmoid(cube_z, min=z_mins, max=z_maxs)
if self.virtual_depth:
cube_z = (cube_z * virtual_to_real)
if self.training:
prefix = 'Cube/'
storage = get_event_storage()
# Pull off necessary GT information
# let lowercase->2D and uppercase->3D
# [x, y, Z, W, H, L]
gt_2d = gt_boxes3D[:, :2]
gt_z = gt_boxes3D[:, 2]
gt_dims = gt_boxes3D[:, 3:6]
# this box may have been mirrored and scaled so
# we need to recompute XYZ in 3D by backprojecting.
gt_x3d = gt_z * (gt_2d[:, 0] - Ks_scaled_per_box[:, 0, 2])/Ks_scaled_per_box[:, 0, 0]
gt_y3d = gt_z * (gt_2d[:, 1] - Ks_scaled_per_box[:, 1, 2])/Ks_scaled_per_box[:, 1, 1]
gt_3d = torch.stack((gt_x3d, gt_y3d, gt_z)).T
# put together the GT boxes
gt_box3d = torch.cat((gt_3d, gt_dims), dim=1)
# These are the corners which will be the target for all losses!!
gt_corners = util.get_cuboid_verts_faces(gt_box3d, gt_poses)[0]
# project GT corners
gt_proj_boxes = torch.bmm(Ks_scaled_per_box, gt_corners.transpose(1,2))
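            # perspective divide: normalize the homogeneous projections by their depth row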
gt_proj_boxes /= gt_proj_boxes[:, -1, :].clone().unsqueeze(1)
gt_proj_x1 = gt_proj_boxes[:, 0, :].min(1)[0]
gt_proj_y1 = gt_proj_boxes[:, 1, :].min(1)[0]
gt_proj_x2 = gt_proj_boxes[:, 0, :].max(1)[0]
gt_proj_y2 = gt_proj_boxes[:, 1, :].max(1)[0]
gt_widths = gt_proj_x2 - gt_proj_x1
gt_heights = gt_proj_y2 - gt_proj_y1
gt_x = gt_proj_x1 + 0.5 * gt_widths
gt_y = gt_proj_y1 + 0.5 * gt_heights
gt_proj_boxes = torch.stack((gt_proj_x1, gt_proj_y1, gt_proj_x2, gt_proj_y2), dim=1)
if self.disentangled_loss:
'''
                Disentangled loss compares each variable group to the
                cuboid corners, which is generally more robust to hyperparameters.
'''
# compute disentangled Z corners
cube_dis_x3d_from_z = cube_z * (gt_2d[:, 0] - Ks_scaled_per_box[:, 0, 2])/Ks_scaled_per_box[:, 0, 0]
cube_dis_y3d_from_z = cube_z * (gt_2d[:, 1] - Ks_scaled_per_box[:, 1, 2])/Ks_scaled_per_box[:, 1, 1]
cube_dis_z = torch.cat((torch.stack((cube_dis_x3d_from_z, cube_dis_y3d_from_z, cube_z)).T, gt_dims), dim=1)
dis_z_corners = util.get_cuboid_verts_faces(cube_dis_z, gt_poses)[0]
# compute disentangled XY corners
cube_dis_x3d = gt_z * (cube_x - Ks_scaled_per_box[:, 0, 2])/Ks_scaled_per_box[:, 0, 0]
cube_dis_y3d = gt_z * (cube_y - Ks_scaled_per_box[:, 1, 2])/Ks_scaled_per_box[:, 1, 1]
cube_dis_XY = torch.cat((torch.stack((cube_dis_x3d, cube_dis_y3d, gt_z)).T, gt_dims), dim=1)
dis_XY_corners = util.get_cuboid_verts_faces(cube_dis_XY, gt_poses)[0]
loss_xy = self.l1_loss(dis_XY_corners, gt_corners).contiguous().view(n, -1).mean(dim=1)
# Pose
dis_pose_corners = util.get_cuboid_verts_faces(gt_box3d, cube_pose)[0]
# Dims
dis_dims_corners = util.get_cuboid_verts_faces(torch.cat((gt_3d, cube_dims), dim=1), gt_poses)[0]
# Loss dims
loss_dims = self.l1_loss(dis_dims_corners, gt_corners).contiguous().view(n, -1).mean(dim=1)
# Loss z
loss_z = self.l1_loss(dis_z_corners, gt_corners).contiguous().view(n, -1).mean(dim=1)
# Rotation uses chamfer or l1 like others
if self.chamfer_pose:
loss_pose = self.chamfer_loss(dis_pose_corners, gt_corners)
else:
loss_pose = self.l1_loss(dis_pose_corners, gt_corners).contiguous().view(n, -1).mean(dim=1)
# Non-disentangled training losses
else:
'''
These loss functions are fairly arbitrarily designed.
Generally, they are in some normalized space but there
are many alternative implementations for most functions.
'''
# XY
gt_deltas = (gt_2d.clone() - torch.cat((src_ctr_x.unsqueeze(1), src_ctr_y.unsqueeze(1)), dim=1)) \
/ torch.cat((src_widths.unsqueeze(1), src_heights.unsqueeze(1)), dim=1)
loss_xy = self.l1_loss(cube_2d_deltas, gt_deltas).mean(1)
# Dims
if self.dims_priors_enabled:
cube_dims_gt_normspace = torch.log(gt_dims/prior_dims)
loss_dims = self.l1_loss(cube_dims_norm, cube_dims_gt_normspace).mean(1)
else:
loss_dims = self.l1_loss(cube_dims_norm, torch.log(gt_dims)).mean(1)
# Pose
try:
if self.allocentric_pose:
gt_poses_allocentric = util.R_to_allocentric(Ks_scaled_per_box, gt_poses, u=cube_x.detach(), v=cube_y.detach())
loss_pose = 1-so3_relative_angle(cube_pose_allocentric, gt_poses_allocentric, eps=0.1, cos_angle=True)
else:
loss_pose = 1-so3_relative_angle(cube_pose, gt_poses, eps=0.1, cos_angle=True)
# Can fail with bad EPS values/instability
except:
loss_pose = None
if self.z_type == 'direct':
loss_z = self.l1_loss(cube_z, gt_z)
elif self.z_type == 'sigmoid':
loss_z = self.l1_loss(cube_z_norm, (gt_z * real_to_virtual / 100).clip(0, 1))
elif self.z_type == 'log':
loss_z = self.l1_loss(cube_z_norm, torch.log((gt_z * real_to_virtual).clip(0.01)))
elif self.z_type == 'clusters':
loss_z = self.l1_loss(cube_z_norm, (((gt_z * real_to_virtual) - z_means)/(z_stds)))
total_3D_loss_for_reporting = loss_dims*self.loss_w_dims
            if loss_pose is not None:
                total_3D_loss_for_reporting += loss_pose*self.loss_w_pose
            if cube_2d_deltas is not None:
                total_3D_loss_for_reporting += loss_xy*self.loss_w_xy
            if loss_z is not None:
                total_3D_loss_for_reporting += loss_z*self.loss_w_z
# reporting does not need gradients
total_3D_loss_for_reporting = total_3D_loss_for_reporting.detach()
if self.loss_w_joint > 0:
'''
If we are using joint [entangled] loss, then we also need to pair all
predictions together and compute a chamfer or l1 loss vs. cube corners.
'''
cube_dis_x3d_from_z = cube_z * (cube_x - Ks_scaled_per_box[:, 0, 2])/Ks_scaled_per_box[:, 0, 0]
cube_dis_y3d_from_z = cube_z * (cube_y - Ks_scaled_per_box[:, 1, 2])/Ks_scaled_per_box[:, 1, 1]
cube_dis_z = torch.cat((torch.stack((cube_dis_x3d_from_z, cube_dis_y3d_from_z, cube_z)).T, cube_dims), dim=1)
dis_z_corners_joint = util.get_cuboid_verts_faces(cube_dis_z, cube_pose)[0]
if self.chamfer_pose and self.disentangled_loss:
loss_joint = self.chamfer_loss(dis_z_corners_joint, gt_corners)
else:
loss_joint = self.l1_loss(dis_z_corners_joint, gt_corners).contiguous().view(n, -1).mean(dim=1)
valid_joint = loss_joint < np.inf
total_3D_loss_for_reporting += (loss_joint*self.loss_w_joint).detach()
# compute errors for tracking purposes
z_error = (cube_z - gt_z).detach().abs()
dims_error = (cube_dims - gt_dims).detach().abs()
xy_error = (cube_xy - gt_2d).detach().abs()
storage.put_scalar(prefix + 'z_error', z_error.mean().item(), smoothing_hint=False)
storage.put_scalar(prefix + 'dims_error', dims_error.mean().item(), smoothing_hint=False)
storage.put_scalar(prefix + 'xy_error', xy_error.mean().item(), smoothing_hint=False)
storage.put_scalar(prefix + 'z_close', (z_error<0.20).float().mean().item(), smoothing_hint=False)
storage.put_scalar(prefix + 'total_3D_loss', self.loss_w_3d * self.safely_reduce_losses(total_3D_loss_for_reporting), smoothing_hint=False)
if self.inverse_z_weight:
'''
Weights all losses to prioritize close up boxes.
'''
gt_z = gt_boxes3D[:, 2]
inverse_z_w = 1/torch.log(gt_z.clip(E_CONSTANT))
loss_dims *= inverse_z_w
# scale based on log, but clip at e
                if cube_2d_deltas is not None:
loss_xy *= inverse_z_w
if loss_z is not None:
loss_z *= inverse_z_w
if loss_pose is not None:
loss_pose *= inverse_z_w
if self.loss_w_joint > 0:
loss_joint *= inverse_z_w
if self.use_confidence > 0:
uncert_sf = SQRT_2_CONSTANT * torch.exp(-cube_uncert)
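                # aleatoric-style confidence weighting: each loss term is scaled by exp(-uncertainty),
                # while the 'uncert' loss added below keeps the network from inflating uncertainty everywhere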
loss_dims *= uncert_sf
                if cube_2d_deltas is not None:
loss_xy *= uncert_sf
                if loss_z is not None:
loss_z *= uncert_sf
if loss_pose is not None:
loss_pose *= uncert_sf
if self.loss_w_joint > 0:
loss_joint *= uncert_sf
losses.update({prefix + 'uncert': self.use_confidence*self.safely_reduce_losses(cube_uncert.clone())})
storage.put_scalar(prefix + 'conf', torch.exp(-cube_uncert).mean().item(), smoothing_hint=False)
# store per batch loss stats temporarily
self.batch_losses = [batch_losses.mean().item() for batch_losses in total_3D_loss_for_reporting.split(num_boxes_per_image)]
if self.loss_w_dims > 0:
losses.update({
prefix + 'loss_dims': self.safely_reduce_losses(loss_dims) * self.loss_w_dims * self.loss_w_3d,
})
            if cube_2d_deltas is not None:
losses.update({
prefix + 'loss_xy': self.safely_reduce_losses(loss_xy) * self.loss_w_xy * self.loss_w_3d,
})
            if loss_z is not None:
losses.update({
prefix + 'loss_z': self.safely_reduce_losses(loss_z) * self.loss_w_z * self.loss_w_3d,
})
if loss_pose is not None:
losses.update({
prefix + 'loss_pose': self.safely_reduce_losses(loss_pose) * self.loss_w_pose * self.loss_w_3d,
})
if self.loss_w_joint > 0:
if valid_joint.any():
losses.update({prefix + 'loss_joint': self.safely_reduce_losses(loss_joint[valid_joint]) * self.loss_w_joint * self.loss_w_3d})
'''
Inference
'''
if len(cube_z.shape) == 0:
cube_z = cube_z.unsqueeze(0)
# inference
cube_x3d = cube_z * (cube_x - Ks_scaled_per_box[:, 0, 2])/Ks_scaled_per_box[:, 0, 0]
cube_y3d = cube_z * (cube_y - Ks_scaled_per_box[:, 1, 2])/Ks_scaled_per_box[:, 1, 1]
cube_3D = torch.cat((torch.stack((cube_x3d, cube_y3d, cube_z)).T, cube_dims, cube_xy*im_ratios_per_box.unsqueeze(1)), dim=1)
if self.use_confidence:
cube_conf = torch.exp(-cube_uncert)
cube_3D = torch.cat((cube_3D, cube_conf.unsqueeze(1)), dim=1)
        # convert the predictions to instances per image
cube_3D = cube_3D.split(num_boxes_per_image)
cube_pose = cube_pose.split(num_boxes_per_image)
box_classes = box_classes.split(num_boxes_per_image)
pred_instances = None
pred_instances = instances if not self.training else \
[Instances(image_size) for image_size in im_current_dims]
for cube_3D_i, cube_pose_i, instances_i, K, im_dim, im_scale_ratio, box_classes_i, pred_boxes_i in \
zip(cube_3D, cube_pose, pred_instances, Ks, im_current_dims, im_scales_ratio, box_classes, pred_boxes):
# merge scores if they already exist
if hasattr(instances_i, 'scores'):
instances_i.scores = (instances_i.scores * cube_3D_i[:, -1])**(1/2)
# assign scores if none are present
else:
instances_i.scores = cube_3D_i[:, -1]
# assign box classes if none exist
if not hasattr(instances_i, 'pred_classes'):
instances_i.pred_classes = box_classes_i
# assign predicted boxes if none exist
if not hasattr(instances_i, 'pred_boxes'):
instances_i.pred_boxes = pred_boxes_i
instances_i.pred_bbox3D = util.get_cuboid_verts_faces(cube_3D_i[:, :6], cube_pose_i)[0]
instances_i.pred_center_cam = cube_3D_i[:, :3]
instances_i.pred_center_2D = cube_3D_i[:, 6:8]
instances_i.pred_dimensions = cube_3D_i[:, 3:6]
instances_i.pred_pose = cube_pose_i
if self.training:
return pred_instances, losses
else:
return pred_instances
def _sample_proposals(
self, matched_idxs: torch.Tensor, matched_labels: torch.Tensor, gt_classes: torch.Tensor, matched_ious=None
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Based on the matching between N proposals and M groundtruth,
sample the proposals and set their classification labels.
Args:
matched_idxs (Tensor): a vector of length N, each is the best-matched
gt index in [0, M) for each proposal.
matched_labels (Tensor): a vector of length N, the matcher's label
(one of cfg.MODEL.ROI_HEADS.IOU_LABELS) for each proposal.
gt_classes (Tensor): a vector of length M.
Returns:
Tensor: a vector of indices of sampled proposals. Each is in [0, N).
Tensor: a vector of the same length, the classification label for
each sampled proposal. Each sample is labeled as either a category in
[0, num_classes) or the background (num_classes).
"""
has_gt = gt_classes.numel() > 0
# Get the corresponding GT for each proposal
if has_gt:
gt_classes = gt_classes[matched_idxs]
# Label unmatched proposals (0 label from matcher) as background (label=num_classes)
gt_classes[matched_labels == 0] = self.num_classes
# Label ignore proposals (-1 label)
gt_classes[matched_labels == -1] = -1
else:
gt_classes = torch.zeros_like(matched_idxs) + self.num_classes
sampled_fg_idxs, sampled_bg_idxs = subsample_labels(
gt_classes, self.batch_size_per_image, self.positive_fraction, self.num_classes, matched_ious=matched_ious
)
sampled_idxs = torch.cat([sampled_fg_idxs, sampled_bg_idxs], dim=0)
return sampled_idxs, gt_classes[sampled_idxs]
@torch.no_grad()
def label_and_sample_proposals(self, proposals: List[Instances], targets: List[Instances]) -> List[Instances]:
        # separate valid and ignore gts
targets_ign = [target[target.gt_classes < 0] for target in targets]
targets = [target[target.gt_classes >= 0] for target in targets]
if self.proposal_append_gt:
proposals = add_ground_truth_to_proposals(targets, proposals)
proposals_with_gt = []
num_fg_samples = []
num_bg_samples = []
for proposals_per_image, targets_per_image, targets_ign_per_image in zip(proposals, targets, targets_ign):
has_gt = len(targets_per_image) > 0
match_quality_matrix = pairwise_iou(targets_per_image.gt_boxes, proposals_per_image.proposal_boxes)
matched_idxs, matched_labels = self.proposal_matcher(match_quality_matrix)
try:
if len(targets_ign_per_image) > 0:
# compute the quality matrix, only on subset of background
background_inds = (matched_labels == 0).nonzero().squeeze()
# determine the boxes inside ignore regions with sufficient threshold
if background_inds.numel() > 1:
match_quality_matrix_ign = pairwise_ioa(targets_ign_per_image.gt_boxes, proposals_per_image.proposal_boxes[background_inds])
matched_labels[background_inds[match_quality_matrix_ign.max(0)[0] >= self.ignore_thresh]] = -1
del match_quality_matrix_ign
except:
pass
gt_arange = torch.arange(match_quality_matrix.shape[1]).to(matched_idxs.device)
matched_ious = match_quality_matrix[matched_idxs, gt_arange]
sampled_idxs, gt_classes = self._sample_proposals(matched_idxs, matched_labels, targets_per_image.gt_classes, matched_ious=matched_ious)
# Set target attributes of the sampled proposals:
proposals_per_image = proposals_per_image[sampled_idxs]
proposals_per_image.gt_classes = gt_classes
if has_gt:
sampled_targets = matched_idxs[sampled_idxs]
# We index all the attributes of targets that start with "gt_"
# and have not been added to proposals yet (="gt_classes").
                # NOTE: here the indexing wastes some compute, because heads
# like masks, keypoints, etc, will filter the proposals again,
# (by foreground/background, or number of keypoints in the image, etc)
# so we essentially index the data twice.
for (trg_name, trg_value) in targets_per_image.get_fields().items():
if trg_name.startswith("gt_") and not proposals_per_image.has(trg_name):
proposals_per_image.set(trg_name, trg_value[sampled_targets])
num_bg_samples.append((gt_classes == self.num_classes).sum().item())
num_fg_samples.append(gt_classes.numel() - num_bg_samples[-1])
proposals_with_gt.append(proposals_per_image)
# Log the number of fg/bg samples that are selected for training ROI heads
storage = get_event_storage()
storage.put_scalar("roi_head/num_fg_samples", np.mean(num_fg_samples))
storage.put_scalar("roi_head/num_bg_samples", np.mean(num_bg_samples))
return proposals_with_gt
def safely_reduce_losses(self, loss):
valid = (~(loss.isinf())) & (~(loss.isnan()))
if valid.any():
return loss[valid].mean()
else:
# no valid losses, simply zero out
return loss.mean()*0.0 | 41,015 | 42.634043 | 151 | py |
omni3d | omni3d-main/cubercnn/modeling/proposal_generator/rpn.py | # Copyright (c) Meta Platforms, Inc. and affiliates
from typing import Dict, List, Tuple
import torch
from typing import List, Tuple, Union
import torch.nn.functional as F
from detectron2.config import configurable
from detectron2.utils.events import get_event_storage
from detectron2.layers import ShapeSpec, cat
from detectron2.structures import Boxes, Instances, pairwise_iou, pairwise_ioa
from detectron2.utils.memory import retry_if_cuda_oom
from fvcore.nn import smooth_l1_loss
from detectron2.layers import cat
from detectron2.layers import nonzero_tuple
from detectron2.modeling.box_regression import Box2BoxTransform, _dense_box_regression_loss
from detectron2.modeling.proposal_generator import RPN
from detectron2.modeling import PROPOSAL_GENERATOR_REGISTRY
@PROPOSAL_GENERATOR_REGISTRY.register()
class RPNWithIgnore(RPN):
@configurable
def __init__(
self,
*,
ignore_thresh: float = 0.5,
objectness_uncertainty: str = 'none',
**kwargs
):
super().__init__(**kwargs)
self.ignore_thresh = ignore_thresh
self.objectness_uncertainty = objectness_uncertainty
@classmethod
def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]):
ret = super().from_config(cfg, input_shape)
ret["ignore_thresh"] = cfg.MODEL.RPN.IGNORE_THRESHOLD
ret["objectness_uncertainty"] = cfg.MODEL.RPN.OBJECTNESS_UNCERTAINTY
return ret
@torch.jit.unused
@torch.no_grad()
def label_and_sample_anchors(self, anchors: List[Boxes], gt_instances: List[Instances]) -> Tuple[List[torch.Tensor], List[torch.Tensor]]:
anchors = Boxes.cat(anchors)
# separate valid and ignore gts
gt_boxes_ign = [x.gt_boxes[x.gt_classes < 0] for x in gt_instances]
gt_boxes = [x.gt_boxes[x.gt_classes >= 0] for x in gt_instances]
del gt_instances
gt_labels = []
matched_gt_boxes = []
for gt_boxes_i, gt_boxes_ign_i in zip(gt_boxes, gt_boxes_ign):
"""
gt_boxes_i: ground-truth boxes for i-th image
gt_boxes_ign_i: ground-truth ignore boxes for i-th image
"""
match_quality_matrix = retry_if_cuda_oom(pairwise_iou)(gt_boxes_i, anchors)
matched_idxs, gt_labels_i = retry_if_cuda_oom(self.anchor_matcher)(match_quality_matrix)
# Matching is memory-expensive and may result in CPU tensors. But the result is small
gt_labels_i = gt_labels_i.to(device=gt_boxes_i.device)
gt_arange = torch.arange(match_quality_matrix.shape[1]).to(matched_idxs.device)
matched_ious = match_quality_matrix[matched_idxs, gt_arange]
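            # IoU of every anchor with its matched GT; passed to _subsample_labels so that
            # sampling can be biased toward higher-quality anchors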
best_ious_gt_vals, best_ious_gt_ind = match_quality_matrix.max(dim=1)
del match_quality_matrix
best_inds = torch.tensor(list(set(best_ious_gt_ind.tolist()) & set((gt_labels_i == 1).nonzero().squeeze(1).tolist())))
# A vector of labels (-1, 0, 1) for each anchor
# which denote (ignore, background, foreground)
gt_labels_i = self._subsample_labels(gt_labels_i, matched_ious=matched_ious)
            # override the best possible GT options, always selected for sampling.
            # otherwise aggressive thresholds may produce HUGE amounts of low-quality FG.
if best_inds.numel() > 0:
gt_labels_i[best_inds] = 1.0
if len(gt_boxes_i) == 0:
# These values won't be used anyway since the anchor is labeled as background
matched_gt_boxes_i = torch.zeros_like(anchors.tensor)
else:
# TODO wasted indexing computation for ignored boxes
matched_gt_boxes_i = gt_boxes_i[matched_idxs].tensor
if len(gt_boxes_ign_i) > 0:
# compute the quality matrix, only on subset of background
background_inds = (gt_labels_i == 0).nonzero().squeeze()
if background_inds.numel() > 1:
match_quality_matrix_ign = retry_if_cuda_oom(pairwise_ioa)(gt_boxes_ign_i, anchors[background_inds])
# determine the boxes inside ignore regions with sufficient threshold
gt_labels_i[background_inds[match_quality_matrix_ign.max(0)[0] >= self.ignore_thresh]] = -1
del match_quality_matrix_ign
gt_labels.append(gt_labels_i) # N,AHW
matched_gt_boxes.append(matched_gt_boxes_i)
return gt_labels, matched_gt_boxes
def _subsample_labels(self, label, matched_ious=None):
"""
Randomly sample a subset of positive and negative examples, and overwrite
the label vector to the ignore value (-1) for all elements that are not
included in the sample.
Args:
labels (Tensor): a vector of -1, 0, 1. Will be modified in-place and returned.
"""
pos_idx, neg_idx = subsample_labels(
label, self.batch_size_per_image, self.positive_fraction, 0, matched_ious=matched_ious
)
# Fill with the ignore label (-1), then set positive and negative labels
label.fill_(-1)
label.scatter_(0, pos_idx, 1)
label.scatter_(0, neg_idx, 0)
return label
@torch.jit.unused
def losses(
self,
anchors: List[Boxes],
pred_objectness_logits: List[torch.Tensor],
gt_labels: List[torch.Tensor],
pred_anchor_deltas: List[torch.Tensor],
gt_boxes: List[torch.Tensor],
) -> Dict[str, torch.Tensor]:
"""
Return the losses from a set of RPN predictions and their associated ground-truth.
Args:
anchors (list[Boxes or RotatedBoxes]): anchors for each feature map, each
has shape (Hi*Wi*A, B), where B is box dimension (4 or 5).
pred_objectness_logits (list[Tensor]): A list of L elements.
Element i is a tensor of shape (N, Hi*Wi*A) representing
the predicted objectness logits for all anchors.
gt_labels (list[Tensor]): Output of :meth:`label_and_sample_anchors`.
pred_anchor_deltas (list[Tensor]): A list of L elements. Element i is a tensor of shape
(N, Hi*Wi*A, 4 or 5) representing the predicted "deltas" used to transform anchors
to proposals.
gt_boxes (list[Tensor]): Output of :meth:`label_and_sample_anchors`.
Returns:
dict[loss name -> loss value]: A dict mapping from loss name to loss value.
                Loss names are: `rpn/cls` for objectness classification and
                `rpn/loc` for proposal localization.
"""
num_images = len(gt_labels)
gt_labels = torch.stack(gt_labels) # (N, sum(Hi*Wi*Ai))
# Log the number of positive/negative anchors per-image that's used in training
pos_mask = gt_labels == 1
num_pos_anchors = pos_mask.sum().item()
num_neg_anchors = (gt_labels == 0).sum().item()
storage = get_event_storage()
storage.put_scalar("rpn/num_pos_anchors", num_pos_anchors / num_images)
storage.put_scalar("rpn/num_neg_anchors", num_neg_anchors / num_images)
        if self.objectness_uncertainty.lower() not in ['none']:
localization_loss, objectness_loss = _dense_box_regression_loss_with_uncertainty(
anchors,
self.box2box_transform,
pred_anchor_deltas,
pred_objectness_logits,
gt_boxes,
pos_mask,
box_reg_loss_type=self.box_reg_loss_type,
smooth_l1_beta=self.smooth_l1_beta,
uncertainty_type=self.objectness_uncertainty,
)
else:
localization_loss = _dense_box_regression_loss(
anchors,
self.box2box_transform,
pred_anchor_deltas,
gt_boxes,
pos_mask,
box_reg_loss_type=self.box_reg_loss_type,
smooth_l1_beta=self.smooth_l1_beta,
)
valid_mask = gt_labels >= 0
objectness_loss = F.binary_cross_entropy_with_logits(
cat(pred_objectness_logits, dim=1)[valid_mask],
gt_labels[valid_mask].to(torch.float32),
reduction="sum",
)
normalizer = self.batch_size_per_image * num_images
losses = {
"rpn/cls": objectness_loss / normalizer,
"rpn/loc": localization_loss / normalizer,
}
losses = {k: v * self.loss_weight.get(k, 1.0) for k, v in losses.items()}
return losses
def _dense_box_regression_loss_with_uncertainty(
anchors: List[Union[Boxes, torch.Tensor]],
box2box_transform: Box2BoxTransform,
pred_anchor_deltas: List[torch.Tensor],
pred_objectness_logits: List[torch.Tensor],
gt_boxes: List[torch.Tensor],
fg_mask: torch.Tensor,
box_reg_loss_type="smooth_l1",
smooth_l1_beta=0.0,
uncertainty_type='centerness',
):
"""
Compute loss for dense multi-level box regression.
Loss is accumulated over ``fg_mask``.
Args:
anchors: #lvl anchor boxes, each is (HixWixA, 4)
pred_anchor_deltas: #lvl predictions, each is (N, HixWixA, 4)
gt_boxes: N ground truth boxes, each has shape (R, 4) (R = sum(Hi * Wi * A))
fg_mask: the foreground boolean mask of shape (N, R) to compute loss on
box_reg_loss_type (str): Loss type to use. Supported losses: "smooth_l1", "giou",
"diou", "ciou".
smooth_l1_beta (float): beta parameter for the smooth L1 regression loss. Default to
use L1 loss. Only used when `box_reg_loss_type` is "smooth_l1"
"""
if isinstance(anchors[0], Boxes):
anchors = type(anchors[0]).cat(anchors).tensor # (R, 4)
else:
anchors = cat(anchors)
n = len(gt_boxes)
boxes_fg = Boxes(anchors.unsqueeze(0).repeat([n, 1, 1])[fg_mask])
gt_boxes_fg = Boxes(torch.stack(gt_boxes)[fg_mask].detach())
objectness_targets_anchors = matched_pairwise_iou(boxes_fg, gt_boxes_fg).detach()
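    # IoU-aware objectness: the target for each foreground anchor is its IoU with the matched
    # GT box rather than a hard 1, so better-localized anchors get stronger positive supervision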
objectness_logits = torch.cat(pred_objectness_logits, dim=1)
# Numerically the same as (-(y*torch.log(p) + (1 - y)*torch.log(1 - p))).sum()
loss_box_conf = F.binary_cross_entropy_with_logits(
objectness_logits[fg_mask],
objectness_targets_anchors,
reduction='none'
)
loss_box_conf = (loss_box_conf * objectness_targets_anchors).sum()
# keep track of how scores look for FG / BG.
# ideally, FG slowly >>> BG scores as regression improves.
storage = get_event_storage()
storage.put_scalar("rpn/conf_pos_anchors", torch.sigmoid(objectness_logits[fg_mask]).mean().item())
storage.put_scalar("rpn/conf_neg_anchors", torch.sigmoid(objectness_logits[~fg_mask]).mean().item())
if box_reg_loss_type == "smooth_l1":
gt_anchor_deltas = [box2box_transform.get_deltas(anchors, k) for k in gt_boxes]
gt_anchor_deltas = torch.stack(gt_anchor_deltas) # (N, R, 4)
loss_box_reg = smooth_l1_loss(
cat(pred_anchor_deltas, dim=1)[fg_mask],
gt_anchor_deltas[fg_mask],
beta=smooth_l1_beta,
reduction="none",
)
loss_box_reg = (loss_box_reg.sum(dim=1) * objectness_targets_anchors).sum()
else:
raise ValueError(f"Invalid dense box regression loss type '{box_reg_loss_type}'")
return loss_box_reg, loss_box_conf
def subsample_labels(
labels: torch.Tensor, num_samples: int, positive_fraction: float, bg_label: int, matched_ious=None, eps=1e-4
):
"""
Return `num_samples` (or fewer, if not enough found)
random samples from `labels` which is a mixture of positives & negatives.
It will try to return as many positives as possible without
exceeding `positive_fraction * num_samples`, and then try to
fill the remaining slots with negatives.
Args:
labels (Tensor): (N, ) label vector with values:
* -1: ignore
* bg_label: background ("negative") class
* otherwise: one or more foreground ("positive") classes
num_samples (int): The total number of labels with value >= 0 to return.
Values that are not sampled will be filled with -1 (ignore).
positive_fraction (float): The number of subsampled labels with values > 0
is `min(num_positives, int(positive_fraction * num_samples))`. The number
of negatives sampled is `min(num_negatives, num_samples - num_positives_sampled)`.
            In other words, if there are not enough positives, the sample is filled with
negatives. If there are also not enough negatives, then as many elements are
sampled as is possible.
bg_label (int): label index of background ("negative") class.
Returns:
pos_idx, neg_idx (Tensor):
1D vector of indices. The total length of both is `num_samples` or fewer.
"""
positive = nonzero_tuple((labels != -1) & (labels != bg_label))[0]
negative = nonzero_tuple(labels == bg_label)[0]
num_pos = int(num_samples * positive_fraction)
# protect against not enough positive examples
num_pos = min(positive.numel(), num_pos)
num_neg = num_samples - num_pos
# protect against not enough negative examples
num_neg = min(negative.numel(), num_neg)
#if positive_fraction == 1.0 and num_neg > 10:
# allow some negatives for statistics only.
#num_neg = 10
# randomly select positive and negative examples
if num_pos > 0 and matched_ious is not None:
perm1 = torch.multinomial(matched_ious[positive] + eps, num_pos)
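        # IoU-weighted sampling: anchors with larger matched IoU are more likely to be drawn;
        # eps keeps zero-IoU entries from having exactly zero probability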
else:
perm1 = torch.randperm(positive.numel(), device=positive.device)[:num_pos]
if num_neg > 0 and matched_ious is not None:
perm2 = torch.multinomial(matched_ious[negative] + eps, num_neg)
else:
perm2 = torch.randperm(negative.numel(), device=negative.device)[:num_neg]
pos_idx = positive[perm1]
neg_idx = negative[perm2]
return pos_idx, neg_idx
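# Example (sketch): labels = [1, -1, 0, 0, 1, 0], num_samples = 4, positive_fraction = 0.5 and
# bg_label = 0 yields at most 2 positives (indices 0 and 4) and 2 of the negatives (from 2, 3, 5);
# when matched_ious is provided, the draw is IoU-weighted instead of uniform.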
def matched_pairwise_iou(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor:
"""
Compute pairwise intersection over union (IOU) of two sets of matched
boxes that have the same number of boxes.
Similar to :func:`pairwise_iou`, but computes only diagonal elements of the matrix.
Args:
boxes1 (Boxes): bounding boxes, sized [N,4].
boxes2 (Boxes): same length as boxes1
Returns:
Tensor: iou, sized [N].
"""
assert len(boxes1) == len(
boxes2
), "boxlists should have the same" "number of entries, got {}, {}".format(
len(boxes1), len(boxes2)
)
area1 = boxes1.area() # [N]
area2 = boxes2.area() # [N]
box1, box2 = boxes1.tensor, boxes2.tensor
lt = torch.max(box1[:, :2], box2[:, :2]) # [N,2]
rb = torch.min(box1[:, 2:], box2[:, 2:]) # [N,2]
wh = (rb - lt).clamp(min=0) # [N,2]
inter = wh[:, 0] * wh[:, 1] # [N]
iou = inter / (area1 + area2 - inter) # [N]
return iou | 15,229 | 42.022599 | 141 | py |
VLC-BERT | VLC-BERT-master/vqa/train_end2end.py | import _init_paths
import os
import argparse
import torch
import subprocess
from vqa.function.config import config, update_config
from vqa.function.train import train_net
from vqa.function.test import test_net
def parse_args():
parser = argparse.ArgumentParser('Train Cognition Network')
parser.add_argument('--cfg', type=str, help='path to config file')
parser.add_argument('--model-dir', type=str, help='root path to store checkpoint')
parser.add_argument('--log-dir', type=str, help='tensorboard log dir')
parser.add_argument('--dist', help='whether to use distributed training', default=False, action='store_true')
parser.add_argument('--slurm', help='whether this is a slurm job', default=False, action='store_true')
parser.add_argument('--do-test', help='whether to generate csv result on test set',
default=False, action='store_true')
parser.add_argument('--cudnn-off', help='disable cudnn', default=False, action='store_true')
# easy test pretrain model
parser.add_argument('--partial-pretrain', type=str)
args = parser.parse_args()
if args.cfg is not None:
update_config(args.cfg)
if args.model_dir is not None:
config.OUTPUT_PATH = os.path.join(args.model_dir, config.OUTPUT_PATH)
if args.partial_pretrain is not None:
config.NETWORK.PARTIAL_PRETRAIN = args.partial_pretrain
if args.slurm:
proc_id = int(os.environ['SLURM_PROCID'])
ntasks = int(os.environ['SLURM_NTASKS'])
node_list = os.environ['SLURM_NODELIST']
num_gpus = torch.cuda.device_count()
addr = subprocess.getoutput(
'scontrol show hostname {} | head -n1'.format(node_list))
os.environ['MASTER_PORT'] = str(29500)
os.environ['MASTER_ADDR'] = addr
os.environ['WORLD_SIZE'] = str(ntasks)
os.environ['RANK'] = str(proc_id)
os.environ['LOCAL_RANK'] = str(proc_id % num_gpus)
return args, config
def main():
args, config = parse_args()
rank, model = train_net(args, config)
if args.do_test and (rank is None or rank == 0):
test_net(args, config)
if __name__ == '__main__':
main()
| 2,191 | 33.793651 | 113 | py |
VLC-BERT | VLC-BERT-master/vqa/function/val.py | from collections import namedtuple
import torch
from common.trainer import to_cuda
@torch.no_grad()
def do_validation(net, val_loader, metrics, label_index_in_batch):
net.eval()
metrics.reset()
for nbatch, batch in enumerate(val_loader):
batch = to_cuda(batch)
label = batch[label_index_in_batch]
datas = [batch[i] for i in range(len(batch)) if i != label_index_in_batch % len(batch)]
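        # feed everything except the label to the network; the label is re-attached to the
        # outputs below so the metrics can compare predictions against it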
outputs = net(*datas)
outputs.update({'label': label})
metrics.update(outputs)
| 528 | 26.842105 | 95 | py |
VLC-BERT | VLC-BERT-master/vqa/function/test.py | import os
import pprint
import shutil
import json
from tqdm import tqdm, trange
import numpy as np
import torch
import torch.nn.functional as F
from common.utils.load import smart_load_model_state_dict
from common.trainer import to_cuda
from common.utils.create_logger import create_logger
from vqa.data.build import make_dataloader
from vqa.modules import *
@torch.no_grad()
def test_net(args, config, ckpt_path=None, save_path=None, save_name=None):
print('test net...')
pprint.pprint(args)
pprint.pprint(config)
device_ids = [int(d) for d in config.GPUS.split(',')]
# os.environ['CUDA_VISIBLE_DEVICES'] = config.GPUS
torch.backends.cudnn.enabled = False
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
if ckpt_path is None:
_, train_output_path = create_logger(config.OUTPUT_PATH, args.cfg, config.DATASET.TRAIN_IMAGE_SET,
split='train')
model_prefix = os.path.join(train_output_path, config.MODEL_PREFIX)
ckpt_path = '{}-best.model'.format(model_prefix)
print('Use best checkpoint {}...'.format(ckpt_path))
if save_path is None:
logger, test_output_path = create_logger(config.OUTPUT_PATH, args.cfg, config.DATASET.TEST_IMAGE_SET,
split='test')
save_path = test_output_path
if not os.path.exists(save_path):
os.makedirs(save_path)
shutil.copy2(ckpt_path,
os.path.join(save_path, '{}_test_ckpt_{}.model'.format(config.MODEL_PREFIX, config.DATASET.TASK)))
# get network
model = eval(config.MODULE)(config)
if len(device_ids) > 1:
model = torch.nn.DataParallel(model, device_ids=device_ids).cuda()
else:
torch.cuda.set_device(device_ids[0])
model = model.cuda()
checkpoint = torch.load(ckpt_path, map_location=lambda storage, loc: storage)
smart_load_model_state_dict(model, checkpoint['state_dict'])
# loader
test_loader = make_dataloader(config, mode='test', distributed=False)
test_dataset = test_loader.dataset
test_database = test_dataset.database
# test
q_ids = []
answer_ids = []
model.eval()
cur_id = 0
for nbatch, batch in zip(trange(len(test_loader)), test_loader):
# for nbatch, batch in tqdm(enumerate(test_loader)):
bs = test_loader.batch_sampler.batch_size if test_loader.batch_sampler is not None else test_loader.batch_size
q_ids.extend([test_database[id]['question_id'] for id in range(cur_id, min(cur_id + bs, len(test_database)))])
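        # note: this assumes the test loader iterates the database in order (no shuffling),
        # so question ids can be recovered from the running offset cur_id alone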
batch = to_cuda(batch)
output = model(*batch)
answer_ids.extend(output['label_logits'].argmax(dim=1).detach().cpu().tolist())
cur_id += bs
result = [{'question_id': q_id, 'answer': test_dataset.answer_vocab[a_id]} for q_id, a_id in zip(q_ids, answer_ids)]
cfg_name = os.path.splitext(os.path.basename(args.cfg))[0]
result_json_path = os.path.join(save_path, '{}_vqa2_{}.json'.format(cfg_name if save_name is None else save_name,
config.DATASET.TEST_IMAGE_SET))
with open(result_json_path, 'w') as f:
json.dump(result, f)
print('result json saved to {}.'.format(result_json_path))
return result_json_path
| 3,359 | 39.481928 | 120 | py |
VLC-BERT | VLC-BERT-master/vqa/function/train.py | import os
import pprint
import shutil
import inspect
from tensorboardX import SummaryWriter
import numpy as np
import torch
import torch.nn
import torch.optim as optim
import torch.distributed as distributed
from torch.nn.parallel import DistributedDataParallel as DDP
from common.utils.create_logger import create_logger
from common.utils.misc import summary_parameters, bn_fp16_half_eval
from common.utils.load import smart_resume, smart_partial_load_model_state_dict
from common.trainer import train
from common.metrics.composite_eval_metric import CompositeEvalMetric
from common.metrics import vqa_metrics
from common.callbacks.batch_end_callbacks.speedometer import Speedometer
from common.callbacks.epoch_end_callbacks.validation_monitor import ValidationMonitor
from common.callbacks.epoch_end_callbacks.checkpoint import Checkpoint
from common.lr_scheduler import WarmupMultiStepLR
from common.nlp.bert.optimization import AdamW, WarmupLinearSchedule
from vqa.data.build import make_dataloader, build_dataset, build_transforms
from vqa.modules import *
from vqa.function.val import do_validation
try:
from apex import amp
from apex.parallel import DistributedDataParallel as Apex_DDP
except ImportError:
pass
#raise ImportError("Please install apex from https://www.github.com/nvidia/apex if you want to use fp16.")
def train_net(args, config):
# setup logger
logger, final_output_path = create_logger(config.OUTPUT_PATH, args.cfg, config.DATASET.TRAIN_IMAGE_SET,
split='train')
model_prefix = os.path.join(final_output_path, config.MODEL_PREFIX)
if args.log_dir is None:
args.log_dir = os.path.join(final_output_path, 'tensorboard_logs')
pprint.pprint(args)
logger.info('training args:{}\n'.format(args))
pprint.pprint(config)
logger.info('training config:{}\n'.format(pprint.pformat(config)))
# manually set random seed
if config.RNG_SEED > -1:
np.random.seed(config.RNG_SEED)
torch.random.manual_seed(config.RNG_SEED)
torch.cuda.manual_seed_all(config.RNG_SEED)
# cudnn
torch.backends.cudnn.benchmark = False
if args.cudnn_off:
torch.backends.cudnn.enabled = False
if args.dist:
model = eval(config.MODULE)(config)
local_rank = int(os.environ.get('LOCAL_RANK') or 0)
config.GPUS = str(local_rank)
torch.cuda.set_device(local_rank)
master_address = os.environ['MASTER_ADDR']
master_port = int(os.environ['MASTER_PORT'] or 23456)
world_size = int(os.environ['WORLD_SIZE'] or 1)
rank = int(os.environ['RANK'] or 0)
if args.slurm:
distributed.init_process_group(backend='nccl')
else:
distributed.init_process_group(
backend='nccl',
init_method='tcp://{}:{}'.format(master_address, master_port),
world_size=world_size,
rank=rank,
group_name='mtorch')
print(f'native distributed, size: {world_size}, rank: {rank}, local rank: {local_rank}')
torch.cuda.set_device(local_rank)
config.GPUS = str(local_rank)
model = model.cuda()
if not config.TRAIN.FP16:
model = DDP(model, device_ids=[local_rank], output_device=local_rank)
if rank == 0:
summary_parameters(model.module if isinstance(model, torch.nn.parallel.DistributedDataParallel) else model,
logger)
shutil.copy(args.cfg, final_output_path)
shutil.copy(inspect.getfile(eval(config.MODULE)), final_output_path)
writer = None
if args.log_dir is not None:
tb_log_dir = os.path.join(args.log_dir, 'rank{}'.format(rank))
if not os.path.exists(tb_log_dir):
os.makedirs(tb_log_dir)
writer = SummaryWriter(log_dir=tb_log_dir)
train_loader, train_sampler = make_dataloader(config,
mode='train',
distributed=True,
num_replicas=world_size,
rank=rank,
expose_sampler=True)
val_loader = make_dataloader(config,
mode='val',
distributed=True,
num_replicas=world_size,
rank=rank)
batch_size = world_size * (sum(config.TRAIN.BATCH_IMAGES)
if isinstance(config.TRAIN.BATCH_IMAGES, list)
else config.TRAIN.BATCH_IMAGES)
if config.TRAIN.GRAD_ACCUMULATE_STEPS > 1:
batch_size = batch_size * config.TRAIN.GRAD_ACCUMULATE_STEPS
base_lr = config.TRAIN.LR * batch_size
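        # linear LR scaling: the configured LR acts as a per-sample rate and is multiplied by the
        # effective global batch size (replicas x per-GPU batch x gradient-accumulation steps)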
optimizer_grouped_parameters = [{'params': [p for n, p in model.named_parameters() if _k in n],
'lr': base_lr * _lr_mult}
for _k, _lr_mult in config.TRAIN.LR_MULT]
optimizer_grouped_parameters.append({'params': [p for n, p in model.named_parameters()
if all([_k not in n for _k, _ in config.TRAIN.LR_MULT])]})
if config.TRAIN.OPTIMIZER == 'SGD':
optimizer = optim.SGD(optimizer_grouped_parameters,
lr=config.TRAIN.LR * batch_size,
momentum=config.TRAIN.MOMENTUM,
weight_decay=config.TRAIN.WD)
elif config.TRAIN.OPTIMIZER == 'Adam':
optimizer = optim.Adam(optimizer_grouped_parameters,
lr=config.TRAIN.LR * batch_size,
weight_decay=config.TRAIN.WD)
elif config.TRAIN.OPTIMIZER == 'AdamW':
optimizer = AdamW(optimizer_grouped_parameters,
lr=config.TRAIN.LR * batch_size,
betas=(0.9, 0.999),
eps=1e-6,
weight_decay=config.TRAIN.WD,
correct_bias=True)
else:
            raise ValueError('Unsupported optimizer: {}!'.format(config.TRAIN.OPTIMIZER))
total_gpus = world_size
else:
#os.environ['CUDA_VISIBLE_DEVICES'] = config.GPUS
model = eval(config.MODULE)(config)
summary_parameters(model, logger)
shutil.copy(args.cfg, final_output_path)
shutil.copy(inspect.getfile(eval(config.MODULE)), final_output_path)
num_gpus = len(config.GPUS.split(','))
        assert num_gpus <= 1 or (not config.TRAIN.FP16), "fp16 is not supported with torch.nn.DataParallel. " \
                                                         "Please use apex.parallel.DistributedDataParallel instead."
total_gpus = num_gpus
rank = None
writer = SummaryWriter(log_dir=args.log_dir) if args.log_dir is not None else None
# model
if num_gpus > 1:
model = torch.nn.DataParallel(model, device_ids=[int(d) for d in config.GPUS.split(',')]).cuda()
else:
torch.cuda.set_device(int(config.GPUS))
model.cuda()
# loader
train_loader = make_dataloader(config, mode='train', distributed=False)
val_loader = make_dataloader(config, mode='val', distributed=False)
train_sampler = None
batch_size = num_gpus * (sum(config.TRAIN.BATCH_IMAGES) if isinstance(config.TRAIN.BATCH_IMAGES, list)
else config.TRAIN.BATCH_IMAGES)
if config.TRAIN.GRAD_ACCUMULATE_STEPS > 1:
batch_size = batch_size * config.TRAIN.GRAD_ACCUMULATE_STEPS
base_lr = config.TRAIN.LR * batch_size
optimizer_grouped_parameters = [{'params': [p for n, p in model.named_parameters() if _k in n],
'lr': base_lr * _lr_mult}
for _k, _lr_mult in config.TRAIN.LR_MULT]
optimizer_grouped_parameters.append({'params': [p for n, p in model.named_parameters()
if all([_k not in n for _k, _ in config.TRAIN.LR_MULT])]})
if config.TRAIN.OPTIMIZER == 'SGD':
optimizer = optim.SGD(optimizer_grouped_parameters,
lr=config.TRAIN.LR * batch_size,
momentum=config.TRAIN.MOMENTUM,
weight_decay=config.TRAIN.WD)
elif config.TRAIN.OPTIMIZER == 'Adam':
optimizer = optim.Adam(optimizer_grouped_parameters,
lr=config.TRAIN.LR * batch_size,
weight_decay=config.TRAIN.WD)
elif config.TRAIN.OPTIMIZER == 'AdamW':
optimizer = AdamW(optimizer_grouped_parameters,
lr=config.TRAIN.LR * batch_size,
betas=(0.9, 0.999),
eps=1e-6,
weight_decay=config.TRAIN.WD,
correct_bias=True)
else:
            raise ValueError('Unsupported optimizer: {}!'.format(config.TRAIN.OPTIMIZER))
# partial load pretrain state dict
if config.NETWORK.PARTIAL_PRETRAIN != "":
pretrain_state_dict = torch.load(config.NETWORK.PARTIAL_PRETRAIN, map_location=lambda storage, loc: storage)['state_dict']
prefix_change = [prefix_change.split('->') for prefix_change in config.NETWORK.PARTIAL_PRETRAIN_PREFIX_CHANGES]
if len(prefix_change) > 0:
pretrain_state_dict_parsed = {}
for k, v in pretrain_state_dict.items():
no_match = True
for pretrain_prefix, new_prefix in prefix_change:
if k.startswith(pretrain_prefix):
k = new_prefix + k[len(pretrain_prefix):]
pretrain_state_dict_parsed[k] = v
no_match = False
break
if no_match:
pretrain_state_dict_parsed[k] = v
pretrain_state_dict = pretrain_state_dict_parsed
smart_partial_load_model_state_dict(model, pretrain_state_dict)
# pretrained classifier
if config.NETWORK.CLASSIFIER_PRETRAINED:
print('Initializing classifier weight from pretrained word embeddings...')
answers_word_embed = []
for k, v in model.state_dict().items():
if 'word_embeddings.weight' in k:
word_embeddings = v.detach().clone()
break
for answer in train_loader.dataset.answer_vocab:
a_tokens = train_loader.dataset.tokenizer.tokenize(answer)
a_ids = train_loader.dataset.tokenizer.convert_tokens_to_ids(a_tokens)
a_word_embed = (torch.stack([word_embeddings[a_id] for a_id in a_ids], dim=0)).mean(dim=0)
answers_word_embed.append(a_word_embed)
answers_word_embed_tensor = torch.stack(answers_word_embed, dim=0)
for name, module in model.named_modules():
if name.endswith('final_mlp'):
module[-1].weight.data = answers_word_embed_tensor.to(device=module[-1].weight.data.device)
# metrics
train_metrics_list = [vqa_metrics.SoftAccuracy(allreduce=args.dist,
num_replicas=world_size if args.dist else 1)]
val_metrics_list = [vqa_metrics.SoftAccuracy(allreduce=args.dist,
num_replicas=world_size if args.dist else 1)]
for output_name, display_name in config.TRAIN.LOSS_LOGGERS:
train_metrics_list.append(
vqa_metrics.LossLogger(output_name, display_name=display_name, allreduce=args.dist,
num_replicas=world_size if args.dist else 1))
train_metrics = CompositeEvalMetric()
val_metrics = CompositeEvalMetric()
for child_metric in train_metrics_list:
train_metrics.add(child_metric)
for child_metric in val_metrics_list:
val_metrics.add(child_metric)
# epoch end callbacks
epoch_end_callbacks = []
if (rank is None) or (rank == 0):
epoch_end_callbacks = [Checkpoint(model_prefix, config.CHECKPOINT_FREQUENT)]
validation_monitor = ValidationMonitor(do_validation, val_loader, val_metrics,
host_metric_name='SoftAcc',
label_index_in_batch=config.DATASET.LABEL_INDEX_IN_BATCH)
# optimizer initial lr before
for group in optimizer.param_groups:
group.setdefault('initial_lr', group['lr'])
# resume/auto-resume
if rank is None or rank == 0:
smart_resume(model, optimizer, validation_monitor, config, model_prefix, logger)
if args.dist:
begin_epoch = torch.tensor(config.TRAIN.BEGIN_EPOCH).cuda()
distributed.broadcast(begin_epoch, src=0)
config.TRAIN.BEGIN_EPOCH = begin_epoch.item()
# batch end callbacks
batch_size = len(config.GPUS.split(',')) * config.TRAIN.BATCH_IMAGES
batch_end_callbacks = [Speedometer(batch_size, config.LOG_FREQUENT,
batches_per_epoch=len(train_loader),
epochs=config.TRAIN.END_EPOCH - config.TRAIN.BEGIN_EPOCH)]
# setup lr step and lr scheduler
if config.TRAIN.LR_SCHEDULE == 'plateau':
print("Warning: not support resuming on plateau lr schedule!")
lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
mode='max',
factor=config.TRAIN.LR_FACTOR,
patience=1,
verbose=True,
threshold=1e-4,
threshold_mode='rel',
cooldown=2,
min_lr=0,
eps=1e-8)
elif config.TRAIN.LR_SCHEDULE == 'triangle':
lr_scheduler = WarmupLinearSchedule(optimizer,
config.TRAIN.WARMUP_STEPS if config.TRAIN.WARMUP else 0,
t_total=int(config.TRAIN.END_EPOCH * len(train_loader) / config.TRAIN.GRAD_ACCUMULATE_STEPS),
last_epoch=int(config.TRAIN.BEGIN_EPOCH * len(train_loader) / config.TRAIN.GRAD_ACCUMULATE_STEPS) - 1)
elif config.TRAIN.LR_SCHEDULE == 'step':
lr_iters = [int(epoch * len(train_loader) / config.TRAIN.GRAD_ACCUMULATE_STEPS) for epoch in config.TRAIN.LR_STEP]
lr_scheduler = WarmupMultiStepLR(optimizer, milestones=lr_iters, gamma=config.TRAIN.LR_FACTOR,
warmup_factor=config.TRAIN.WARMUP_FACTOR,
warmup_iters=config.TRAIN.WARMUP_STEPS if config.TRAIN.WARMUP else 0,
warmup_method=config.TRAIN.WARMUP_METHOD,
last_epoch=int(config.TRAIN.BEGIN_EPOCH * len(train_loader) / config.TRAIN.GRAD_ACCUMULATE_STEPS) - 1)
else:
raise ValueError("Not support lr schedule: {}.".format(config.TRAIN.LR_SCHEDULE))
# broadcast parameter and optimizer state from rank 0 before training start
if args.dist:
for v in model.state_dict().values():
distributed.broadcast(v, src=0)
# for v in optimizer.state_dict().values():
# distributed.broadcast(v, src=0)
best_epoch = torch.tensor(validation_monitor.best_epoch).cuda()
best_val = torch.tensor(validation_monitor.best_val).cuda()
distributed.broadcast(best_epoch, src=0)
distributed.broadcast(best_val, src=0)
validation_monitor.best_epoch = best_epoch.item()
validation_monitor.best_val = best_val.item()
# apex: amp fp16 mixed-precision training
if config.TRAIN.FP16:
# model.apply(bn_fp16_half_eval)
model, optimizer = amp.initialize(model, optimizer,
opt_level='O2',
keep_batchnorm_fp32=False,
loss_scale=config.TRAIN.FP16_LOSS_SCALE,
min_loss_scale=32.0)
if args.dist:
model = Apex_DDP(model, delay_allreduce=True)
train(model, optimizer, lr_scheduler, train_loader, train_sampler, train_metrics,
config.TRAIN.BEGIN_EPOCH, config.TRAIN.END_EPOCH, logger,
rank=rank, batch_end_callbacks=batch_end_callbacks, epoch_end_callbacks=epoch_end_callbacks,
writer=writer, validation_monitor=validation_monitor, fp16=config.TRAIN.FP16,
clip_grad_norm=config.TRAIN.CLIP_GRAD_NORM,
gradient_accumulate_steps=config.TRAIN.GRAD_ACCUMULATE_STEPS)
return rank, model
| 17,541 | 51.053412 | 147 | py |