repo_name (string, 6 classes) | pr_number (int64, 99-20.3k) | pr_title (string, len 8-158) | pr_description (string, len 0-6.54k) | author (string, len 4-18) | date_created (unknown) | date_merged (unknown) | previous_commit (string, len 40) | pr_commit (string, len 40) | query (string, len 37-6.57k) | filepath (string, len 8-153) | before_content (string, len 0-876M) | after_content (string, len 0-876M) | label (int64, -1 to 1)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
tensorflow/graphics | 489 | Enforce `Framebuffer` to accept only tensors with single batch dimension. | Enforce `Framebuffer` to accept only tensors with single batch dimension.
| copybara-service[bot] | "2021-02-03T21:06:22Z" | "2021-02-12T23:59:48Z" | 3a4f1952ed967fb884dc031eeda6dac3fbefbe52 | b7a2bf260d6fcf924fddcbb6dba36c72ece66990 | Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
| ./tensorflow_graphics/notebooks/mesh_segmentation_dataio.py | # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataset Pipeline for mesh_segmentation_demo.ipynb.
The shorthands used in parameter descriptions below are
'B': Batch size.
'E': Number of unique directed edges in a mesh.
'V': Number of vertices in a mesh.
'T': Number of triangles in a mesh.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow_graphics.geometry.convolution import utils as conv_utils
from tensorflow_graphics.geometry.representation.mesh import utils as mesh_utils
from tensorflow_graphics.util import shape
DEFAULT_IO_PARAMS = {
'batch_size': 8,
'shuffle_buffer_size': 100,
'is_training': True,
'parallel_threads': 5,
'mean_center': True,
'shuffle': None,
'repeat': None,
}
def adjacency_from_edges(edges, weights, num_edges, num_vertices):
"""Returns a batched sparse 1-ring adj tensor from edge list tensor.
Args:
edges: [B, E, 2] `int32` tensor of edges, possibly 0 padded.
weights: [B, E] `float32` tensor of edge weights, possibly 0 padded.
num_edges: [B] `int32` tensor of number of valid edges per batch sample.
num_vertices: [B] `int32` tensor of number of valid vertices per batch
sample.
Returns:
adj: A batched SparseTensor of weighted adjacency graph, of
dense_shape [B, V, V] where V is max(num_vertices)
"""
edges = tf.convert_to_tensor(value=edges)
weights = tf.convert_to_tensor(value=weights)
num_edges = tf.convert_to_tensor(value=num_edges)
num_vertices = tf.convert_to_tensor(value=num_vertices)
if not edges.dtype.is_integer:
raise TypeError("'edges' must have an integer type.")
if not num_edges.dtype.is_integer:
raise TypeError("'num_edges' must have an integer type.")
if not num_vertices.dtype.is_integer:
raise TypeError("'num_vertices' must have an integer type.")
if not weights.dtype.is_floating:
raise TypeError("'weights' must have a floating type.")
shape.check_static(tensor=edges, tensor_name='edges', has_rank=3)
shape.check_static(tensor=weights, tensor_name='weights', has_rank=2)
shape.check_static(tensor=num_edges, tensor_name='num_edges', has_rank=1)
shape.check_static(
tensor=num_vertices, tensor_name='num_vertices', has_rank=1)
shape.compare_dimensions(
tensors=(edges, weights, num_edges, num_vertices),
tensor_names=('edges', 'weights', 'num_edges', 'num_vertices'),
axes=(-3, -2, -1, -1))
shape.compare_dimensions(
tensors=(edges, weights),
tensor_names=('edges', 'weights'),
axes=(-2, -1))
batch_size = tf.shape(input=edges)[0]
max_num_vertices = tf.reduce_max(input_tensor=num_vertices)
max_num_edges = tf.shape(input=edges)[1]
batch_col = tf.reshape(tf.range(batch_size, dtype=edges.dtype), [-1, 1, 1])
batch_col = tf.tile(batch_col, [1, max_num_edges, 1])
batch_edges = tf.concat([batch_col, edges], axis=-1)
indices, _ = conv_utils.flatten_batch_to_2d(batch_edges, sizes=num_edges)
values, _ = conv_utils.flatten_batch_to_2d(
tf.expand_dims(weights, -1), sizes=num_edges)
values = tf.squeeze(values)
adjacency = tf.SparseTensor(
indices=tf.cast(indices, tf.int64),
values=values,
dense_shape=[batch_size, max_num_vertices, max_num_vertices])
adjacency = tf.sparse.reorder(adjacency)
return adjacency
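# A minimal, hypothetical usage sketch of `adjacency_from_edges`; the tensor
# values below are illustrative assumptions, not data from the demo dataset.
def _example_adjacency_from_edges():
  # One mesh (B=1) with two directed edges between vertices 0 and 1.
  edges = tf.constant([[[0, 1], [1, 0]]], dtype=tf.int32)  # [B, E, 2]
  weights = tf.constant([[0.5, 0.5]], dtype=tf.float32)  # [B, E]
  num_edges = tf.constant([2], dtype=tf.int32)  # [B]
  num_vertices = tf.constant([2], dtype=tf.int32)  # [B]
  # Returns a SparseTensor with dense_shape [1, 2, 2].
  return adjacency_from_edges(edges, weights, num_edges, num_vertices)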
def get_weighted_edges(faces, self_edges=True):
r"""Gets unique edges and degree weights from a triangular mesh.
The shorthands used below are:
`T`: The number of triangles in the mesh.
`E`: The number of unique directed edges in the mesh.
Args:
faces: A [T, 3] `int32` numpy.ndarray of triangle vertex indices.
self_edges: A `bool` flag. If True, then for every vertex 'i' an edge
[i, i] is added to the edge list.
Returns:
edges: A [E, 2] `int32` numpy.ndarray of directed edges.
weights: A [E] `float32` numpy.ndarray denoting edge weights.
The degree of a vertex is the number of edges incident on the vertex,
including any self-edges. The weight for an edge $w_{ij}$ connecting vertex
$v_i$ and vertex $v_j$ is defined as,
$$
w_{ij} = 1.0 / degree(v_i)
\sum_{j} w_{ij} = 1
$$
"""
edges = mesh_utils.extract_unique_edges_from_triangular_mesh(
faces, directed_edges=True).astype(np.int32)
if self_edges:
vertices = np.expand_dims(np.unique(edges[:, 0]), axis=1)
self_edges = np.concatenate((vertices, vertices), axis=1)
edges = np.unique(np.concatenate((edges, self_edges), axis=0), axis=0)
weights = mesh_utils.get_degree_based_edge_weights(edges, dtype=np.float32)
return edges, weights
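# A small, hypothetical sanity check for `get_weighted_edges`; the one-triangle
# mesh below is an assumed input, not taken from the demo data.
def _example_get_weighted_edges():
  faces = np.array([[0, 1, 2]], dtype=np.int32)  # a single triangle
  edges, weights = get_weighted_edges(faces)
  # With self-edges added, each vertex has degree 3 (two neighbors plus
  # itself), so every edge weight is 1/3 and weights sum to 1 per vertex.
  return edges, weights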
def _tfrecords_to_dataset(tfrecords,
parallel_threads,
shuffle,
repeat,
sloppy,
max_readers=16):
"""Creates a TFRecordsDataset that iterates over filenames in parallel.
Args:
tfrecords: A list of TFRecord filenames.
parallel_threads: The `int` number denoting number of parallel worker
threads.
shuffle: The `bool` flag denoting whether to shuffle the dataset.
repeat: The `bool` flag denoting whether to repeat the dataset.
sloppy: The `bool` flag denoting whether elements may be produced in
non-deterministic order, trading determinism for throughput.
max_readers: The `int` number denoting the maximum number of input tfrecords
to interleave from in parallel.
Returns:
A tf.data.TFRecordDataset
"""
total_tfrecords = sum([len(tf.io.gfile.glob(f)) for f in tfrecords])
num_readers = min(total_tfrecords, max_readers)
dataset = tf.data.Dataset.list_files(tfrecords, shuffle=shuffle)
if repeat:
dataset = dataset.repeat()
return dataset.apply(
tf.data.experimental.parallel_interleave(
tf.data.TFRecordDataset,
num_readers,
sloppy=sloppy,
buffer_output_elements=parallel_threads,
prefetch_input_elements=parallel_threads))
def _parse_tfex_proto(example_proto):
"""Parses the tfexample proto to a raw mesh_data dictionary.
Args:
example_proto: A tf.Example proto storing the encoded mesh data.
Returns:
A mesh data dictionary with the following fields:
'num_vertices': The `int64` number of vertices in mesh.
'num_triangles': The `int64` number of triangles in mesh.
'vertices': A serialized tensor of vertex positions.
'triangles': A serialized tensor of triangle vertex indices.
'labels': A serialized tensor of per vertex class labels.
"""
feature_description = {
'num_vertices': tf.io.FixedLenFeature([], tf.int64, default_value=0),
'num_triangles': tf.io.FixedLenFeature([], tf.int64, default_value=0),
'vertices': tf.io.FixedLenFeature([], tf.string, default_value=''),
'triangles': tf.io.FixedLenFeature([], tf.string, default_value=''),
'labels': tf.io.FixedLenFeature([], tf.string, default_value=''),
}
return tf.io.parse_single_example(
serialized=example_proto, features=feature_description)
def _parse_mesh_data(mesh_data, mean_center=True):
"""Parses a raw mesh_data dictionary read from tf examples.
Args:
mesh_data: A mesh data dictionary with serialized data tensors,
as output from _parse_tfex_proto()
mean_center: If true, centers the mesh vertices to mean(vertices).
Returns:
A mesh data dictionary with following fields:
'num_vertices': The `int32` number of vertices in mesh.
'num_triangles': The `int32` number of triangles in mesh.
'num_edges': The `int32` number of unique directed edges in mesh.
'vertices': A [V, 3] `float32` tensor of vertex positions.
'triangles': A [T, 3] `int32` tensor of triangle vertex indices.
'labels': A [V] `int32` tensor of per vertex class labels.
'edges': A [E, 2] `int32` tensor of unique directed edges in mesh.
'edge_weights': A [E] `float32` tensor of vertex degree based edge
weights.
"""
labels = tf.io.parse_tensor(mesh_data['labels'], tf.int32)
vertices = tf.io.parse_tensor(mesh_data['vertices'], tf.float32)
triangles = tf.io.parse_tensor(mesh_data['triangles'], tf.int32)
if mean_center:
vertices = vertices - tf.reduce_mean(
input_tensor=vertices, axis=0, keepdims=True)
edges, weights = tf.py_function(
func=lambda t: get_weighted_edges(t.numpy()),
inp=[triangles],
Tout=[tf.int32, tf.float32])
num_edges = tf.shape(input=edges)[0]
num_vertices = tf.cast(mesh_data['num_vertices'], tf.int32)
num_triangles = tf.cast(mesh_data['num_triangles'], tf.int32)
mesh_data = dict(
vertices=vertices,
labels=labels,
triangles=triangles,
edges=edges,
edge_weights=weights,
num_triangles=num_triangles,
num_vertices=num_vertices,
num_edges=num_edges)
return mesh_data
def create_dataset_from_tfrecords(tfrecords, params):
"""Creates a mesh dataset given a list of tf records filenames.
Args:
tfrecords: A list of TFRecords filenames.
params: A dictionary of IO parameters, see DEFAULT_IO_PARAMS.
Returns:
A tf.data.Dataset, with each element a dictionary of batched mesh data
with following fields:
'vertices': A [B, V, 3] `float32` tensor of vertex positions, possibly
0-padded.
'triangles': A [B, T, 3] `int32` tensor of triangle vertex indices,
possibly 0-padded
'labels': A [B, V] `int32` tensor of per vertex class labels, possibly
0-padded
'edges': A [B, E, 2] `int32` tensor of unique directed edges in mesh,
possibly 0-padded
'edge_weights': A [B, E] `float32` tensor of vertex degree based edge
weights, possibly 0-padded.
'num_edges': A [B] `int32` tensor of number of unique directed edges in
each mesh in the batch.
'num_vertices': A [B] `int32` tensor of number of vertices in each mesh
in the batch.
'num_triangles': A [B] `int32` tensor of number of triangles in each mesh
in the batch.
"""
def _set_default_if_none(param, param_dict, default_val):
if param not in param_dict:
return default_val
else:
return default_val if param_dict[param] is None else param_dict[param]
is_training = params['is_training']
shuffle = _set_default_if_none('shuffle', params, is_training)
repeat = _set_default_if_none('repeat', params, is_training)
sloppy = _set_default_if_none('sloppy', params, is_training)
if not isinstance(tfrecords, list):
tfrecords = [tfrecords]
dataset = _tfrecords_to_dataset(tfrecords, params['parallel_threads'],
shuffle, repeat, sloppy)
dataset = dataset.map(_parse_tfex_proto, tf.data.experimental.AUTOTUNE)
dataset = dataset.map(
lambda x: _parse_mesh_data(x, mean_center=params['mean_center']),
tf.data.experimental.AUTOTUNE)
if repeat:
dataset = dataset.repeat()
if shuffle:
dataset = dataset.shuffle(params['shuffle_buffer_size'])
return dataset.padded_batch(
params['batch_size'],
padded_shapes={
'vertices': [None, 3],
'labels': [None],
'triangles': [None, 3],
'edges': [None, 2],
'edge_weights': [None],
'num_edges': [],
'num_vertices': [],
'num_triangles': [],
},
drop_remainder=is_training)
def create_input_from_dataset(dataset_fn, files, io_params):
"""Creates input function given dataset generator and input files.
Args:
dataset_fn: A dataset generator function.
files: A list of TFRecords filenames.
io_params: A dictionary of IO parameters, see DEFAULT_IO_PARAMS.
Returns:
features: A dictionary of mesh data training features.
labels: A [B, V] `int32` tensor of per vertex class labels.
"""
for k in DEFAULT_IO_PARAMS:
io_params[k] = io_params[k] if k in io_params else DEFAULT_IO_PARAMS[k]
dataset = dataset_fn(files, io_params)
mesh_data = tf.compat.v1.data.make_one_shot_iterator(dataset).get_next()
mesh_data['neighbors'] = adjacency_from_edges(mesh_data['edges'],
mesh_data['edge_weights'],
mesh_data['num_edges'],
mesh_data['num_vertices'])
max_num_verts = tf.reduce_max(input_tensor=mesh_data['num_vertices'])
features = dict(
vertices=tf.reshape(mesh_data['vertices'], [-1, max_num_verts, 3]),
triangles=mesh_data['triangles'],
neighbors=mesh_data['neighbors'],
num_triangles=mesh_data['num_triangles'],
num_vertices=mesh_data['num_vertices'])
labels = mesh_data['labels']
# Copy labels to features dictionary for estimator prediction mode.
if not io_params['is_training']:
features['labels'] = mesh_data['labels']
return features, labels
| # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataset Pipeline for mesh_segmentation_demo.ipynb.
The shorthands used in parameter descriptions below are
'B': Batch size.
'E': Number of unique directed edges in a mesh.
'V': Number of vertices in a mesh.
'T': Number of triangles in a mesh.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow_graphics.geometry.convolution import utils as conv_utils
from tensorflow_graphics.geometry.representation.mesh import utils as mesh_utils
from tensorflow_graphics.util import shape
DEFAULT_IO_PARAMS = {
'batch_size': 8,
'shuffle_buffer_size': 100,
'is_training': True,
'parallel_threads': 5,
'mean_center': True,
'shuffle': None,
'repeat': None,
}
def adjacency_from_edges(edges, weights, num_edges, num_vertices):
"""Returns a batched sparse 1-ring adj tensor from edge list tensor.
Args:
edges: [B, E, 2] `int32` tensor of edges, possibly 0 padded.
weights: [B, E] `float32` tensor of edge weights, possibly 0 padded.
num_edges: [B] `int32` tensor of number of valid edges per batch sample.
num_vertices: [B] `int32` tensor of number of valid vertices per batch
sample.
Returns:
adj: A batched SparseTensor of weighted adjacency graph, of
dense_shape [B, V, V] where V is max(num_vertices)
"""
edges = tf.convert_to_tensor(value=edges)
weights = tf.convert_to_tensor(value=weights)
num_edges = tf.convert_to_tensor(value=num_edges)
num_vertices = tf.convert_to_tensor(value=num_vertices)
if not edges.dtype.is_integer:
raise TypeError("'edges' must have an integer type.")
if not num_edges.dtype.is_integer:
raise TypeError("'num_edges' must have an integer type.")
if not num_vertices.dtype.is_integer:
raise TypeError("'num_vertices' must have an integer type.")
if not weights.dtype.is_floating:
raise TypeError("'weights' must have a floating type.")
shape.check_static(tensor=edges, tensor_name='edges', has_rank=3)
shape.check_static(tensor=weights, tensor_name='weights', has_rank=2)
shape.check_static(tensor=num_edges, tensor_name='num_edges', has_rank=1)
shape.check_static(
tensor=num_vertices, tensor_name='num_vertices', has_rank=1)
shape.compare_dimensions(
tensors=(edges, weights, num_edges, num_vertices),
tensor_names=('edges', 'weights', 'num_edges', 'num_vertices'),
axes=(-3, -2, -1, -1))
shape.compare_dimensions(
tensors=(edges, weights),
tensor_names=('edges', 'weights'),
axes=(-2, -1))
batch_size = tf.shape(input=edges)[0]
max_num_vertices = tf.reduce_max(input_tensor=num_vertices)
max_num_edges = tf.shape(input=edges)[1]
batch_col = tf.reshape(tf.range(batch_size, dtype=edges.dtype), [-1, 1, 1])
batch_col = tf.tile(batch_col, [1, max_num_edges, 1])
batch_edges = tf.concat([batch_col, edges], axis=-1)
indices, _ = conv_utils.flatten_batch_to_2d(batch_edges, sizes=num_edges)
values, _ = conv_utils.flatten_batch_to_2d(
tf.expand_dims(weights, -1), sizes=num_edges)
values = tf.squeeze(values)
adjacency = tf.SparseTensor(
indices=tf.cast(indices, tf.int64),
values=values,
dense_shape=[batch_size, max_num_vertices, max_num_vertices])
adjacency = tf.sparse.reorder(adjacency)
return adjacency
def get_weighted_edges(faces, self_edges=True):
r"""Gets unique edges and degree weights from a triangular mesh.
The shorthands used below are:
`T`: The number of triangles in the mesh.
`E`: The number of unique directed edges in the mesh.
Args:
faces: A [T, 3] `int32` numpy.ndarray of triangle vertex indices.
self_edges: A `bool` flag. If True, then for every vertex 'i' an edge
[i, i] is added to the edge list.
Returns:
edges: A [E, 2] `int32` numpy.ndarray of directed edges.
weights: A [E] `float32` numpy.ndarray denoting edge weights.
The degree of a vertex is the number of edges incident on the vertex,
including any self-edges. The weight for an edge $w_{ij}$ connecting vertex
$v_i$ and vertex $v_j$ is defined as,
$$
w_{ij} = 1.0 / degree(v_i)
\sum_{j} w_{ij} = 1
$$
"""
edges = mesh_utils.extract_unique_edges_from_triangular_mesh(
faces, directed_edges=True).astype(np.int32)
if self_edges:
vertices = np.expand_dims(np.unique(edges[:, 0]), axis=1)
self_edges = np.concatenate((vertices, vertices), axis=1)
edges = np.unique(np.concatenate((edges, self_edges), axis=0), axis=0)
weights = mesh_utils.get_degree_based_edge_weights(edges, dtype=np.float32)
return edges, weights
def _tfrecords_to_dataset(tfrecords,
parallel_threads,
shuffle,
repeat,
sloppy,
max_readers=16):
"""Creates a TFRecordsDataset that iterates over filenames in parallel.
Args:
tfrecords: A list of TFRecord filenames.
parallel_threads: The `int` number denoting number of parallel worker
threads.
shuffle: The `bool` flag denoting whether to shuffle the dataset.
repeat: The `bool` flag denoting whether to repeat the dataset.
sloppy: The `bool` flag denoting whether elements may be produced in
non-deterministic order, trading determinism for throughput.
max_readers: The `int` number denoting the maximum number of input tfrecords
to interleave from in parallel.
Returns:
A tf.data.TFRecordDataset
"""
total_tfrecords = sum([len(tf.io.gfile.glob(f)) for f in tfrecords])
num_readers = min(total_tfrecords, max_readers)
dataset = tf.data.Dataset.list_files(tfrecords, shuffle=shuffle)
if repeat:
dataset = dataset.repeat()
return dataset.apply(
tf.data.experimental.parallel_interleave(
tf.data.TFRecordDataset,
num_readers,
sloppy=sloppy,
buffer_output_elements=parallel_threads,
prefetch_input_elements=parallel_threads))
def _parse_tfex_proto(example_proto):
"""Parses the tfexample proto to a raw mesh_data dictionary.
Args:
example_proto: A tf.Example proto storing the encoded mesh data.
Returns:
A mesh data dictionary with the following fields:
'num_vertices': The `int64` number of vertices in mesh.
'num_triangles': The `int64` number of triangles in mesh.
'vertices': A serialized tensor of vertex positions.
'triangles': A serialized tensor of triangle vertex indices.
'labels': A serialized tensor of per vertex class labels.
"""
feature_description = {
'num_vertices': tf.io.FixedLenFeature([], tf.int64, default_value=0),
'num_triangles': tf.io.FixedLenFeature([], tf.int64, default_value=0),
'vertices': tf.io.FixedLenFeature([], tf.string, default_value=''),
'triangles': tf.io.FixedLenFeature([], tf.string, default_value=''),
'labels': tf.io.FixedLenFeature([], tf.string, default_value=''),
}
return tf.io.parse_single_example(
serialized=example_proto, features=feature_description)
def _parse_mesh_data(mesh_data, mean_center=True):
"""Parses a raw mesh_data dictionary read from tf examples.
Args:
mesh_data: A mesh data dictionary with serialized data tensors,
as output from _parse_tfex_proto()
mean_center: If true, centers the mesh vertices to mean(vertices).
Returns:
A mesh data dictionary with following fields:
'num_vertices': The `int32` number of vertices in mesh.
'num_triangles': The `int32` number of triangles in mesh.
'num_edges': The `int32` number of unique directed edges in mesh.
'vertices': A [V, 3] `float32` tensor of vertex positions.
'triangles': A [T, 3] `int32` tensor of triangle vertex indices.
'labels': A [V] `int32` tensor of per vertex class labels.
'edges': A [E, 2] `int32` tensor of unique directed edges in mesh.
'edge_weights': A [E] `float32` tensor of vertex degree based edge
weights.
"""
labels = tf.io.parse_tensor(mesh_data['labels'], tf.int32)
vertices = tf.io.parse_tensor(mesh_data['vertices'], tf.float32)
triangles = tf.io.parse_tensor(mesh_data['triangles'], tf.int32)
if mean_center:
vertices = vertices - tf.reduce_mean(
input_tensor=vertices, axis=0, keepdims=True)
edges, weights = tf.py_function(
func=lambda t: get_weighted_edges(t.numpy()),
inp=[triangles],
Tout=[tf.int32, tf.float32])
num_edges = tf.shape(input=edges)[0]
num_vertices = tf.cast(mesh_data['num_vertices'], tf.int32)
num_triangles = tf.cast(mesh_data['num_triangles'], tf.int32)
mesh_data = dict(
vertices=vertices,
labels=labels,
triangles=triangles,
edges=edges,
edge_weights=weights,
num_triangles=num_triangles,
num_vertices=num_vertices,
num_edges=num_edges)
return mesh_data
def create_dataset_from_tfrecords(tfrecords, params):
"""Creates a mesh dataset given a list of tf records filenames.
Args:
tfrecords: A list of TFRecords filenames.
params: A dictionary of IO parameters, see DEFAULT_IO_PARAMS.
Returns:
A tf.data.Dataset, with each element a dictionary of batched mesh data
with following fields:
'vertices': A [B, V, 3] `float32` tensor of vertex positions, possibly
0-padded.
'triangles': A [B, T, 3] `int32` tensor of triangle vertex indices,
possibly 0-padded
'labels': A [B, V] `int32` tensor of per vertex class labels, possibly
0-padded
'edges': A [B, E, 2] `int32` tensor of unique directed edges in mesh,
possibly 0-padded
'edge_weights': A [B, E] `float32` tensor of vertex degree based edge
weights, possibly 0-padded.
'num_edges': A [B] `int32` tensor of number of unique directed edges in
each mesh in the batch.
'num_vertices': A [B] `int32` tensor of number of vertices in each mesh
in the batch.
'num_triangles': A [B] `int32` tensor of number of triangles in each mesh
in the batch.
"""
def _set_default_if_none(param, param_dict, default_val):
if param not in param_dict:
return default_val
else:
return default_val if param_dict[param] is None else param_dict[param]
is_training = params['is_training']
shuffle = _set_default_if_none('shuffle', params, is_training)
repeat = _set_default_if_none('repeat', params, is_training)
sloppy = _set_default_if_none('sloppy', params, is_training)
if not isinstance(tfrecords, list):
tfrecords = [tfrecords]
dataset = _tfrecords_to_dataset(tfrecords, params['parallel_threads'],
shuffle, repeat, sloppy)
dataset = dataset.map(_parse_tfex_proto, tf.data.experimental.AUTOTUNE)
dataset = dataset.map(
lambda x: _parse_mesh_data(x, mean_center=params['mean_center']),
tf.data.experimental.AUTOTUNE)
if repeat:
dataset = dataset.repeat()
if shuffle:
dataset = dataset.shuffle(params['shuffle_buffer_size'])
return dataset.padded_batch(
params['batch_size'],
padded_shapes={
'vertices': [None, 3],
'labels': [None],
'triangles': [None, 3],
'edges': [None, 2],
'edge_weights': [None],
'num_edges': [],
'num_vertices': [],
'num_triangles': [],
},
drop_remainder=is_training)
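# A hypothetical end-to-end sketch of building the dataset; the tfrecords
# pattern is a placeholder path, not a file shipped with this module.
def _example_create_dataset():
  params = dict(DEFAULT_IO_PARAMS)
  params['batch_size'] = 2
  params['is_training'] = False
  dataset = create_dataset_from_tfrecords('/path/to/meshes*.tfrecords', params)
  # Each element is a dict of padded tensors, e.g. element['vertices'] has
  # shape [2, V, 3] where V is the largest vertex count in the batch.
  return dataset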
def create_input_from_dataset(dataset_fn, files, io_params):
"""Creates input function given dataset generator and input files.
Args:
dataset_fn: A dataset generator function.
files: A list of TFRecords filenames.
io_params: A dictionary of IO parameters, see DEFAULT_IO_PARAMS.
Returns:
features: A dictionary of mesh data training features.
labels: A [B, V] `int32` tensor of per vertex class labels.
"""
for k in DEFAULT_IO_PARAMS:
io_params[k] = io_params[k] if k in io_params else DEFAULT_IO_PARAMS[k]
dataset = dataset_fn(files, io_params)
mesh_data = tf.compat.v1.data.make_one_shot_iterator(dataset).get_next()
mesh_data['neighbors'] = adjacency_from_edges(mesh_data['edges'],
mesh_data['edge_weights'],
mesh_data['num_edges'],
mesh_data['num_vertices'])
max_num_verts = tf.reduce_max(input_tensor=mesh_data['num_vertices'])
features = dict(
vertices=tf.reshape(mesh_data['vertices'], [-1, max_num_verts, 3]),
triangles=mesh_data['triangles'],
neighbors=mesh_data['neighbors'],
num_triangles=mesh_data['num_triangles'],
num_vertices=mesh_data['num_vertices'])
labels = mesh_data['labels']
# Copy labels to features dictionary for estimator prediction mode.
if not io_params['is_training']:
features['labels'] = mesh_data['labels']
return features, labels
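# A hypothetical sketch of wiring the helpers above into an estimator-style
# input function; the tfrecords pattern is again a placeholder assumption.
def _example_input_fn():
  io_params = {'is_training': False}
  features, labels = create_input_from_dataset(
      create_dataset_from_tfrecords, ['/path/to/meshes*.tfrecords'], io_params)
  # `features` holds padded vertices, triangles and the sparse 'neighbors'
  # adjacency; `labels` is the padded per-vertex class tensor.
  return features, labels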
| -1 |
tensorflow/graphics | 489 | Enforce `Framebuffer` to accept only tensors with single batch dimension. | Enforce `Framebuffer` to accept only tensors with single batch dimension.
| copybara-service[bot] | "2021-02-03T21:06:22Z" | "2021-02-12T23:59:48Z" | 3a4f1952ed967fb884dc031eeda6dac3fbefbe52 | b7a2bf260d6fcf924fddcbb6dba36c72ece66990 | Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
| ./tensorflow_graphics/projects/cvxnet/lib/utils.py | # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from os import path
import numpy as np
import scipy as sp
from skimage import measure
import tensorflow.compat.v1 as tf
from tensorflow_graphics.projects.cvxnet.lib import datasets
from tensorflow_graphics.projects.cvxnet.lib import models
from tensorflow_graphics.projects.cvxnet.lib.libmise import mise
import trimesh
Stats = collections.namedtuple("Stats", ["iou", "chamfer", "fscore"])
SYSNET_CLASSES = {
"02691156": "airplane",
"02933112": "cabinet",
"03001627": "chair",
"03636649": "lamp",
"04090263": "rifle",
"04379243": "table",
"04530566": "watercraft",
"02828884": "bench",
"02958343": "car",
"03211117": "display",
"03691459": "speaker",
"04256520": "sofa",
"04401088": "telephone",
"all": "all",
}
def define_flags():
"""Define command line flags."""
flags = tf.app.flags
# Model flags
flags.DEFINE_enum("model", "multiconvex",
list(k for k in models.model_dict.keys()),
"Name of the model.")
flags.DEFINE_float("sharpness", 75., "Sharpness term.")
flags.DEFINE_integer("n_parts", 50, "Number of convexes uesd.")
flags.DEFINE_integer("n_half_planes", 25, "Number of half spaces used.")
flags.DEFINE_integer("latent_size", 256, "The size of latent code.")
flags.DEFINE_integer("dims", 3, "The dimension of query points.")
flags.DEFINE_bool("image_input", False, "Use color images as input if True.")
flags.DEFINE_float("vis_scale", 1.3,
"Scale of bbox used when extracting meshes.")
flags.DEFINE_float("level_set", 0.5,
"Level set used for extracting surfaces.")
# Dataset flags
flags.DEFINE_enum("dataset", "shapenet",
list(k for k in datasets.dataset_dict.keys()),
"Name of the dataset.")
flags.DEFINE_integer("image_h", 137, "The height of the color images.")
flags.DEFINE_integer("image_w", 137, "The width of the color images.")
flags.DEFINE_integer("image_d", 3, "The channels of color images.")
flags.DEFINE_integer("depth_h", 224, "The height of depth images.")
flags.DEFINE_integer("depth_w", 224, "The width of depth images.")
flags.DEFINE_integer("depth_d", 20, "The number of depth views.")
flags.DEFINE_integer("n_views", 24, "The number of color images views.")
flags.DEFINE_string("data_dir", None, "The base directory to load data from.")
flags.mark_flag_as_required("data_dir")
flags.DEFINE_string("obj_class", "*", "Object class used from dataset.")
# Training flags
flags.DEFINE_float("lr", 1e-4, "Start learning rate.")
flags.DEFINE_string(
"train_dir", None, "The base directory to save training info and"
"checkpoints.")
flags.DEFINE_integer("save_every", 20000,
"The number of steps to save checkpoint.")
flags.DEFINE_integer("max_steps", 800000, "The number of steps of training.")
flags.DEFINE_integer("batch_size", 32, "Batch size.")
flags.DEFINE_integer("sample_bbx", 1024,
"The number of bounding box sample points.")
flags.DEFINE_integer("sample_surf", 1024,
"The number of surface sample points.")
flags.DEFINE_float("weight_overlap", 0.1, "Weight of overlap_loss")
flags.DEFINE_float("weight_balance", 0.01, "Weight of balance_loss")
flags.DEFINE_float("weight_center", 0.001, "Weight of center_loss")
flags.mark_flag_as_required("train_dir")
# Eval flags
flags.DEFINE_bool("extract_mesh", False,
"Extract meshes and set to disk if True.")
flags.DEFINE_bool("surface_metrics", False,
"Measure surface metrics and save to csv if True.")
flags.DEFINE_string("mesh_dir", None, "Path to load ground truth meshes.")
flags.DEFINE_string("trans_dir", None,
"Path to load pred-to-target transformations.")
flags.DEFINE_bool("eval_once", False, "Evaluate the model only once if True.")
def mesh_name_helper(name):
name = name[0].decode("utf-8")
split = name.find("-")
cls_name = name[:split]
obj_name = name[split + 1:]
return cls_name, obj_name
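# A tiny, hypothetical sanity check for `mesh_name_helper`; the byte string
# below assumes the "<class>-<object>" naming used by the dataset records.
def _example_mesh_name_helper():
  cls_name, obj_name = mesh_name_helper([b"02691156-model_0001"])
  # cls_name == "02691156" (airplane synset), obj_name == "model_0001".
  return cls_name, obj_name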
def extract_mesh(input_val, params, indicators, input_holder, params_holder,
points_holder, sess, args):
"""Extracting meshes from an indicator function.
Args:
input_val: np.array, [1, height, width, channel], input image.
params: tf.Operation, hyperplane parameter hook.
indicators: tf.Operation, indicator hook.
input_holder: tf.Placeholder, input image placeholder.
params_holder: tf.Placeholder, hyperplane parameter placeholder.
points_holder: tf.Placeholder, query point placeholder.
sess: tf.Session, running sess.
args: tf.app.flags.FLAGS, configurations.
Returns:
mesh: trimesh.Trimesh, the extracted mesh.
"""
mesh_extractor = mise.MISE(64, 1, args.level_set)
points = mesh_extractor.query()
params_val = sess.run(params, {input_holder: input_val})
while points.shape[0] != 0:
orig_points = points
points = points.astype(np.float32)
points = (
(np.expand_dims(points, axis=0) / mesh_extractor.resolution - 0.5) *
args.vis_scale)
n_points = points.shape[1]
values = []
for i in range(0, n_points, 100000): # Add this to prevent OOM.
value = sess.run(indicators, {
params_holder: params_val,
points_holder: points[:, i:i + 100000]
})
values.append(value)
values = np.concatenate(values, axis=1)
values = values[0, :, 0].astype(np.float64)
mesh_extractor.update(orig_points, values)
points = mesh_extractor.query()
value_grid = mesh_extractor.to_dense()
value_grid = np.pad(value_grid, 1, "constant", constant_values=-1e6)
verts, faces, normals, unused_var = measure.marching_cubes_lewiner(
value_grid, min(args.level_set,
value_grid.max() * 0.75))
del normals
verts -= 1
verts /= np.array([
value_grid.shape[0] - 3, value_grid.shape[1] - 3, value_grid.shape[2] - 3
],
dtype=np.float32)
verts = args.vis_scale * (verts - 0.5)
faces = np.stack([faces[..., 1], faces[..., 0], faces[..., 2]], axis=-1)
return trimesh.Trimesh(vertices=verts, faces=faces)
def transform_mesh(mesh, name, trans_dir):
"""Transform mesh back to the same coordinate of ground truth.
Args:
mesh: trimesh.Trimesh, predicted mesh before transformation.
name: Tensor, hash name of the mesh as recorded in the dataset.
trans_dir: string, path to the directory for loading transformations.
Returns:
mesh: trimesh.Trimesh, the transformed mesh.
"""
if trans_dir is None:
raise ValueError("Need to specify args.trans_dir for loading pred-to-target"
"transformations.")
cls_name, obj_name = mesh_name_helper(name)
with tf.io.gfile.GFile(
path.join(trans_dir, "test", cls_name, obj_name, "occnet_to_gaps.txt"),
"r") as fin:
tx = np.loadtxt(fin).reshape([4, 4])
mesh.apply_transform(np.linalg.inv(tx))
return mesh
def save_mesh(mesh, name, eval_dir):
"""Save a mesh to disk.
Args:
mesh: trimesh.Trimesh, the mesh to save.
name: Tensor, hash name of the mesh as recorded in the dataset.
eval_dir: string, path to the directory to save the mesh.
"""
cls_name, obj_name = mesh_name_helper(name)
cls_dir = path.join(eval_dir, "meshes", cls_name)
if not tf.io.gfile.isdir(cls_dir):
tf.io.gfile.makedirs(cls_dir)
with tf.io.gfile.GFile(path.join(cls_dir, obj_name + ".obj"), "w") as fout:
mesh.export(fout, file_type="obj")
def distance_field_helper(source, target):
target_kdtree = sp.spatial.cKDTree(target)
distances, unused_var = target_kdtree.query(source, n_jobs=-1)
return distances
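# A hypothetical numeric check of the chamfer computation used below, run on
# random point clouds instead of mesh surface samples.
def _example_chamfer_on_random_points():
  rng = np.random.RandomState(0)
  points_a = rng.rand(100, 3).astype(np.float32)
  points_b = rng.rand(100, 3).astype(np.float32)
  a_to_b = distance_field_helper(points_a, points_b)
  b_to_a = distance_field_helper(points_b, points_a)
  # Symmetric chamfer distance over squared nearest-neighbor distances.
  return np.mean(a_to_b**2) + np.mean(b_to_a**2)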
def compute_surface_metrics(mesh, name, mesh_dir):
"""Compute surface metrics (chamfer distance and f-score) for one example.
Args:
mesh: trimesh.Trimesh, the mesh to evaluate.
name: Tensor, hash name of the mesh as recorded in the dataset.
mesh_dir: string, path to the directory for loading ground truth meshes.
Returns:
chamfer: float, chamfer distance.
fscore: float, f-score.
"""
if mesh_dir is None:
raise ValueError("Need to specify args.mesh_dir for loading ground truth.")
cls_name, obj_name = mesh_name_helper(name)
with tf.io.gfile.GFile(
path.join(mesh_dir, "test", cls_name, obj_name, "model_occnet.ply"),
"rb",
) as fin:
mesh_gt = trimesh.Trimesh(**trimesh.exchange.ply.load_ply(fin))
# Chamfer
eval_points = 100000
point_gt = mesh_gt.sample(eval_points)
point_gt = point_gt.astype(np.float32)
point_pred = mesh.sample(eval_points)
point_pred = point_pred.astype(np.float32)
pred_to_gt = distance_field_helper(point_pred, point_gt)
gt_to_pred = distance_field_helper(point_gt, point_pred)
chamfer = np.mean(pred_to_gt**2) + np.mean(gt_to_pred**2)
# Fscore
tau = 1e-4
eps = 1e-9
pred_to_gt = (pred_to_gt**2)
gt_to_pred = (gt_to_pred**2)
prec_tau = (pred_to_gt <= tau).astype(np.float32).mean() * 100.
recall_tau = (gt_to_pred <= tau).astype(np.float32).mean() * 100.
fscore = (2 * prec_tau * recall_tau) / max(prec_tau + recall_tau, eps)
# Following the tradition of scaling the chamfer distance up by 10; since
# squared distances are used here, this amounts to a factor of 100.
return chamfer * 100., fscore
def init_stats():
"""Initialize evaluation stats."""
stats = {}
for k in SYSNET_CLASSES:
stats[k] = {
"cnt": 0,
"iou": 0.,
"chamfer": 0.,
"fscore": 0.,
}
return stats
def update_stats(example_stats, name, shapenet_stats):
"""Update evaluation statistics.
Args:
example_stats: Stats, the stats of one example.
name: Tensor, hash name of the example as recorded in the dataset.
shapenet_stats: dict, the current stats of the whole dataset.
"""
cls_name, unused_var = mesh_name_helper(name)
shapenet_stats[cls_name]["cnt"] += 1
shapenet_stats[cls_name]["iou"] += example_stats.iou
shapenet_stats[cls_name]["chamfer"] += example_stats.chamfer
shapenet_stats[cls_name]["fscore"] += example_stats.fscore
shapenet_stats["all"]["cnt"] += 1
shapenet_stats["all"]["iou"] += example_stats.iou
shapenet_stats["all"]["chamfer"] += example_stats.chamfer
shapenet_stats["all"]["fscore"] += example_stats.fscore
def average_stats(shapenet_stats):
"""Average the accumulated stats of the whole dataset."""
for k, v in shapenet_stats.items():
cnt = max(v["cnt"], 1)
shapenet_stats[k] = {
"iou": v["iou"] / cnt,
"chamfer": v["chamfer"] / cnt,
"fscore": v["fscore"] / cnt,
}
def write_stats(stats, eval_dir, step):
"""Write stats of the dataset to disk.
Args:
stats: dict, statistics to save.
eval_dir: string, path to the directory to save the statistics.
step: int, the global step of the checkpoint.
"""
if not tf.io.gfile.isdir(eval_dir):
tf.io.gfile.makedirs(eval_dir)
with tf.io.gfile.GFile(path.join(eval_dir, "stats_{}.csv".format(step)),
"w") as fout:
fout.write("class,iou,chamfer,fscore\n")
for k in sorted(stats.keys()):
if k == "all":
continue
fout.write("{0},{1},{2},{3}\n".format(
SYSNET_CLASSES[k],
stats[k]["iou"],
stats[k]["chamfer"],
stats[k]["fscore"],
))
fout.write("all,{0},{1},{2}".format(
stats["all"]["iou"],
stats["all"]["chamfer"],
stats["all"]["fscore"],
))
| # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from os import path
import numpy as np
import scipy as sp
from skimage import measure
import tensorflow.compat.v1 as tf
from tensorflow_graphics.projects.cvxnet.lib import datasets
from tensorflow_graphics.projects.cvxnet.lib import models
from tensorflow_graphics.projects.cvxnet.lib.libmise import mise
import trimesh
Stats = collections.namedtuple("Stats", ["iou", "chamfer", "fscore"])
SYSNET_CLASSES = {
"02691156": "airplane",
"02933112": "cabinet",
"03001627": "chair",
"03636649": "lamp",
"04090263": "rifle",
"04379243": "table",
"04530566": "watercraft",
"02828884": "bench",
"02958343": "car",
"03211117": "display",
"03691459": "speaker",
"04256520": "sofa",
"04401088": "telephone",
"all": "all",
}
def define_flags():
"""Define command line flags."""
flags = tf.app.flags
# Model flags
flags.DEFINE_enum("model", "multiconvex",
list(k for k in models.model_dict.keys()),
"Name of the model.")
flags.DEFINE_float("sharpness", 75., "Sharpness term.")
flags.DEFINE_integer("n_parts", 50, "Number of convexes uesd.")
flags.DEFINE_integer("n_half_planes", 25, "Number of half spaces used.")
flags.DEFINE_integer("latent_size", 256, "The size of latent code.")
flags.DEFINE_integer("dims", 3, "The dimension of query points.")
flags.DEFINE_bool("image_input", False, "Use color images as input if True.")
flags.DEFINE_float("vis_scale", 1.3,
"Scale of bbox used when extracting meshes.")
flags.DEFINE_float("level_set", 0.5,
"Level set used for extracting surfaces.")
# Dataset flags
flags.DEFINE_enum("dataset", "shapenet",
list(k for k in datasets.dataset_dict.keys()),
"Name of the dataset.")
flags.DEFINE_integer("image_h", 137, "The height of the color images.")
flags.DEFINE_integer("image_w", 137, "The width of the color images.")
flags.DEFINE_integer("image_d", 3, "The channels of color images.")
flags.DEFINE_integer("depth_h", 224, "The height of depth images.")
flags.DEFINE_integer("depth_w", 224, "The width of depth images.")
flags.DEFINE_integer("depth_d", 20, "The number of depth views.")
flags.DEFINE_integer("n_views", 24, "The number of color images views.")
flags.DEFINE_string("data_dir", None, "The base directory to load data from.")
flags.mark_flag_as_required("data_dir")
flags.DEFINE_string("obj_class", "*", "Object class used from dataset.")
# Training flags
flags.DEFINE_float("lr", 1e-4, "Start learning rate.")
flags.DEFINE_string(
"train_dir", None, "The base directory to save training info and"
"checkpoints.")
flags.DEFINE_integer("save_every", 20000,
"The number of steps to save checkpoint.")
flags.DEFINE_integer("max_steps", 800000, "The number of steps of training.")
flags.DEFINE_integer("batch_size", 32, "Batch size.")
flags.DEFINE_integer("sample_bbx", 1024,
"The number of bounding box sample points.")
flags.DEFINE_integer("sample_surf", 1024,
"The number of surface sample points.")
flags.DEFINE_float("weight_overlap", 0.1, "Weight of overlap_loss")
flags.DEFINE_float("weight_balance", 0.01, "Weight of balance_loss")
flags.DEFINE_float("weight_center", 0.001, "Weight of center_loss")
flags.mark_flag_as_required("train_dir")
# Eval flags
flags.DEFINE_bool("extract_mesh", False,
"Extract meshes and set to disk if True.")
flags.DEFINE_bool("surface_metrics", False,
"Measure surface metrics and save to csv if True.")
flags.DEFINE_string("mesh_dir", None, "Path to load ground truth meshes.")
flags.DEFINE_string("trans_dir", None,
"Path to load pred-to-target transformations.")
flags.DEFINE_bool("eval_once", False, "Evaluate the model only once if True.")
def mesh_name_helper(name):
name = name[0].decode("utf-8")
split = name.find("-")
cls_name = name[:split]
obj_name = name[split + 1:]
return cls_name, obj_name
def extract_mesh(input_val, params, indicators, input_holder, params_holder,
points_holder, sess, args):
"""Extracting meshes from an indicator function.
Args:
input_val: np.array, [1, height, width, channel], input image.
params: tf.Operation, hyperplane parameter hook.
indicators: tf.Operation, indicator hook.
input_holder: tf.Placeholder, input image placeholder.
params_holder: tf.Placeholder, hyperplane parameter placeholder.
points_holder: tf.Placeholder, query point placeholder.
sess: tf.Session, running sess.
args: tf.app.flags.FLAGS, configurations.
Returns:
mesh: trimesh.Trimesh, the extracted mesh.
"""
mesh_extractor = mise.MISE(64, 1, args.level_set)
points = mesh_extractor.query()
params_val = sess.run(params, {input_holder: input_val})
while points.shape[0] != 0:
orig_points = points
points = points.astype(np.float32)
points = (
(np.expand_dims(points, axis=0) / mesh_extractor.resolution - 0.5) *
args.vis_scale)
n_points = points.shape[1]
values = []
for i in range(0, n_points, 100000): # Add this to prevent OOM.
value = sess.run(indicators, {
params_holder: params_val,
points_holder: points[:, i:i + 100000]
})
values.append(value)
values = np.concatenate(values, axis=1)
values = values[0, :, 0].astype(np.float64)
mesh_extractor.update(orig_points, values)
points = mesh_extractor.query()
value_grid = mesh_extractor.to_dense()
value_grid = np.pad(value_grid, 1, "constant", constant_values=-1e6)
verts, faces, normals, unused_var = measure.marching_cubes_lewiner(
value_grid, min(args.level_set,
value_grid.max() * 0.75))
del normals
verts -= 1
verts /= np.array([
value_grid.shape[0] - 3, value_grid.shape[1] - 3, value_grid.shape[2] - 3
],
dtype=np.float32)
verts = args.vis_scale * (verts - 0.5)
faces = np.stack([faces[..., 1], faces[..., 0], faces[..., 2]], axis=-1)
return trimesh.Trimesh(vertices=verts, faces=faces)
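# A hypothetical, self-contained sketch of the marching-cubes step used in
# `extract_mesh`, run on an analytic sphere instead of network indicators.
def _example_marching_cubes_sphere():
  # Signed field of a sphere of radius 12 voxels on a 32^3 grid.
  coords = np.mgrid[:32, :32, :32].astype(np.float32) - 16.0
  field = 12.0 - np.sqrt(np.sum(coords**2, axis=0))
  verts, faces, unused_normals, unused_values = (
      measure.marching_cubes_lewiner(field, 0.0))
  return trimesh.Trimesh(vertices=verts, faces=faces)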
def transform_mesh(mesh, name, trans_dir):
"""Transform mesh back to the same coordinate of ground truth.
Args:
mesh: trimesh.Trimesh, predicted mesh before transformation.
name: Tensor, hash name of the mesh as recorded in the dataset.
trans_dir: string, path to the directory for loading transformations.
Returns:
mesh: trimesh.Trimesh, the transformed mesh.
"""
if trans_dir is None:
raise ValueError("Need to specify args.trans_dir for loading pred-to-target"
"transformations.")
cls_name, obj_name = mesh_name_helper(name)
with tf.io.gfile.GFile(
path.join(trans_dir, "test", cls_name, obj_name, "occnet_to_gaps.txt"),
"r") as fin:
tx = np.loadtxt(fin).reshape([4, 4])
mesh.apply_transform(np.linalg.inv(tx))
return mesh
def save_mesh(mesh, name, eval_dir):
"""Save a mesh to disk.
Args:
mesh: trimesh.Trimesh, the mesh to save.
name: Tensor, hash name of the mesh as recorded in the dataset.
eval_dir: string, path to the directory to save the mesh.
"""
cls_name, obj_name = mesh_name_helper(name)
cls_dir = path.join(eval_dir, "meshes", cls_name)
if not tf.io.gfile.isdir(cls_dir):
tf.io.gfile.makedirs(cls_dir)
with tf.io.gfile.GFile(path.join(cls_dir, obj_name + ".obj"), "w") as fout:
mesh.export(fout, file_type="obj")
def distance_field_helper(source, target):
target_kdtree = sp.spatial.cKDTree(target)
distances, unused_var = target_kdtree.query(source, n_jobs=-1)
return distances
def compute_surface_metrics(mesh, name, mesh_dir):
"""Compute surface metrics (chamfer distance and f-score) for one example.
Args:
mesh: trimesh.Trimesh, the mesh to evaluate.
name: Tensor, hash name of the mesh as recorded in the dataset.
mesh_dir: string, path to the directory for loading ground truth meshes.
Returns:
chamfer: float, chamfer distance.
fscore: float, f-score.
"""
if mesh_dir is None:
raise ValueError("Need to specify args.mesh_dir for loading ground truth.")
cls_name, obj_name = mesh_name_helper(name)
with tf.io.gfile.GFile(
path.join(mesh_dir, "test", cls_name, obj_name, "model_occnet.ply"),
"rb",
) as fin:
mesh_gt = trimesh.Trimesh(**trimesh.exchange.ply.load_ply(fin))
# Chamfer
eval_points = 100000
point_gt = mesh_gt.sample(eval_points)
point_gt = point_gt.astype(np.float32)
point_pred = mesh.sample(eval_points)
point_pred = point_pred.astype(np.float32)
pred_to_gt = distance_field_helper(point_pred, point_gt)
gt_to_pred = distance_field_helper(point_gt, point_pred)
chamfer = np.mean(pred_to_gt**2) + np.mean(gt_to_pred**2)
# Fscore
tau = 1e-4
eps = 1e-9
pred_to_gt = (pred_to_gt**2)
gt_to_pred = (gt_to_pred**2)
prec_tau = (pred_to_gt <= tau).astype(np.float32).mean() * 100.
recall_tau = (gt_to_pred <= tau).astype(np.float32).mean() * 100.
fscore = (2 * prec_tau * recall_tau) / max(prec_tau + recall_tau, eps)
# Following the tradition of scaling the chamfer distance up by 10; since
# squared distances are used here, this amounts to a factor of 100.
return chamfer * 100., fscore
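# A small, hypothetical worked example of the f-score arithmetic above: with
# 80% precision and 60% recall at threshold tau, the f-score is
# 2 * 80 * 60 / (80 + 60), roughly 68.57.
def _example_fscore():
  prec_tau, recall_tau, eps = 80., 60., 1e-9
  return (2 * prec_tau * recall_tau) / max(prec_tau + recall_tau, eps)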
def init_stats():
"""Initialize evaluation stats."""
stats = {}
for k in SYSNET_CLASSES:
stats[k] = {
"cnt": 0,
"iou": 0.,
"chamfer": 0.,
"fscore": 0.,
}
return stats
def update_stats(example_stats, name, shapenet_stats):
"""Update evaluation statistics.
Args:
example_stats: Stats, the stats of one example.
name: Tensor, hash name of the example as recorded in the dataset.
shapenet_stats: dict, the current stats of the whole dataset.
"""
cls_name, unused_var = mesh_name_helper(name)
shapenet_stats[cls_name]["cnt"] += 1
shapenet_stats[cls_name]["iou"] += example_stats.iou
shapenet_stats[cls_name]["chamfer"] += example_stats.chamfer
shapenet_stats[cls_name]["fscore"] += example_stats.fscore
shapenet_stats["all"]["cnt"] += 1
shapenet_stats["all"]["iou"] += example_stats.iou
shapenet_stats["all"]["chamfer"] += example_stats.chamfer
shapenet_stats["all"]["fscore"] += example_stats.fscore
def average_stats(shapenet_stats):
"""Average the accumulated stats of the whole dataset."""
for k, v in shapenet_stats.items():
cnt = max(v["cnt"], 1)
shapenet_stats[k] = {
"iou": v["iou"] / cnt,
"chamfer": v["chamfer"] / cnt,
"fscore": v["fscore"] / cnt,
}
def write_stats(stats, eval_dir, step):
"""Write stats of the dataset to disk.
Args:
stats: dict, statistics to save.
eval_dir: string, path to the directory to save the statistics.
step: int, the global step of the checkpoint.
"""
if not tf.io.gfile.isdir(eval_dir):
tf.io.gfile.makedirs(eval_dir)
with tf.io.gfile.GFile(path.join(eval_dir, "stats_{}.csv".format(step)),
"w") as fout:
fout.write("class,iou,chamfer,fscore\n")
for k in sorted(stats.keys()):
if k == "all":
continue
fout.write("{0},{1},{2},{3}\n".format(
SYSNET_CLASSES[k],
stats[k]["iou"],
stats[k]["chamfer"],
stats[k]["fscore"],
))
fout.write("all,{0},{1},{2}".format(
stats["all"]["iou"],
stats["all"]["chamfer"],
stats["all"]["fscore"],
))
| -1 |
tensorflow/graphics | 489 | Enforce `Framebuffer` to accept only tensors with single batch dimension. | Enforce `Framebuffer` to accept only tensors with single batch dimension.
| copybara-service[bot] | "2021-02-03T21:06:22Z" | "2021-02-12T23:59:48Z" | 3a4f1952ed967fb884dc031eeda6dac3fbefbe52 | b7a2bf260d6fcf924fddcbb6dba36c72ece66990 | Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
| ./tensorflow_graphics/geometry/representation/mesh/sampler.py | # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Computes a weighted point sampling of a triangular mesh.
This op computes a uniform sampling of points on the surface of the mesh.
Points are sampled from the surface of each triangle using a uniform
distribution, proportional to a specified face density (e.g. face area).
Uses the approach mentioned in the TOG 2002 paper "Shape distributions"
(https://dl.acm.org/citation.cfm?id=571648)
to generate random barycentric coordinates.
This op can be used for several tasks, including better mesh reconstruction.
For example, see these recent papers demonstrating reconstruction losses using
this op:
1. "GEOMetrics: Exploiting Geometric Structure for Graph-Encoded Objects"
(https://arxiv.org/abs/1901.11461) ICML 2019.
2. "Mesh R-CNN" (https://arxiv.org/abs/1906.02739) ICCV 2019.
The op is differentiable w.r.t. mesh vertex positions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow_graphics.geometry.representation import triangle
from tensorflow_graphics.geometry.representation.mesh import normals
from tensorflow_graphics.util import asserts
from tensorflow_graphics.util import export_api
from tensorflow_graphics.util import shape
def triangle_area(vertex0, vertex1, vertex2, name="triangle_area"):
"""Computes triangle areas.
Note:
Computed triangle area = 0.5 * | e1 x e2 | where e1 and e2 are edges
of triangle.
In the following, A1 to An are optional batch dimensions, which must be
broadcast compatible.
Args:
vertex0: A tensor of shape `[A1, ..., An, 3]`, where the last dimension
represents the first vertex of a triangle.
vertex1: A tensor of shape `[A1, ..., An, 3]`, where the last dimension
represents the second vertex of a triangle.
vertex2: A tensor of shape `[A1, ..., An, 3]`, where the last dimension
represents the third vertex of a triangle.
name: A name for this op. Defaults to "triangle_area".
Returns:
A tensor of shape `[A1, ..., An]` containing the triangle areas.
"""
with tf.name_scope(name):
vertex0 = tf.convert_to_tensor(value=vertex0)
vertex1 = tf.convert_to_tensor(value=vertex1)
vertex2 = tf.convert_to_tensor(value=vertex2)
triangle_normals = triangle.normal(
vertex0, vertex1, vertex2, normalize=False)
areas = 0.5 * tf.linalg.norm(tensor=triangle_normals, axis=-1)
return areas
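# A hypothetical sanity check for `triangle_area`: a unit right triangle in
# the xy-plane has area 0.5.
def _example_triangle_area():
  vertex0 = tf.constant([0.0, 0.0, 0.0])
  vertex1 = tf.constant([1.0, 0.0, 0.0])
  vertex2 = tf.constant([0.0, 1.0, 0.0])
  return triangle_area(vertex0, vertex1, vertex2)  # ~0.5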
def _random_categorical_sample(num_samples,
weights,
seed=None,
stateless=False,
name="random_categorical_sample",
sample_dtype=tf.int32):
"""Samples from a categorical distribution with arbitrary batch dimensions.
Note:
In the following, A1 to An are optional batch dimensions.
Args:
num_samples: An `int32` scalar denoting the number of samples to generate
per mesh.
weights: A `float` tensor of shape `[A1, ..., An, F]` where F is number of
faces.
All weights must be > 0.
seed: Optional random seed, value depends on `stateless`.
stateless: Optional flag to use stateless random sampler. If stateless=True,
then `seed` must be provided as shape `[2]` int tensor. Stateless random
sampling is useful for testing to generate the same reproducible sequence
across calls. If stateless=False, then a stateful random number generator
is used (default behavior).
name: Name for op. Defaults to "random_categorical_sample".
sample_dtype: Type of output samples.
Returns:
A `sample_dtype` tensor of shape `[A1, ..., An, num_samples]`.
"""
with tf.name_scope(name):
asserts.assert_all_above(weights, 0)
logits = tf.math.log(weights)
num_faces = tf.shape(input=logits)[-1]
batch_shape = tf.shape(input=logits)[:-1]
logits_2d = tf.reshape(logits, [-1, num_faces])
if stateless:
seed = tf.convert_to_tensor(value=seed)
shape.check_static(
tensor=seed, tensor_name="seed", has_dim_equals=(-1, 2))
sample_fn = tf.random.stateless_categorical
else:
sample_fn = tf.random.categorical
draws = sample_fn(
logits=logits_2d,
num_samples=num_samples,
dtype=sample_dtype,
seed=seed)
samples = tf.reshape(
draws,
shape=tf.concat((batch_shape, (num_samples,)), axis=0))
return samples
def generate_random_face_indices(num_samples,
face_weights,
seed=None,
stateless=False,
name="generate_random_face_indices"):
"""Generate a sample of face ids given per face probability.
Note:
In the following, A1 to An are optional batch dimensions.
Args:
num_samples: An `int32` scalar denoting the number of samples to generate
per mesh.
face_weights: A `float` tensor of shape `[A1, ..., An, F]` where F is
number of faces. All weights must be > 0.
seed: Optional seed for the random number generator.
stateless: Optional flag to use stateless random sampler. If stateless=True,
then `seed` must be provided as shape `[2]` int tensor. Stateless random
sampling is useful for testing to generate the same reproducible sequence
across calls. If stateless=False, then a stateful random number generator
is used (default behavior).
name: Name for op. Defaults to "generate_random_face_indices".
Returns:
An `int32` tensor of shape `[A1, ..., An, num_samples]` denoting sampled
face indices.
"""
with tf.name_scope(name):
num_samples = tf.convert_to_tensor(value=num_samples)
face_weights = tf.convert_to_tensor(value=face_weights)
shape.check_static(
tensor=face_weights,
tensor_name="face_weights",
has_rank_greater_than=0)
shape.check_static(
tensor=num_samples, tensor_name="num_samples", has_rank=0)
face_weights = asserts.assert_all_above(face_weights, minval=0.0)
eps = asserts.select_eps_for_division(face_weights.dtype)
face_weights = face_weights + eps
sampled_face_indices = _random_categorical_sample(
num_samples=num_samples,
weights=face_weights,
seed=seed,
stateless=stateless)
return sampled_face_indices
def generate_random_barycentric_coordinates(
sample_shape,
dtype=tf.dtypes.float32,
seed=None,
stateless=False,
name="generate_random_barycentric_coordinates"):
"""Generate uniformly sampled random barycentric coordinates.
Note:
In the following, A1 to An are optional batch dimensions.
Args:
sample_shape: An `int` tensor with shape `[n+1,]` and values `(A1, ..., An,
num_samples)` denoting total number of random samples drawn, where `n` is
number of batch dimensions, and `num_samples` is the number of samples
drawn for each mesh.
dtype: Optional type of generated barycentric coordinates, defaults to
float32.
seed: An optional random seed.
stateless: Optional flag to use stateless random sampler. If stateless=True,
then `seed` must be provided as shape `[2]` int tensor. Stateless random
sampling is useful for testing to generate the same reproducible sequence
across calls. If stateless=False, then a stateful random number generator
is used (default behavior).
name: Name for op. Defaults to "generate_random_barycentric_coordinates".
Returns:
A `dtype` tensor of shape [A1, ..., An, num_samples, 3],
where the last dimension contains the sampled barycentric coordinates.
"""
with tf.name_scope(name):
sample_shape = tf.convert_to_tensor(value=sample_shape)
shape.check_static(
tensor=sample_shape, tensor_name="sample_shape", has_rank=1)
sample_shape = tf.concat((sample_shape, (2,)), axis=0)
if stateless:
seed = tf.convert_to_tensor(value=seed)
shape.check_static(
tensor=seed, tensor_name="seed", has_dim_equals=(-1, 2))
sample_fn = tf.random.stateless_uniform
else:
sample_fn = tf.random.uniform
random_uniform = sample_fn(
shape=sample_shape, minval=0.0, maxval=1.0, dtype=dtype, seed=seed)
random1 = tf.sqrt(random_uniform[..., 0])
random2 = random_uniform[..., 1]
barycentric = tf.stack(
(1 - random1, random1 * (1 - random2), random1 * random2), axis=-1)
return barycentric
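# A hypothetical check that the sampled barycentric coordinates are valid:
# every entry lies in [0, 1] and each triple sums to 1.
def _example_barycentric_coordinates():
  barycentric = generate_random_barycentric_coordinates(
      sample_shape=tf.constant([4], dtype=tf.int32),
      seed=tf.constant([0, 1], dtype=tf.int32),
      stateless=True)
  return tf.reduce_sum(barycentric, axis=-1)  # ~[1., 1., 1., 1.]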
def weighted_random_sample_triangle_mesh(
vertex_attributes,
faces,
num_samples,
face_weights,
seed=None,
stateless=False,
name="weighted_random_sample_triangle_mesh"):
"""Performs a face probability weighted random sampling of a tri mesh.
Note:
In the following, A1 to An are optional batch dimensions.
Args:
vertex_attributes: A `float` tensor of shape `[A1, ..., An, V, D]`, where V
is the number of vertices, and D is dimensionality of each vertex.
faces: An `int` tensor of shape `[A1, ..., An, F, 3]`, where F is the number
of faces.
num_samples: An `int` 0-D tensor denoting the number of samples to be drawn
from each mesh.
face_weights: A `float` tensor of shape `[A1, ..., An, F]`, denoting the
unnormalized sampling probability of each face, where F is the number of
faces.
seed: Optional random seed.
stateless: Optional flag to use stateless random sampler. If stateless=True,
then seed must be provided as shape `[2]` int tensor. Stateless random
sampling is useful for testing to generate same sequence across calls.
name: Name for op. Defaults to "weighted_random_sample_triangle_mesh".
Returns:
    sample_points: A `float` tensor of shape `[A1, ..., An, num_samples, D]`,
      where D is the dimensionality of each sampled point.
    sample_face_indices: An `int` tensor of shape `[A1, ..., An, num_samples]`.
"""
with tf.name_scope(name):
faces = tf.convert_to_tensor(value=faces)
vertex_attributes = tf.convert_to_tensor(value=vertex_attributes)
face_weights = tf.convert_to_tensor(value=face_weights)
num_samples = tf.convert_to_tensor(value=num_samples)
shape.check_static(
tensor=vertex_attributes,
tensor_name="vertex_attributes",
has_rank_greater_than=1)
shape.check_static(
tensor=faces, tensor_name="faces", has_rank_greater_than=1)
shape.check_static(
tensor=face_weights,
tensor_name="face_weights",
has_rank_greater_than=0)
shape.compare_batch_dimensions(
tensors=(faces, face_weights),
last_axes=(-2, -1),
tensor_names=("faces", "face_weights"),
broadcast_compatible=False)
shape.compare_batch_dimensions(
tensors=(vertex_attributes, faces, face_weights),
last_axes=(-3, -3, -2),
tensor_names=("vertex_attributes", "faces", "face_weights"),
broadcast_compatible=False)
asserts.assert_all_above(face_weights, 0)
batch_dims = faces.shape.ndims - 2
batch_shape = faces.shape.as_list()[:-2]
sample_shape = tf.concat(
(batch_shape, tf.convert_to_tensor(
value=(num_samples,), dtype=tf.int32)),
axis=0)
sample_face_indices = generate_random_face_indices(
num_samples, face_weights, seed=seed, stateless=stateless)
sample_vertex_indices = tf.gather(
faces, sample_face_indices, batch_dims=batch_dims)
sample_vertices = tf.gather(
vertex_attributes, sample_vertex_indices, batch_dims=batch_dims)
barycentric = generate_random_barycentric_coordinates(
sample_shape,
dtype=vertex_attributes.dtype,
seed=seed,
stateless=stateless)
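    # The next three lines interpolate each sample as the barycentric
    # combination b0 * v0 + b1 * v1 + b2 * v2 of its triangle's vertices.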
barycentric = tf.expand_dims(barycentric, axis=-1)
sample_points = tf.math.multiply(sample_vertices, barycentric)
sample_points = tf.reduce_sum(input_tensor=sample_points, axis=-2)
return sample_points, sample_face_indices
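# Example usage (a minimal sketch with illustrative values): sampling a single
# unbatched triangle with a uniform face weight.
#
#   vertices = tf.constant([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.]])
#   faces = tf.constant([[0, 1, 2]], dtype=tf.int32)
#   points, face_ids = weighted_random_sample_triangle_mesh(
#       vertices, faces, num_samples=100, face_weights=tf.constant([1.0]))
#   # `points` has shape [100, 3]; `face_ids` has shape [100] (all zero here).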
def area_weighted_random_sample_triangle_mesh(
vertex_attributes,
faces,
num_samples,
vertex_positions=None,
seed=None,
stateless=False,
name="area_weighted_random_sample_triangle_mesh"):
"""Performs a face area weighted random sampling of a tri mesh.
Note:
In the following, A1 to An are optional batch dimensions.
Args:
    vertex_attributes: A `float` tensor of shape `[A1, ..., An, V, D]`, where V
      is the number of vertices, and D is the dimensionality of the feature
      defined on each vertex. If `vertex_positions` is not provided, the first
      3 channels of the last dimension of `vertex_attributes` are interpreted
      as the vertex positions.
    faces: An `int` tensor of shape `[A1, ..., An, F, 3]`, where F is the
      number of faces.
    num_samples: An `int` scalar denoting the number of samples to be drawn
      from each mesh.
    vertex_positions: An optional `float` tensor of shape
      `[A1, ..., An, V, 3]`, where V is the number of vertices. If None, then
      `vertex_attributes[..., :3]` is used as the vertex positions.
    seed: Optional random seed.
    stateless: Optional flag to use the stateless random sampler. If
      stateless=True, then `seed` must be provided as a shape `[2]` int
      tensor. Stateless random sampling is useful for testing to generate the
      same reproducible sequence across calls.
name: Name for op. Defaults to "area_weighted_random_sample_triangle_mesh".
Returns:
    sample_points: A `float` tensor of shape `[A1, ..., An, num_samples, D]`,
      where D is the dimensionality of each sampled point.
    sample_face_indices: An `int` tensor of shape `[A1, ..., An, num_samples]`.
"""
with tf.name_scope(name):
faces = tf.convert_to_tensor(value=faces)
vertex_attributes = tf.convert_to_tensor(value=vertex_attributes)
num_samples = tf.convert_to_tensor(value=num_samples)
shape.check_static(
tensor=vertex_attributes,
tensor_name="vertex_attributes",
has_rank_greater_than=1)
shape.check_static(
tensor=vertex_attributes,
tensor_name="vertex_attributes",
has_dim_greater_than=(-1, 2))
if vertex_positions is not None:
vertex_positions = tf.convert_to_tensor(value=vertex_positions)
else:
vertex_positions = vertex_attributes[..., :3]
shape.check_static(
tensor=vertex_positions,
tensor_name="vertex_positions",
has_rank_greater_than=1)
shape.check_static(
tensor=vertex_positions,
tensor_name="vertex_positions",
has_dim_equals=(-1, 3))
triangle_vertex_positions = normals.gather_faces(vertex_positions, faces)
triangle_areas = triangle_area(triangle_vertex_positions[..., 0, :],
triangle_vertex_positions[..., 1, :],
triangle_vertex_positions[..., 2, :])
return weighted_random_sample_triangle_mesh(
vertex_attributes,
faces,
num_samples,
face_weights=triangle_areas,
seed=seed,
stateless=stateless)
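# Example usage (a minimal sketch with illustrative values): face areas drive
# the sampling density, so the larger triangle below receives proportionally
# more samples.
#
#   vertices = tf.constant([[0., 0., 0.], [2., 0., 0.], [0., 2., 0.],
#                           [0., 0., 1.]])
#   faces = tf.constant([[0, 1, 2], [0, 1, 3]], dtype=tf.int32)
#   points, face_ids = area_weighted_random_sample_triangle_mesh(
#       vertices, faces, num_samples=1000)
#   # Face 0 has area 2.0 and face 1 has area 1.0, so about 2/3 of `face_ids`
#   # are expected to be 0.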
# API contains all public functions and classes.
__all__ = export_api.get_functions_and_classes()

./tensorflow_graphics/geometry/deformation_energy/__init__.py:
# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Deformation energies module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_graphics.geometry.deformation_energy import as_conformal_as_possible
from tensorflow_graphics.util import export_api as _export_api
# API contains submodules of tensorflow_graphics.deformation_energy.
__all__ = _export_api.get_modules()

./tensorflow_graphics/datasets/features/__init__.py:
# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""`tensorflow_graphics.datasets.features` API defining feature types."""
from tensorflow_graphics.datasets.features.camera_feature import Camera
from tensorflow_graphics.datasets.features.pose_feature import Pose
from tensorflow_graphics.datasets.features.trimesh_feature import TriangleMesh
from tensorflow_graphics.datasets.features.voxel_feature import VoxelGrid
__all__ = [
"TriangleMesh",
"VoxelGrid",
"Camera",
"Pose"
]

./tensorflow_graphics/projects/pointnet/train.py:
# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Training loop for PointNet v1 on modelnet40."""
# pylint: disable=missing-function-docstring
import tensorflow as tf
from tensorflow_graphics.datasets import modelnet40
from tensorflow_graphics.nn.layer import pointnet
import tqdm # pylint: disable=g-bad-import-order
from . import augment # pylint: disable=g-bad-import-order
from . import helpers # pylint: disable=g-bad-import-order
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
parser = helpers.ArgumentParser()
parser.add("--batch_size", 32)
parser.add("--num_epochs", 250)
parser.add("--num_points", 2048, help="subsampled (max 2048)")
parser.add("--learning_rate", 1e-3, help="initial Adam learning rate")
parser.add("--lr_decay", True, help="enable learning rate decay")
parser.add("--bn_decay", .5, help="batch norm decay momentum")
parser.add("--tb_every", 100, help="tensorboard frequency (iterations)")
parser.add("--ev_every", 308, help="evaluation frequency (iterations)")
parser.add("--augment", True, help="use augmentations")
parser.add("--tqdm", True, help="enable the progress bar")
FLAGS = parser.parse_args()
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
if FLAGS.lr_decay:
lr_scheduler = tf.keras.optimizers.schedules.ExponentialDecay(
FLAGS.learning_rate,
      decay_steps=6250,  # 200,000 / 32 (batch size) (from the original PointNet)
decay_rate=0.7,
staircase=True)
optimizer = tf.keras.optimizers.Adam(learning_rate=lr_scheduler)
else:
optimizer = tf.keras.optimizers.Adam(learning_rate=FLAGS.learning_rate)
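# When FLAGS.lr_decay is set, the effective learning rate at optimizer step s
# is FLAGS.learning_rate * 0.7 ** (s // 6250): with batch size 32, the rate
# drops by 30% after every ~200,000 training samples.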
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
model = pointnet.PointNetVanillaClassifier(
num_classes=40, momentum=FLAGS.bn_decay)
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
@tf.function
def wrapped_tf_function(points, label):
"""Performs one step of minimization of the loss."""
  # --- subsampling (order DOES matter)
points = points[0:FLAGS.num_points, ...]
# --- augmentation
if FLAGS.augment:
points = tf.map_fn(augment.rotate, points)
points = augment.jitter(points)
# --- training
with tf.GradientTape() as tape:
logits = model(points, training=True)
loss = model.loss(label, logits)
variables = model.trainable_variables
gradients = tape.gradient(loss, variables)
optimizer.apply_gradients(zip(gradients, variables))
return loss
def train(example):
"""Performs one step of minimization of the loss and populates the summary."""
points = example["points"]
label = example["label"]
step = optimizer.iterations.numpy()
# --- optimize
loss = wrapped_tf_function(points, label)
if step % FLAGS.tb_every == 0:
tf.summary.scalar(name="loss", data=loss, step=step)
# --- report rate in summaries
if FLAGS.lr_decay and step % FLAGS.tb_every == 0:
tf.summary.scalar(name="learning_rate", data=lr_scheduler(step), step=step)
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
def evaluate():
"""Identify the best accuracy reached during training."""
step = optimizer.iterations.numpy()
if "best_accuracy" not in evaluate.__dict__:
evaluate.best_accuracy = 0
if step % FLAGS.ev_every != 0:
return evaluate.best_accuracy
aggregator = tf.keras.metrics.SparseCategoricalAccuracy()
for example in ds_test:
points, labels = example["points"], example["label"]
logits = model(points, training=False)
aggregator.update_state(labels, logits)
accuracy = aggregator.result()
evaluate.best_accuracy = max(accuracy, evaluate.best_accuracy)
tf.summary.scalar(name="accuracy_test", data=accuracy, step=step)
return evaluate.best_accuracy
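# Note: `evaluate` keeps its running best accuracy as a function attribute
# (evaluate.best_accuracy), which behaves like a static local variable that
# persists across calls.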
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
ds_train, info = modelnet40.ModelNet40.load(split="train", with_info=True)
num_examples = info.splits["train"].num_examples
ds_train = ds_train.shuffle(num_examples, reshuffle_each_iteration=True)
ds_train = ds_train.repeat(FLAGS.num_epochs)
ds_train = ds_train.batch(FLAGS.batch_size)
ds_test = modelnet40.ModelNet40.load(split="test").batch(FLAGS.batch_size)
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
try:
helpers.setup_tensorboard(FLAGS)
helpers.summary_command(parser, FLAGS)
total = tf.data.experimental.cardinality(ds_train).numpy()
pbar = tqdm.tqdm(ds_train, leave=False, total=total, disable=not FLAGS.tqdm)
for train_example in pbar:
train(train_example)
best_accuracy = evaluate()
pbar.set_postfix_str("best accuracy: {:.3f}".format(best_accuracy))
except KeyboardInterrupt:
helpers.handle_keyboard_interrupt(FLAGS)

./tensorflow_graphics/nn/layer/tests/graph_convolution_test.py:
# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the graph convolution layers."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
import tensorflow_graphics.nn.layer.graph_convolution as gc_layer
from tensorflow_graphics.util import test_case
def _dense_to_sparse(data):
"""Convert a numpy array to a tf.SparseTensor."""
indices = np.where(data)
return tf.SparseTensor(
np.stack(indices, axis=-1), data[indices], dense_shape=data.shape)
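# For example, _dense_to_sparse(np.eye(2)) produces a SparseTensor with
# indices [[0, 0], [1, 1]], values [1., 1.] and dense_shape (2, 2); only the
# nonzero entries of `data` are kept.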
def _dummy_data(batch_size, num_vertices, num_channels):
"""Create inputs for feature_steered_convolution."""
if batch_size > 0:
data = np.zeros(
shape=(batch_size, num_vertices, num_channels), dtype=np.float32)
neighbors = _dense_to_sparse(
np.tile(np.eye(num_vertices, dtype=np.float32), (batch_size, 1, 1)))
else:
data = np.zeros(shape=(num_vertices, num_channels), dtype=np.float32)
neighbors = _dense_to_sparse(np.eye(num_vertices, dtype=np.float32))
return data, neighbors
class GraphConvolutionTestFeatureSteeredConvolutionLayerTests(
test_case.TestCase):
@parameterized.parameters(
(1, 1, 1, 1, 1, False),
(4, 2, 3, None, 5, False),
(1, 2, 3, 4, 5, True),
)
def test_feature_steered_convolution_layer_exception_not_raised_shapes(
self, batch_size, num_vertices, in_channels, out_channels,
num_weight_matrices, translation_invariant):
"""Check if the convolution parameters and output have correct shapes."""
data, neighbors = _dummy_data(batch_size, num_vertices, in_channels)
name_scope = "test"
if tf.executing_eagerly():
layer = gc_layer.FeatureSteeredConvolutionKerasLayer(
translation_invariant=translation_invariant,
num_weight_matrices=num_weight_matrices,
num_output_channels=out_channels,
name=name_scope)
def _run_convolution():
"""Run the appropriate feature steered convolution layer."""
if tf.executing_eagerly():
try:
output = layer(inputs=[data, neighbors], sizes=None)
except Exception as e: # pylint: disable=broad-except
self.fail("Exception raised: %s" % str(e))
else:
try:
output = gc_layer.feature_steered_convolution_layer(
data=data,
neighbors=neighbors,
sizes=None,
translation_invariant=translation_invariant,
num_weight_matrices=num_weight_matrices,
num_output_channels=out_channels,
var_name=name_scope)
except Exception as e: # pylint: disable=broad-except
self.fail("Exception raised: %s" % str(e))
return output
output = _run_convolution()
output_shape = output.shape.as_list()
out_channels = in_channels if out_channels is None else out_channels
self.assertEqual(output_shape[-1], out_channels)
self.assertAllEqual(output_shape[:-1], data.shape[:-1])
def _get_var_shape(var_name):
"""Get the shape of a variable by name."""
if tf.executing_eagerly():
trainable_variables = layer.trainable_variables
for tv in trainable_variables:
if tv.name == name_scope + "/" + var_name + ":0":
return tv.shape.as_list()
raise ValueError("Variable not found.")
else:
with tf.compat.v1.variable_scope(name_scope, reuse=True):
variable = tf.compat.v1.get_variable(
var_name, initializer=tf.constant(0))
return variable.shape.as_list()
self.assertAllEqual(_get_var_shape("u"), [in_channels, num_weight_matrices])
self.assertAllEqual(_get_var_shape("c"), [num_weight_matrices])
self.assertAllEqual(_get_var_shape("b"), [out_channels])
self.assertAllEqual(
_get_var_shape("w"), [num_weight_matrices, in_channels, out_channels])
if not translation_invariant:
self.assertAllEqual(
_get_var_shape("v"), [in_channels, num_weight_matrices])
def test_feature_steered_convolution_layer_initializer(self):
"""Tests a custom variable initializer."""
data = np.array(((1.0, 1.0), (-1.0, 1.0), (-1.0, -1.0), (1.0, -1.0)))
neighbors_indices = np.array(((0, 0), (0, 1), (0, 3),
(1, 0), (1, 1), (1, 2),
(2, 1), (2, 2), (2, 3),
(3, 0), (3, 2), (3, 3)))
neighbors = tf.SparseTensor(
neighbors_indices, np.ones(shape=(12,)) / 3.0, dense_shape=(4, 4))
initializer = tf.compat.v1.keras.initializers.zeros()
if tf.executing_eagerly():
layer = gc_layer.FeatureSteeredConvolutionKerasLayer(
translation_invariant=False,
initializer=initializer)
output = layer(inputs=[data, neighbors], sizes=None)
else:
out = gc_layer.feature_steered_convolution_layer(
data=data,
neighbors=neighbors,
sizes=None,
translation_invariant=False,
initializer=initializer)
self.evaluate(tf.compat.v1.global_variables_initializer())
output = self.evaluate(out)
# All zeros initializer should result in all zeros output.
self.assertAllEqual(output, np.zeros_like(data))
def test_feature_steered_convolution_layer_training(self):
"""Test a simple training loop."""
# Generate a small valid input for a simple training task.
# Four corners of a square.
data = np.array(((1.0, 1.0), (-1.0, 1.0), (-1.0, -1.0), (1.0, -1.0)))
neighbors_indices = np.array(((0, 0), (0, 1), (0, 3),
(1, 0), (1, 1), (1, 2),
(2, 1), (2, 2), (2, 3),
(3, 0), (3, 2), (3, 3)))
neighbors = tf.SparseTensor(
neighbors_indices, np.ones(shape=(12,)) / 3.0, dense_shape=(4, 4))
# Desired output is arbitrary.
labels = np.reshape([-1.0, -0.5, 0.5, 1.0], (-1, 1))
num_training_iterations = 5
if tf.executing_eagerly():
with tf.GradientTape(persistent=True) as tape:
layer = gc_layer.FeatureSteeredConvolutionKerasLayer(
translation_invariant=False,
num_weight_matrices=1,
num_output_channels=1)
output = layer(inputs=[data, neighbors], sizes=None)
loss = tf.nn.l2_loss(output - labels)
trainable_variables = layer.trainable_variables
for _ in range(num_training_iterations):
grads = tape.gradient(loss, trainable_variables)
tf.compat.v1.train.GradientDescentOptimizer(1e-4).apply_gradients(
zip(grads, trainable_variables))
else:
output = gc_layer.feature_steered_convolution_layer(
data=data,
neighbors=neighbors,
sizes=None,
translation_invariant=False,
num_weight_matrices=1,
num_output_channels=1)
train_op = tf.compat.v1.train.GradientDescentOptimizer(1e-4).minimize(
tf.nn.l2_loss(output - labels))
with tf.compat.v1.Session() as sess:
sess.run(tf.compat.v1.initialize_all_variables())
for _ in range(num_training_iterations):
sess.run(train_op)
class GraphConvolutionTestDynamicGraphConvolutionKerasLayerTests(
test_case.TestCase):
@parameterized.parameters(
(1, 1, 1, 1, "weighted"),
(4, 2, 3, 12, "max"),
(1, 2, 3, 4, "max"),
)
def test_dynamic_graph_convolution_keras_layer_exception_not_raised_shapes(
self, batch_size, num_vertices, in_channels, out_channels, reduction):
"""Check if the convolution parameters and output have correct shapes."""
if not tf.executing_eagerly():
return
data, neighbors = _dummy_data(batch_size, num_vertices, in_channels)
layer = gc_layer.DynamicGraphConvolutionKerasLayer(
num_output_channels=out_channels,
reduction=reduction)
try:
output = layer(inputs=[data, neighbors], sizes=None)
except Exception as e: # pylint: disable=broad-except
self.fail("Exception raised: %s" % str(e))
self.assertAllEqual((batch_size, num_vertices, out_channels), output.shape)
@parameterized.parameters(
(1, 1, 1, 1, "weighted"),
(4, 2, 3, 12, "max"),
(1, 2, 3, 4, "max"),
)
def test_dynamic_graph_convolution_keras_layer_zero_kernel(
self, batch_size, num_vertices, in_channels, out_channels, reduction):
"""Tests convolution with an all-zeros kernel."""
if not tf.executing_eagerly():
return
data, neighbors = _dummy_data(batch_size, num_vertices, in_channels)
data = np.random.uniform(size=data.shape).astype(np.float32)
layer = gc_layer.DynamicGraphConvolutionKerasLayer(
num_output_channels=out_channels,
reduction=reduction,
use_bias=False,
kernel_initializer=tf.compat.v1.keras.initializers.zeros())
output = layer(inputs=[data, neighbors], sizes=None)
self.assertAllEqual(
output,
np.zeros(shape=(batch_size, num_vertices, out_channels),
dtype=np.float32))
@parameterized.parameters((1, 1, 1), (2, 3, 12), (2, 3, 4))
def test_dynamic_graph_convolution_keras_layer_duplicate_features(
self, num_vertices, in_channels, out_channels):
"""Tests convolution when all vertex features are identical."""
if not tf.executing_eagerly():
return
data = np.random.uniform(size=(1, in_channels))
data = np.tile(data, (num_vertices, 1))
# Results should be independent of 'neighbors'.
neighbors = np.maximum(np.random.randint(
0, 2, size=(num_vertices, num_vertices)), np.eye(num_vertices))
neighbors = _dense_to_sparse(neighbors)
layer = gc_layer.DynamicGraphConvolutionKerasLayer(
num_output_channels=out_channels,
reduction="max")
output = layer(inputs=[data, neighbors], sizes=None)
output_tile = tf.tile(output[:1, :], (num_vertices, 1))
self.assertAllEqual(output, output_tile)
@parameterized.parameters("weighted", "max")
def test_dynamic_graph_convolution_keras_layer_training(self, reduction):
"""Test a simple training loop."""
if not tf.executing_eagerly():
return
# Generate a small valid input for a simple training task.
# Four corners of a square.
data = np.array(((1.0, 1.0), (-1.0, 1.0), (-1.0, -1.0), (1.0, -1.0)))
neighbors_indices = np.array(((0, 0), (0, 1), (0, 3),
(1, 0), (1, 1), (1, 2),
(2, 1), (2, 2), (2, 3),
(3, 0), (3, 2), (3, 3)))
neighbors = tf.SparseTensor(
neighbors_indices, np.ones(shape=(12,)) / 3.0, dense_shape=(4, 4))
# Desired output is arbitrary.
labels = np.reshape([-1.0, -0.5, 0.5, 1.0], (-1, 1))
num_training_iterations = 5
with tf.GradientTape(persistent=True) as tape:
layer = gc_layer.DynamicGraphConvolutionKerasLayer(
num_output_channels=2,
reduction=reduction)
output = layer(inputs=[data, neighbors], sizes=None)
loss = tf.nn.l2_loss(output - labels)
trainable_variables = layer.trainable_variables
for _ in range(num_training_iterations):
grads = tape.gradient(loss, trainable_variables)
tf.compat.v1.train.GradientDescentOptimizer(1e-4).apply_gradients(
zip(grads, trainable_variables))
if __name__ == "__main__":
test_case.main()
| # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the graph convolution layers."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
import tensorflow_graphics.nn.layer.graph_convolution as gc_layer
from tensorflow_graphics.util import test_case
def _dense_to_sparse(data):
"""Convert a numpy array to a tf.SparseTensor."""
indices = np.where(data)
return tf.SparseTensor(
np.stack(indices, axis=-1), data[indices], dense_shape=data.shape)
def _dummy_data(batch_size, num_vertices, num_channels):
"""Create inputs for feature_steered_convolution."""
if batch_size > 0:
data = np.zeros(
shape=(batch_size, num_vertices, num_channels), dtype=np.float32)
neighbors = _dense_to_sparse(
np.tile(np.eye(num_vertices, dtype=np.float32), (batch_size, 1, 1)))
else:
data = np.zeros(shape=(num_vertices, num_channels), dtype=np.float32)
neighbors = _dense_to_sparse(np.eye(num_vertices, dtype=np.float32))
return data, neighbors
class GraphConvolutionTestFeatureSteeredConvolutionLayerTests(
test_case.TestCase):
@parameterized.parameters(
(1, 1, 1, 1, 1, False),
(4, 2, 3, None, 5, False),
(1, 2, 3, 4, 5, True),
)
def test_feature_steered_convolution_layer_exception_not_raised_shapes(
self, batch_size, num_vertices, in_channels, out_channels,
num_weight_matrices, translation_invariant):
"""Check if the convolution parameters and output have correct shapes."""
data, neighbors = _dummy_data(batch_size, num_vertices, in_channels)
name_scope = "test"
if tf.executing_eagerly():
layer = gc_layer.FeatureSteeredConvolutionKerasLayer(
translation_invariant=translation_invariant,
num_weight_matrices=num_weight_matrices,
num_output_channels=out_channels,
name=name_scope)
def _run_convolution():
"""Run the appropriate feature steered convolution layer."""
if tf.executing_eagerly():
try:
output = layer(inputs=[data, neighbors], sizes=None)
except Exception as e: # pylint: disable=broad-except
self.fail("Exception raised: %s" % str(e))
else:
try:
output = gc_layer.feature_steered_convolution_layer(
data=data,
neighbors=neighbors,
sizes=None,
translation_invariant=translation_invariant,
num_weight_matrices=num_weight_matrices,
num_output_channels=out_channels,
var_name=name_scope)
except Exception as e: # pylint: disable=broad-except
self.fail("Exception raised: %s" % str(e))
return output
output = _run_convolution()
output_shape = output.shape.as_list()
out_channels = in_channels if out_channels is None else out_channels
self.assertEqual(output_shape[-1], out_channels)
self.assertAllEqual(output_shape[:-1], data.shape[:-1])
def _get_var_shape(var_name):
"""Get the shape of a variable by name."""
if tf.executing_eagerly():
trainable_variables = layer.trainable_variables
for tv in trainable_variables:
if tv.name == name_scope + "/" + var_name + ":0":
return tv.shape.as_list()
raise ValueError("Variable not found.")
else:
with tf.compat.v1.variable_scope(name_scope, reuse=True):
variable = tf.compat.v1.get_variable(
var_name, initializer=tf.constant(0))
return variable.shape.as_list()
self.assertAllEqual(_get_var_shape("u"), [in_channels, num_weight_matrices])
self.assertAllEqual(_get_var_shape("c"), [num_weight_matrices])
self.assertAllEqual(_get_var_shape("b"), [out_channels])
self.assertAllEqual(
_get_var_shape("w"), [num_weight_matrices, in_channels, out_channels])
if not translation_invariant:
self.assertAllEqual(
_get_var_shape("v"), [in_channels, num_weight_matrices])
def test_feature_steered_convolution_layer_initializer(self):
"""Tests a custom variable initializer."""
data = np.array(((1.0, 1.0), (-1.0, 1.0), (-1.0, -1.0), (1.0, -1.0)))
neighbors_indices = np.array(((0, 0), (0, 1), (0, 3),
(1, 0), (1, 1), (1, 2),
(2, 1), (2, 2), (2, 3),
(3, 0), (3, 2), (3, 3)))
neighbors = tf.SparseTensor(
neighbors_indices, np.ones(shape=(12,)) / 3.0, dense_shape=(4, 4))
initializer = tf.compat.v1.keras.initializers.zeros()
if tf.executing_eagerly():
layer = gc_layer.FeatureSteeredConvolutionKerasLayer(
translation_invariant=False,
initializer=initializer)
output = layer(inputs=[data, neighbors], sizes=None)
else:
out = gc_layer.feature_steered_convolution_layer(
data=data,
neighbors=neighbors,
sizes=None,
translation_invariant=False,
initializer=initializer)
self.evaluate(tf.compat.v1.global_variables_initializer())
output = self.evaluate(out)
# All zeros initializer should result in all zeros output.
self.assertAllEqual(output, np.zeros_like(data))
def test_feature_steered_convolution_layer_training(self):
"""Test a simple training loop."""
# Generate a small valid input for a simple training task.
# Four corners of a square.
data = np.array(((1.0, 1.0), (-1.0, 1.0), (-1.0, -1.0), (1.0, -1.0)))
neighbors_indices = np.array(((0, 0), (0, 1), (0, 3),
(1, 0), (1, 1), (1, 2),
(2, 1), (2, 2), (2, 3),
(3, 0), (3, 2), (3, 3)))
neighbors = tf.SparseTensor(
neighbors_indices, np.ones(shape=(12,)) / 3.0, dense_shape=(4, 4))
# Desired output is arbitrary.
labels = np.reshape([-1.0, -0.5, 0.5, 1.0], (-1, 1))
num_training_iterations = 5
if tf.executing_eagerly():
with tf.GradientTape(persistent=True) as tape:
layer = gc_layer.FeatureSteeredConvolutionKerasLayer(
translation_invariant=False,
num_weight_matrices=1,
num_output_channels=1)
output = layer(inputs=[data, neighbors], sizes=None)
loss = tf.nn.l2_loss(output - labels)
trainable_variables = layer.trainable_variables
for _ in range(num_training_iterations):
grads = tape.gradient(loss, trainable_variables)
tf.compat.v1.train.GradientDescentOptimizer(1e-4).apply_gradients(
zip(grads, trainable_variables))
else:
output = gc_layer.feature_steered_convolution_layer(
data=data,
neighbors=neighbors,
sizes=None,
translation_invariant=False,
num_weight_matrices=1,
num_output_channels=1)
train_op = tf.compat.v1.train.GradientDescentOptimizer(1e-4).minimize(
tf.nn.l2_loss(output - labels))
with tf.compat.v1.Session() as sess:
sess.run(tf.compat.v1.initialize_all_variables())
for _ in range(num_training_iterations):
sess.run(train_op)
class GraphConvolutionTestDynamicGraphConvolutionKerasLayerTests(
test_case.TestCase):
@parameterized.parameters(
(1, 1, 1, 1, "weighted"),
(4, 2, 3, 12, "max"),
(1, 2, 3, 4, "max"),
)
def test_dynamic_graph_convolution_keras_layer_exception_not_raised_shapes(
self, batch_size, num_vertices, in_channels, out_channels, reduction):
"""Check if the convolution parameters and output have correct shapes."""
if not tf.executing_eagerly():
return
data, neighbors = _dummy_data(batch_size, num_vertices, in_channels)
layer = gc_layer.DynamicGraphConvolutionKerasLayer(
num_output_channels=out_channels,
reduction=reduction)
try:
output = layer(inputs=[data, neighbors], sizes=None)
except Exception as e: # pylint: disable=broad-except
self.fail("Exception raised: %s" % str(e))
self.assertAllEqual((batch_size, num_vertices, out_channels), output.shape)
@parameterized.parameters(
(1, 1, 1, 1, "weighted"),
(4, 2, 3, 12, "max"),
(1, 2, 3, 4, "max"),
)
def test_dynamic_graph_convolution_keras_layer_zero_kernel(
self, batch_size, num_vertices, in_channels, out_channels, reduction):
"""Tests convolution with an all-zeros kernel."""
if not tf.executing_eagerly():
return
data, neighbors = _dummy_data(batch_size, num_vertices, in_channels)
data = np.random.uniform(size=data.shape).astype(np.float32)
layer = gc_layer.DynamicGraphConvolutionKerasLayer(
num_output_channels=out_channels,
reduction=reduction,
use_bias=False,
kernel_initializer=tf.compat.v1.keras.initializers.zeros())
output = layer(inputs=[data, neighbors], sizes=None)
self.assertAllEqual(
output,
np.zeros(shape=(batch_size, num_vertices, out_channels),
dtype=np.float32))
@parameterized.parameters((1, 1, 1), (2, 3, 12), (2, 3, 4))
def test_dynamic_graph_convolution_keras_layer_duplicate_features(
self, num_vertices, in_channels, out_channels):
"""Tests convolution when all vertex features are identical."""
if not tf.executing_eagerly():
return
data = np.random.uniform(size=(1, in_channels))
data = np.tile(data, (num_vertices, 1))
# Results should be independent of 'neighbors'.
neighbors = np.maximum(np.random.randint(
0, 2, size=(num_vertices, num_vertices)), np.eye(num_vertices))
neighbors = _dense_to_sparse(neighbors)
layer = gc_layer.DynamicGraphConvolutionKerasLayer(
num_output_channels=out_channels,
reduction="max")
output = layer(inputs=[data, neighbors], sizes=None)
output_tile = tf.tile(output[:1, :], (num_vertices, 1))
self.assertAllEqual(output, output_tile)
@parameterized.parameters("weighted", "max")
def test_dynamic_graph_convolution_keras_layer_training(self, reduction):
"""Test a simple training loop."""
if not tf.executing_eagerly():
return
# Generate a small valid input for a simple training task.
# Four corners of a square.
data = np.array(((1.0, 1.0), (-1.0, 1.0), (-1.0, -1.0), (1.0, -1.0)))
neighbors_indices = np.array(((0, 0), (0, 1), (0, 3),
(1, 0), (1, 1), (1, 2),
(2, 1), (2, 2), (2, 3),
(3, 0), (3, 2), (3, 3)))
neighbors = tf.SparseTensor(
neighbors_indices, np.ones(shape=(12,)) / 3.0, dense_shape=(4, 4))
# Desired output is arbitrary.
labels = np.reshape([-1.0, -0.5, 0.5, 1.0], (-1, 1))
num_training_iterations = 5
with tf.GradientTape(persistent=True) as tape:
layer = gc_layer.DynamicGraphConvolutionKerasLayer(
num_output_channels=2,
reduction=reduction)
output = layer(inputs=[data, neighbors], sizes=None)
loss = tf.nn.l2_loss(output - labels)
trainable_variables = layer.trainable_variables
for _ in range(num_training_iterations):
grads = tape.gradient(loss, trainable_variables)
tf.compat.v1.train.GradientDescentOptimizer(1e-4).apply_gradients(
zip(grads, trainable_variables))
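# For reference, a small sketch that generalizes the hand-built uniform
# neighborhoods used throughout these tests (each of three neighbors weighted
# 1/3): row-normalize a dense adjacency matrix and convert it to the
# SparseTensor format the layers expect. Illustrative only; the tests build
# their neighborhoods explicitly.
def _uniform_neighbors_sketch(adjacency):
  """Returns a row-normalized tf.sparse.SparseTensor from a dense adjacency."""
  adjacency = tf.convert_to_tensor(value=adjacency, dtype=tf.float32)
  row_sums = tf.reduce_sum(adjacency, axis=-1, keepdims=True)
  return tf.sparse.from_dense(adjacency / row_sums)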
if __name__ == "__main__":
test_case.main()
| -1 |
tensorflow/graphics | 489 | Enforce `Framebuffer` to accept only tensors with single batch dimension. | Enforce `Framebuffer` to accept only tensors with single batch dimension.
| copybara-service[bot] | "2021-02-03T21:06:22Z" | "2021-02-12T23:59:48Z" | 3a4f1952ed967fb884dc031eeda6dac3fbefbe52 | b7a2bf260d6fcf924fddcbb6dba36c72ece66990 | Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
| ./tensorflow_graphics/math/interpolation/tests/slerp_test.py | # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for slerp."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow_graphics.math.interpolation import slerp
from tensorflow_graphics.util import test_case
_SQRT2_DIV2 = np.sqrt(2.0).astype(np.float32) * 0.5
class SlerpTest(test_case.TestCase):
def _pick_random_quaternion(self):
"""Creates a random quaternion with random shape."""
tensor_size = np.random.randint(3)
tensor_shape = np.random.randint(1, 10, size=(tensor_size)).tolist()
return np.random.normal(size=tensor_shape + [4])
def _quaternion_slerp_helper(self, q1, q2, p):
"""Calls interpolate function for quaternions."""
return slerp.interpolate(q1, q2, p, slerp.InterpolationType.QUATERNION)
def _vector_slerp_helper(self, q1, q2, p):
"""Calls interpolate function for vectors."""
return slerp.interpolate(q1, q2, p, slerp.InterpolationType.VECTOR)
def test_interpolate_raises_exceptions(self):
"""Tests if unknown methods raise exceptions."""
vector1 = self._pick_random_quaternion()
self.assert_exception_is_raised(
slerp.interpolate,
error_msg="Unknown interpolation type supplied.",
shapes=[],
vector1=vector1,
vector2=-vector1,
percent=0.1,
method=2)
def test_interpolate_with_weights_quaternion_preset(self):
"""Compares interpolate to quaternion_weights + interpolate_with_weights."""
q1 = self._pick_random_quaternion()
q2 = q1 + tf.ones_like(q1)
q1 = tf.nn.l2_normalize(q1, axis=-1)
q2 = tf.nn.l2_normalize(q2, axis=-1)
weight1, weight2 = slerp.quaternion_weights(q1, q2, 0.25)
qf = slerp.interpolate_with_weights(q1, q2, weight1, weight2)
qi = slerp.interpolate(
q1, q2, 0.25, method=slerp.InterpolationType.QUATERNION)
self.assertAllClose(qf, qi, atol=1e-9)
def test_interpolate_with_weights_vector_preset(self):
"""Compares interpolate to vector_weights + interpolate_with_weights."""
# Any quaternion is a valid vector
q1 = self._pick_random_quaternion()
q2 = q1 + tf.ones_like(q1)
weight1, weight2 = slerp.vector_weights(q1, q2, 0.75)
qf = slerp.interpolate_with_weights(q1, q2, weight1, weight2)
qi = slerp.interpolate(q1, q2, 0.75, method=slerp.InterpolationType.VECTOR)
self.assertAllClose(qf, qi, atol=1e-9)
@parameterized.parameters(
# Orthogonal, same hemisphere
(((1.0, 0.0, 0.0, 0.0), (0.0, 1.0, 0.0, 0.0), (0.5,)),
((_SQRT2_DIV2, _SQRT2_DIV2, 0.0, 0.0),)),
(((_SQRT2_DIV2, _SQRT2_DIV2, 0.0, 0.0),
(0.0, 0.0, _SQRT2_DIV2, _SQRT2_DIV2), (0.5,)), ((0.5, 0.5, 0.5, 0.5),)),
# Same hemisphere
(((_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0),
(0.0, 0.0, _SQRT2_DIV2, _SQRT2_DIV2), (0.5,)),
((0.408248290463863, 0.0, 0.816496580927726, 0.408248290463863),)),
# Same quaternions
(((_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0),
(_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0), (0.75,)),
((_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0),)),
# Anti-polar - small percent
(((_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0),
(-_SQRT2_DIV2, 0.0, -_SQRT2_DIV2, 0.0), (0.2,)),
((-_SQRT2_DIV2, 0.0, -_SQRT2_DIV2, 0.0),)),
# Anti-polar - large percent
(((_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0),
(-_SQRT2_DIV2, 0.0, -_SQRT2_DIV2, 0.0), (0.8,)),
((-_SQRT2_DIV2, 0.0, -_SQRT2_DIV2, 0.0),)),
# Extrapolation - same hemisphere
(((_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0),
(_SQRT2_DIV2, _SQRT2_DIV2, 0.0, 0.0), (-0.5,)),
((0.408248290463863, -0.408248290463863, 0.816496580927726, 0.0),)),
# Extrapolation - opposite hemisphere
(((_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0),
(-_SQRT2_DIV2, _SQRT2_DIV2, 0.0, 0.0), (-0.5,)),
((-0.408248290463863, -0.408248290463863, -0.816496580927726, 0.0),)),
)
def test_quaternion_slerp_preset(self, test_inputs, test_outputs):
"""Tests the accuracy of qslerp against numpy-quaternion values."""
test_inputs = [np.array(test_input).astype(np.float32)
for test_input in test_inputs]
self.assert_output_is_correct(self._quaternion_slerp_helper, test_inputs,
test_outputs, tile=False)
def test_unnormalized_quaternion_weights_exception_raised(self):
"""Tests if quaternion_weights raise exceptions for unnormalized input."""
q1 = self._pick_random_quaternion()
q2 = tf.nn.l2_normalize(q1, axis=-1)
p = tf.constant((0.5), dtype=q1.dtype)
with self.assertRaises(tf.errors.InvalidArgumentError):
self.evaluate(slerp.quaternion_weights(q1, q2, p))
@parameterized.parameters(
((4,), (4,), (1,)),
((None, 4), (None, 4), (None, 1)),
((None, 4), (None, 4), (None, 4)),
)
def test_quaternion_weights_exception_not_raised(self, *shapes):
"""Tests that valid input shapes do not raise exceptions for qslerp."""
self.assert_exception_is_not_raised(slerp.quaternion_weights, shapes)
@parameterized.parameters(
("must have exactly 4 dimensions in axis -1", (3,), (4,), (1,)),
("must have exactly 4 dimensions in axis -1", (4,), (3,), (1,)),
("Not all batch dimensions are broadcast-compatible.", (2, 4), (3, 4),
(1,)),
("Not all batch dimensions are broadcast-compatible.", (1, 4), (3, 4),
(2,)),
)
def test_quaternion_weights_exception_raised(self, error_msg, *shapes):
"""Tests that the shape exceptions are properly raised for qslerp."""
self.assert_exception_is_raised(slerp.quaternion_weights, error_msg, shapes)
@parameterized.parameters(
# Same quaternions
(((_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0),
(_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0), (0.75,)), (
(0.25,),
(0.75,),
)),
# Anti-polar - small percent
(((_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0),
(-_SQRT2_DIV2, 0.0, -_SQRT2_DIV2, 0.0), (0.2,)), (
(-0.8,),
(0.2,),
)),
# Anti-polar - large percent
(((_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0),
(-_SQRT2_DIV2, 0.0, -_SQRT2_DIV2, 0.0), (0.8,)), (
(-0.2,),
(0.8,),
)),
)
def test_quaternion_weights_preset(self, test_inputs, test_outputs):
"""Tests the accuracy of quaternion_weights for problem cases."""
test_inputs = [np.array(test_input).astype(np.float32)
for test_input in test_inputs]
self.assert_output_is_correct(slerp.quaternion_weights, test_inputs,
test_outputs, tile=False)
@parameterized.parameters(
((3,), (3,), (1,)),
((None, 4), (None, 4), (None, 1)),
)
def test_vector_weights_exception_not_raised(self, *shapes):
"""Tests that valid inputs do not raise exceptions for vector_weights."""
self.assert_exception_is_not_raised(slerp.vector_weights, shapes)
@parameterized.parameters(
("must have the same number of dimensions in axes", (None, 3), (None, 4),
(1,)),
("must have the same number of dimensions in axes", (2, 3), (2, 4), (1,)),
("Not all batch dimensions are broadcast-compatible.", (2, 3), (3, 3),
(1,)),
("Not all batch dimensions are broadcast-compatible.", (1, 3), (3, 3),
(2,)),
)
def test_vector_weights_exception_raised(self, error_msg, *shapes):
"""Tests that shape exceptions are properly raised for vector_weights."""
self.assert_exception_is_raised(slerp.vector_weights, error_msg, shapes)
@parameterized.parameters(
# Orthogonal, same hemisphere
(((1.0, 0.0, 0.0, 0.0), (0.0, 1.0, 0.0, 0.0), (0.5,)),
((_SQRT2_DIV2, _SQRT2_DIV2, 0.0, 0.0),)),
(((_SQRT2_DIV2, _SQRT2_DIV2, 0.0, 0.0),
(0.0, 0.0, _SQRT2_DIV2, _SQRT2_DIV2), (0.5,)), ((0.5, 0.5, 0.5, 0.5),)),
# Same hemisphere
(((_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0),
(0.0, 0.0, _SQRT2_DIV2, _SQRT2_DIV2), (0.5,)),
((0.408248290463863, 0.0, 0.816496580927726, 0.408248290463863),)),
# Same vectors
(((_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0),
(_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0), (0.75,)),
((_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0),)),
# Anti-polar - equal weights
(((_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0),
(-_SQRT2_DIV2, 0.0, -_SQRT2_DIV2, 0.0), (0.5,)),
((0.0, 0.0, 0.0, 0.0),)),
# Anti-polar - small percent
(((_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0),
(-_SQRT2_DIV2, 0.0, -_SQRT2_DIV2, 0.0), (0.25,)),
((0.5, 0.0, 0.5, 0.0),)),
# Extrapolation - same hemisphere
(((_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0),
(_SQRT2_DIV2, _SQRT2_DIV2, 0.0, 0.0), (-1.0,)),
((0.0, -_SQRT2_DIV2, _SQRT2_DIV2, 0.0),)),
# Extrapolation - opposite hemisphere
(((_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0),
(-_SQRT2_DIV2, _SQRT2_DIV2, 0.0, 0.0), (1.5,)),
((-_SQRT2_DIV2, -0.0, -_SQRT2_DIV2, 0.0),)),
# Unnormalized vectors
(((4.0, 0.0), (0.0, 1.0), (0.5,)), ((2.82842712, _SQRT2_DIV2),)),
)
def test_vector_slerp_preset(self, test_inputs, test_outputs):
"""Tests the accuracy of vector slerp results."""
test_inputs = [np.array(test_input).astype(np.float32)
for test_input in test_inputs]
self.assert_output_is_correct(self._vector_slerp_helper, test_inputs,
test_outputs, tile=False)
def test_vector_weights_reduce_to_lerp_preset(self):
"""Tests if vector slerp reduces to lerp for identical vectors as input."""
q1 = tf.constant((_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0))
q2 = tf.constant((_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0))
p = tf.constant((0.75,), dtype=q1.dtype)
w1, w2 = slerp.vector_weights(q1, q2, p)
self.assertAllClose(w1, (0.25,), rtol=1e-6)
self.assertAllClose(w2, (0.75,), rtol=1e-6)
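# A minimal NumPy reference for the quaternion weights encoded by the presets
# above, assuming unit-norm inputs, a nonzero angle between them (identical
# inputs reduce to lerp, as the last test verifies), and ignoring the
# shortest-path hemisphere flip the library performs. Sketch only.
def _numpy_quaternion_weights_sketch(q1, q2, percent):
  """Returns (w1, w2) such that w1 * q1 + w2 * q2 is the slerp result."""
  cos_theta = np.clip(np.sum(q1 * q2, axis=-1), -1.0, 1.0)
  theta = np.arccos(cos_theta)
  sin_theta = np.sin(theta)
  w1 = np.sin((1.0 - percent) * theta) / sin_theta
  w2 = np.sin(percent * theta) / sin_theta
  return w1, w2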
if __name__ == "__main__":
test_case.main()
| # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for slerp."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow_graphics.math.interpolation import slerp
from tensorflow_graphics.util import test_case
_SQRT2_DIV2 = np.sqrt(2.0).astype(np.float32) * 0.5
class SlerpTest(test_case.TestCase):
def _pick_random_quaternion(self):
"""Creates a random quaternion with random shape."""
tensor_size = np.random.randint(3)
tensor_shape = np.random.randint(1, 10, size=(tensor_size)).tolist()
return np.random.normal(size=tensor_shape + [4])
def _quaternion_slerp_helper(self, q1, q2, p):
"""Calls interpolate function for quaternions."""
return slerp.interpolate(q1, q2, p, slerp.InterpolationType.QUATERNION)
def _vector_slerp_helper(self, q1, q2, p):
"""Calls interpolate function for vectors."""
return slerp.interpolate(q1, q2, p, slerp.InterpolationType.VECTOR)
def test_interpolate_raises_exceptions(self):
"""Tests if unknown methods raise exceptions."""
vector1 = self._pick_random_quaternion()
self.assert_exception_is_raised(
slerp.interpolate,
error_msg="Unknown interpolation type supplied.",
shapes=[],
vector1=vector1,
vector2=-vector1,
percent=0.1,
method=2)
def test_interpolate_with_weights_quaternion_preset(self):
"""Compares interpolate to quaternion_weights + interpolate_with_weights."""
q1 = self._pick_random_quaternion()
q2 = q1 + tf.ones_like(q1)
q1 = tf.nn.l2_normalize(q1, axis=-1)
q2 = tf.nn.l2_normalize(q2, axis=-1)
weight1, weight2 = slerp.quaternion_weights(q1, q2, 0.25)
qf = slerp.interpolate_with_weights(q1, q2, weight1, weight2)
qi = slerp.interpolate(
q1, q2, 0.25, method=slerp.InterpolationType.QUATERNION)
self.assertAllClose(qf, qi, atol=1e-9)
def test_interpolate_with_weights_vector_preset(self):
"""Compares interpolate to vector_weights + interpolate_with_weights."""
# Any quaternion is a valid vector
q1 = self._pick_random_quaternion()
q2 = q1 + tf.ones_like(q1)
weight1, weight2 = slerp.vector_weights(q1, q2, 0.75)
qf = slerp.interpolate_with_weights(q1, q2, weight1, weight2)
qi = slerp.interpolate(q1, q2, 0.75, method=slerp.InterpolationType.VECTOR)
self.assertAllClose(qf, qi, atol=1e-9)
@parameterized.parameters(
# Orthogonal, same hemisphere
(((1.0, 0.0, 0.0, 0.0), (0.0, 1.0, 0.0, 0.0), (0.5,)),
((_SQRT2_DIV2, _SQRT2_DIV2, 0.0, 0.0),)),
(((_SQRT2_DIV2, _SQRT2_DIV2, 0.0, 0.0),
(0.0, 0.0, _SQRT2_DIV2, _SQRT2_DIV2), (0.5,)), ((0.5, 0.5, 0.5, 0.5),)),
# Same hemisphere
(((_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0),
(0.0, 0.0, _SQRT2_DIV2, _SQRT2_DIV2), (0.5,)),
((0.408248290463863, 0.0, 0.816496580927726, 0.408248290463863),)),
# Same quaternions
(((_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0),
(_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0), (0.75,)),
((_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0),)),
# Anti-polar - small percent
(((_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0),
(-_SQRT2_DIV2, 0.0, -_SQRT2_DIV2, 0.0), (0.2,)),
((-_SQRT2_DIV2, 0.0, -_SQRT2_DIV2, 0.0),)),
# Anti-polar - large percent
(((_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0),
(-_SQRT2_DIV2, 0.0, -_SQRT2_DIV2, 0.0), (0.8,)),
((-_SQRT2_DIV2, 0.0, -_SQRT2_DIV2, 0.0),)),
# Extrapolation - same hemisphere
(((_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0),
(_SQRT2_DIV2, _SQRT2_DIV2, 0.0, 0.0), (-0.5,)),
((0.408248290463863, -0.408248290463863, 0.816496580927726, 0.0),)),
# Extrapolation - opposite hemisphere
(((_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0),
(-_SQRT2_DIV2, _SQRT2_DIV2, 0.0, 0.0), (-0.5,)),
((-0.408248290463863, -0.408248290463863, -0.816496580927726, 0.0),)),
)
def test_quaternion_slerp_preset(self, test_inputs, test_outputs):
"""Tests the accuracy of qslerp against numpy-quaternion values."""
test_inputs = [np.array(test_input).astype(np.float32)
for test_input in test_inputs]
self.assert_output_is_correct(self._quaternion_slerp_helper, test_inputs,
test_outputs, tile=False)
def test_unnormalized_quaternion_weights_exception_raised(self):
"""Tests if quaternion_weights raise exceptions for unnormalized input."""
q1 = self._pick_random_quaternion()
q2 = tf.nn.l2_normalize(q1, axis=-1)
p = tf.constant((0.5), dtype=q1.dtype)
with self.assertRaises(tf.errors.InvalidArgumentError):
self.evaluate(slerp.quaternion_weights(q1, q2, p))
@parameterized.parameters(
((4,), (4,), (1,)),
((None, 4), (None, 4), (None, 1)),
((None, 4), (None, 4), (None, 4)),
)
def test_quaternion_weights_exception_not_raised(self, *shapes):
"""Tests that valid input shapes do not raise exceptions for qslerp."""
self.assert_exception_is_not_raised(slerp.quaternion_weights, shapes)
@parameterized.parameters(
("must have exactly 4 dimensions in axis -1", (3,), (4,), (1,)),
("must have exactly 4 dimensions in axis -1", (4,), (3,), (1,)),
("Not all batch dimensions are broadcast-compatible.", (2, 4), (3, 4),
(1,)),
("Not all batch dimensions are broadcast-compatible.", (1, 4), (3, 4),
(2,)),
)
def test_quaternion_weights_exception_raised(self, error_msg, *shapes):
"""Tests that the shape exceptions are properly raised for qslerp."""
self.assert_exception_is_raised(slerp.quaternion_weights, error_msg, shapes)
@parameterized.parameters(
# Same quaternions
(((_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0),
(_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0), (0.75,)), (
(0.25,),
(0.75,),
)),
# Anti-polar - small percent
(((_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0),
(-_SQRT2_DIV2, 0.0, -_SQRT2_DIV2, 0.0), (0.2,)), (
(-0.8,),
(0.2,),
)),
# Anti-polar - large percent
(((_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0),
(-_SQRT2_DIV2, 0.0, -_SQRT2_DIV2, 0.0), (0.8,)), (
(-0.2,),
(0.8,),
)),
)
def test_quaternion_weights_preset(self, test_inputs, test_outputs):
"""Tests the accuracy of quaternion_weights for problem cases."""
test_inputs = [np.array(test_input).astype(np.float32)
for test_input in test_inputs]
self.assert_output_is_correct(slerp.quaternion_weights, test_inputs,
test_outputs, tile=False)
@parameterized.parameters(
((3,), (3,), (1,)),
((None, 4), (None, 4), (None, 1)),
)
def test_vector_weights_exception_not_raised(self, *shapes):
"""Tests that valid inputs do not raise exceptions for vector_weights."""
self.assert_exception_is_not_raised(slerp.vector_weights, shapes)
@parameterized.parameters(
("must have the same number of dimensions in axes", (None, 3), (None, 4),
(1,)),
("must have the same number of dimensions in axes", (2, 3), (2, 4), (1,)),
("Not all batch dimensions are broadcast-compatible.", (2, 3), (3, 3),
(1,)),
("Not all batch dimensions are broadcast-compatible.", (1, 3), (3, 3),
(2,)),
)
def test_vector_weights_exception_raised(self, error_msg, *shapes):
"""Tests that shape exceptions are properly raised for vector_weights."""
self.assert_exception_is_raised(slerp.vector_weights, error_msg, shapes)
@parameterized.parameters(
# Orthogonal, same hemisphere
(((1.0, 0.0, 0.0, 0.0), (0.0, 1.0, 0.0, 0.0), (0.5,)),
((_SQRT2_DIV2, _SQRT2_DIV2, 0.0, 0.0),)),
(((_SQRT2_DIV2, _SQRT2_DIV2, 0.0, 0.0),
(0.0, 0.0, _SQRT2_DIV2, _SQRT2_DIV2), (0.5,)), ((0.5, 0.5, 0.5, 0.5),)),
# Same hemisphere
(((_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0),
(0.0, 0.0, _SQRT2_DIV2, _SQRT2_DIV2), (0.5,)),
((0.408248290463863, 0.0, 0.816496580927726, 0.408248290463863),)),
# Same vectors
(((_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0),
(_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0), (0.75,)),
((_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0),)),
# Anti-polar - equal weights
(((_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0),
(-_SQRT2_DIV2, 0.0, -_SQRT2_DIV2, 0.0), (0.5,)),
((0.0, 0.0, 0.0, 0.0),)),
# Anti-polar - small percent
(((_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0),
(-_SQRT2_DIV2, 0.0, -_SQRT2_DIV2, 0.0), (0.25,)),
((0.5, 0.0, 0.5, 0.0),)),
# Extrapolation - same hemisphere
(((_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0),
(_SQRT2_DIV2, _SQRT2_DIV2, 0.0, 0.0), (-1.0,)),
((0.0, -_SQRT2_DIV2, _SQRT2_DIV2, 0.0),)),
# Extrapolation - opposite hemisphere
(((_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0),
(-_SQRT2_DIV2, _SQRT2_DIV2, 0.0, 0.0), (1.5,)),
((-_SQRT2_DIV2, -0.0, -_SQRT2_DIV2, 0.0),)),
# Unnormalized vectors
(((4.0, 0.0), (0.0, 1.0), (0.5,)), ((2.82842712, _SQRT2_DIV2),)),
)
def test_vector_slerp_preset(self, test_inputs, test_outputs):
"""Tests the accuracy of vector slerp results."""
test_inputs = [np.array(test_input).astype(np.float32)
for test_input in test_inputs]
self.assert_output_is_correct(self._vector_slerp_helper, test_inputs,
test_outputs, tile=False)
def test_vector_weights_reduce_to_lerp_preset(self):
"""Tests if vector slerp reduces to lerp for identical vectors as input."""
q1 = tf.constant((_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0))
q2 = tf.constant((_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0))
p = tf.constant((0.75,), dtype=q1.dtype)
w1, w2 = slerp.vector_weights(q1, q2, p)
self.assertAllClose(w1, (0.25,), rtol=1e-6)
self.assertAllClose(w2, (0.75,), rtol=1e-6)
if __name__ == "__main__":
test_case.main()
| -1 |
tensorflow/graphics | 489 | Enforce `Framebuffer` to accept only tensors with single batch dimension. | Enforce `Framebuffer` to accept only tensors with single batch dimension.
| copybara-service[bot] | "2021-02-03T21:06:22Z" | "2021-02-12T23:59:48Z" | 3a4f1952ed967fb884dc031eeda6dac3fbefbe52 | b7a2bf260d6fcf924fddcbb6dba36c72ece66990 | Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
| ./tensorflow_graphics/datasets/features/camera_feature_test.py | # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for tensorflow_graphics.datasets.features.camera_feature."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
from tensorflow_graphics.datasets.features import camera_feature
class CameraFeatureTest(tfds.testing.FeatureExpectationsTestCase):
"""Test Cases for Camera FeatureConnector."""
def __get_camera_params(self):
pose = {'R': np.eye(3).astype(np.float32),
't': np.zeros(3).astype(np.float32)}
f = 35.
optical_center = (640 / 2, 480 / 2)
return pose, f, optical_center
def test_simple_camera(self):
"""Tests camera parameters with fixed focal length, no skew and no aspect ratio."""
expected_pose, expected_f, expected_center = self.__get_camera_params()
expected_intrinsics = np.asarray([[expected_f, 0, expected_center[0]],
[0, expected_f, expected_center[1]],
[0, 0, 1]], dtype=np.float32)
expected_camera = {'pose': expected_pose, 'intrinsics': expected_intrinsics}
inputs = {'f': expected_f, 'optical_center': expected_center,
'pose': expected_pose}
lookat_inputs = {
'f': expected_f,
'optical_center': expected_center,
'pose': {
'look_at': np.array([0, 0, -1], dtype=np.float32),
'up': np.array([0, 1, 0], dtype=np.float32),
'position': np.array([0, 0, 0], dtype=np.float32)
}
}
raising_pose_entry = {
'f': expected_f,
'optical_center': expected_center,
'pose': np.eye(4)
}
raising_pose_inputs = {
'f': expected_f,
'optical_center': expected_center,
'pose': {'rot': np.eye(3), 'trans': np.zeros(3)}
}
raising_lookat_inputs = {
'f': expected_f,
'optical_center': expected_center,
'pose': {
'l': np.array([0, 0, -1], dtype=np.float32),
'up': np.array([0, 1, 0], dtype=np.float32),
'C': np.array([0, 0, 0], dtype=np.float32)
}
}
self.assertFeature(
feature=camera_feature.Camera(),
shape={
'pose': {
'R': (3, 3),
't': (3,)
},
'intrinsics': (3, 3)
},
dtype={
'pose': {
'R': tf.float32,
't': tf.float32
},
'intrinsics': tf.float32
},
tests=[
tfds.testing.FeatureExpectationItem(
value=inputs,
expected=expected_camera,
),
tfds.testing.FeatureExpectationItem(
value=lookat_inputs,
expected=expected_camera
),
tfds.testing.FeatureExpectationItem(
value=raising_pose_inputs,
raise_cls=ValueError,
raise_msg='Wrong keys for pose feature provided'
),
tfds.testing.FeatureExpectationItem(
value=raising_lookat_inputs,
raise_cls=ValueError,
raise_msg='Wrong keys for pose feature provided'
),
tfds.testing.FeatureExpectationItem(
value=raising_pose_entry,
raise_cls=ValueError,
raise_msg='Pose needs to be a dictionary'
),
],
)
def test_camera_with_aspect_ratio_and_skew(self):
"""Tests camera parameters with fixed focal length, aspect_ratio and skew."""
expected_pose, expected_f, expected_center = self.__get_camera_params()
expected_aspect_ratio = expected_center[0] / expected_center[1]
expected_skew = 0.6
expected_intrinsics = np.asarray(
[[expected_f, expected_skew, expected_center[0]],
[0, expected_aspect_ratio * expected_f, expected_center[1]],
[0, 0, 1]], dtype=np.float32)
expected_camera = {'pose': expected_pose, 'intrinsics': expected_intrinsics}
inputs = {'f': expected_f,
'optical_center': expected_center,
'skew': expected_skew,
'aspect_ratio': expected_aspect_ratio,
'pose': expected_pose}
self.assertFeature(
feature=camera_feature.Camera(),
shape={
'pose': {
'R': (3, 3),
't': (3,)
},
'intrinsics': (3, 3)
},
dtype={
'pose': {
'R': tf.float32,
't': tf.float32
},
'intrinsics': tf.float32
},
tests=[
tfds.testing.FeatureExpectationItem(
value=inputs,
expected=expected_camera,
),
],
)
def test_full_camera_calibration_matrix(self):
"""Tests camera parameters with different focal length per camera axis and skew."""
expected_pose, _, expected_optical_center = self.__get_camera_params()
expected_skew = 0.6
expected_f = (35., 40.)
expected_intrinsics = np.array(
[[expected_f[0], expected_skew, expected_optical_center[0]],
[0, expected_f[1], expected_optical_center[1]],
[0, 0, 1]], dtype=np.float32)
expected_camera = {'pose': expected_pose, 'intrinsics': expected_intrinsics}
inputs = {'f': expected_f,
'optical_center': expected_optical_center,
'skew': expected_skew, 'pose': expected_pose}
raising_inputs = {'f': expected_f,
'aspect_ratio': 1.5,
'optical_center': expected_optical_center,
'skew': expected_skew, 'pose': expected_pose}
self.assertFeature(
feature=camera_feature.Camera(),
shape={
'pose': {
'R': (3, 3),
't': (3,)
},
'intrinsics': (3, 3)
},
dtype={
'pose': {
'R': tf.float32,
't': tf.float32
},
'intrinsics': tf.float32
},
tests=[
tfds.testing.FeatureExpectationItem(
value=inputs,
expected=expected_camera,
),
tfds.testing.FeatureExpectationItem(
value=raising_inputs,
raise_cls=ValueError,
raise_msg='If aspect ratio is provided, f needs to '
'be a single float',
),
],
)
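# For reference, a sketch of how the expected intrinsics in these tests are
# assembled from (f, skew, aspect_ratio, optical_center); illustrative only.
def _intrinsics_sketch(f, optical_center, skew=0.0, aspect_ratio=1.0):
  """Returns the 3x3 calibration matrix K that the expectations above use."""
  return np.asarray([[f, skew, optical_center[0]],
                     [0.0, aspect_ratio * f, optical_center[1]],
                     [0.0, 0.0, 1.0]], dtype=np.float32)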
if __name__ == '__main__':
tfds.testing.test_main()
| # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for tensorflow_graphics.datasets.features.camera_feature."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
from tensorflow_graphics.datasets.features import camera_feature
class CameraFeatureTest(tfds.testing.FeatureExpectationsTestCase):
"""Test Cases for Camera FeatureConnector."""
def __get_camera_params(self):
pose = {'R': np.eye(3).astype(np.float32),
't': np.zeros(3).astype(np.float32)}
f = 35.
optical_center = (640 / 2, 480 / 2)
return pose, f, optical_center
def test_simple_camera(self):
"""Tests camera parameters with fixed focal length, no skew and no aspect ratio."""
expected_pose, expected_f, expected_center = self.__get_camera_params()
expected_intrinsics = np.asarray([[expected_f, 0, expected_center[0]],
[0, expected_f, expected_center[1]],
[0, 0, 1]], dtype=np.float32)
expected_camera = {'pose': expected_pose, 'intrinsics': expected_intrinsics}
inputs = {'f': expected_f, 'optical_center': expected_center,
'pose': expected_pose}
lookat_inputs = {
'f': expected_f,
'optical_center': expected_center,
'pose': {
'look_at': np.array([0, 0, -1], dtype=np.float32),
'up': np.array([0, 1, 0], dtype=np.float32),
'position': np.array([0, 0, 0], dtype=np.float32)
}
}
raising_pose_entry = {
'f': expected_f,
'optical_center': expected_center,
'pose': np.eye(4)
}
raising_pose_inputs = {
'f': expected_f,
'optical_center': expected_center,
'pose': {'rot': np.eye(3), 'trans': np.zeros(3)}
}
raising_lookat_inputs = {
'f': expected_f,
'optical_center': expected_center,
'pose': {
'l': np.array([0, 0, -1], dtype=np.float32),
'up': np.array([0, 1, 0], dtype=np.float32),
'C': np.array([0, 0, 0], dtype=np.float32)
}
}
self.assertFeature(
feature=camera_feature.Camera(),
shape={
'pose': {
'R': (3, 3),
't': (3,)
},
'intrinsics': (3, 3)
},
dtype={
'pose': {
'R': tf.float32,
't': tf.float32
},
'intrinsics': tf.float32
},
tests=[
tfds.testing.FeatureExpectationItem(
value=inputs,
expected=expected_camera,
),
tfds.testing.FeatureExpectationItem(
value=lookat_inputs,
expected=expected_camera
),
tfds.testing.FeatureExpectationItem(
value=raising_pose_inputs,
raise_cls=ValueError,
raise_msg='Wrong keys for pose feature provided'
),
tfds.testing.FeatureExpectationItem(
value=raising_lookat_inputs,
raise_cls=ValueError,
raise_msg='Wrong keys for pose feature provided'
),
tfds.testing.FeatureExpectationItem(
value=raising_pose_entry,
raise_cls=ValueError,
raise_msg='Pose needs to be a dictionary'
),
],
)
def test_camera_with_aspect_ratio_and_skew(self):
"""Tests camera parameters with fixed focal length, aspect_ratio and skew."""
expected_pose, expected_f, expected_center = self.__get_camera_params()
expected_aspect_ratio = expected_center[0] / expected_center[1]
expected_skew = 0.6
expected_intrinsics = np.asarray(
[[expected_f, expected_skew, expected_center[0]],
[0, expected_aspect_ratio * expected_f, expected_center[1]],
[0, 0, 1]], dtype=np.float32)
expected_camera = {'pose': expected_pose, 'intrinsics': expected_intrinsics}
inputs = {'f': expected_f,
'optical_center': expected_center,
'skew': expected_skew,
'aspect_ratio': expected_aspect_ratio,
'pose': expected_pose}
self.assertFeature(
feature=camera_feature.Camera(),
shape={
'pose': {
'R': (3, 3),
't': (3,)
},
'intrinsics': (3, 3)
},
dtype={
'pose': {
'R': tf.float32,
't': tf.float32
},
'intrinsics': tf.float32
},
tests=[
tfds.testing.FeatureExpectationItem(
value=inputs,
expected=expected_camera,
),
],
)
def test_full_camera_calibration_matrix(self):
"""Tests camera parameters with different focal length per camera axis and skew."""
expected_pose, _, expected_optical_center = self.__get_camera_params()
expected_skew = 0.6
expected_f = (35., 40.)
expected_intrinsics = np.array(
[[expected_f[0], expected_skew, expected_optical_center[0]],
[0, expected_f[1], expected_optical_center[1]],
[0, 0, 1]], dtype=np.float32)
expected_camera = {'pose': expected_pose, 'intrinsics': expected_intrinsics}
inputs = {'f': expected_f,
'optical_center': expected_optical_center,
'skew': expected_skew, 'pose': expected_pose}
raising_inputs = {'f': expected_f,
'aspect_ratio': 1.5,
'optical_center': expected_optical_center,
'skew': expected_skew, 'pose': expected_pose}
self.assertFeature(
feature=camera_feature.Camera(),
shape={
'pose': {
'R': (3, 3),
't': (3,)
},
'intrinsics': (3, 3)
},
dtype={
'pose': {
'R': tf.float32,
't': tf.float32
},
'intrinsics': tf.float32
},
tests=[
tfds.testing.FeatureExpectationItem(
value=inputs,
expected=expected_camera,
),
tfds.testing.FeatureExpectationItem(
value=raising_inputs,
raise_cls=ValueError,
raise_msg='If aspect ratio is provided, f needs to '
'be a single float',
),
],
)
if __name__ == '__main__':
tfds.testing.test_main()
| -1 |
tensorflow/graphics | 489 | Enforce `Framebuffer` to accept only tensors with single batch dimension. | Enforce `Framebuffer` to accept only tensors with single batch dimension.
| copybara-service[bot] | "2021-02-03T21:06:22Z" | "2021-02-12T23:59:48Z" | 3a4f1952ed967fb884dc031eeda6dac3fbefbe52 | b7a2bf260d6fcf924fddcbb6dba36c72ece66990 | Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
| ./tensorflow_graphics/io/triangle_mesh.py | # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""A thin wrapper around the trimesh library for loading triangle meshes."""
import os
import tensorflow as tf
import trimesh
from trimesh import Scene
from trimesh import Trimesh
# TODO(b/156115314): Revisit the library for loading the triangle meshes.
class GFileResolver(trimesh.visual.resolvers.Resolver):
"""A resolver using gfile for accessing other assets in the mesh directory."""
def __init__(self, path):
if tf.io.gfile.isdir(path):
self.directory = path
elif tf.io.gfile.exists(path):
self.directory = os.path.dirname(path)
else:
raise ValueError('path is not a file or directory')
def get(self, name):
with tf.io.gfile.GFile(os.path.join(self.directory, name), 'rb') as f:
data = f.read()
return data
def load(file_obj, file_type=None, **kwargs):
"""Loads a triangle mesh from the given GFile/file path.
Args:
file_obj: A tf.io.gfile.GFile object or a string specifying the mesh file
path.
file_type: A string specifying the type of the file (e.g. 'obj', 'stl'). If
not specified the file_type will be inferred from the file name.
**kwargs: Additional arguments that should be passed to trimesh.load().
Returns:
A trimesh.Trimesh or trimesh.Scene.
"""
if isinstance(file_obj, str):
with tf.io.gfile.GFile(file_obj, 'r') as f:
if file_type is None:
file_type = trimesh.util.split_extension(file_obj)
return trimesh.load(
file_obj=f,
file_type=file_type,
resolver=GFileResolver(file_obj),
**kwargs)
if trimesh.util.is_file(file_obj):
if not hasattr(file_obj, 'name') or not file_obj.name:
raise ValueError(
'file_obj must have attribute "name". Try passing the file name instead.'
)
if file_type is None:
file_type = trimesh.util.split_extension(file_obj.name)
return trimesh.load(
file_obj=file_obj,
file_type=file_type,
resolver=GFileResolver(file_obj.name),
**kwargs)
raise ValueError('file_obj should be either a file object or a string')
__all__ = ['load', 'Trimesh', 'Scene']
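# Example usage (a sketch; the path below is hypothetical):
#
# mesh = load('/path/to/mesh.obj')
# if isinstance(mesh, Scene):
#   # A Scene may hold several geometries; merge them into a single Trimesh.
#   mesh = trimesh.util.concatenate(list(mesh.geometry.values()))
# print(mesh.vertices.shape, mesh.faces.shape)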
| # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""A thin wrapper around the trimesh library for loading triangle meshes."""
import os
import tensorflow as tf
import trimesh
from trimesh import Scene
from trimesh import Trimesh
# TODO(b/156115314): Revisit the library for loading the triangle meshes.
class GFileResolver(trimesh.visual.resolvers.Resolver):
"""A resolver using gfile for accessing other assets in the mesh directory."""
def __init__(self, path):
if tf.io.gfile.isdir(path):
self.directory = path
elif tf.io.gfile.exists(path):
self.directory = os.path.dirname(path)
else:
raise ValueError('path is not a file or directory')
def get(self, name):
with tf.io.gfile.GFile(os.path.join(self.directory, name), 'rb') as f:
data = f.read()
return data
def load(file_obj, file_type=None, **kwargs):
"""Loads a triangle mesh from the given GFile/file path.
Args:
file_obj: A tf.io.gfile.GFile object or a string specifying the mesh file
path.
file_type: A string specifying the type of the file (e.g. 'obj', 'stl'). If
not specified the file_type will be inferred from the file name.
**kwargs: Additional arguments that should be passed to trimesh.load().
Returns:
A trimesh.Trimesh or trimesh.Scene.
"""
if isinstance(file_obj, str):
with tf.io.gfile.GFile(file_obj, 'r') as f:
if file_type is None:
file_type = trimesh.util.split_extension(file_obj)
return trimesh.load(
file_obj=f,
file_type=file_type,
resolver=GFileResolver(file_obj),
**kwargs)
if trimesh.util.is_file(file_obj):
if not hasattr(file_obj, 'name') or not file_obj.name:
raise ValueError(
'file_obj must have attribute "name". Try passing the file name instead.'
)
if file_type is None:
file_type = trimesh.util.split_extension(file_obj.name)
return trimesh.load(
file_obj=file_obj,
file_type=file_type,
resolver=GFileResolver(file_obj.name),
**kwargs)
raise ValueError('file_obj should be either a file object or a string')
__all__ = ['load', 'Trimesh', 'Scene']
| -1 |
tensorflow/graphics | 489 | Enforce `Framebuffer` to accept only tensors with single batch dimension. | Enforce `Framebuffer` to accept only tensors with single batch dimension.
| copybara-service[bot] | "2021-02-03T21:06:22Z" | "2021-02-12T23:59:48Z" | 3a4f1952ed967fb884dc031eeda6dac3fbefbe52 | b7a2bf260d6fcf924fddcbb6dba36c72ece66990 | Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
| ./tensorflow_graphics/datasets/shapenet/shapenet.py | # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Shapenet Core dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import json
import os
import textwrap
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
from tensorflow_datasets import features as tfds_features
from tensorflow_graphics.datasets import features as tfg_features
_CITATION = """
@techreport{shapenet2015,
title = {{ShapeNet: An Information-Rich 3D Model Repository}},
author = {Chang, Angel X. and Funkhouser, Thomas and Guibas, Leonidas and Hanrahan, Pat and Huang, Qixing and Li, Zimo and Savarese, Silvio and Savva, Manolis and Song, Shuran and Su, Hao and Xiao, Jianxiong and Yi, Li and Yu, Fisher},
number = {arXiv:1512.03012 [cs.GR]},
institution = {Stanford University --- Princeton University --- Toyota Technological Institute at Chicago},
year = {2015}
}
"""
_DESCRIPTION = """
ShapeNetCore is a densely annotated subset of ShapeNet covering 55 common object
categories with ~51,300 unique 3D models. Each model in ShapeNetCore is linked
to an appropriate synset in WordNet (version 3.0).
The synsets will be extracted from the taxonomy.json file in the ShapeNetCore.v2.zip
archive and the splits from http://shapenet.cs.stanford.edu/shapenet/obj-zip/SHREC16/all.csv
"""
_TAXONOMY_FILE_NAME = 'taxonomy.json'
_SPLIT_FILE_URL = \
'http://shapenet.cs.stanford.edu/shapenet/obj-zip/SHREC16/all.csv'
class ShapenetConfig(tfds.core.BuilderConfig):
"""Base class for Shapenet BuilderConfigs.
The Shapenet database builder delegates the implementation of info,
split_generators and generate_examples to the specified ShapenetConfig. This
is done to allow multiple versions of the dataset.
"""
def info(self, dataset_builder):
"""Delegated Shapenet._info."""
raise NotImplementedError('Abstract method')
def split_generators(self, dl_manager, dataset_builder):
"""Delegated Shapenet._split_generators."""
raise NotImplementedError('Abstract method')
def generate_examples(self, **kwargs):
"""Delegated Shapenet._generate_examples."""
raise NotImplementedError('Abstract method')
class MeshConfig(ShapenetConfig):
"""A Shapenet config for loading the original .obj files."""
_MODEL_SUBPATH = os.path.join('models', 'model_normalized.obj')
def __init__(self, model_subpath=_MODEL_SUBPATH):
super(MeshConfig, self).__init__(
name='shapenet_trimesh',
description=_DESCRIPTION,
version=tfds.core.Version('1.0.0'))
self.model_subpath = model_subpath
def info(self, dataset_builder):
return tfds.core.DatasetInfo(
builder=dataset_builder,
description=_DESCRIPTION,
features=tfds_features.FeaturesDict({
'trimesh': tfg_features.TriangleMesh(),
'label': tfds_features.ClassLabel(num_classes=353),
'model_id': tfds_features.Text(),
}),
supervised_keys=('trimesh', 'label'),
# Homepage of the dataset for documentation
homepage='https://shapenet.org/',
citation=_CITATION,
)
def split_generators(self, dl_manager, dataset_builder):
# Extract the synset ids from the taxonomy file and update the ClassLabel
# feature.
with tf.io.gfile.GFile(
os.path.join(dl_manager.manual_dir,
_TAXONOMY_FILE_NAME)) as taxonomy_file:
labels = [x['synsetId'] for x in json.loads(taxonomy_file.read())]
# Remove duplicate labels (the json file contains two identical entries
# for synset '04591713').
labels = list(collections.OrderedDict.fromkeys(labels))
dataset_builder.info.features['label'].names = labels
split_file = dl_manager.download(_SPLIT_FILE_URL)
fieldnames = ['id', 'synset', 'sub_synset', 'model_id', 'split']
model_items = collections.defaultdict(list)
with tf.io.gfile.GFile(split_file) as csvfile:
for row in csv.DictReader(csvfile, fieldnames):
model_items[row['split']].append(row)
return [
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN,
gen_kwargs={
'base_dir': dl_manager.manual_dir,
'models': model_items['train']
},
),
tfds.core.SplitGenerator(
name=tfds.Split.TEST,
gen_kwargs={
'base_dir': dl_manager.manual_dir,
'models': model_items['test']
},
),
tfds.core.SplitGenerator(
name=tfds.Split.VALIDATION,
gen_kwargs={
'base_dir': dl_manager.manual_dir,
'models': model_items['val']
},
),
]
def generate_examples(self, base_dir, models):
"""Yields examples.
The structure of the examples:
{
'trimesh': tensorflow_graphics.datasets.features.TriangleMesh
'label': tensorflow_datasets.features.ClassLabel
'model_id': tensorflow_datasets.features.Text
}
Args:
base_dir: The base directory of shapenet.
models: The list of models in the split.
"""
for model in models:
synset = model['synset']
model_id = model['model_id']
model_filepath = os.path.join(base_dir, synset, model_id,
self.model_subpath)
# If the model doesn't exist, skip it.
if not tf.io.gfile.exists(model_filepath):
continue
yield model_id, {
'trimesh': model_filepath,
'label': synset,
'model_id': model_id,
}
class Shapenet(tfds.core.GeneratorBasedBuilder):
"""ShapeNetCore V2.
Example usage of the dataset:
import tensorflow_datasets as tfds
from tensorflow_graphics.datasets.shapenet import Shapenet
data_set = Shapenet.load(
split='train',
download_and_prepare_kwargs={
'download_config':
tfds.download.DownloadConfig(manual_dir='~/shapenet_base')
})
for example in data_set.take(1):
trimesh, label, model_id = example['trimesh'], example['label'],
example['model_id']
"""
BUILDER_CONFIGS = [MeshConfig()]
VERSION = tfds.core.Version('1.0.0')
@staticmethod
def load(*args, **kwargs):
return tfds.load('shapenet', *args, **kwargs) # pytype: disable=wrong-arg-count
MANUAL_DOWNLOAD_INSTRUCTIONS = textwrap.dedent("""\
manual_dir should contain the extracted ShapeNetCore.v2.zip archive.
You need to register on https://shapenet.org/download/shapenetcore in order
to get the link to download the dataset.
""")
def _info(self):
return self.builder_config.info(self)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
return self.builder_config.split_generators(dl_manager, self)
def _generate_examples(self, **kwargs):
"""Yields examples."""
return self.builder_config.generate_examples(**kwargs)
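# A short usage sketch (assumes the dataset has already been prepared):
# recovering the WordNet synset id from the integer label via dataset info.
#
# builder = tfds.builder('shapenet')
# synset_of = builder.info.features['label'].int2str
# for example in builder.as_dataset(split='test').take(1):
#   print(example['model_id'].numpy(), synset_of(example['label'].numpy()))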
| # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Shapenet Core dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import json
import os
import textwrap
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
from tensorflow_datasets import features as tfds_features
from tensorflow_graphics.datasets import features as tfg_features
_CITATION = """
@techreport{shapenet2015,
title = {{ShapeNet: An Information-Rich 3D Model Repository}},
author = {Chang, Angel X. and Funkhouser, Thomas and Guibas, Leonidas and Hanrahan, Pat and Huang, Qixing and Li, Zimo and Savarese, Silvio and Savva, Manolis and Song, Shuran and Su, Hao and Xiao, Jianxiong and Yi, Li and Yu, Fisher},
number = {arXiv:1512.03012 [cs.GR]},
institution = {Stanford University --- Princeton University --- Toyota Technological Institute at Chicago},
year = {2015}
}
"""
_DESCRIPTION = """
ShapeNetCore is a densely annotated subset of ShapeNet covering 55 common object
categories with ~51,300 unique 3D models. Each model in ShapeNetCore is linked
to an appropriate synset in WordNet (version 3.0).
The synsets will be extracted from the taxonomy.json file in the ShapeNetCore.v2.zip
archive and the splits from http://shapenet.cs.stanford.edu/shapenet/obj-zip/SHREC16/all.csv
"""
_TAXONOMY_FILE_NAME = 'taxonomy.json'
_SPLIT_FILE_URL = \
'http://shapenet.cs.stanford.edu/shapenet/obj-zip/SHREC16/all.csv'
class ShapenetConfig(tfds.core.BuilderConfig):
"""Base class for Shapenet BuilderConfigs.
The Shapenet database builder delegates the implementation of info,
split_generators and generate_examples to the specified ShapenetConfig. This
is done to allow multiple versions of the dataset.
"""
def info(self, dataset_builder):
"""Delegated Shapenet._info."""
raise NotImplementedError('Abstract method')
def split_generators(self, dl_manager, dataset_builder):
"""Delegated Shapenet._split_generators."""
raise NotImplementedError('Abstract method')
def generate_examples(self, **kwargs):
"""Delegated Shapenet._generate_examples."""
raise NotImplementedError('Abstract method')
class MeshConfig(ShapenetConfig):
"""A Shapenet config for loading the original .obj files."""
_MODEL_SUBPATH = os.path.join('models', 'model_normalized.obj')
def __init__(self, model_subpath=_MODEL_SUBPATH):
super(MeshConfig, self).__init__(
name='shapenet_trimesh',
description=_DESCRIPTION,
version=tfds.core.Version('1.0.0'))
self.model_subpath = model_subpath
def info(self, dataset_builder):
return tfds.core.DatasetInfo(
builder=dataset_builder,
description=_DESCRIPTION,
features=tfds_features.FeaturesDict({
'trimesh': tfg_features.TriangleMesh(),
'label': tfds_features.ClassLabel(num_classes=353),
'model_id': tfds_features.Text(),
}),
supervised_keys=('trimesh', 'label'),
# Homepage of the dataset for documentation
homepage='https://shapenet.org/',
citation=_CITATION,
)
def split_generators(self, dl_manager, dataset_builder):
# Extract the synset ids from the taxonomy file and update the ClassLabel
# feature.
with tf.io.gfile.GFile(
os.path.join(dl_manager.manual_dir,
_TAXONOMY_FILE_NAME)) as taxonomy_file:
labels = [x['synsetId'] for x in json.loads(taxonomy_file.read())]
# Remove duplicate labels (the json file contains two identical entries
# for synset '04591713').
labels = list(collections.OrderedDict.fromkeys(labels))
dataset_builder.info.features['label'].names = labels
split_file = dl_manager.download(_SPLIT_FILE_URL)
fieldnames = ['id', 'synset', 'sub_synset', 'model_id', 'split']
model_items = collections.defaultdict(list)
with tf.io.gfile.GFile(split_file) as csvfile:
for row in csv.DictReader(csvfile, fieldnames):
model_items[row['split']].append(row)
return [
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN,
gen_kwargs={
'base_dir': dl_manager.manual_dir,
'models': model_items['train']
},
),
tfds.core.SplitGenerator(
name=tfds.Split.TEST,
gen_kwargs={
'base_dir': dl_manager.manual_dir,
'models': model_items['test']
},
),
tfds.core.SplitGenerator(
name=tfds.Split.VALIDATION,
gen_kwargs={
'base_dir': dl_manager.manual_dir,
'models': model_items['val']
},
),
]
def generate_examples(self, base_dir, models):
"""Yields examples.
The structure of the examples:
{
'trimesh': tensorflow_graphics.datasets.features.TriangleMesh
'label': tensorflow_datasets.features.ClassLabel
'model_id': tensorflow_datasets.features.Text
}
Args:
base_dir: The base directory of shapenet.
models: The list of models in the split.
"""
for model in models:
synset = model['synset']
model_id = model['model_id']
model_filepath = os.path.join(base_dir, synset, model_id,
self.model_subpath)
# If the model doesn't exist, skip it.
if not tf.io.gfile.exists(model_filepath):
continue
yield model_id, {
'trimesh': model_filepath,
'label': synset,
'model_id': model_id,
}
class Shapenet(tfds.core.GeneratorBasedBuilder):
"""ShapeNetCore V2.
Example usage of the dataset:
import tensorflow_datasets as tfds
from tensorflow_graphics.datasets.shapenet import Shapenet
data_set = Shapenet.load(
split='train',
download_and_prepare_kwargs={
'download_config':
tfds.download.DownloadConfig(manual_dir='~/shapenet_base')
})
for example in data_set.take(1):
trimesh, label, model_id = example['trimesh'], example['label'],
example['model_id']
"""
BUILDER_CONFIGS = [MeshConfig()]
VERSION = tfds.core.Version('1.0.0')
@staticmethod
def load(*args, **kwargs):
return tfds.load('shapenet', *args, **kwargs) # pytype: disable=wrong-arg-count
MANUAL_DOWNLOAD_INSTRUCTIONS = textwrap.dedent("""\
manual_dir should contain the extracted ShapeNetCore.v2.zip archive.
You need to register on https://shapenet.org/download/shapenetcore in order
to get the link to download the dataset.
""")
def _info(self):
return self.builder_config.info(self)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
return self.builder_config.split_generators(dl_manager, self)
def _generate_examples(self, **kwargs):
"""Yields examples."""
return self.builder_config.generate_examples(**kwargs)
| -1 |
tensorflow/graphics | 489 | Enforce `Framebuffer` to accept only tensors with single batch dimension. | Enforce `Framebuffer` to accept only tensors with single batch dimension.
| copybara-service[bot] | "2021-02-03T21:06:22Z" | "2021-02-12T23:59:48Z" | 3a4f1952ed967fb884dc031eeda6dac3fbefbe52 | b7a2bf260d6fcf924fddcbb6dba36c72ece66990 | Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
| ./tensorflow_graphics/rendering/light/point_light.py | # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module implements the rendering equation for a point light."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import tensorflow as tf
from tensorflow_graphics.math import vector
from tensorflow_graphics.util import asserts
from tensorflow_graphics.util import export_api
from tensorflow_graphics.util import shape
def estimate_radiance(point_light_radiance,
point_light_position,
surface_point_position,
surface_point_normal,
observation_point,
brdf,
name=None,
reflected_light_fall_off=False):
"""Estimates the spectral radiance of a point light reflected from the surface point towards the observation point.
Note:
In the following, A1 to An are optional batch dimensions, which must be
broadcast compatible.
B1 to Bm are optional batch dimensions for the lights, which must be
broadcast compatible.
Note:
    If the light or the observation point is located behind the surface,
    the function returns 0.
Note:
The gradient of this function is not smooth when the dot product of the
normal with the light-to-surface or surface-to-observation vectors is 0.
Args:
    point_light_radiance: A tensor of shape `[B1, ..., Bm, K]`, where the last
      axis represents the radiance of the point light at a specific wavelength.
point_light_position: A tensor of shape `[B1, ..., Bm, 3]`, where the last
axis represents the position of the point light.
surface_point_position: A tensor of shape `[A1, ..., An, 3]`, where the last
axis represents the position of the surface point.
surface_point_normal: A tensor of shape `[A1, ..., An, 3]`, where the last
axis represents the normalized surface normal at the given surface point.
observation_point: A tensor of shape `[A1, ..., An, 3]`, where the last axis
represents the observation point.
brdf: The BRDF of the surface as a function of:
incoming_light_direction - The incoming light direction as the last axis
of a tensor with shape `[A1, ..., An, 3]`.
outgoing_light_direction - The outgoing light direction as the last axis
of a tensor with shape `[A1, ..., An, 3]`.
surface_point_normal - The surface normal as the last axis of a tensor
with shape `[A1, ..., An, 3]`.
      Note - The BRDF should return a tensor of size `[A1, ..., An, K]` where
        the last axis represents the amount of reflected light in each
        wavelength.
name: A name for this op. Defaults to "estimate_radiance".
    reflected_light_fall_off: A boolean specifying whether to include the
      fall-off of the light reflected from the surface towards the observation
      point in the calculation. Defaults to False.
Returns:
A tensor of shape `[A1, ..., An, B1, ..., Bm, K]`, where the last
axis represents the amount of light received at the observation point
after being reflected from the given surface point.
Raises:
ValueError: if the shape of `point_light_position`,
`surface_point_position`, `surface_point_normal`, or `observation_point` is
not supported.
    InvalidArgumentError: if `surface_point_normal` is not normalized.
"""
with tf.compat.v1.name_scope(name, "estimate_radiance", [
point_light_radiance, point_light_position, surface_point_position,
surface_point_normal, observation_point, brdf
]):
point_light_radiance = tf.convert_to_tensor(value=point_light_radiance)
point_light_position = tf.convert_to_tensor(value=point_light_position)
surface_point_position = tf.convert_to_tensor(value=surface_point_position)
surface_point_normal = tf.convert_to_tensor(value=surface_point_normal)
observation_point = tf.convert_to_tensor(value=observation_point)
shape.check_static(
tensor=point_light_position,
tensor_name="point_light_position",
has_dim_equals=(-1, 3))
shape.check_static(
tensor=surface_point_position,
tensor_name="surface_point_position",
has_dim_equals=(-1, 3))
shape.check_static(
tensor=surface_point_normal,
tensor_name="surface_point_normal",
has_dim_equals=(-1, 3))
shape.check_static(
tensor=observation_point,
tensor_name="observation_point",
has_dim_equals=(-1, 3))
shape.compare_batch_dimensions(
tensors=(surface_point_position, surface_point_normal,
observation_point),
tensor_names=("surface_point_position", "surface_point_normal",
"observation_point"),
last_axes=-2,
broadcast_compatible=True)
shape.compare_batch_dimensions(
tensors=(point_light_radiance, point_light_position),
tensor_names=("point_light_radiance", "point_light_position"),
last_axes=-2,
broadcast_compatible=True)
surface_point_normal = asserts.assert_normalized(surface_point_normal)
# Get the number of lights dimensions (B1,...,Bm).
lights_num_dimensions = max(
len(point_light_radiance.shape), len(point_light_position.shape)) - 1
# Reshape the other parameters so they can be broadcasted to the output of
# shape [A1,...,An, B1,...,Bm, K].
surface_point_position = tf.reshape(
surface_point_position,
surface_point_position.shape[:-1] + (1,) * lights_num_dimensions + (3,))
surface_point_normal = tf.reshape(
surface_point_normal,
surface_point_normal.shape[:-1] + (1,) * lights_num_dimensions + (3,))
observation_point = tf.reshape(
observation_point,
observation_point.shape[:-1] + (1,) * lights_num_dimensions + (3,))
light_to_surface_point = surface_point_position - point_light_position
distance_light_surface_point = tf.norm(
tensor=light_to_surface_point, axis=-1, keepdims=True)
incoming_light_direction = tf.math.l2_normalize(
light_to_surface_point, axis=-1)
surface_to_observation_point = observation_point - surface_point_position
outgoing_light_direction = tf.math.l2_normalize(
surface_to_observation_point, axis=-1)
brdf_value = brdf(incoming_light_direction, outgoing_light_direction,
surface_point_normal)
incoming_light_dot_surface_normal = vector.dot(-incoming_light_direction,
surface_point_normal)
outgoing_light_dot_surface_normal = vector.dot(outgoing_light_direction,
surface_point_normal)
    estimated_radiance = (
        point_light_radiance * brdf_value *
        incoming_light_dot_surface_normal) / (
            4. * math.pi * tf.math.square(distance_light_surface_point))
if reflected_light_fall_off:
distance_surface_observation_point = tf.norm(
tensor=surface_to_observation_point, axis=-1, keepdims=True)
      estimated_radiance = estimated_radiance / tf.math.square(
          distance_surface_observation_point)
# Create a condition for checking whether the light or observation point are
# behind the surface.
min_dot = tf.minimum(incoming_light_dot_surface_normal,
outgoing_light_dot_surface_normal)
common_shape = shape.get_broadcasted_shape(min_dot.shape,
estimated_radiance.shape)
d_val = lambda dim: 1 if dim is None else tf.compat.v1.dimension_value(dim)
common_shape = [d_val(dim) for dim in common_shape]
condition = tf.broadcast_to(tf.greater_equal(min_dot, 0.0), common_shape)
return tf.compat.v1.where(condition, estimated_radiance,
tf.zeros_like(estimated_radiance))
# API contains all public functions and classes.
__all__ = export_api.get_functions_and_classes()
| # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module implements the rendering equation for a point light."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import tensorflow as tf
from tensorflow_graphics.math import vector
from tensorflow_graphics.util import asserts
from tensorflow_graphics.util import export_api
from tensorflow_graphics.util import shape
def estimate_radiance(point_light_radiance,
point_light_position,
surface_point_position,
surface_point_normal,
observation_point,
brdf,
name=None,
reflected_light_fall_off=False):
"""Estimates the spectral radiance of a point light reflected from the surface point towards the observation point.
Note:
In the following, A1 to An are optional batch dimensions, which must be
broadcast compatible.
B1 to Bm are optional batch dimensions for the lights, which must be
broadcast compatible.
Note:
    If the light or the observation point is located behind the surface,
    the function will return 0.
Note:
The gradient of this function is not smooth when the dot product of the
normal with the light-to-surface or surface-to-observation vectors is 0.
Args:
    point_light_radiance: A tensor of shape `[B1, ..., Bm, K]`, where the last
      axis represents the radiance of the point light at a specific
      wavelength.
point_light_position: A tensor of shape `[B1, ..., Bm, 3]`, where the last
axis represents the position of the point light.
surface_point_position: A tensor of shape `[A1, ..., An, 3]`, where the last
axis represents the position of the surface point.
surface_point_normal: A tensor of shape `[A1, ..., An, 3]`, where the last
axis represents the normalized surface normal at the given surface point.
observation_point: A tensor of shape `[A1, ..., An, 3]`, where the last axis
represents the observation point.
brdf: The BRDF of the surface as a function of:
incoming_light_direction - The incoming light direction as the last axis
of a tensor with shape `[A1, ..., An, 3]`.
outgoing_light_direction - The outgoing light direction as the last axis
of a tensor with shape `[A1, ..., An, 3]`.
surface_point_normal - The surface normal as the last axis of a tensor
with shape `[A1, ..., An, 3]`.
      Note - The BRDF should return a tensor of shape `[A1, ..., An, K]`,
        where the last axis represents the amount of reflected light at each
        wavelength.
name: A name for this op. Defaults to "estimate_radiance".
reflected_light_fall_off: A boolean specifying whether or not to include the
fall off of the light reflected from the surface towards the observation
point in the calculation. Defaults to False.
Returns:
A tensor of shape `[A1, ..., An, B1, ..., Bm, K]`, where the last
axis represents the amount of light received at the observation point
after being reflected from the given surface point.
Raises:
ValueError: if the shape of `point_light_position`,
`surface_point_position`, `surface_point_normal`, or `observation_point` is
not supported.
    InvalidArgumentError: if `surface_point_normal` is not normalized.
"""
with tf.compat.v1.name_scope(name, "estimate_radiance", [
point_light_radiance, point_light_position, surface_point_position,
surface_point_normal, observation_point, brdf
]):
point_light_radiance = tf.convert_to_tensor(value=point_light_radiance)
point_light_position = tf.convert_to_tensor(value=point_light_position)
surface_point_position = tf.convert_to_tensor(value=surface_point_position)
surface_point_normal = tf.convert_to_tensor(value=surface_point_normal)
observation_point = tf.convert_to_tensor(value=observation_point)
shape.check_static(
tensor=point_light_position,
tensor_name="point_light_position",
has_dim_equals=(-1, 3))
shape.check_static(
tensor=surface_point_position,
tensor_name="surface_point_position",
has_dim_equals=(-1, 3))
shape.check_static(
tensor=surface_point_normal,
tensor_name="surface_point_normal",
has_dim_equals=(-1, 3))
shape.check_static(
tensor=observation_point,
tensor_name="observation_point",
has_dim_equals=(-1, 3))
shape.compare_batch_dimensions(
tensors=(surface_point_position, surface_point_normal,
observation_point),
tensor_names=("surface_point_position", "surface_point_normal",
"observation_point"),
last_axes=-2,
broadcast_compatible=True)
shape.compare_batch_dimensions(
tensors=(point_light_radiance, point_light_position),
tensor_names=("point_light_radiance", "point_light_position"),
last_axes=-2,
broadcast_compatible=True)
surface_point_normal = asserts.assert_normalized(surface_point_normal)
# Get the number of lights dimensions (B1,...,Bm).
lights_num_dimensions = max(
len(point_light_radiance.shape), len(point_light_position.shape)) - 1
# Reshape the other parameters so they can be broadcasted to the output of
# shape [A1,...,An, B1,...,Bm, K].
surface_point_position = tf.reshape(
surface_point_position,
surface_point_position.shape[:-1] + (1,) * lights_num_dimensions + (3,))
surface_point_normal = tf.reshape(
surface_point_normal,
surface_point_normal.shape[:-1] + (1,) * lights_num_dimensions + (3,))
observation_point = tf.reshape(
observation_point,
observation_point.shape[:-1] + (1,) * lights_num_dimensions + (3,))
light_to_surface_point = surface_point_position - point_light_position
distance_light_surface_point = tf.norm(
tensor=light_to_surface_point, axis=-1, keepdims=True)
incoming_light_direction = tf.math.l2_normalize(
light_to_surface_point, axis=-1)
surface_to_observation_point = observation_point - surface_point_position
outgoing_light_direction = tf.math.l2_normalize(
surface_to_observation_point, axis=-1)
brdf_value = brdf(incoming_light_direction, outgoing_light_direction,
surface_point_normal)
incoming_light_dot_surface_normal = vector.dot(-incoming_light_direction,
surface_point_normal)
outgoing_light_dot_surface_normal = vector.dot(outgoing_light_direction,
surface_point_normal)
    estimated_radiance = (
        point_light_radiance * brdf_value *
        incoming_light_dot_surface_normal) / (
            4. * math.pi * tf.math.square(distance_light_surface_point))
if reflected_light_fall_off:
distance_surface_observation_point = tf.norm(
tensor=surface_to_observation_point, axis=-1, keepdims=True)
      estimated_radiance = estimated_radiance / tf.math.square(
          distance_surface_observation_point)
# Create a condition for checking whether the light or observation point are
# behind the surface.
min_dot = tf.minimum(incoming_light_dot_surface_normal,
outgoing_light_dot_surface_normal)
common_shape = shape.get_broadcasted_shape(min_dot.shape,
estimated_radiance.shape)
d_val = lambda dim: 1 if dim is None else tf.compat.v1.dimension_value(dim)
common_shape = [d_val(dim) for dim in common_shape]
condition = tf.broadcast_to(tf.greater_equal(min_dot, 0.0), common_shape)
return tf.compat.v1.where(condition, estimated_radiance,
tf.zeros_like(estimated_radiance))
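# A minimal usage sketch, kept out of the public API. Everything below is
# illustrative: the Lambertian BRDF and all numeric values are assumptions
# chosen for demonstration, not values prescribed by this module.
def _example_estimate_radiance():
  """Lights one surface point on the z=0 plane with a single point light."""
  def lambertian_brdf(incoming_light_direction, outgoing_light_direction,
                      surface_point_normal):
    # Constant grey albedo over K=3 wavelengths, independent of directions.
    del outgoing_light_direction, surface_point_normal  # Unused.
    albedo = tf.constant((0.8, 0.8, 0.8))
    return tf.ones_like(incoming_light_direction) * albedo / math.pi
  return estimate_radiance(
      point_light_radiance=tf.constant(((10.0, 10.0, 10.0),)),  # [B1=1, K=3]
      point_light_position=tf.constant(((0.0, 0.0, 2.0),)),  # [B1=1, 3]
      surface_point_position=tf.constant(((0.0, 0.0, 0.0),)),  # [A1=1, 3]
      surface_point_normal=tf.constant(((0.0, 0.0, 1.0),)),  # [A1=1, 3]
      observation_point=tf.constant(((1.0, 1.0, 1.0),)),  # [A1=1, 3]
      brdf=lambertian_brdf)  # A radiance tensor of shape [A1=1, B1=1, K=3].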
# API contains all public functions and classes.
__all__ = export_api.get_functions_and_classes()
| -1 |
tensorflow/graphics | 489 | Enforce `Framebuffer` to accept only tensors with single batch dimension. | Enforce `Framebuffer` to accept only tensors with single batch dimension.
| copybara-service[bot] | "2021-02-03T21:06:22Z" | "2021-02-12T23:59:48Z" | 3a4f1952ed967fb884dc031eeda6dac3fbefbe52 | b7a2bf260d6fcf924fddcbb6dba36c72ece66990 | Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
| ./tensorflow_graphics/geometry/representation/mesh/tests/mesh_test_utils.py | # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper routines for mesh unit tests.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
def create_single_triangle_mesh():
r"""Creates a single-triangle mesh, in the z=0 plane and facing +z.
(0,1) 2
|\
| \
| \
(0,0) 0---1 (1,0)
Returns:
vertices: A [3, 3] float array
faces: A [1, 3] int array
"""
vertices = np.array(
((0, 0, 0), (1, 0, 0), (0, 1, 0)), dtype=np.float32)
faces = np.array(((0, 1, 2),), dtype=np.int32)
return vertices, faces
def create_square_triangle_mesh():
  r"""Creates a square mesh, in the z=0 plane and facing +z.
  (0,1) 2---3 (1,1)
        |\ /|
        | 4 |
        |/ \|
  (0,0) 0---1 (1,0)
Returns:
vertices: A [5, 3] float array
faces: A [4, 3] int array
"""
vertices = np.array(
((0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0.5, 0.5, 0)),
dtype=np.float32)
faces = np.array(
((0, 1, 4), (1, 3, 4), (3, 2, 4), (2, 0, 4)), dtype=np.int32)
return vertices, faces
| # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper routines for mesh unit tests.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
def create_single_triangle_mesh():
r"""Creates a single-triangle mesh, in the z=0 plane and facing +z.
(0,1) 2
|\
| \
| \
(0,0) 0---1 (1,0)
Returns:
vertices: A [3, 3] float array
faces: A [1, 3] int array
"""
vertices = np.array(
((0, 0, 0), (1, 0, 0), (0, 1, 0)), dtype=np.float32)
faces = np.array(((0, 1, 2),), dtype=np.int32)
return vertices, faces
def create_square_triangle_mesh():
  r"""Creates a square mesh, in the z=0 plane and facing +z.
  (0,1) 2---3 (1,1)
        |\ /|
        | 4 |
        |/ \|
  (0,0) 0---1 (1,0)
Returns:
vertices: A [5, 3] float array
faces: A [4, 3] int array
"""
vertices = np.array(
((0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0.5, 0.5, 0)),
dtype=np.float32)
faces = np.array(
((0, 1, 4), (1, 3, 4), (3, 2, 4), (2, 0, 4)), dtype=np.int32)
return vertices, faces
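# A quick sanity-check sketch (illustrative; not one of the shared helpers):
# it verifies the "facing +z" claim above by deriving each face normal from
# the counter-clockwise vertex winding.
def _example_check_faces_point_up():
  vertices, faces = create_square_triangle_mesh()
  corners = vertices[faces]  # Shape [4, 3, 3]: the 3 corners of each face.
  normals = np.cross(corners[:, 1] - corners[:, 0],
                     corners[:, 2] - corners[:, 0])
  assert np.all(normals[:, 2] > 0)  # Every (unnormalized) normal points +z.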
| -1 |
tensorflow/graphics | 489 | Enforce `Framebuffer` to accept only tensors with single batch dimension. | Enforce `Framebuffer` to accept only tensors with single batch dimension.
| copybara-service[bot] | "2021-02-03T21:06:22Z" | "2021-02-12T23:59:48Z" | 3a4f1952ed967fb884dc031eeda6dac3fbefbe52 | b7a2bf260d6fcf924fddcbb6dba36c72ece66990 | Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
| ./tensorflow_graphics/datasets/features/camera_feature.py | # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Camera feature."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow_datasets import features
from tensorflow_graphics.datasets.features import pose_feature
class Camera(features.FeaturesDict):
"""`FeatureConnector` for camera calibration (extrinsic and intrinsic).
During `_generate_examples`, the feature connector accepts as input:
  * `parameter_dict:` A dictionary containing the extrinsic and intrinsic
    parameters of the camera as:
- 'pose': Dictionary containing
* Either 3x3 rotation matrix and translation vector:
{
'R': A `float32` tensor with shape `[3, 3]` denoting the
3D rotation matrix.
't': A `float32` tensor with shape `[3,]` denoting the
translation vector.
}
OR
* look_at, position and up-vector:
{
'look_at': float32 vector of shape (3,).
'position': float32 vector of shape (3,).
'up': float32 vector of shape (3,).
}
    - 'f': Focal length of the camera in pixels (either a single float32
      value or a tuple of float32 as (f_x, f_y)).
- 'optical_center': Optical center of the camera
in pixel coordinates as tuple (c_x, c_y)
Optional parameters:
- 'skew': float32 denoting the skew of the camera axes.
- 'aspect_ratio': float32 denoting the aspect_ratio,
if single fixed focal length is provided.
Output:
A dictionary containing:
* 'pose': A `tensorflow_graphics.datasets.features.Pose` FeatureConnector
representing the 3D pose of the camera.
* 'intrinsics': A `float32` tensor with shape `[3,3]` denoting the intrinsic
matrix.
Example:
  Default values for skew (s) and aspect_ratio (a) are 0 and 1, respectively.
Full calibration matrix:
K = [[ f_x, s, c_x ],
[ 0, f_y, c_y ],
[ 0, 0, 1 ]]
With same focal length:
K = [[ f, s, c_x ],
[ 0, af, c_y ],
[ 0, 0, 1 ]]
"""
def __init__(self):
super(Camera, self).__init__({
'pose': pose_feature.Pose(),
'intrinsics': features.Tensor(shape=(3, 3), dtype=tf.float32),
})
def encode_example(self, example_dict):
"""Convert the given parameters into a dict convertible to tf example."""
REQUIRED_KEYS = ['pose', 'f', 'optical_center'] # pylint: disable=invalid-name
if not all(key in example_dict for key in REQUIRED_KEYS):
raise ValueError(f'Missing keys in provided dictionary! '
f'Expected {REQUIRED_KEYS}, '
f'but {example_dict.keys()} were given.')
if not isinstance(example_dict['pose'], dict):
raise ValueError('Pose needs to be a dictionary containing either '
'rotation and translation or look at, '
'up vector and position.')
features_dict = {}
pose_dict = example_dict['pose']
if all(key in pose_dict for key in ['R', 't']):
features_dict['pose'] = {
'R': pose_dict['R'],
't': pose_dict['t']
}
elif all(key in pose_dict for key in ['look_at', 'position', 'up']):
rotation = self._create_rotation_from_look_at(pose_dict['look_at'],
pose_dict['position'],
pose_dict['up'])
translation = (-rotation) @ pose_dict['position']
features_dict['pose'] = {
'R': rotation,
't': translation
}
else:
raise ValueError('Wrong keys for pose feature provided!')
aspect_ratio = 1
skew = 0
if 'aspect_ratio' in example_dict.keys():
if not isinstance(example_dict['f'], float):
raise ValueError('If aspect ratio is provided, '
'f needs to be a single float.')
aspect_ratio = example_dict['aspect_ratio']
if 'skew' in example_dict.keys():
skew = example_dict['skew']
features_dict['intrinsics'] = self._create_calibration_matrix(
example_dict['f'],
example_dict['optical_center'],
aspect_ratio,
skew
)
return super(Camera, self).encode_example(features_dict)
def _create_rotation_from_look_at(self, look_at, position, up):
"""Creates rotation matrix according to OpenGL gluLookAt convention.
Args:
look_at: A float32 3D vector of look_at direction.
position: A float32 3D vector of camera position.
up: A float32 3D up direction vector.
Returns:
A 3x3 float32 rotation matrix.
(https://www.khronos.org/registry/OpenGL-Refpages/gl2.1/xhtml/gluLookAt.xml)
"""
dir_vec = look_at - position
dir_vec /= np.linalg.norm(dir_vec)
side_vec = np.cross(dir_vec, up)
side_vec /= np.linalg.norm(side_vec)
up_vec = np.cross(side_vec, dir_vec)
matrix = np.array([side_vec, up_vec, -dir_vec])
return matrix
def _create_calibration_matrix(self, f, optical_center, aspect_ratio=1,
skew=0):
"""Constructs the 3x3 calibration matrix K.
Args:
      f: Focal length of the camera. Either a single float32 value or a tuple
        of float32 when different focal lengths for each axis are provided
        (f_x, f_y).
optical_center: Tuple (c_x, c_y) containing the optical center
of the camera in pixel coordinates.
aspect_ratio: Optional parameter, if fixed focal length for both
dimensions is used. Defaults to 1.
skew: Optional parameter denoting the skew between the camera axes.
Returns:
float32 Tensor of shape [3,3] containing the upper triangular
calibration matrix K.
"""
if not isinstance(optical_center, tuple):
raise ValueError('Optical center of camera needs '
'to be a tuple of (c_x, c_y).')
if isinstance(f, tuple):
f_x, f_y = f
else:
f_x = f
f_y = aspect_ratio * f
return np.asarray([[f_x, skew, optical_center[0]],
[0, f_y, optical_center[1]],
[0, 0, 1]
], dtype=np.float32)
@classmethod
def from_json_content(cls, value) -> 'Camera':
return cls()
def to_json_content(self):
return {}
| # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Camera feature."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow_datasets import features
from tensorflow_graphics.datasets.features import pose_feature
class Camera(features.FeaturesDict):
"""`FeatureConnector` for camera calibration (extrinsic and intrinsic).
During `_generate_examples`, the feature connector accepts as input:
  * `parameter_dict:` A dictionary containing the extrinsic and intrinsic
    parameters of the camera as:
- 'pose': Dictionary containing
* Either 3x3 rotation matrix and translation vector:
{
'R': A `float32` tensor with shape `[3, 3]` denoting the
3D rotation matrix.
't': A `float32` tensor with shape `[3,]` denoting the
translation vector.
}
OR
* look_at, position and up-vector:
{
'look_at': float32 vector of shape (3,).
'position': float32 vector of shape (3,).
'up': float32 vector of shape (3,).
}
    - 'f': Focal length of the camera in pixels (either a single float32
      value or a tuple of float32 as (f_x, f_y)).
- 'optical_center': Optical center of the camera
in pixel coordinates as tuple (c_x, c_y)
Optional parameters:
- 'skew': float32 denoting the skew of the camera axes.
- 'aspect_ratio': float32 denoting the aspect_ratio,
if single fixed focal length is provided.
Output:
A dictionary containing:
* 'pose': A `tensorflow_graphics.datasets.features.Pose` FeatureConnector
representing the 3D pose of the camera.
* 'intrinsics': A `float32` tensor with shape `[3,3]` denoting the intrinsic
matrix.
Example:
  Default values for skew (s) and aspect_ratio (a) are 0 and 1, respectively.
Full calibration matrix:
K = [[ f_x, s, c_x ],
[ 0, f_y, c_y ],
[ 0, 0, 1 ]]
With same focal length:
K = [[ f, s, c_x ],
[ 0, af, c_y ],
[ 0, 0, 1 ]]
"""
def __init__(self):
super(Camera, self).__init__({
'pose': pose_feature.Pose(),
'intrinsics': features.Tensor(shape=(3, 3), dtype=tf.float32),
})
def encode_example(self, example_dict):
"""Convert the given parameters into a dict convertible to tf example."""
REQUIRED_KEYS = ['pose', 'f', 'optical_center'] # pylint: disable=invalid-name
if not all(key in example_dict for key in REQUIRED_KEYS):
raise ValueError(f'Missing keys in provided dictionary! '
f'Expected {REQUIRED_KEYS}, '
f'but {example_dict.keys()} were given.')
if not isinstance(example_dict['pose'], dict):
raise ValueError('Pose needs to be a dictionary containing either '
'rotation and translation or look at, '
'up vector and position.')
features_dict = {}
pose_dict = example_dict['pose']
if all(key in pose_dict for key in ['R', 't']):
features_dict['pose'] = {
'R': pose_dict['R'],
't': pose_dict['t']
}
elif all(key in pose_dict for key in ['look_at', 'position', 'up']):
rotation = self._create_rotation_from_look_at(pose_dict['look_at'],
pose_dict['position'],
pose_dict['up'])
translation = (-rotation) @ pose_dict['position']
features_dict['pose'] = {
'R': rotation,
't': translation
}
else:
raise ValueError('Wrong keys for pose feature provided!')
aspect_ratio = 1
skew = 0
if 'aspect_ratio' in example_dict.keys():
if not isinstance(example_dict['f'], float):
raise ValueError('If aspect ratio is provided, '
'f needs to be a single float.')
aspect_ratio = example_dict['aspect_ratio']
if 'skew' in example_dict.keys():
skew = example_dict['skew']
features_dict['intrinsics'] = self._create_calibration_matrix(
example_dict['f'],
example_dict['optical_center'],
aspect_ratio,
skew
)
return super(Camera, self).encode_example(features_dict)
def _create_rotation_from_look_at(self, look_at, position, up):
"""Creates rotation matrix according to OpenGL gluLookAt convention.
Args:
look_at: A float32 3D vector of look_at direction.
position: A float32 3D vector of camera position.
up: A float32 3D up direction vector.
Returns:
A 3x3 float32 rotation matrix.
(https://www.khronos.org/registry/OpenGL-Refpages/gl2.1/xhtml/gluLookAt.xml)
"""
dir_vec = look_at - position
dir_vec /= np.linalg.norm(dir_vec)
side_vec = np.cross(dir_vec, up)
side_vec /= np.linalg.norm(side_vec)
up_vec = np.cross(side_vec, dir_vec)
matrix = np.array([side_vec, up_vec, -dir_vec])
return matrix
def _create_calibration_matrix(self, f, optical_center, aspect_ratio=1,
skew=0):
"""Constructs the 3x3 calibration matrix K.
Args:
      f: Focal length of the camera. Either a single float32 value or a tuple
        of float32 when different focal lengths for each axis are provided
        (f_x, f_y).
optical_center: Tuple (c_x, c_y) containing the optical center
of the camera in pixel coordinates.
aspect_ratio: Optional parameter, if fixed focal length for both
dimensions is used. Defaults to 1.
skew: Optional parameter denoting the skew between the camera axes.
Returns:
float32 Tensor of shape [3,3] containing the upper triangular
calibration matrix K.
"""
if not isinstance(optical_center, tuple):
raise ValueError('Optical center of camera needs '
'to be a tuple of (c_x, c_y).')
if isinstance(f, tuple):
f_x, f_y = f
else:
f_x = f
f_y = aspect_ratio * f
return np.asarray([[f_x, skew, optical_center[0]],
[0, f_y, optical_center[1]],
[0, 0, 1]
], dtype=np.float32)
@classmethod
def from_json_content(cls, value) -> 'Camera':
return cls()
def to_json_content(self):
return {}
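# A minimal usage sketch (illustrative only; the numeric values below are
# made up for demonstration). A dataset builder would typically call
# `encode_example` from within `_generate_examples` like this:
def _example_encode_camera():
  camera = Camera()
  return camera.encode_example({
      'pose': {
          'look_at': np.array([0., 0., 0.], dtype=np.float32),
          'position': np.array([0., 0., 5.], dtype=np.float32),
          'up': np.array([0., 1., 0.], dtype=np.float32),
      },
      'f': 35.,  # Single focal length in pixels.
      'optical_center': (128., 128.),
      'aspect_ratio': 1.5,  # Encoded as f_y = 1.5 * f.
  })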
| -1 |
tensorflow/graphics | 489 | Enforce `Framebuffer` to accept only tensors with single batch dimension. | Enforce `Framebuffer` to accept only tensors with single batch dimension.
| copybara-service[bot] | "2021-02-03T21:06:22Z" | "2021-02-12T23:59:48Z" | 3a4f1952ed967fb884dc031eeda6dac3fbefbe52 | b7a2bf260d6fcf924fddcbb6dba36c72ece66990 | Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
| ./tensorflow_graphics/rendering/camera/tests/perspective_test.py | # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for perspective camera functionalities."""
import math
import sys
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow_graphics.rendering.camera import perspective
from tensorflow_graphics.util import test_case
class PerspectiveTest(test_case.TestCase):
@parameterized.parameters(
("must have exactly 4 dimensions in axis -1", (4, 3)),
("must have exactly 4 dimensions in axis -2", (5, 4)),
("must have exactly 4 dimensions in axis -2", (None, 4)),
("must have exactly 4 dimensions in axis -1", (4, None)),
)
def test_parameters_from_right_handed_shape_exception_raised(
self, error_msg, *shapes):
"""Checks the inputs of the from_right_handed_shape function."""
self.assert_exception_is_raised(perspective.parameters_from_right_handed,
error_msg, shapes)
@parameterized.parameters(
((4, 4),),
((None, 4, 4),),
((None, None, 4, 4),),
)
def test_parameters_from_right_handed_shape_exception_not_raised(
self, *shapes):
"""Tests that the shape exceptions are not raised."""
self.assert_exception_is_not_raised(
perspective.parameters_from_right_handed, shapes)
def test_parameters_from_right_handed_random(self):
"""Tests that parameters_from_right_handed returns the expected values."""
tensor_size = np.random.randint(2, 4)
tensor_shape = np.random.randint(2, 5, size=(tensor_size)).tolist()
vertical_field_of_view_gt = np.random.uniform(
sys.float_info.epsilon, np.pi - sys.float_info.epsilon,
tensor_shape + [1])
aspect_ratio_gt = np.random.uniform(0.1, 10.0, tensor_shape + [1])
near_gt = np.random.uniform(0.1, 100.0, tensor_shape + [1])
far_gt = near_gt + np.random.uniform(0.1, 100.0, tensor_shape + [1])
projection_matrix = perspective.right_handed(vertical_field_of_view_gt,
aspect_ratio_gt, near_gt,
far_gt)
vertical_field_of_view_pred, aspect_ratio_pred, near_pred, far_pred = perspective.parameters_from_right_handed(
projection_matrix)
with self.subTest(name="vertical_field_of_view"):
self.assertAllClose(vertical_field_of_view_gt,
vertical_field_of_view_pred)
with self.subTest(name="aspect_ratio"):
self.assertAllClose(aspect_ratio_gt, aspect_ratio_pred)
with self.subTest(name="near_plane"):
self.assertAllClose(near_gt, near_pred)
with self.subTest(name="far_plane"):
self.assertAllClose(far_gt, far_pred)
def test_parameters_from_right_handed_jacobian_random(self):
"""Tests the Jacobian of parameters_from_right_handed."""
tensor_size = np.random.randint(2, 4)
tensor_shape = np.random.randint(2, 5, size=(tensor_size)).tolist()
vertical_field_of_view = np.random.uniform(sys.float_info.epsilon,
np.pi - sys.float_info.epsilon,
tensor_shape + [1])
aspect_ratio = np.random.uniform(0.1, 10.0, tensor_shape + [1])
near = np.random.uniform(0.1, 100.0, tensor_shape + [1])
far = near + np.random.uniform(0.1, 100.0, tensor_shape + [1])
projection_matrix = perspective.right_handed(vertical_field_of_view,
aspect_ratio, near, far)
with self.subTest(name="vertical_field_of_view"):
self.assert_jacobian_is_finite_fn(
lambda x: perspective.parameters_from_right_handed(x)[0],
[projection_matrix])
with self.subTest(name="aspect_ratio"):
self.assert_jacobian_is_finite_fn(
lambda x: perspective.parameters_from_right_handed(x)[1],
[projection_matrix])
with self.subTest(name="near_plane"):
self.assert_jacobian_is_finite_fn(
lambda x: perspective.parameters_from_right_handed(x)[2],
[projection_matrix])
with self.subTest(name="far_plane"):
self.assert_jacobian_is_finite_fn(
lambda x: perspective.parameters_from_right_handed(x)[3],
[projection_matrix])
def test_perspective_right_handed_preset(self):
"""Tests that perspective_right_handed generates expected results."""
vertical_field_of_view = ((60.0 * math.pi / 180.0,),
(50.0 * math.pi / 180.0,))
aspect_ratio = ((1.5,), (1.1,))
near = ((1.0,), (1.2,))
far = ((10.0,), (5.0,))
pred = perspective.right_handed(vertical_field_of_view, aspect_ratio, near,
far)
gt = (((1.15470052, 0.0, 0.0, 0.0), (0.0, 1.73205066, 0.0, 0.0),
(0.0, 0.0, -1.22222221, -2.22222233), (0.0, 0.0, -1.0, 0.0)),
((1.9495517, 0.0, 0.0, 0.0), (0.0, 2.14450693, 0.0, 0.0),
(0.0, 0.0, -1.63157892, -3.15789485), (0.0, 0.0, -1.0, 0.0)))
self.assertAllClose(pred, gt)
@parameterized.parameters(
((1,), (1,), (1,), (1,)),
((None, 1), (None, 1), (None, 1), (None, 1)),
((None, 3, 1), (None, 3, 1), (None, 3, 1), (None, 3, 1)),
)
def test_perspective_right_handed_exception_not_raised(self, *shapes):
"""Tests that the shape exceptions are not raised."""
self.assert_exception_is_not_raised(perspective.right_handed, shapes)
@parameterized.parameters(
("Not all batch dimensions are identical", (1,), (3, 1), (3, 1), (3, 1)),
("Not all batch dimensions are identical", (3, 1), (None, 3, 1), (3, 1),
(3, 1)),
)
def test_perspective_right_handed_shape_exception_raised(
self, error_msg, *shapes):
"""Tests that the shape exceptions are properly raised."""
self.assert_exception_is_raised(perspective.right_handed, error_msg, shapes)
@parameterized.parameters(
((1.0,),
(1.0,), np.random.uniform(-1.0, 0.0, size=(1,)).astype(np.float32),
(1.0,)),
((1.0,), (1.0,), (0.0,), (1.0,)),
((1.0,), np.random.uniform(-1.0, 0.0, size=(1,)).astype(np.float32),
(0.1,), (1.0,)),
((1.0,), (0.0,), (0.1,), (1.0,)),
((1.0,),
(1.0,), np.random.uniform(1.0, 2.0, size=(1,)).astype(np.float32),
np.random.uniform(0.1, 0.5, size=(1,)).astype(np.float32)),
((1.0,), (1.0,), (0.1,), (0.1,)),
(np.random.uniform(-math.pi, 0.0, size=(1,)).astype(np.float32), (1.0,),
(0.1,), (1.0,)),
(np.random.uniform(math.pi, 2.0 * math.pi, size=(1,)).astype(np.float32),
(1.0,), (0.1,), (1.0,)),
((0.0,), (1.0,), (0.1,), (1.0,)),
((math.pi,), (1.0,), (0.1,), (1.0,)),
)
def test_perspective_right_handed_valid_range_exception_raised(
self, vertical_field_of_view, aspect_ratio, near, far):
"""Tests that an exception is raised with out of bounds values."""
with self.assertRaises(tf.errors.InvalidArgumentError):
self.evaluate(
perspective.right_handed(vertical_field_of_view, aspect_ratio, near,
far))
def test_perspective_right_handed_cross_jacobian_preset(self):
"""Tests the Jacobian of perspective_right_handed."""
vertical_field_of_view_init = np.array((1.0,))
aspect_ratio_init = np.array((1.0,))
near_init = np.array((1.0,))
far_init = np.array((10.0,))
self.assert_jacobian_is_correct_fn(
perspective.right_handed,
[vertical_field_of_view_init, aspect_ratio_init, near_init, far_init])
def test_perspective_right_handed_cross_jacobian_random(self):
"""Tests the Jacobian of perspective_right_handed."""
tensor_size = np.random.randint(1, 3)
tensor_shape = np.random.randint(1, 5, size=(tensor_size)).tolist()
eps = np.finfo(np.float64).eps
vertical_field_of_view_init = np.random.uniform(
eps, math.pi - eps, size=tensor_shape + [1])
aspect_ratio_init = np.random.uniform(eps, 100.0, size=tensor_shape + [1])
near_init = np.random.uniform(eps, 10.0, size=tensor_shape + [1])
far_init = np.random.uniform(10 + eps, 100.0, size=tensor_shape + [1])
self.assert_jacobian_is_correct_fn(
perspective.right_handed,
[vertical_field_of_view_init, aspect_ratio_init, near_init, far_init])
@parameterized.parameters(
((3, 3),),
((3, 3, 3),),
((None, 3, 3),),
)
def test_intrinsics_from_matrix_exception_not_raised(self, *shapes):
"""Tests that the shape exceptions are not raised."""
self.assert_exception_is_not_raised(perspective.intrinsics_from_matrix,
shapes)
@parameterized.parameters(
("must have a rank greater than 1", (3,)),
("must have exactly 3 dimensions in axis -2", (None, 3)),
("must have exactly 3 dimensions in axis -1", (3, None)),
)
def test_intrinsics_from_matrix_exception_raised(self, error_msg, *shapes):
"""Tests that the shape exceptions are properly raised."""
self.assert_exception_is_raised(perspective.intrinsics_from_matrix,
error_msg, shapes)
@parameterized.parameters(
((((0., 0., 0.), (0., 0., 0.), (0., 0., 1.)),), ((0., 0.), (0., 0.),
(0.0,))),
((((1., 0., 3.), (0., 2., 4.), (0., 0., 1.)),), ((1., 2.), (3., 4.),
(0.0,))),
)
def test_intrinsics_from_matrix_preset(self, test_inputs, test_outputs):
"""Tests that intrinsics_from_matrix gives the correct result."""
self.assert_output_is_correct(perspective.intrinsics_from_matrix,
test_inputs, test_outputs)
def test_intrinsics_from_matrix_to_intrinsics_random(self):
"""Tests that converting intrinsics to a matrix and back is consistent."""
tensor_size = np.random.randint(3)
tensor_shape = np.random.randint(1, 10, size=(tensor_size)).tolist()
random_focal = np.random.normal(size=tensor_shape + [2])
random_principal_point = np.random.normal(size=tensor_shape + [2])
random_skew_coeff = np.random.normal(size=tensor_shape + [1])
matrix = perspective.matrix_from_intrinsics(random_focal,
random_principal_point,
random_skew_coeff)
focal, principal_point, skew_coeff = perspective.intrinsics_from_matrix(
matrix)
random_skew_coeff = np.reshape(random_skew_coeff, (1, 1))
self.assertAllClose(random_focal, focal, rtol=1e-3)
self.assertAllClose(random_principal_point, principal_point, rtol=1e-3)
self.assertAllClose(random_skew_coeff, skew_coeff, rtol=1e-3)
@parameterized.parameters(
((2,), (2,), (1,)),
((2, 2), (2, 2), (2, 1)),
((None, 2), (None, 2), (None, 1)),
)
def test_matrix_from_intrinsics_exception_not_raised(self, *shapes):
"""Tests that the shape exceptions are not raised."""
self.assert_exception_is_not_raised(perspective.matrix_from_intrinsics,
shapes)
@parameterized.parameters(
((2,), (2,)),
((2, 2), (2, 2)),
((None, 2), (None, 2)),
)
def test_matrix_from_intrinsics_exception_not_raised_when_skew_not_passed(
self, *shapes):
"""Tests that the shape exceptions are not raised."""
self.assert_exception_is_not_raised(perspective.matrix_from_intrinsics,
shapes)
@parameterized.parameters(
("must have exactly 2 dimensions in axis -1", (None,), (2,)),
("must have exactly 2 dimensions in axis -1", (2,), (None,)),
("Not all batch dimensions are identical.", (3, 2), (2, 2)),
)
def test_matrix_from_intrinsics_exception_raised(self, error_msg, *shapes):
"""Tests that the shape exceptions are properly raised."""
self.assert_exception_is_raised(perspective.matrix_from_intrinsics,
error_msg, shapes)
@parameterized.parameters(
(((0.0, 0.0), (0.0, 0.0), (0.0,)), (((0.0, 0.0, 0.0), (0.0, 0.0, 0.0),
(0.0, 0.0, 1.0)),)),
(((1.0, 2.0), (3.0, 4.0), (0.0,)), (((1.0, 0.0, 3.0), (0.0, 2.0, 4.0),
(0.0, 0.0, 1.0)),)))
def test_matrix_from_intrinsics_preset(self, test_inputs, test_outputs):
"""Tests that matrix_from_intrinsics gives the correct result."""
self.assert_output_is_correct(perspective.matrix_from_intrinsics,
test_inputs, test_outputs)
def test_matrix_from_intrinsics_to_matrix_random(self):
"""Tests that converting a matrix to intrinsics and back is consistent."""
tensor_size = np.random.randint(3)
tensor_shape = np.random.randint(1, 10, size=(tensor_size)).tolist()
random_focal = np.random.normal(size=tensor_shape + [2])
random_principal_point = np.random.normal(size=tensor_shape + [2])
fx = random_focal[..., 0]
fy = random_focal[..., 1]
cx = random_principal_point[..., 0]
cy = random_principal_point[..., 1]
zero = np.zeros_like(fx)
one = np.ones_like(fx)
random_matrix = np.stack((fx, zero, cx, zero, fy, cy, zero, zero, one),
axis=-1).reshape(tensor_shape + [3, 3])
focal, principal_point, skew_coefficient = perspective.intrinsics_from_matrix(
random_matrix)
matrix = perspective.matrix_from_intrinsics(focal,
principal_point,
skew_coefficient)
self.assertAllClose(random_matrix, matrix, rtol=1e-3)
@parameterized.parameters(
((3,), (2,), (2,)),
((2, 3), (2, 2), (2, 2)),
((2, 3), (2,), (2,)),
((None, 3), (None, 2), (None, 2)),
)
  def test_project_exception_not_raised(self, *shapes):
"""Tests that the shape exceptions are not raised."""
self.assert_exception_is_not_raised(perspective.project, shapes)
@parameterized.parameters(
("must have exactly 3 dimensions in axis -1", (None,), (2,), (2,)),
("must have exactly 2 dimensions in axis -1", (3,), (None,), (2,)),
("must have exactly 2 dimensions in axis -1", (3,), (2,), (None,)),
("Not all batch dimensions are broadcast-compatible.", (3, 3), (2, 2),
(2, 2)),
)
def test_project_exception_raised(self, error_msg, *shape):
"""Tests that the shape exceptions are properly raised."""
self.assert_exception_is_raised(perspective.project, error_msg, shape)
@parameterized.parameters(
(((0., 0., 1.), (1., 1.), (0., 0.)), ((0., 0.),)),
(((4., 2., 1.), (1., 1.), (-4., -2.)), ((0., 0.),)),
(((4., 2., 10.), (1., 1.), (-.4, -.2)), ((0., 0.),)),
(((4., 2., 10.), (2., 1.), (-.8, -.2)), ((0., 0.),)),
(((4., 2., 10.), (2., 1.), (-.8, 0.)), ((0., .2),)),
)
def test_project_preset(self, test_inputs, test_outputs):
"""Tests that the project function gives the correct result."""
self.assert_output_is_correct(perspective.project, test_inputs,
test_outputs)
def test_project_unproject_random(self):
"""Tests that projecting and unprojecting gives an identity mapping."""
tensor_size = np.random.randint(3)
tensor_shape = np.random.randint(1, 10, size=(tensor_size)).tolist()
random_point_3d = np.random.normal(size=tensor_shape + [3])
random_focal = np.random.normal(size=tensor_shape + [2])
random_principal_point = np.random.normal(size=tensor_shape + [2])
random_depth = np.expand_dims(random_point_3d[..., 2], axis=-1)
point_2d = perspective.project(random_point_3d, random_focal,
random_principal_point)
point_3d = perspective.unproject(point_2d, random_depth, random_focal,
random_principal_point)
self.assertAllClose(random_point_3d, point_3d, rtol=1e-3)
  def test_project_ray_random(self):
    """Tests that the ray points toward the correct location."""
tensor_size = np.random.randint(3)
tensor_shape = np.random.randint(1, 10, size=(tensor_size)).tolist()
random_point_3d = np.random.normal(size=tensor_shape + [3])
random_focal = np.random.normal(size=tensor_shape + [2])
random_principal_point = np.random.normal(size=tensor_shape + [2])
random_depth = np.expand_dims(random_point_3d[..., 2], axis=-1)
point_2d = perspective.project(random_point_3d, random_focal,
random_principal_point)
ray_3d = perspective.ray(point_2d, random_focal, random_principal_point)
ray_3d = random_depth * ray_3d
self.assertAllClose(random_point_3d, ray_3d, rtol=1e-3)
@parameterized.parameters(
((2,), (2,), (2,)),
((2, 2), (2, 2), (2, 2)),
((3, 2), (1, 2), (2,)),
((None, 2), (None, 2), (None, 2)),
)
  def test_ray_exception_not_raised(self, *shapes):
"""Tests that the shape exceptions are not raised."""
self.assert_exception_is_not_raised(perspective.ray, shapes)
@parameterized.parameters(
("must have exactly 2 dimensions in axis -1", (None,), (2,), (2,)),
("must have exactly 2 dimensions in axis -1", (2,), (None,), (2,)),
("must have exactly 2 dimensions in axis -1", (2,), (2,), (None,)),
("Not all batch dimensions are broadcast-compatible.", (3, 2), (1, 2),
(2, 2)),
)
  def test_ray_exception_raised(self, error_msg, *shapes):
"""Tests that the shape exceptions are properly raised."""
self.assert_exception_is_raised(perspective.ray, error_msg, shapes)
@parameterized.parameters(
(((0., 0.), (1., 1.), (0., 0.)), ((0., 0., 1.),)),
(((0., 0.), (1., 1.), (-1., -2.)), ((1., 2., 1.),)),
(((0., 0.), (10., 1.), (-1., -2.)), ((.1, 2., 1.),)),
(((-2., -4.), (10., 1.), (-3., -6.)), ((.1, 2., 1.),)),
)
def test_ray_preset(self, test_inputs, test_outputs):
"""Tests that the ray function gives the correct result."""
self.assert_output_is_correct(perspective.ray, test_inputs, test_outputs)
  def test_ray_project_random(self):
    """Tests that the end point of the ray projects to the correct location."""
tensor_size = np.random.randint(3)
tensor_shape = np.random.randint(1, 10, size=(tensor_size)).tolist()
random_point_2d = np.random.normal(size=tensor_shape + [2])
random_focal = np.random.normal(size=tensor_shape + [2])
random_principal_point = np.random.normal(size=tensor_shape + [2])
ray_3d = perspective.ray(random_point_2d, random_focal,
random_principal_point)
point_2d = perspective.project(ray_3d, random_focal, random_principal_point)
self.assertAllClose(random_point_2d, point_2d, rtol=1e-3)
@parameterized.parameters(
((2,), (1,), (2,), (2,)),
((2, 2), (2, 1), (2, 2), (2, 2)),
((None, 2), (None, 1), (None, 2), (None, 2)),
)
def test_unproject_exception_not_raised(self, *shapes):
"""Tests that the shape exceptions are not raised."""
self.assert_exception_is_not_raised(perspective.unproject, shapes)
@parameterized.parameters(
("must have exactly 2 dimensions in axis -1", (None,), (1,), (2,), (2,)),
("must have exactly 1 dimensions in axis -1", (2,), (None,), (2,), (2,)),
("must have exactly 2 dimensions in axis -1", (2,), (1,), (None,), (2,)),
("must have exactly 2 dimensions in axis -1", (2,), (1,), (2,), (None,)),
("Not all batch dimensions are identical.", (1, 2), (2, 1), (2, 2),
(2, 2)),
)
def test_unproject_exception_raised(self, error_msg, *shapes):
"""Tests that the shape exceptions are properly raised."""
self.assert_exception_is_raised(perspective.unproject, error_msg, shapes)
@parameterized.parameters(
(((0., 0.), (1.,), (1., 1.), (0., 0.)), ((0., 0., 1.),)),
(((0., 0.), (1.,), (1., 1.), (-4., -2.)), ((4., 2., 1.),)),
(((0., 0.), (10.,), (1., 1.), (-.4, -.2)), ((4., 2., 10.),)),
(((0., 0.), (10.,), (2., 1.), (-.8, -.2)), ((4., 2., 10.),)),
(((0., .2), (10.,), (2., 1.), (-.8, 0.)), ((4., 2., 10.),)),
)
def test_unproject_preset(self, test_inputs, test_outputs):
"""Tests that the unproject function gives the correct result."""
self.assert_output_is_correct(perspective.unproject, test_inputs,
test_outputs)
  def test_unproject_project_random(self):
    """Tests that unprojecting and projecting gives an identity mapping."""
tensor_size = np.random.randint(3)
tensor_shape = np.random.randint(1, 10, size=(tensor_size)).tolist()
random_point_2d = np.random.normal(size=tensor_shape + [2])
random_focal = np.random.normal(size=tensor_shape + [2])
random_principal_point = np.random.normal(size=tensor_shape + [2])
random_depth = np.random.normal(size=tensor_shape + [1])
point_3d = perspective.unproject(random_point_2d, random_depth,
random_focal, random_principal_point)
point_2d = perspective.project(point_3d, random_focal,
random_principal_point)
self.assertAllClose(random_point_2d, point_2d, rtol=1e-3)
  def test_unproject_ray_random(self):
    """Tests that the ray points toward the correct location."""
tensor_size = np.random.randint(3)
tensor_shape = np.random.randint(1, 10, size=(tensor_size)).tolist()
random_point_2d = np.random.normal(size=tensor_shape + [2])
random_focal = np.random.normal(size=tensor_shape + [2])
random_principal_point = np.random.normal(size=tensor_shape + [2])
random_depth = np.random.normal(size=tensor_shape + [1])
point_3d = perspective.unproject(random_point_2d, random_depth,
random_focal, random_principal_point)
ray_3d = perspective.ray(random_point_2d, random_focal,
random_principal_point)
ray_3d = random_depth * ray_3d
self.assertAllClose(point_3d, ray_3d, rtol=1e-3)
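# For reference, a tiny sketch of the pinhole relation that the projection
# presets above exercise (assuming the conventions of `perspective.project`;
# this helper is illustrative and not used by the tests):
def _pinhole_project(point_3d, focal, principal_point):
  """E.g. _pinhole_project((4., 2., 10.), (1., 1.), (-.4, -.2)) -> (0., 0.)."""
  point_3d = np.asarray(point_3d, dtype=np.float32)
  return (np.asarray(focal) * point_3d[..., :2] / point_3d[..., 2:] +
          np.asarray(principal_point))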
if __name__ == "__main__":
test_case.main()
| # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for perspective camera functionalities."""
import math
import sys
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow_graphics.rendering.camera import perspective
from tensorflow_graphics.util import test_case
class PerspectiveTest(test_case.TestCase):
@parameterized.parameters(
("must have exactly 4 dimensions in axis -1", (4, 3)),
("must have exactly 4 dimensions in axis -2", (5, 4)),
("must have exactly 4 dimensions in axis -2", (None, 4)),
("must have exactly 4 dimensions in axis -1", (4, None)),
)
def test_parameters_from_right_handed_shape_exception_raised(
self, error_msg, *shapes):
"""Checks the inputs of the from_right_handed_shape function."""
self.assert_exception_is_raised(perspective.parameters_from_right_handed,
error_msg, shapes)
@parameterized.parameters(
((4, 4),),
((None, 4, 4),),
((None, None, 4, 4),),
)
def test_parameters_from_right_handed_shape_exception_not_raised(
self, *shapes):
"""Tests that the shape exceptions are not raised."""
self.assert_exception_is_not_raised(
perspective.parameters_from_right_handed, shapes)
def test_parameters_from_right_handed_random(self):
"""Tests that parameters_from_right_handed returns the expected values."""
tensor_size = np.random.randint(2, 4)
tensor_shape = np.random.randint(2, 5, size=(tensor_size)).tolist()
vertical_field_of_view_gt = np.random.uniform(
sys.float_info.epsilon, np.pi - sys.float_info.epsilon,
tensor_shape + [1])
aspect_ratio_gt = np.random.uniform(0.1, 10.0, tensor_shape + [1])
near_gt = np.random.uniform(0.1, 100.0, tensor_shape + [1])
far_gt = near_gt + np.random.uniform(0.1, 100.0, tensor_shape + [1])
projection_matrix = perspective.right_handed(vertical_field_of_view_gt,
aspect_ratio_gt, near_gt,
far_gt)
vertical_field_of_view_pred, aspect_ratio_pred, near_pred, far_pred = perspective.parameters_from_right_handed(
projection_matrix)
with self.subTest(name="vertical_field_of_view"):
self.assertAllClose(vertical_field_of_view_gt,
vertical_field_of_view_pred)
with self.subTest(name="aspect_ratio"):
self.assertAllClose(aspect_ratio_gt, aspect_ratio_pred)
with self.subTest(name="near_plane"):
self.assertAllClose(near_gt, near_pred)
with self.subTest(name="far_plane"):
self.assertAllClose(far_gt, far_pred)
def test_parameters_from_right_handed_jacobian_random(self):
"""Tests the Jacobian of parameters_from_right_handed."""
tensor_size = np.random.randint(2, 4)
tensor_shape = np.random.randint(2, 5, size=(tensor_size)).tolist()
vertical_field_of_view = np.random.uniform(sys.float_info.epsilon,
np.pi - sys.float_info.epsilon,
tensor_shape + [1])
aspect_ratio = np.random.uniform(0.1, 10.0, tensor_shape + [1])
near = np.random.uniform(0.1, 100.0, tensor_shape + [1])
far = near + np.random.uniform(0.1, 100.0, tensor_shape + [1])
projection_matrix = perspective.right_handed(vertical_field_of_view,
aspect_ratio, near, far)
with self.subTest(name="vertical_field_of_view"):
self.assert_jacobian_is_finite_fn(
lambda x: perspective.parameters_from_right_handed(x)[0],
[projection_matrix])
with self.subTest(name="aspect_ratio"):
self.assert_jacobian_is_finite_fn(
lambda x: perspective.parameters_from_right_handed(x)[1],
[projection_matrix])
with self.subTest(name="near_plane"):
self.assert_jacobian_is_finite_fn(
lambda x: perspective.parameters_from_right_handed(x)[2],
[projection_matrix])
with self.subTest(name="far_plane"):
self.assert_jacobian_is_finite_fn(
lambda x: perspective.parameters_from_right_handed(x)[3],
[projection_matrix])
def test_perspective_right_handed_preset(self):
"""Tests that perspective_right_handed generates expected results."""
vertical_field_of_view = ((60.0 * math.pi / 180.0,),
(50.0 * math.pi / 180.0,))
aspect_ratio = ((1.5,), (1.1,))
near = ((1.0,), (1.2,))
far = ((10.0,), (5.0,))
pred = perspective.right_handed(vertical_field_of_view, aspect_ratio, near,
far)
gt = (((1.15470052, 0.0, 0.0, 0.0), (0.0, 1.73205066, 0.0, 0.0),
(0.0, 0.0, -1.22222221, -2.22222233), (0.0, 0.0, -1.0, 0.0)),
((1.9495517, 0.0, 0.0, 0.0), (0.0, 2.14450693, 0.0, 0.0),
(0.0, 0.0, -1.63157892, -3.15789485), (0.0, 0.0, -1.0, 0.0)))
self.assertAllClose(pred, gt)
@parameterized.parameters(
((1,), (1,), (1,), (1,)),
((None, 1), (None, 1), (None, 1), (None, 1)),
((None, 3, 1), (None, 3, 1), (None, 3, 1), (None, 3, 1)),
)
def test_perspective_right_handed_exception_not_raised(self, *shapes):
"""Tests that the shape exceptions are not raised."""
self.assert_exception_is_not_raised(perspective.right_handed, shapes)
@parameterized.parameters(
("Not all batch dimensions are identical", (1,), (3, 1), (3, 1), (3, 1)),
("Not all batch dimensions are identical", (3, 1), (None, 3, 1), (3, 1),
(3, 1)),
)
def test_perspective_right_handed_shape_exception_raised(
self, error_msg, *shapes):
"""Tests that the shape exceptions are properly raised."""
self.assert_exception_is_raised(perspective.right_handed, error_msg, shapes)
@parameterized.parameters(
((1.0,),
(1.0,), np.random.uniform(-1.0, 0.0, size=(1,)).astype(np.float32),
(1.0,)),
((1.0,), (1.0,), (0.0,), (1.0,)),
((1.0,), np.random.uniform(-1.0, 0.0, size=(1,)).astype(np.float32),
(0.1,), (1.0,)),
((1.0,), (0.0,), (0.1,), (1.0,)),
((1.0,),
(1.0,), np.random.uniform(1.0, 2.0, size=(1,)).astype(np.float32),
np.random.uniform(0.1, 0.5, size=(1,)).astype(np.float32)),
((1.0,), (1.0,), (0.1,), (0.1,)),
(np.random.uniform(-math.pi, 0.0, size=(1,)).astype(np.float32), (1.0,),
(0.1,), (1.0,)),
(np.random.uniform(math.pi, 2.0 * math.pi, size=(1,)).astype(np.float32),
(1.0,), (0.1,), (1.0,)),
((0.0,), (1.0,), (0.1,), (1.0,)),
((math.pi,), (1.0,), (0.1,), (1.0,)),
)
def test_perspective_right_handed_valid_range_exception_raised(
self, vertical_field_of_view, aspect_ratio, near, far):
"""Tests that an exception is raised with out of bounds values."""
with self.assertRaises(tf.errors.InvalidArgumentError):
self.evaluate(
perspective.right_handed(vertical_field_of_view, aspect_ratio, near,
far))
def test_perspective_right_handed_cross_jacobian_preset(self):
"""Tests the Jacobian of perspective_right_handed."""
vertical_field_of_view_init = np.array((1.0,))
aspect_ratio_init = np.array((1.0,))
near_init = np.array((1.0,))
far_init = np.array((10.0,))
self.assert_jacobian_is_correct_fn(
perspective.right_handed,
[vertical_field_of_view_init, aspect_ratio_init, near_init, far_init])
def test_perspective_right_handed_cross_jacobian_random(self):
"""Tests the Jacobian of perspective_right_handed."""
tensor_size = np.random.randint(1, 3)
tensor_shape = np.random.randint(1, 5, size=(tensor_size)).tolist()
eps = np.finfo(np.float64).eps
vertical_field_of_view_init = np.random.uniform(
eps, math.pi - eps, size=tensor_shape + [1])
aspect_ratio_init = np.random.uniform(eps, 100.0, size=tensor_shape + [1])
near_init = np.random.uniform(eps, 10.0, size=tensor_shape + [1])
far_init = np.random.uniform(10 + eps, 100.0, size=tensor_shape + [1])
self.assert_jacobian_is_correct_fn(
perspective.right_handed,
[vertical_field_of_view_init, aspect_ratio_init, near_init, far_init])
@parameterized.parameters(
((3, 3),),
((3, 3, 3),),
((None, 3, 3),),
)
def test_intrinsics_from_matrix_exception_not_raised(self, *shapes):
"""Tests that the shape exceptions are not raised."""
self.assert_exception_is_not_raised(perspective.intrinsics_from_matrix,
shapes)
@parameterized.parameters(
("must have a rank greater than 1", (3,)),
("must have exactly 3 dimensions in axis -2", (None, 3)),
("must have exactly 3 dimensions in axis -1", (3, None)),
)
def test_intrinsics_from_matrix_exception_raised(self, error_msg, *shapes):
"""Tests that the shape exceptions are properly raised."""
self.assert_exception_is_raised(perspective.intrinsics_from_matrix,
error_msg, shapes)
@parameterized.parameters(
((((0., 0., 0.), (0., 0., 0.), (0., 0., 1.)),), ((0., 0.), (0., 0.),
(0.0,))),
((((1., 0., 3.), (0., 2., 4.), (0., 0., 1.)),), ((1., 2.), (3., 4.),
(0.0,))),
)
def test_intrinsics_from_matrix_preset(self, test_inputs, test_outputs):
"""Tests that intrinsics_from_matrix gives the correct result."""
self.assert_output_is_correct(perspective.intrinsics_from_matrix,
test_inputs, test_outputs)
def test_intrinsics_from_matrix_to_intrinsics_random(self):
"""Tests that converting intrinsics to a matrix and back is consistent."""
tensor_size = np.random.randint(3)
tensor_shape = np.random.randint(1, 10, size=(tensor_size)).tolist()
random_focal = np.random.normal(size=tensor_shape + [2])
random_principal_point = np.random.normal(size=tensor_shape + [2])
random_skew_coeff = np.random.normal(size=tensor_shape + [1])
matrix = perspective.matrix_from_intrinsics(random_focal,
random_principal_point,
random_skew_coeff)
focal, principal_point, skew_coeff = perspective.intrinsics_from_matrix(
matrix)
random_skew_coeff = np.reshape(random_skew_coeff, (1, 1))
self.assertAllClose(random_focal, focal, rtol=1e-3)
self.assertAllClose(random_principal_point, principal_point, rtol=1e-3)
self.assertAllClose(random_skew_coeff, skew_coeff, rtol=1e-3)
@parameterized.parameters(
((2,), (2,), (1,)),
((2, 2), (2, 2), (2, 1)),
((None, 2), (None, 2), (None, 1)),
)
def test_matrix_from_intrinsics_exception_not_raised(self, *shapes):
"""Tests that the shape exceptions are not raised."""
self.assert_exception_is_not_raised(perspective.matrix_from_intrinsics,
shapes)
@parameterized.parameters(
((2,), (2,)),
((2, 2), (2, 2)),
((None, 2), (None, 2)),
)
def test_matrix_from_intrinsics_exception_not_raised_when_skew_not_passed(
self, *shapes):
"""Tests that the shape exceptions are not raised."""
self.assert_exception_is_not_raised(perspective.matrix_from_intrinsics,
shapes)
@parameterized.parameters(
("must have exactly 2 dimensions in axis -1", (None,), (2,)),
("must have exactly 2 dimensions in axis -1", (2,), (None,)),
("Not all batch dimensions are identical.", (3, 2), (2, 2)),
)
def test_matrix_from_intrinsics_exception_raised(self, error_msg, *shapes):
"""Tests that the shape exceptions are properly raised."""
self.assert_exception_is_raised(perspective.matrix_from_intrinsics,
error_msg, shapes)
@parameterized.parameters(
(((0.0, 0.0), (0.0, 0.0), (0.0,)), (((0.0, 0.0, 0.0), (0.0, 0.0, 0.0),
(0.0, 0.0, 1.0)),)),
(((1.0, 2.0), (3.0, 4.0), (0.0,)), (((1.0, 0.0, 3.0), (0.0, 2.0, 4.0),
(0.0, 0.0, 1.0)),)))
def test_matrix_from_intrinsics_preset(self, test_inputs, test_outputs):
"""Tests that matrix_from_intrinsics gives the correct result."""
self.assert_output_is_correct(perspective.matrix_from_intrinsics,
test_inputs, test_outputs)
def test_matrix_from_intrinsics_to_matrix_random(self):
"""Tests that converting a matrix to intrinsics and back is consistent."""
tensor_size = np.random.randint(3)
tensor_shape = np.random.randint(1, 10, size=(tensor_size)).tolist()
random_focal = np.random.normal(size=tensor_shape + [2])
random_principal_point = np.random.normal(size=tensor_shape + [2])
fx = random_focal[..., 0]
fy = random_focal[..., 1]
cx = random_principal_point[..., 0]
cy = random_principal_point[..., 1]
zero = np.zeros_like(fx)
one = np.ones_like(fx)
random_matrix = np.stack((fx, zero, cx, zero, fy, cy, zero, zero, one),
axis=-1).reshape(tensor_shape + [3, 3])
focal, principal_point, skew_coefficient = perspective.intrinsics_from_matrix(
random_matrix)
matrix = perspective.matrix_from_intrinsics(focal,
principal_point,
skew_coefficient)
self.assertAllClose(random_matrix, matrix, rtol=1e-3)
@parameterized.parameters(
((3,), (2,), (2,)),
((2, 3), (2, 2), (2, 2)),
((2, 3), (2,), (2,)),
((None, 3), (None, 2), (None, 2)),
)
  def test_project_exception_not_raised(self, *shapes):
"""Tests that the shape exceptions are not raised."""
self.assert_exception_is_not_raised(perspective.project, shapes)
@parameterized.parameters(
("must have exactly 3 dimensions in axis -1", (None,), (2,), (2,)),
("must have exactly 2 dimensions in axis -1", (3,), (None,), (2,)),
("must have exactly 2 dimensions in axis -1", (3,), (2,), (None,)),
("Not all batch dimensions are broadcast-compatible.", (3, 3), (2, 2),
(2, 2)),
)
def test_project_exception_raised(self, error_msg, *shape):
"""Tests that the shape exceptions are properly raised."""
self.assert_exception_is_raised(perspective.project, error_msg, shape)
@parameterized.parameters(
(((0., 0., 1.), (1., 1.), (0., 0.)), ((0., 0.),)),
(((4., 2., 1.), (1., 1.), (-4., -2.)), ((0., 0.),)),
(((4., 2., 10.), (1., 1.), (-.4, -.2)), ((0., 0.),)),
(((4., 2., 10.), (2., 1.), (-.8, -.2)), ((0., 0.),)),
(((4., 2., 10.), (2., 1.), (-.8, 0.)), ((0., .2),)),
)
def test_project_preset(self, test_inputs, test_outputs):
"""Tests that the project function gives the correct result."""
self.assert_output_is_correct(perspective.project, test_inputs,
test_outputs)
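  # Reading aid for the presets above (a sketch of the pinhole model, not an
  # API reference): with focal (fx, fy) and principal point (cx, cy), a point
  # (x, y, z) projects to (fx * x / z + cx, fy * y / z + cy); e.g.
  # (4., 2., 10.) with focal (2., 1.) and principal point (-.8, -.2) lands at
  # (2. * 4. / 10. - .8, 1. * 2. / 10. - .2) = (0., 0.).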
def test_project_unproject_random(self):
"""Tests that projecting and unprojecting gives an identity mapping."""
tensor_size = np.random.randint(3)
tensor_shape = np.random.randint(1, 10, size=(tensor_size)).tolist()
random_point_3d = np.random.normal(size=tensor_shape + [3])
random_focal = np.random.normal(size=tensor_shape + [2])
random_principal_point = np.random.normal(size=tensor_shape + [2])
random_depth = np.expand_dims(random_point_3d[..., 2], axis=-1)
point_2d = perspective.project(random_point_3d, random_focal,
random_principal_point)
point_3d = perspective.unproject(point_2d, random_depth, random_focal,
random_principal_point)
self.assertAllClose(random_point_3d, point_3d, rtol=1e-3)
def test_project_ray_random(self):
"""Tests that that ray is pointing toward the correct location."""
tensor_size = np.random.randint(3)
tensor_shape = np.random.randint(1, 10, size=(tensor_size)).tolist()
random_point_3d = np.random.normal(size=tensor_shape + [3])
random_focal = np.random.normal(size=tensor_shape + [2])
random_principal_point = np.random.normal(size=tensor_shape + [2])
random_depth = np.expand_dims(random_point_3d[..., 2], axis=-1)
point_2d = perspective.project(random_point_3d, random_focal,
random_principal_point)
ray_3d = perspective.ray(point_2d, random_focal, random_principal_point)
ray_3d = random_depth * ray_3d
self.assertAllClose(random_point_3d, ray_3d, rtol=1e-3)
@parameterized.parameters(
((2,), (2,), (2,)),
((2, 2), (2, 2), (2, 2)),
((3, 2), (1, 2), (2,)),
((None, 2), (None, 2), (None, 2)),
)
  def test_ray_exception_not_raised(self, *shapes):
"""Tests that the shape exceptions are not raised."""
self.assert_exception_is_not_raised(perspective.ray, shapes)
@parameterized.parameters(
("must have exactly 2 dimensions in axis -1", (None,), (2,), (2,)),
("must have exactly 2 dimensions in axis -1", (2,), (None,), (2,)),
("must have exactly 2 dimensions in axis -1", (2,), (2,), (None,)),
("Not all batch dimensions are broadcast-compatible.", (3, 2), (1, 2),
(2, 2)),
)
  def test_ray_exception_raised(self, error_msg, *shapes):
"""Tests that the shape exceptions are properly raised."""
self.assert_exception_is_raised(perspective.ray, error_msg, shapes)
@parameterized.parameters(
(((0., 0.), (1., 1.), (0., 0.)), ((0., 0., 1.),)),
(((0., 0.), (1., 1.), (-1., -2.)), ((1., 2., 1.),)),
(((0., 0.), (10., 1.), (-1., -2.)), ((.1, 2., 1.),)),
(((-2., -4.), (10., 1.), (-3., -6.)), ((.1, 2., 1.),)),
)
def test_ray_preset(self, test_inputs, test_outputs):
"""Tests that the ray function gives the correct result."""
self.assert_output_is_correct(perspective.ray, test_inputs, test_outputs)
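  # The ray presets follow the inverse pinhole mapping at unit depth: pixel
  # (u, v) maps to ((u - cx) / fx, (v - cy) / fy, 1.); e.g. (-2., -4.) with
  # focal (10., 1.) and principal point (-3., -6.) gives
  # ((-2. + 3.) / 10., (-4. + 6.) / 1., 1.) = (.1, 2., 1.).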
def test_ray_project_random(self):
"""Tests that the end point of the ray projects at the good location."""
tensor_size = np.random.randint(3)
tensor_shape = np.random.randint(1, 10, size=(tensor_size)).tolist()
random_point_2d = np.random.normal(size=tensor_shape + [2])
random_focal = np.random.normal(size=tensor_shape + [2])
random_principal_point = np.random.normal(size=tensor_shape + [2])
ray_3d = perspective.ray(random_point_2d, random_focal,
random_principal_point)
point_2d = perspective.project(ray_3d, random_focal, random_principal_point)
self.assertAllClose(random_point_2d, point_2d, rtol=1e-3)
@parameterized.parameters(
((2,), (1,), (2,), (2,)),
((2, 2), (2, 1), (2, 2), (2, 2)),
((None, 2), (None, 1), (None, 2), (None, 2)),
)
def test_unproject_exception_not_raised(self, *shapes):
"""Tests that the shape exceptions are not raised."""
self.assert_exception_is_not_raised(perspective.unproject, shapes)
@parameterized.parameters(
("must have exactly 2 dimensions in axis -1", (None,), (1,), (2,), (2,)),
("must have exactly 1 dimensions in axis -1", (2,), (None,), (2,), (2,)),
("must have exactly 2 dimensions in axis -1", (2,), (1,), (None,), (2,)),
("must have exactly 2 dimensions in axis -1", (2,), (1,), (2,), (None,)),
("Not all batch dimensions are identical.", (1, 2), (2, 1), (2, 2),
(2, 2)),
)
def test_unproject_exception_raised(self, error_msg, *shapes):
"""Tests that the shape exceptions are properly raised."""
self.assert_exception_is_raised(perspective.unproject, error_msg, shapes)
@parameterized.parameters(
(((0., 0.), (1.,), (1., 1.), (0., 0.)), ((0., 0., 1.),)),
(((0., 0.), (1.,), (1., 1.), (-4., -2.)), ((4., 2., 1.),)),
(((0., 0.), (10.,), (1., 1.), (-.4, -.2)), ((4., 2., 10.),)),
(((0., 0.), (10.,), (2., 1.), (-.8, -.2)), ((4., 2., 10.),)),
(((0., .2), (10.,), (2., 1.), (-.8, 0.)), ((4., 2., 10.),)),
)
def test_unproject_preset(self, test_inputs, test_outputs):
"""Tests that the unproject function gives the correct result."""
self.assert_output_is_correct(perspective.unproject, test_inputs,
test_outputs)
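  # unproject() inverts project() at a known depth d: pixel (u, v) maps back
  # to ((u - cx) * d / fx, (v - cy) * d / fy, d); e.g. (0., .2) at depth 10.
  # with focal (2., 1.) and principal point (-.8, 0.) recovers
  # ((0. + .8) * 10. / 2., (.2 - 0.) * 10. / 1., 10.) = (4., 2., 10.).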
def test_unproject_project_random(self):
"""Tests that unprojecting and projecting gives and identity mapping."""
tensor_size = np.random.randint(3)
tensor_shape = np.random.randint(1, 10, size=(tensor_size)).tolist()
random_point_2d = np.random.normal(size=tensor_shape + [2])
random_focal = np.random.normal(size=tensor_shape + [2])
random_principal_point = np.random.normal(size=tensor_shape + [2])
random_depth = np.random.normal(size=tensor_shape + [1])
point_3d = perspective.unproject(random_point_2d, random_depth,
random_focal, random_principal_point)
point_2d = perspective.project(point_3d, random_focal,
random_principal_point)
self.assertAllClose(random_point_2d, point_2d, rtol=1e-3)
def test_unproject_ray_random(self):
"""Tests that that ray is pointing toward the correct location."""
tensor_size = np.random.randint(3)
tensor_shape = np.random.randint(1, 10, size=(tensor_size)).tolist()
random_point_2d = np.random.normal(size=tensor_shape + [2])
random_focal = np.random.normal(size=tensor_shape + [2])
random_principal_point = np.random.normal(size=tensor_shape + [2])
random_depth = np.random.normal(size=tensor_shape + [1])
point_3d = perspective.unproject(random_point_2d, random_depth,
random_focal, random_principal_point)
ray_3d = perspective.ray(random_point_2d, random_focal,
random_principal_point)
ray_3d = random_depth * ray_3d
self.assertAllClose(point_3d, ray_3d, rtol=1e-3)
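def _np_project_sketch(point_3d, focal, principal_point):
  """NumPy restatement of the pinhole projection the tests above exercise.

  Illustrative sketch only: the tested implementation is perspective.project;
  this helper's name and signature are ours, not part of the API.
  """
  xy = point_3d[..., :2]
  z = point_3d[..., 2:3]
  return focal * xy / z + principal_point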
if __name__ == "__main__":
test_case.main()
| -1 |
tensorflow/graphics | 489 | Enforce `Framebuffer` to accept only tensors with single batch dimension. | Enforce `Framebuffer` to accept only tensors with single batch dimension.
| copybara-service[bot] | "2021-02-03T21:06:22Z" | "2021-02-12T23:59:48Z" | 3a4f1952ed967fb884dc031eeda6dac3fbefbe52 | b7a2bf260d6fcf924fddcbb6dba36c72ece66990 | Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
| ./tensorflow_graphics/geometry/transformation/tests/test_data.py | # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module with test data for transformation tests."""
import numpy as np
ANGLE_0 = np.array((0.,))
ANGLE_45 = np.array((np.pi / 4.,))
ANGLE_90 = np.array((np.pi / 2.,))
ANGLE_180 = np.array((np.pi,))
AXIS_2D_0 = np.array((0., 0.))
AXIS_2D_X = np.array((1., 0.))
AXIS_2D_Y = np.array((0., 1.))
def _rotation_2d_x(angle):
"""Creates a 2d rotation matrix.
Args:
angle: The angle.
Returns:
The 2d rotation matrix.
"""
angle = angle.item()
return np.array(((np.cos(angle), -np.sin(angle)),
(np.sin(angle), np.cos(angle)))) # pyformat: disable
MAT_2D_ID = np.eye(2)
MAT_2D_45 = _rotation_2d_x(ANGLE_45)
MAT_2D_90 = _rotation_2d_x(ANGLE_90)
MAT_2D_180 = _rotation_2d_x(ANGLE_180)
AXIS_3D_0 = np.array((0., 0., 0.))
AXIS_3D_X = np.array((1., 0., 0.))
AXIS_3D_Y = np.array((0., 1., 0.))
AXIS_3D_Z = np.array((0., 0., 1.))
def _axis_angle_to_quaternion(axis, angle):
"""Converts an axis-angle representation to a quaternion.
Args:
axis: The axis of rotation.
angle: The angle.
Returns:
The quaternion.
"""
quat = np.zeros(4)
quat[0:3] = axis * np.sin(0.5 * angle)
quat[3] = np.cos(0.5 * angle)
return quat
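# Illustrative sanity check (a sketch added for readability, safe to remove):
# for a unit rotation axis the quaternion above is unit-norm, since
# sin(a / 2)**2 + cos(a / 2)**2 = 1.
assert np.isclose(
    np.linalg.norm(_axis_angle_to_quaternion(AXIS_3D_X, ANGLE_90)), 1.0)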
QUAT_ID = _axis_angle_to_quaternion(AXIS_3D_0, ANGLE_0)
QUAT_X_45 = _axis_angle_to_quaternion(AXIS_3D_X, ANGLE_45)
QUAT_X_90 = _axis_angle_to_quaternion(AXIS_3D_X, ANGLE_90)
QUAT_X_180 = _axis_angle_to_quaternion(AXIS_3D_X, ANGLE_180)
QUAT_Y_45 = _axis_angle_to_quaternion(AXIS_3D_Y, ANGLE_45)
QUAT_Y_90 = _axis_angle_to_quaternion(AXIS_3D_Y, ANGLE_90)
QUAT_Y_180 = _axis_angle_to_quaternion(AXIS_3D_Y, ANGLE_180)
QUAT_Z_45 = _axis_angle_to_quaternion(AXIS_3D_Z, ANGLE_45)
QUAT_Z_90 = _axis_angle_to_quaternion(AXIS_3D_Z, ANGLE_90)
QUAT_Z_180 = _axis_angle_to_quaternion(AXIS_3D_Z, ANGLE_180)
def _rotation_3d_x(angle):
"""Creates a 3d rotation matrix around the x axis.
Args:
angle: The angle.
Returns:
The 3d rotation matrix.
"""
angle = angle.item()
return np.array(((1., 0., 0.),
(0., np.cos(angle), -np.sin(angle)),
(0., np.sin(angle), np.cos(angle)))) # pyformat: disable
def _rotation_3d_y(angle):
"""Creates a 3d rotation matrix around the y axis.
Args:
angle: The angle.
Returns:
The 3d rotation matrix.
"""
angle = angle.item()
return np.array(((np.cos(angle), 0., np.sin(angle)),
(0., 1., 0.),
(-np.sin(angle), 0., np.cos(angle)))) # pyformat: disable
def _rotation_3d_z(angle):
"""Creates a 3d rotation matrix around the z axis.
Args:
angle: The angle.
Returns:
The 3d rotation matrix.
"""
angle = angle.item()
return np.array(((np.cos(angle), -np.sin(angle), 0.),
(np.sin(angle), np.cos(angle), 0.),
(0., 0., 1.))) # pyformat: disable
MAT_3D_ID = np.eye(3)
MAT_3D_X_45 = _rotation_3d_x(ANGLE_45)
MAT_3D_X_90 = _rotation_3d_x(ANGLE_90)
MAT_3D_X_180 = _rotation_3d_x(ANGLE_180)
MAT_3D_Y_45 = _rotation_3d_y(ANGLE_45)
MAT_3D_Y_90 = _rotation_3d_y(ANGLE_90)
MAT_3D_Y_180 = _rotation_3d_y(ANGLE_180)
MAT_3D_Z_45 = _rotation_3d_z(ANGLE_45)
MAT_3D_Z_90 = _rotation_3d_z(ANGLE_90)
MAT_3D_Z_180 = _rotation_3d_z(ANGLE_180)
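# Illustrative consistency check (a sketch, not consumed by any test):
# composing two 45-degree rotations about z reproduces the 90-degree preset.
assert np.allclose(MAT_3D_Z_45.dot(MAT_3D_Z_45), MAT_3D_Z_90)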
| # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module with test data for transformation tests."""
import numpy as np
ANGLE_0 = np.array((0.,))
ANGLE_45 = np.array((np.pi / 4.,))
ANGLE_90 = np.array((np.pi / 2.,))
ANGLE_180 = np.array((np.pi,))
AXIS_2D_0 = np.array((0., 0.))
AXIS_2D_X = np.array((1., 0.))
AXIS_2D_Y = np.array((0., 1.))
def _rotation_2d_x(angle):
"""Creates a 2d rotation matrix.
Args:
angle: The angle.
Returns:
The 2d rotation matrix.
"""
angle = angle.item()
return np.array(((np.cos(angle), -np.sin(angle)),
(np.sin(angle), np.cos(angle)))) # pyformat: disable
MAT_2D_ID = np.eye(2)
MAT_2D_45 = _rotation_2d_x(ANGLE_45)
MAT_2D_90 = _rotation_2d_x(ANGLE_90)
MAT_2D_180 = _rotation_2d_x(ANGLE_180)
AXIS_3D_0 = np.array((0., 0., 0.))
AXIS_3D_X = np.array((1., 0., 0.))
AXIS_3D_Y = np.array((0., 1., 0.))
AXIS_3D_Z = np.array((0., 0., 1.))
def _axis_angle_to_quaternion(axis, angle):
"""Converts an axis-angle representation to a quaternion.
Args:
axis: The axis of rotation.
angle: The angle.
Returns:
The quaternion.
"""
quat = np.zeros(4)
quat[0:3] = axis * np.sin(0.5 * angle)
quat[3] = np.cos(0.5 * angle)
return quat
QUAT_ID = _axis_angle_to_quaternion(AXIS_3D_0, ANGLE_0)
QUAT_X_45 = _axis_angle_to_quaternion(AXIS_3D_X, ANGLE_45)
QUAT_X_90 = _axis_angle_to_quaternion(AXIS_3D_X, ANGLE_90)
QUAT_X_180 = _axis_angle_to_quaternion(AXIS_3D_X, ANGLE_180)
QUAT_Y_45 = _axis_angle_to_quaternion(AXIS_3D_Y, ANGLE_45)
QUAT_Y_90 = _axis_angle_to_quaternion(AXIS_3D_Y, ANGLE_90)
QUAT_Y_180 = _axis_angle_to_quaternion(AXIS_3D_Y, ANGLE_180)
QUAT_Z_45 = _axis_angle_to_quaternion(AXIS_3D_Z, ANGLE_45)
QUAT_Z_90 = _axis_angle_to_quaternion(AXIS_3D_Z, ANGLE_90)
QUAT_Z_180 = _axis_angle_to_quaternion(AXIS_3D_Z, ANGLE_180)
def _rotation_3d_x(angle):
"""Creates a 3d rotation matrix around the x axis.
Args:
angle: The angle.
Returns:
The 3d rotation matrix.
"""
angle = angle.item()
return np.array(((1., 0., 0.),
(0., np.cos(angle), -np.sin(angle)),
(0., np.sin(angle), np.cos(angle)))) # pyformat: disable
def _rotation_3d_y(angle):
"""Creates a 3d rotation matrix around the y axis.
Args:
angle: The angle.
Returns:
The 3d rotation matrix.
"""
angle = angle.item()
return np.array(((np.cos(angle), 0., np.sin(angle)),
(0., 1., 0.),
(-np.sin(angle), 0., np.cos(angle)))) # pyformat: disable
def _rotation_3d_z(angle):
"""Creates a 3d rotation matrix around the z axis.
Args:
angle: The angle.
Returns:
The 3d rotation matrix.
"""
angle = angle.item()
return np.array(((np.cos(angle), -np.sin(angle), 0.),
(np.sin(angle), np.cos(angle), 0.),
(0., 0., 1.))) # pyformat: disable
MAT_3D_ID = np.eye(3)
MAT_3D_X_45 = _rotation_3d_x(ANGLE_45)
MAT_3D_X_90 = _rotation_3d_x(ANGLE_90)
MAT_3D_X_180 = _rotation_3d_x(ANGLE_180)
MAT_3D_Y_45 = _rotation_3d_y(ANGLE_45)
MAT_3D_Y_90 = _rotation_3d_y(ANGLE_90)
MAT_3D_Y_180 = _rotation_3d_y(ANGLE_180)
MAT_3D_Z_45 = _rotation_3d_z(ANGLE_45)
MAT_3D_Z_90 = _rotation_3d_z(ANGLE_90)
MAT_3D_Z_180 = _rotation_3d_z(ANGLE_180)
| -1 |
tensorflow/graphics | 489 | Enforce `Framebuffer` to accept only tensors with single batch dimension. | Enforce `Framebuffer` to accept only tensors with single batch dimension.
| copybara-service[bot] | "2021-02-03T21:06:22Z" | "2021-02-12T23:59:48Z" | 3a4f1952ed967fb884dc031eeda6dac3fbefbe52 | b7a2bf260d6fcf924fddcbb6dba36c72ece66990 | Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
| ./tensorflow_graphics/io/tests/__init__.py | # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| -1 |
tensorflow/graphics | 489 | Enforce `Framebuffer` to accept only tensors with single batch dimension. | Enforce `Framebuffer` to accept only tensors with single batch dimension.
| copybara-service[bot] | "2021-02-03T21:06:22Z" | "2021-02-12T23:59:48Z" | 3a4f1952ed967fb884dc031eeda6dac3fbefbe52 | b7a2bf260d6fcf924fddcbb6dba36c72ece66990 | Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
| ./tensorflow_graphics/math/tests/math_helpers_test.py | # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for math_helpers."""
import sys
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow_graphics.math import math_helpers
from tensorflow_graphics.util import asserts
from tensorflow_graphics.util import test_case
class MathTest(test_case.TestCase):
@parameterized.parameters(
(((0.0, 0.0, 0.0),), ((0.0, np.pi / 2.0, 0.0),)),
(((2.0, 0.0, 0.0),), ((2.0, np.pi / 2.0, 0.0),)),
(((0.0, 1.0, 0.0),), ((1.0, np.pi / 2.0, np.pi / 2.0),)),
(((0.0, 0.0, 1.0),), ((1.0, 0.0, 0.0),)),
(((-1.0, 0.0, 0.0),), ((1.0, np.pi / 2.0, np.pi),)),
(((0.0, -1.0, 0.0),), ((1.0, np.pi / 2.0, -np.pi / 2.0),)),
(((0.0, 0.0, -1.0),), ((1.0, np.pi, 0.0),)),
)
def test_cartesian_to_spherical_coordinates_preset(self, test_inputs,
test_outputs):
"""Tests that cartesian_to_spherical_coordinates behaves as expected."""
self.assert_output_is_correct(
math_helpers.cartesian_to_spherical_coordinates, test_inputs,
test_outputs)
@parameterized.parameters(
((3,),),
((None, 3),),
)
def test_cartesian_to_spherical_coordinates_not_raised(self, *shape):
"""Tests that the shape exceptions are not raised."""
self.assert_exception_is_not_raised(
math_helpers.cartesian_to_spherical_coordinates, shape)
@parameterized.parameters(
("must have exactly 3 dimensions in axis -1", (1,)),)
def test_cartesian_to_spherical_coordinates_raised(self, error_msg, *shape):
"""Tests that the shape exception is raised."""
self.assert_exception_is_raised(
math_helpers.cartesian_to_spherical_coordinates, error_msg, shape)
def test_cartesian_to_spherical_coordinates_jacobian_random(self):
"""Test the Jacobian of the spherical_to_cartesian_coordinates function."""
tensor_size = np.random.randint(3)
tensor_shape = np.random.randint(1, 10, size=(tensor_size)).tolist()
point_init = np.random.uniform(-10.0, 10.0, size=tensor_shape + [3])
self.assert_jacobian_is_correct_fn(
math_helpers.cartesian_to_spherical_coordinates, [point_init])
@parameterized.parameters(
(((1.0, 1.0, 1.0),),),
(((1.0, 0.0, 0.0),),),
(((0.0, 1.0, 0.0),),),
)
def test_cartesian_to_spherical_coordinates_jacobian_preset(self, cartesian):
"""Test the Jacobian of the spherical_to_cartesian_coordinates function."""
point_init = np.asarray(cartesian)
self.assert_jacobian_is_correct_fn(
math_helpers.cartesian_to_spherical_coordinates, [point_init])
@parameterized.parameters(
(((1.0, 1.0, 0.0),), ((np.sqrt(2.0), np.pi / 2.0, np.pi / 4.0),)),
(((1.0, 0.0, 0.0),), ((1.0, np.pi / 2.0, 0.0),)),
(((0.0, 1.0, 0.0),), ((1.0, np.pi / 2.0, np.pi / 2.0),)),
(((0.0, 0.0, 1.0),), ((1.0, 0.0, 0.0),)),
(((0.0, 0.0, 0.0),), ((0.0, np.pi / 2.0, 0.0),)),
)
def test_cartesian_to_spherical_coordinates_values_preset(
self, test_inputs, test_outputs):
"""Test the Jacobian of the spherical_to_cartesian_coordinates function."""
self.assert_output_is_correct(
math_helpers.cartesian_to_spherical_coordinates, test_inputs,
test_outputs)
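  # Convention behind the presets above (a reading aid, not an API contract):
  # cartesian_to_spherical_coordinates returns (r, theta, phi) with
  # r = ||p||, theta the polar angle measured from +z and phi the azimuth in
  # the x-y plane, e.g. (1., 1., 0.) -> (sqrt(2.), pi / 2., pi / 4.).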
@parameterized.parameters(
(((0, 1, 5, 6, 15.0),), ((1, 1, 15, 48, 2027025.0),)),)
def test_double_factorial_preset(self, test_inputs, test_outputs):
"""Tests that double_factorial generates expected results."""
self.assert_output_is_correct(math_helpers.double_factorial, test_inputs,
test_outputs)
@parameterized.parameters(
(((0, 1, 2, 3, 4.0),), ((1, 1, 2, 6, 24.0),)),)
def test_factorial_preset(self, test_inputs, test_outputs):
"""Tests that double_factorial generates expected results."""
self.assert_output_is_correct(math_helpers.factorial, test_inputs,
test_outputs)
@parameterized.parameters(
(((2.0, np.pi / 2.0, 0.0),), ((2.0, 0.0, 0.0),)),
(((2.0, -3.0 * np.pi / 2.0, 0.0),), ((2.0, 0.0, 0.0),)),
(((1.0, np.pi / 2.0, np.pi / 2.0),), ((0.0, 1.0, 0.0),)),
(((1.0, 0.0, 0.0),), ((0.0, 0.0, 1.0),)),
)
def test_spherical_to_cartesian_coordinates_preset(self, test_inputs,
test_outputs):
"""Tests that spherical_to_cartesian_coordinates behaves as expected."""
self.assert_output_is_correct(
math_helpers.spherical_to_cartesian_coordinates, test_inputs,
test_outputs)
@parameterized.parameters(
((3,),),
((None, 3),),
)
def test_spherical_to_cartesian_coordinates_not_raised(self, *shape):
"""Tests that the shape exceptions are not raised."""
self.assert_exception_is_not_raised(
math_helpers.spherical_to_cartesian_coordinates, shape)
@parameterized.parameters(
("must have exactly 3 dimensions in axis -1", (1,)),)
def test_spherical_to_cartesian_coordinates_raised(self, error_msg, *shape):
"""Tests that the shape exception is raised."""
self.assert_exception_is_raised(
math_helpers.spherical_to_cartesian_coordinates, error_msg, shape)
def test_spherical_to_cartesian_coordinates_jacobian_random(self):
"""Test the Jacobian of the spherical_to_cartesian_coordinates function."""
tensor_size = np.random.randint(3)
tensor_shape = np.random.randint(1, 10, size=(tensor_size)).tolist()
r_init = np.random.uniform(0.0, 10.0, size=tensor_shape + [1])
theta_init = np.random.uniform(
-np.pi / 2.0, np.pi / 2.0, size=tensor_shape + [1])
phi_init = np.random.uniform(-np.pi, np.pi, size=tensor_shape + [1])
data_init = np.stack((r_init, theta_init, phi_init), axis=-1)
self.assert_jacobian_is_correct_fn(
math_helpers.spherical_to_cartesian_coordinates, [data_init])
@parameterized.parameters(
(((0.0, 0.0),), ((1.0, 0.0, 0.0),)),
(((1.0, 0.0),), ((1.0, np.pi, 0.0),)),
(((0.0, 1.0),), ((1.0, 0.0, 2.0 * np.pi),)),
(((1.0, 1.0),), ((1.0, np.pi, 2.0 * np.pi),)),
)
def test_square_to_spherical_coordinates_preset(self, test_inputs,
test_outputs):
"""Tests that square_to_spherical_coordinates generates expected results."""
self.assert_output_is_correct(math_helpers.square_to_spherical_coordinates,
test_inputs, test_outputs)
def test_square_to_spherical_coordinates_jacobian_random(self):
"""Tests the Jacobian of square_to_spherical_coordinates."""
epsilon = 1e-3
point_2d_init = np.random.uniform(epsilon, 1.0 - epsilon, size=(10, 2))
self.assert_jacobian_is_correct_fn(
math_helpers.square_to_spherical_coordinates, [point_2d_init],
atol=1e-3)
def test_square_to_spherical_coordinates_range_exception_raised(self):
"""Tests that the exceptions are raised correctly."""
point_2d_below = np.random.uniform(-1.0, -sys.float_info.epsilon, size=(2,))
point_2d_above = np.random.uniform(
1.0 + asserts.select_eps_for_addition(tf.float32), 2.0, size=(2,))
with self.assertRaises(tf.errors.InvalidArgumentError):
self.evaluate(
math_helpers.square_to_spherical_coordinates(point_2d_below))
with self.assertRaises(tf.errors.InvalidArgumentError):
self.evaluate(
math_helpers.square_to_spherical_coordinates(point_2d_above))
@parameterized.parameters(
((2,),),
((None, 2),),
)
def test_square_to_spherical_coordinates_shape_exception_not_raised(
self, *shape):
"""Tests that the shape exceptions are not raised."""
self.assert_exception_is_not_raised(
math_helpers.square_to_spherical_coordinates, shape)
@parameterized.parameters(
("must have exactly 2 dimensions in axis -1", (1,)),
("must have exactly 2 dimensions in axis -1", (3,)),
)
def test_square_to_spherical_coordinates_shape_exception_raised(
self, error_msg, *shape):
"""Tests that the shape exception is raised."""
self.assert_exception_is_raised(
math_helpers.square_to_spherical_coordinates, error_msg, shape)
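def _np_cartesian_to_spherical_sketch(point):
  """Minimal NumPy restatement of the mapping tested above.

  Illustrative sketch only: the tested implementation is
  math_helpers.cartesian_to_spherical_coordinates, and the epsilon guard here
  is our assumption to keep the origin well-defined, matching the
  (0., pi / 2., 0.) preset.
  """
  x, y, z = point[..., 0], point[..., 1], point[..., 2]
  r = np.sqrt(x * x + y * y + z * z)
  theta = np.arccos(np.clip(z / np.maximum(r, 1e-12), -1.0, 1.0))
  phi = np.arctan2(y, x)
  return np.stack((r, theta, phi), axis=-1)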
if __name__ == "__main__":
test_case.main()
| # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for math_helpers."""
import sys
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow_graphics.math import math_helpers
from tensorflow_graphics.util import asserts
from tensorflow_graphics.util import test_case
class MathTest(test_case.TestCase):
@parameterized.parameters(
(((0.0, 0.0, 0.0),), ((0.0, np.pi / 2.0, 0.0),)),
(((2.0, 0.0, 0.0),), ((2.0, np.pi / 2.0, 0.0),)),
(((0.0, 1.0, 0.0),), ((1.0, np.pi / 2.0, np.pi / 2.0),)),
(((0.0, 0.0, 1.0),), ((1.0, 0.0, 0.0),)),
(((-1.0, 0.0, 0.0),), ((1.0, np.pi / 2.0, np.pi),)),
(((0.0, -1.0, 0.0),), ((1.0, np.pi / 2.0, -np.pi / 2.0),)),
(((0.0, 0.0, -1.0),), ((1.0, np.pi, 0.0),)),
)
def test_cartesian_to_spherical_coordinates_preset(self, test_inputs,
test_outputs):
"""Tests that cartesian_to_spherical_coordinates behaves as expected."""
self.assert_output_is_correct(
math_helpers.cartesian_to_spherical_coordinates, test_inputs,
test_outputs)
@parameterized.parameters(
((3,),),
((None, 3),),
)
def test_cartesian_to_spherical_coordinates_not_raised(self, *shape):
"""Tests that the shape exceptions are not raised."""
self.assert_exception_is_not_raised(
math_helpers.cartesian_to_spherical_coordinates, shape)
@parameterized.parameters(
("must have exactly 3 dimensions in axis -1", (1,)),)
def test_cartesian_to_spherical_coordinates_raised(self, error_msg, *shape):
"""Tests that the shape exception is raised."""
self.assert_exception_is_raised(
math_helpers.cartesian_to_spherical_coordinates, error_msg, shape)
def test_cartesian_to_spherical_coordinates_jacobian_random(self):
"""Test the Jacobian of the spherical_to_cartesian_coordinates function."""
tensor_size = np.random.randint(3)
tensor_shape = np.random.randint(1, 10, size=(tensor_size)).tolist()
point_init = np.random.uniform(-10.0, 10.0, size=tensor_shape + [3])
self.assert_jacobian_is_correct_fn(
math_helpers.cartesian_to_spherical_coordinates, [point_init])
@parameterized.parameters(
(((1.0, 1.0, 1.0),),),
(((1.0, 0.0, 0.0),),),
(((0.0, 1.0, 0.0),),),
)
def test_cartesian_to_spherical_coordinates_jacobian_preset(self, cartesian):
"""Test the Jacobian of the spherical_to_cartesian_coordinates function."""
point_init = np.asarray(cartesian)
self.assert_jacobian_is_correct_fn(
math_helpers.cartesian_to_spherical_coordinates, [point_init])
@parameterized.parameters(
(((1.0, 1.0, 0.0),), ((np.sqrt(2.0), np.pi / 2.0, np.pi / 4.0),)),
(((1.0, 0.0, 0.0),), ((1.0, np.pi / 2.0, 0.0),)),
(((0.0, 1.0, 0.0),), ((1.0, np.pi / 2.0, np.pi / 2.0),)),
(((0.0, 0.0, 1.0),), ((1.0, 0.0, 0.0),)),
(((0.0, 0.0, 0.0),), ((0.0, np.pi / 2.0, 0.0),)),
)
def test_cartesian_to_spherical_coordinates_values_preset(
self, test_inputs, test_outputs):
"""Test the Jacobian of the spherical_to_cartesian_coordinates function."""
self.assert_output_is_correct(
math_helpers.cartesian_to_spherical_coordinates, test_inputs,
test_outputs)
@parameterized.parameters(
(((0, 1, 5, 6, 15.0),), ((1, 1, 15, 48, 2027025.0),)),)
def test_double_factorial_preset(self, test_inputs, test_outputs):
"""Tests that double_factorial generates expected results."""
self.assert_output_is_correct(math_helpers.double_factorial, test_inputs,
test_outputs)
@parameterized.parameters(
(((0, 1, 2, 3, 4.0),), ((1, 1, 2, 6, 24.0),)),)
def test_factorial_preset(self, test_inputs, test_outputs):
"""Tests that double_factorial generates expected results."""
self.assert_output_is_correct(math_helpers.factorial, test_inputs,
test_outputs)
@parameterized.parameters(
(((2.0, np.pi / 2.0, 0.0),), ((2.0, 0.0, 0.0),)),
(((2.0, -3.0 * np.pi / 2.0, 0.0),), ((2.0, 0.0, 0.0),)),
(((1.0, np.pi / 2.0, np.pi / 2.0),), ((0.0, 1.0, 0.0),)),
(((1.0, 0.0, 0.0),), ((0.0, 0.0, 1.0),)),
)
def test_spherical_to_cartesian_coordinates_preset(self, test_inputs,
test_outputs):
"""Tests that spherical_to_cartesian_coordinates behaves as expected."""
self.assert_output_is_correct(
math_helpers.spherical_to_cartesian_coordinates, test_inputs,
test_outputs)
@parameterized.parameters(
((3,),),
((None, 3),),
)
def test_spherical_to_cartesian_coordinates_not_raised(self, *shape):
"""Tests that the shape exceptions are not raised."""
self.assert_exception_is_not_raised(
math_helpers.spherical_to_cartesian_coordinates, shape)
@parameterized.parameters(
("must have exactly 3 dimensions in axis -1", (1,)),)
def test_spherical_to_cartesian_coordinates_raised(self, error_msg, *shape):
"""Tests that the shape exception is raised."""
self.assert_exception_is_raised(
math_helpers.spherical_to_cartesian_coordinates, error_msg, shape)
def test_spherical_to_cartesian_coordinates_jacobian_random(self):
"""Test the Jacobian of the spherical_to_cartesian_coordinates function."""
tensor_size = np.random.randint(3)
tensor_shape = np.random.randint(1, 10, size=(tensor_size)).tolist()
r_init = np.random.uniform(0.0, 10.0, size=tensor_shape + [1])
theta_init = np.random.uniform(
-np.pi / 2.0, np.pi / 2.0, size=tensor_shape + [1])
phi_init = np.random.uniform(-np.pi, np.pi, size=tensor_shape + [1])
data_init = np.stack((r_init, theta_init, phi_init), axis=-1)
self.assert_jacobian_is_correct_fn(
math_helpers.spherical_to_cartesian_coordinates, [data_init])
@parameterized.parameters(
(((0.0, 0.0),), ((1.0, 0.0, 0.0),)),
(((1.0, 0.0),), ((1.0, np.pi, 0.0),)),
(((0.0, 1.0),), ((1.0, 0.0, 2.0 * np.pi),)),
(((1.0, 1.0),), ((1.0, np.pi, 2.0 * np.pi),)),
)
def test_square_to_spherical_coordinates_preset(self, test_inputs,
test_outputs):
"""Tests that square_to_spherical_coordinates generates expected results."""
self.assert_output_is_correct(math_helpers.square_to_spherical_coordinates,
test_inputs, test_outputs)
def test_square_to_spherical_coordinates_jacobian_random(self):
"""Tests the Jacobian of square_to_spherical_coordinates."""
epsilon = 1e-3
point_2d_init = np.random.uniform(epsilon, 1.0 - epsilon, size=(10, 2))
self.assert_jacobian_is_correct_fn(
math_helpers.square_to_spherical_coordinates, [point_2d_init],
atol=1e-3)
def test_square_to_spherical_coordinates_range_exception_raised(self):
"""Tests that the exceptions are raised correctly."""
point_2d_below = np.random.uniform(-1.0, -sys.float_info.epsilon, size=(2,))
point_2d_above = np.random.uniform(
1.0 + asserts.select_eps_for_addition(tf.float32), 2.0, size=(2,))
with self.assertRaises(tf.errors.InvalidArgumentError):
self.evaluate(
math_helpers.square_to_spherical_coordinates(point_2d_below))
with self.assertRaises(tf.errors.InvalidArgumentError):
self.evaluate(
math_helpers.square_to_spherical_coordinates(point_2d_above))
@parameterized.parameters(
((2,),),
((None, 2),),
)
def test_square_to_spherical_coordinates_shape_exception_not_raised(
self, *shape):
"""Tests that the shape exceptions are not raised."""
self.assert_exception_is_not_raised(
math_helpers.square_to_spherical_coordinates, shape)
@parameterized.parameters(
("must have exactly 2 dimensions in axis -1", (1,)),
("must have exactly 2 dimensions in axis -1", (3,)),
)
def test_square_to_spherical_coordinates_shape_exception_raised(
self, error_msg, *shape):
"""Tests that the shape exception is raised."""
self.assert_exception_is_raised(
math_helpers.square_to_spherical_coordinates, error_msg, shape)
if __name__ == "__main__":
test_case.main()
| -1 |
tensorflow/graphics | 489 | Enforce `Framebuffer` to accept only tensors with single batch dimension. | Enforce `Framebuffer` to accept only tensors with single batch dimension.
| copybara-service[bot] | "2021-02-03T21:06:22Z" | "2021-02-12T23:59:48Z" | 3a4f1952ed967fb884dc031eeda6dac3fbefbe52 | b7a2bf260d6fcf924fddcbb6dba36c72ece66990 | Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
| ./tensorflow_graphics/datasets/pix3d/pix3d_test.py | # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for the Pix3D dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow_datasets.public_api as tfds
from tensorflow_graphics.datasets import pix3d
class Pix3dTest(tfds.testing.DatasetBuilderTestCase):
"""Test Cases for Pix3D Dataset implementation."""
DATASET_CLASS = pix3d.Pix3d
SPLITS = {
      'train': 2,  # Number of fake train examples
      'test': 1,  # Number of fake test examples
}
DL_EXTRACT_RESULT = ''
EXAMPLE_DIR = os.path.join(os.path.dirname(__file__), 'fakes')
MOCK_OUT_FORBIDDEN_OS_FUNCTIONS = False
# SKIP_CHECKSUMS = True
def setUp(self): # pylint: disable=invalid-name
super(Pix3dTest, self).setUp()
self.builder.TRAIN_SPLIT_IDX = os.path.join(self.EXAMPLE_DIR,
'pix3d_train.npy')
self.builder.TEST_SPLIT_IDX = os.path.join(self.EXAMPLE_DIR,
'pix3d_test.npy')
if __name__ == '__main__':
tfds.testing.test_main()
| # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for the Pix3D dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow_datasets.public_api as tfds
from tensorflow_graphics.datasets import pix3d
class Pix3dTest(tfds.testing.DatasetBuilderTestCase):
"""Test Cases for Pix3D Dataset implementation."""
DATASET_CLASS = pix3d.Pix3d
SPLITS = {
      'train': 2,  # Number of fake train examples
      'test': 1,  # Number of fake test examples
}
DL_EXTRACT_RESULT = ''
EXAMPLE_DIR = os.path.join(os.path.dirname(__file__), 'fakes')
MOCK_OUT_FORBIDDEN_OS_FUNCTIONS = False
# SKIP_CHECKSUMS = True
def setUp(self): # pylint: disable=invalid-name
super(Pix3dTest, self).setUp()
self.builder.TRAIN_SPLIT_IDX = os.path.join(self.EXAMPLE_DIR,
'pix3d_train.npy')
self.builder.TEST_SPLIT_IDX = os.path.join(self.EXAMPLE_DIR,
'pix3d_test.npy')
if __name__ == '__main__':
tfds.testing.test_main()
| -1 |
tensorflow/graphics | 489 | Enforce `Framebuffer` to accept only tensors with single batch dimension. | Enforce `Framebuffer` to accept only tensors with single batch dimension.
| copybara-service[bot] | "2021-02-03T21:06:22Z" | "2021-02-12T23:59:48Z" | 3a4f1952ed967fb884dc031eeda6dac3fbefbe52 | b7a2bf260d6fcf924fddcbb6dba36c72ece66990 | Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
| ./tensorflow_graphics/rendering/camera/tests/__init__.py | # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| -1 |
tensorflow/graphics | 489 | Enforce `Framebuffer` to accept only tensors with single batch dimension. | Enforce `Framebuffer` to accept only tensors with single batch dimension.
| copybara-service[bot] | "2021-02-03T21:06:22Z" | "2021-02-12T23:59:48Z" | 3a4f1952ed967fb884dc031eeda6dac3fbefbe52 | b7a2bf260d6fcf924fddcbb6dba36c72ece66990 | Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
| ./tensorflow_graphics/projects/neural_voxel_renderer/models.py | # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Definition of NVR+ keras model."""
import tensorflow.compat.v1 as tf
import tensorflow_graphics.projects.neural_voxel_renderer.layers as layer_utils
initializer = tf.keras.initializers.glorot_normal()
layers = tf.keras.layers
def unet_3x_with_res_in_mid(feat_in, out_filters, norm2d):
"""Helper function of a Unet with res blocks in the middle."""
e1 = layer_utils.residual_block_2d(feat_in,
nfilters=128,
strides=(2, 2),
normalization=norm2d) # 16x128
e2 = layer_utils.residual_block_2d(e1,
nfilters=256,
strides=(2, 2),
normalization=norm2d) # 8x256
e3 = layer_utils.residual_block_2d(e2,
nfilters=512,
strides=(2, 2),
normalization=norm2d) # 4x512
mid1 = layer_utils.residual_block_2d(e3,
nfilters=512,
strides=(1, 1),
                                       normalization=norm2d) # 4x512
mid2 = layer_utils.residual_block_2d(mid1,
nfilters=512,
strides=(1, 1),
                                       normalization=norm2d) # 4x512
mid3 = layer_utils.residual_block_2d(mid2,
nfilters=512,
strides=(1, 1),
                                       normalization=norm2d) # 4x512
d0 = layer_utils.upconv(mid3,
nfilters=256,
size=4,
strides=1) # 8x256
d1 = layers.concatenate([d0, e2]) # 8x512
d2 = layers.Conv2D(256,
kernel_size=4,
strides=(1, 1),
padding='same',
kernel_initializer=initializer)(d1) # 8x256
d3 = layer_utils.upconv(d2,
nfilters=128,
size=4,
strides=1) # 16x128
d4 = layers.concatenate([d3, e1]) # 16x256
d5 = layers.Conv2D(128,
kernel_size=4,
strides=(1, 1),
padding='same',
                     kernel_initializer=initializer)(d4) # 16x128
d6 = layer_utils.upconv(d5,
nfilters=64,
size=4,
strides=1) # 32x64
d7 = layers.concatenate([d6, feat_in]) # 32xN
d8 = layers.Conv2D(out_filters,
kernel_size=4,
strides=(1, 1),
padding='same',
kernel_initializer=initializer)(d7) # 32xout
return d8
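# Note on the shape comments in this file (added for readability): "16x128"
# abbreviates a 16x16 spatial resolution with 128 channels, stated relative
# to a 32x32 input to this U-Net. In the NVR+ models below the U-Net input is
# 256x256, so the actual resolutions are 8x larger.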
def neural_voxel_renderer_plus(voxels,
rerendering,
light_pos,
size=4,
norm2d='batchnorm',
norm3d='batchnorm'):
"""Neural Voxel Renderer + keras model."""
with tf.name_scope('Network/'):
voxels = layers.Input(tensor=voxels)
rerendering = layers.Input(tensor=rerendering)
light_pos = layers.Input(tensor=light_pos)
nf_2d = 512
with tf.name_scope('VoxelProcessing'):
vol0_a = layer_utils.conv_block_3d(voxels,
nfilters=16,
size=size,
strides=2,
normalization=norm3d) # 64x64x64x16
vol0_b = layer_utils.conv_block_3d(vol0_a,
nfilters=16,
size=size,
strides=1,
normalization=norm3d) # 64x64x64x16
vol1_a = layer_utils.conv_block_3d(vol0_b,
nfilters=16,
size=size,
strides=2,
normalization=norm3d) # 32x32x32x16
vol1_b = layer_utils.conv_block_3d(vol1_a,
nfilters=32,
size=size,
strides=1,
normalization=norm3d) # 32x32x32x32
vol1_c = layer_utils.conv_block_3d(vol1_b,
nfilters=32,
size=size,
strides=1,
normalization=norm3d) # 32x32x32x32
shortcut = vol1_c
vol_a1 = layer_utils.residual_block_3d(vol1_c,
32,
strides=(1, 1, 1),
normalization=norm3d) # 32x
vol_a2 = layer_utils.residual_block_3d(vol_a1,
32,
strides=(1, 1, 1),
normalization=norm3d) # 32x
vol_a3 = layer_utils.residual_block_3d(vol_a2,
32,
strides=(1, 1, 1),
normalization=norm3d) # 32x
vol_a4 = layer_utils.residual_block_3d(vol_a3,
32,
strides=(1, 1, 1),
normalization=norm3d) # 32x
vol_a5 = layer_utils.residual_block_3d(vol_a4,
32,
strides=(1, 1, 1),
normalization=norm3d) # 32x
encoded_vol = layers.add([shortcut, vol_a5])
encoded_vol = layers.Reshape([32, 32, 32*32])(encoded_vol)
encoded_vol = layers.Conv2D(nf_2d,
kernel_size=1,
strides=(1, 1),
padding='same',
kernel_initializer=initializer)(encoded_vol)
latent_projection = layers.LeakyReLU()(encoded_vol) # 32x32x512
with tf.name_scope('ProjectionProcessing'):
shortcut = latent_projection # 32x32xnf_2d
e1 = layer_utils.residual_block_2d(latent_projection,
nfilters=nf_2d,
strides=(1, 1),
normalization=norm2d) # 32x32xnf_2d
e2 = layer_utils.residual_block_2d(e1,
nfilters=nf_2d,
strides=(1, 1),
normalization=norm2d) # 32x32xnf_2d
e3 = layer_utils.residual_block_2d(e2,
nfilters=nf_2d,
strides=(1, 1),
normalization=norm2d) # 32x32xnf_2d
e4 = layer_utils.residual_block_2d(e3,
nfilters=nf_2d,
strides=(1, 1),
normalization=norm2d) # 32x32xnf_2d
e5 = layer_utils.residual_block_2d(e4,
nfilters=nf_2d,
strides=(1, 1),
normalization=norm2d) # 32x32xnf_2d
encoded_proj = layers.add([shortcut, e5]) # 32x32xnf_2d
with tf.name_scope('LightProcessing'):
fc_light = layers.Dense(64, kernel_initializer=initializer)(light_pos)
light_code = layers.Dense(64, kernel_initializer=initializer)(fc_light)
light_code = \
layers.Lambda(lambda v: tf.tile(v[0], [1, 32*32]))([light_code])
light_code = layers.Reshape((32, 32, 64))(light_code) # 32x32x64
with tf.name_scope('Merger'):
latent_code_final = layers.concatenate([encoded_proj, light_code])
latent_code_final = layer_utils.conv_block_2d(latent_code_final,
nfilters=nf_2d,
size=size,
strides=1,
normalization=norm3d)
shortcut = latent_code_final
m1 = layer_utils.residual_block_2d(latent_code_final,
nfilters=nf_2d,
strides=(1, 1),
normalization=norm2d) # 32x32xnf_2d
m2 = layer_utils.residual_block_2d(m1,
nfilters=nf_2d,
strides=(1, 1),
normalization=norm2d) # 32x32xnf_2d
m3 = layer_utils.residual_block_2d(m2,
nfilters=nf_2d,
strides=(1, 1),
normalization=norm2d) # 32x32xnf_2d
m4 = layer_utils.residual_block_2d(m3,
nfilters=nf_2d,
strides=(1, 1),
normalization=norm2d) # 32x32xnf_2d
m5 = layer_utils.residual_block_2d(m4,
nfilters=nf_2d,
strides=(1, 1),
normalization=norm2d) # 32x32xnf_2d
latent_code_final2 = layers.add([shortcut, m5]) # 32x32xnf_2d
with tf.name_scope('Decoder'):
d7 = layer_utils.conv_t_block_2d(latent_code_final2,
nfilters=128,
size=size,
strides=2,
normalization=norm2d) # 64x64x128
d7 = layer_utils.conv_block_2d(d7,
nfilters=128,
size=size,
strides=1,
normalization=norm2d) # 64x64x128
d8 = layer_utils.conv_t_block_2d(d7,
nfilters=64,
size=size,
strides=2,
normalization=norm2d) # 128x128x64
d8 = layer_utils.conv_block_2d(d8,
nfilters=64,
size=size,
strides=1,
normalization=norm2d) # 128x128x64
d9 = layer_utils.conv_t_block_2d(d8,
nfilters=32,
size=size,
strides=2,
normalization=norm2d) # 256x256x32
d9 = layer_utils.conv_block_2d(d9,
nfilters=32,
size=size,
strides=1,
normalization=norm2d) # 256x256x32
rendered_image = layers.Conv2D(32,
size,
strides=1,
padding='same',
kernel_initializer=initializer,
                                     use_bias=False)(d9) # 256x256x32
with tf.name_scope('ImageProcessingNetwork'):
ec1 = layer_utils.conv_block_2d(rerendering,
nfilters=32,
size=size,
strides=1,
normalization=norm2d) # 256x
ec2 = layer_utils.conv_block_2d(ec1,
nfilters=32,
size=size,
strides=1,
normalization=norm2d) # 256x
with tf.name_scope('NeuralRerenderingNetwork'):
latent_img = layers.add([rendered_image, ec2])
target_code = unet_3x_with_res_in_mid(latent_img, 32, norm2d=norm2d)
out0 = layer_utils.conv_block_2d(target_code,
nfilters=32,
size=size,
strides=1,
normalization=norm2d) # 256x
predicted_image = layers.Conv2D(3,
size,
strides=1,
padding='same',
kernel_initializer=initializer,
use_bias=False)(out0) # 256x256x3
return tf.keras.Model(inputs=[voxels, rerendering, light_pos],
outputs=[predicted_image])
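# Illustrative usage sketch for the TF1-style variant above (placeholder
# shapes assumed from the tf2 variant below; not part of the training code):
#
#   voxels_in = tf.placeholder(tf.float32, [None, 128, 128, 128, 4])
#   rerendering_in = tf.placeholder(tf.float32, [None, 256, 256, 3])
#   light_in = tf.placeholder(tf.float32, [None, 3])
#   model = neural_voxel_renderer_plus(voxels_in, rerendering_in, light_in)
#   predicted = model.outputs[0]  # shape [batch, 256, 256, 3]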
def neural_voxel_renderer_plus_tf2(size=4,
norm2d='batchnorm',
norm3d='batchnorm'):
"""Neural Voxel Renderer + keras model for tf2."""
with tf.name_scope('Network/'):
voxels = layers.Input(shape=[128, 128, 128, 4])
rerendering = layers.Input(shape=[256, 256, 3])
light_pos = layers.Input(shape=[3])
nf_2d = 512
with tf.name_scope('VoxelProcessing'):
vol0_a = layer_utils.conv_block_3d(voxels,
nfilters=16,
size=size,
strides=2,
normalization=norm3d) # 64x64x64x16
vol0_b = layer_utils.conv_block_3d(vol0_a,
nfilters=16,
size=size,
strides=1,
normalization=norm3d) # 64x64x64x16
vol1_a = layer_utils.conv_block_3d(vol0_b,
nfilters=16,
size=size,
strides=2,
normalization=norm3d) # 32x32x32x16
vol1_b = layer_utils.conv_block_3d(vol1_a,
nfilters=32,
size=size,
strides=1,
normalization=norm3d) # 32x32x32x32
vol1_c = layer_utils.conv_block_3d(vol1_b,
nfilters=32,
size=size,
strides=1,
normalization=norm3d) # 32x32x32x32
shortcut = vol1_c
vol_a1 = layer_utils.residual_block_3d(vol1_c,
32,
strides=(1, 1, 1),
normalization=norm3d) # 32x
vol_a2 = layer_utils.residual_block_3d(vol_a1,
32,
strides=(1, 1, 1),
normalization=norm3d) # 32x
vol_a3 = layer_utils.residual_block_3d(vol_a2,
32,
strides=(1, 1, 1),
normalization=norm3d) # 32x
vol_a4 = layer_utils.residual_block_3d(vol_a3,
32,
strides=(1, 1, 1),
normalization=norm3d) # 32x
vol_a5 = layer_utils.residual_block_3d(vol_a4,
32,
strides=(1, 1, 1),
normalization=norm3d) # 32x
encoded_vol = layers.add([shortcut, vol_a5])
encoded_vol = layers.Reshape([32, 32, 32*32])(encoded_vol)
encoded_vol = layers.Conv2D(nf_2d,
kernel_size=1,
strides=(1, 1),
padding='same',
kernel_initializer=initializer)(encoded_vol)
latent_projection = layers.LeakyReLU()(encoded_vol) # 32x32x512
with tf.name_scope('ProjectionProcessing'):
shortcut = latent_projection # 32x32xnf_2d
e1 = layer_utils.residual_block_2d(latent_projection,
nfilters=nf_2d,
strides=(1, 1),
normalization=norm2d) # 32x32xnf_2d
e2 = layer_utils.residual_block_2d(e1,
nfilters=nf_2d,
strides=(1, 1),
normalization=norm2d) # 32x32xnf_2d
e3 = layer_utils.residual_block_2d(e2,
nfilters=nf_2d,
strides=(1, 1),
normalization=norm2d) # 32x32xnf_2d
e4 = layer_utils.residual_block_2d(e3,
nfilters=nf_2d,
strides=(1, 1),
normalization=norm2d) # 32x32xnf_2d
e5 = layer_utils.residual_block_2d(e4,
nfilters=nf_2d,
strides=(1, 1),
normalization=norm2d) # 32x32xnf_2d
encoded_proj = layers.add([shortcut, e5]) # 32x32xnf_2d
with tf.name_scope('LightProcessing'):
fc_light = layers.Dense(64, kernel_initializer=initializer)(light_pos)
light_code = layers.Dense(64, kernel_initializer=initializer)(fc_light)
light_code = \
layers.Lambda(lambda v: tf.tile(v[0], [1, 32*32]))([light_code])
light_code = layers.Reshape((32, 32, 64))(light_code) # 32x32x64
with tf.name_scope('Merger'):
latent_code_final = layers.concatenate([encoded_proj, light_code])
latent_code_final = layer_utils.conv_block_2d(latent_code_final,
nfilters=nf_2d,
size=size,
strides=1,
normalization=norm3d)
shortcut = latent_code_final
m1 = layer_utils.residual_block_2d(latent_code_final,
nfilters=nf_2d,
strides=(1, 1),
normalization=norm2d) # 32x32xnf_2d
m2 = layer_utils.residual_block_2d(m1,
nfilters=nf_2d,
strides=(1, 1),
normalization=norm2d) # 32x32xnf_2d
m3 = layer_utils.residual_block_2d(m2,
nfilters=nf_2d,
strides=(1, 1),
normalization=norm2d) # 32x32xnf_2d
m4 = layer_utils.residual_block_2d(m3,
nfilters=nf_2d,
strides=(1, 1),
normalization=norm2d) # 32x32xnf_2d
m5 = layer_utils.residual_block_2d(m4,
nfilters=nf_2d,
strides=(1, 1),
normalization=norm2d) # 32x32xnf_2d
latent_code_final2 = layers.add([shortcut, m5]) # 32x32xnf_2d
with tf.name_scope('Decoder'):
d7 = layer_utils.conv_t_block_2d(latent_code_final2,
nfilters=128,
size=size,
strides=2,
normalization=norm2d) # 64x64x128
d7 = layer_utils.conv_block_2d(d7,
nfilters=128,
size=size,
strides=1,
normalization=norm2d) # 64x64x128
d8 = layer_utils.conv_t_block_2d(d7,
nfilters=64,
size=size,
strides=2,
normalization=norm2d) # 128x128x64
d8 = layer_utils.conv_block_2d(d8,
nfilters=64,
size=size,
strides=1,
normalization=norm2d) # 128x128x64
d9 = layer_utils.conv_t_block_2d(d8,
nfilters=32,
size=size,
strides=2,
normalization=norm2d) # 256x256x32
d9 = layer_utils.conv_block_2d(d9,
nfilters=32,
size=size,
strides=1,
normalization=norm2d) # 256x256x32
rendered_image = layers.Conv2D(32,
size,
strides=1,
padding='same',
kernel_initializer=initializer,
                                     use_bias=False)(d9) # 256x256x32
with tf.name_scope('ImageProcessingNetwork'):
ec1 = layer_utils.conv_block_2d(rerendering,
nfilters=32,
size=size,
strides=1,
normalization=norm2d) # 256x
ec2 = layer_utils.conv_block_2d(ec1,
nfilters=32,
size=size,
strides=1,
normalization=norm2d) # 256x
with tf.name_scope('NeuralRerenderingNetwork'):
latent_img = layers.add([rendered_image, ec2])
target_code = unet_3x_with_res_in_mid(latent_img, 32, norm2d=norm2d)
out0 = layer_utils.conv_block_2d(target_code,
nfilters=32,
size=size,
strides=1,
normalization=norm2d) # 256x
predicted_image = layers.Conv2D(3,
size,
strides=1,
padding='same',
kernel_initializer=initializer,
use_bias=False)(out0) # 256x256x3
return tf.keras.Model(inputs=[voxels, rerendering, light_pos],
outputs=[predicted_image])
| # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Definition of NVR+ keras model."""
import tensorflow.compat.v1 as tf
import tensorflow_graphics.projects.neural_voxel_renderer.layers as layer_utils
initializer = tf.keras.initializers.glorot_normal()
layers = tf.keras.layers
def unet_3x_with_res_in_mid(feat_in, out_filters, norm2d):
"""Helper function of a Unet with res blocks in the middle."""
e1 = layer_utils.residual_block_2d(feat_in,
nfilters=128,
strides=(2, 2),
normalization=norm2d) # 16x128
e2 = layer_utils.residual_block_2d(e1,
nfilters=256,
strides=(2, 2),
normalization=norm2d) # 8x256
e3 = layer_utils.residual_block_2d(e2,
nfilters=512,
strides=(2, 2),
normalization=norm2d) # 4x512
mid1 = layer_utils.residual_block_2d(e3,
nfilters=512,
strides=(1, 1),
                                       normalization=norm2d) # 4x512
mid2 = layer_utils.residual_block_2d(mid1,
nfilters=512,
strides=(1, 1),
                                       normalization=norm2d) # 4x512
mid3 = layer_utils.residual_block_2d(mid2,
nfilters=512,
strides=(1, 1),
                                       normalization=norm2d) # 4x512
d0 = layer_utils.upconv(mid3,
nfilters=256,
size=4,
strides=1) # 8x256
d1 = layers.concatenate([d0, e2]) # 8x512
d2 = layers.Conv2D(256,
kernel_size=4,
strides=(1, 1),
padding='same',
kernel_initializer=initializer)(d1) # 8x256
d3 = layer_utils.upconv(d2,
nfilters=128,
size=4,
strides=1) # 16x128
d4 = layers.concatenate([d3, e1]) # 16x256
d5 = layers.Conv2D(128,
kernel_size=4,
strides=(1, 1),
padding='same',
                     kernel_initializer=initializer)(d4) # 16x128
d6 = layer_utils.upconv(d5,
nfilters=64,
size=4,
strides=1) # 32x64
d7 = layers.concatenate([d6, feat_in]) # 32xN
d8 = layers.Conv2D(out_filters,
kernel_size=4,
strides=(1, 1),
padding='same',
kernel_initializer=initializer)(d7) # 32xout
return d8
def neural_voxel_renderer_plus(voxels,
rerendering,
light_pos,
size=4,
norm2d='batchnorm',
norm3d='batchnorm'):
"""Neural Voxel Renderer + keras model."""
with tf.name_scope('Network/'):
voxels = layers.Input(tensor=voxels)
rerendering = layers.Input(tensor=rerendering)
light_pos = layers.Input(tensor=light_pos)
nf_2d = 512
with tf.name_scope('VoxelProcessing'):
vol0_a = layer_utils.conv_block_3d(voxels,
nfilters=16,
size=size,
strides=2,
normalization=norm3d) # 64x64x64x16
vol0_b = layer_utils.conv_block_3d(vol0_a,
nfilters=16,
size=size,
strides=1,
normalization=norm3d) # 64x64x64x16
vol1_a = layer_utils.conv_block_3d(vol0_b,
nfilters=16,
size=size,
strides=2,
normalization=norm3d) # 32x32x32x16
vol1_b = layer_utils.conv_block_3d(vol1_a,
nfilters=32,
size=size,
strides=1,
normalization=norm3d) # 32x32x32x32
vol1_c = layer_utils.conv_block_3d(vol1_b,
nfilters=32,
size=size,
strides=1,
normalization=norm3d) # 32x32x32x32
shortcut = vol1_c
vol_a1 = layer_utils.residual_block_3d(vol1_c,
32,
strides=(1, 1, 1),
normalization=norm3d) # 32x
vol_a2 = layer_utils.residual_block_3d(vol_a1,
32,
strides=(1, 1, 1),
normalization=norm3d) # 32x
vol_a3 = layer_utils.residual_block_3d(vol_a2,
32,
strides=(1, 1, 1),
normalization=norm3d) # 32x
vol_a4 = layer_utils.residual_block_3d(vol_a3,
32,
strides=(1, 1, 1),
normalization=norm3d) # 32x
vol_a5 = layer_utils.residual_block_3d(vol_a4,
32,
strides=(1, 1, 1),
normalization=norm3d) # 32x
encoded_vol = layers.add([shortcut, vol_a5])
encoded_vol = layers.Reshape([32, 32, 32*32])(encoded_vol)
encoded_vol = layers.Conv2D(nf_2d,
kernel_size=1,
strides=(1, 1),
padding='same',
kernel_initializer=initializer)(encoded_vol)
latent_projection = layers.LeakyReLU()(encoded_vol) # 32x32x512
with tf.name_scope('ProjectionProcessing'):
shortcut = latent_projection # 32x32xnf_2d
e1 = layer_utils.residual_block_2d(latent_projection,
nfilters=nf_2d,
strides=(1, 1),
normalization=norm2d) # 32x32xnf_2d
e2 = layer_utils.residual_block_2d(e1,
nfilters=nf_2d,
strides=(1, 1),
normalization=norm2d) # 32x32xnf_2d
e3 = layer_utils.residual_block_2d(e2,
nfilters=nf_2d,
strides=(1, 1),
normalization=norm2d) # 32x32xnf_2d
e4 = layer_utils.residual_block_2d(e3,
nfilters=nf_2d,
strides=(1, 1),
normalization=norm2d) # 32x32xnf_2d
e5 = layer_utils.residual_block_2d(e4,
nfilters=nf_2d,
strides=(1, 1),
normalization=norm2d) # 32x32xnf_2d
encoded_proj = layers.add([shortcut, e5]) # 32x32xnf_2d
with tf.name_scope('LightProcessing'):
fc_light = layers.Dense(64, kernel_initializer=initializer)(light_pos)
light_code = layers.Dense(64, kernel_initializer=initializer)(fc_light)
light_code = \
layers.Lambda(lambda v: tf.tile(v[0], [1, 32*32]))([light_code])
light_code = layers.Reshape((32, 32, 64))(light_code) # 32x32x64
with tf.name_scope('Merger'):
latent_code_final = layers.concatenate([encoded_proj, light_code])
latent_code_final = layer_utils.conv_block_2d(latent_code_final,
nfilters=nf_2d,
size=size,
strides=1,
normalization=norm2d)
shortcut = latent_code_final
m1 = layer_utils.residual_block_2d(latent_code_final,
nfilters=nf_2d,
strides=(1, 1),
normalization=norm2d) # 32x32xnf_2d
m2 = layer_utils.residual_block_2d(m1,
nfilters=nf_2d,
strides=(1, 1),
normalization=norm2d) # 32x32xnf_2d
m3 = layer_utils.residual_block_2d(m2,
nfilters=nf_2d,
strides=(1, 1),
normalization=norm2d) # 32x32xnf_2d
m4 = layer_utils.residual_block_2d(m3,
nfilters=nf_2d,
strides=(1, 1),
normalization=norm2d) # 32x32xnf_2d
m5 = layer_utils.residual_block_2d(m4,
nfilters=nf_2d,
strides=(1, 1),
normalization=norm2d) # 32x32xnf_2d
latent_code_final2 = layers.add([shortcut, m5]) # 32x32xnf_2d
with tf.name_scope('Decoder'):
d7 = layer_utils.conv_t_block_2d(latent_code_final2,
nfilters=128,
size=size,
strides=2,
normalization=norm2d) # 64x64x128
d7 = layer_utils.conv_block_2d(d7,
nfilters=128,
size=size,
strides=1,
normalization=norm2d) # 64x64x128
d8 = layer_utils.conv_t_block_2d(d7,
nfilters=64,
size=size,
strides=2,
normalization=norm2d) # 128x128x64
d8 = layer_utils.conv_block_2d(d8,
nfilters=64,
size=size,
strides=1,
normalization=norm2d) # 128x128x64
d9 = layer_utils.conv_t_block_2d(d8,
nfilters=32,
size=size,
strides=2,
normalization=norm2d) # 256x256x32
d9 = layer_utils.conv_block_2d(d9,
nfilters=32,
size=size,
strides=1,
normalization=norm2d) # 256x256x32
rendered_image = layers.Conv2D(32,
size,
strides=1,
padding='same',
kernel_initializer=initializer,
use_bias=False)(d9) # 256x256x32
with tf.name_scope('ImageProcessingNetwork'):
ec1 = layer_utils.conv_block_2d(rerendering,
nfilters=32,
size=size,
strides=1,
normalization=norm2d) # 256x
ec2 = layer_utils.conv_block_2d(ec1,
nfilters=32,
size=size,
strides=1,
normalization=norm2d) # 256x
with tf.name_scope('NeuralRerenderingNetwork'):
latent_img = layers.add([rendered_image, ec2])
target_code = unet_3x_with_res_in_mid(latent_img, 32, norm2d=norm2d)
out0 = layer_utils.conv_block_2d(target_code,
nfilters=32,
size=size,
strides=1,
normalization=norm2d) # 256x
predicted_image = layers.Conv2D(3,
size,
strides=1,
padding='same',
kernel_initializer=initializer,
use_bias=False)(out0) # 256x256x3
return tf.keras.Model(inputs=[voxels, rerendering, light_pos],
outputs=[predicted_image])
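# A minimal usage sketch (an assumption, not part of the original file): this
# TF1-style constructor wraps concrete tensors via layers.Input(tensor=...),
# so it can be driven with tensors of the shapes that the tf2 variant below
# declares explicitly:
#
#   voxels = tf.zeros([1, 128, 128, 128, 4])
#   rerendering = tf.zeros([1, 256, 256, 3])
#   light_pos = tf.zeros([1, 3])
#   model = neural_voxel_renderer_plus(voxels, rerendering, light_pos)
#   predicted_image = model.outputs[0]  # shape (1, 256, 256, 3)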
def neural_voxel_renderer_plus_tf2(size=4,
norm2d='batchnorm',
norm3d='batchnorm'):
"""Neural Voxel Renderer + keras model for tf2."""
with tf.name_scope('Network/'):
voxels = layers.Input(shape=[128, 128, 128, 4])
rerendering = layers.Input(shape=[256, 256, 3])
light_pos = layers.Input(shape=[3])
nf_2d = 512
with tf.name_scope('VoxelProcessing'):
vol0_a = layer_utils.conv_block_3d(voxels,
nfilters=16,
size=size,
strides=2,
normalization=norm3d) # 64x64x64x16
vol0_b = layer_utils.conv_block_3d(vol0_a,
nfilters=16,
size=size,
strides=1,
normalization=norm3d) # 64x64x64x16
vol1_a = layer_utils.conv_block_3d(vol0_b,
nfilters=16,
size=size,
strides=2,
normalization=norm3d) # 32x32x32x16
vol1_b = layer_utils.conv_block_3d(vol1_a,
nfilters=32,
size=size,
strides=1,
normalization=norm3d) # 32x32x32x32
vol1_c = layer_utils.conv_block_3d(vol1_b,
nfilters=32,
size=size,
strides=1,
normalization=norm3d) # 32x32x32x32
shortcut = vol1_c
vol_a1 = layer_utils.residual_block_3d(vol1_c,
32,
strides=(1, 1, 1),
normalization=norm3d) # 32x
vol_a2 = layer_utils.residual_block_3d(vol_a1,
32,
strides=(1, 1, 1),
normalization=norm3d) # 32x
vol_a3 = layer_utils.residual_block_3d(vol_a2,
32,
strides=(1, 1, 1),
normalization=norm3d) # 32x
vol_a4 = layer_utils.residual_block_3d(vol_a3,
32,
strides=(1, 1, 1),
normalization=norm3d) # 32x
vol_a5 = layer_utils.residual_block_3d(vol_a4,
32,
strides=(1, 1, 1),
normalization=norm3d) # 32x
encoded_vol = layers.add([shortcut, vol_a5])
encoded_vol = layers.Reshape([32, 32, 32*32])(encoded_vol)
encoded_vol = layers.Conv2D(nf_2d,
kernel_size=1,
strides=(1, 1),
padding='same',
kernel_initializer=initializer)(encoded_vol)
latent_projection = layers.LeakyReLU()(encoded_vol) # 32x32x512
with tf.name_scope('ProjectionProcessing'):
shortcut = latent_projection # 32x32xnf_2d
e1 = layer_utils.residual_block_2d(latent_projection,
nfilters=nf_2d,
strides=(1, 1),
normalization=norm2d) # 32x32xnf_2d
e2 = layer_utils.residual_block_2d(e1,
nfilters=nf_2d,
strides=(1, 1),
normalization=norm2d) # 32x32xnf_2d
e3 = layer_utils.residual_block_2d(e2,
nfilters=nf_2d,
strides=(1, 1),
normalization=norm2d) # 32x32xnf_2d
e4 = layer_utils.residual_block_2d(e3,
nfilters=nf_2d,
strides=(1, 1),
normalization=norm2d) # 32x32xnf_2d
e5 = layer_utils.residual_block_2d(e4,
nfilters=nf_2d,
strides=(1, 1),
normalization=norm2d) # 32x32xnf_2d
encoded_proj = layers.add([shortcut, e5]) # 32x32xnf_2d
with tf.name_scope('LightProcessing'):
fc_light = layers.Dense(64, kernel_initializer=initializer)(light_pos)
light_code = layers.Dense(64, kernel_initializer=initializer)(fc_light)
light_code = \
layers.Lambda(lambda v: tf.tile(v[0], [1, 32*32]))([light_code])
light_code = layers.Reshape((32, 32, 64))(light_code) # 32x32x64
with tf.name_scope('Merger'):
latent_code_final = layers.concatenate([encoded_proj, light_code])
latent_code_final = layer_utils.conv_block_2d(latent_code_final,
nfilters=nf_2d,
size=size,
strides=1,
normalization=norm2d)
shortcut = latent_code_final
m1 = layer_utils.residual_block_2d(latent_code_final,
nfilters=nf_2d,
strides=(1, 1),
normalization=norm2d) # 32x32xnf_2d
m2 = layer_utils.residual_block_2d(m1,
nfilters=nf_2d,
strides=(1, 1),
normalization=norm2d) # 32x32xnf_2d
m3 = layer_utils.residual_block_2d(m2,
nfilters=nf_2d,
strides=(1, 1),
normalization=norm2d) # 32x32xnf_2d
m4 = layer_utils.residual_block_2d(m3,
nfilters=nf_2d,
strides=(1, 1),
normalization=norm2d) # 32x32xnf_2d
m5 = layer_utils.residual_block_2d(m4,
nfilters=nf_2d,
strides=(1, 1),
normalization=norm2d) # 32x32xnf_2d
latent_code_final2 = layers.add([shortcut, m5]) # 32x32xnf_2d
with tf.name_scope('Decoder'):
d7 = layer_utils.conv_t_block_2d(latent_code_final2,
nfilters=128,
size=size,
strides=2,
normalization=norm2d) # 64x64x128
d7 = layer_utils.conv_block_2d(d7,
nfilters=128,
size=size,
strides=1,
normalization=norm2d) # 64x64x128
d8 = layer_utils.conv_t_block_2d(d7,
nfilters=64,
size=size,
strides=2,
normalization=norm2d) # 128x128x64
d8 = layer_utils.conv_block_2d(d8,
nfilters=64,
size=size,
strides=1,
normalization=norm2d) # 128x128x64
d9 = layer_utils.conv_t_block_2d(d8,
nfilters=32,
size=size,
strides=2,
normalization=norm2d) # 256x256x32
d9 = layer_utils.conv_block_2d(d9,
nfilters=32,
size=size,
strides=1,
normalization=norm2d) # 256x256x32
rendered_image = layers.Conv2D(32,
size,
strides=1,
padding='same',
kernel_initializer=initializer,
use_bias=False)(d9) # 256x256x32
with tf.name_scope('ImageProcessingNetwork'):
ec1 = layer_utils.conv_block_2d(rerendering,
nfilters=32,
size=size,
strides=1,
normalization=norm2d) # 256x
ec2 = layer_utils.conv_block_2d(ec1,
nfilters=32,
size=size,
strides=1,
normalization=norm2d) # 256x
with tf.name_scope('NeuralRerenderingNetwork'):
latent_img = layers.add([rendered_image, ec2])
target_code = unet_3x_with_res_in_mid(latent_img, 32, norm2d=norm2d)
out0 = layer_utils.conv_block_2d(target_code,
nfilters=32,
size=size,
strides=1,
normalization=norm2d) # 256x
predicted_image = layers.Conv2D(3,
size,
strides=1,
padding='same',
kernel_initializer=initializer,
use_bias=False)(out0) # 256x256x3
return tf.keras.Model(inputs=[voxels, rerendering, light_pos],
outputs=[predicted_image])
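# A minimal sketch of driving the tf2 variant (illustrative values only):
#
#   model = neural_voxel_renderer_plus_tf2()
#   voxels = tf.random.uniform([1, 128, 128, 128, 4])
#   rerendering = tf.random.uniform([1, 256, 256, 3])
#   light_pos = tf.random.uniform([1, 3])
#   image = model([voxels, rerendering, light_pos])  # -> (1, 256, 256, 3)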
| -1 |
tensorflow/graphics | 489 | Enforce `Framebuffer` to accept only tensors with single batch dimension. | Enforce `Framebuffer` to accept only tensors with single batch dimension.
| copybara-service[bot] | "2021-02-03T21:06:22Z" | "2021-02-12T23:59:48Z" | 3a4f1952ed967fb884dc031eeda6dac3fbefbe52 | b7a2bf260d6fcf924fddcbb6dba36c72ece66990 | Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
| ./tensorflow_graphics/rendering/reflectance/__init__.py | # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Reflectance module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_graphics.rendering.reflectance import blinn_phong
from tensorflow_graphics.rendering.reflectance import lambertian
from tensorflow_graphics.rendering.reflectance import phong
from tensorflow_graphics.util import export_api as _export_api
# API contains submodules of tensorflow_graphics.rendering.reflectance.
__all__ = _export_api.get_modules()
| -1 |
tensorflow/graphics | 489 | Enforce `Framebuffer` to accept only tensors with single batch dimension. | Enforce `Framebuffer` to accept only tensors with single batch dimension.
| copybara-service[bot] | "2021-02-03T21:06:22Z" | "2021-02-12T23:59:48Z" | 3a4f1952ed967fb884dc031eeda6dac3fbefbe52 | b7a2bf260d6fcf924fddcbb6dba36c72ece66990 | Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
| ./tensorflow_graphics/nn/metric/tests/fscore_test.py | # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the fscore metric."""
from absl.testing import parameterized
import numpy as np
from tensorflow_graphics.nn.metric import fscore
from tensorflow_graphics.nn.metric import precision
from tensorflow_graphics.nn.metric import recall
from tensorflow_graphics.util import test_case
def random_tensor(tensor_shape):
return np.random.uniform(low=0.0, high=1.0, size=tensor_shape)
def random_tensor_shape():
tensor_size = np.random.randint(5) + 1
return np.random.randint(1, 10, size=(tensor_size)).tolist()
def binary_precision_function(ground_truth, predictions):
return precision.evaluate(ground_truth, predictions, classes=[1])
def binary_recall_function(ground_truth, predictions):
return recall.evaluate(ground_truth, predictions, classes=[1])
class FscoreTest(test_case.TestCase):
@parameterized.parameters(
# Precision = 0.5, Recall = 0.25.
((0, 1, 1, 1, 1), (1, 1, 0, 0, 0), 2 * (0.5 * 0.25) / (0.5 + 0.25)),
# Precision = 1, Recall = 1.
((0, 0, 0, 1, 1, 1, 0, 1), (0, 0, 0, 1, 1, 1, 0, 1), 1),
# Precision = 0, Recall = 0.
((0, 1, 0, 0, 0, 0), (0, 0, 0, 0, 0, 0), 0))
def test_evaluate_preset(self, ground_truth, predictions, expected_fscore):
tensor_shape = random_tensor_shape()
ground_truth_labels = np.tile(ground_truth, tensor_shape + [1])
predicted_labels = np.tile(predictions, tensor_shape + [1])
expected = np.tile(expected_fscore, tensor_shape)
result = fscore.evaluate(
ground_truth_labels,
predicted_labels,
precision_function=binary_precision_function,
recall_function=binary_recall_function)
self.assertAllClose(expected, result)
@parameterized.parameters(
("Not all batch dimensions are broadcast-compatible.", (1, 5, 3), (4, 3)),
("Not all batch dimensions are broadcast-compatible.", (3, 4), (2, 4, 5)),
)
def test_evaluate_shape_exception_raised(self, error_msg, *shape):
"""Tests that the shape exception is raised."""
self.assert_exception_is_raised(fscore.evaluate, error_msg, shape)
@parameterized.parameters(
((1, 5, 3), (2, 5, 1)),
((None, 2, 6), (4, 2, None)),
((3, 1, 1, 2), (3, 5, 8, 2)),
)
def test_evaluate_shape_exception_not_raised(self, *shapes):
"""Tests that the shape exceptions are not raised."""
self.assert_exception_is_not_raised(fscore.evaluate, shapes)
if __name__ == "__main__":
test_case.main()
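# Worked arithmetic behind the first preset above (a restatement for clarity):
# with precision P = 0.5 and recall R = 0.25, the F-score is
#   2 * P * R / (P + R) = 0.25 / 0.75 = 1/3 ~= 0.333.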
| -1 |
tensorflow/graphics | 489 | Enforce `Framebuffer` to accept only tensors with single batch dimension. | Enforce `Framebuffer` to accept only tensors with single batch dimension.
| copybara-service[bot] | "2021-02-03T21:06:22Z" | "2021-02-12T23:59:48Z" | 3a4f1952ed967fb884dc031eeda6dac3fbefbe52 | b7a2bf260d6fcf924fddcbb6dba36c72ece66990 | Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
| ./tensorflow_graphics/nn/__init__.py | # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Neural Network module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=g-import-not-at-top
from tensorflow_graphics.util.doc import _import_tfg_docs
if _import_tfg_docs():
from tensorflow_graphics.nn import layer
from tensorflow_graphics.nn import loss
from tensorflow_graphics.nn import metric
from tensorflow_graphics.util import export_api as _export_api
# API contains submodules of tensorflow_graphics.nn.
__all__ = _export_api.get_modules()
# pylint: enable=g-import-not-at-top
| -1 |
tensorflow/graphics | 489 | Enforce `Framebuffer` to accept only tensors with single batch dimension. | Enforce `Framebuffer` to accept only tensors with single batch dimension.
| copybara-service[bot] | "2021-02-03T21:06:22Z" | "2021-02-12T23:59:48Z" | 3a4f1952ed967fb884dc031eeda6dac3fbefbe52 | b7a2bf260d6fcf924fddcbb6dba36c72ece66990 | Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
| ./tensorflow_graphics/rendering/kernels/rasterize_triangles_impl.cc | /* Copyright 2020 The TensorFlow Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "py/tensorflow_graphics/rendering/kernels/rasterize_triangles_impl.h"
#include <algorithm>
#include <cmath>
namespace {
using fixed_t = int64;
// Converts to fixed point with 16 fractional bits and 48 integer bits.
// TODO(fcole): fixed-point depth may be too shallow.
// The algorithm requires multiplying two of the xyzw clip-space coordinates
// together, summing, and then multiplying by an NDC pixel coordinate (three
// total multiplies). After three multiplications, the fractional part will be
// 48 bits, leaving 16 bits for the integer part. The NDC pixel coordinates
// are in (-1,1) so they need only 1 integer bit, so as long as the values of
// the inverse matrix are < 2^15, the fixed-point math should not overflow. This
// seems a bit dicey but so far all the tests I've tried pass.
constexpr int kFractionalBits = 16;
constexpr fixed_t ShiftPointLeft(fixed_t x) { return x << kFractionalBits; }
constexpr fixed_t ToFixedPoint(float f) {
return static_cast<fixed_t>(f * ShiftPointLeft(1));
}
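// Illustrative values: with 16 fractional bits, ToFixedPoint(1.0f) equals
// 1 << 16 == 65536 and ToFixedPoint(0.5f) equals 32768; ShiftPointLeft(3)
// represents 3.0 in fixed point as 196608.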
// Takes the minimum of a and b, rounds down, and converts to an integer in
// the range [low, high].
inline int ClampedIntegerMin(float a, float b, int low, int high) {
const float value = std::floor(std::min(a, b));
return static_cast<int>(
std::clamp(value, static_cast<float>(low), static_cast<float>(high)));
}
// Takes the maximum of a and b, rounds up, and converts to an integer in the
// range [low, high].
inline int ClampedIntegerMax(float a, float b, int low, int high) {
const float value = std::ceil(std::max(a, b));
return static_cast<int>(
std::clamp(value, static_cast<float>(low), static_cast<float>(high)));
}
// Return true if the near plane is between the eye and the clip-space point
// with the provided z and w.
inline bool IsClipPointVisible(float z, float w) { return w > 0 && z >= -w; }
// Computes the screen-space bounding box of the given clip-space triangle and
// stores it into [left, right, bottom, top], where left and bottom limits are
// inclusive while right and top are not.
// Returns true if the bounding box includes any screen pixels.
bool ComputeTriangleBoundingBox(float v0x, float v0y, float v0z, float v0w,
float v1x, float v1y, float v1z, float v1w,
float v2x, float v2y, float v2z, float v2w,
int image_width, int image_height, int* left,
int* right, int* bottom, int* top) {
// If the triangle is entirely visible, project the vertices to pixel
// coordinates and find the triangle bounding box enlarged to the nearest
// integer and clamped to the image boundaries. If the triangle is not
// entirely visible, intersect the edges that cross the near plane with the
// near plane and use those to compute screen bounds instead.
*left = image_width;
*right = 0;
*bottom = image_height;
*top = 0;
auto add_point = [&](float x, float y, float w) {
const float px = 0.5f * (x / w + 1) * image_width;
const float py = 0.5f * (y / w + 1) * image_height;
*left = ClampedIntegerMin(*left, px, 0, image_width);
*right = ClampedIntegerMax(*right, px, 0, image_width);
*bottom = ClampedIntegerMin(*bottom, py, 0, image_height);
*top = ClampedIntegerMax(*top, py, 0, image_height);
};
auto add_near_point = [&](float x0, float y0, float z0, float w0, float x1,
float y1, float z1, float w1) {
const float denom = z0 - z1 + w0 - w1;
if (denom != 0) {
// Interpolate to near plane, where z/w == -1.
const float t = (z0 + w0) / denom;
const float x = x0 + t * (x1 - x0);
const float y = y0 + t * (y1 - y0);
const float w = w0 + t * (w1 - w0);
add_point(x, y, w);
}
};
const bool visible_v0 = IsClipPointVisible(v0z, v0w);
const bool visible_v1 = IsClipPointVisible(v1z, v1w);
const bool visible_v2 = IsClipPointVisible(v2z, v2w);
if (visible_v0) {
add_point(v0x, v0y, v0w);
if (!visible_v1) add_near_point(v0x, v0y, v0z, v0w, v1x, v1y, v1z, v1w);
if (!visible_v2) add_near_point(v0x, v0y, v0z, v0w, v2x, v2y, v2z, v2w);
}
if (visible_v1) {
add_point(v1x, v1y, v1w);
if (!visible_v2) add_near_point(v1x, v1y, v1z, v1w, v2x, v2y, v2z, v2w);
if (!visible_v0) add_near_point(v1x, v1y, v1z, v1w, v0x, v0y, v0z, v0w);
}
if (visible_v2) {
add_point(v2x, v2y, v2w);
if (!visible_v0) add_near_point(v2x, v2y, v2z, v2w, v0x, v0y, v0z, v0w);
if (!visible_v1) add_near_point(v2x, v2y, v2z, v2w, v1x, v1y, v1z, v1w);
}
const bool is_valid = (*right > *left) && (*top > *bottom);
return is_valid;
}
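// Illustrative example: a fully visible triangle with NDC corners (-1,-1),
// (1,-1), and (0,1) on a 4x4 image yields the full-screen bounding box
// left=0, right=4, bottom=0, top=4.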
// Computes a 3x3 matrix inverse without dividing by the determinant.
// Instead, makes an unnormalized matrix inverse with the correct sign
// by flipping the sign of the matrix if the determinant is negative.
// By leaving out determinant division, the rows of M^-1 only depend on two out
// of three of the columns of M; i.e., the first row of M^-1 only depends on the
// second and third columns of M, the second only depends on the first and
// third, etc. This means we can compute edge functions for two neighboring
// triangles independently and produce exactly the same numerical result up to
// the sign.
// See http://mathworld.wolfram.com/MatrixInverse.html
// Culling is accomplished by inspecting the sign of the determinant as in:
// "Incremental and Hierarchical Hilbert Order Edge Equation Polygon
// Rasterization," McCool, et al., 2001
void ComputeUnnormalizedMatrixInverse(
const fixed_t a11, const fixed_t a12, const fixed_t a13, const fixed_t a21,
const fixed_t a22, const fixed_t a23, const fixed_t a31, const fixed_t a32,
const fixed_t a33, const FaceCullingMode culling_mode, fixed_t m_inv[9]) {
m_inv[0] = a22 * a33 - a32 * a23;
m_inv[1] = a13 * a32 - a33 * a12;
m_inv[2] = a12 * a23 - a22 * a13;
m_inv[3] = a23 * a31 - a33 * a21;
m_inv[4] = a11 * a33 - a31 * a13;
m_inv[5] = a13 * a21 - a23 * a11;
m_inv[6] = a21 * a32 - a31 * a22;
m_inv[7] = a12 * a31 - a32 * a11;
m_inv[8] = a11 * a22 - a21 * a12;
// If the culling mode is kBack, leave the sign of the matrix unchanged.
// Transfer the sign of the determinant if mode is kNone. If mode is kFront,
// just invert the matrix.
if (culling_mode == FaceCullingMode::kNone ||
culling_mode == FaceCullingMode::kFront) {
// The first column of the unnormalized M^-1 contains intermediate values
// for det(M).
const float det = a11 * m_inv[0] + a12 * m_inv[3] + a13 * m_inv[6];
const float multiplier = (culling_mode == FaceCullingMode::kNone)
? std::copysign(1.0, det)
: -1.0;
for (int i = 0; i < 9; ++i) {
m_inv[i] *= multiplier;
}
}
}
// Computes the edge functions from M^-1 as described by Olano and Greer,
// "Triangle Scan Conversion using 2D Homogeneous Coordinates."
//
// This function combines equations (3) and (4). It first computes
// [a b c] = u_i * M^-1, where u_0 = [1 0 0], u_1 = [0 1 0], etc.,
// then computes edge_i = aX + bY + c
void ComputeEdgeFunctions(const float px, const float py,
const fixed_t m_inv[9], fixed_t values[3]) {
const fixed_t px_i = ToFixedPoint(px);
const fixed_t py_i = ToFixedPoint(py);
for (int i = 0; i < 3; ++i) {
const fixed_t a = m_inv[3 * i + 0];
const fixed_t b = m_inv[3 * i + 1];
const fixed_t c = m_inv[3 * i + 2];
// Before summing, shift the point of c to align with the products of
// multiplication.
values[i] = a * px_i + b * py_i + ShiftPointLeft(c);
}
}
// Determines whether the point p lies inside a triangle. Counts pixels exactly
// on an edge as inside the triangle, as long as the triangle is not degenerate.
// Degenerate (zero-area) triangles always fail the inside test.
bool PixelIsInsideTriangle(const fixed_t edge_values[3]) {
// Check that the edge values are all non-negative and that at least one is
// positive (triangle is non-degenerate).
return (edge_values[0] >= 0 && edge_values[1] >= 0 && edge_values[2] >= 0) &&
(edge_values[0] > 0 || edge_values[1] > 0 || edge_values[2] > 0);
}
} // namespace
void RasterizeTrianglesImpl(const float* vertices, const int32* triangles,
int32 triangle_count, int32 image_width,
int32 image_height, int32 num_layers,
FaceCullingMode face_culling_mode,
int32* triangle_ids, float* z_buffer,
float* barycentric_coordinates) {
const float half_image_width = 0.5f * image_width;
const float half_image_height = 0.5f * image_height;
fixed_t unnormalized_matrix_inverse[9];
fixed_t b_over_w[3];
int left, right, bottom, top;
for (int32 triangle_id = 0; triangle_id < triangle_count; ++triangle_id) {
const int32 v0_x_id = 4 * triangles[3 * triangle_id];
const int32 v1_x_id = 4 * triangles[3 * triangle_id + 1];
const int32 v2_x_id = 4 * triangles[3 * triangle_id + 2];
const float v0x = vertices[v0_x_id];
const float v0y = vertices[v0_x_id + 1];
const float v0z = vertices[v0_x_id + 2];
const float v0w = vertices[v0_x_id + 3];
const float v1x = vertices[v1_x_id];
const float v1y = vertices[v1_x_id + 1];
const float v1z = vertices[v1_x_id + 2];
const float v1w = vertices[v1_x_id + 3];
const float v2x = vertices[v2_x_id];
const float v2y = vertices[v2_x_id + 1];
const float v2z = vertices[v2_x_id + 2];
const float v2w = vertices[v2_x_id + 3];
const bool is_valid = ComputeTriangleBoundingBox(
v0x, v0y, v0z, v0w, v1x, v1y, v1z, v1w, v2x, v2y, v2z, v2w, image_width,
image_height, &left, &right, &bottom, &top);
// Ignore triangles that do not overlap with any screen pixels.
if (!is_valid) continue;
ComputeUnnormalizedMatrixInverse(
ToFixedPoint(v0x), ToFixedPoint(v1x), ToFixedPoint(v2x),
ToFixedPoint(v0y), ToFixedPoint(v1y), ToFixedPoint(v2y),
ToFixedPoint(v0w), ToFixedPoint(v1w), ToFixedPoint(v2w),
face_culling_mode, unnormalized_matrix_inverse);
// Iterate over each pixel in the bounding box.
for (int iy = bottom; iy < top; ++iy) {
for (int ix = left; ix < right; ++ix) {
const float px = ((ix + 0.5f) / half_image_width) - 1.0f;
const float py = ((iy + 0.5f) / half_image_height) - 1.0f;
ComputeEdgeFunctions(px, py, unnormalized_matrix_inverse, b_over_w);
if (!PixelIsInsideTriangle(b_over_w)) {
continue;
}
const float one_over_w = b_over_w[0] + b_over_w[1] + b_over_w[2];
const float b0 = b_over_w[0] / one_over_w;
const float b1 = b_over_w[1] / one_over_w;
const float b2 = b_over_w[2] / one_over_w;
// Since we computed an unnormalized w above, we need to recompute
// a properly scaled clip-space w value and then divide clip-space z
// by that.
const float clip_z = b0 * v0z + b1 * v1z + b2 * v2z;
const float clip_w = b0 * v0w + b1 * v1w + b2 * v2w;
const float z = clip_z / clip_w;
// Skip the pixel if it is beyond the near or far clipping plane.
if (z < -1.0f || z > 1.0f) continue;
// Insert into appropriate depth layer with insertion sort.
float z_next = z;
int32 id_next = triangle_id;
float b0_next = b0;
float b1_next = b1;
float b2_next = b2;
const int pixel_idx0 = iy * image_width + ix;
for (int layer = 0; layer < num_layers; ++layer) {
const int pixel_idx = pixel_idx0 + image_height * image_width * layer;
if (z_next < z_buffer[pixel_idx]) {
std::swap(z_next, z_buffer[pixel_idx]);
std::swap(id_next, triangle_ids[pixel_idx]);
if (barycentric_coordinates != nullptr) {
std::swap(b0_next, barycentric_coordinates[3 * pixel_idx + 0]);
std::swap(b1_next, barycentric_coordinates[3 * pixel_idx + 1]);
std::swap(b2_next, barycentric_coordinates[3 * pixel_idx + 2]);
}
}
// Exit the loop early if the clear depth (z == 1) is reached.
if (z_next == 1) break;
}
}
}
}
}
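// A minimal, self-contained usage sketch (an illustration, not part of the
// original file). Buffer sizes follow the indexing above: num_layers * height
// * width entries per buffer, and the caller clears the z-buffer to the
// far-plane value 1.0f before rasterization (initial triangle ids are an
// assumption here):
//
//   #include <vector>
//
//   void ExampleRasterizeSingleTriangle() {
//     const int32 width = 4, height = 4, num_layers = 1;
//     // One clip-space triangle, 4 floats (x, y, z, w) per vertex.
//     const std::vector<float> vertices = {-0.5f, -0.5f, 0.0f, 1.0f,
//                                          0.5f,  -0.5f, 0.0f, 1.0f,
//                                          0.0f,  0.5f,  0.0f, 1.0f};
//     const std::vector<int32> triangles = {0, 1, 2};
//     std::vector<int32> triangle_ids(num_layers * height * width, 0);
//     std::vector<float> z_buffer(num_layers * height * width, 1.0f);
//     std::vector<float> barycentrics(3 * num_layers * height * width, 0.0f);
//     RasterizeTrianglesImpl(vertices.data(), triangles.data(),
//                            /*triangle_count=*/1, width, height, num_layers,
//                            FaceCullingMode::kNone, triangle_ids.data(),
//                            z_buffer.data(), barycentrics.data());
//   }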
| -1 |
tensorflow/graphics | 489 | Enforce `Framebuffer` to accept only tensors with single batch dimension. | Enforce `Framebuffer` to accept only tensors with single batch dimension.
| copybara-service[bot] | "2021-02-03T21:06:22Z" | "2021-02-12T23:59:48Z" | 3a4f1952ed967fb884dc031eeda6dac3fbefbe52 | b7a2bf260d6fcf924fddcbb6dba36c72ece66990 | Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
| ./tensorflow_graphics/geometry/convolution/tests/graph_convolution_test.py | # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for graph convolution ops."""
import itertools
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
import tensorflow_graphics.geometry.convolution.graph_convolution as gc
from tensorflow_graphics.util import test_case
def _dense_to_sparse(data):
"""Convert a numpy array to a tf.SparseTensor."""
indices = np.where(data)
return tf.SparseTensor(
np.stack(indices, axis=-1), data[indices], dense_shape=data.shape)
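# For example, _dense_to_sparse(np.array([[1., 0.], [0., 2.]])) returns a
# tf.SparseTensor with indices [[0, 0], [1, 1]], values [1., 2.], and
# dense_shape [2, 2].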
def _dummy_data(batch_size, num_vertices, num_channels):
"""Create inputs for feature_steered_convolution."""
if batch_size > 0:
data = np.zeros(
shape=(batch_size, num_vertices, num_channels), dtype=np.float32)
neighbors = _dense_to_sparse(
np.tile(np.eye(num_vertices, dtype=np.float32), (batch_size, 1, 1)))
else:
data = np.zeros(shape=(num_vertices, num_channels), dtype=np.float32)
neighbors = _dense_to_sparse(np.eye(num_vertices, dtype=np.float32))
return data, neighbors
def _dummy_variables(in_channels, out_channels, num_weight_matrices):
"""Create variable substitutes for feature_steered_convolution."""
var_u = tf.zeros(shape=(in_channels, num_weight_matrices))
var_v = tf.zeros(shape=(in_channels, num_weight_matrices))
var_c = tf.zeros(shape=(num_weight_matrices))
var_w = tf.zeros(shape=(num_weight_matrices, in_channels, out_channels))
var_b = tf.zeros(shape=(out_channels))
return var_u, var_v, var_c, var_w, var_b
def _random_data(batch_size,
num_vertices,
num_channels,
padding,
only_self_edges,
data_type=np.float32,
neighbors_type=np.float32,
sizes_type=np.int32):
"""Create random inputs for feature_steered_convolution."""
def _random_data_2d(padding):
size = num_vertices if not padding else np.random.randint(
low=1, high=num_vertices + 1)
data = np.random.uniform(size=(size, num_channels)).astype(data_type)
if only_self_edges:
neighbors = np.eye(size, dtype=neighbors_type)
else:
random = np.random.uniform(size=(size, size)).astype(neighbors_type)
neighbors = np.maximum(
np.where(random > 0.75, np.ones_like(random), np.zeros_like(random)),
np.eye(size, dtype=neighbors_type))
neighbors = neighbors / np.sum(neighbors, axis=1, keepdims=True)
if padding:
data = np.pad(data, ((0, num_vertices - size), (0, 0)), "constant")
neighbors = np.pad(neighbors,
((0, num_vertices - size), (0, num_vertices - size)),
"constant")
return data, neighbors, size
else:
return data, neighbors
if batch_size > 0:
list_2d = [_random_data_2d(padding=padding) for _ in range(batch_size)]
data = np.stack([i[0] for i in list_2d], 0).astype(data_type)
neighbors = np.stack([i[1] for i in list_2d], 0).astype(neighbors_type)
if padding:
sizes = np.stack([i[2] for i in list_2d], 0).astype(sizes_type)
return data, _dense_to_sparse(neighbors), sizes
else:
return data, _dense_to_sparse(neighbors)
else:
if padding:
raise ValueError("Padding only allowed with batched data.")
data, neighbors = _random_data_2d(padding=False)
return data.astype(data_type), _dense_to_sparse(
neighbors.astype(neighbors_type))
def _random_variables(in_channels,
out_channels,
num_weight_matrices,
dtype=np.float32):
"""Create random variables for feature_steered_convolution."""
def _random_constant(shape, dtype):
return tf.constant(np.random.uniform(size=shape).astype(dtype))
var_u = _random_constant([in_channels, num_weight_matrices], dtype)
var_v = _random_constant([in_channels, num_weight_matrices], dtype)
var_c = _random_constant([num_weight_matrices], dtype)
var_w = _random_constant([num_weight_matrices, in_channels, out_channels],
dtype)
var_b = _random_constant([out_channels], dtype)
return var_u, var_v, var_c, var_w, var_b
class GraphConvolutionTestFeatureSteeredConvolutionTests(test_case.TestCase):
@parameterized.parameters(
("'sizes' must have an integer type.", np.float32, np.float32, np.float32,
np.float32),
("'data' must have a float type.", np.int32, np.float32, np.int32,
np.float32),
("'neighbors' and 'data' must have the same type.", np.float32,
np.float64, np.int32, np.float32),
)
def test_feature_steered_convolution_exception_raised_types(
self, err_msg, data_type, neighbors_type, sizes_type, var_type):
"""Check the type errors for invalid input types."""
data, neighbors, sizes = _random_data(1, 5, 3, True, False, data_type,
neighbors_type, sizes_type)
u, v, c, w, b = _random_variables(3, 3, 1, var_type)
with self.assertRaisesRegexp(TypeError, err_msg):
_ = gc.feature_steered_convolution(
data=data,
neighbors=neighbors,
sizes=sizes,
var_u=u,
var_v=v,
var_c=c,
var_w=w,
var_b=b)
@parameterized.parameters(
(np.float32, np.float32, np.int32, np.float32),
(np.float64, np.float64, np.int32, np.float64),
(np.float32, np.float32, np.int64, np.float32),
(np.float64, np.float64, np.int64, np.float64),
)
def test_feature_steered_convolution_exception_not_raised_types(
self, data_type, neighbors_type, sizes_type, var_type):
"""Check there are no exceptions for valid input types."""
data, neighbors, sizes = _random_data(1, 5, 3, True, False, data_type,
neighbors_type, sizes_type)
u, v, c, w, b = _random_variables(3, 3, 1, var_type)
try:
gc.feature_steered_convolution(
data=data,
neighbors=neighbors,
sizes=sizes,
var_u=u,
var_v=v,
var_c=c,
var_w=w,
var_b=b)
except Exception as e: # pylint: disable=broad-except
self.fail("Exception raised: %s" % str(e))
def test_feature_steered_convolution_exception_raised_shapes(self):
"""Check that invalid input shapes trigger the right exceptions."""
with self.assertRaisesRegexp(ValueError, "must have a rank of 2"):
data, neighbors = _dummy_data(1, 5, 2)
u, v, c, w, b = _dummy_variables(2, 2, 1)
data = data[0, :]
_ = gc.feature_steered_convolution(
data=data,
neighbors=neighbors,
sizes=None,
var_u=u,
var_v=v,
var_c=c,
var_w=w,
var_b=b)
with self.assertRaisesRegexp(ValueError, "must have a rank greater than 1"):
u, v, c, w, b = _dummy_variables(2, 2, 1)
data = np.ones(shape=(5), dtype=np.float32)
neighbors = _dense_to_sparse(np.ones(shape=(5), dtype=np.float32))
_ = gc.feature_steered_convolution(
data=data,
neighbors=neighbors,
sizes=None,
var_u=u,
var_v=v,
var_c=c,
var_w=w,
var_b=b)
with self.assertRaisesRegexp(ValueError,
"Not all batch dimensions are identical."):
data, neighbors = _dummy_data(1, 5, 2)
u, v, c, w, b = _dummy_variables(2, 2, 1)
_ = gc.feature_steered_convolution(
data=data,
neighbors=neighbors,
sizes=(1, 1),
var_u=u,
var_v=v,
var_c=c,
var_w=w,
var_b=b)
@parameterized.parameters(
(1, 1, 1, 1, 1),
(4, 2, 3, 6, 5),
(0, 1, 1, 1, 1),
(0, 2, 3, 6, 5),
)
def test_feature_steered_convolution_output_shape(self, batch_size,
num_vertices, in_channels,
out_channels,
num_weight_matrices):
"""Check that the output of convolution has the correct shape."""
data, neighbors = _dummy_data(batch_size, num_vertices, in_channels)
u, v, c, w, b = _dummy_variables(in_channels, out_channels,
num_weight_matrices)
y = gc.feature_steered_convolution(
data=data,
neighbors=neighbors,
sizes=None,
var_u=u,
var_v=v,
var_c=c,
var_w=w,
var_b=b)
y_shape = y.shape.as_list()
self.assertEqual(y_shape[-1], out_channels)
self.assertAllEqual(y_shape[:-1], data.shape[:-1])
@parameterized.parameters(
(1, 1, 1, 1, 1),
(4, 2, 3, 6, 5),
(0, 1, 1, 1, 1),
(0, 2, 3, 6, 5),
)
def test_feature_steered_convolution_only_self_edges(self, batch_size,
num_vertices,
in_channels,
out_channels,
num_weight_matrices):
"""Test convolution when the graph only has self edges."""
data, neighbors = _random_data(
batch_size,
num_vertices,
in_channels,
padding=False,
only_self_edges=True)
u, v, c, w, b = _random_variables(in_channels, out_channels,
num_weight_matrices)
with self.subTest(name="w=0_expect_output=b"):
y = gc.feature_steered_convolution(
data=data,
neighbors=neighbors,
sizes=None,
var_u=u,
var_v=v,
var_c=c,
var_w=tf.zeros_like(w),
var_b=b)
y_expected = tf.broadcast_to(b, y.shape)
self.assertAllEqual(y, y_expected)
with self.subTest(name="translation_invariant_self_edges"):
y = gc.feature_steered_convolution(
data=data,
neighbors=neighbors,
sizes=None,
var_u=u,
var_v=-u,
var_c=c,
var_w=w,
var_b=b)
q = tf.reshape(
tf.exp(c) / tf.reduce_sum(input_tensor=tf.exp(c)),
(num_weight_matrices, 1, 1))
if batch_size > 0:
q_times_w = tf.reduce_sum(input_tensor=q * w, axis=0, keepdims=True)
q_times_w = tf.tile(q_times_w, (batch_size, 1, 1))
else:
q_times_w = tf.reduce_sum(input_tensor=q * w, axis=0)
y_expected = tf.matmul(data, q_times_w) + tf.broadcast_to(b, y.shape)
self.assertAllClose(y, y_expected)
with self.subTest(name="constant_signal"):
if batch_size > 0:
constant_data = np.tile(
np.random.uniform(size=(batch_size, 1,
in_channels)).astype(np.float32),
(1, num_vertices, 1))
else:
constant_data = np.tile(
np.random.uniform(size=(1, in_channels)).astype(np.float32),
(num_vertices, 1))
y = gc.feature_steered_convolution(
data=constant_data,
neighbors=neighbors,
sizes=None,
var_u=u,
var_v=v,
var_c=c,
var_w=w,
var_b=b)
if batch_size > 0:
y_expected = tf.tile(y[:, :1, :], (1, num_vertices, 1))
else:
y_expected = tf.tile(y[:1, :], (num_vertices, 1))
self.assertAllClose(y, y_expected)
@parameterized.parameters(
(((1.0,), (2.0,), (3.0,)), np.ones(shape=(3, 3)) / 3.0, ((0.5,),),
((1.3,),), (-0.7,), (((0.8,),),), (3.0,), ((4.6,), (4.6,), (4.6,))),
(((1.0,), (2.0,), (3.0,)), np.ones(shape=(3, 3)) / 3.0, ((0.5, 0.2),),
((0.3, 0.4),), (-0.7, 0.15), (((0.8,),), ((1.1,),)), (3.0,),
((5.011706928844621,), (4.971030281984818,), (4.927388658982911,))),
)
def test_feature_steered_convolution_padding_preset(self, data, neighbors, u,
v, c, w, b, expected):
"""Test expected result for preset data and filter values."""
array = (np.array(i) for i in (data, neighbors, expected))
data, neighbors, expected = array
tensors = (tf.convert_to_tensor(value=np.array(i).astype(data.dtype)) \
for i in (u, v, c, w, b))
u, v, c, w, b = tensors
y = gc.feature_steered_convolution(
data=data,
neighbors=_dense_to_sparse(neighbors),
sizes=None,
var_u=u,
var_v=v,
var_c=c,
var_w=w,
var_b=b)
self.assertAllClose(y, expected)
@parameterized.parameters(
(1, 5, 1, 1, 1),
(2, 6, 3, 6, 5),
(5, 15, 6, 12, 8),
)
def test_feature_steered_convolution_padding_random(self, batch_size,
num_vertices, in_channels,
out_channels,
num_weight_matrices):
"""Test mixed topology batches (random vertices and neighbors)."""
data, neighbors, sizes = _random_data(
batch_size,
num_vertices,
in_channels,
padding=True,
only_self_edges=False)
u, v, c, w, b = _random_variables(in_channels, out_channels,
num_weight_matrices)
with self.subTest(name="if_w_is_0_then_y_is_b"):
y = gc.feature_steered_convolution(
data=data,
neighbors=neighbors,
sizes=sizes,
var_u=u,
var_v=v,
var_c=c,
var_w=tf.zeros_like(w),
var_b=b)
for k in range(batch_size):
y_crop = y[k, :sizes[k], :]
y_expected = tf.broadcast_to(b, y_crop.shape)
self.assertAllEqual(y_crop, y_expected)
# Check for zeros in the padded region.
self.assertAllEqual(y[k, sizes[k]:, :],
tf.zeros((num_vertices - sizes[k], out_channels)))
with self.subTest(name="convolve_with_constant"):
constant_data = data
for k in range(batch_size):
constant_data[k, :sizes[k], :] = np.tile(data[k, 0, :], (sizes[k], 1))
y = gc.feature_steered_convolution(
data=constant_data,
neighbors=neighbors,
sizes=sizes,
var_u=u,
var_v=v,
var_c=c,
var_w=w,
var_b=b)
for k in range(batch_size):
y_crop = y[k, :sizes[k], :]
y_const = tf.broadcast_to(y_crop[0, :], y_crop.shape)
self.assertAllClose(y_crop, y_const)
# Check for zeros in the padded region.
self.assertAllEqual(y[k, sizes[k]:, :],
tf.zeros([num_vertices - sizes[k], out_channels]))
@parameterized.parameters(
(1, 10, 3, 1, True),
(3, 6, 1, 4, True),
(0, 10, 5, 2, False),
(1, 10, 3, 1, False),
(3, 6, 1, 4, False),
(0, 10, 5, 2, False),
)
def test_feature_steered_convolution_jacobian_random(self, batch_size,
num_vertices,
in_channels,
num_weight_matrices,
padding):
"""Test the jacobian for random input data."""
random_data = _random_data(
batch_size,
num_vertices,
in_channels,
padding,
only_self_edges=False,
data_type=np.float64,
neighbors_type=np.float64)
data_init = random_data[0]
neighbors = random_data[1]
sizes = None if not padding else random_data[2]
u, v, c, w, b = _random_variables(
in_channels, in_channels, num_weight_matrices, dtype=np.float64)
def feature_steered_convolution(data):
return gc.feature_steered_convolution(
data=data,
neighbors=neighbors,
sizes=sizes,
var_u=u,
var_v=v,
var_c=c,
var_w=w,
var_b=b)
self.assert_jacobian_is_correct_fn(feature_steered_convolution, [data_init])
@parameterized.parameters(
(1, 1, 0.0),
(5, 1, 0.0),
(1, 3, 0.0),
(5, 3, 0.0),
(1, 1, 1.0),
(5, 1, 1.0),
(1, 3, 1.0),
(5, 3, 1.0),
)
def test_feature_steered_convolution_jacobian_preset(self, num_vertices,
num_channels,
data_multiplier):
"""Test the jacobian is correct for preset inputs."""
# Corner cases include one vertex, one channel, and all-zero features.
data_init = data_multiplier * np.random.uniform(
size=(num_vertices, num_channels)).astype(np.float64)
neighbors = tf.sparse.eye(num_vertices, dtype=tf.float64)
u, v, c, w, b = _random_variables(
num_channels, num_channels, 1, dtype=np.float64)
def feature_steered_convolution(data):
return gc.feature_steered_convolution(
data=data,
neighbors=neighbors,
sizes=None,
var_u=u,
var_v=v,
var_c=c,
var_w=w,
var_b=b)
self.assert_jacobian_is_correct_fn(feature_steered_convolution, [data_init])
class EdgeConvolutionTemplateTests(test_case.TestCase):
def _zeros(self, vertex_features, _, out_dimensions=None):
"""A callable for `edge_convolution_template`."""
if out_dimensions is None:
return tf.zeros_like(vertex_features)
else:
return tf.zeros(
shape=(vertex_features.shape.as_list()[0], out_dimensions),
dtype=vertex_features.dtype)
def _pass_through(self, _, neighbor_features):
"""A callable for `edge_convolution_template`."""
return neighbor_features
def _circular_2d_data(self, num_vertices, include_normals=False):
"""Create data for a circle graph."""
# Vertices are points distributed uniformly on a circle, with each point
# connected to its closest neighbor on either side.
theta = np.linspace(0.0, np.pi * 2.0, num=num_vertices, endpoint=False)
data = np.stack((np.cos(theta), np.sin(theta)), axis=-1)
if include_normals:
data = np.concatenate((data, data), axis=-1)
eye = np.eye(num_vertices)
neighbors = np.maximum(np.roll(eye, 1, axis=1), np.roll(eye, -1,
axis=1)) * 0.5
return data, _dense_to_sparse(neighbors)
def _edge_curvature_2d(self, vertex_features, neighbor_features):
"""A callable for `edge_convolution_template` that computes curvature."""
x_position, x_normal = tf.split(
value=vertex_features, num_or_size_splits=2, axis=-1)
y_position, y_normal = tf.split(
value=neighbor_features, num_or_size_splits=2, axis=-1)
yx_diff = x_position - y_position
curvature_unscaled = tf.abs(
tf.reduce_sum(
input_tensor=(y_normal - x_normal) * yx_diff,
axis=-1,
keepdims=True))
edge_length_squared = tf.reduce_sum(
input_tensor=yx_diff * yx_diff, axis=-1, keepdims=True)
return tf.where(
tf.less(edge_length_squared, 1e-7), tf.zeros_like(edge_length_squared),
curvature_unscaled / edge_length_squared)
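  # Why the functional above is handy for the circle tests that follow: on the
  # unit circle the normals equal the positions, so for any edge (x, y) the
  # numerator |(n_y - n_x)^T (x - y)| equals ||x - y||^2 and the returned
  # curvature is exactly 1.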
@parameterized.parameters(
("'sizes' must have an integer type.", np.float32, np.float32,
np.float32),
("'data' must have a float type.", np.int32, np.float32, np.int32),
("'neighbors' and 'data' must have the same type.", np.float32,
np.float64, np.int32),
)
def test_edge_convolution_template_exception_raised_types(
self, err_msg, data_type, neighbors_type, sizes_type):
"""Check the type errors for invalid input types."""
data, neighbors, sizes = _random_data(1, 5, 3, True, False, data_type,
neighbors_type, sizes_type)
with self.assertRaisesRegexp(TypeError, err_msg):
gc.edge_convolution_template(
data=data,
neighbors=neighbors,
sizes=sizes,
edge_function=self._zeros,
reduction="weighted",
edge_function_kwargs=dict())
@parameterized.parameters(
(np.float32, np.float32, np.int32),
(np.float64, np.float64, np.int32),
(np.float32, np.float32, np.int64),
(np.float64, np.float64, np.int64),
(np.float64, np.float64, np.int8),
(np.float64, np.float64, np.uint8),
(np.float64, np.float64, np.int16),
(np.float64, np.float64, np.uint16),
)
def test_edge_convolution_template_exception_not_raised_types(
self, data_type, neighbors_type, sizes_type):
"""Check there are no exceptions for valid input types."""
data, neighbors, sizes = _random_data(1, 5, 3, True, False, data_type,
neighbors_type, sizes_type)
try:
gc.edge_convolution_template(
data=data,
neighbors=neighbors,
sizes=sizes,
edge_function=self._zeros,
reduction="weighted",
edge_function_kwargs=dict())
except Exception as e: # pylint: disable=broad-except
self.fail("Exception raised: %s" % str(e))
def test_edge_convolution_template_exception_raised_shapes(self):
"""Check that invalid input shapes trigger the right exceptions."""
with self.assertRaisesRegexp(ValueError, "must have a rank of 2"):
data, neighbors = _dummy_data(1, 5, 2)
data = data[0, :]
_ = gc.edge_convolution_template(
data=data,
neighbors=neighbors,
sizes=None,
edge_function=self._zeros,
reduction="weighted",
edge_function_kwargs=dict())
with self.assertRaisesRegexp(ValueError, "must have a rank greater than 1"):
data = np.ones(shape=(5), dtype=np.float32)
neighbors = _dense_to_sparse(np.ones(shape=(5), dtype=np.float32))
_ = gc.edge_convolution_template(
data=data,
neighbors=neighbors,
sizes=None,
edge_function=self._zeros,
reduction="weighted",
edge_function_kwargs=dict())
with self.assertRaisesRegexp(ValueError, "must have a rank of 1"):
data, neighbors = _dummy_data(1, 5, 2)
_ = gc.edge_convolution_template(
data=data,
neighbors=neighbors,
sizes=((1, 1), (1, 1)),
edge_function=self._zeros,
reduction="weighted",
edge_function_kwargs=dict())
@parameterized.parameters("", "invalid")
def test_edge_convolution_template_exception_raised_reduction(
self, reduction):
"""Check that an invalid reduction method triggers the exception."""
with self.assertRaisesRegexp(ValueError, "reduction method"):
data, neighbors = _dummy_data(1, 5, 2)
gc.edge_convolution_template(
data=data,
neighbors=neighbors,
sizes=None,
edge_function=self._zeros,
reduction=reduction,
edge_function_kwargs=dict())
@parameterized.parameters(
(1, 1, 1, 1, "weighted"),
(4, 2, 3, 6, "weighted"),
(0, 1, 1, 1, "max"),
(0, 2, 3, 6, "max"),
)
def test_edge_convolution_template_output_shape(self, batch_size,
num_vertices, in_channels,
out_channels, reduction):
"""Check that the output of convolution has the correct shape."""
data, neighbors = _dummy_data(batch_size, num_vertices, in_channels)
y = gc.edge_convolution_template(
data,
neighbors,
None,
self._zeros,
reduction=reduction,
edge_function_kwargs={"out_dimensions": out_channels})
y_shape = y.shape.as_list()
with self.subTest(name="out_channels"):
self.assertEqual(y_shape[-1], out_channels)
with self.subTest(name="shape"):
self.assertAllEqual(y_shape[:-1], data.shape[:-1])
def test_edge_convolution_template_zero_neighbors(self):
"""Check that vertices with no neighbors map to zeros in the output."""
# We can reuse `self._edge_curvature_2d` as the curvature functional.
num_vertices = 500
data, neighbors = self._circular_2d_data(num_vertices, include_normals=True)
# Interleave the data with rows filled with random data, these rows will
# have no neighbors in the adjacency matrix so should map to all zeros in
# the output.
rows_odd = tf.expand_dims(
tf.range(start=1, limit=(2 * num_vertices), delta=2), -1)
rows_even = tf.expand_dims(
tf.range(start=0, limit=(2 * num_vertices + 1), delta=2), -1)
data_interleaved = tf.scatter_nd(
indices=rows_odd,
updates=data,
shape=(2 * num_vertices + 1, tf.shape(input=data)[-1]))
random_data = tf.random.uniform(
shape=(data.shape[0] + 1, data.shape[-1]), dtype=data.dtype)
random_interleaved = tf.scatter_nd(
indices=rows_even,
updates=random_data,
shape=(2 * num_vertices + 1, tf.shape(input=data)[-1]))
data_interleaved = data_interleaved + random_interleaved
neighbors_interleaved_indices = neighbors.indices * 2 + 1
neighbors_interleaved = tf.SparseTensor(
indices=neighbors_interleaved_indices,
values=neighbors.values,
dense_shape=(2 * num_vertices + 1, 2 * num_vertices + 1))
# Convolve the interleaved data.
data_curvature = gc.edge_convolution_template(
data=data_interleaved,
neighbors=neighbors_interleaved,
sizes=None,
edge_function=self._edge_curvature_2d,
reduction="weighted",
edge_function_kwargs=dict())
self.assertEqual(data_curvature.shape, (2 * num_vertices + 1, 1))
# The rows corresponding to the original input data measure the curvature.
# The curvature at any point on a circle of radius 1 should be 1.
# The interleaved rows of random data should map to zeros in the output.
self.assertAllClose(data_curvature[1::2, :],
np.ones(shape=(num_vertices, 1)))
self.assertAllClose(data_curvature[::2, :],
np.zeros(shape=(num_vertices + 1, 1)))
@parameterized.parameters(
(1, 10, 3, True, "weighted"),
(3, 6, 1, True, "weighted"),
(0, 10, 5, False, "weighted"),
(1, 10, 3, False, "max"),
(3, 6, 1, False, "max"),
(0, 10, 5, False, "max"),
)
def test_edge_convolution_template_jacobian_random(self, batch_size,
num_vertices, in_channels,
padding, reduction):
"""Test the jacobian for random input data."""
random_data = _random_data(
batch_size,
num_vertices,
in_channels,
padding,
only_self_edges=False,
data_type=np.float64,
neighbors_type=np.float64)
data_init = random_data[0]
neighbors = random_data[1]
sizes = None if not padding else random_data[2]
def edge_convolution_template(data):
return gc.edge_convolution_template(
data=data,
neighbors=neighbors,
sizes=sizes,
edge_function=self._pass_through,
reduction=reduction,
edge_function_kwargs=dict())
self.assert_jacobian_is_correct_fn(edge_convolution_template, [data_init])
def test_edge_convolution_template_preset_max(self):
data = np.array(((1, 2), (3, 4), (5, 6), (7, 8)), np.float32)
neighbors = np.array(
((0, 1, 0, 1), (0, 0, 1, 0), (1, 1, 1, 0), (0, 0, 1, 1)), np.float32)
neighbors = _dense_to_sparse(neighbors)
true = np.array(((8, 10), (8, 10), (10, 12), (14, 16)), np.float32)
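    # A worked row of `true`: vertex 0 has neighbors {1, 3}, and the
    # elementwise max of (1, 2) + (3, 4) and (1, 2) + (7, 8) is (8, 10).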
with self.subTest("max_sum"):
max_sum = gc.edge_convolution_template(
data=data,
neighbors=neighbors,
sizes=None,
edge_function=lambda x, y: x + y,
reduction="max",
edge_function_kwargs=dict())
self.assertAllEqual(max_sum, true)
with self.subTest("max_sum_scaled"):
# Max reduction ignores the weights, so scaling the neighbors weights
# should not change the result.
max_sum_scaled = gc.edge_convolution_template(
data=data,
neighbors=neighbors * 10.0,
sizes=None,
edge_function=lambda x, y: x + y,
reduction="max",
edge_function_kwargs=dict())
self.assertAllEqual(max_sum_scaled, true)
@parameterized.parameters(
itertools.product((1, 5), (1, 3), (0.0, 1.0), ("weighted", "max")))
def test_edge_convolution_template_jacobian_preset(self, num_vertices,
num_channels,
data_multiplier,
reduction):
"""Test the jacobian is correct for preset inputs."""
# Corner cases include one vertex, one channel, and all-zero features.
data_init = data_multiplier * np.random.uniform(
size=(num_vertices, num_channels)).astype(np.float64)
neighbors = tf.sparse.eye(num_vertices, dtype=tf.float64)
def edge_convolution_template(data):
return gc.edge_convolution_template(
data=data,
neighbors=neighbors,
sizes=None,
edge_function=self._pass_through,
reduction=reduction,
edge_function_kwargs=dict())
self.assert_jacobian_is_correct_fn(edge_convolution_template, [data_init])
def test_edge_convolution_template_laplacian_smoothing(self):
r"""Test the expected result with laplacian smoothing.
Laplacian smoothing for meshes is defined as
$$y_i = \frac{1}{|\mathcal{N(i)}|} \sum_{j \in \mathcal{N(i)}} x_j$$
This can be computed using `edge_convolution_template` with `f(x, y)->y`.
"""
# We can reuse `self._pass_through(x, y)->y` as the smoothing functional.
with self.subTest(name="only_self_edges_random"):
num_vertices = 500
data = np.random.uniform(size=(num_vertices, 5))
neighbors = tf.sparse.eye(num_vertices, dtype=tf.as_dtype(data.dtype))
data_smoothed = gc.edge_convolution_template(
data=data,
neighbors=neighbors,
sizes=None,
edge_function=self._pass_through,
reduction="weighted",
edge_function_kwargs=dict())
self.assertAllEqual(data, data_smoothed)
with self.subTest(name="circular_2d"):
num_vertices = 500
data, neighbors = self._circular_2d_data(num_vertices)
data_smoothed = gc.edge_convolution_template(
data=data,
neighbors=neighbors,
sizes=None,
edge_function=self._pass_through,
reduction="weighted",
edge_function_kwargs=dict())
# The smoothed points should have the same direction as the originals.
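      # Averaging the two unit-circle neighbors of a vertex gives a point at
      # radius cos(2 * pi / num_vertices) along the same radial direction, so
      # re-normalizing recovers the original data.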
data_smoothed_normalized = tf.nn.l2_normalize(data_smoothed, axis=-1)
self.assertAllClose(data, data_smoothed_normalized)
def test_edge_convolution_template_curvature(self):
r"""Test the expected result with curvature.
(Approximate) curvature for meshes is defined as
$$\kappa_{v_i} = \frac{1}{|\mathcal{N}(v_i)|}
\sum_{v_j \in \mathcal{N}(v_i)}
\frac{(\vec{v_i} - \vec{v_j})^T (\vec{n_{v_i}} -
\vec{n_{v_j}})} {\left|\vec{v_i}-\vec{v_j}\right|^2}
$$
This can be computed using `edge_convolution_template` with
    $$f(x, y) = (n_x - n_y)^T (x - y) / ||x - y||^2,$$
where $$n_x$$ and $$n_y$$ are the normals at points $$x$$ and $$y$$
respectively.
"""
# We can reuse `self._edge_curvature_2d` as the curvature functional.
num_vertices = 500
data, neighbors = self._circular_2d_data(num_vertices, include_normals=True)
data_curvature = gc.edge_convolution_template(
data=data,
neighbors=neighbors,
sizes=None,
edge_function=self._edge_curvature_2d,
reduction="weighted",
edge_function_kwargs=dict())
# The curvature at each point on a circle of radius 1 should be 1.
self.assertAllClose(data_curvature, np.ones(shape=(num_vertices, 1)))
if __name__ == "__main__":
test_case.main()
| # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for graph convolution ops."""
import itertools
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
import tensorflow_graphics.geometry.convolution.graph_convolution as gc
from tensorflow_graphics.util import test_case
def _dense_to_sparse(data):
"""Convert a numpy array to a tf.SparseTensor."""
indices = np.where(data)
return tf.SparseTensor(
np.stack(indices, axis=-1), data[indices], dense_shape=data.shape)
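# For example, _dense_to_sparse(np.array([[0., 2.], [1., 0.]])) yields a
# SparseTensor with indices [[0, 1], [1, 0]] and values [2., 1.].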
def _dummy_data(batch_size, num_vertices, num_channels):
"""Create inputs for feature_steered_convolution."""
if batch_size > 0:
data = np.zeros(
shape=(batch_size, num_vertices, num_channels), dtype=np.float32)
neighbors = _dense_to_sparse(
np.tile(np.eye(num_vertices, dtype=np.float32), (batch_size, 1, 1)))
else:
data = np.zeros(shape=(num_vertices, num_channels), dtype=np.float32)
neighbors = _dense_to_sparse(np.eye(num_vertices, dtype=np.float32))
return data, neighbors
def _dummy_variables(in_channels, out_channels, num_weight_matrices):
"""Create variable substitutes for feature_steered_convolution."""
var_u = tf.zeros(shape=(in_channels, num_weight_matrices))
var_v = tf.zeros(shape=(in_channels, num_weight_matrices))
var_c = tf.zeros(shape=(num_weight_matrices))
var_w = tf.zeros(shape=(num_weight_matrices, in_channels, out_channels))
var_b = tf.zeros(shape=(out_channels))
return var_u, var_v, var_c, var_w, var_b
def _random_data(batch_size,
num_vertices,
num_channels,
padding,
only_self_edges,
data_type=np.float32,
neighbors_type=np.float32,
sizes_type=np.int32):
"""Create random inputs for feature_steered_convolution."""
def _random_data_2d(padding):
size = num_vertices if not padding else np.random.randint(
low=1, high=num_vertices + 1)
data = np.random.uniform(size=(size, num_channels)).astype(data_type)
if only_self_edges:
neighbors = np.eye(size, dtype=neighbors_type)
else:
random = np.random.uniform(size=(size, size)).astype(neighbors_type)
neighbors = np.maximum(
np.where(random > 0.75, np.ones_like(random), np.zeros_like(random)),
np.eye(size, dtype=neighbors_type))
neighbors = neighbors / np.sum(neighbors, axis=1, keepdims=True)
if padding:
data = np.pad(data, ((0, num_vertices - size), (0, 0)), "constant")
neighbors = np.pad(neighbors,
((0, num_vertices - size), (0, num_vertices - size)),
"constant")
return data, neighbors, size
else:
return data, neighbors
if batch_size > 0:
list_2d = [_random_data_2d(padding=padding) for _ in range(batch_size)]
data = np.stack([i[0] for i in list_2d], 0).astype(data_type)
neighbors = np.stack([i[1] for i in list_2d], 0).astype(neighbors_type)
if padding:
sizes = np.stack([i[2] for i in list_2d], 0).astype(sizes_type)
return data, _dense_to_sparse(neighbors), sizes
else:
return data, _dense_to_sparse(neighbors)
else:
if padding:
raise ValueError("Padding only allowed with batched data.")
data, neighbors = _random_data_2d(padding=False)
return data.astype(data_type), _dense_to_sparse(
neighbors.astype(neighbors_type))
def _random_variables(in_channels,
out_channels,
num_weight_matrices,
dtype=np.float32):
"""Create random variables for feature_steered_convolution."""
def _random_constant(shape, dtype):
return tf.constant(np.random.uniform(size=shape).astype(dtype))
var_u = _random_constant([in_channels, num_weight_matrices], dtype)
var_v = _random_constant([in_channels, num_weight_matrices], dtype)
var_c = _random_constant([num_weight_matrices], dtype)
var_w = _random_constant([num_weight_matrices, in_channels, out_channels],
dtype)
var_b = _random_constant([out_channels], dtype)
return var_u, var_v, var_c, var_w, var_b
class GraphConvolutionTestFeatureSteeredConvolutionTests(test_case.TestCase):
@parameterized.parameters(
("'sizes' must have an integer type.", np.float32, np.float32, np.float32,
np.float32),
("'data' must have a float type.", np.int32, np.float32, np.int32,
np.float32),
("'neighbors' and 'data' must have the same type.", np.float32,
np.float64, np.int32, np.float32),
)
def test_feature_steered_convolution_exception_raised_types(
self, err_msg, data_type, neighbors_type, sizes_type, var_type):
"""Check the type errors for invalid input types."""
data, neighbors, sizes = _random_data(1, 5, 3, True, False, data_type,
neighbors_type, sizes_type)
u, v, c, w, b = _random_variables(3, 3, 1, var_type)
with self.assertRaisesRegexp(TypeError, err_msg):
_ = gc.feature_steered_convolution(
data=data,
neighbors=neighbors,
sizes=sizes,
var_u=u,
var_v=v,
var_c=c,
var_w=w,
var_b=b)
@parameterized.parameters(
(np.float32, np.float32, np.int32, np.float32),
(np.float64, np.float64, np.int32, np.float64),
(np.float32, np.float32, np.int64, np.float32),
(np.float64, np.float64, np.int64, np.float64),
)
def test_feature_steered_convolution_exception_not_raised_types(
self, data_type, neighbors_type, sizes_type, var_type):
"""Check there are no exceptions for valid input types."""
data, neighbors, sizes = _random_data(1, 5, 3, True, False, data_type,
neighbors_type, sizes_type)
u, v, c, w, b = _random_variables(3, 3, 1, var_type)
try:
gc.feature_steered_convolution(
data=data,
neighbors=neighbors,
sizes=sizes,
var_u=u,
var_v=v,
var_c=c,
var_w=w,
var_b=b)
except Exception as e: # pylint: disable=broad-except
self.fail("Exception raised: %s" % str(e))
def test_feature_steered_convolution_exception_raised_shapes(self):
"""Check that invalid input shapes trigger the right exceptions."""
with self.assertRaisesRegexp(ValueError, "must have a rank of 2"):
data, neighbors = _dummy_data(1, 5, 2)
u, v, c, w, b = _dummy_variables(2, 2, 1)
data = data[0, :]
_ = gc.feature_steered_convolution(
data=data,
neighbors=neighbors,
sizes=None,
var_u=u,
var_v=v,
var_c=c,
var_w=w,
var_b=b)
with self.assertRaisesRegexp(ValueError, "must have a rank greater than 1"):
u, v, c, w, b = _dummy_variables(2, 2, 1)
data = np.ones(shape=(5), dtype=np.float32)
neighbors = _dense_to_sparse(np.ones(shape=(5), dtype=np.float32))
_ = gc.feature_steered_convolution(
data=data,
neighbors=neighbors,
sizes=None,
var_u=u,
var_v=v,
var_c=c,
var_w=w,
var_b=b)
with self.assertRaisesRegexp(ValueError,
"Not all batch dimensions are identical."):
data, neighbors = _dummy_data(1, 5, 2)
u, v, c, w, b = _dummy_variables(2, 2, 1)
_ = gc.feature_steered_convolution(
data=data,
neighbors=neighbors,
sizes=(1, 1),
var_u=u,
var_v=v,
var_c=c,
var_w=w,
var_b=b)
@parameterized.parameters(
(1, 1, 1, 1, 1),
(4, 2, 3, 6, 5),
(0, 1, 1, 1, 1),
(0, 2, 3, 6, 5),
)
def test_feature_steered_convolution_output_shape(self, batch_size,
num_vertices, in_channels,
out_channels,
num_weight_matrices):
"""Check that the output of convolution has the correct shape."""
data, neighbors = _dummy_data(batch_size, num_vertices, in_channels)
u, v, c, w, b = _dummy_variables(in_channels, out_channels,
num_weight_matrices)
y = gc.feature_steered_convolution(
data=data,
neighbors=neighbors,
sizes=None,
var_u=u,
var_v=v,
var_c=c,
var_w=w,
var_b=b)
y_shape = y.shape.as_list()
self.assertEqual(y_shape[-1], out_channels)
self.assertAllEqual(y_shape[:-1], data.shape[:-1])
@parameterized.parameters(
(1, 1, 1, 1, 1),
(4, 2, 3, 6, 5),
(0, 1, 1, 1, 1),
(0, 2, 3, 6, 5),
)
def test_feature_steered_convolution_only_self_edges(self, batch_size,
num_vertices,
in_channels,
out_channels,
num_weight_matrices):
"""Test convolution when the graph only has self edges."""
data, neighbors = _random_data(
batch_size,
num_vertices,
in_channels,
padding=False,
only_self_edges=True)
u, v, c, w, b = _random_variables(in_channels, out_channels,
num_weight_matrices)
with self.subTest(name="w=0_expect_output=b"):
y = gc.feature_steered_convolution(
data=data,
neighbors=neighbors,
sizes=None,
var_u=u,
var_v=v,
var_c=c,
var_w=tf.zeros_like(w),
var_b=b)
y_expected = tf.broadcast_to(b, y.shape)
self.assertAllEqual(y, y_expected)
with self.subTest(name="translation_invariant_self_edges"):
y = gc.feature_steered_convolution(
data=data,
neighbors=neighbors,
sizes=None,
var_u=u,
var_v=-u,
var_c=c,
var_w=w,
var_b=b)
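      # With var_v = -u the attention logit of every self edge is
      # u^T x - u^T x + c = c, so the per-matrix weights reduce to softmax(c)
      # and the convolution collapses to the fixed linear map computed below
      # (a derivation from the feature-steered weighting, noted here only as
      # a comment).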
q = tf.reshape(
tf.exp(c) / tf.reduce_sum(input_tensor=tf.exp(c)),
(num_weight_matrices, 1, 1))
if batch_size > 0:
q_times_w = tf.reduce_sum(input_tensor=q * w, axis=0, keepdims=True)
q_times_w = tf.tile(q_times_w, (batch_size, 1, 1))
else:
q_times_w = tf.reduce_sum(input_tensor=q * w, axis=0)
y_expected = tf.matmul(data, q_times_w) + tf.broadcast_to(b, y.shape)
self.assertAllClose(y, y_expected)
with self.subTest(name="constant_signal"):
if batch_size > 0:
constant_data = np.tile(
np.random.uniform(size=(batch_size, 1,
in_channels)).astype(np.float32),
(1, num_vertices, 1))
else:
constant_data = np.tile(
np.random.uniform(size=(1, in_channels)).astype(np.float32),
(num_vertices, 1))
y = gc.feature_steered_convolution(
data=constant_data,
neighbors=neighbors,
sizes=None,
var_u=u,
var_v=v,
var_c=c,
var_w=w,
var_b=b)
if batch_size > 0:
y_expected = tf.tile(y[:, :1, :], (1, num_vertices, 1))
else:
y_expected = tf.tile(y[:1, :], (num_vertices, 1))
self.assertAllClose(y, y_expected)
@parameterized.parameters(
(((1.0,), (2.0,), (3.0,)), np.ones(shape=(3, 3)) / 3.0, ((0.5,),),
((1.3,),), (-0.7,), (((0.8,),),), (3.0,), ((4.6,), (4.6,), (4.6,))),
(((1.0,), (2.0,), (3.0,)), np.ones(shape=(3, 3)) / 3.0, ((0.5, 0.2),),
((0.3, 0.4),), (-0.7, 0.15), (((0.8,),), ((1.1,),)), (3.0,),
((5.011706928844621,), (4.971030281984818,), (4.927388658982911,))),
)
def test_feature_steered_convolution_padding_preset(self, data, neighbors, u,
v, c, w, b, expected):
"""Test expected result for preset data and filter values."""
array = (np.array(i) for i in (data, neighbors, expected))
data, neighbors, expected = array
tensors = (tf.convert_to_tensor(value=np.array(i).astype(data.dtype)) \
for i in (u, v, c, w, b))
u, v, c, w, b = tensors
y = gc.feature_steered_convolution(
data=data,
neighbors=_dense_to_sparse(neighbors),
sizes=None,
var_u=u,
var_v=v,
var_c=c,
var_w=w,
var_b=b)
self.assertAllClose(y, expected)
@parameterized.parameters(
(1, 5, 1, 1, 1),
(2, 6, 3, 6, 5),
(5, 15, 6, 12, 8),
)
def test_feature_steered_convolution_padding_random(self, batch_size,
num_vertices, in_channels,
out_channels,
num_weight_matrices):
"""Test mixed topology batches (random vertices and neighbors)."""
data, neighbors, sizes = _random_data(
batch_size,
num_vertices,
in_channels,
padding=True,
only_self_edges=False)
u, v, c, w, b = _random_variables(in_channels, out_channels,
num_weight_matrices)
with self.subTest(name="if_w_is_0_then_y_is_b"):
y = gc.feature_steered_convolution(
data=data,
neighbors=neighbors,
sizes=sizes,
var_u=u,
var_v=v,
var_c=c,
var_w=tf.zeros_like(w),
var_b=b)
for k in range(batch_size):
y_crop = y[k, :sizes[k], :]
y_expected = tf.broadcast_to(b, y_crop.shape)
self.assertAllEqual(y_crop, y_expected)
# Check for zeros in the padded region.
self.assertAllEqual(y[k, sizes[k]:, :],
tf.zeros((num_vertices - sizes[k], out_channels)))
with self.subTest(name="convolve_with_constant"):
constant_data = data
for k in range(batch_size):
constant_data[k, :sizes[k], :] = np.tile(data[k, 0, :], (sizes[k], 1))
y = gc.feature_steered_convolution(
data=constant_data,
neighbors=neighbors,
sizes=sizes,
var_u=u,
var_v=v,
var_c=c,
var_w=w,
var_b=b)
for k in range(batch_size):
y_crop = y[k, :sizes[k], :]
y_const = tf.broadcast_to(y_crop[0, :], y_crop.shape)
self.assertAllClose(y_crop, y_const)
# Check for zeros in the padded region.
self.assertAllEqual(y[k, sizes[k]:, :],
tf.zeros([num_vertices - sizes[k], out_channels]))
@parameterized.parameters(
(1, 10, 3, 1, True),
(3, 6, 1, 4, True),
(0, 10, 5, 2, False),
(1, 10, 3, 1, False),
(3, 6, 1, 4, False),
(0, 10, 5, 2, False),
)
def test_feature_steered_convolution_jacobian_random(self, batch_size,
num_vertices,
in_channels,
num_weight_matrices,
padding):
"""Test the jacobian for random input data."""
random_data = _random_data(
batch_size,
num_vertices,
in_channels,
padding,
only_self_edges=False,
data_type=np.float64,
neighbors_type=np.float64)
data_init = random_data[0]
neighbors = random_data[1]
sizes = None if not padding else random_data[2]
u, v, c, w, b = _random_variables(
in_channels, in_channels, num_weight_matrices, dtype=np.float64)
def feature_steered_convolution(data):
return gc.feature_steered_convolution(
data=data,
neighbors=neighbors,
sizes=sizes,
var_u=u,
var_v=v,
var_c=c,
var_w=w,
var_b=b)
self.assert_jacobian_is_correct_fn(feature_steered_convolution, [data_init])
@parameterized.parameters(
(1, 1, 0.0),
(5, 1, 0.0),
(1, 3, 0.0),
(5, 3, 0.0),
(1, 1, 1.0),
(5, 1, 1.0),
(1, 3, 1.0),
(5, 3, 1.0),
)
def test_feature_steered_convolution_jacobian_preset(self, num_vertices,
num_channels,
data_multiplier):
"""Test the jacobian is correct for preset inputs."""
# Corner cases include one vertex, one channel, and all-zero features.
data_init = data_multiplier * np.random.uniform(
size=(num_vertices, num_channels)).astype(np.float64)
neighbors = tf.sparse.eye(num_vertices, dtype=tf.float64)
u, v, c, w, b = _random_variables(
num_channels, num_channels, 1, dtype=np.float64)
def feature_steered_convolution(data):
return gc.feature_steered_convolution(
data=data,
neighbors=neighbors,
sizes=None,
var_u=u,
var_v=v,
var_c=c,
var_w=w,
var_b=b)
self.assert_jacobian_is_correct_fn(feature_steered_convolution, [data_init])
class EdgeConvolutionTemplateTests(test_case.TestCase):
def _zeros(self, vertex_features, _, out_dimensions=None):
"""A callable for `edge_convolution_template`."""
if out_dimensions is None:
return tf.zeros_like(vertex_features)
else:
return tf.zeros(
shape=(vertex_features.shape.as_list()[0], out_dimensions),
dtype=vertex_features.dtype)
def _pass_through(self, _, neighbor_features):
"""A callable for `edge_convolution_template`."""
return neighbor_features
def _circular_2d_data(self, num_vertices, include_normals=False):
"""Create data for a circle graph."""
# Vertices are points distributed uniformly on a circle, with each point
# connected to its closest neighbor on either side.
theta = np.linspace(0.0, np.pi * 2.0, num=num_vertices, endpoint=False)
data = np.stack((np.cos(theta), np.sin(theta)), axis=-1)
if include_normals:
data = np.concatenate((data, data), axis=-1)
eye = np.eye(num_vertices)
neighbors = np.maximum(np.roll(eye, 1, axis=1), np.roll(eye, -1,
axis=1)) * 0.5
return data, _dense_to_sparse(neighbors)
def _edge_curvature_2d(self, vertex_features, neighbor_features):
"""A callable for `edge_convolution_template` that computes curvature."""
x_position, x_normal = tf.split(
value=vertex_features, num_or_size_splits=2, axis=-1)
y_position, y_normal = tf.split(
value=neighbor_features, num_or_size_splits=2, axis=-1)
yx_diff = x_position - y_position
curvature_unscaled = tf.abs(
tf.reduce_sum(
input_tensor=(y_normal - x_normal) * yx_diff,
axis=-1,
keepdims=True))
edge_length_squared = tf.reduce_sum(
input_tensor=yx_diff * yx_diff, axis=-1, keepdims=True)
return tf.where(
tf.less(edge_length_squared, 1e-7), tf.zeros_like(edge_length_squared),
curvature_unscaled / edge_length_squared)
@parameterized.parameters(
("'sizes' must have an integer type.", np.float32, np.float32,
np.float32),
("'data' must have a float type.", np.int32, np.float32, np.int32),
("'neighbors' and 'data' must have the same type.", np.float32,
np.float64, np.int32),
)
def test_edge_convolution_template_exception_raised_types(
self, err_msg, data_type, neighbors_type, sizes_type):
"""Check the type errors for invalid input types."""
data, neighbors, sizes = _random_data(1, 5, 3, True, False, data_type,
neighbors_type, sizes_type)
with self.assertRaisesRegexp(TypeError, err_msg):
gc.edge_convolution_template(
data=data,
neighbors=neighbors,
sizes=sizes,
edge_function=self._zeros,
reduction="weighted",
edge_function_kwargs=dict())
@parameterized.parameters(
(np.float32, np.float32, np.int32),
(np.float64, np.float64, np.int32),
(np.float32, np.float32, np.int64),
(np.float64, np.float64, np.int64),
(np.float64, np.float64, np.int8),
(np.float64, np.float64, np.uint8),
(np.float64, np.float64, np.int16),
(np.float64, np.float64, np.uint16),
)
def test_edge_convolution_template_exception_not_raised_types(
self, data_type, neighbors_type, sizes_type):
"""Check there are no exceptions for valid input types."""
data, neighbors, sizes = _random_data(1, 5, 3, True, False, data_type,
neighbors_type, sizes_type)
try:
gc.edge_convolution_template(
data=data,
neighbors=neighbors,
sizes=sizes,
edge_function=self._zeros,
reduction="weighted",
edge_function_kwargs=dict())
except Exception as e: # pylint: disable=broad-except
self.fail("Exception raised: %s" % str(e))
def test_edge_convolution_template_exception_raised_shapes(self):
"""Check that invalid input shapes trigger the right exceptions."""
with self.assertRaisesRegexp(ValueError, "must have a rank of 2"):
data, neighbors = _dummy_data(1, 5, 2)
data = data[0, :]
_ = gc.edge_convolution_template(
data=data,
neighbors=neighbors,
sizes=None,
edge_function=self._zeros,
reduction="weighted",
edge_function_kwargs=dict())
with self.assertRaisesRegexp(ValueError, "must have a rank greater than 1"):
data = np.ones(shape=(5), dtype=np.float32)
neighbors = _dense_to_sparse(np.ones(shape=(5), dtype=np.float32))
_ = gc.edge_convolution_template(
data=data,
neighbors=neighbors,
sizes=None,
edge_function=self._zeros,
reduction="weighted",
edge_function_kwargs=dict())
with self.assertRaisesRegexp(ValueError, "must have a rank of 1"):
data, neighbors = _dummy_data(1, 5, 2)
_ = gc.edge_convolution_template(
data=data,
neighbors=neighbors,
sizes=((1, 1), (1, 1)),
edge_function=self._zeros,
reduction="weighted",
edge_function_kwargs=dict())
@parameterized.parameters("", "invalid")
def test_edge_convolution_template_exception_raised_reduction(
self, reduction):
"""Check that an invalid reduction method triggers the exception."""
with self.assertRaisesRegexp(ValueError, "reduction method"):
data, neighbors = _dummy_data(1, 5, 2)
gc.edge_convolution_template(
data=data,
neighbors=neighbors,
sizes=None,
edge_function=self._zeros,
reduction=reduction,
edge_function_kwargs=dict())
@parameterized.parameters(
(1, 1, 1, 1, "weighted"),
(4, 2, 3, 6, "weighted"),
(0, 1, 1, 1, "max"),
(0, 2, 3, 6, "max"),
)
def test_edge_convolution_template_output_shape(self, batch_size,
num_vertices, in_channels,
out_channels, reduction):
"""Check that the output of convolution has the correct shape."""
data, neighbors = _dummy_data(batch_size, num_vertices, in_channels)
y = gc.edge_convolution_template(
data,
neighbors,
None,
self._zeros,
reduction=reduction,
edge_function_kwargs={"out_dimensions": out_channels})
y_shape = y.shape.as_list()
with self.subTest(name="out_channels"):
self.assertEqual(y_shape[-1], out_channels)
with self.subTest(name="shape"):
self.assertAllEqual(y_shape[:-1], data.shape[:-1])
def test_edge_convolution_template_zero_neighbors(self):
"""Check that vertices with no neighbors map to zeros in the output."""
# We can reuse `self._edge_curvature_2d` as the curvature functional.
num_vertices = 500
data, neighbors = self._circular_2d_data(num_vertices, include_normals=True)
# Interleave the data with rows filled with random data, these rows will
# have no neighbors in the adjacency matrix so should map to all zeros in
# the output.
rows_odd = tf.expand_dims(
tf.range(start=1, limit=(2 * num_vertices), delta=2), -1)
rows_even = tf.expand_dims(
tf.range(start=0, limit=(2 * num_vertices + 1), delta=2), -1)
data_interleaved = tf.scatter_nd(
indices=rows_odd,
updates=data,
shape=(2 * num_vertices + 1, tf.shape(input=data)[-1]))
random_data = tf.random.uniform(
shape=(data.shape[0] + 1, data.shape[-1]), dtype=data.dtype)
random_interleaved = tf.scatter_nd(
indices=rows_even,
updates=random_data,
shape=(2 * num_vertices + 1, tf.shape(input=data)[-1]))
data_interleaved = data_interleaved + random_interleaved
neighbors_interleaved_indices = neighbors.indices * 2 + 1
neighbors_interleaved = tf.SparseTensor(
indices=neighbors_interleaved_indices,
values=neighbors.values,
dense_shape=(2 * num_vertices + 1, 2 * num_vertices + 1))
# Convolve the interleaved data.
data_curvature = gc.edge_convolution_template(
data=data_interleaved,
neighbors=neighbors_interleaved,
sizes=None,
edge_function=self._edge_curvature_2d,
reduction="weighted",
edge_function_kwargs=dict())
self.assertEqual(data_curvature.shape, (2 * num_vertices + 1, 1))
# The rows corresponding to the original input data measure the curvature.
# The curvature at any point on a circle of radius 1 should be 1.
# The interleaved rows of random data should map to zeros in the output.
self.assertAllClose(data_curvature[1::2, :],
np.ones(shape=(num_vertices, 1)))
self.assertAllClose(data_curvature[::2, :],
np.zeros(shape=(num_vertices + 1, 1)))
@parameterized.parameters(
(1, 10, 3, True, "weighted"),
(3, 6, 1, True, "weighted"),
(0, 10, 5, False, "weighted"),
(1, 10, 3, False, "max"),
(3, 6, 1, False, "max"),
(0, 10, 5, False, "max"),
)
def test_edge_convolution_template_jacobian_random(self, batch_size,
num_vertices, in_channels,
padding, reduction):
"""Test the jacobian for random input data."""
random_data = _random_data(
batch_size,
num_vertices,
in_channels,
padding,
only_self_edges=False,
data_type=np.float64,
neighbors_type=np.float64)
data_init = random_data[0]
neighbors = random_data[1]
sizes = None if not padding else random_data[2]
def edge_convolution_template(data):
return gc.edge_convolution_template(
data=data,
neighbors=neighbors,
sizes=sizes,
edge_function=self._pass_through,
reduction=reduction,
edge_function_kwargs=dict())
self.assert_jacobian_is_correct_fn(edge_convolution_template, [data_init])
def test_edge_convolution_template_preset_max(self):
data = np.array(((1, 2), (3, 4), (5, 6), (7, 8)), np.float32)
neighbors = np.array(
((0, 1, 0, 1), (0, 0, 1, 0), (1, 1, 1, 0), (0, 0, 1, 1)), np.float32)
neighbors = _dense_to_sparse(neighbors)
true = np.array(((8, 10), (8, 10), (10, 12), (14, 16)), np.float32)
with self.subTest("max_sum"):
max_sum = gc.edge_convolution_template(
data=data,
neighbors=neighbors,
sizes=None,
edge_function=lambda x, y: x + y,
reduction="max",
edge_function_kwargs=dict())
self.assertAllEqual(max_sum, true)
with self.subTest("max_sum_scaled"):
# Max reduction ignores the weights, so scaling the neighbors weights
# should not change the result.
max_sum_scaled = gc.edge_convolution_template(
data=data,
neighbors=neighbors * 10.0,
sizes=None,
edge_function=lambda x, y: x + y,
reduction="max",
edge_function_kwargs=dict())
self.assertAllEqual(max_sum_scaled, true)
@parameterized.parameters(
itertools.product((1, 5), (1, 3), (0.0, 1.0), ("weighted", "max")))
def test_edge_convolution_template_jacobian_preset(self, num_vertices,
num_channels,
data_multiplier,
reduction):
"""Test the jacobian is correct for preset inputs."""
# Corner cases include one vertex, one channel, and all-zero features.
data_init = data_multiplier * np.random.uniform(
size=(num_vertices, num_channels)).astype(np.float64)
neighbors = tf.sparse.eye(num_vertices, dtype=tf.float64)
def edge_convolution_template(data):
return gc.edge_convolution_template(
data=data,
neighbors=neighbors,
sizes=None,
edge_function=self._pass_through,
reduction=reduction,
edge_function_kwargs=dict())
self.assert_jacobian_is_correct_fn(edge_convolution_template, [data_init])
def test_edge_convolution_template_laplacian_smoothing(self):
r"""Test the expected result with laplacian smoothing.
Laplacian smoothing for meshes is defined as
$$y_i = \frac{1}{|\mathcal{N(i)}|} \sum_{j \in \mathcal{N(i)}} x_j$$
This can be computed using `edge_convolution_template` with `f(x, y)->y`.
"""
# We can reuse `self._pass_through(x, y)->y` as the smoothing functional.
with self.subTest(name="only_self_edges_random"):
num_vertices = 500
data = np.random.uniform(size=(num_vertices, 5))
neighbors = tf.sparse.eye(num_vertices, dtype=tf.as_dtype(data.dtype))
data_smoothed = gc.edge_convolution_template(
data=data,
neighbors=neighbors,
sizes=None,
edge_function=self._pass_through,
reduction="weighted",
edge_function_kwargs=dict())
self.assertAllEqual(data, data_smoothed)
with self.subTest(name="circular_2d"):
num_vertices = 500
data, neighbors = self._circular_2d_data(num_vertices)
data_smoothed = gc.edge_convolution_template(
data=data,
neighbors=neighbors,
sizes=None,
edge_function=self._pass_through,
reduction="weighted",
edge_function_kwargs=dict())
# The smoothed points should have the same direction as the originals.
data_smoothed_normalized = tf.nn.l2_normalize(data_smoothed, axis=-1)
self.assertAllClose(data, data_smoothed_normalized)
def test_edge_convolution_template_curvature(self):
r"""Test the expected result with curvature.
(Approximate) curvature for meshes is defined as
$$\kappa_{v_i} = \frac{1}{|\mathcal{N}(v_i)|}
\sum_{v_j \in \mathcal{N}(v_i)}
\frac{(\vec{v_i} - \vec{v_j})^T (\vec{n_{v_i}} -
\vec{n_{v_j}})} {\left|\vec{v_i}-\vec{v_j}\right|^2}
$$
This can be computed using `edge_convolution_template` with
    $$f(x, y) = (n_x - n_y)^T (x - y) / ||x - y||^2,$$
where $$n_x$$ and $$n_y$$ are the normals at points $$x$$ and $$y$$
respectively.
"""
# We can reuse `self._edge_curvature_2d` as the curvature functional.
num_vertices = 500
data, neighbors = self._circular_2d_data(num_vertices, include_normals=True)
data_curvature = gc.edge_convolution_template(
data=data,
neighbors=neighbors,
sizes=None,
edge_function=self._edge_curvature_2d,
reduction="weighted",
edge_function_kwargs=dict())
# The curvature at each point on a circle of radius 1 should be 1.
self.assertAllClose(data_curvature, np.ones(shape=(num_vertices, 1)))
if __name__ == "__main__":
test_case.main()
| -1 |
tensorflow/graphics | 489 | Enforce `Framebuffer` to accept only tensors with single batch dimension. | Enforce `Framebuffer` to accept only tensors with single batch dimension.
| copybara-service[bot] | "2021-02-03T21:06:22Z" | "2021-02-12T23:59:48Z" | 3a4f1952ed967fb884dc031eeda6dac3fbefbe52 | b7a2bf260d6fcf924fddcbb6dba36c72ece66990 | Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
| ./tensorflow_graphics/util/__init__.py | # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Util module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=g-import-not-at-top
from tensorflow_graphics.util.doc import _import_tfg_docs
if _import_tfg_docs():
from tensorflow_graphics.util import asserts
from tensorflow_graphics.util import export_api
from tensorflow_graphics.util import safe_ops
from tensorflow_graphics.util import shape
from tensorflow_graphics.util import test_case
from tensorflow_graphics.util import tfg_flags
# pylint: enable=g-import-not-at-top
# The util modules are not exported.
__all__ = []
| # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Util module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=g-import-not-at-top
from tensorflow_graphics.util.doc import _import_tfg_docs
if _import_tfg_docs():
from tensorflow_graphics.util import asserts
from tensorflow_graphics.util import export_api
from tensorflow_graphics.util import safe_ops
from tensorflow_graphics.util import shape
from tensorflow_graphics.util import test_case
from tensorflow_graphics.util import tfg_flags
# pylint: enable=g-import-not-at-top
# The util modules are not exported.
__all__ = []
| -1 |
tensorflow/graphics | 489 | Enforce `Framebuffer` to accept only tensors with single batch dimension. | Enforce `Framebuffer` to accept only tensors with single batch dimension.
| copybara-service[bot] | "2021-02-03T21:06:22Z" | "2021-02-12T23:59:48Z" | 3a4f1952ed967fb884dc031eeda6dac3fbefbe52 | b7a2bf260d6fcf924fddcbb6dba36c72ece66990 | Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
| ./tensorflow_graphics/geometry/transformation/quaternion.py | # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module implements TensorFlow quaternion utility functions.
A quaternion is written as $$q = xi + yj + zk + w$$, where $$i,j,k$$ form the
basis of the imaginary part. The functions implemented in this file
use the Hamilton convention where $$i^2 = j^2 = k^2 = ijk = -1$$. A quaternion
is stored in a 4-D vector $$[x, y, z, w]^T$$.
More details about Hamiltonian quaternions can be found on [this page.]
(https://en.wikipedia.org/wiki/Quaternion)
Note: Some of the functions expect normalized quaternions as inputs where
$$x^2 + y^2 + z^2 + w^2 = 1$$.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow_graphics.geometry.transformation import rotation_matrix_3d
from tensorflow_graphics.math import vector
from tensorflow_graphics.util import asserts
from tensorflow_graphics.util import export_api
from tensorflow_graphics.util import safe_ops
from tensorflow_graphics.util import shape
def _build_quaternion_from_sines_and_cosines(sin_half_angles, cos_half_angles):
"""Builds a quaternion from sines and cosines of half Euler angles.
Note:
In the following, A1 to An are optional batch dimensions.
Args:
sin_half_angles: A tensor of shape `[A1, ..., An, 3]`, where the last
dimension represents the sine of half Euler angles.
cos_half_angles: A tensor of shape `[A1, ..., An, 3]`, where the last
dimension represents the cosine of half Euler angles.
Returns:
A tensor of shape `[A1, ..., An, 4]`, where the last dimension represents
a quaternion.
"""
c1, c2, c3 = tf.unstack(cos_half_angles, axis=-1)
s1, s2, s3 = tf.unstack(sin_half_angles, axis=-1)
w = c1 * c2 * c3 + s1 * s2 * s3
x = -c1 * s2 * s3 + s1 * c2 * c3
y = c1 * s2 * c3 + s1 * c2 * s3
z = -s1 * s2 * c3 + c1 * c2 * s3
return tf.stack((x, y, z, w), axis=-1)
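# A minimal sketch (the helper name below is hypothetical and not part of the
# library API): with zero Euler angles all sines are 0 and all cosines are 1,
# so the builder above returns the identity quaternion [0, 0, 0, 1].
def _example_identity_from_zero_angles():
  sin_half_angles = tf.zeros((3,))
  cos_half_angles = tf.ones((3,))
  return _build_quaternion_from_sines_and_cosines(sin_half_angles,
                                                  cos_half_angles)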
def between_two_vectors_3d(vector1,
vector2,
name="quaternion_between_two_vectors_3d"):
"""Computes quaternion over the shortest arc between two vectors.
Result quaternion describes shortest geodesic rotation from
vector1 to vector2.
Note:
In the following, A1 to An are optional batch dimensions.
Args:
vector1: A tensor of shape `[A1, ..., An, 3]`, where the last dimension
represents the first vector.
vector2: A tensor of shape `[A1, ..., An, 3]`, where the last dimension
represents the second vector.
name: A name for this op that defaults to
"quaternion_between_two_vectors_3d".
Returns:
A tensor of shape `[A1, ..., An, 4]`, where the last dimension represents
a normalized quaternion.
Raises:
ValueError: If the shape of `vector1` or `vector2` is not supported.
"""
with tf.name_scope(name):
vector1 = tf.convert_to_tensor(value=vector1)
vector2 = tf.convert_to_tensor(value=vector2)
shape.check_static(
tensor=vector1, tensor_name="vector1", has_dim_equals=(-1, 3))
shape.check_static(
tensor=vector2, tensor_name="vector2", has_dim_equals=(-1, 3))
shape.compare_batch_dimensions(
tensors=(vector1, vector2), last_axes=-2, broadcast_compatible=True)
# Make sure that we are dealing with unit vectors.
vector1 = tf.nn.l2_normalize(vector1, axis=-1)
vector2 = tf.nn.l2_normalize(vector2, axis=-1)
cos_theta = vector.dot(vector1, vector2)
real_part = 1.0 + cos_theta
axis = vector.cross(vector1, vector2)
    # Compute an arbitrary axis orthogonal to vector1 to rotate around when
    # the two vectors are antiparallel (opposite).
x, y, z = tf.split(vector1, (1, 1, 1), axis=-1)
x_bigger_z = tf.abs(x) > tf.abs(z)
x_bigger_z = tf.concat([x_bigger_z] * 3, axis=-1)
antiparallel_axis = tf.where(x_bigger_z,
tf.concat((-y, x, tf.zeros_like(z)), axis=-1),
tf.concat((tf.zeros_like(x), -z, y), axis=-1))
# Compute rotation between two vectors.
is_antiparallel = real_part < 1e-6
is_antiparallel = tf.concat([is_antiparallel] * 4, axis=-1)
rot = tf.where(
is_antiparallel,
tf.concat((antiparallel_axis, tf.zeros_like(real_part)), axis=-1),
tf.concat((axis, real_part), axis=-1))
return tf.nn.l2_normalize(rot, axis=-1)
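# A hedged usage sketch (the example values are assumptions, not library
# docs): rotating the x-axis onto the y-axis is a 90-degree rotation about z,
# so the result should be close to [0, 0, sin(pi/4), cos(pi/4)].
def _example_between_two_vectors_3d():
  x_axis = tf.constant((1.0, 0.0, 0.0))
  y_axis = tf.constant((0.0, 1.0, 0.0))
  return between_two_vectors_3d(x_axis, y_axis)  # ~[0., 0., 0.7071, 0.7071]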
def conjugate(quaternion, name="quaternion_conjugate"):
"""Computes the conjugate of a quaternion.
Note:
In the following, A1 to An are optional batch dimensions.
Args:
quaternion: A tensor of shape `[A1, ..., An, 4]`, where the last dimension
represents a normalized quaternion.
name: A name for this op that defaults to "quaternion_conjugate".
Returns:
A tensor of shape `[A1, ..., An, 4]`, where the last dimension represents
a normalized quaternion.
Raises:
ValueError: If the shape of `quaternion` is not supported.
"""
with tf.name_scope(name):
quaternion = tf.convert_to_tensor(value=quaternion)
shape.check_static(
tensor=quaternion, tensor_name="quaternion", has_dim_equals=(-1, 4))
quaternion = asserts.assert_normalized(quaternion)
xyz, w = tf.split(quaternion, (3, 1), axis=-1)
return tf.concat((-xyz, w), axis=-1)
def from_axis_angle(axis, angle, name="quaternion_from_axis_angle"):
"""Converts an axis-angle representation to a quaternion.
Note:
In the following, A1 to An are optional batch dimensions.
Args:
axis: A tensor of shape `[A1, ..., An, 3]`, where the last dimension
represents a normalized axis.
angle: A tensor of shape `[A1, ..., An, 1]`, where the last dimension
represents an angle.
name: A name for this op that defaults to "quaternion_from_axis_angle".
Returns:
A tensor of shape `[A1, ..., An, 4]`, where the last dimension represents
a normalized quaternion.
Raises:
ValueError: If the shape of `axis` or `angle` is not supported.
"""
with tf.name_scope(name):
axis = tf.convert_to_tensor(value=axis)
angle = tf.convert_to_tensor(value=angle)
shape.check_static(tensor=axis, tensor_name="axis", has_dim_equals=(-1, 3))
shape.check_static(
tensor=angle, tensor_name="angle", has_dim_equals=(-1, 1))
shape.compare_batch_dimensions(
tensors=(axis, angle), last_axes=-2, broadcast_compatible=True)
axis = asserts.assert_normalized(axis)
half_angle = 0.5 * angle
w = tf.cos(half_angle)
xyz = tf.sin(half_angle) * axis
return tf.concat((xyz, w), axis=-1)
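# An illustrative sketch of the formula above, q = [sin(angle / 2) * axis,
# cos(angle / 2)]; the function name is hypothetical and shown only for
# demonstration.
def _example_from_axis_angle():
  import math
  axis = tf.constant((0.0, 0.0, 1.0))
  angle = tf.constant((math.pi / 2.0,))
  return from_axis_angle(axis, angle)  # ~[0., 0., 0.7071, 0.7071]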
def from_euler(angles, name="quaternion_from_euler"):
"""Converts an Euler angle representation to a quaternion.
Note:
Uses the z-y-x rotation convention (Tait-Bryan angles).
Note:
In the following, A1 to An are optional batch dimensions.
Args:
angles: A tensor of shape `[A1, ..., An, 3]`, where the last dimension
represents the three Euler angles. `[..., 0]` is the angle about `x` in
radians, `[..., 1]` is the angle about `y` in radians and `[..., 2]` is
the angle about `z` in radians.
name: A name for this op that defaults to "quaternion_from_euler".
Returns:
A tensor of shape `[A1, ..., An, 4]`, where the last dimension represents
a normalized quaternion.
Raises:
ValueError: If the shape of `angles` is not supported.
"""
with tf.name_scope(name):
angles = tf.convert_to_tensor(value=angles)
shape.check_static(
tensor=angles, tensor_name="angles", has_dim_equals=(-1, 3))
half_angles = angles / 2.0
cos_half_angles = tf.cos(half_angles)
sin_half_angles = tf.sin(half_angles)
return _build_quaternion_from_sines_and_cosines(sin_half_angles,
cos_half_angles)
def from_euler_with_small_angles_approximation(angles,
name="quaternion_from_euler"):
r"""Converts small Euler angles to quaternions.
Under the small angle assumption, $$\sin(x)$$ and $$\cos(x)$$ can be
approximated by their second order Taylor expansions, where
$$\sin(x) \approx x$$ and $$\cos(x) \approx 1 - \frac{x^2}{2}$$.
In the current implementation, the smallness of the angles is not verified.
Note:
Uses the z-y-x rotation convention (Tait-Bryan angles).
Note:
In the following, A1 to An are optional batch dimensions.
Args:
angles: A tensor of shape `[A1, ..., An, 3]`, where the last dimension
represents the three Euler angles. `[..., 0]` is the angle about `x` in
radians, `[..., 1]` is the angle about `y` in radians and `[..., 2]` is the
angle about `z` in radians.
name: A name for this op that defaults to "quaternion_from_euler".
Returns:
A tensor of shape `[A1, ..., An, 4]`, where the last dimension represents
a normalized quaternion.
Raises:
ValueError: If the shape of `angles` is not supported.
"""
with tf.name_scope(name):
angles = tf.convert_to_tensor(value=angles)
shape.check_static(
tensor=angles, tensor_name="angles", has_dim_equals=(-1, 3))
half_angles = angles / 2.0
cos_half_angles = 1.0 - 0.5 * half_angles * half_angles
sin_half_angles = half_angles
quaternion = _build_quaternion_from_sines_and_cosines(
sin_half_angles, cos_half_angles)
# We need to normalize the quaternion due to the small angle approximation.
return tf.nn.l2_normalize(quaternion, axis=-1)
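# A small sanity sketch under assumed values (not part of the API): for
# angles around 0.01 radian the approximation should agree with the exact
# conversion to roughly 1e-6.
def _example_small_angle_approximation():
  angles = tf.constant((0.01, -0.02, 0.015))
  exact = from_euler(angles)
  approximate = from_euler_with_small_angles_approximation(angles)
  return exact, approximate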
def from_rotation_matrix(rotation_matrix,
name="quaternion_from_rotation_matrix"):
"""Converts a rotation matrix representation to a quaternion.
Warning:
This function is not smooth everywhere.
Note:
In the following, A1 to An are optional batch dimensions.
Args:
rotation_matrix: A tensor of shape `[A1, ..., An, 3, 3]`, where the last two
dimensions represent a rotation matrix.
name: A name for this op that defaults to "quaternion_from_rotation_matrix".
Returns:
A tensor of shape `[A1, ..., An, 4]`, where the last dimension represents
a normalized quaternion.
Raises:
ValueError: If the shape of `rotation_matrix` is not supported.
"""
with tf.name_scope(name):
rotation_matrix = tf.convert_to_tensor(value=rotation_matrix)
shape.check_static(
tensor=rotation_matrix,
tensor_name="rotation_matrix",
has_rank_greater_than=1,
has_dim_equals=((-1, 3), (-2, 3)))
rotation_matrix = rotation_matrix_3d.assert_rotation_matrix_normalized(
rotation_matrix)
trace = tf.linalg.trace(rotation_matrix)
eps_addition = asserts.select_eps_for_addition(rotation_matrix.dtype)
rows = tf.unstack(rotation_matrix, axis=-2)
entries = [tf.unstack(row, axis=-1) for row in rows]
def tr_positive():
sq = tf.sqrt(trace + 1.0) * 2. # sq = 4 * qw.
qw = 0.25 * sq
qx = safe_ops.safe_unsigned_div(entries[2][1] - entries[1][2], sq)
qy = safe_ops.safe_unsigned_div(entries[0][2] - entries[2][0], sq)
qz = safe_ops.safe_unsigned_div(entries[1][0] - entries[0][1], sq)
return tf.stack((qx, qy, qz, qw), axis=-1)
def cond_1():
sq = tf.sqrt(1.0 + entries[0][0] - entries[1][1] - entries[2][2] +
eps_addition) * 2. # sq = 4 * qx.
qw = safe_ops.safe_unsigned_div(entries[2][1] - entries[1][2], sq)
qx = 0.25 * sq
qy = safe_ops.safe_unsigned_div(entries[0][1] + entries[1][0], sq)
qz = safe_ops.safe_unsigned_div(entries[0][2] + entries[2][0], sq)
return tf.stack((qx, qy, qz, qw), axis=-1)
def cond_2():
sq = tf.sqrt(1.0 + entries[1][1] - entries[0][0] - entries[2][2] +
eps_addition) * 2. # sq = 4 * qy.
qw = safe_ops.safe_unsigned_div(entries[0][2] - entries[2][0], sq)
qx = safe_ops.safe_unsigned_div(entries[0][1] + entries[1][0], sq)
qy = 0.25 * sq
qz = safe_ops.safe_unsigned_div(entries[1][2] + entries[2][1], sq)
return tf.stack((qx, qy, qz, qw), axis=-1)
def cond_3():
sq = tf.sqrt(1.0 + entries[2][2] - entries[0][0] - entries[1][1] +
eps_addition) * 2. # sq = 4 * qz.
qw = safe_ops.safe_unsigned_div(entries[1][0] - entries[0][1], sq)
qx = safe_ops.safe_unsigned_div(entries[0][2] + entries[2][0], sq)
qy = safe_ops.safe_unsigned_div(entries[1][2] + entries[2][1], sq)
qz = 0.25 * sq
return tf.stack((qx, qy, qz, qw), axis=-1)
def cond_idx(cond):
cond = tf.expand_dims(cond, -1)
cond = tf.tile(cond, [1] * (rotation_matrix.shape.ndims - 2) + [4])
return cond
where_2 = tf.where(
cond_idx(entries[1][1] > entries[2][2]), cond_2(), cond_3())
where_1 = tf.where(
cond_idx((entries[0][0] > entries[1][1])
& (entries[0][0] > entries[2][2])), cond_1(), where_2)
quat = tf.where(cond_idx(trace > 0), tr_positive(), where_1)
return quat
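# The branching above is the standard trace-based extraction (often
# attributed to Shepperd): the largest of the trace and the diagonal entries
# selects the numerically stable branch. A minimal sketch with an assumed
# input: the identity rotation maps to the identity quaternion.
def _example_from_rotation_matrix():
  return from_rotation_matrix(tf.eye(3))  # ~[0., 0., 0., 1.]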
def inverse(quaternion, name="quaternion_inverse"):
"""Computes the inverse of a quaternion.
Note:
In the following, A1 to An are optional batch dimensions.
Args:
quaternion: A tensor of shape `[A1, ..., An, 4]`, where the last dimension
represents a normalized quaternion.
name: A name for this op that defaults to "quaternion_inverse".
Returns:
A tensor of shape `[A1, ..., An, 4]`, where the last dimension represents
a normalized quaternion.
Raises:
ValueError: If the shape of `quaternion` is not supported.
"""
with tf.name_scope(name):
quaternion = tf.convert_to_tensor(value=quaternion)
shape.check_static(
tensor=quaternion, tensor_name="quaternion", has_dim_equals=(-1, 4))
quaternion = asserts.assert_normalized(quaternion)
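    # For a unit quaternion the squared norm is 1, so the inverse reduces to
    # the conjugate; the explicit safe division guards nearly-normalized
    # inputs.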
squared_norm = tf.reduce_sum(
input_tensor=tf.square(quaternion), axis=-1, keepdims=True)
return safe_ops.safe_unsigned_div(conjugate(quaternion), squared_norm)
def is_normalized(quaternion, atol=1e-3, name="quaternion_is_normalized"):
"""Determines if quaternion is normalized quaternion or not.
Note:
In the following, A1 to An are optional batch dimensions.
Args:
quaternion: A tensor of shape `[A1, ..., An, 4]`, where the last dimension
represents a quaternion.
atol: The absolute tolerance parameter.
name: A name for this op that defaults to "quaternion_is_normalized".
Returns:
A tensor of type `bool` and shape `[A1, ..., An, 1]`, where False indicates
that the quaternion is not normalized.
Raises:
ValueError: If the shape of `quaternion` is not supported.
"""
with tf.name_scope(name):
quaternion = tf.convert_to_tensor(value=quaternion)
shape.check_static(
tensor=quaternion, tensor_name="quaternion", has_dim_equals=(-1, 4))
norms = tf.norm(tensor=quaternion, axis=-1, keepdims=True)
return tf.where(
tf.abs(norms - 1.) < atol, tf.ones_like(norms, dtype=bool),
tf.zeros_like(norms, dtype=bool))
def normalize(quaternion, eps=1e-12, name="quaternion_normalize"):
"""Normalizes a quaternion.
Note:
In the following, A1 to An are optional batch dimensions.
Args:
quaternion: A tensor of shape `[A1, ..., An, 4]`, where the last dimension
represents a quaternion.
eps: A lower bound value for the norm that defaults to 1e-12.
name: A name for this op that defaults to "quaternion_normalize".
Returns:
    An N-D tensor of shape `[A1, ..., An, 4]` where the quaternion elements
      have been normalized.
Raises:
ValueError: If the shape of `quaternion` is not supported.
"""
with tf.name_scope(name):
quaternion = tf.convert_to_tensor(value=quaternion)
shape.check_static(
tensor=quaternion, tensor_name="quaternion", has_dim_equals=(-1, 4))
return tf.math.l2_normalize(quaternion, axis=-1, epsilon=eps)
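# Usage sketch (editorial, illustrative only):
#   normalize((2., 0., 0., 0.))  # -> [1., 0., 0., 0.]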
def multiply(quaternion1, quaternion2, name="quaternion_multiply"):
"""Multiplies two quaternions.
Note:
In the following, A1 to An are optional batch dimensions.
Args:
quaternion1: A tensor of shape `[A1, ..., An, 4]`, where the last dimension
represents a quaternion.
quaternion2: A tensor of shape `[A1, ..., An, 4]`, where the last dimension
represents a quaternion.
name: A name for this op that defaults to "quaternion_multiply".
Returns:
A tensor of shape `[A1, ..., An, 4]` representing quaternions.
Raises:
ValueError: If the shape of `quaternion1` or `quaternion2` is not supported.
"""
with tf.name_scope(name):
quaternion1 = tf.convert_to_tensor(value=quaternion1)
quaternion2 = tf.convert_to_tensor(value=quaternion2)
shape.check_static(
tensor=quaternion1, tensor_name="quaternion1", has_dim_equals=(-1, 4))
shape.check_static(
tensor=quaternion2, tensor_name="quaternion2", has_dim_equals=(-1, 4))
x1, y1, z1, w1 = tf.unstack(quaternion1, axis=-1)
x2, y2, z2, w2 = tf.unstack(quaternion2, axis=-1)
x = x1 * w2 + y1 * z2 - z1 * y2 + w1 * x2
y = -x1 * z2 + y1 * w2 + z1 * x2 + w1 * y2
z = x1 * y2 - y1 * x2 + z1 * w2 + w1 * z2
w = -x1 * x2 - y1 * y2 - z1 * z2 + w1 * w2
return tf.stack((x, y, z, w), axis=-1)
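# Usage sketch (editorial, illustrative only): composing two 90-degree
# rotations about z yields a 180-degree rotation about z.
#   q = (0., 0., 0.70710678, 0.70710678)
#   multiply(q, q)  # -> approximately [0., 0., 1., 0.]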
def normalized_random_uniform(quaternion_shape,
name="quaternion_normalized_random_uniform"):
"""Random normalized quaternion following a uniform distribution law on SO(3).
Args:
quaternion_shape: A list representing the shape of the output tensor.
name: A name for this op that defaults to
"quaternion_normalized_random_uniform".
Returns:
A tensor of shape `[quaternion_shape[0],...,quaternion_shape[-1], 4]`
representing random normalized quaternions.
"""
with tf.name_scope(name):
quaternion_shape = tf.convert_to_tensor(
value=quaternion_shape, dtype=tf.int32)
quaternion_shape = tf.concat((quaternion_shape, tf.constant([4])), axis=0)
random_normal = tf.random.normal(quaternion_shape)
return normalize(random_normal)
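# Usage sketch (editorial, illustrative only): a 2x3 batch of random unit
# quaternions.
#   normalized_random_uniform([2, 3])  # -> tensor of shape [2, 3, 4]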
def normalized_random_uniform_initializer():
"""Random unit quaternion initializer."""
# Since variable initializers must take `shape` as input, we cannot prevent
# a clash between util.shape and the argument here. Therefore we have to
# disable redefined-outer-name for this function.
# pylint: disable=redefined-outer-name
def _initializer(shape, dtype=tf.float32, partition_info=None):
"""Generate a random normalized quaternion.
Note:
In the following, A1 to An are optional batch dimensions.
Args:
shape: A list representing the shape of the output. The last entry of the
list must be `4`.
dtype: type of the output (tf.float32 is the only type supported).
partition_info: how the variable is partitioned (not used).
Returns:
A tensor of shape `[A1, ..., An, 4]` representing normalized quaternions.
Raises:
ValueError: If `shape` or `dtype` are not supported.
"""
del partition_info # unused
if dtype != tf.float32:
raise ValueError("'dtype' must be tf.float32.")
if shape[-1] != 4:
raise ValueError("Last dimension of 'shape' must be 4.")
return normalized_random_uniform(shape[:-1])
return _initializer
# pylint: enable=redefined-outer-name
def rotate(point, quaternion, name="quaternion_rotate"):
"""Rotates a point using a quaternion.
Note:
In the following, A1 to An are optional batch dimensions.
Args:
point: A tensor of shape `[A1, ..., An, 3]`, where the last dimension
represents a 3d point.
quaternion: A tensor of shape `[A1, ..., An, 4]`, where the last dimension
represents a normalized quaternion.
name: A name for this op that defaults to "quaternion_rotate".
Returns:
A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents a
3d point.
Raises:
ValueError: If the shape of `point` or `quaternion` is not supported.
"""
with tf.name_scope(name):
point = tf.convert_to_tensor(value=point)
quaternion = tf.convert_to_tensor(value=quaternion)
shape.check_static(
tensor=point, tensor_name="point", has_dim_equals=(-1, 3))
shape.check_static(
tensor=quaternion, tensor_name="quaternion", has_dim_equals=(-1, 4))
shape.compare_batch_dimensions(
tensors=(point, quaternion), last_axes=-2, broadcast_compatible=True)
quaternion = asserts.assert_normalized(quaternion)
padding = [[0, 0] for _ in range(point.shape.ndims)]
padding[-1][-1] = 1
point = tf.pad(tensor=point, paddings=padding, mode="CONSTANT")
point = multiply(quaternion, point)
point = multiply(point, conjugate(quaternion))
xyz, _ = tf.split(point, (3, 1), axis=-1)
return xyz
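# Usage sketch (editorial, illustrative only): a 90-degree rotation about z
# maps the x-axis onto the y-axis.
#   rotate((1., 0., 0.), (0., 0., 0.70710678, 0.70710678))
#   # -> approximately [0., 1., 0.]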
def relative_angle(quaternion1, quaternion2, name="quaternion_relative_angle"):
r"""Computes the unsigned relative rotation angle between 2 unit quaternions.
  Given two normalized quaternions $$\mathbf{q}_1$$ and $$\mathbf{q}_2$$, the
relative angle is computed as
$$\theta = 2\arccos(\mathbf{q}_1^T\mathbf{q}_2)$$.
Note:
In the following, A1 to An are optional batch dimensions.
Args:
quaternion1: A tensor of shape `[A1, ..., An, 4]`, where the last dimension
represents a normalized quaternion.
quaternion2: A tensor of shape `[A1, ..., An, 4]`, where the last dimension
represents a normalized quaternion.
name: A name for this op that defaults to "quaternion_relative_angle".
Returns:
A tensor of shape `[A1, ..., An, 1]` where the last dimension represents
rotation angles in the range [0.0, pi].
Raises:
ValueError: If the shape of `quaternion1` or `quaternion2` is not supported.
"""
with tf.name_scope(name):
quaternion1 = tf.convert_to_tensor(value=quaternion1)
quaternion2 = tf.convert_to_tensor(value=quaternion2)
shape.check_static(
tensor=quaternion1, tensor_name="quaternion1", has_dim_equals=(-1, 4))
shape.check_static(
tensor=quaternion2, tensor_name="quaternion2", has_dim_equals=(-1, 4))
quaternion1 = asserts.assert_normalized(quaternion1)
quaternion2 = asserts.assert_normalized(quaternion2)
dot_product = vector.dot(quaternion1, quaternion2, keepdims=False)
    # Ensure dot product is in range [-1, 1].
eps_dot_prod = 4.0 * asserts.select_eps_for_addition(dot_product.dtype)
dot_product = safe_ops.safe_shrink(
dot_product, -1.0, 1.0, False, eps=eps_dot_prod)
return 2.0 * tf.acos(tf.abs(dot_product))
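# Usage sketch (editorial, illustrative only): the angle between the identity
# quaternion and a 90-degree rotation about z is pi / 2.
#   relative_angle((0., 0., 0., 1.), (0., 0., 0.70710678, 0.70710678))
#   # -> approximately 1.5707963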
# API contains all public functions and classes.
__all__ = export_api.get_functions_and_classes()
| # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module implements TensorFlow quaternion utility functions.
A quaternion is written as $$q = xi + yj + zk + w$$, where $$i,j,k$$ form the
three bases of the imaginary part. The functions implemented in this file
use the Hamilton convention where $$i^2 = j^2 = k^2 = ijk = -1$$. A quaternion
is stored in a 4-D vector $$[x, y, z, w]^T$$.
More details about Hamiltonian quaternions can be found on [this page.]
(https://en.wikipedia.org/wiki/Quaternion)
Note: Some of the functions expect normalized quaternions as inputs where
$$x^2 + y^2 + z^2 + w^2 = 1$$.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow_graphics.geometry.transformation import rotation_matrix_3d
from tensorflow_graphics.math import vector
from tensorflow_graphics.util import asserts
from tensorflow_graphics.util import export_api
from tensorflow_graphics.util import safe_ops
from tensorflow_graphics.util import shape
def _build_quaternion_from_sines_and_cosines(sin_half_angles, cos_half_angles):
"""Builds a quaternion from sines and cosines of half Euler angles.
Note:
In the following, A1 to An are optional batch dimensions.
Args:
sin_half_angles: A tensor of shape `[A1, ..., An, 3]`, where the last
dimension represents the sine of half Euler angles.
cos_half_angles: A tensor of shape `[A1, ..., An, 3]`, where the last
dimension represents the cosine of half Euler angles.
Returns:
A tensor of shape `[A1, ..., An, 4]`, where the last dimension represents
a quaternion.
"""
c1, c2, c3 = tf.unstack(cos_half_angles, axis=-1)
s1, s2, s3 = tf.unstack(sin_half_angles, axis=-1)
w = c1 * c2 * c3 + s1 * s2 * s3
x = -c1 * s2 * s3 + s1 * c2 * c3
y = c1 * s2 * c3 + s1 * c2 * s3
z = -s1 * s2 * c3 + c1 * c2 * s3
return tf.stack((x, y, z, w), axis=-1)
def between_two_vectors_3d(vector1,
vector2,
name="quaternion_between_two_vectors_3d"):
"""Computes quaternion over the shortest arc between two vectors.
Result quaternion describes shortest geodesic rotation from
vector1 to vector2.
Note:
In the following, A1 to An are optional batch dimensions.
Args:
vector1: A tensor of shape `[A1, ..., An, 3]`, where the last dimension
represents the first vector.
vector2: A tensor of shape `[A1, ..., An, 3]`, where the last dimension
represents the second vector.
name: A name for this op that defaults to
"quaternion_between_two_vectors_3d".
Returns:
A tensor of shape `[A1, ..., An, 4]`, where the last dimension represents
a normalized quaternion.
Raises:
ValueError: If the shape of `vector1` or `vector2` is not supported.
"""
with tf.name_scope(name):
vector1 = tf.convert_to_tensor(value=vector1)
vector2 = tf.convert_to_tensor(value=vector2)
shape.check_static(
tensor=vector1, tensor_name="vector1", has_dim_equals=(-1, 3))
shape.check_static(
tensor=vector2, tensor_name="vector2", has_dim_equals=(-1, 3))
shape.compare_batch_dimensions(
tensors=(vector1, vector2), last_axes=-2, broadcast_compatible=True)
# Make sure that we are dealing with unit vectors.
vector1 = tf.nn.l2_normalize(vector1, axis=-1)
vector2 = tf.nn.l2_normalize(vector2, axis=-1)
cos_theta = vector.dot(vector1, vector2)
real_part = 1.0 + cos_theta
axis = vector.cross(vector1, vector2)
# Compute arbitrary antiparallel axes to rotate around in case of opposite
# vectors.
x, y, z = tf.split(vector1, (1, 1, 1), axis=-1)
x_bigger_z = tf.abs(x) > tf.abs(z)
x_bigger_z = tf.concat([x_bigger_z] * 3, axis=-1)
antiparallel_axis = tf.where(x_bigger_z,
tf.concat((-y, x, tf.zeros_like(z)), axis=-1),
tf.concat((tf.zeros_like(x), -z, y), axis=-1))
# Compute rotation between two vectors.
is_antiparallel = real_part < 1e-6
is_antiparallel = tf.concat([is_antiparallel] * 4, axis=-1)
rot = tf.where(
is_antiparallel,
tf.concat((antiparallel_axis, tf.zeros_like(real_part)), axis=-1),
tf.concat((axis, real_part), axis=-1))
return tf.nn.l2_normalize(rot, axis=-1)
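# Usage sketch (editorial, illustrative only): rotating the x-axis onto the
# y-axis is a 90-degree rotation about z.
#   between_two_vectors_3d((1., 0., 0.), (0., 1., 0.))
#   # -> approximately [0., 0., 0.70710678, 0.70710678]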
def conjugate(quaternion, name="quaternion_conjugate"):
"""Computes the conjugate of a quaternion.
Note:
In the following, A1 to An are optional batch dimensions.
Args:
quaternion: A tensor of shape `[A1, ..., An, 4]`, where the last dimension
represents a normalized quaternion.
name: A name for this op that defaults to "quaternion_conjugate".
Returns:
A tensor of shape `[A1, ..., An, 4]`, where the last dimension represents
a normalized quaternion.
Raises:
ValueError: If the shape of `quaternion` is not supported.
"""
with tf.name_scope(name):
quaternion = tf.convert_to_tensor(value=quaternion)
shape.check_static(
tensor=quaternion, tensor_name="quaternion", has_dim_equals=(-1, 4))
quaternion = asserts.assert_normalized(quaternion)
xyz, w = tf.split(quaternion, (3, 1), axis=-1)
return tf.concat((-xyz, w), axis=-1)
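# Usage sketch (editorial, illustrative only): conjugation negates the
# imaginary part.
#   conjugate((0., 0., 0.70710678, 0.70710678))
#   # -> approximately [0., 0., -0.70710678, 0.70710678]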
def from_axis_angle(axis, angle, name="quaternion_from_axis_angle"):
"""Converts an axis-angle representation to a quaternion.
Note:
In the following, A1 to An are optional batch dimensions.
Args:
axis: A tensor of shape `[A1, ..., An, 3]`, where the last dimension
represents a normalized axis.
angle: A tensor of shape `[A1, ..., An, 1]`, where the last dimension
represents an angle.
name: A name for this op that defaults to "quaternion_from_axis_angle".
Returns:
A tensor of shape `[A1, ..., An, 4]`, where the last dimension represents
a normalized quaternion.
Raises:
ValueError: If the shape of `axis` or `angle` is not supported.
"""
with tf.name_scope(name):
axis = tf.convert_to_tensor(value=axis)
angle = tf.convert_to_tensor(value=angle)
shape.check_static(tensor=axis, tensor_name="axis", has_dim_equals=(-1, 3))
shape.check_static(
tensor=angle, tensor_name="angle", has_dim_equals=(-1, 1))
shape.compare_batch_dimensions(
tensors=(axis, angle), last_axes=-2, broadcast_compatible=True)
axis = asserts.assert_normalized(axis)
half_angle = 0.5 * angle
w = tf.cos(half_angle)
xyz = tf.sin(half_angle) * axis
return tf.concat((xyz, w), axis=-1)
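# Usage sketch (editorial, illustrative only): a 90-degree (~1.5707963 rad)
# rotation about z.
#   from_axis_angle(axis=(0., 0., 1.), angle=(1.5707963,))
#   # -> approximately [0., 0., 0.70710678, 0.70710678]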
def from_euler(angles, name="quaternion_from_euler"):
"""Converts an Euler angle representation to a quaternion.
Note:
Uses the z-y-x rotation convention (Tait-Bryan angles).
Note:
In the following, A1 to An are optional batch dimensions.
Args:
angles: A tensor of shape `[A1, ..., An, 3]`, where the last dimension
represents the three Euler angles. `[..., 0]` is the angle about `x` in
radians, `[..., 1]` is the angle about `y` in radians and `[..., 2]` is
the angle about `z` in radians.
name: A name for this op that defaults to "quaternion_from_euler".
Returns:
A tensor of shape `[A1, ..., An, 4]`, where the last dimension represents
a normalized quaternion.
Raises:
ValueError: If the shape of `angles` is not supported.
"""
with tf.name_scope(name):
angles = tf.convert_to_tensor(value=angles)
shape.check_static(
tensor=angles, tensor_name="angles", has_dim_equals=(-1, 3))
half_angles = angles / 2.0
cos_half_angles = tf.cos(half_angles)
sin_half_angles = tf.sin(half_angles)
return _build_quaternion_from_sines_and_cosines(sin_half_angles,
cos_half_angles)
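# Usage sketch (editorial, illustrative only): Euler angles (0, 0, pi/2)
# give a 90-degree rotation about z.
#   from_euler((0., 0., 1.5707963))
#   # -> approximately [0., 0., 0.70710678, 0.70710678]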
def from_euler_with_small_angles_approximation(angles,
name="quaternion_from_euler"):
r"""Converts small Euler angles to quaternions.
Under the small angle assumption, $$\sin(x)$$ and $$\cos(x)$$ can be
approximated by their second order Taylor expansions, where
$$\sin(x) \approx x$$ and $$\cos(x) \approx 1 - \frac{x^2}{2}$$.
In the current implementation, the smallness of the angles is not verified.
Note:
Uses the z-y-x rotation convention (Tait-Bryan angles).
Note:
In the following, A1 to An are optional batch dimensions.
Args:
angles: A tensor of shape `[A1, ..., An, 3]`, where the last dimension
represents the three Euler angles. `[..., 0]` is the angle about `x` in
radians, `[..., 1]` is the angle about `y` in radians and `[..., 2]` is the
angle about `z` in radians.
name: A name for this op that defaults to "quaternion_from_euler".
Returns:
A tensor of shape `[A1, ..., An, 4]`, where the last dimension represents
a normalized quaternion.
Raises:
ValueError: If the shape of `angles` is not supported.
"""
with tf.name_scope(name):
angles = tf.convert_to_tensor(value=angles)
shape.check_static(
tensor=angles, tensor_name="angles", has_dim_equals=(-1, 3))
half_angles = angles / 2.0
cos_half_angles = 1.0 - 0.5 * half_angles * half_angles
sin_half_angles = half_angles
quaternion = _build_quaternion_from_sines_and_cosines(
sin_half_angles, cos_half_angles)
# We need to normalize the quaternion due to the small angle approximation.
return tf.nn.l2_normalize(quaternion, axis=-1)
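# Usage sketch (editorial, illustrative only): for small angles the result
# closely matches from_euler.
#   from_euler_with_small_angles_approximation((0.01, 0., 0.))
#   # -> approximately [0.005, 0., 0., 1.]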
def from_rotation_matrix(rotation_matrix,
name="quaternion_from_rotation_matrix"):
"""Converts a rotation matrix representation to a quaternion.
Warning:
This function is not smooth everywhere.
Note:
In the following, A1 to An are optional batch dimensions.
Args:
rotation_matrix: A tensor of shape `[A1, ..., An, 3, 3]`, where the last two
dimensions represent a rotation matrix.
name: A name for this op that defaults to "quaternion_from_rotation_matrix".
Returns:
A tensor of shape `[A1, ..., An, 4]`, where the last dimension represents
a normalized quaternion.
Raises:
ValueError: If the shape of `rotation_matrix` is not supported.
"""
with tf.name_scope(name):
rotation_matrix = tf.convert_to_tensor(value=rotation_matrix)
shape.check_static(
tensor=rotation_matrix,
tensor_name="rotation_matrix",
has_rank_greater_than=1,
has_dim_equals=((-1, 3), (-2, 3)))
rotation_matrix = rotation_matrix_3d.assert_rotation_matrix_normalized(
rotation_matrix)
trace = tf.linalg.trace(rotation_matrix)
eps_addition = asserts.select_eps_for_addition(rotation_matrix.dtype)
rows = tf.unstack(rotation_matrix, axis=-2)
entries = [tf.unstack(row, axis=-1) for row in rows]
def tr_positive():
sq = tf.sqrt(trace + 1.0) * 2. # sq = 4 * qw.
qw = 0.25 * sq
qx = safe_ops.safe_unsigned_div(entries[2][1] - entries[1][2], sq)
qy = safe_ops.safe_unsigned_div(entries[0][2] - entries[2][0], sq)
qz = safe_ops.safe_unsigned_div(entries[1][0] - entries[0][1], sq)
return tf.stack((qx, qy, qz, qw), axis=-1)
def cond_1():
sq = tf.sqrt(1.0 + entries[0][0] - entries[1][1] - entries[2][2] +
eps_addition) * 2. # sq = 4 * qx.
qw = safe_ops.safe_unsigned_div(entries[2][1] - entries[1][2], sq)
qx = 0.25 * sq
qy = safe_ops.safe_unsigned_div(entries[0][1] + entries[1][0], sq)
qz = safe_ops.safe_unsigned_div(entries[0][2] + entries[2][0], sq)
return tf.stack((qx, qy, qz, qw), axis=-1)
def cond_2():
sq = tf.sqrt(1.0 + entries[1][1] - entries[0][0] - entries[2][2] +
eps_addition) * 2. # sq = 4 * qy.
qw = safe_ops.safe_unsigned_div(entries[0][2] - entries[2][0], sq)
qx = safe_ops.safe_unsigned_div(entries[0][1] + entries[1][0], sq)
qy = 0.25 * sq
qz = safe_ops.safe_unsigned_div(entries[1][2] + entries[2][1], sq)
return tf.stack((qx, qy, qz, qw), axis=-1)
def cond_3():
sq = tf.sqrt(1.0 + entries[2][2] - entries[0][0] - entries[1][1] +
eps_addition) * 2. # sq = 4 * qz.
qw = safe_ops.safe_unsigned_div(entries[1][0] - entries[0][1], sq)
qx = safe_ops.safe_unsigned_div(entries[0][2] + entries[2][0], sq)
qy = safe_ops.safe_unsigned_div(entries[1][2] + entries[2][1], sq)
qz = 0.25 * sq
return tf.stack((qx, qy, qz, qw), axis=-1)
def cond_idx(cond):
cond = tf.expand_dims(cond, -1)
cond = tf.tile(cond, [1] * (rotation_matrix.shape.ndims - 2) + [4])
return cond
where_2 = tf.where(
cond_idx(entries[1][1] > entries[2][2]), cond_2(), cond_3())
where_1 = tf.where(
cond_idx((entries[0][0] > entries[1][1])
& (entries[0][0] > entries[2][2])), cond_1(), where_2)
quat = tf.where(cond_idx(trace > 0), tr_positive(), where_1)
return quat
def inverse(quaternion, name="quaternion_inverse"):
"""Computes the inverse of a quaternion.
Note:
In the following, A1 to An are optional batch dimensions.
Args:
quaternion: A tensor of shape `[A1, ..., An, 4]`, where the last dimension
represents a normalized quaternion.
name: A name for this op that defaults to "quaternion_inverse".
Returns:
A tensor of shape `[A1, ..., An, 4]`, where the last dimension represents
a normalized quaternion.
Raises:
ValueError: If the shape of `quaternion` is not supported.
"""
with tf.name_scope(name):
quaternion = tf.convert_to_tensor(value=quaternion)
shape.check_static(
tensor=quaternion, tensor_name="quaternion", has_dim_equals=(-1, 4))
quaternion = asserts.assert_normalized(quaternion)
squared_norm = tf.reduce_sum(
input_tensor=tf.square(quaternion), axis=-1, keepdims=True)
return safe_ops.safe_unsigned_div(conjugate(quaternion), squared_norm)
def is_normalized(quaternion, atol=1e-3, name="quaternion_is_normalized"):
"""Determines if quaternion is normalized quaternion or not.
Note:
In the following, A1 to An are optional batch dimensions.
Args:
quaternion: A tensor of shape `[A1, ..., An, 4]`, where the last dimension
represents a quaternion.
atol: The absolute tolerance parameter.
name: A name for this op that defaults to "quaternion_is_normalized".
Returns:
A tensor of type `bool` and shape `[A1, ..., An, 1]`, where False indicates
that the quaternion is not normalized.
Raises:
ValueError: If the shape of `quaternion` is not supported.
"""
with tf.name_scope(name):
quaternion = tf.convert_to_tensor(value=quaternion)
shape.check_static(
tensor=quaternion, tensor_name="quaternion", has_dim_equals=(-1, 4))
norms = tf.norm(tensor=quaternion, axis=-1, keepdims=True)
return tf.where(
tf.abs(norms - 1.) < atol, tf.ones_like(norms, dtype=bool),
tf.zeros_like(norms, dtype=bool))
def normalize(quaternion, eps=1e-12, name="quaternion_normalize"):
"""Normalizes a quaternion.
Note:
In the following, A1 to An are optional batch dimensions.
Args:
quaternion: A tensor of shape `[A1, ..., An, 4]`, where the last dimension
represents a quaternion.
eps: A lower bound value for the norm that defaults to 1e-12.
name: A name for this op that defaults to "quaternion_normalize".
Returns:
    An N-D tensor of shape `[A1, ..., An, 4]` where the quaternion elements
      have been normalized.
Raises:
ValueError: If the shape of `quaternion` is not supported.
"""
with tf.name_scope(name):
quaternion = tf.convert_to_tensor(value=quaternion)
shape.check_static(
tensor=quaternion, tensor_name="quaternion", has_dim_equals=(-1, 4))
return tf.math.l2_normalize(quaternion, axis=-1, epsilon=eps)
def multiply(quaternion1, quaternion2, name="quaternion_multiply"):
"""Multiplies two quaternions.
Note:
In the following, A1 to An are optional batch dimensions.
Args:
quaternion1: A tensor of shape `[A1, ..., An, 4]`, where the last dimension
represents a quaternion.
quaternion2: A tensor of shape `[A1, ..., An, 4]`, where the last dimension
represents a quaternion.
name: A name for this op that defaults to "quaternion_multiply".
Returns:
A tensor of shape `[A1, ..., An, 4]` representing quaternions.
Raises:
ValueError: If the shape of `quaternion1` or `quaternion2` is not supported.
"""
with tf.name_scope(name):
quaternion1 = tf.convert_to_tensor(value=quaternion1)
quaternion2 = tf.convert_to_tensor(value=quaternion2)
shape.check_static(
tensor=quaternion1, tensor_name="quaternion1", has_dim_equals=(-1, 4))
shape.check_static(
tensor=quaternion2, tensor_name="quaternion2", has_dim_equals=(-1, 4))
x1, y1, z1, w1 = tf.unstack(quaternion1, axis=-1)
x2, y2, z2, w2 = tf.unstack(quaternion2, axis=-1)
x = x1 * w2 + y1 * z2 - z1 * y2 + w1 * x2
y = -x1 * z2 + y1 * w2 + z1 * x2 + w1 * y2
z = x1 * y2 - y1 * x2 + z1 * w2 + w1 * z2
w = -x1 * x2 - y1 * y2 - z1 * z2 + w1 * w2
return tf.stack((x, y, z, w), axis=-1)
def normalized_random_uniform(quaternion_shape,
name="quaternion_normalized_random_uniform"):
"""Random normalized quaternion following a uniform distribution law on SO(3).
Args:
quaternion_shape: A list representing the shape of the output tensor.
name: A name for this op that defaults to
"quaternion_normalized_random_uniform".
Returns:
A tensor of shape `[quaternion_shape[0],...,quaternion_shape[-1], 4]`
representing random normalized quaternions.
"""
with tf.name_scope(name):
quaternion_shape = tf.convert_to_tensor(
value=quaternion_shape, dtype=tf.int32)
quaternion_shape = tf.concat((quaternion_shape, tf.constant([4])), axis=0)
random_normal = tf.random.normal(quaternion_shape)
return normalize(random_normal)
def normalized_random_uniform_initializer():
"""Random unit quaternion initializer."""
# Since variable initializers must take `shape` as input, we cannot prevent
# a clash between util.shape and the argument here. Therefore we have to
# disable redefined-outer-name for this function.
# pylint: disable=redefined-outer-name
def _initializer(shape, dtype=tf.float32, partition_info=None):
"""Generate a random normalized quaternion.
Note:
In the following, A1 to An are optional batch dimensions.
Args:
shape: A list representing the shape of the output. The last entry of the
list must be `4`.
dtype: type of the output (tf.float32 is the only type supported).
partition_info: how the variable is partitioned (not used).
Returns:
A tensor of shape `[A1, ..., An, 4]` representing normalized quaternions.
Raises:
ValueError: If `shape` or `dtype` are not supported.
"""
del partition_info # unused
if dtype != tf.float32:
raise ValueError("'dtype' must be tf.float32.")
if shape[-1] != 4:
raise ValueError("Last dimension of 'shape' must be 4.")
return normalized_random_uniform(shape[:-1])
return _initializer
# pylint: enable=redefined-outer-name
def rotate(point, quaternion, name="quaternion_rotate"):
"""Rotates a point using a quaternion.
Note:
In the following, A1 to An are optional batch dimensions.
Args:
point: A tensor of shape `[A1, ..., An, 3]`, where the last dimension
represents a 3d point.
quaternion: A tensor of shape `[A1, ..., An, 4]`, where the last dimension
represents a normalized quaternion.
name: A name for this op that defaults to "quaternion_rotate".
Returns:
A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents a
3d point.
Raises:
ValueError: If the shape of `point` or `quaternion` is not supported.
"""
with tf.name_scope(name):
point = tf.convert_to_tensor(value=point)
quaternion = tf.convert_to_tensor(value=quaternion)
shape.check_static(
tensor=point, tensor_name="point", has_dim_equals=(-1, 3))
shape.check_static(
tensor=quaternion, tensor_name="quaternion", has_dim_equals=(-1, 4))
shape.compare_batch_dimensions(
tensors=(point, quaternion), last_axes=-2, broadcast_compatible=True)
quaternion = asserts.assert_normalized(quaternion)
padding = [[0, 0] for _ in range(point.shape.ndims)]
padding[-1][-1] = 1
point = tf.pad(tensor=point, paddings=padding, mode="CONSTANT")
point = multiply(quaternion, point)
point = multiply(point, conjugate(quaternion))
xyz, _ = tf.split(point, (3, 1), axis=-1)
return xyz
def relative_angle(quaternion1, quaternion2, name="quaternion_relative_angle"):
r"""Computes the unsigned relative rotation angle between 2 unit quaternions.
  Given two normalized quaternions $$\mathbf{q}_1$$ and $$\mathbf{q}_2$$, the
relative angle is computed as
$$\theta = 2\arccos(\mathbf{q}_1^T\mathbf{q}_2)$$.
Note:
In the following, A1 to An are optional batch dimensions.
Args:
quaternion1: A tensor of shape `[A1, ..., An, 4]`, where the last dimension
represents a normalized quaternion.
quaternion2: A tensor of shape `[A1, ..., An, 4]`, where the last dimension
represents a normalized quaternion.
name: A name for this op that defaults to "quaternion_relative_angle".
Returns:
A tensor of shape `[A1, ..., An, 1]` where the last dimension represents
rotation angles in the range [0.0, pi].
Raises:
ValueError: If the shape of `quaternion1` or `quaternion2` is not supported.
"""
with tf.name_scope(name):
quaternion1 = tf.convert_to_tensor(value=quaternion1)
quaternion2 = tf.convert_to_tensor(value=quaternion2)
shape.check_static(
tensor=quaternion1, tensor_name="quaternion1", has_dim_equals=(-1, 4))
shape.check_static(
tensor=quaternion2, tensor_name="quaternion2", has_dim_equals=(-1, 4))
quaternion1 = asserts.assert_normalized(quaternion1)
quaternion2 = asserts.assert_normalized(quaternion2)
dot_product = vector.dot(quaternion1, quaternion2, keepdims=False)
    # Ensure dot product is in range [-1, 1].
eps_dot_prod = 4.0 * asserts.select_eps_for_addition(dot_product.dtype)
dot_product = safe_ops.safe_shrink(
dot_product, -1.0, 1.0, False, eps=eps_dot_prod)
return 2.0 * tf.acos(tf.abs(dot_product))
# API contains all public functions and classes.
__all__ = export_api.get_functions_and_classes()
| -1 |
tensorflow/graphics | 489 | Enforce `Framebuffer` to accept only tensors with single batch dimension. | Enforce `Framebuffer` to accept only tensors with single batch dimension.
| copybara-service[bot] | "2021-02-03T21:06:22Z" | "2021-02-12T23:59:48Z" | 3a4f1952ed967fb884dc031eeda6dac3fbefbe52 | b7a2bf260d6fcf924fddcbb6dba36c72ece66990 | Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
| ./tensorflow_graphics/geometry/representation/point.py | # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tensorflow point utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow_graphics.math import vector
from tensorflow_graphics.util import asserts
from tensorflow_graphics.util import export_api
from tensorflow_graphics.util import shape
def distance_to_ray(point,
origin,
direction,
keepdims=True,
name="point_distance_to_ray"):
"""Computes the distance from a M-d point to a M-d ray.
Note:
In the following, A1 to An are optional batch dimensions, which must be
broadcast compatible.
Args:
point: A tensor of shape `[A1, ..., An, M]`.
origin: A tensor of shape `[A1, ..., An, M]`.
direction: A tensor of shape `[A1, ..., An, M]`. The last dimension must be
normalized.
keepdims: A `bool`, whether to keep the last dimension with length 1 or to
remove it.
name: A name for this op. Defaults to "point_distance_to_ray".
Returns:
A tensor of shape `[A1, ..., An, 1]` containing the distance from each point
to the corresponding ray.
Raises:
    ValueError: If the shape of `point`, `origin`, or `direction` is not
supported.
"""
with tf.name_scope(name):
point = tf.convert_to_tensor(value=point)
origin = tf.convert_to_tensor(value=origin)
direction = tf.convert_to_tensor(value=direction)
shape.compare_dimensions((point, origin, direction), -1,
("point", "origin", "direction"))
shape.compare_batch_dimensions(
tensors=(point, origin, direction),
last_axes=-2,
broadcast_compatible=True)
direction = asserts.assert_normalized(direction)
vec = point - origin
dot = vector.dot(vec, direction)
vec -= dot * direction
return tf.norm(tensor=vec, axis=-1, keepdims=keepdims)
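# Usage sketch (editorial, illustrative only): the point (1, 1, 0) lies at
# distance 1 from the ray along x through the origin.
#   distance_to_ray((1., 1., 0.), (0., 0., 0.), (1., 0., 0.))  # -> [1.]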
def project_to_ray(point, origin, direction, name="point_project_to_ray"):
"""Computes the projection of a M-d point on a M-d ray.
Note:
In the following, A1 to An are optional batch dimensions, which must be
broadcast compatible.
Args:
point: A tensor of shape `[A1, ..., An, M]`.
origin: A tensor of shape `[A1, ..., An, M]`.
direction: A tensor of shape `[A1, ..., An, M]`. The last dimension must be
normalized.
name: A name for this op. Defaults to "point_project_to_ray".
Returns:
A tensor of shape `[A1, ..., An, M]` containing the projected point.
Raises:
    ValueError: If the shape of `point`, `origin`, or `direction` is not
supported.
"""
with tf.name_scope(name):
point = tf.convert_to_tensor(value=point)
origin = tf.convert_to_tensor(value=origin)
direction = tf.convert_to_tensor(value=direction)
shape.compare_dimensions((point, origin, direction), -1,
("point", "origin", "direction"))
shape.compare_batch_dimensions(
tensors=(point, origin, direction),
last_axes=-2,
broadcast_compatible=True)
direction = asserts.assert_normalized(direction)
vec = point - origin
dot = vector.dot(vec, direction)
return origin + dot * direction
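# Usage sketch (editorial, illustrative only): the point (1, 1, 0) projects
# onto (1, 0, 0) on the ray along x through the origin.
#   project_to_ray((1., 1., 0.), (0., 0., 0.), (1., 0., 0.))
#   # -> [1., 0., 0.]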
# API contains all public functions and classes.
__all__ = export_api.get_functions_and_classes()
| # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tensorflow point utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow_graphics.math import vector
from tensorflow_graphics.util import asserts
from tensorflow_graphics.util import export_api
from tensorflow_graphics.util import shape
def distance_to_ray(point,
origin,
direction,
keepdims=True,
name="point_distance_to_ray"):
"""Computes the distance from a M-d point to a M-d ray.
Note:
In the following, A1 to An are optional batch dimensions, which must be
broadcast compatible.
Args:
point: A tensor of shape `[A1, ..., An, M]`.
origin: A tensor of shape `[A1, ..., An, M]`.
direction: A tensor of shape `[A1, ..., An, M]`. The last dimension must be
normalized.
keepdims: A `bool`, whether to keep the last dimension with length 1 or to
remove it.
name: A name for this op. Defaults to "point_distance_to_ray".
Returns:
A tensor of shape `[A1, ..., An, 1]` containing the distance from each point
to the corresponding ray.
Raises:
    ValueError: If the shape of `point`, `origin`, or `direction` is not
supported.
"""
with tf.name_scope(name):
point = tf.convert_to_tensor(value=point)
origin = tf.convert_to_tensor(value=origin)
direction = tf.convert_to_tensor(value=direction)
shape.compare_dimensions((point, origin, direction), -1,
("point", "origin", "direction"))
shape.compare_batch_dimensions(
tensors=(point, origin, direction),
last_axes=-2,
broadcast_compatible=True)
direction = asserts.assert_normalized(direction)
vec = point - origin
dot = vector.dot(vec, direction)
vec -= dot * direction
return tf.norm(tensor=vec, axis=-1, keepdims=keepdims)
def project_to_ray(point, origin, direction, name="point_project_to_ray"):
"""Computes the projection of a M-d point on a M-d ray.
Note:
In the following, A1 to An are optional batch dimensions, which must be
broadcast compatible.
Args:
point: A tensor of shape `[A1, ..., An, M]`.
origin: A tensor of shape `[A1, ..., An, M]`.
direction: A tensor of shape `[A1, ..., An, M]`. The last dimension must be
normalized.
name: A name for this op. Defaults to "point_project_to_ray".
Returns:
A tensor of shape `[A1, ..., An, M]` containing the projected point.
Raises:
    ValueError: If the shape of `point`, `origin`, or `direction` is not
supported.
"""
with tf.name_scope(name):
point = tf.convert_to_tensor(value=point)
origin = tf.convert_to_tensor(value=origin)
direction = tf.convert_to_tensor(value=direction)
shape.compare_dimensions((point, origin, direction), -1,
("point", "origin", "direction"))
shape.compare_batch_dimensions(
tensors=(point, origin, direction),
last_axes=-2,
broadcast_compatible=True)
direction = asserts.assert_normalized(direction)
vec = point - origin
dot = vector.dot(vec, direction)
return origin + dot * direction
# API contains all public functions and classes.
__all__ = export_api.get_functions_and_classes()
| -1 |
tensorflow/graphics | 489 | Enforce `Framebuffer` to accept only tensors with single batch dimension. | Enforce `Framebuffer` to accept only tensors with single batch dimension.
| copybara-service[bot] | "2021-02-03T21:06:22Z" | "2021-02-12T23:59:48Z" | 3a4f1952ed967fb884dc031eeda6dac3fbefbe52 | b7a2bf260d6fcf924fddcbb6dba36c72ece66990 | Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
| ./tensorflow_graphics/util/tests/__init__.py | # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| -1 |
tensorflow/graphics | 489 | Enforce `Framebuffer` to accept only tensors with single batch dimension. | Enforce `Framebuffer` to accept only tensors with single batch dimension.
| copybara-service[bot] | "2021-02-03T21:06:22Z" | "2021-02-12T23:59:48Z" | 3a4f1952ed967fb884dc031eeda6dac3fbefbe52 | b7a2bf260d6fcf924fddcbb6dba36c72ece66990 | Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
| ./tensorflow_graphics/datasets/pix3d/fakes/model/bed/IKEA_MALM_2/3d_keypoints.txt | -0.286364 -0.032406 -0.383607
-0.286361 -0.139994 -0.384352
0.286633 -0.032366 -0.383598
0.286637 -0.138093 -0.384767
0.286443 -0.032130 0.364876
-0.286424 -0.032754 0.364012
-0.286583 0.139071 0.384867
0.284924 0.137388 0.385138
0.284489 -0.140542 0.383167
-0.282070 -0.140273 0.385144
| -0.286364 -0.032406 -0.383607
-0.286361 -0.139994 -0.384352
0.286633 -0.032366 -0.383598
0.286637 -0.138093 -0.384767
0.286443 -0.032130 0.364876
-0.286424 -0.032754 0.364012
-0.286583 0.139071 0.384867
0.284924 0.137388 0.385138
0.284489 -0.140542 0.383167
-0.282070 -0.140273 0.385144
| -1 |
tensorflow/graphics | 489 | Enforce `Framebuffer` to accept only tensors with single batch dimension. | Enforce `Framebuffer` to accept only tensors with single batch dimension.
| copybara-service[bot] | "2021-02-03T21:06:22Z" | "2021-02-12T23:59:48Z" | 3a4f1952ed967fb884dc031eeda6dac3fbefbe52 | b7a2bf260d6fcf924fddcbb6dba36c72ece66990 | Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
| ./tensorflow_graphics/geometry/representation/grid.py | # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tensorflow grid utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow_graphics.util import export_api
from tensorflow_graphics.util import shape
def _grid(starts, stops, nums):
"""Generates a M-D uniform axis-aligned grid.
Warning:
This op is not differentiable. Indeed, the gradient of tf.linspace and
tf.meshgrid are currently not defined.
Args:
starts: A tensor of shape `[M]` representing the start points for each
dimension.
stops: A tensor of shape `[M]` representing the end points for each
dimension.
nums: A tensor of shape `[M]` representing the number of subdivisions for
each dimension.
Returns:
A tensor of shape `[nums[0], ..., nums[M-1], M]` containing an M-D uniform
grid.
"""
params = [tf.unstack(tensor) for tensor in [starts, stops, nums]]
layout = [tf.linspace(*param) for param in zip(*params)]
return tf.stack(tf.meshgrid(*layout, indexing="ij"), axis=-1)
def generate(starts, stops, nums, name="grid_generate"):
r"""Generates a M-D uniform axis-aligned grid.
Warning:
This op is not differentiable. Indeed, the gradient of tf.linspace and
tf.meshgrid are currently not defined.
Note:
In the following, `B` is an optional batch dimension.
Args:
starts: A tensor of shape `[M]` or `[B, M]`, where the last dimension
      represents an M-D start point.
stops: A tensor of shape `[M]` or `[B, M]`, where the last dimension
      represents an M-D end point.
nums: A tensor of shape `[M]` representing the number of subdivisions for
each dimension.
name: A name for this op. Defaults to "grid_generate".
Returns:
A tensor of shape `[nums[0], ..., nums[M-1], M]` containing an M-D uniform
grid or a tensor of shape `[B, nums[0], ..., nums[M-1], M]` containing B
M-D uniform grids. Please refer to the example below for more details.
Raises:
ValueError: If the shape of `starts`, `stops`, or `nums` is not supported.
Examples:
```python
print(generate((-1.0, -2.0), (1.0, 2.0), (3, 5)))
>>> [[[-1. -2.]
[-1. -1.]
[-1. 0.]
[-1. 1.]
[-1. 2.]]
[[ 0. -2.]
[ 0. -1.]
[ 0. 0.]
[ 0. 1.]
[ 0. 2.]]
[[ 1. -2.]
[ 1. -1.]
[ 1. 0.]
[ 1. 1.]
[ 1. 2.]]]
```
  Generates a 3x5 2D grid from -1.0 to 1.0 with 3 subdivisions for the x
  axis and from -2.0 to 2.0 with 5 subdivisions for the y axis. This leads to
  a tensor of shape `(3, 5, 2)`.
"""
with tf.name_scope(name):
starts = tf.convert_to_tensor(value=starts)
stops = tf.convert_to_tensor(value=stops)
nums = tf.convert_to_tensor(value=nums)
shape.check_static(
tensor=starts,
tensor_name="starts",
has_rank_greater_than=0,
has_rank_less_than=3)
shape.check_static(
tensor=stops,
tensor_name="stops",
has_rank_greater_than=0,
has_rank_less_than=3)
shape.check_static(tensor=nums, tensor_name="nums", has_rank=1)
shape.compare_batch_dimensions(
tensors=(starts, stops), last_axes=(-1, -1), broadcast_compatible=False)
shape.compare_dimensions((starts, stops, nums), -1,
("starts", "stops", "nums"))
if starts.shape.ndims == 1:
return _grid(starts, stops, nums)
else:
return tf.stack([
_grid(starts, stops, nums)
for starts, stops in zip(tf.unstack(starts), tf.unstack(stops))
])
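# Usage sketch (editorial, illustrative only): the batched form stacks one
# grid per batch entry.
#   generate(starts=((-1., -1.), (0., 0.)), stops=((1., 1.), (2., 2.)),
#            nums=(2, 2))  # -> tensor of shape (2, 2, 2, 2)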
# API contains all public functions and classes.
__all__ = export_api.get_functions_and_classes()
| # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tensorflow grid utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow_graphics.util import export_api
from tensorflow_graphics.util import shape
def _grid(starts, stops, nums):
"""Generates a M-D uniform axis-aligned grid.
Warning:
This op is not differentiable. Indeed, the gradient of tf.linspace and
tf.meshgrid are currently not defined.
Args:
starts: A tensor of shape `[M]` representing the start points for each
dimension.
stops: A tensor of shape `[M]` representing the end points for each
dimension.
nums: A tensor of shape `[M]` representing the number of subdivisions for
each dimension.
Returns:
A tensor of shape `[nums[0], ..., nums[M-1], M]` containing an M-D uniform
grid.
"""
params = [tf.unstack(tensor) for tensor in [starts, stops, nums]]
layout = [tf.linspace(*param) for param in zip(*params)]
return tf.stack(tf.meshgrid(*layout, indexing="ij"), axis=-1)
def generate(starts, stops, nums, name="grid_generate"):
r"""Generates a M-D uniform axis-aligned grid.
Warning:
This op is not differentiable. Indeed, the gradient of tf.linspace and
tf.meshgrid are currently not defined.
Note:
In the following, `B` is an optional batch dimension.
Args:
starts: A tensor of shape `[M]` or `[B, M]`, where the last dimension
      represents an M-D start point.
stops: A tensor of shape `[M]` or `[B, M]`, where the last dimension
      represents an M-D end point.
nums: A tensor of shape `[M]` representing the number of subdivisions for
each dimension.
name: A name for this op. Defaults to "grid_generate".
Returns:
A tensor of shape `[nums[0], ..., nums[M-1], M]` containing an M-D uniform
grid or a tensor of shape `[B, nums[0], ..., nums[M-1], M]` containing B
M-D uniform grids. Please refer to the example below for more details.
Raises:
ValueError: If the shape of `starts`, `stops`, or `nums` is not supported.
Examples:
```python
print(generate((-1.0, -2.0), (1.0, 2.0), (3, 5)))
>>> [[[-1. -2.]
[-1. -1.]
[-1. 0.]
[-1. 1.]
[-1. 2.]]
[[ 0. -2.]
[ 0. -1.]
[ 0. 0.]
[ 0. 1.]
[ 0. 2.]]
[[ 1. -2.]
[ 1. -1.]
[ 1. 0.]
[ 1. 1.]
[ 1. 2.]]]
```
  Generates a 3x5 2D grid from -1.0 to 1.0 with 3 subdivisions for the x
  axis and from -2.0 to 2.0 with 5 subdivisions for the y axis. This leads to
  a tensor of shape `(3, 5, 2)`.
"""
with tf.name_scope(name):
starts = tf.convert_to_tensor(value=starts)
stops = tf.convert_to_tensor(value=stops)
nums = tf.convert_to_tensor(value=nums)
shape.check_static(
tensor=starts,
tensor_name="starts",
has_rank_greater_than=0,
has_rank_less_than=3)
shape.check_static(
tensor=stops,
tensor_name="stops",
has_rank_greater_than=0,
has_rank_less_than=3)
shape.check_static(tensor=nums, tensor_name="nums", has_rank=1)
shape.compare_batch_dimensions(
tensors=(starts, stops), last_axes=(-1, -1), broadcast_compatible=False)
shape.compare_dimensions((starts, stops, nums), -1,
("starts", "stops", "nums"))
if starts.shape.ndims == 1:
return _grid(starts, stops, nums)
else:
return tf.stack([
_grid(starts, stops, nums)
for starts, stops in zip(tf.unstack(starts), tf.unstack(stops))
])
# API contains all public functions and classes.
__all__ = export_api.get_functions_and_classes()
| -1 |
tensorflow/graphics | 489 | Enforce `Framebuffer` to accept only tensors with single batch dimension. | Enforce `Framebuffer` to accept only tensors with single batch dimension.
| copybara-service[bot] | "2021-02-03T21:06:22Z" | "2021-02-12T23:59:48Z" | 3a4f1952ed967fb884dc031eeda6dac3fbefbe52 | b7a2bf260d6fcf924fddcbb6dba36c72ece66990 | Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
| ./tensorflow_graphics/rendering/opengl/cleanup.h | /* Copyright 2020 The TensorFlow Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef THIRD_PARTY_PY_TENSORFLOW_GRAPHICS_RENDERING_OPENGL_CLEANUP_H_
#define THIRD_PARTY_PY_TENSORFLOW_GRAPHICS_RENDERING_OPENGL_CLEANUP_H_
#include <type_traits>
#include <utility>
// A move-only RAII object that calls a stored cleanup functor when
// destroyed. Cleanup<F> is the return type of MakeCleanup(F).
template <typename F>
class Cleanup {
public:
Cleanup()
: released_(true), f_() {}
template <typename G>
explicit Cleanup(G&& f) // NOLINT
: f_(std::forward<G>(f)) {} // NOLINT(build/c++11)
Cleanup(Cleanup&& src) // NOLINT
: released_(src.is_released()), f_(src.release()) { }
// Implicitly move-constructible from any compatible Cleanup<G>.
// The source will be released as if src.release() were called.
// A moved-from Cleanup can be safely destroyed or reassigned.
template <typename G>
Cleanup(Cleanup<G>&& src) // NOLINT
: released_(src.is_released()), f_(src.release()) { }
// Assignment to a Cleanup object behaves like destroying it
// and making a new one in its place, analogous to unique_ptr
// semantics.
Cleanup& operator=(Cleanup&& src) { // NOLINT
if (!released_) std::move(f_)();
released_ = src.released_;
f_ = src.release();
return *this;
}
~Cleanup() {
if (!released_) std::move(f_)();
}
// Releases the cleanup function instead of running it.
// Hint: use c.release()() to run early.
F release() {
released_ = true;
return std::move(f_);
}
bool is_released() const { return released_; }
private:
static_assert(!std::is_reference<F>::value, "F must not be a reference");
bool released_ = false;
F f_;
};
// MakeCleanup(f) returns an RAII cleanup object that calls 'f' in its
// destructor. The easiest way to use MakeCleanup is with a lambda argument,
// capturing the return value in an 'auto' local variable. Most users will not
// need more sophisticated syntax than that.
//
// Example:
// void func() {
// FILE* fp = fopen("data.txt", "r");
// if (fp == nullptr) return;
// auto fp_cleaner = gtl::MakeCleanup([fp] { fclose(fp); });
// // No matter what, fclose(fp) will happen.
// DataObject d;
// while (ReadDataObject(fp, &d)) {
// if (d.IsBad()) {
// LOG(ERROR) << "Bad Data";
// return;
// }
// PushGoodData(d);
// }
// }
//
// You can use Cleanup<F> directly, instead of using MakeCleanup and auto,
// but there's rarely a reason to do that.
//
// You can call 'release()' on a Cleanup object to cancel the cleanup.
template <int&... ExplicitParameterBarrier, typename F,
typename DecayF = typename std::decay<F>::type>
Cleanup<DecayF> MakeCleanup(F&& f) {
return Cleanup<DecayF>(std::forward<F>(f));
}
#endif // THIRD_PARTY_PY_TENSORFLOW_GRAPHICS_RENDERING_OPENGL_CLEANUP_H_
| /* Copyright 2020 The TensorFlow Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef THIRD_PARTY_PY_TENSORFLOW_GRAPHICS_RENDERING_OPENGL_CLEANUP_H_
#define THIRD_PARTY_PY_TENSORFLOW_GRAPHICS_RENDERING_OPENGL_CLEANUP_H_
#include <type_traits>
#include <utility>
// A move-only RAII object that calls a stored cleanup functor when
// destroyed. Cleanup<F> is the return type of MakeCleanup(F).
template <typename F>
class Cleanup {
public:
Cleanup()
: released_(true), f_() {}
template <typename G>
explicit Cleanup(G&& f) // NOLINT
: f_(std::forward<G>(f)) {} // NOLINT(build/c++11)
Cleanup(Cleanup&& src) // NOLINT
: released_(src.is_released()), f_(src.release()) { }
// Implicitly move-constructible from any compatible Cleanup<G>.
// The source will be released as if src.release() were called.
// A moved-from Cleanup can be safely destroyed or reassigned.
template <typename G>
Cleanup(Cleanup<G>&& src) // NOLINT
: released_(src.is_released()), f_(src.release()) { }
// Assignment to a Cleanup object behaves like destroying it
// and making a new one in its place, analogous to unique_ptr
// semantics.
Cleanup& operator=(Cleanup&& src) { // NOLINT
if (!released_) std::move(f_)();
released_ = src.released_;
f_ = src.release();
return *this;
}
~Cleanup() {
if (!released_) std::move(f_)();
}
// Releases the cleanup function instead of running it.
// Hint: use c.release()() to run early.
F release() {
released_ = true;
return std::move(f_);
}
bool is_released() const { return released_; }
private:
static_assert(!std::is_reference<F>::value, "F must not be a reference");
bool released_ = false;
F f_;
};
// MakeCleanup(f) returns an RAII cleanup object that calls 'f' in its
// destructor. The easiest way to use MakeCleanup is with a lambda argument,
// capturing the return value in an 'auto' local variable. Most users will not
// need more sophisticated syntax than that.
//
// Example:
// void func() {
// FILE* fp = fopen("data.txt", "r");
// if (fp == nullptr) return;
// auto fp_cleaner = gtl::MakeCleanup([fp] { fclose(fp); });
// // No matter what, fclose(fp) will happen.
// DataObject d;
// while (ReadDataObject(fp, &d)) {
// if (d.IsBad()) {
// LOG(ERROR) << "Bad Data";
// return;
// }
// PushGoodData(d);
// }
// }
//
// You can use Cleanup<F> directly, instead of using MakeCleanup and auto,
// but there's rarely a reason to do that.
//
// You can call 'release()' on a Cleanup object to cancel the cleanup.
template <int&... ExplicitParameterBarrier, typename F,
typename DecayF = typename std::decay<F>::type>
Cleanup<DecayF> MakeCleanup(F&& f) {
return Cleanup<DecayF>(std::forward<F>(f));
}
#endif // THIRD_PARTY_PY_TENSORFLOW_GRAPHICS_RENDERING_OPENGL_CLEANUP_H_
| -1 |
tensorflow/graphics | 489 | Enforce `Framebuffer` to accept only tensors with single batch dimension. | Enforce `Framebuffer` to accept only tensors with single batch dimension.
| copybara-service[bot] | "2021-02-03T21:06:22Z" | "2021-02-12T23:59:48Z" | 3a4f1952ed967fb884dc031eeda6dac3fbefbe52 | b7a2bf260d6fcf924fddcbb6dba36c72ece66990 | Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
| ./tensorflow_graphics/projects/pointnet/__init__.py | # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PointNet module."""
| # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PointNet module."""
| -1 |
tensorflow/graphics | 489 | Enforce `Framebuffer` to accept only tensors with single batch dimension. | Enforce `Framebuffer` to accept only tensors with single batch dimension.
| copybara-service[bot] | "2021-02-03T21:06:22Z" | "2021-02-12T23:59:48Z" | 3a4f1952ed967fb884dc031eeda6dac3fbefbe52 | b7a2bf260d6fcf924fddcbb6dba36c72ece66990 | Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
| ./tensorflow_graphics/rendering/opengl/egl_util.h | /* Copyright 2020 The TensorFlow Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef THIRD_PARTY_PY_TENSORFLOW_GRAPHICS_RENDERING_OPENGL_EGL_UTIL_H_
#define THIRD_PARTY_PY_TENSORFLOW_GRAPHICS_RENDERING_OPENGL_EGL_UTIL_H_
#include <EGL/egl.h>
#ifdef __cplusplus
extern "C" {
#endif
// Creates and initializes an EGL display at the specified device_index. Unlike
// the standard eglGetDisplay(), this function takes a device_index, iterates
// through all the available devices on the machine using EGL extensions, and
// returns the Nth successfully initialized EGLDisplay. This allows us to get a
// valid EGL display on multi-GPU machines, where we limit access to a sub-set
// of the available GPU devices. Returns an initialized EGLDisplay or
// EGL_NO_DISPLAY on error.
EGLDisplay CreateInitializedEGLDisplayAtIndex(int device_index);
// Helper function to create EGL display at device index 0.
EGLDisplay CreateInitializedEGLDisplay(void);
// Helper function to only call eglTerminate() once all instances created from
// CreateInitializedEGLDisplay() have been terminated. This is necessary because
// calling eglTerminate will invalidate *all* contexts associated with a given
// display within the same address space.
EGLBoolean TerminateInitializedEGLDisplay(EGLDisplay display);
// Helper function that unloads any remaining resources used for internal
// bookkeeping. Ordinary user code generally should not need to call this,
// but it is useful when, say, using this code as part of a DSO that is
// loaded and unloaded repeatedly. This function must not be called more
// than once per process (or DSO load). It should generally be called just
// before exit.
void ShutDownEGLSubsystem(void);
#ifdef __cplusplus
} // extern "C"
#endif
#endif // THIRD_PARTY_PY_TENSORFLOW_GRAPHICS_RENDERING_OPENGL_EGL_UTIL_H_
| /* Copyright 2020 The TensorFlow Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef THIRD_PARTY_PY_TENSORFLOW_GRAPHICS_RENDERING_OPENGL_EGL_UTIL_H_
#define THIRD_PARTY_PY_TENSORFLOW_GRAPHICS_RENDERING_OPENGL_EGL_UTIL_H_
#include <EGL/egl.h>
#ifdef __cplusplus
extern "C" {
#endif
// Creates and initializes an EGL display at the specified device_index. Unlike
// the standard eglGetDisplay(), this function takes a device_index, iterates
// through all the available devices on the machine using EGL extensions, and
// returns the Nth successfully initialized EGLDisplay. This allows us to get a
// valid EGL display on multi-GPU machines, where we limit access to a sub-set
// of the available GPU devices. Returns an initialized EGLDisplay or
// EGL_NO_DISPLAY on error.
EGLDisplay CreateInitializedEGLDisplayAtIndex(int device_index);
// Helper function to create EGL display at device index 0.
EGLDisplay CreateInitializedEGLDisplay(void);
// Helper function to only call eglTerminate() once all instances created from
// CreateInitializedEGLDisplay() have been terminated. This is necessary because
// calling eglTerminate will invalidate *all* contexts associated with a given
// display within the same address space.
EGLBoolean TerminateInitializedEGLDisplay(EGLDisplay display);
// Helper function that unloads any remaining resources used for internal
// bookkeeping. Ordinary user code generally should not need to call this,
// but it is useful when, say, using this code as part of a DSO that is
// loaded and unloaded repeatedly. This function must not be called more
// than once per process (or DSO load). It should generally be called just
// before exit.
void ShutDownEGLSubsystem(void);
#ifdef __cplusplus
} // extern "C"
#endif
#endif // THIRD_PARTY_PY_TENSORFLOW_GRAPHICS_RENDERING_OPENGL_EGL_UTIL_H_
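// A minimal usage sketch (illustrative, not part of the original header),
// based only on the declarations above:
//
//   EGLDisplay display = CreateInitializedEGLDisplayAtIndex(0);
//   if (display == EGL_NO_DISPLAY) { /* handle initialization failure */ }
//   /* ... create EGL contexts and surfaces, render ... */
//   TerminateInitializedEGLDisplay(display);
//   ShutDownEGLSubsystem();  /* at most once, just before process exit */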
| -1 |
tensorflow/graphics | 489 | Enforce `Framebuffer` to accept only tensors with single batch dimension. | Enforce `Framebuffer` to accept only tensors with single batch dimension.
| copybara-service[bot] | "2021-02-03T21:06:22Z" | "2021-02-12T23:59:48Z" | 3a4f1952ed967fb884dc031eeda6dac3fbefbe52 | b7a2bf260d6fcf924fddcbb6dba36c72ece66990 | Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
| ./tensorflow_graphics/geometry/transformation/axis_angle.py | # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""This module implements axis-angle functionalities.
The axis-angle representation is defined as $$\theta\mathbf{a}$$, where
$$\mathbf{a}$$ is a unit vector indicating the direction of rotation and
$$\theta$$ is a scalar controlling the angle of rotation. It is important to
note that the axis-angle does not perform rotation by itself, but that it can be
used to rotate any given vector $$\mathbf{v} \in {\mathbb{R}^3}$$ into
a vector $$\mathbf{v}'$$ using the Rodrigues' rotation formula:
$$\mathbf{v}'=\mathbf{v}\cos(\theta)+(\mathbf{a}\times\mathbf{v})\sin(\theta)
+\mathbf{a}(\mathbf{a}\cdot\mathbf{v})(1-\cos(\theta)).$$
More details about the axis-angle formalism can be found on
[this page](https://en.wikipedia.org/wiki/Axis%E2%80%93angle_representation).
Note: Some of the functions defined in the module expect
a normalized axis $$\mathbf{a} = [x, y, z]^T$$ as inputs where
$$x^2 + y^2 + z^2 = 1$$.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow_graphics.geometry.transformation import quaternion as quaternion_lib
from tensorflow_graphics.geometry.transformation import rotation_matrix_3d
from tensorflow_graphics.math import vector
from tensorflow_graphics.util import asserts
from tensorflow_graphics.util import export_api
from tensorflow_graphics.util import safe_ops
from tensorflow_graphics.util import shape
def from_euler(angles, name="axis_angle_from_euler"):
r"""Converts Euler angles to an axis-angle representation.
Note:
The conversion is performed by first converting to a quaternion
representation, and then by converting the quaternion to an axis-angle.
Note:
In the following, A1 to An are optional batch dimensions.
Args:
angles: A tensor of shape `[A1, ..., An, 3]`, where the last dimension
represents the three Euler angles. `[A1, ..., An, 0]` is the angle about
      `x` in radians, `[A1, ..., An, 1]` is the angle about `y` in radians, and
`[A1, ..., An, 2]` is the angle about `z` in radians.
name: A name for this op that defaults to "axis_angle_from_euler".
Returns:
A tuple of two tensors, respectively of shape `[A1, ..., An, 3]` and
`[A1, ..., An, 1]`, where the first tensor represents the axis, and the
second represents the angle. The resulting axis is a normalized vector.
"""
with tf.name_scope(name):
quaternion = quaternion_lib.from_euler(angles)
return from_quaternion(quaternion)
def from_euler_with_small_angles_approximation(
angles, name="axis_angle_from_euler_with_small_angles_approximation"):
r"""Converts small Euler angles to an axis-angle representation.
Under the small angle assumption, $$\sin(x)$$ and $$\cos(x)$$ can be
approximated by their second order Taylor expansions, where
$$\sin(x) \approx x$$ and $$\cos(x) \approx 1 - \frac{x^2}{2}$$.
In the current implementation, the smallness of the angles is not verified.
Note:
The conversion is performed by first converting to a quaternion
representation, and then by converting the quaternion to an axis-angle.
Note:
In the following, A1 to An are optional batch dimensions.
Args:
angles: A tensor of shape `[A1, ..., An, 3]`, where the last dimension
represents the three small Euler angles. `[A1, ..., An, 0]` is the angle
      about `x` in radians, `[A1, ..., An, 1]` is the angle about `y` in radians,
and `[A1, ..., An, 2]` is the angle about `z` in radians.
name: A name for this op that defaults to
"axis_angle_from_euler_with_small_angles_approximation".
Returns:
A tuple of two tensors, respectively of shape `[A1, ..., An, 3]` and
`[A1, ..., An, 1]`, where the first tensor represents the axis, and the
second represents the angle. The resulting axis is a normalized vector.
"""
with tf.name_scope(name):
quaternion = quaternion_lib.from_euler_with_small_angles_approximation(
angles)
return from_quaternion(quaternion)
def from_quaternion(quaternion, name="axis_angle_from_quaternion"):
"""Converts a quaternion to an axis-angle representation.
Note:
In the following, A1 to An are optional batch dimensions.
Args:
quaternion: A tensor of shape `[A1, ..., An, 4]`, where the last dimension
represents a normalized quaternion.
name: A name for this op that defaults to "axis_angle_from_quaternion".
Returns:
Tuple of two tensors of shape `[A1, ..., An, 3]` and `[A1, ..., An, 1]`,
where the first tensor represents the axis, and the second represents the
angle. The resulting axis is a normalized vector.
Raises:
ValueError: If the shape of `quaternion` is not supported.
"""
with tf.name_scope(name):
quaternion = tf.convert_to_tensor(value=quaternion)
shape.check_static(
tensor=quaternion, tensor_name="quaternion", has_dim_equals=(-1, 4))
quaternion = asserts.assert_normalized(quaternion)
# This prevents zero norm xyz and zero w, and is differentiable.
quaternion += asserts.select_eps_for_addition(quaternion.dtype)
xyz, w = tf.split(quaternion, (3, 1), axis=-1)
norm = tf.norm(tensor=xyz, axis=-1, keepdims=True)
angle = 2.0 * tf.atan2(norm, tf.abs(w))
axis = safe_ops.safe_unsigned_div(safe_ops.nonzero_sign(w) * xyz, norm)
return axis, angle
def from_rotation_matrix(rotation_matrix,
name="axis_angle_from_rotation_matrix"):
"""Converts a rotation matrix to an axis-angle representation.
Note:
In the current version the returned axis-angle representation is not unique
for a given rotation matrix. Since a direct conversion would not really be
faster, we first transform the rotation matrix to a quaternion, and finally
perform the conversion from that quaternion to the corresponding axis-angle
representation.
Note:
In the following, A1 to An are optional batch dimensions.
Args:
rotation_matrix: A tensor of shape `[A1, ..., An, 3, 3]`, where the last two
dimensions represent a rotation matrix.
name: A name for this op that defaults to "axis_angle_from_rotation_matrix".
Returns:
A tuple of two tensors, respectively of shape `[A1, ..., An, 3]` and
`[A1, ..., An, 1]`, where the first tensor represents the axis, and the
second represents the angle. The resulting axis is a normalized vector.
Raises:
ValueError: If the shape of `rotation_matrix` is not supported.
"""
with tf.name_scope(name):
rotation_matrix = tf.convert_to_tensor(value=rotation_matrix)
shape.check_static(
tensor=rotation_matrix,
tensor_name="rotation_matrix",
has_rank_greater_than=1,
has_dim_equals=((-2, 3), (-1, 3)))
rotation_matrix = rotation_matrix_3d.assert_rotation_matrix_normalized(
rotation_matrix)
quaternion = quaternion_lib.from_rotation_matrix(rotation_matrix)
return from_quaternion(quaternion)
def inverse(axis, angle, name="axis_angle_inverse"):
"""Computes the axis-angle that is the inverse of the input axis-angle.
Note:
In the following, A1 to An are optional batch dimensions.
Args:
axis: A tensor of shape `[A1, ..., An, 3]`, where the last dimension
represents a normalized axis.
angle: A tensor of shape `[A1, ..., An, 1]` where the last dimension
represents an angle.
name: A name for this op that defaults to "axis_angle_inverse".
Returns:
A tuple of two tensors, respectively of shape `[A1, ..., An, 3]` and
`[A1, ..., An, 1]`, where the first tensor represents the axis, and the
second represents the angle. The resulting axis is a normalized vector.
Raises:
ValueError: If the shape of `axis` or `angle` is not supported.
"""
with tf.name_scope(name):
axis = tf.convert_to_tensor(value=axis)
angle = tf.convert_to_tensor(value=angle)
shape.check_static(tensor=axis, tensor_name="axis", has_dim_equals=(-1, 3))
shape.check_static(
tensor=angle, tensor_name="angle", has_dim_equals=(-1, 1))
shape.compare_batch_dimensions(
tensors=(axis, angle),
tensor_names=("axis", "angle"),
last_axes=-2,
broadcast_compatible=True)
axis = asserts.assert_normalized(axis)
return axis, -angle
def is_normalized(axis, angle, atol=1e-3, name="axis_angle_is_normalized"):
"""Determines if the axis-angle is normalized or not.
Note:
In the following, A1 to An are optional batch dimensions.
Args:
axis: A tensor of shape `[A1, ..., An, 3]`, where the last dimension
represents a normalized axis.
angle: A tensor of shape `[A1, ..., An, 1]` where the last dimension
represents an angle.
atol: The absolute tolerance parameter.
name: A name for this op that defaults to "axis_angle_is_normalized".
Returns:
A tensor of shape `[A1, ..., An, 1]`, where False indicates that the axis is
not normalized.
"""
with tf.name_scope(name):
axis = tf.convert_to_tensor(value=axis)
angle = tf.convert_to_tensor(value=angle)
shape.check_static(tensor=axis, tensor_name="axis", has_dim_equals=(-1, 3))
shape.check_static(
tensor=angle, tensor_name="angle", has_dim_equals=(-1, 1))
shape.compare_batch_dimensions(
tensors=(axis, angle),
tensor_names=("axis", "angle"),
last_axes=-2,
broadcast_compatible=True)
norms = tf.norm(tensor=axis, axis=-1, keepdims=True)
return tf.abs(norms - 1.) < atol
def rotate(point, axis, angle, name="axis_angle_rotate"):
r"""Rotates a 3d point using an axis-angle by applying the Rodrigues' formula.
Rotates a vector $$\mathbf{v} \in {\mathbb{R}^3}$$ into a vector
$$\mathbf{v}' \in {\mathbb{R}^3}$$ using the Rodrigues' rotation formula:
$$\mathbf{v}'=\mathbf{v}\cos(\theta)+(\mathbf{a}\times\mathbf{v})\sin(\theta)
+\mathbf{a}(\mathbf{a}\cdot\mathbf{v})(1-\cos(\theta)).$$
Note:
In the following, A1 to An are optional batch dimensions.
Args:
point: A tensor of shape `[A1, ..., An, 3]`, where the last dimension
represents a 3d point to rotate.
axis: A tensor of shape `[A1, ..., An, 3]`, where the last dimension
represents a normalized axis.
angle: A tensor of shape `[A1, ..., An, 1]`, where the last dimension
represents an angle.
name: A name for this op that defaults to "axis_angle_rotate".
Returns:
A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents
a 3d point.
Raises:
ValueError: If `point`, `axis`, or `angle` are of different shape or if
their respective shape is not supported.
"""
with tf.name_scope(name):
point = tf.convert_to_tensor(value=point)
axis = tf.convert_to_tensor(value=axis)
angle = tf.convert_to_tensor(value=angle)
shape.check_static(
tensor=point, tensor_name="point", has_dim_equals=(-1, 3))
shape.check_static(tensor=axis, tensor_name="axis", has_dim_equals=(-1, 3))
shape.check_static(
tensor=angle, tensor_name="angle", has_dim_equals=(-1, 1))
shape.compare_batch_dimensions(
tensors=(point, axis, angle),
tensor_names=("point", "axis", "angle"),
last_axes=-2,
broadcast_compatible=True)
axis = asserts.assert_normalized(axis)
cos_angle = tf.cos(angle)
axis_dot_point = vector.dot(axis, point)
return point * cos_angle + vector.cross(
axis, point) * tf.sin(angle) + axis * axis_dot_point * (1.0 - cos_angle)
# API contains all public functions and classes.
__all__ = export_api.get_functions_and_classes()
| # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""This module implements axis-angle functionalities.
The axis-angle representation is defined as $$\theta\mathbf{a}$$, where
$$\mathbf{a}$$ is a unit vector indicating the direction of rotation and
$$\theta$$ is a scalar controlling the angle of rotation. It is important to
note that the axis-angle does not perform rotation by itself, but that it can be
used to rotate any given vector $$\mathbf{v} \in {\mathbb{R}^3}$$ into
a vector $$\mathbf{v}'$$ using the Rodrigues' rotation formula:
$$\mathbf{v}'=\mathbf{v}\cos(\theta)+(\mathbf{a}\times\mathbf{v})\sin(\theta)
+\mathbf{a}(\mathbf{a}\cdot\mathbf{v})(1-\cos(\theta)).$$
More details about the axis-angle formalism can be found on
[this page](https://en.wikipedia.org/wiki/Axis%E2%80%93angle_representation).
Note: Some of the functions defined in the module expect
a normalized axis $$\mathbf{a} = [x, y, z]^T$$ as inputs where
$$x^2 + y^2 + z^2 = 1$$.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow_graphics.geometry.transformation import quaternion as quaternion_lib
from tensorflow_graphics.geometry.transformation import rotation_matrix_3d
from tensorflow_graphics.math import vector
from tensorflow_graphics.util import asserts
from tensorflow_graphics.util import export_api
from tensorflow_graphics.util import safe_ops
from tensorflow_graphics.util import shape
def from_euler(angles, name="axis_angle_from_euler"):
r"""Converts Euler angles to an axis-angle representation.
Note:
The conversion is performed by first converting to a quaternion
representation, and then by converting the quaternion to an axis-angle.
Note:
In the following, A1 to An are optional batch dimensions.
Args:
angles: A tensor of shape `[A1, ..., An, 3]`, where the last dimension
represents the three Euler angles. `[A1, ..., An, 0]` is the angle about
      `x` in radians, `[A1, ..., An, 1]` is the angle about `y` in radians, and
`[A1, ..., An, 2]` is the angle about `z` in radians.
name: A name for this op that defaults to "axis_angle_from_euler".
Returns:
A tuple of two tensors, respectively of shape `[A1, ..., An, 3]` and
`[A1, ..., An, 1]`, where the first tensor represents the axis, and the
second represents the angle. The resulting axis is a normalized vector.
"""
with tf.name_scope(name):
quaternion = quaternion_lib.from_euler(angles)
return from_quaternion(quaternion)
def from_euler_with_small_angles_approximation(
angles, name="axis_angle_from_euler_with_small_angles_approximation"):
r"""Converts small Euler angles to an axis-angle representation.
Under the small angle assumption, $$\sin(x)$$ and $$\cos(x)$$ can be
approximated by their second order Taylor expansions, where
$$\sin(x) \approx x$$ and $$\cos(x) \approx 1 - \frac{x^2}{2}$$.
In the current implementation, the smallness of the angles is not verified.
Note:
The conversion is performed by first converting to a quaternion
representation, and then by converting the quaternion to an axis-angle.
Note:
In the following, A1 to An are optional batch dimensions.
Args:
angles: A tensor of shape `[A1, ..., An, 3]`, where the last dimension
represents the three small Euler angles. `[A1, ..., An, 0]` is the angle
      about `x` in radians, `[A1, ..., An, 1]` is the angle about `y` in radians,
and `[A1, ..., An, 2]` is the angle about `z` in radians.
name: A name for this op that defaults to
"axis_angle_from_euler_with_small_angles_approximation".
Returns:
A tuple of two tensors, respectively of shape `[A1, ..., An, 3]` and
`[A1, ..., An, 1]`, where the first tensor represents the axis, and the
second represents the angle. The resulting axis is a normalized vector.
"""
with tf.name_scope(name):
quaternion = quaternion_lib.from_euler_with_small_angles_approximation(
angles)
return from_quaternion(quaternion)
def from_quaternion(quaternion, name="axis_angle_from_quaternion"):
"""Converts a quaternion to an axis-angle representation.
Note:
In the following, A1 to An are optional batch dimensions.
Args:
quaternion: A tensor of shape `[A1, ..., An, 4]`, where the last dimension
represents a normalized quaternion.
name: A name for this op that defaults to "axis_angle_from_quaternion".
Returns:
Tuple of two tensors of shape `[A1, ..., An, 3]` and `[A1, ..., An, 1]`,
where the first tensor represents the axis, and the second represents the
angle. The resulting axis is a normalized vector.
Raises:
ValueError: If the shape of `quaternion` is not supported.
"""
with tf.name_scope(name):
quaternion = tf.convert_to_tensor(value=quaternion)
shape.check_static(
tensor=quaternion, tensor_name="quaternion", has_dim_equals=(-1, 4))
quaternion = asserts.assert_normalized(quaternion)
# This prevents zero norm xyz and zero w, and is differentiable.
quaternion += asserts.select_eps_for_addition(quaternion.dtype)
xyz, w = tf.split(quaternion, (3, 1), axis=-1)
norm = tf.norm(tensor=xyz, axis=-1, keepdims=True)
angle = 2.0 * tf.atan2(norm, tf.abs(w))
axis = safe_ops.safe_unsigned_div(safe_ops.nonzero_sign(w) * xyz, norm)
return axis, angle
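# A quick numeric sanity check (illustrative comment, not part of the original
# module): the unit quaternion [0., 0., sin(pi/4), cos(pi/4)] encodes a
# 90-degree rotation about z, so from_quaternion returns an axis of
# approximately [0., 0., 1.] and an angle of approximately [pi / 2].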
def from_rotation_matrix(rotation_matrix,
name="axis_angle_from_rotation_matrix"):
"""Converts a rotation matrix to an axis-angle representation.
Note:
In the current version the returned axis-angle representation is not unique
for a given rotation matrix. Since a direct conversion would not really be
faster, we first transform the rotation matrix to a quaternion, and finally
perform the conversion from that quaternion to the corresponding axis-angle
representation.
Note:
In the following, A1 to An are optional batch dimensions.
Args:
rotation_matrix: A tensor of shape `[A1, ..., An, 3, 3]`, where the last two
dimensions represent a rotation matrix.
name: A name for this op that defaults to "axis_angle_from_rotation_matrix".
Returns:
A tuple of two tensors, respectively of shape `[A1, ..., An, 3]` and
`[A1, ..., An, 1]`, where the first tensor represents the axis, and the
second represents the angle. The resulting axis is a normalized vector.
Raises:
ValueError: If the shape of `rotation_matrix` is not supported.
"""
with tf.name_scope(name):
rotation_matrix = tf.convert_to_tensor(value=rotation_matrix)
shape.check_static(
tensor=rotation_matrix,
tensor_name="rotation_matrix",
has_rank_greater_than=1,
has_dim_equals=((-2, 3), (-1, 3)))
rotation_matrix = rotation_matrix_3d.assert_rotation_matrix_normalized(
rotation_matrix)
quaternion = quaternion_lib.from_rotation_matrix(rotation_matrix)
return from_quaternion(quaternion)
def inverse(axis, angle, name="axis_angle_inverse"):
"""Computes the axis-angle that is the inverse of the input axis-angle.
Note:
In the following, A1 to An are optional batch dimensions.
Args:
axis: A tensor of shape `[A1, ..., An, 3]`, where the last dimension
represents a normalized axis.
angle: A tensor of shape `[A1, ..., An, 1]` where the last dimension
represents an angle.
name: A name for this op that defaults to "axis_angle_inverse".
Returns:
A tuple of two tensors, respectively of shape `[A1, ..., An, 3]` and
`[A1, ..., An, 1]`, where the first tensor represents the axis, and the
second represents the angle. The resulting axis is a normalized vector.
Raises:
ValueError: If the shape of `axis` or `angle` is not supported.
"""
with tf.name_scope(name):
axis = tf.convert_to_tensor(value=axis)
angle = tf.convert_to_tensor(value=angle)
shape.check_static(tensor=axis, tensor_name="axis", has_dim_equals=(-1, 3))
shape.check_static(
tensor=angle, tensor_name="angle", has_dim_equals=(-1, 1))
shape.compare_batch_dimensions(
tensors=(axis, angle),
tensor_names=("axis", "angle"),
last_axes=-2,
broadcast_compatible=True)
axis = asserts.assert_normalized(axis)
return axis, -angle
def is_normalized(axis, angle, atol=1e-3, name="axis_angle_is_normalized"):
"""Determines if the axis-angle is normalized or not.
Note:
In the following, A1 to An are optional batch dimensions.
Args:
axis: A tensor of shape `[A1, ..., An, 3]`, where the last dimension
represents a normalized axis.
angle: A tensor of shape `[A1, ..., An, 1]` where the last dimension
represents an angle.
atol: The absolute tolerance parameter.
name: A name for this op that defaults to "axis_angle_is_normalized".
Returns:
A tensor of shape `[A1, ..., An, 1]`, where False indicates that the axis is
not normalized.
"""
with tf.name_scope(name):
axis = tf.convert_to_tensor(value=axis)
angle = tf.convert_to_tensor(value=angle)
shape.check_static(tensor=axis, tensor_name="axis", has_dim_equals=(-1, 3))
shape.check_static(
tensor=angle, tensor_name="angle", has_dim_equals=(-1, 1))
shape.compare_batch_dimensions(
tensors=(axis, angle),
tensor_names=("axis", "angle"),
last_axes=-2,
broadcast_compatible=True)
norms = tf.norm(tensor=axis, axis=-1, keepdims=True)
return tf.abs(norms - 1.) < atol
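# Illustrative comment (not part of the original module): an axis such as
# [0.6, 0.8, 0.] has unit norm, so is_normalized returns True for it
# (within `atol`), independently of the accompanying angle.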
def rotate(point, axis, angle, name="axis_angle_rotate"):
r"""Rotates a 3d point using an axis-angle by applying the Rodrigues' formula.
Rotates a vector $$\mathbf{v} \in {\mathbb{R}^3}$$ into a vector
$$\mathbf{v}' \in {\mathbb{R}^3}$$ using the Rodrigues' rotation formula:
$$\mathbf{v}'=\mathbf{v}\cos(\theta)+(\mathbf{a}\times\mathbf{v})\sin(\theta)
+\mathbf{a}(\mathbf{a}\cdot\mathbf{v})(1-\cos(\theta)).$$
Note:
In the following, A1 to An are optional batch dimensions.
Args:
point: A tensor of shape `[A1, ..., An, 3]`, where the last dimension
represents a 3d point to rotate.
axis: A tensor of shape `[A1, ..., An, 3]`, where the last dimension
represents a normalized axis.
angle: A tensor of shape `[A1, ..., An, 1]`, where the last dimension
represents an angle.
name: A name for this op that defaults to "axis_angle_rotate".
Returns:
A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents
a 3d point.
Raises:
ValueError: If `point`, `axis`, or `angle` are of different shape or if
their respective shape is not supported.
"""
with tf.name_scope(name):
point = tf.convert_to_tensor(value=point)
axis = tf.convert_to_tensor(value=axis)
angle = tf.convert_to_tensor(value=angle)
shape.check_static(
tensor=point, tensor_name="point", has_dim_equals=(-1, 3))
shape.check_static(tensor=axis, tensor_name="axis", has_dim_equals=(-1, 3))
shape.check_static(
tensor=angle, tensor_name="angle", has_dim_equals=(-1, 1))
shape.compare_batch_dimensions(
tensors=(point, axis, angle),
tensor_names=("point", "axis", "angle"),
last_axes=-2,
broadcast_compatible=True)
axis = asserts.assert_normalized(axis)
cos_angle = tf.cos(angle)
axis_dot_point = vector.dot(axis, point)
return point * cos_angle + vector.cross(
axis, point) * tf.sin(angle) + axis * axis_dot_point * (1.0 - cos_angle)
# API contains all public functions and classes.
__all__ = export_api.get_functions_and_classes()
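# A minimal usage sketch (illustrative comment, not part of the original
# module). Rotating the point (1, 0, 0) by pi/2 about the z axis with
# Rodrigues' formula should give approximately (0, 1, 0):
#
#   import numpy as np
#   point = np.array([1.0, 0.0, 0.0], dtype=np.float32)
#   axis = np.array([0.0, 0.0, 1.0], dtype=np.float32)
#   angle = np.array([np.pi / 2.0], dtype=np.float32)
#   rotated = rotate(point, axis, angle)  # ~[0., 1., 0.]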
| -1 |
tensorflow/graphics | 489 | Enforce `Framebuffer` to accept only tensors with single batch dimension. | Enforce `Framebuffer` to accept only tensors with single batch dimension.
| copybara-service[bot] | "2021-02-03T21:06:22Z" | "2021-02-12T23:59:48Z" | 3a4f1952ed967fb884dc031eeda6dac3fbefbe52 | b7a2bf260d6fcf924fddcbb6dba36c72ece66990 | Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
| ./tensorflow_graphics/projects/local_implicit_grid/README.md | ## Local Implicit Grid Representations for 3D Scenes
By: [Chiyu "Max" Jiang](http://maxjiang.ml/),
[Avneesh Sud](https://research.google/people/105052/),
[Ameesh Makadia](http://www.ameeshmakadia.com/index.html),
[Jingwei Huang](http://stanford.edu/~jingweih/),
[Matthias Niessner](http://niessnerlab.org/members/matthias_niessner/profile.html),
[Thomas Funkhouser](https://www.cs.princeton.edu/~funk/)
\[[Project Website](http://maxjiang.ml/proj/lig)\] \[[Paper PDF Preprint](https://arxiv.org/abs/2003.08981)\]

### Introduction
This repository is based on our CVPR 2020 paper:
[Local Implicit Grid Representations for 3D Scenes](https://arxiv.org/abs/2003.08981).
The [project webpage](http://maxjiang.ml/proj/lig) presents an overview of the
project.
Shape priors learned from data are commonly used to reconstruct 3D objects from
partial or noisy data. Yet no such shape priors are available for indoor scenes,
since typical 3D autoencoders cannot handle their scale, complexity, or
diversity. In this paper, we introduce Local Implicit Grid Representations, a
new 3D shape representation designed for scalability and generality. The
motivating idea is that most 3D surfaces share geometric details at some
scale -- i.e., at a scale smaller than an entire object and larger than a small
patch. We train an autoencoder to learn an embedding of local crops of 3D shapes
at that size. Then, we use the decoder as a component in a shape optimization
that solves for a set of latent codes on a regular grid of overlapping crops
such that an interpolation of the decoded local shapes matches a partial or
noisy observation. We demonstrate the value of this proposed approach for 3D
surface reconstruction from sparse point observations, showing significantly
better results than alternative approaches.
Our deep learning code base is written using [Tensorflow](https://www.tensorflow.org/).
### Getting started
Code is tested with Python 3.7+ and TensorFlow 1.14+. Please install the
necessary dependencies. `pip` is a recommended way to do this.
```bash
pip install -r requirements.txt
```
### Scene reconstruction using pretrained part encoding
Currently we are releasing the evaluation code to use our pretrained model for
scene reconstruction, along with definitions for the local implicit grid layer
and part-autoencoder model. To directly use our script for surface
reconstruction, prepare the input point cloud as a `.ply` file with vertex
attributes: `x, y, z, nx, ny, nz`. See `resample_geometry.py` for creating an
input `.ply` file from a mesh. For demo input data, refer to the inputs
under `demo_data/`.
To reconstruct a meshed surface given an input point cloud,
run `reconstruct_geometry.py` as follows:
```bash
# Be sure to add the root of the tensorflow_graphics directory to your PYTHONPATH
# Assuming PWD=<path/to/tensorflow_graphics>
export PYTHONPATH="$PWD:$PYTHONPATH"
pushd tensorflow_graphics/projects/local_implicit_grid/
# using one GPU is sufficient
export CUDA_VISIBLE_DEVICES=0
# download the model weights.
wget https://storage.googleapis.com/local-implicit-grids/pretrained_ckpt.zip
unzip pretrained_ckpt.zip; rm pretrained_ckpt.zip
# fetch a test object and compute point cloud.
mkdir -p demo_data
wget https://cs.uwaterloo.ca/~c2batty/bunny_watertight.obj
mv bunny_watertight.obj demo_data
# reconstruct an object. since objects are much smaller than entire scenes,
# we can use a smaller point number and number of optimization steps to speed
# up.
python reconstruct_geometry.py \
--input_ply demo_data/bunny.ply \
--part_size=0.20 --npoints=2048 --steps=3001
# download more demo data for scene reconstruction.
wget http://storage.googleapis.com/local-implicit-grids/demo_data.zip
unzip demo_data.zip; rm demo_data.zip
# reconstruct a dense scene
python reconstruct_geometry.py \
--input_ply demo_data/living_room_33_1000_per_m2.ply \
--part_size=0.25
# reconstruct a sparser scene using a larger part size
python reconstruct_geometry.py \
--input_ply demo_data/living_room_33_100_per_m2.ply \
--part_size=0.50
```
The part size parameter controls the granularity of the local implicit grid. For
scenes it should be in the range of 0.25 - 0.5 (meters). For objects, it depends
on the scale of the coordinates. For normalized objects (max bounding
box length ~ 1), use a part size of ~0.2. In general, `part_size` should not be
greater than 1/4 of the minimum bounding box width.
### References
If you find our code or paper useful, please consider citing
@inproceedings{Local_Implicit_Grid_CVPR20,
title = {Local Implicit Grid Representations for 3D Scenes},
author = {Chiyu Max Jiang and Avneesh Sud and Ameesh Makadia and Jingwei Huang and Matthias Nießner and Thomas Funkhouser},
booktitle = {Proceedings IEEE Conf. on Computer Vision and Pattern Recognition (CVPR)},
year = {2020}
}
### Contact
Please contact [Max Jiang](mailto:[email protected]) or
[Avneesh Sud](mailto:[email protected]) if you have further questions!
| ## Local Implicit Grid Representations for 3D Scenes
By: [Chiyu "Max" Jiang](http://maxjiang.ml/),
[Avneesh Sud](https://research.google/people/105052/),
[Ameesh Makadia](http://www.ameeshmakadia.com/index.html),
[Jingwei Huang](http://stanford.edu/~jingweih/),
[Matthias Niessner](http://niessnerlab.org/members/matthias_niessner/profile.html),
[Thomas Funkhouser](https://www.cs.princeton.edu/~funk/)
\[[Project Website](http://maxjiang.ml/proj/lig)\] \[[Paper PDF Preprint](https://arxiv.org/abs/2003.08981)\]

### Introduction
This repository is based on our CVPR 2020 paper:
[Local Implicit Grid Representations for 3D Scenes](https://arxiv.org/abs/2003.08981).
The [project webpage](http://maxjiang.ml/proj/lig) presents an overview of the
project.
Shape priors learned from data are commonly used to reconstruct 3D objects from
partial or noisy data. Yet no such shape priors are available for indoor scenes,
since typical 3D autoencoders cannot handle their scale, complexity, or
diversity. In this paper, we introduce Local Implicit Grid Representations, a
new 3D shape representation designed for scalability and generality. The
motivating idea is that most 3D surfaces share geometric details at some
scale -- i.e., at a scale smaller than an entire object and larger than a small
patch. We train an autoencoder to learn an embedding of local crops of 3D shapes
at that size. Then, we use the decoder as a component in a shape optimization
that solves for a set of latent codes on a regular grid of overlapping crops
such that an interpolation of the decoded local shapes matches a partial or
noisy observation. We demonstrate the value of this proposed approach for 3D
surface reconstruction from sparse point observations, showing significantly
better results than alternative approaches.
Our deep learning code base is written using [Tensorflow](https://www.tensorflow.org/).
### Getting started
Code is tested with Python 3.7+ and TensorFlow 1.14+. Please install the
necessary dependencies. `pip` is a recommended way to do this.
```bash
pip install -r requirements.txt
```
### Scene reconstruction using pretrained part encoding
Currently we are releasing the evaluation code to use our pretrained model for
scene reconstruction, along with definitions for the local implicit grid layer
and part-autoencoder model. To directly use our script for surface
reconstruction, prepare the input point cloud as a `.ply` file with vertex
attributes: `x, y, z, nx, ny, nz`. See `resample_geometry.py` for creating an
input `.ply` file from a mesh. For demo input data, refer to the inputs
under `demo_data/`.
To reconstruct a meshed surface given an input point cloud,
run `reconstruct_geometry.py` as follows:
```bash
# Be sure to add the root of the tensorflow_graphics directory to your PYTHONPATH
# Assuming PWD=<path/to/tensorflow_graphics>
export PYTHONPATH="$PWD:$PYTHONPATH"
pushd tensorflow_graphics/projects/local_implicit_grid/
# using one GPU is sufficient
export CUDA_VISIBLE_DEVICES=0
# download the model weights.
wget https://storage.googleapis.com/local-implicit-grids/pretrained_ckpt.zip
unzip pretrained_ckpt.zip; rm pretrained_ckpt.zip
# fetch a test object and compute point cloud.
mkdir -p demo_data
wget https://cs.uwaterloo.ca/~c2batty/bunny_watertight.obj
mv bunny_watertight.obj demo_data
# reconstruct an object. since objects are much smaller than entire scenes,
# we can use a smaller point number and number of optimization steps to speed
# up.
python reconstruct_geometry.py \
--input_ply demo_data/bunny.ply \
--part_size=0.20 --npoints=2048 --steps=3001
# download more demo data for scene reconstruction.
wget http://storage.googleapis.com/local-implicit-grids/demo_data.zip
unzip demo_data.zip; rm demo_data.zip
# reconstruct a dense scene
python reconstruct_geometry.py \
--input_ply demo_data/living_room_33_1000_per_m2.ply \
--part_size=0.25
# reconstruct a sparser scene using a larger part size
python reconstruct_geometry.py \
--input_ply demo_data/living_room_33_100_per_m2.ply \
--part_size=0.50
```
The part size parameter controls the granularity of the local implicit grid. For
scenes it should be in the range of 0.25 - 0.5 (meters). For objects, it depends
on the scale of the coordinates. For normalized objects (max bounding
box length ~ 1), use a part size of ~0.2. In general, `part_size` should not be
greater than 1/4 of the minimum bounding box width.
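As a rough worked example (the scene filename below is hypothetical): for a
scene whose smallest bounding-box side is about 1.2 meters, 1.2 / 4 = 0.3, so
`part_size` should be at most 0.3:
```bash
python reconstruct_geometry.py \
  --input_ply demo_data/my_scene.ply \
  --part_size=0.30
```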
### References
If you find our code or paper useful, please consider citing
@inproceedings{Local_Implicit_Grid_CVPR20,
title = {Local Implicit Grid Representations for 3D Scenes},
author = {Chiyu Max Jiang and Avneesh Sud and Ameesh Makadia and Jingwei Huang and Matthias Nießner and Thomas Funkhouser},
booktitle = {Proceedings IEEE Conf. on Computer Vision and Pattern Recognition (CVPR)},
year = {2020}
}
### Contact
Please contact [Max Jiang](mailto:[email protected]) or
[Avneesh Sud](mailto:[email protected]) if you have further questions!
| -1 |
tensorflow/graphics | 489 | Enforce `Framebuffer` to accept only tensors with single batch dimension. | Enforce `Framebuffer` to accept only tensors with single batch dimension.
| copybara-service[bot] | "2021-02-03T21:06:22Z" | "2021-02-12T23:59:48Z" | 3a4f1952ed967fb884dc031eeda6dac3fbefbe52 | b7a2bf260d6fcf924fddcbb6dba36c72ece66990 | Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
| ./tensorflow_graphics/nn/layer/__init__.py | # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Layer module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_graphics.nn.layer import graph_convolution
from tensorflow_graphics.nn.layer import pointnet
from tensorflow_graphics.util import export_api as _export_api
# API contains submodules of tensorflow_graphics.geometry.
__all__ = _export_api.get_modules()
| # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Layer module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_graphics.nn.layer import graph_convolution
from tensorflow_graphics.nn.layer import pointnet
from tensorflow_graphics.util import export_api as _export_api
# API contains submodules of tensorflow_graphics.geometry.
__all__ = _export_api.get_modules()
| -1 |
tensorflow/graphics | 489 | Enforce `Framebuffer` to accept only tensors with single batch dimension. | Enforce `Framebuffer` to accept only tensors with single batch dimension.
| copybara-service[bot] | "2021-02-03T21:06:22Z" | "2021-02-12T23:59:48Z" | 3a4f1952ed967fb884dc031eeda6dac3fbefbe52 | b7a2bf260d6fcf924fddcbb6dba36c72ece66990 | Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
| ./tensorflow_graphics/math/interpolation/tests/__init__.py | # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| -1 |
tensorflow/graphics | 489 | Enforce `Framebuffer` to accept only tensors with single batch dimension. | Enforce `Framebuffer` to accept only tensors with single batch dimension.
| copybara-service[bot] | "2021-02-03T21:06:22Z" | "2021-02-12T23:59:48Z" | 3a4f1952ed967fb884dc031eeda6dac3fbefbe52 | b7a2bf260d6fcf924fddcbb6dba36c72ece66990 | Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
| ./.git/hooks/pre-rebase.sample | #!/bin/sh
#
# Copyright (c) 2006, 2008 Junio C Hamano
#
# The "pre-rebase" hook is run just before "git rebase" starts doing
# its job, and can prevent the command from running by exiting with
# non-zero status.
#
# The hook is called with the following parameters:
#
# $1 -- the upstream the series was forked from.
# $2 -- the branch being rebased (or empty when rebasing the current branch).
#
# This sample shows how to prevent topic branches that are already
# merged to 'next' branch from getting rebased, because allowing it
# would result in rebasing already published history.
publish=next
basebranch="$1"
if test "$#" = 2
then
topic="refs/heads/$2"
else
topic=`git symbolic-ref HEAD` ||
exit 0 ;# we do not interrupt rebasing detached HEAD
fi
case "$topic" in
refs/heads/??/*)
;;
*)
exit 0 ;# we do not interrupt others.
;;
esac
# Now we are dealing with a topic branch being rebased
# on top of master. Is it OK to rebase it?
# Does the topic really exist?
git show-ref -q "$topic" || {
echo >&2 "No such branch $topic"
exit 1
}
# Is topic fully merged to master?
not_in_master=`git rev-list --pretty=oneline ^master "$topic"`
if test -z "$not_in_master"
then
echo >&2 "$topic is fully merged to master; better remove it."
exit 1 ;# we could allow it, but there is no point.
fi
# Is topic ever merged to next? If so you should not be rebasing it.
only_next_1=`git rev-list ^master "^$topic" ${publish} | sort`
only_next_2=`git rev-list ^master ${publish} | sort`
if test "$only_next_1" = "$only_next_2"
then
not_in_topic=`git rev-list "^$topic" master`
if test -z "$not_in_topic"
then
echo >&2 "$topic is already up to date with master"
exit 1 ;# we could allow it, but there is no point.
else
exit 0
fi
else
not_in_next=`git rev-list --pretty=oneline ^${publish} "$topic"`
/usr/bin/perl -e '
my $topic = $ARGV[0];
my $msg = "* $topic has commits already merged to public branch:\n";
my (%not_in_next) = map {
/^([0-9a-f]+) /;
($1 => 1);
} split(/\n/, $ARGV[1]);
for my $elem (map {
/^([0-9a-f]+) (.*)$/;
[$1 => $2];
} split(/\n/, $ARGV[2])) {
if (!exists $not_in_next{$elem->[0]}) {
if ($msg) {
print STDERR $msg;
undef $msg;
}
print STDERR " $elem->[1]\n";
}
}
' "$topic" "$not_in_next" "$not_in_master"
exit 1
fi
<<\DOC_END
This sample hook safeguards topic branches that have been
published from being rewound.
The workflow assumed here is:
* Once a topic branch forks from "master", "master" is never
merged into it again (either directly or indirectly).
* Once a topic branch is fully cooked and merged into "master",
it is deleted. If you need to build on top of it to correct
earlier mistakes, a new topic branch is created by forking at
the tip of the "master". This is not strictly necessary, but
it makes it easier to keep your history simple.
* Whenever you need to test or publish your changes to topic
branches, merge them into "next" branch.
The script, being an example, hardcodes the publish branch name
to be "next", but it is trivial to make it configurable via
$GIT_DIR/config mechanism.
With this workflow, you would want to know:
(1) ... if a topic branch has ever been merged to "next". Young
topic branches can have stupid mistakes you would rather
clean up before publishing, and things that have not been
merged into other branches can be easily rebased without
affecting other people. But once it is published, you would
not want to rewind it.
(2) ... if a topic branch has been fully merged to "master".
Then you can delete it. More importantly, you should not
build on top of it -- other people may already want to
change things related to the topic as patches against your
"master", so if you need further changes, it is better to
fork the topic (perhaps with the same name) afresh from the
tip of "master".
Let's look at this example:
o---o---o---o---o---o---o---o---o---o "next"
/ / / /
/ a---a---b A / /
/ / / /
/ / c---c---c---c B /
/ / / \ /
/ / / b---b C \ /
/ / / / \ /
---o---o---o---o---o---o---o---o---o---o---o "master"
A, B and C are topic branches.
* A has one fix since it was merged up to "next".
* B has finished. It has been fully merged up to "master" and "next",
and is ready to be deleted.
* C has not merged to "next" at all.
We would want to allow C to be rebased, refuse A, and encourage
B to be deleted.
To compute (1):
git rev-list ^master ^topic next
git rev-list ^master next
if these match, topic has not merged in next at all.
To compute (2):
git rev-list master..topic
if this is empty, it is fully merged to "master".
DOC_END
| #!/bin/sh
#
# Copyright (c) 2006, 2008 Junio C Hamano
#
# The "pre-rebase" hook is run just before "git rebase" starts doing
# its job, and can prevent the command from running by exiting with
# non-zero status.
#
# The hook is called with the following parameters:
#
# $1 -- the upstream the series was forked from.
# $2 -- the branch being rebased (or empty when rebasing the current branch).
#
# This sample shows how to prevent topic branches that are already
# merged to 'next' branch from getting rebased, because allowing it
# would result in rebasing already published history.
publish=next
basebranch="$1"
if test "$#" = 2
then
topic="refs/heads/$2"
else
topic=`git symbolic-ref HEAD` ||
exit 0 ;# we do not interrupt rebasing detached HEAD
fi
case "$topic" in
refs/heads/??/*)
;;
*)
exit 0 ;# we do not interrupt others.
;;
esac
# Now we are dealing with a topic branch being rebased
# on top of master. Is it OK to rebase it?
# Does the topic really exist?
git show-ref -q "$topic" || {
echo >&2 "No such branch $topic"
exit 1
}
# Is topic fully merged to master?
not_in_master=`git rev-list --pretty=oneline ^master "$topic"`
if test -z "$not_in_master"
then
echo >&2 "$topic is fully merged to master; better remove it."
exit 1 ;# we could allow it, but there is no point.
fi
# Is topic ever merged to next? If so you should not be rebasing it.
only_next_1=`git rev-list ^master "^$topic" ${publish} | sort`
only_next_2=`git rev-list ^master ${publish} | sort`
if test "$only_next_1" = "$only_next_2"
then
not_in_topic=`git rev-list "^$topic" master`
if test -z "$not_in_topic"
then
echo >&2 "$topic is already up to date with master"
exit 1 ;# we could allow it, but there is no point.
else
exit 0
fi
else
not_in_next=`git rev-list --pretty=oneline ^${publish} "$topic"`
/usr/bin/perl -e '
my $topic = $ARGV[0];
my $msg = "* $topic has commits already merged to public branch:\n";
my (%not_in_next) = map {
/^([0-9a-f]+) /;
($1 => 1);
} split(/\n/, $ARGV[1]);
for my $elem (map {
/^([0-9a-f]+) (.*)$/;
[$1 => $2];
} split(/\n/, $ARGV[2])) {
if (!exists $not_in_next{$elem->[0]}) {
if ($msg) {
print STDERR $msg;
undef $msg;
}
print STDERR " $elem->[1]\n";
}
}
' "$topic" "$not_in_next" "$not_in_master"
exit 1
fi
<<\DOC_END
This sample hook safeguards topic branches that have been
published from being rewound.
The workflow assumed here is:
* Once a topic branch forks from "master", "master" is never
merged into it again (either directly or indirectly).
* Once a topic branch is fully cooked and merged into "master",
it is deleted. If you need to build on top of it to correct
earlier mistakes, a new topic branch is created by forking at
the tip of the "master". This is not strictly necessary, but
it makes it easier to keep your history simple.
* Whenever you need to test or publish your changes to topic
branches, merge them into "next" branch.
The script, being an example, hardcodes the publish branch name
to be "next", but it is trivial to make it configurable via
$GIT_DIR/config mechanism.
With this workflow, you would want to know:
(1) ... if a topic branch has ever been merged to "next". Young
topic branches can have stupid mistakes you would rather
clean up before publishing, and things that have not been
merged into other branches can be easily rebased without
affecting other people. But once it is published, you would
not want to rewind it.
(2) ... if a topic branch has been fully merged to "master".
Then you can delete it. More importantly, you should not
build on top of it -- other people may already want to
change things related to the topic as patches against your
"master", so if you need further changes, it is better to
fork the topic (perhaps with the same name) afresh from the
tip of "master".
Let's look at this example:
o---o---o---o---o---o---o---o---o---o "next"
/ / / /
/ a---a---b A / /
/ / / /
/ / c---c---c---c B /
/ / / \ /
/ / / b---b C \ /
/ / / / \ /
---o---o---o---o---o---o---o---o---o---o---o "master"
A, B and C are topic branches.
* A has one fix since it was merged up to "next".
* B has finished. It has been fully merged up to "master" and "next",
and is ready to be deleted.
* C has not merged to "next" at all.
We would want to allow C to be rebased, refuse A, and encourage
B to be deleted.
To compute (1):
git rev-list ^master ^topic next
git rev-list ^master next
if these match, topic has not merged in next at all.
To compute (2):
git rev-list master..topic
if this is empty, it is fully merged to "master".
DOC_END
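# A minimal sketch (not part of the original sample) of the configurable
# variant the note above alludes to: read the publish branch from git config,
# falling back to "next". The key name `hooks.publishbranch` is illustrative.
#
#   publish=$(git config --get hooks.publishbranch || echo next)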
| -1 |
tensorflow/graphics | 489 | Enforce `Framebuffer` to accept only tensors with single batch dimension. | Enforce `Framebuffer` to accept only tensors with single batch dimension.
| copybara-service[bot] | "2021-02-03T21:06:22Z" | "2021-02-12T23:59:48Z" | 3a4f1952ed967fb884dc031eeda6dac3fbefbe52 | b7a2bf260d6fcf924fddcbb6dba36c72ece66990 | Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
| ./tensorflow_graphics/geometry/representation/tests/grid_test.py | # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for grid."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow_graphics.geometry.representation import grid
from tensorflow_graphics.util import test_case
class GridTest(test_case.TestCase):
@parameterized.parameters(
(((1,), (1,), (1,)), (tf.float32, tf.float32, tf.int32)),
(((1, 1), (1, 1), (1,)), (tf.float32, tf.float32, tf.int32)),
)
def test_generate_exception_not_raised(self, shapes, dtypes):
"""Tests that the shape exceptions are not raised."""
self.assert_exception_is_not_raised(grid.generate, shapes, dtypes)
@parameterized.parameters(
("starts must have a rank greater than 0", (), (None,), (None,)),
("stops must have a rank greater than 0", (None,), (), (None,)),
("nums must have a rank of 1", (None,), (None,), ()),
("Not all batch dimensions are identical.", (1,), (0,), (1,)),
("Not all batch dimensions are identical.", (0,), (1,), (1,)),
("must have the same number of dimensions", (1,), (1,), (0,)),
)
def test_generate_exception_raised(self, error_msg, *shapes):
"""Tests that the shape exceptions are not raised."""
self.assert_exception_is_raised(grid.generate, error_msg, shapes)
@parameterized.parameters(
(((-1.,), (1.,), (3,)), (((-1.,), (0.,), (1.,)),)),
((((-1.,), (-1.,)), ((1.,), (1.,)), (1,)), ((((-1.,),), ((-1.,),)),)),
)
def test_generate_preset(self, test_inputs, test_outputs):
"""Test the uniform grid generation using fix test cases."""
self.assert_output_is_correct(
grid.generate, test_inputs, test_outputs, tile=False)
def test_generate_random(self):
"""Test the uniform grid generation."""
starts = np.array((0., 0.), dtype=np.float32)
stops = np.random.randint(1, 10, size=(2))
nums = stops + 1
stops = stops.astype(np.float32)
g = grid.generate(starts, stops, nums)
shape = nums.tolist() + [2]
xv, yv = np.meshgrid(range(shape[0]), range(shape[1]), indexing="ij")
gt = np.stack((xv, yv), axis=-1).astype(np.float32)
self.assertAllClose(g, gt)
if __name__ == "__main__":
test_case.main()
| # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for grid."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow_graphics.geometry.representation import grid
from tensorflow_graphics.util import test_case
class GridTest(test_case.TestCase):
@parameterized.parameters(
(((1,), (1,), (1,)), (tf.float32, tf.float32, tf.int32)),
(((1, 1), (1, 1), (1,)), (tf.float32, tf.float32, tf.int32)),
)
def test_generate_exception_not_raised(self, shapes, dtypes):
"""Tests that the shape exceptions are not raised."""
self.assert_exception_is_not_raised(grid.generate, shapes, dtypes)
@parameterized.parameters(
("starts must have a rank greater than 0", (), (None,), (None,)),
("stops must have a rank greater than 0", (None,), (), (None,)),
("nums must have a rank of 1", (None,), (None,), ()),
("Not all batch dimensions are identical.", (1,), (0,), (1,)),
("Not all batch dimensions are identical.", (0,), (1,), (1,)),
("must have the same number of dimensions", (1,), (1,), (0,)),
)
def test_generate_exception_raised(self, error_msg, *shapes):
"""Tests that the shape exceptions are not raised."""
self.assert_exception_is_raised(grid.generate, error_msg, shapes)
@parameterized.parameters(
(((-1.,), (1.,), (3,)), (((-1.,), (0.,), (1.,)),)),
((((-1.,), (-1.,)), ((1.,), (1.,)), (1,)), ((((-1.,),), ((-1.,),)),)),
)
def test_generate_preset(self, test_inputs, test_outputs):
"""Test the uniform grid generation using fix test cases."""
self.assert_output_is_correct(
grid.generate, test_inputs, test_outputs, tile=False)
def test_generate_random(self):
"""Test the uniform grid generation."""
starts = np.array((0., 0.), dtype=np.float32)
stops = np.random.randint(1, 10, size=(2))
nums = stops + 1
stops = stops.astype(np.float32)
g = grid.generate(starts, stops, nums)
shape = nums.tolist() + [2]
xv, yv = np.meshgrid(range(shape[0]), range(shape[1]), indexing="ij")
gt = np.stack((xv, yv), axis=-1).astype(np.float32)
self.assertAllClose(g, gt)
if __name__ == "__main__":
test_case.main()
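# Illustrative comment (not part of the original tests): the preset case
# above implies that grid.generate((-1.,), (1.,), (3,)) returns the uniform
# grid [[-1.], [0.], [1.]], i.e. `nums` evenly spaced samples from `starts`
# to `stops`, with a trailing coordinate dimension.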
| -1 |
tensorflow/graphics | 489 | Enforce `Framebuffer` to accept only tensors with single batch dimension. | Enforce `Framebuffer` to accept only tensors with single batch dimension.
| copybara-service[bot] | "2021-02-03T21:06:22Z" | "2021-02-12T23:59:48Z" | 3a4f1952ed967fb884dc031eeda6dac3fbefbe52 | b7a2bf260d6fcf924fddcbb6dba36c72ece66990 | Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
| ./tensorflow_graphics/tensorboard/mesh_visualizer/tf_mesh_dashboard/array-buffer-data-provider.js | /* Copyright 2020 The TensorFlow Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
/**
 * @fileoverview ArrayBufferDataProvider is responsible for making requests to
 * the server and for receiving and parsing the responses.
*/
// TODO(b/135959734): this class must be refactored into base DataProvider and
// subclass ArrayBufferDataProvider later.
var vz_mesh;
(function(vz_mesh) {
/**
* Types of errors during network data roundtrip.
* @enum {number}
*/
vz_mesh.ErrorCodes = {
CANCELLED: 1 // Happens when the request was cancelled before it finished.
};
/**
* Types of content displayed by the plugin.
* @enum {number}
*/
const ContentType = {
VERTEX: 1,
FACE: 2,
COLOR: 3
};
/**
* Types of content displayed by the plugin mapped to underlying data types.
* @enum {string}
*/
const ContentTypeToItemType = {
VERTEX: 'float32',
FACE: 'int32',
COLOR: 'uint8'
};
class ArrayBufferDataProvider {
/**
* ArrayBufferDataProvider constructor, initializes everything needed for
* future requests to the server.
* @param {!Object} requestManager Request manager to communicate with the
* server.
*/
constructor(requestManager) {
this._requestManager = requestManager;
this._canceller = new tf_backend.Canceller();
}
/**
* Requests new data from the server.
*/
reload(run, tag, sample) {
this._canceller.cancelAll();
return this._fetchMetadata(run, tag, sample);
}
/**
* Requests new data of some particular type from the server.
* @param {string} run Name of the run to get data for.
* @param {string} tag Name of the tag to get data for.
* @param {string} content_type Type of the content to retrieve.
* @param {number} sample Sample index from a batch of data.
   * @param {number} step Step value, representing a point in time when the
   *     event occurred.
* @param {!Object} meshData Map to populate with mesh data.
* @return {!Object} Promise object representing server request.
* @private
*/
_fetchDataByStep(run, tag, content_type, sample, step, meshData) {
const url = tf_backend.getRouter().pluginRoute(
'mesh', '/data',
new URLSearchParams({tag, run, content_type, sample, step}));
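    // Reshapes a flat typed array into a list of N rows with `channelsCount`
    // (3) components each, e.g. vertex coordinates or RGB colors.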
const reshapeTo1xNx3 = function (data) {
const channelsCount = 3;
let items = [];
for (let i = 0; i < data.length / channelsCount; i++) {
let dataEntry = [];
for (let j = 0; j < channelsCount; j++) {
dataEntry.push(data[i * channelsCount + j]);
}
items.push(dataEntry);
}
return items;
};
const processData = this._canceller.cancellable(response => {
if (response.cancelled) {
return Promise.reject({
code: vz_mesh.ErrorCodes.CANCELLED,
message: 'Response was invalidated.'
});
}
let buffer = response.value;
switch(content_type) {
case 'VERTEX':
meshData.vertices = reshapeTo1xNx3(new Float32Array(buffer));
break;
case 'FACE':
meshData.faces = reshapeTo1xNx3(new Int32Array(buffer));
break;
case 'COLOR':
meshData.colors = reshapeTo1xNx3(new Uint8Array(buffer));
break;
}
return meshData;
});
return this._requestManager
.fetch(
url, null, 'arraybuffer',
ContentTypeToItemType[content_type])
.then(response => response.arrayBuffer())
.then(processData);
}
/**
* Requests new data for each type of metadata from the server.
* Metadata consists of wall_time, step, tensor shape, content type and other
* info, but not tensor data itself.
* @param {!Object} stepDatum Dictionary with mesh data for a current step.
* @param {string} run Name of the run to get data for.
   * @param {string} tag Name of the tag to get data for.
* @param {number} sample Sample index from a batch of data.
* @return {!Object} Joint promise for all requests being sent.
* @private
*/
fetchData(stepDatum, run, tag, sample) {
let promises = [];
// Map to populate with mesh data, i.e. vertices, faces, etc.
let meshData = new Map();
Object.keys(ContentType).forEach(contentType => {
const component = (1 << ContentType[contentType]);
if (stepDatum.components & component) {
promises.push(this._fetchDataByStep(
run, tag, contentType, sample, stepDatum.step,
meshData));
}
});
return Promise.all(promises);
}
/**
   * Requests new metadata from the server.
   * @param {string} run Name of the run to get data for.
   * @param {string} tag Name of the tag to get data for.
   * @param {number} sample Sample index from a batch of data.
* @return {!Object} Promise for requested metadata.
* @private
*/
_fetchMetadata(run, tag, sample) {
this._canceller.cancelAll();
const url = tf_backend.getRouter().pluginRoute(
'mesh', '/meshes', new URLSearchParams({tag, run, sample}));
const requestData = this._canceller.cancellable(response => {
if (response.cancelled) {
return Promise.reject({
code: vz_mesh.ErrorCodes.CANCELLED,
message: 'Response was invalidated.'
});
}
return response.value;
});
return this._requestManager.fetch(url)
.then(response => response.json())
.then(requestData)
.then(this._processMetadata.bind(this));
}
/**
   * Processes raw server data into a frontend-friendly format.
* @param {!Array|undefined} data list of raw server records.
* @return {!Array} list of step datums.
* @private
*/
_processMetadata(data) {
if (!data) return;
const stepToData = new Map();
for (let i = 0; i < data.length; i++) {
let dataEntry = data[i];
if (!stepToData.has(dataEntry.step)) {
stepToData.set(dataEntry.step, []);
}
stepToData.get(dataEntry.step).push(dataEntry);
}
let datums = [];
stepToData.forEach((data) => {
let datum = this._createStepDatum(data[0]);
datums.push(datum);
});
return datums;
}
/**
   * Processes a single row of server-side data and puts it in a more
   * structured form.
* @param {!Object} metadata Object describing step summary.
* @private
* @return {!Object} with wall_time, step number and data for the step.
*/
_createStepDatum(metadata) {
return {
// The wall time within the metadata is in seconds. The Date
// constructor accepts a time in milliseconds, so we multiply by 1000.
wall_time: new Date(metadata.wall_time * 1000),
step: metadata.step,
config: metadata.config,
content_type: metadata.content_type,
components: metadata.components
};
}
}
vz_mesh.ArrayBufferDataProvider = ArrayBufferDataProvider;
})(vz_mesh || (vz_mesh = {})); // end of vz_mesh namespace
| /* Copyright 2020 The TensorFlow Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
/**
 * @fileoverview ArrayBufferDataProvider is responsible for making requests to
 * the server and for receiving and parsing the responses.
*/
// TODO(b/135959734): this class must be refactored into base DataProvider and
// subclass ArrayBufferDataProvider later.
var vz_mesh;
(function(vz_mesh) {
/**
* Types of errors during network data roundtrip.
* @enum {number}
*/
vz_mesh.ErrorCodes = {
CANCELLED: 1 // Happens when the request was cancelled before it finished.
};
/**
* Types of content displayed by the plugin.
* @enum {number}
*/
const ContentType = {
VERTEX: 1,
FACE: 2,
COLOR: 3
};
/**
* Types of content displayed by the plugin mapped to underlying data types.
* @enum {string}
*/
const ContentTypeToItemType = {
VERTEX: 'float32',
FACE: 'int32',
COLOR: 'uint8'
};
class ArrayBufferDataProvider {
/**
* ArrayBufferDataProvider constructor, initializes everything needed for
* future requests to the server.
* @param {!Object} requestManager Request manager to communicate with the
* server.
*/
constructor(requestManager) {
this._requestManager = requestManager;
this._canceller = new tf_backend.Canceller();
}
/**
* Requests new data from the server.
*/
reload(run, tag, sample) {
this._canceller.cancelAll();
return this._fetchMetadata(run, tag, sample);
}
/**
* Requests new data of some particular type from the server.
* @param {string} run Name of the run to get data for.
* @param {string} tag Name of the tag to get data for.
* @param {string} content_type Type of the content to retrieve.
* @param {number} sample Sample index from a batch of data.
   * @param {number} step Step value, representing a point in time when the
   *     event occurred.
* @param {!Object} meshData Map to populate with mesh data.
* @return {!Object} Promise object representing server request.
* @private
*/
_fetchDataByStep(run, tag, content_type, sample, step, meshData) {
const url = tf_backend.getRouter().pluginRoute(
'mesh', '/data',
new URLSearchParams({tag, run, content_type, sample, step}));
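    // Reshapes a flat typed array into a list of N rows with `channelsCount`
    // (3) components each, e.g. vertex coordinates or RGB colors.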
const reshapeTo1xNx3 = function (data) {
const channelsCount = 3;
let items = [];
for (let i = 0; i < data.length / channelsCount; i++) {
let dataEntry = [];
for (let j = 0; j < channelsCount; j++) {
dataEntry.push(data[i * channelsCount + j]);
}
items.push(dataEntry);
}
return items;
};
const processData = this._canceller.cancellable(response => {
if (response.cancelled) {
return Promise.reject({
code: vz_mesh.ErrorCodes.CANCELLED,
message: 'Response was invalidated.'
});
}
let buffer = response.value;
switch(content_type) {
case 'VERTEX':
meshData.vertices = reshapeTo1xNx3(new Float32Array(buffer));
break;
case 'FACE':
meshData.faces = reshapeTo1xNx3(new Int32Array(buffer));
break;
case 'COLOR':
meshData.colors = reshapeTo1xNx3(new Uint8Array(buffer));
break;
}
return meshData;
});
return this._requestManager
.fetch(
url, null, 'arraybuffer',
ContentTypeToItemType[content_type])
.then(response => response.arrayBuffer())
.then(processData);
}
/**
* Requests new data for each type of metadata from the server.
* Metadata consists of wall_time, step, tensor shape, content type and other
* info, but not tensor data itself.
* @param {!Object} stepDatum Dictionary with mesh data for a current step.
* @param {string} run Name of the run to get data for.
   * @param {string} tag Name of the tag to get data for.
* @param {number} sample Sample index from a batch of data.
* @return {!Object} Joint promise for all requests being sent.
* @private
*/
fetchData(stepDatum, run, tag, sample) {
let promises = [];
// Map to populate with mesh data, i.e. vertices, faces, etc.
let meshData = new Map();
Object.keys(ContentType).forEach(contentType => {
const component = (1 << ContentType[contentType]);
if (stepDatum.components & component) {
promises.push(this._fetchDataByStep(
run, tag, contentType, sample, stepDatum.step,
meshData));
}
});
return Promise.all(promises);
}
/**
   * Requests new metadata from the server.
   * @param {string} run Name of the run to get data for.
   * @param {string} tag Name of the tag to get data for.
   * @param {number} sample Sample index from a batch of data.
* @return {!Object} Promise for requested metadata.
* @private
*/
_fetchMetadata(run, tag, sample) {
this._canceller.cancelAll();
const url = tf_backend.getRouter().pluginRoute(
'mesh', '/meshes', new URLSearchParams({tag, run, sample}));
const requestData = this._canceller.cancellable(response => {
if (response.cancelled) {
return Promise.reject({
code: vz_mesh.ErrorCodes.CANCELLED,
message: 'Response was invalidated.'
});
}
return response.value;
});
return this._requestManager.fetch(url)
.then(response => response.json())
.then(requestData)
.then(this._processMetadata.bind(this));
}
/**
   * Processes raw server data into a frontend-friendly format.
* @param {!Array|undefined} data list of raw server records.
* @return {!Array} list of step datums.
* @private
*/
_processMetadata(data) {
if (!data) return;
const stepToData = new Map();
for (let i = 0; i < data.length; i++) {
let dataEntry = data[i];
if (!stepToData.has(dataEntry.step)) {
stepToData.set(dataEntry.step, []);
}
stepToData.get(dataEntry.step).push(dataEntry);
}
let datums = [];
stepToData.forEach((data) => {
let datum = this._createStepDatum(data[0]);
datums.push(datum);
});
return datums;
}
/**
   * Processes a single row of server-side data and puts it in a more
   * structured form.
* @param {!Object} metadata Object describing step summary.
* @private
* @return {!Object} with wall_time, step number and data for the step.
*/
_createStepDatum(metadata) {
return {
// The wall time within the metadata is in seconds. The Date
// constructor accepts a time in milliseconds, so we multiply by 1000.
wall_time: new Date(metadata.wall_time * 1000),
step: metadata.step,
config: metadata.config,
content_type: metadata.content_type,
components: metadata.components
};
}
}
vz_mesh.ArrayBufferDataProvider = ArrayBufferDataProvider;
})(vz_mesh || (vz_mesh = {})); // end of vz_mesh namespace
| -1 |
tensorflow/graphics | 489 | Enforce `Framebuffer` to accept only tensors with single batch dimension. | Enforce `Framebuffer` to accept only tensors with single batch dimension.
| copybara-service[bot] | "2021-02-03T21:06:22Z" | "2021-02-12T23:59:48Z" | 3a4f1952ed967fb884dc031eeda6dac3fbefbe52 | b7a2bf260d6fcf924fddcbb6dba36c72ece66990 | Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
| ./tensorflow_graphics/projects/nasa/lib/utils.py | # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""General helper functions."""
from os import path
import numpy as np
from skimage import measure
import tensorflow.compat.v1 as tf
from tensorflow_graphics.projects.cvxnet.lib.libmise import mise
from tensorflow_graphics.projects.nasa.lib import datasets
from tensorflow_graphics.projects.nasa.lib import models
import tensorflow_probability as tfp
from tqdm import trange
import trimesh
tf.disable_eager_execution()
tfd = tfp.distributions
def define_flags():
"""Define command line flags."""
flags = tf.app.flags
# Dataset Parameters
flags.DEFINE_enum("dataset", "amass",
list(k for k in datasets.dataset_dict.keys()),
"Name of the dataset.")
flags.DEFINE_string("data_dir", None, "Directory to load data from.")
flags.mark_flag_as_required("data_dir")
flags.DEFINE_integer("sample_bbox", 1024, "Number of bbox samples.")
flags.DEFINE_integer("sample_surf", 1024, "Number of surface samples.")
flags.DEFINE_integer("batch_size", 12, "Batch size.")
flags.DEFINE_integer("motion", 0, "Index of the motion for evaluation.")
flags.DEFINE_integer("subject", 0, "Index of the subject for training.")
# Model Parameters
flags.DEFINE_enum("model", "nasa", list(k for k in models.model_dict.keys()),
"Name of the model.")
flags.DEFINE_integer("n_parts", 24, "Number of parts.")
flags.DEFINE_integer("total_dim", 960,
"Dimension of the latent vector (in total).")
flags.DEFINE_bool("shared_decoder", False, "Whether to use shared decoder.")
flags.DEFINE_float("soft_blend", 5., "The constant to blend parts.")
flags.DEFINE_bool("projection", True,
"Whether to use projected shape features.")
flags.DEFINE_float("level_set", 0.5, "The value of the level_set.")
flags.DEFINE_integer("n_dims", 3, "The dimension of the query points.")
# Training Parameters
flags.DEFINE_float("lr", 1e-4, "Learning rate")
flags.DEFINE_string("train_dir", None, "Training directory.")
flags.mark_flag_as_required("train_dir")
flags.DEFINE_integer("max_steps", 200000, "Number of optimization steps.")
flags.DEFINE_integer("save_every", 5000,
"Number of steps to save checkpoint.")
flags.DEFINE_integer("summary_every", 500,
"Number of steps to save checkpoint.")
flags.DEFINE_float("label_w", 0.5, "Weight of labed vertices loss.")
flags.DEFINE_float("minimal_w", 0.05, "Weight of minimal loss.")
flags.DEFINE_bool("use_vert", True,
"Whether to use vertices on the mesh for training.")
flags.DEFINE_bool("use_joint", True,
"Whether to use joint-based transformation.")
flags.DEFINE_integer("sample_vert", 2048, "Number of vertex samples.")
  # Evaluation Parameters
flags.DEFINE_bool("gen_mesh_only", False, "Whether to generate meshes only.")
# Tracking Parameters
flags.DEFINE_float("theta_lr", 5e-4, "Learning rate")
flags.DEFINE_integer("max_steps_per_frame", 1792,
"Number of optimization steps for tracking each frame.")
flags.DEFINE_enum("gradient_type", "reparam", ["vanilla", "reparam"],
"Type of gradient to use in theta optimization.")
flags.DEFINE_integer("sample_track_vert", 1024,
"Number of vertex samples for tracking each frame.")
flags.DEFINE_integer("n_noisy_samples", 8,
"Number of noisy samples per vertex")
flags.DEFINE_float("bandwidth", 1e-2, "Bandwidth of the gaussian noises.")
flags.DEFINE_bool(
"left_trans", False,
"Whether to use left side transformation (True) or right side (False).")
flags.DEFINE_string("joint_data", None, "Path to load joint data.")
flags.DEFINE_float("glue_w", 20., "Weight of length constraint loss.")
flags.DEFINE_float("trans_range", 1., "The range of allowed translations.")
def gen_mesh(sess,
feed_dict,
latent_holder,
point_holder,
latent,
occ,
batch_val,
hparams,
idx=0):
"""Generating meshes given a trained NASA model."""
scale = 1.1 # Scale of the padded bbox regarding the tight one.
level_set = hparams.level_set
latent_val = sess.run(latent, feed_dict)
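  # MISE (multiresolution isosurface extraction) incrementally refines an
  # occupancy grid: query() returns the grid points that still need to be
  # evaluated, and update() feeds the evaluated occupancy values back.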
mesh_extractor = mise.MISE(32, 3, level_set)
points = mesh_extractor.query()
gt_verts = batch_val["vert"].reshape([-1, 3])
gt_bbox = np.stack([gt_verts.min(axis=0), gt_verts.max(axis=0)], axis=0)
gt_center = (gt_bbox[0] + gt_bbox[1]) * 0.5
gt_scale = (gt_bbox[1] - gt_bbox[0]).max()
while points.shape[0] != 0:
orig_points = points
points = points.astype(np.float32)
points = (np.expand_dims(points, axis=0) / mesh_extractor.resolution -
0.5) * scale
points = points * gt_scale + gt_center
n_points = points.shape[1]
values = []
for i in range(0, n_points,
                   100000):  # Evaluate in chunks to prevent OOM when there are many points.
feed_dict[latent_holder] = latent_val
feed_dict[point_holder] = np.expand_dims(points[:, i:i + 100000], axis=1)
value = sess.run(occ[:, idx], feed_dict)
values.append(value)
values = np.concatenate(values, axis=1)
values = values[0, :, 0].astype(np.float64)
mesh_extractor.update(orig_points, values)
points = mesh_extractor.query()
value_grid = mesh_extractor.to_dense()
try:
value_grid = np.pad(value_grid, 1, "constant", constant_values=-1e6)
verts, faces, normals, unused_var = measure.marching_cubes_lewiner(
value_grid, min(level_set, value_grid.max()))
del normals
verts -= 1
verts /= np.array([
value_grid.shape[0] - 3, value_grid.shape[1] - 3,
value_grid.shape[2] - 3
],
dtype=np.float32)
verts = scale * (verts - 0.5)
verts = verts * gt_scale + gt_center
faces = np.stack([faces[..., 1], faces[..., 0], faces[..., 2]], axis=-1)
mesh = trimesh.Trimesh(vertices=verts, faces=faces)
return mesh
except: # pylint: disable=bare-except
return None
def save_mesh(sess,
feed_dict,
latent_holder,
point_holder,
latent,
occ,
batch_val,
hparams,
pth="meshes"):
"""Generate and save meshes to disk given a trained NASA model."""
name = batch_val["name"][0].decode("utf-8")
subject, motion, frame = amass_name_helper(name)
pth = path.join(hparams.train_dir, pth, frame)
if not tf.io.gfile.isdir(pth):
tf.io.gfile.makedirs(pth)
start = hparams.n_parts
for i in range(start, hparams.n_parts + 1):
mesh_model = gen_mesh(
sess,
feed_dict,
latent_holder,
point_holder,
latent,
occ,
batch_val,
hparams,
idx=i)
mesh_name = "full_pred.obj"
if mesh_model is not None:
with tf.io.gfile.GFile(path.join(pth, mesh_name), "w") as fout:
mesh_model.export(fout, file_type="obj")
return subject, motion, frame, mesh_model
def save_pointcloud(data, hparams, pth="pointcloud"):
"""Save pointcloud to disk."""
name = data["name"][0].decode("utf-8")
unused_subject, unused_motion, frame = amass_name_helper(name)
pth = path.join(hparams.train_dir, pth, frame)
if not tf.io.gfile.isdir(pth):
tf.io.gfile.makedirs(pth)
mesh_name = "pointcloud.obj"
with tf.io.gfile.GFile(path.join(pth, mesh_name), "w") as fout:
pointcloud = data["vert"].reshape([-1, 3])
for v in pointcloud:
fout.write("v {0} {1} {2}\n".format(*v.tolist()))
def amass_name_helper(name):
name, frame = name.split("-")
subject = name[:5]
motion = name[6:]
return subject, motion, frame
def make_summary_feed_dict(
iou_hook,
iou,
best_hook,
best_iou,
):
feed_dict = {}
feed_dict[iou_hook] = iou
feed_dict[best_hook] = best_iou
return feed_dict
def parse_global_step(ckpt):
basename = path.basename(ckpt)
return int(basename.split("-")[-1])
def compute_iou(sess, feed_dict, latent_holder, point_holder, latent, occ,
point, label, hparams):
"""Compute IoU."""
iou = 0.
eps = 1e-9
latent_val = sess.run(latent, feed_dict)
n_points = point.shape[2]
preds = []
for start in range(0, n_points, 100000):
feed_dict[point_holder] = point[:, :, start:start + 100000]
feed_dict[latent_holder] = latent_val
pred = sess.run(occ, feed_dict)
preds.append(pred)
pred = np.concatenate(preds, axis=2)
pred = (pred >= hparams.level_set).astype(np.float32)
label = (label[:, :1] >= 0.5).astype(np.float32).squeeze(axis=1)
iou += np.sum(pred * label) / np.maximum(np.sum(np.maximum(pred, label)), eps)
return iou
def compute_glue_loss(connect, end_pts, inv_transforms, inv_first_frame_trans,
joints, hparams):
"""Compute the prior term as a glue loss."""
n_dims = hparams.n_dims
# Invert the transformation
r_inv = inv_transforms[..., :n_dims, :n_dims]
t_inv = inv_transforms[..., :n_dims, -1:]
r = tf.transpose(r_inv, [0, 2, 1])
t = -tf.matmul(r, t_inv)
transforms = tf.concat(
[tf.concat([r, t], axis=-1), inv_transforms[..., -1:, :]], axis=-2)
transforms = tf.matmul(transforms, inv_first_frame_trans)
  # Compute transformations of parent joints and apply them to vectors from
  # frame 0.
father_transforms = tf.reduce_sum(
tf.expand_dims(transforms, axis=1) *
connect.reshape([hparams.n_parts, hparams.n_parts, 1, 1]),
axis=0)
end_pts_homo = tf.expand_dims(
tf.concat([end_pts, tf.ones_like(end_pts[..., :1])], axis=-1), axis=-1)
end_pts_transformed = tf.matmul(father_transforms, end_pts_homo)
end_pts_transformed = tf.squeeze(end_pts_transformed, axis=-1)[..., :n_dims]
# Compute vectors in current configuration
pred_links = tf.reshape(joints, [hparams.n_parts, n_dims])
# Compute distance between links and transformed vectors
return tf.reduce_sum(tf.square(pred_links - end_pts_transformed))
def vanilla_theta_gradient(model_fn, batch_holder, hparams):
"""A vanilla gradient estimator for the pose, theta."""
latent_holder, latent, occ_eval = model_fn(batch_holder, None, None,
"gen_mesh")
if hparams.sample_vert > 0:
points = batch_holder["point"]
weights = batch_holder["weight"]
n_vert = tf.shape(points)[2]
sample_indices = tf.random.uniform([1, 1, hparams.sample_vert],
minval=0,
maxval=n_vert,
dtype=tf.int32)
points = tf.gather(points, sample_indices, axis=2, batch_dims=2)
weights = tf.gather(weights, sample_indices, axis=2, batch_dims=2)
batch_holder["point"] = points
batch_holder["weight"] = weights
unused_var0, unused_var1, occ = model_fn(batch_holder, None, None, "gen_mesh")
return latent_holder, latent, occ_eval, tf.reduce_mean(
tf.square(occ - hparams.level_set))
def reparam_theta_gradient(model_fn, batch_holder, hparams):
"""A gradient estimaor for the pose, theta, using the reparam trick."""
sigma = hparams.bandwidth
n_samples = hparams.n_noisy_samples
latent_holder, latent, occ_eval = model_fn(batch_holder, None, None,
"gen_mesh")
if hparams.sample_vert > 0:
points = batch_holder["point"]
weights = batch_holder["weight"]
n_vert = tf.shape(points)[2]
sample_indices = tf.random.uniform([1, 1, hparams.sample_vert],
minval=0,
maxval=n_vert,
dtype=tf.int32)
points = tf.gather(points, sample_indices, axis=2, batch_dims=2)
weights = tf.gather(weights, sample_indices, axis=2, batch_dims=2)
batch_holder["point"] = points
batch_holder["weight"] = weights
dist = tfd.Normal(loc=0., scale=sigma)
n_pts = hparams.sample_vert if hparams.sample_vert > 0 else hparams.n_vert
noises = dist.sample((1, hparams.n_parts, n_pts, n_samples, hparams.n_dims))
unused_var0, unused_var1, occ = model_fn(batch_holder, noises, None,
"gen_mesh")
occ = tf.reshape(occ, [1, hparams.n_parts + 1, -1, n_samples, 1])
occ = tf.reduce_mean(occ[:, hparams.n_parts:], axis=3)
return latent_holder, latent, occ_eval, tf.reduce_mean(
tf.square(occ - hparams.level_set))
def optimize_theta(feed_dict, loss, reset_op, train_op, rec_loss, glue_loss,
sess, k, hparams):
"""Optimize the pose, theta, during tracking."""
sess.run(reset_op)
loss_val = 0
glue_val = 0
with trange(hparams.max_steps_per_frame) as t:
for unused_i in t:
loss_val, unused_var, rec_val, glue_val = sess.run(
[loss, train_op, rec_loss, glue_loss], feed_dict)
t.set_description("Frame_{0} {1:.4f}|{2:.4f}".format(
k, rec_val, glue_val))
return loss_val, glue_val
| # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""General helper functions."""
from os import path
import numpy as np
from skimage import measure
import tensorflow.compat.v1 as tf
from tensorflow_graphics.projects.cvxnet.lib.libmise import mise
from tensorflow_graphics.projects.nasa.lib import datasets
from tensorflow_graphics.projects.nasa.lib import models
import tensorflow_probability as tfp
from tqdm import trange
import trimesh
tf.disable_eager_execution()
tfd = tfp.distributions
def define_flags():
"""Define command line flags."""
flags = tf.app.flags
# Dataset Parameters
flags.DEFINE_enum("dataset", "amass",
list(k for k in datasets.dataset_dict.keys()),
"Name of the dataset.")
flags.DEFINE_string("data_dir", None, "Directory to load data from.")
flags.mark_flag_as_required("data_dir")
flags.DEFINE_integer("sample_bbox", 1024, "Number of bbox samples.")
flags.DEFINE_integer("sample_surf", 1024, "Number of surface samples.")
flags.DEFINE_integer("batch_size", 12, "Batch size.")
flags.DEFINE_integer("motion", 0, "Index of the motion for evaluation.")
flags.DEFINE_integer("subject", 0, "Index of the subject for training.")
# Model Parameters
flags.DEFINE_enum("model", "nasa", list(k for k in models.model_dict.keys()),
"Name of the model.")
flags.DEFINE_integer("n_parts", 24, "Number of parts.")
flags.DEFINE_integer("total_dim", 960,
"Dimension of the latent vector (in total).")
flags.DEFINE_bool("shared_decoder", False, "Whether to use shared decoder.")
flags.DEFINE_float("soft_blend", 5., "The constant to blend parts.")
flags.DEFINE_bool("projection", True,
"Whether to use projected shape features.")
flags.DEFINE_float("level_set", 0.5, "The value of the level_set.")
flags.DEFINE_integer("n_dims", 3, "The dimension of the query points.")
# Training Parameters
flags.DEFINE_float("lr", 1e-4, "Learning rate")
flags.DEFINE_string("train_dir", None, "Training directory.")
flags.mark_flag_as_required("train_dir")
flags.DEFINE_integer("max_steps", 200000, "Number of optimization steps.")
flags.DEFINE_integer("save_every", 5000,
"Number of steps to save checkpoint.")
flags.DEFINE_integer("summary_every", 500,
"Number of steps to save checkpoint.")
flags.DEFINE_float("label_w", 0.5, "Weight of labed vertices loss.")
flags.DEFINE_float("minimal_w", 0.05, "Weight of minimal loss.")
flags.DEFINE_bool("use_vert", True,
"Whether to use vertices on the mesh for training.")
flags.DEFINE_bool("use_joint", True,
"Whether to use joint-based transformation.")
flags.DEFINE_integer("sample_vert", 2048, "Number of vertex samples.")
  # Evaluation Parameters
flags.DEFINE_bool("gen_mesh_only", False, "Whether to generate meshes only.")
# Tracking Parameters
flags.DEFINE_float("theta_lr", 5e-4, "Learning rate")
flags.DEFINE_integer("max_steps_per_frame", 1792,
"Number of optimization steps for tracking each frame.")
flags.DEFINE_enum("gradient_type", "reparam", ["vanilla", "reparam"],
"Type of gradient to use in theta optimization.")
flags.DEFINE_integer("sample_track_vert", 1024,
"Number of vertex samples for tracking each frame.")
flags.DEFINE_integer("n_noisy_samples", 8,
"Number of noisy samples per vertex")
flags.DEFINE_float("bandwidth", 1e-2, "Bandwidth of the gaussian noises.")
flags.DEFINE_bool(
"left_trans", False,
"Whether to use left side transformation (True) or right side (False).")
flags.DEFINE_string("joint_data", None, "Path to load joint data.")
flags.DEFINE_float("glue_w", 20., "Weight of length constraint loss.")
flags.DEFINE_float("trans_range", 1., "The range of allowed translations.")
def gen_mesh(sess,
feed_dict,
latent_holder,
point_holder,
latent,
occ,
batch_val,
hparams,
idx=0):
"""Generating meshes given a trained NASA model."""
scale = 1.1 # Scale of the padded bbox regarding the tight one.
level_set = hparams.level_set
latent_val = sess.run(latent, feed_dict)
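  # MISE (multiresolution isosurface extraction) incrementally refines an
  # occupancy grid: query() returns the grid points that still need to be
  # evaluated, and update() feeds the evaluated occupancy values back.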
mesh_extractor = mise.MISE(32, 3, level_set)
points = mesh_extractor.query()
gt_verts = batch_val["vert"].reshape([-1, 3])
gt_bbox = np.stack([gt_verts.min(axis=0), gt_verts.max(axis=0)], axis=0)
gt_center = (gt_bbox[0] + gt_bbox[1]) * 0.5
gt_scale = (gt_bbox[1] - gt_bbox[0]).max()
while points.shape[0] != 0:
orig_points = points
points = points.astype(np.float32)
points = (np.expand_dims(points, axis=0) / mesh_extractor.resolution -
0.5) * scale
points = points * gt_scale + gt_center
n_points = points.shape[1]
values = []
for i in range(0, n_points,
                   100000):  # Evaluate in chunks to prevent OOM when there are many points.
feed_dict[latent_holder] = latent_val
feed_dict[point_holder] = np.expand_dims(points[:, i:i + 100000], axis=1)
value = sess.run(occ[:, idx], feed_dict)
values.append(value)
values = np.concatenate(values, axis=1)
values = values[0, :, 0].astype(np.float64)
mesh_extractor.update(orig_points, values)
points = mesh_extractor.query()
value_grid = mesh_extractor.to_dense()
try:
value_grid = np.pad(value_grid, 1, "constant", constant_values=-1e6)
verts, faces, normals, unused_var = measure.marching_cubes_lewiner(
value_grid, min(level_set, value_grid.max()))
del normals
verts -= 1
verts /= np.array([
value_grid.shape[0] - 3, value_grid.shape[1] - 3,
value_grid.shape[2] - 3
],
dtype=np.float32)
verts = scale * (verts - 0.5)
verts = verts * gt_scale + gt_center
faces = np.stack([faces[..., 1], faces[..., 0], faces[..., 2]], axis=-1)
mesh = trimesh.Trimesh(vertices=verts, faces=faces)
return mesh
except: # pylint: disable=bare-except
return None
def save_mesh(sess,
feed_dict,
latent_holder,
point_holder,
latent,
occ,
batch_val,
hparams,
pth="meshes"):
"""Generate and save meshes to disk given a trained NASA model."""
name = batch_val["name"][0].decode("utf-8")
subject, motion, frame = amass_name_helper(name)
pth = path.join(hparams.train_dir, pth, frame)
if not tf.io.gfile.isdir(pth):
tf.io.gfile.makedirs(pth)
start = hparams.n_parts
for i in range(start, hparams.n_parts + 1):
mesh_model = gen_mesh(
sess,
feed_dict,
latent_holder,
point_holder,
latent,
occ,
batch_val,
hparams,
idx=i)
mesh_name = "full_pred.obj"
if mesh_model is not None:
with tf.io.gfile.GFile(path.join(pth, mesh_name), "w") as fout:
mesh_model.export(fout, file_type="obj")
return subject, motion, frame, mesh_model
def save_pointcloud(data, hparams, pth="pointcloud"):
"""Save pointcloud to disk."""
name = data["name"][0].decode("utf-8")
unused_subject, unused_motion, frame = amass_name_helper(name)
pth = path.join(hparams.train_dir, pth, frame)
if not tf.io.gfile.isdir(pth):
tf.io.gfile.makedirs(pth)
mesh_name = "pointcloud.obj"
with tf.io.gfile.GFile(path.join(pth, mesh_name), "w") as fout:
pointcloud = data["vert"].reshape([-1, 3])
for v in pointcloud:
fout.write("v {0} {1} {2}\n".format(*v.tolist()))
def amass_name_helper(name):
name, frame = name.split("-")
subject = name[:5]
motion = name[6:]
return subject, motion, frame
def make_summary_feed_dict(
iou_hook,
iou,
best_hook,
best_iou,
):
feed_dict = {}
feed_dict[iou_hook] = iou
feed_dict[best_hook] = best_iou
return feed_dict
def parse_global_step(ckpt):
basename = path.basename(ckpt)
return int(basename.split("-")[-1])
def compute_iou(sess, feed_dict, latent_holder, point_holder, latent, occ,
point, label, hparams):
"""Compute IoU."""
iou = 0.
eps = 1e-9
latent_val = sess.run(latent, feed_dict)
n_points = point.shape[2]
preds = []
for start in range(0, n_points, 100000):
feed_dict[point_holder] = point[:, :, start:start + 100000]
feed_dict[latent_holder] = latent_val
pred = sess.run(occ, feed_dict)
preds.append(pred)
pred = np.concatenate(preds, axis=2)
pred = (pred >= hparams.level_set).astype(np.float32)
label = (label[:, :1] >= 0.5).astype(np.float32).squeeze(axis=1)
iou += np.sum(pred * label) / np.maximum(np.sum(np.maximum(pred, label)), eps)
return iou
def compute_glue_loss(connect, end_pts, inv_transforms, inv_first_frame_trans,
joints, hparams):
"""Compute the prior term as a glue loss."""
n_dims = hparams.n_dims
# Invert the transformation
r_inv = inv_transforms[..., :n_dims, :n_dims]
t_inv = inv_transforms[..., :n_dims, -1:]
r = tf.transpose(r_inv, [0, 2, 1])
t = -tf.matmul(r, t_inv)
transforms = tf.concat(
[tf.concat([r, t], axis=-1), inv_transforms[..., -1:, :]], axis=-2)
transforms = tf.matmul(transforms, inv_first_frame_trans)
  # Compute transformations of parent joints and apply them to vectors from
  # frame 0.
father_transforms = tf.reduce_sum(
tf.expand_dims(transforms, axis=1) *
connect.reshape([hparams.n_parts, hparams.n_parts, 1, 1]),
axis=0)
end_pts_homo = tf.expand_dims(
tf.concat([end_pts, tf.ones_like(end_pts[..., :1])], axis=-1), axis=-1)
end_pts_transformed = tf.matmul(father_transforms, end_pts_homo)
end_pts_transformed = tf.squeeze(end_pts_transformed, axis=-1)[..., :n_dims]
# Compute vectors in current configuration
pred_links = tf.reshape(joints, [hparams.n_parts, n_dims])
# Compute distance between links and transformed vectors
return tf.reduce_sum(tf.square(pred_links - end_pts_transformed))
def vanilla_theta_gradient(model_fn, batch_holder, hparams):
"""A vanilla gradient estimator for the pose, theta."""
latent_holder, latent, occ_eval = model_fn(batch_holder, None, None,
"gen_mesh")
if hparams.sample_vert > 0:
points = batch_holder["point"]
weights = batch_holder["weight"]
n_vert = tf.shape(points)[2]
sample_indices = tf.random.uniform([1, 1, hparams.sample_vert],
minval=0,
maxval=n_vert,
dtype=tf.int32)
points = tf.gather(points, sample_indices, axis=2, batch_dims=2)
weights = tf.gather(weights, sample_indices, axis=2, batch_dims=2)
batch_holder["point"] = points
batch_holder["weight"] = weights
unused_var0, unused_var1, occ = model_fn(batch_holder, None, None, "gen_mesh")
return latent_holder, latent, occ_eval, tf.reduce_mean(
tf.square(occ - hparams.level_set))
def reparam_theta_gradient(model_fn, batch_holder, hparams):
"""A gradient estimaor for the pose, theta, using the reparam trick."""
sigma = hparams.bandwidth
n_samples = hparams.n_noisy_samples
latent_holder, latent, occ_eval = model_fn(batch_holder, None, None,
"gen_mesh")
if hparams.sample_vert > 0:
points = batch_holder["point"]
weights = batch_holder["weight"]
n_vert = tf.shape(points)[2]
sample_indices = tf.random.uniform([1, 1, hparams.sample_vert],
minval=0,
maxval=n_vert,
dtype=tf.int32)
points = tf.gather(points, sample_indices, axis=2, batch_dims=2)
weights = tf.gather(weights, sample_indices, axis=2, batch_dims=2)
batch_holder["point"] = points
batch_holder["weight"] = weights
dist = tfd.Normal(loc=0., scale=sigma)
n_pts = hparams.sample_vert if hparams.sample_vert > 0 else hparams.n_vert
noises = dist.sample((1, hparams.n_parts, n_pts, n_samples, hparams.n_dims))
unused_var0, unused_var1, occ = model_fn(batch_holder, noises, None,
"gen_mesh")
occ = tf.reshape(occ, [1, hparams.n_parts + 1, -1, n_samples, 1])
occ = tf.reduce_mean(occ[:, hparams.n_parts:], axis=3)
return latent_holder, latent, occ_eval, tf.reduce_mean(
tf.square(occ - hparams.level_set))
def optimize_theta(feed_dict, loss, reset_op, train_op, rec_loss, glue_loss,
sess, k, hparams):
"""Optimize the pose, theta, during tracking."""
sess.run(reset_op)
loss_val = 0
glue_val = 0
with trange(hparams.max_steps_per_frame) as t:
for unused_i in t:
loss_val, unused_var, rec_val, glue_val = sess.run(
[loss, train_op, rec_loss, glue_loss], feed_dict)
t.set_description("Frame_{0} {1:.4f}|{2:.4f}".format(
k, rec_val, glue_val))
return loss_val, glue_val
| -1 |
tensorflow/graphics | 489 | Enforce `Framebuffer` to accept only tensors with single batch dimension. | Enforce `Framebuffer` to accept only tensors with single batch dimension.
| copybara-service[bot] | "2021-02-03T21:06:22Z" | "2021-02-12T23:59:48Z" | 3a4f1952ed967fb884dc031eeda6dac3fbefbe52 | b7a2bf260d6fcf924fddcbb6dba36c72ece66990 | Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
| ./tensorflow_graphics/datasets/modelnet40/modelnet40_show.py | # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Visualization in 3D of modelnet40 dataset.
See: https://www.tensorflow.org/datasets/api_docs/python/tfds/load
"""
from absl import app
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D # pylint:disable=unused-import
from tensorflow_graphics.datasets.modelnet40 import ModelNet40
def main(_):
ds_train, _ = ModelNet40.load(
split="train", data_dir="~/tensorflow_dataset", with_info=True)
for example in ds_train.take(1):
points = example["points"]
label = example["label"]
fig = plt.figure()
ax3 = fig.add_subplot(111, projection="3d")
ax3.set_title("Example with label {}".format(label))
scatter3 = lambda p, c="r", *args: ax3.scatter(p[:, 0], p[:, 1], p[:, 2], c)
scatter3(points)
if __name__ == "__main__":
app.run(main)
| # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Visualization in 3D of modelnet40 dataset.
See: https://www.tensorflow.org/datasets/api_docs/python/tfds/load
"""
from absl import app
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D # pylint:disable=unused-import
from tensorflow_graphics.datasets.modelnet40 import ModelNet40
def main(_):
ds_train, _ = ModelNet40.load(
split="train", data_dir="~/tensorflow_dataset", with_info=True)
for example in ds_train.take(1):
points = example["points"]
label = example["label"]
fig = plt.figure()
ax3 = fig.add_subplot(111, projection="3d")
ax3.set_title("Example with label {}".format(label))
scatter3 = lambda p, c="r", *args: ax3.scatter(p[:, 0], p[:, 1], p[:, 2], c)
scatter3(points)
if __name__ == "__main__":
app.run(main)
| -1 |
tensorflow/graphics | 489 | Enforce `Framebuffer` to accept only tensors with single batch dimension. | Enforce `Framebuffer` to accept only tensors with single batch dimension.
| copybara-service[bot] | "2021-02-03T21:06:22Z" | "2021-02-12T23:59:48Z" | 3a4f1952ed967fb884dc031eeda6dac3fbefbe52 | b7a2bf260d6fcf924fddcbb6dba36c72ece66990 | Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
| ./tensorflow_graphics/util/doc.py | # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Query environment variable for documentation building."""
import os
def _import_tfg_docs():
"""Checks if __init__.py imports should be executed (for buildling docs)."""
return os.getenv("TFG_DOC_IMPORTS", "0") == "1"
def enable_tfg_doc_imports():
"""Re-enables the imports in the __init__.py so that docs can be built."""
os.environ["TFG_DOC_IMPORTS"] = "1"
| # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Query environment variable for documentation building."""
import os
def _import_tfg_docs():
"""Checks if __init__.py imports should be executed (for buildling docs)."""
return os.getenv("TFG_DOC_IMPORTS", "0") == "1"
def enable_tfg_doc_imports():
"""Re-enables the imports in the __init__.py so that docs can be built."""
os.environ["TFG_DOC_IMPORTS"] = "1"
| -1 |
tensorflow/graphics | 489 | Enforce `Framebuffer` to accept only tensors with single batch dimension. | Enforce `Framebuffer` to accept only tensors with single batch dimension.
| copybara-service[bot] | "2021-02-03T21:06:22Z" | "2021-02-12T23:59:48Z" | 3a4f1952ed967fb884dc031eeda6dac3fbefbe52 | b7a2bf260d6fcf924fddcbb6dba36c72ece66990 | Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
| ./tensorflow_graphics/projects/local_implicit_grid/reconstruct_geometry.py | # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Reconstruct scene using LIG.
"""
import os
import warnings
from absl import app
from absl import flags
import numpy as np
from tensorflow.compat.v1.io import gfile
from tensorflow_graphics.projects.local_implicit_grid.core import point_utils as pu
from tensorflow_graphics.projects.local_implicit_grid.core import postprocess
from tensorflow_graphics.projects.local_implicit_grid.core import reconstruction as rec
import trimesh
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
flags.DEFINE_string('input_ply', '', 'Input point sample ply file.')
flags.DEFINE_string('output_ply', '', 'Reconstructed scene ply file.')
flags.DEFINE_integer('steps', 10000, 'Number of optimization steps.')
flags.DEFINE_integer('npoints', 10000,
'Number of points to sample per iteration during optim.')
flags.DEFINE_float('part_size', 0.25, 'Size of parts per cell (meters).')
flags.DEFINE_float('init_std', 0.02, 'Initial std to draw random code from.')
flags.DEFINE_integer('res_per_part', 0,
                     'Evaluation resolution per part. A higher value produces a '
'finer output mesh. 0 to use default value. '
'Recommended value: 8, 16 or 32.')
flags.DEFINE_boolean('overlap', True, 'Use overlapping latent grids.')
flags.DEFINE_boolean('postprocess', True, 'Post process to remove backfaces.')
flags.DEFINE_string('ckpt_dir', 'pretrained_ckpt',
'Checkpoint directory.')
FLAGS = flags.FLAGS
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
if not FLAGS.input_ply:
raise IOError('--input_ply must be specified.')
if not FLAGS.output_ply:
FLAGS.output_ply = FLAGS.input_ply.replace('.ply', '.reconstruct.ply')
# load point cloud from ply file
v, n = pu.read_point_ply(FLAGS.input_ply)
# check if part size is too large
min_bb = np.min(np.max(v, axis=0) - np.min(v, axis=0))
if FLAGS.part_size > 0.25 * min_bb:
warnings.warn(
'WARNING: part_size seems too large. Recommend using a part_size < '
'{:.2f} for this shape.'.format(0.25 * min_bb), UserWarning)
surface_points = np.concatenate([v, n], axis=1)
near_surface_samples = rec.get_in_out_from_ray(
surface_points, sample_factor=10, std=0.01)
xmin = np.min(surface_points[:, :3], 0)
xmax = np.max(surface_points[:, :3], 0)
# add some extra slack to xmin and xmax
xmin -= FLAGS.part_size
xmax += FLAGS.part_size
if FLAGS.res_per_part == 0:
res_per_part = int(64*FLAGS.part_size)
else:
res_per_part = FLAGS.res_per_part
npts = min(near_surface_samples.shape[0], FLAGS.npoints)-1
print('Performing latent grid optimization...')
v, f, _, _ = rec.encode_decoder_one_scene(
near_surface_samples, FLAGS.ckpt_dir, FLAGS.part_size, overlap=True,
indep_pt_loss=True, init_std=FLAGS.init_std,
xmin=xmin, xmax=xmax, res_per_part=res_per_part,
npts=npts, steps=FLAGS.steps)
out_dir = os.path.dirname(FLAGS.output_ply)
if out_dir and not gfile.exists(out_dir):
gfile.makedirs(out_dir)
mesh = trimesh.Trimesh(v, f)
if FLAGS.postprocess:
print('Postprocessing generated mesh...')
mesh = postprocess.remove_backface(mesh, surface_points)
print('Writing reconstructed mesh to {}'.format(FLAGS.output_ply))
with gfile.GFile(FLAGS.output_ply, 'wb') as fh:
mesh.export(fh, 'ply')
if __name__ == '__main__':
app.run(main)
| # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Reconstruct scene using LIG.
"""
import os
import warnings
from absl import app
from absl import flags
import numpy as np
from tensorflow.compat.v1.io import gfile
from tensorflow_graphics.projects.local_implicit_grid.core import point_utils as pu
from tensorflow_graphics.projects.local_implicit_grid.core import postprocess
from tensorflow_graphics.projects.local_implicit_grid.core import reconstruction as rec
import trimesh
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
flags.DEFINE_string('input_ply', '', 'Input point sample ply file.')
flags.DEFINE_string('output_ply', '', 'Reconstructed scene ply file.')
flags.DEFINE_integer('steps', 10000, 'Number of optimization steps.')
flags.DEFINE_integer('npoints', 10000,
'Number of points to sample per iteration during optim.')
flags.DEFINE_float('part_size', 0.25, 'Size of parts per cell (meters).')
flags.DEFINE_float('init_std', 0.02, 'Initial std to draw random code from.')
flags.DEFINE_integer('res_per_part', 0,
                     'Evaluation resolution per part. A higher value produces a '
'finer output mesh. 0 to use default value. '
'Recommended value: 8, 16 or 32.')
flags.DEFINE_boolean('overlap', True, 'Use overlapping latent grids.')
flags.DEFINE_boolean('postprocess', True, 'Post process to remove backfaces.')
flags.DEFINE_string('ckpt_dir', 'pretrained_ckpt',
'Checkpoint directory.')
FLAGS = flags.FLAGS
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
if not FLAGS.input_ply:
raise IOError('--input_ply must be specified.')
if not FLAGS.output_ply:
FLAGS.output_ply = FLAGS.input_ply.replace('.ply', '.reconstruct.ply')
# load point cloud from ply file
v, n = pu.read_point_ply(FLAGS.input_ply)
# check if part size is too large
min_bb = np.min(np.max(v, axis=0) - np.min(v, axis=0))
if FLAGS.part_size > 0.25 * min_bb:
warnings.warn(
'WARNING: part_size seems too large. Recommend using a part_size < '
'{:.2f} for this shape.'.format(0.25 * min_bb), UserWarning)
surface_points = np.concatenate([v, n], axis=1)
near_surface_samples = rec.get_in_out_from_ray(
surface_points, sample_factor=10, std=0.01)
xmin = np.min(surface_points[:, :3], 0)
xmax = np.max(surface_points[:, :3], 0)
# add some extra slack to xmin and xmax
xmin -= FLAGS.part_size
xmax += FLAGS.part_size
if FLAGS.res_per_part == 0:
res_per_part = int(64*FLAGS.part_size)
else:
res_per_part = FLAGS.res_per_part
npts = min(near_surface_samples.shape[0], FLAGS.npoints)-1
print('Performing latent grid optimization...')
v, f, _, _ = rec.encode_decoder_one_scene(
near_surface_samples, FLAGS.ckpt_dir, FLAGS.part_size, overlap=True,
indep_pt_loss=True, init_std=FLAGS.init_std,
xmin=xmin, xmax=xmax, res_per_part=res_per_part,
npts=npts, steps=FLAGS.steps)
out_dir = os.path.dirname(FLAGS.output_ply)
if out_dir and not gfile.exists(out_dir):
gfile.makedirs(out_dir)
mesh = trimesh.Trimesh(v, f)
if FLAGS.postprocess:
print('Postprocessing generated mesh...')
mesh = postprocess.remove_backface(mesh, surface_points)
print('Writing reconstructed mesh to {}'.format(FLAGS.output_ply))
with gfile.GFile(FLAGS.output_ply, 'wb') as fh:
mesh.export(fh, 'ply')
if __name__ == '__main__':
app.run(main)
| -1 |
tensorflow/graphics | 489 | Enforce `Framebuffer` to accept only tensors with single batch dimension. | Enforce `Framebuffer` to accept only tensors with single batch dimension.
| copybara-service[bot] | "2021-02-03T21:06:22Z" | "2021-02-12T23:59:48Z" | 3a4f1952ed967fb884dc031eeda6dac3fbefbe52 | b7a2bf260d6fcf924fddcbb6dba36c72ece66990 | Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
| ./tensorflow_graphics/datasets/modelnet40/__init__.py | # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""`tensorflow_graphics.datasets.modelnet40` module."""
from tensorflow_graphics.datasets.modelnet40.modelnet40 import ModelNet40
__all__ = [
"ModelNet40",
]
| # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""`tensorflow_graphics.datasets.modelnet40` module."""
from tensorflow_graphics.datasets.modelnet40.modelnet40 import ModelNet40
__all__ = [
"ModelNet40",
]
| -1 |
tensorflow/graphics | 489 | Enforce `Framebuffer` to accept only tensors with single batch dimension. | Enforce `Framebuffer` to accept only tensors with single batch dimension.
| copybara-service[bot] | "2021-02-03T21:06:22Z" | "2021-02-12T23:59:48Z" | 3a4f1952ed967fb884dc031eeda6dac3fbefbe52 | b7a2bf260d6fcf924fddcbb6dba36c72ece66990 | Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
| ./CONTRIBUTING.md | # How to Contribute
We'd love to accept your patches and contributions to this project. There are
just a few small guidelines you need to follow.
## Contributor License Agreement
Contributions to this project must be accompanied by a Contributor License
Agreement. You (or your employer) retain the copyright to your contribution;
this simply gives us permission to use and redistribute your contributions as
part of the project. Head over to <https://cla.developers.google.com/> to see
your current agreements on file or to sign a new one.
You generally only need to submit a CLA once, so if you've already submitted one
(even if it was for a different project), you probably don't need to do it
again.
## Code reviews
All submissions, including submissions by project members, require review. We
use GitHub pull requests for this purpose. Consult
[GitHub Help](https://help.github.com/articles/about-pull-requests/) for more
information on using pull requests.
## What features to add?
The library is open to any contributions along the lines of computer graphics,
with the top-level themes being rendering, physics simulation, and geometry
processing. Contributions can be in the form of low-level functions (the
majority of the library), neural network layers, or Colaboratory notebooks.
## Guidelines for Tensorflow operations
TensorFlow Graphics follows the TensorFlow
[contributor guidelines](https://www.tensorflow.org/community/contribute) and
[code style guide](https://www.tensorflow.org/community/contribute/code_style).
Besides these, TensorFlow Graphics has a few more guidelines which you can find
below.
### Programming languages
Unless this comes at a significant performance hit, pure Python is preferred.
### Structure of a function
The general structure of a function should be as follows:
* Name of the function followed by inputs to that function
* Doc-string documentation
* Definition of the scope using `tf.compat.v1.name_scope`
* Functions that take tensors as arguments should call `tf.convert_to_tensor`
* Checking the shape and value of the inputs as necessary
* Main logic of the function
### Function names
Prefer function names that are concise, descriptive, and integrate well with the
module name when called. For instance, the `rotate` function from the
`rotation_matrix_3d` sub-module can be called using `rotation_matrix_3d.rotate`,
and makes it easy for anyone to understand what is being calculated. Functions
that are only meant to be local to the file in which they are written should
have an underscore before their name.
### Input parameters
The first arguments should be tensors, followed by python parameters, and
finally the name scope for the TensorFlow operation.
### Input shapes
* The first dimensions of a tensor should represent the shape of the batch,
and the last dimensions should represent the core shape of the elements used
by the function. For instance, `rotation_matrix_3d.rotate` accepts rotation
matrices of shape `[A1, ..., An, 3, 3]` where `[A1, ..., An]` are the
optional batch dimensions, and `[3, 3]` is the shape required to capture 3D
rotation matrices.
* Every function must support batch dimensions of any shape, including tensors
with no batch dimensions.
* For input tensors with common batch shapes, document whether they can be
broadcast compatible or not, and try to make them compatible when possible
by, for instance, using `shape.get_broadcasted_shape` and `tf.broadcast_to`.
### Documentation
Every function must have a docstring-type documentation describing what the
function is performing, its arguments, and what is returned. The input sizes
must be written between backquotes with batch dimensions indexed by letters and
numbers, for instance: \`[A1, ..., An, 3]\`. Here `[A1, ..., An]` are the batch
dimensions, and 3 is the intrinsic dimension required for the operation (e.g. a
point in 3d). Prefer to put the batch dimension first.
### Error handling
Handling unexpected inputs usually consists of checking that their shapes are
consistent with expectations, which can be performed with `shape.check_static`,
but also of checking that the contents of the tensors are valid (e.g. values in
a specific range, no NaNs, etc.), which can be performed with utilities provided
in the `asserts` module.
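For example, a validation helper along these lines (an illustrative sketch that
only uses utilities referenced elsewhere in this guide):

```python
import tensorflow as tf

from tensorflow_graphics.util import asserts
from tensorflow_graphics.util import shape


def _check_direction(direction):
  """Checks the shape and content of a batch of 3d direction vectors."""
  direction = tf.convert_to_tensor(value=direction)
  # Shape check: the last dimension must hold 3d vectors.
  shape.check_static(
      tensor=direction, tensor_name="direction", has_dim_equals=(-1, 3))
  # Content check: a direction with a zero norm is invalid.
  return asserts.assert_nonzero_norm(direction)
```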
### Differentiability and stable gradients
There are several TF operations that can turn derivatives to zero at unintended
points of your functions / operations. This can be avoided by using tools
provided in the util.safe_ops module. If it can not be avoided, make sure to add
tests checking the Jacobians of the function at the potentially discontinuous
points of the function. See [Testing Jacobians](#testing-jacobians) below.
Examples of such functions include:
* tf.maximum / tf.minimum(a(x), b(x)): These create piecewise functions, which
means derivatives can be discontinuous or zero for some ranges or points.
* tf.clip_by_value / tf.clip_by_norm: These are also piecewise functions where
the actual function is replaced with a constant piece for certain points or
ranges, which makes the derivative zero, even if it actually isn’t.
* tf.where(cond, a, b): This is another way of creating piecewise functions.
This should be used only if it is really meant to create a piecewise
function.
The util.safe_ops submodule contains helper functions that can resolve issues
with divisions by zero, but also helpers to ensure that the data is in the
appropriate range. For instance a dot product of two normalized vectors can
result in values outside of [-1.0, 1.0] range due to fixed point arithmetic.
This in turn may result in NaN if used with arcsin or arccos. In such cases,
safe_shrink in util.safe_ops should be used rather than clipping the range,
since clipping removes derivatives which should be non-zero at these points.
Cases involving zero divided by zero are a bit more involved and require
dedicated workarounds.
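As an illustration, an angle computation that feeds a dot product to `tf.acos`
could be written as follows (a sketch; the exact keyword arguments of
`safe_shrink` are assumed here and should be checked against `util.safe_ops`):

```python
import tensorflow as tf

from tensorflow_graphics.math import vector
from tensorflow_graphics.util import safe_ops


def angle_between(v1, v2):
  """Angle between two normalized vectors, stable at the range boundaries."""
  # Fixed point arithmetic can push the dot product slightly outside
  # [-1.0, 1.0], which would make tf.acos return NaN.
  dot = vector.dot(v1, v2)
  # Unlike tf.clip_by_value, safe_shrink rescales values back into the valid
  # range while keeping non-zero derivatives at the boundaries.
  dot = safe_ops.safe_shrink(dot, minval=-1.0, maxval=1.0)
  return tf.acos(dot)
```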
### Software compatibility
The library is intended to be compatible with the latest stable TensorFlow 1
release as well as the latest nightly package for TensorFlow 2. We also aim to
be compatible with a couple of versions of Python. Testing for all the above is
automatically performed using
[travis](https://travis-ci.org/tensorflow/graphics).
### Hardware compatibility
Except for performance reasons, every function must be hardware agnostic (e.g.
CPU / GPU / TPU).
### Python modules
Each module must contain a \_\_init__.py file which lists all the sub-modules it
contains.
## Tests
Testing code is essential to make the library usable by everyone at all times.
In the following, we will briefly review our policies around unit testing and
code coverage.
### Unit testing
* All test classes must derive from
  `tensorflow_graphics.util.test_case.TestCase`.
* To improve readability of the code and minimize duplication, the parameters
  passed to all the test functions described below are passed using
  `parameterized.parameters` provided by `absl.testing`.
#### Test files
Each module containing code has associated tests in the module's test
sub-folder. Each test sub-folder must contain an empty \_\_init__.py, and one
file per .py file in the module. For instance, if the `transformation` module
contains `quaternion.py`, the tests associated with that python file should be
located in `transformation/tests/quaternion_test.py`.
In the following, we use FN as shorthand for the name of the function to be
tested. Let's now have a look at how tests are structured and specific things to
test for.
#### Structure of a test
TensorFlow Graphics follows the arrange-act-assert testing pattern. Moreover,
if multiple tests are used in a single function to test for different but
similar behavior, `self.subTest` should be used to create separate blocks.
#### Testing return values
The function names and behavior to use for testing return values are as follows:
* `test_FN_random` to ensure that functions return the expected result for any
valid input.
* `test_FN_preset` to test specific inputs, and to make sure that corner cases
are handled appropriately.
#### Error handling
Following are the function names and behavior to use for testing that errors are
handled appropriately:
* `test_FN_exception_raised` to test that functions return the expected error
messages when input parameters are invalid (e.g. shape or values).
* `test_FN_exception_not_raised` to make sure that valid arguments do not
raise any errors.
N.B.: For both test functions above, make sure to include `None` in some of the
input shapes.
#### Testing Jacobians
Derivatives and gradients being at the core of Deep Learning training
algorithms, testing for the stability and correctness of gradients is core to
prevent problems, especially while training large networks. We perform numerical
differentiation to ensure the correctness and stability of the Jacobians of any
function by defining:
* `test_FN_jacobian_random` to ensure that Jacobians are correct on the whole
input domain.
* `test_FN_jacobian_preset` to test the stability of Jacobian around corner
cases, or points where the function might not be smooth / continuous.
N.B.: for both test functions above, make sure to decorate them with
`@flagsaver.flagsaver(tfg_add_asserts_to_graph=False)` to avoid potential errors
arising due to finite differentiation (e.g. tensor not normalized anymore).
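A sketch of such a test for a hypothetical function `foo` (the helper
`assert_jacobian_is_correct_fn` comes from `test_case.TestCase`):

```python
from absl.testing import flagsaver
import numpy as np
import tensorflow as tf

from tensorflow_graphics.util import test_case


def foo(x):
  """Hypothetical function under test."""
  return tf.sin(x)


class FooTest(test_case.TestCase):

  @flagsaver.flagsaver(tfg_add_asserts_to_graph=False)
  def test_foo_jacobian_random(self):
    """Tests the Jacobian of foo for random inputs."""
    x_init = np.random.uniform(size=(2, 3))
    self.assert_jacobian_is_correct_fn(foo, [x_init])


if __name__ == "__main__":
  test_case.main()
```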
### Coverage
The GitHub mirror of TensorFlow Graphics uses
<a href="https://coveralls.io/">coveralls</a> to assess test coverage. The
version of TensorFlow Graphics that is internal to Google contains the same
features as what is available on GitHub, but has access to more tools
for testing. For this project, our internal policy is to only submit code for
which our internal testing tools report at least 99% coverage. This number might
seem to be a steep requirement, but given the nature of the project, this is
obtained with reasonable efforts.
| # How to Contribute
We'd love to accept your patches and contributions to this project. There are
just a few small guidelines you need to follow.
## Contributor License Agreement
Contributions to this project must be accompanied by a Contributor License
Agreement. You (or your employer) retain the copyright to your contribution;
this simply gives us permission to use and redistribute your contributions as
part of the project. Head over to <https://cla.developers.google.com/> to see
your current agreements on file or to sign a new one.
You generally only need to submit a CLA once, so if you've already submitted one
(even if it was for a different project), you probably don't need to do it
again.
## Code reviews
All submissions, including submissions by project members, require review. We
use GitHub pull requests for this purpose. Consult
[GitHub Help](https://help.github.com/articles/about-pull-requests/) for more
information on using pull requests.
## What features to add?
The library is open to any contributions along the lines of computer graphics,
with the top-level themes being rendering, physics simulation, and geometry
processing. Contributions can be in the form of low-level functions (the
majority of the library), neural network layers, or Colaboratory notebooks.
## Guidelines for TensorFlow operations
TensorFlow Graphics follows the TensorFlow
[contributor guidelines](https://www.tensorflow.org/community/contribute) and
[code style guide](https://www.tensorflow.org/community/contribute/code_style).
Besides these, TensorFlow Graphics has a few more guidelines which you can find
below.
### Programming languages
Unless this comes at a significant performance hit, pure Python is preferred.
### Structure of a function
The general structure of a function should be as follows:
* Name of the function followed by inputs to that function
* Doc-string documentation
* Definition of the scope using `tf.compat.v1.name_scope`
* Functions that take tensors as arguments should call `tf.convert_to_tensor`
* Checking the shape and value of the inputs as necessary
* Main logic of the function
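A minimal sketch of this structure (the function and its shapes are
illustrative, not an actual library op):

```python
import tensorflow as tf

from tensorflow_graphics.util import shape


def square_norm(vector, name="square_norm"):
  """Computes the squared norm of a batch of 3d vectors.

  Args:
    vector: A tensor of shape `[A1, ..., An, 3]`.
    name: A name for this op. Defaults to "square_norm".

  Returns:
    A tensor of shape `[A1, ..., An, 1]` storing the squared norms.
  """
  with tf.compat.v1.name_scope(name):
    vector = tf.convert_to_tensor(value=vector)
    shape.check_static(
        tensor=vector, tensor_name="vector", has_dim_equals=(-1, 3))
    return tf.reduce_sum(vector * vector, axis=-1, keepdims=True)
```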
### Function names
Prefer function names that are concise, descriptive, and integrate well with the
module name when called. For instance, the `rotate` function from the
`rotation_matrix_3d` sub-module can be called using `rotation_matrix_3d.rotate`,
and makes it easy for anyone to understand what is being calculated. Functions
that are only meant to be local to the file in which they are written should
have an underscore before their name.
### Input parameters
The first arguments should be tensors, followed by python parameters, and
finally the name scope for the TensorFlow operation.
### Input shapes
* The first dimensions of a tensor should represent the shape of the batch,
and the last dimensions should represent the core shape of the elements used
by the function. For instance, `rotation_matrix_3d.rotate` accepts rotation
matrices of shape `[A1, ..., An, 3, 3]` where `[A1, ..., An]` are the
optional batch dimensions, and `[3, 3]` is the shape required to capture 3D
rotation matrices.
* Every function must support batch dimensions of any shape, including tensors
with no batch dimensions.
* For input tensors with common batch shapes, document whether they can be
broadcast compatible or not, and try to make them compatible when possible
by, for instance, using `shape.get_broadcasted_shape` and `tf.broadcast_to`.
### Documentation
Every function must have a docstring-type documentation describing what the
function is performing, its arguments, and what is returned. The input sizes
must be written between backquotes with batch dimensions indexed by letters and
numbers, for instance: \`[A1, ..., An, 3]\`. Here `[A1, ..., An]` are the batch
dimensions, and 3 is the intrinsic dimension required for the operation (e.g. a
point in 3d). Prefer to put the batch dimension first.
### Error handling
Handling unexpected inputs usually consists of checking that their shapes are
consistent with expectations, which can be performed with `shape.check_static`,
but also of checking that the contents of the tensors are valid (e.g. values in
a specific range, no NaNs, etc.), which can be performed with utilities provided
in the `asserts` module.
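For example, a validation helper along these lines (an illustrative sketch that
only uses utilities referenced elsewhere in this guide):

```python
import tensorflow as tf

from tensorflow_graphics.util import asserts
from tensorflow_graphics.util import shape


def _check_direction(direction):
  """Checks the shape and content of a batch of 3d direction vectors."""
  direction = tf.convert_to_tensor(value=direction)
  # Shape check: the last dimension must hold 3d vectors.
  shape.check_static(
      tensor=direction, tensor_name="direction", has_dim_equals=(-1, 3))
  # Content check: a direction with a zero norm is invalid.
  return asserts.assert_nonzero_norm(direction)
```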
### Differentiability and stable gradients
There are several TF operations that can turn derivatives to zero at unintended
points of your functions / operations. This can be avoided by using tools
provided in the util.safe_ops module. If it can not be avoided, make sure to add
tests checking the Jacobians of the function at the potentially discontinuous
points of the function. See [Testing Jacobians](#testing-jacobians) below.
Examples of such functions include:
* tf.maximum / tf.minimum(a(x), b(x)): These create piecewise functions, which
means derivatives can be discontinuous or zero for some ranges or points.
* tf.clip_by_value / tf.clip_by_norm: These are also piecewise functions where
the actual function is replaced with a constant piece for certain points or
ranges, which makes the derivative zero, even if it actually isn’t.
* tf.where(cond, a, b): This is another way of creating piecewise functions.
This should be used only if it is really meant to create a piecewise
function.
The util.safe_ops submodule contains helper functions that can resolve issues
with divisions by zero, but also helpers to ensure that the data is in the
appropriate range. For instance a dot product of two normalized vectors can
result in values outside of [-1.0, 1.0] range due to fixed point arithmetic.
This in turn may result in NaN if used with arcsin or arccos. In such cases,
safe_shrink in util.safe_ops should be used rather than clipping the range,
since clipping removes derivatives which should be non-zero at these points.
Cases involving zero divided by zero are a bit more involved and require
dedicated workarounds.
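As an illustration, an angle computation that feeds a dot product to `tf.acos`
could be written as follows (a sketch; the exact keyword arguments of
`safe_shrink` are assumed here and should be checked against `util.safe_ops`):

```python
import tensorflow as tf

from tensorflow_graphics.math import vector
from tensorflow_graphics.util import safe_ops


def angle_between(v1, v2):
  """Angle between two normalized vectors, stable at the range boundaries."""
  # Fixed point arithmetic can push the dot product slightly outside
  # [-1.0, 1.0], which would make tf.acos return NaN.
  dot = vector.dot(v1, v2)
  # Unlike tf.clip_by_value, safe_shrink rescales values back into the valid
  # range while keeping non-zero derivatives at the boundaries.
  dot = safe_ops.safe_shrink(dot, minval=-1.0, maxval=1.0)
  return tf.acos(dot)
```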
### Software compatibility
The library is intended to be compatible with the latest stable TensorFlow 1
release as well as the latest nightly package for TensorFlow 2. We also aim to
be compatible with a couple of versions of Python. Testing for all the above is
automatically performed using
[travis](https://travis-ci.org/tensorflow/graphics).
### Hardware compatibility
Except for performance reasons, every function must be hardware agnostic (e.g.
CPU / GPU / TPU).
### Python modules
Each module must contain a \_\_init__.py file which lists all the sub-modules it
contains.
## Tests
Testing code is essential to make the library usable by everyone at all times.
In the following, we will briefly review our policies around unit testing and
code coverage.
### Unit testing
* All test classes must derive from
  `tensorflow_graphics.util.test_case.TestCase`.
* To improve readability of the code and minimize duplication, the parameters
  passed to all the test functions described below are passed using
  `parameterized.parameters` provided by `absl.testing`.
#### Test files
Each module containing code has associated tests in the module's test
sub-folder. Each test sub-folder must contain an empty \_\_init__.py, and one
file per .py file in the module. For instance, if the `transformation` module
contains `quaternion.py`, the tests associated with that python file should be
located in `transformation/tests/quaternion_test.py`.
In the following, we use FN as shorthand for the name of the function to be
tested. Let's now have a look at how tests are structured and specific things to
test for.
#### Structure of a test
TensorFlow Graphics follows the arrange-act-assert testing pattern. Moreover,
if multiple tests are used in a single function to test for different but
similar behavior, `self.subTest` should be used to create separate blocks.
#### Testing return values
The function names and behavior to use for testing return values are as follows:
* `test_FN_random` to ensure that functions return the expected result for any
valid input.
* `test_FN_preset` to test specific inputs, and to make sure that corner cases
are handled appropriately.
#### Error handling
Following are the function names and behavior to use for testing that errors are
handled appropriately:
* `test_FN_exception_raised` to test that functions return the expected error
messages when input parameters are invalid (e.g. shape or values).
* `test_FN_exception_not_raised` to make sure that valid arguments do not
raise any errors.
N.B.: For both test functions above, make sure to include `None` in some of the
input shapes.
#### Testing Jacobians
Derivatives and gradients being at the core of Deep Learning training
algorithms, testing for the stability and correctness of gradients is core to
prevent problems, especially while training large networks. We perform numerical
differentiation to ensure the correctness and stability of the Jacobians of any
function by defining:
* `test_FN_jacobian_random` to ensure that Jacobians are correct on the whole
input domain.
* `test_FN_jacobian_preset` to test the stability of Jacobian around corner
cases, or points where the function might not be smooth / continuous.
N.B.: for both test functions above, make sure to decorate them with
`@flagsaver.flagsaver(tfg_add_asserts_to_graph=False)` to avoid potential errors
arising due to finite differentiation (e.g. tensor not normalized anymore).
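A sketch of such a test for a hypothetical function `foo` (the helper
`assert_jacobian_is_correct_fn` comes from `test_case.TestCase`):

```python
from absl.testing import flagsaver
import numpy as np
import tensorflow as tf

from tensorflow_graphics.util import test_case


def foo(x):
  """Hypothetical function under test."""
  return tf.sin(x)


class FooTest(test_case.TestCase):

  @flagsaver.flagsaver(tfg_add_asserts_to_graph=False)
  def test_foo_jacobian_random(self):
    """Tests the Jacobian of foo for random inputs."""
    x_init = np.random.uniform(size=(2, 3))
    self.assert_jacobian_is_correct_fn(foo, [x_init])


if __name__ == "__main__":
  test_case.main()
```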
### Coverage
The GitHub mirror of TensorFlow Graphics uses
<a href="https://coveralls.io/">coveralls</a> to assess test coverage. The
version of TensorFlow Graphics that is internal to Google contains the same
features as what is available on GitHub, but has access to more tools
for testing. For this project, our internal policy is to only submit code for
which our internal testing tools report at least 99% coverage. This number might
seem to be a steep requirement, but given the nature of the project, this is
obtained with reasonable efforts.
| -1 |
tensorflow/graphics | 489 | Enforce `Framebuffer` to accept only tensors with single batch dimension. | Enforce `Framebuffer` to accept only tensors with single batch dimension.
| copybara-service[bot] | "2021-02-03T21:06:22Z" | "2021-02-12T23:59:48Z" | 3a4f1952ed967fb884dc031eeda6dac3fbefbe52 | b7a2bf260d6fcf924fddcbb6dba36c72ece66990 | Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
| ./tensorflow_graphics/nn/loss/tests/chamfer_distance_test.py | # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the chamfer distance loss."""
from absl.testing import parameterized
import numpy as np
from tensorflow_graphics.nn.loss import chamfer_distance
from tensorflow_graphics.util import test_case
def _random_tensor(tensor_shape):
return np.random.uniform(low=0.0, high=1.0, size=tensor_shape)
def _random_tensor_shape():
tensor_size = np.random.randint(3) + 1
return np.random.randint(1, 10, size=(tensor_size)).tolist()
def _random_point_sets():
space_dimensions = np.random.randint(3) + 1
batch_shape = _random_tensor_shape()
point_set_a_size = np.random.randint(10) + 1
point_set_b_size = np.random.randint(10) + 1
point_set_a_init = np.random.uniform(
low=-100.0,
high=100.0,
size=batch_shape + [point_set_a_size, space_dimensions])
point_set_b_init = np.random.uniform(
low=-100.0,
high=100.0,
size=batch_shape + [point_set_b_size, space_dimensions])
return (point_set_a_init, point_set_b_init)
class ChamferDistanceTest(test_case.TestCase):
@parameterized.parameters(
(((0., 0), (0, 1), (1, 0), (-1, 0)),
((0., 0), (0, 2), (0.7, 0.4), (-0.5, -0.5)),
# a[0] -> b[0]
(0 + \
# a[1] -> b[2]
0.7**2 + 0.6**2 + \
# a[2] -> b[2]
0.3**2 + 0.4**2 + \
# a[3] -> b[3]
0.5) / 4 + \
# b[0] -> a[0]
(0 + \
# b[1] -> a[1]
1 + \
# b[2] -> a[2]
0.3**2 + 0.4**2 + \
# b[3] -> a[3]
0.5) / 4),
(((0., 1, 4), (3, 4, 2)),
((2., 2, 2), (2, 3, 4), (3, 2, 2)),
# a[0] -> b[1]
(8 + \
# a[1] -> b[2]
4) / 2 + \
# b[0] -> a[1]
(5 + \
# b[1] -> a[1]
6 + \
# b[2] -> a[1]
4) / 3),
)
def test_evaluate_preset(self, point_set_a, point_set_b, expected_distance):
tensor_shape = _random_tensor_shape()
point_set_a = np.tile(point_set_a, tensor_shape + [1, 1])
point_set_b = np.tile(point_set_b, tensor_shape + [1, 1])
expected = np.tile(expected_distance, tensor_shape)
result = chamfer_distance.evaluate(point_set_a, point_set_b)
self.assertAllClose(expected, result)
def test_chamfer_distance_evaluate_jacobian(self):
"""Tests the Jacobian of the Chamfer distance loss."""
point_set_a, point_set_b = _random_point_sets()
with self.subTest(name="jacobian_wrt_point_set_a"):
self.assert_jacobian_is_correct_fn(
lambda x: chamfer_distance.evaluate(x, point_set_b), [point_set_a],
atol=1e-5)
with self.subTest(name="jacobian_wrt_point_set_b"):
self.assert_jacobian_is_correct_fn(
lambda x: chamfer_distance.evaluate(point_set_a, x), [point_set_b],
atol=1e-5)
@parameterized.parameters(
("Not all batch dimensions are broadcast-compatible.", (1, 3, 5, 3),
(2, 4, 3)),
("Not all batch dimensions are broadcast-compatible.", (3, 3, 5),
(2, 4, 5)),
("point_set_b must have exactly 3 dimensions in axis -1,.", (2, 4, 3),
(2, 4, 2)),
("point_set_b must have exactly 2 dimensions in axis -1,.", (2, 4, 2),
(2, 4, 3)),
)
def test_evaluate_shape_exception_raised(self, error_msg, *shape):
"""Tests that the shape exception is raised."""
self.assert_exception_is_raised(chamfer_distance.evaluate, error_msg, shape)
@parameterized.parameters(
((1, 5, 6, 3), (2, 5, 9, 3)),
((None, 2, 6, 2), (4, 2, None, 4, 2)),
((3, 5, 8, 7), (3, 1, 1, 7)),
)
def test_evaluate_shape_exception_not_raised(self, *shapes):
"""Tests that the shape exceptions are not raised."""
self.assert_exception_is_not_raised(chamfer_distance.evaluate, shapes)
if __name__ == "__main__":
test_case.main()
| # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the chamfer distance loss."""
from absl.testing import parameterized
import numpy as np
from tensorflow_graphics.nn.loss import chamfer_distance
from tensorflow_graphics.util import test_case
def _random_tensor(tensor_shape):
return np.random.uniform(low=0.0, high=1.0, size=tensor_shape)
def _random_tensor_shape():
tensor_size = np.random.randint(3) + 1
return np.random.randint(1, 10, size=(tensor_size)).tolist()
def _random_point_sets():
space_dimensions = np.random.randint(3) + 1
batch_shape = _random_tensor_shape()
point_set_a_size = np.random.randint(10) + 1
point_set_b_size = np.random.randint(10) + 1
point_set_a_init = np.random.uniform(
low=-100.0,
high=100.0,
size=batch_shape + [point_set_a_size, space_dimensions])
point_set_b_init = np.random.uniform(
low=-100.0,
high=100.0,
size=batch_shape + [point_set_b_size, space_dimensions])
return (point_set_a_init, point_set_b_init)
class ChamferDistanceTest(test_case.TestCase):
@parameterized.parameters(
(((0., 0), (0, 1), (1, 0), (-1, 0)),
((0., 0), (0, 2), (0.7, 0.4), (-0.5, -0.5)),
# a[0] -> b[0]
(0 + \
# a[1] -> b[2]
0.7**2 + 0.6**2 + \
# a[2] -> b[2]
0.3**2 + 0.4**2 + \
# a[3] -> b[3]
0.5) / 4 + \
# b[0] -> a[0]
(0 + \
# b[1] -> a[1]
1 + \
# b[2] -> a[2]
0.3**2 + 0.4**2 + \
# b[3] -> a[3]
0.5) / 4),
(((0., 1, 4), (3, 4, 2)),
((2., 2, 2), (2, 3, 4), (3, 2, 2)),
# a[0] -> b[1]
(8 + \
# a[1] -> b[2]
4) / 2 + \
# b[0] -> a[1]
(5 + \
# b[1] -> a[1]
6 + \
# b[2] -> a[1]
4) / 3),
)
def test_evaluate_preset(self, point_set_a, point_set_b, expected_distance):
tensor_shape = _random_tensor_shape()
point_set_a = np.tile(point_set_a, tensor_shape + [1, 1])
point_set_b = np.tile(point_set_b, tensor_shape + [1, 1])
expected = np.tile(expected_distance, tensor_shape)
result = chamfer_distance.evaluate(point_set_a, point_set_b)
self.assertAllClose(expected, result)
def test_chamfer_distance_evaluate_jacobian(self):
"""Tests the Jacobian of the Chamfer distance loss."""
point_set_a, point_set_b = _random_point_sets()
with self.subTest(name="jacobian_wrt_point_set_a"):
self.assert_jacobian_is_correct_fn(
lambda x: chamfer_distance.evaluate(x, point_set_b), [point_set_a],
atol=1e-5)
with self.subTest(name="jacobian_wrt_point_set_b"):
self.assert_jacobian_is_correct_fn(
lambda x: chamfer_distance.evaluate(point_set_a, x), [point_set_b],
atol=1e-5)
@parameterized.parameters(
("Not all batch dimensions are broadcast-compatible.", (1, 3, 5, 3),
(2, 4, 3)),
("Not all batch dimensions are broadcast-compatible.", (3, 3, 5),
(2, 4, 5)),
("point_set_b must have exactly 3 dimensions in axis -1,.", (2, 4, 3),
(2, 4, 2)),
("point_set_b must have exactly 2 dimensions in axis -1,.", (2, 4, 2),
(2, 4, 3)),
)
def test_evaluate_shape_exception_raised(self, error_msg, *shape):
"""Tests that the shape exception is raised."""
self.assert_exception_is_raised(chamfer_distance.evaluate, error_msg, shape)
@parameterized.parameters(
((1, 5, 6, 3), (2, 5, 9, 3)),
((None, 2, 6, 2), (4, 2, None, 4, 2)),
((3, 5, 8, 7), (3, 1, 1, 7)),
)
def test_evaluate_shape_exception_not_raised(self, *shapes):
"""Tests that the shape exceptions are not raised."""
self.assert_exception_is_not_raised(chamfer_distance.evaluate, shapes)
if __name__ == "__main__":
test_case.main()
| -1 |
tensorflow/graphics | 489 | Enforce `Framebuffer` to accept only tensors with single batch dimension. | Enforce `Framebuffer` to accept only tensors with single batch dimension.
| copybara-service[bot] | "2021-02-03T21:06:22Z" | "2021-02-12T23:59:48Z" | 3a4f1952ed967fb884dc031eeda6dac3fbefbe52 | b7a2bf260d6fcf924fddcbb6dba36c72ece66990 | Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
| ./.github/workflows/build.yml | # Continuous integration tests executed on push and pull request actions
# see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
name: Build
on:
push:
branches: [ master ]
pull_request:
branches: [ master ]
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Set up Python
uses: actions/setup-python@v2
with:
python-version: 3.8
- name: Install system requirements
run: |
sudo xargs apt-get update
sudo xargs apt-get -y install < requirements.unix
- name: Install pip requirements
run: |
python -m pip install --upgrade pip
pip install -U -r requirements.txt
pip install -U pytest coveralls
pip install -U flake8
pip install -U setuptools wheel
- name: Build ops
run: |
bazel build tensorflow_graphics/... --define=BASEDIR=$(pwd) --sandbox_writable_path=$(pwd)
bazel clean --expunge
- name: Run python tests and coverage
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
MESA_GL_VERSION_OVERRIDE: 4.5
MESA_GLSL_VERSION_OVERRIDE: 450
run: |
coverage run --source tensorflow_graphics -m py.test
coveralls --service=github
- name: Linter
run: |
flake8 --config=.flake8 tensorflow_graphics/
- name: Build pip package and install
run: |
python setup.py sdist bdist_wheel
pip install dist/*.whl
- name: Test install
run: |
cd $(mktemp -d) && python -c 'import tensorflow_graphics as tfg'
| # Continuous integration tests executed on push and pull request actions
# see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
name: Build
on:
push:
branches: [ master ]
pull_request:
branches: [ master ]
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Set up Python
uses: actions/setup-python@v2
with:
python-version: 3.8
- name: Install system requirements
run: |
sudo xargs apt-get update
sudo xargs apt-get -y install < requirements.unix
- name: Install pip requirements
run: |
python -m pip install --upgrade pip
pip install -U -r requirements.txt
pip install -U pytest coveralls
pip install -U flake8
pip install -U setuptools wheel
- name: Build ops
run: |
bazel build tensorflow_graphics/... --define=BASEDIR=$(pwd) --sandbox_writable_path=$(pwd)
bazel clean --expunge
- name: Run python tests and coverage
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
MESA_GL_VERSION_OVERRIDE: 4.5
MESA_GLSL_VERSION_OVERRIDE: 450
run: |
coverage run --source tensorflow_graphics -m py.test
coveralls --service=github
- name: Linter
run: |
flake8 --config=.flake8 tensorflow_graphics/
- name: Build pip package and install
run: |
python setup.py sdist bdist_wheel
pip install dist/*.whl
- name: Test install
run: |
cd $(mktemp -d) && python -c 'import tensorflow_graphics as tfg'
| -1 |
tensorflow/graphics | 489 | Enforce `Framebuffer` to accept only tensors with single batch dimension. | Enforce `Framebuffer` to accept only tensors with single batch dimension.
| copybara-service[bot] | "2021-02-03T21:06:22Z" | "2021-02-12T23:59:48Z" | 3a4f1952ed967fb884dc031eeda6dac3fbefbe52 | b7a2bf260d6fcf924fddcbb6dba36c72ece66990 | Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
| ./tensorflow_graphics/geometry/representation/triangle.py | # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tensorflow triangle utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow_graphics.math import vector
from tensorflow_graphics.util import asserts
from tensorflow_graphics.util import export_api
from tensorflow_graphics.util import shape
def normal(v0, v1, v2, clockwise=False, normalize=True, name="triangle_normal"):
"""Computes face normals (triangles).
Note:
In the following, A1 to An are optional batch dimensions, which must be
broadcast compatible.
Args:
v0: A tensor of shape `[A1, ..., An, 3]`, where the last dimension
represents the first vertex of a triangle.
v1: A tensor of shape `[A1, ..., An, 3]`, where the last dimension
represents the second vertex of a triangle.
v2: A tensor of shape `[A1, ..., An, 3]`, where the last dimension
represents the third vertex of a triangle.
clockwise: Winding order to determine front-facing triangles.
normalize: A `bool` indicating whether output normals should be normalized
by the function.
name: A name for this op. Defaults to "triangle_normal".
Returns:
    A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents
    the triangle normal; it is normalized when `normalize` is `True`.
Raises:
ValueError: If the shape of `v0`, `v1`, or `v2` is not supported.
"""
with tf.name_scope(name):
v0 = tf.convert_to_tensor(value=v0)
v1 = tf.convert_to_tensor(value=v1)
v2 = tf.convert_to_tensor(value=v2)
shape.check_static(tensor=v0, tensor_name="v0", has_dim_equals=(-1, 3))
shape.check_static(tensor=v1, tensor_name="v1", has_dim_equals=(-1, 3))
shape.check_static(tensor=v2, tensor_name="v2", has_dim_equals=(-1, 3))
shape.compare_batch_dimensions(
tensors=(v0, v1, v2), last_axes=-2, broadcast_compatible=True)
normal_vector = vector.cross(v1 - v0, v2 - v0, axis=-1)
normal_vector = asserts.assert_nonzero_norm(normal_vector)
if not clockwise:
normal_vector *= -1.0
if normalize:
return tf.nn.l2_normalize(normal_vector, axis=-1)
return normal_vector
def area(v0, v1, v2, name="triangle_area"):
"""Computes triangle areas.
Note: Computed triangle area = 0.5 * | e1 x e2 | where e1 and e2 are edges
of triangle. A degenerate triangle will return 0 area, whereas the normal
for a degenerate triangle is not defined.
In the following, A1 to An are optional batch dimensions, which must be
broadcast compatible.
Args:
v0: A tensor of shape `[A1, ..., An, 3]`, where the last dimension
represents the first vertex of a triangle.
v1: A tensor of shape `[A1, ..., An, 3]`, where the last dimension
represents the second vertex of a triangle.
v2: A tensor of shape `[A1, ..., An, 3]`, where the last dimension
represents the third vertex of a triangle.
name: A name for this op. Defaults to "triangle_area".
Returns:
    A tensor of shape `[A1, ..., An, 1]`, where the last dimension represents
    the triangle area.
"""
with tf.name_scope(name):
v0 = tf.convert_to_tensor(value=v0)
v1 = tf.convert_to_tensor(value=v1)
v2 = tf.convert_to_tensor(value=v2)
shape.check_static(tensor=v0, tensor_name="v0", has_dim_equals=(-1, 3))
shape.check_static(tensor=v1, tensor_name="v1", has_dim_equals=(-1, 3))
shape.check_static(tensor=v2, tensor_name="v2", has_dim_equals=(-1, 3))
shape.compare_batch_dimensions(
tensors=(v0, v1, v2), last_axes=-2, broadcast_compatible=True)
normals = vector.cross(v1 - v0, v2 - v0, axis=-1)
return 0.5 * tf.linalg.norm(tensor=normals, axis=-1, keepdims=True)
# API contains all public functions and classes.
__all__ = export_api.get_functions_and_classes()
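# Example usage (an illustrative sketch, not part of the original file): the
# unit right triangle in the XY plane.
#
#   v0 = tf.constant([0.0, 0.0, 0.0])
#   v1 = tf.constant([1.0, 0.0, 0.0])
#   v2 = tf.constant([0.0, 1.0, 0.0])
#   area(v0, v1, v2)    # -> [0.5]
#   normal(v0, v1, v2)  # -> unit normal along the z axis; its sign depends on
#                       #    the `clockwise` winding order argument.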
| # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tensorflow triangle utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow_graphics.math import vector
from tensorflow_graphics.util import asserts
from tensorflow_graphics.util import export_api
from tensorflow_graphics.util import shape
def normal(v0, v1, v2, clockwise=False, normalize=True, name="triangle_normal"):
"""Computes face normals (triangles).
Note:
In the following, A1 to An are optional batch dimensions, which must be
broadcast compatible.
Args:
v0: A tensor of shape `[A1, ..., An, 3]`, where the last dimension
represents the first vertex of a triangle.
v1: A tensor of shape `[A1, ..., An, 3]`, where the last dimension
represents the second vertex of a triangle.
v2: A tensor of shape `[A1, ..., An, 3]`, where the last dimension
represents the third vertex of a triangle.
clockwise: Winding order to determine front-facing triangles.
normalize: A `bool` indicating whether output normals should be normalized
by the function.
name: A name for this op. Defaults to "triangle_normal".
Returns:
    A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents
    the triangle normal; it is normalized when `normalize` is `True`.
Raises:
ValueError: If the shape of `v0`, `v1`, or `v2` is not supported.
"""
with tf.name_scope(name):
v0 = tf.convert_to_tensor(value=v0)
v1 = tf.convert_to_tensor(value=v1)
v2 = tf.convert_to_tensor(value=v2)
shape.check_static(tensor=v0, tensor_name="v0", has_dim_equals=(-1, 3))
shape.check_static(tensor=v1, tensor_name="v1", has_dim_equals=(-1, 3))
shape.check_static(tensor=v2, tensor_name="v2", has_dim_equals=(-1, 3))
shape.compare_batch_dimensions(
tensors=(v0, v1, v2), last_axes=-2, broadcast_compatible=True)
normal_vector = vector.cross(v1 - v0, v2 - v0, axis=-1)
normal_vector = asserts.assert_nonzero_norm(normal_vector)
if not clockwise:
normal_vector *= -1.0
if normalize:
return tf.nn.l2_normalize(normal_vector, axis=-1)
return normal_vector
def area(v0, v1, v2, name="triangle_area"):
"""Computes triangle areas.
Note: Computed triangle area = 0.5 * | e1 x e2 | where e1 and e2 are edges
of triangle. A degenerate triangle will return 0 area, whereas the normal
for a degenerate triangle is not defined.
In the following, A1 to An are optional batch dimensions, which must be
broadcast compatible.
Args:
v0: A tensor of shape `[A1, ..., An, 3]`, where the last dimension
represents the first vertex of a triangle.
v1: A tensor of shape `[A1, ..., An, 3]`, where the last dimension
represents the second vertex of a triangle.
v2: A tensor of shape `[A1, ..., An, 3]`, where the last dimension
represents the third vertex of a triangle.
name: A name for this op. Defaults to "triangle_area".
Returns:
    A tensor of shape `[A1, ..., An, 1]`, where the last dimension represents
    the triangle area.
"""
with tf.name_scope(name):
v0 = tf.convert_to_tensor(value=v0)
v1 = tf.convert_to_tensor(value=v1)
v2 = tf.convert_to_tensor(value=v2)
shape.check_static(tensor=v0, tensor_name="v0", has_dim_equals=(-1, 3))
shape.check_static(tensor=v1, tensor_name="v1", has_dim_equals=(-1, 3))
shape.check_static(tensor=v2, tensor_name="v2", has_dim_equals=(-1, 3))
shape.compare_batch_dimensions(
tensors=(v0, v1, v2), last_axes=-2, broadcast_compatible=True)
normals = vector.cross(v1 - v0, v2 - v0, axis=-1)
return 0.5 * tf.linalg.norm(tensor=normals, axis=-1, keepdims=True)
# API contains all public functions and classes.
__all__ = export_api.get_functions_and_classes()
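# Example usage (an illustrative sketch, not part of the original file): the
# unit right triangle in the XY plane.
#
#   v0 = tf.constant([0.0, 0.0, 0.0])
#   v1 = tf.constant([1.0, 0.0, 0.0])
#   v2 = tf.constant([0.0, 1.0, 0.0])
#   area(v0, v1, v2)    # -> [0.5]
#   normal(v0, v1, v2)  # -> unit normal along the z axis; its sign depends on
#                       #    the `clockwise` winding order argument.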
| -1 |
tensorflow/graphics | 489 | Enforce `Framebuffer` to accept only tensors with single batch dimension. | Enforce `Framebuffer` to accept only tensors with single batch dimension.
| copybara-service[bot] | "2021-02-03T21:06:22Z" | "2021-02-12T23:59:48Z" | 3a4f1952ed967fb884dc031eeda6dac3fbefbe52 | b7a2bf260d6fcf924fddcbb6dba36c72ece66990 | Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
| ./tensorflow_graphics/projects/local_implicit_grid/core/implicit_nets.py | # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Implementations of various implicit function networks architectures.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
layers = tf.keras.layers
class ImNet(layers.Layer):
"""ImNet layer keras implementation.
"""
def __init__(self, dim=3, in_features=128, out_features=1, num_filters=128,
activation=tf.nn.leaky_relu, name='im_net'):
"""Initialization.
Args:
dim: int, dimension of input points.
in_features: int, length of input features (i.e., latent code).
out_features: number of output features.
num_filters: int, width of the second to last layer.
activation: tf activation op.
name: str, name of the layer.
"""
super(ImNet, self).__init__(name=name)
self.dim = dim
self.in_features = in_features
self.dimz = dim + in_features
self.out_features = out_features
self.num_filters = num_filters
self.activ = activation
self.fc0 = layers.Dense(num_filters*16, name='dense_1')
self.fc1 = layers.Dense(num_filters*8, name='dense_2')
self.fc2 = layers.Dense(num_filters*4, name='dense_3')
self.fc3 = layers.Dense(num_filters*2, name='dense_4')
self.fc4 = layers.Dense(num_filters*1, name='dense_5')
self.fc5 = layers.Dense(out_features, name='dense_6')
self.fc = [self.fc0, self.fc1, self.fc2, self.fc3, self.fc4, self.fc5]
def call(self, x, training=False):
"""Forward method.
Args:
x: `[batch_size, dim+in_features]` tensor, inputs to decode.
training: bool, flag indicating training phase.
Returns:
x_: output through this layer.
"""
x_ = x
for dense in self.fc[:4]:
x_ = self.activ(dense(x_))
x_ = tf.concat([x_, x], axis=-1)
x_ = self.activ(self.fc4(x_))
x_ = self.fc5(x_)
return x_
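# Minimal usage sketch (illustrative, not part of the original file): decode a
# batch of query points concatenated with their latent codes.
#
#   net = ImNet(dim=3, in_features=128, out_features=1)
#   points = tf.random.uniform([4, 3])
#   codes = tf.random.uniform([4, 128])
#   values = net(tf.concat([points, codes], axis=-1))  # shape [4, 1]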
| # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Implementations of various implicit function networks architectures.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
layers = tf.keras.layers
class ImNet(layers.Layer):
"""ImNet layer keras implementation.
"""
def __init__(self, dim=3, in_features=128, out_features=1, num_filters=128,
activation=tf.nn.leaky_relu, name='im_net'):
"""Initialization.
Args:
dim: int, dimension of input points.
in_features: int, length of input features (i.e., latent code).
out_features: number of output features.
num_filters: int, width of the second to last layer.
activation: tf activation op.
name: str, name of the layer.
"""
super(ImNet, self).__init__(name=name)
self.dim = dim
self.in_features = in_features
self.dimz = dim + in_features
self.out_features = out_features
self.num_filters = num_filters
self.activ = activation
self.fc0 = layers.Dense(num_filters*16, name='dense_1')
self.fc1 = layers.Dense(num_filters*8, name='dense_2')
self.fc2 = layers.Dense(num_filters*4, name='dense_3')
self.fc3 = layers.Dense(num_filters*2, name='dense_4')
self.fc4 = layers.Dense(num_filters*1, name='dense_5')
self.fc5 = layers.Dense(out_features, name='dense_6')
self.fc = [self.fc0, self.fc1, self.fc2, self.fc3, self.fc4, self.fc5]
def call(self, x, training=False):
"""Forward method.
Args:
x: `[batch_size, dim+in_features]` tensor, inputs to decode.
training: bool, flag indicating training phase.
Returns:
x_: output through this layer.
"""
x_ = x
for dense in self.fc[:4]:
x_ = self.activ(dense(x_))
x_ = tf.concat([x_, x], axis=-1)
x_ = self.activ(self.fc4(x_))
x_ = self.fc5(x_)
return x_
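# Minimal usage sketch (illustrative, not part of the original file): decode a
# batch of query points concatenated with their latent codes.
#
#   net = ImNet(dim=3, in_features=128, out_features=1)
#   points = tf.random.uniform([4, 3])
#   codes = tf.random.uniform([4, 128])
#   values = net(tf.concat([points, codes], axis=-1))  # shape [4, 1]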
| -1 |
tensorflow/graphics | 489 | Enforce `Framebuffer` to accept only tensors with single batch dimension. | Enforce `Framebuffer` to accept only tensors with single batch dimension.
| copybara-service[bot] | "2021-02-03T21:06:22Z" | "2021-02-12T23:59:48Z" | 3a4f1952ed967fb884dc031eeda6dac3fbefbe52 | b7a2bf260d6fcf924fddcbb6dba36c72ece66990 | Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
| ./tensorflow_graphics/opensource_only.files | tensorflow_graphics/rendering/opengl/BUILD | tensorflow_graphics/rendering/opengl/BUILD | -1 |
tensorflow/graphics | 489 | Enforce `Framebuffer` to accept only tensors with single batch dimension. | Enforce `Framebuffer` to accept only tensors with single batch dimension.
| copybara-service[bot] | "2021-02-03T21:06:22Z" | "2021-02-12T23:59:48Z" | 3a4f1952ed967fb884dc031eeda6dac3fbefbe52 | b7a2bf260d6fcf924fddcbb6dba36c72ece66990 | Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
| ./tensorflow_graphics/rendering/kernels/rasterize_triangles_impl.h | /* Copyright 2020 The TensorFlow Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef THIRD_PARTY_PY_TENSORFLOW_GRAPHICS_RENDERING_KERNELS_RASTERIZE_TRIANGLES_IMPL_H_
#define THIRD_PARTY_PY_TENSORFLOW_GRAPHICS_RENDERING_KERNELS_RASTERIZE_TRIANGLES_IMPL_H_
#include "absl/base/integral_types.h"
// Determines the mode for face culling. Analogous to OpenGL's glCullFace
// parameters.
enum class FaceCullingMode { kNone = 0, kBack, kFront };
// Computes the triangle id, barycentric coordinates, and z-buffer at each pixel
// in the image.
//
// vertices: A flattened 2D array with 4*vertex_count elements.
// Each contiguous quadruplet is the XYZW location of the vertex with that
// quadruplet's id. The coordinates are assumed to be OpenGL-style clip-space
// (i.e., post-projection, pre-divide), where X points right, Y points up,
// Z points away.
// triangles: A flattened 2D array with 3*triangle_count elements.
// Each contiguous triplet is the three vertex ids indexing into vertices
// describing one triangle with clockwise winding.
// triangle_count: The number of triangles stored in the array triangles.
// num_layers: Number of surface layers to store at each pixel, essentially
// depth-peeling (https://en.wikipedia.org/wiki/Depth_peeling).
// face_culling_mode: mode for culling back-facing triangles, front-facing
// triangles, or none.
// triangle_ids: A flattened 2D array with num_layers*image_height*image_width
// elements. At return, each pixel contains a triangle id in the range
// [0, triangle_count). The id value is also 0 if there is no triangle
// at the pixel. The barycentric_coordinates must be checked to
// distinguish the two cases.
// z_buffer: A flattened 2D array with num_layers*image_height*image_width
// elements. At return, contains the normalized device Z coordinates of the
// rendered triangles.
// barycentric_coordinates: A flattened 3D array with
// num_layers*image_height*image_width*3 elements. At return, contains the
// triplet of barycentric coordinates at each pixel in the same vertex
// ordering as triangles. If no triangle is present, all coordinates are 0.
// May be nullptr if barycentric coordinates are not desired.
void RasterizeTrianglesImpl(const float* vertices, const int32* triangles,
int32 triangle_count, int32 image_width,
int32 image_height, int32 num_layers,
FaceCullingMode face_culling_mode,
int32* triangle_ids, float* z_buffer,
float* barycentric_coordinates);
#endif // THIRD_PARTY_PY_TENSORFLOW_GRAPHICS_RENDERING_KERNELS_RASTERIZE_TRIANGLES_IMPL_H_
| /* Copyright 2020 The TensorFlow Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef THIRD_PARTY_PY_TENSORFLOW_GRAPHICS_RENDERING_KERNELS_RASTERIZE_TRIANGLES_IMPL_H_
#define THIRD_PARTY_PY_TENSORFLOW_GRAPHICS_RENDERING_KERNELS_RASTERIZE_TRIANGLES_IMPL_H_
#include "absl/base/integral_types.h"
// Determines the mode for face culling. Analogous to OpenGL's glCullFace
// parameters.
enum class FaceCullingMode { kNone = 0, kBack, kFront };
// Computes the triangle id, barycentric coordinates, and z-buffer at each pixel
// in the image.
//
// vertices: A flattened 2D array with 4*vertex_count elements.
// Each contiguous quadruplet is the XYZW location of the vertex with that
// quadruplet's id. The coordinates are assumed to be OpenGL-style clip-space
// (i.e., post-projection, pre-divide), where X points right, Y points up,
// Z points away.
// triangles: A flattened 2D array with 3*triangle_count elements.
// Each contiguous triplet is the three vertex ids indexing into vertices
// describing one triangle with clockwise winding.
// triangle_count: The number of triangles stored in the array triangles.
// num_layers: Number of surface layers to store at each pixel, essentially
// depth-peeling (https://en.wikipedia.org/wiki/Depth_peeling).
// face_culling_mode: mode for culling back-facing triangles, front-facing
// triangles, or none.
// triangle_ids: A flattened 2D array with num_layers*image_height*image_width
// elements. At return, each pixel contains a triangle id in the range
// [0, triangle_count). The id value is also 0 if there is no triangle
// at the pixel. The barycentric_coordinates must be checked to
// distinguish the two cases.
// z_buffer: A flattened 2D array with num_layers*image_height*image_width
// elements. At return, contains the normalized device Z coordinates of the
// rendered triangles.
// barycentric_coordinates: A flattened 3D array with
// num_layers*image_height*image_width*3 elements. At return, contains the
// triplet of barycentric coordinates at each pixel in the same vertex
// ordering as triangles. If no triangle is present, all coordinates are 0.
// May be nullptr if barycentric coordinates are not desired.
void RasterizeTrianglesImpl(const float* vertices, const int32* triangles,
int32 triangle_count, int32 image_width,
int32 image_height, int32 num_layers,
FaceCullingMode face_culling_mode,
int32* triangle_ids, float* z_buffer,
float* barycentric_coordinates);
#endif // THIRD_PARTY_PY_TENSORFLOW_GRAPHICS_RENDERING_KERNELS_RASTERIZE_TRIANGLES_IMPL_H_
| -1 |
tensorflow/graphics | 489 | Enforce `Framebuffer` to accept only tensors with single batch dimension. | Enforce `Framebuffer` to accept only tensors with single batch dimension.
| copybara-service[bot] | "2021-02-03T21:06:22Z" | "2021-02-12T23:59:48Z" | 3a4f1952ed967fb884dc031eeda6dac3fbefbe52 | b7a2bf260d6fcf924fddcbb6dba36c72ece66990 | Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
| ./tensorflow_graphics/rendering/light/__init__.py | # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Light module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_graphics.rendering.light import point_light
from tensorflow_graphics.util import export_api as _export_api
# API contains submodules of tensorflow_graphics.rendering.light.
__all__ = _export_api.get_modules()
| # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Light module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_graphics.rendering.light import point_light
from tensorflow_graphics.util import export_api as _export_api
# API contains submodules of tensorflow_graphics.rendering.light.
__all__ = _export_api.get_modules()
| -1 |
tensorflow/graphics | 489 | Enforce `Framebuffer` to accept only tensors with single batch dimension. | Enforce `Framebuffer` to accept only tensors with single batch dimension.
| copybara-service[bot] | "2021-02-03T21:06:22Z" | "2021-02-12T23:59:48Z" | 3a4f1952ed967fb884dc031eeda6dac3fbefbe52 | b7a2bf260d6fcf924fddcbb6dba36c72ece66990 | Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
| ./tensorflow_graphics/rendering/opengl/egl_offscreen_context.h | /* Copyright 2020 The TensorFlow Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef THIRD_PARTY_PY_TENSORFLOW_GRAPHICS_RENDERING_OPENGL_EGL_OFFSCREEN_CONTEXT_H_
#define THIRD_PARTY_PY_TENSORFLOW_GRAPHICS_RENDERING_OPENGL_EGL_OFFSCREEN_CONTEXT_H_
#include <EGL/egl.h>
#include <memory>
#include "tensorflow/core/lib/core/status.h"
// EGL is an interface between OpenGL ES and the windowing system of the native
// platform. The following class provides functionality to manage an EGL
// off-screen context.
class EGLOffscreenContext {
public:
~EGLOffscreenContext();
// Creates an EGL display, pixel buffer surface, and context that can be used
// for rendering. These objects are created with default parameters.
//
// Arguments:
// * egl_offscreen_context: if the method is successful, this object holds a
// valid offscreen context.
//
// Returns:
// A tensorflow::Status object storing tensorflow::Status::OK() on success,
// and an object of type tensorflow::errors otherwise.
static tensorflow::Status Create(
std::unique_ptr<EGLOffscreenContext>* egl_offscreen_context);
// Creates an EGL display, pixel buffer surface, and context that can be used
// for rendering.
//
// Arguments:
// * pixel_buffer_width: width of the pixel buffer surface.
// * pixel_buffer_height: height of the pixel buffer surface.
// * context: if the method succeeds, this variable returns an object storing
// a valid display, context, and pixel buffer surface.
// * configuration_attributes: attributes used to build frame buffer
//   configurations.
// * context_attributes: attributes used to create the EGL context.
// * rendering_api: defines the rendering API for the current thread. The
// available APIs are EGL_OPENGL_API, EGL_OPENGL_ES_API, and
// EGL_OPENVG_API.
// * egl_offscreen_context: if the method is successful, this object holds a
// valid offscreen context.
//
// Returns:
// A tensorflow::Status object storing tensorflow::Status::OK() on success,
// and an object of type tensorflow::errors otherwise.
static tensorflow::Status Create(
const int pixel_buffer_width, const int pixel_buffer_height,
const EGLenum rendering_api, const EGLint* configuration_attributes,
const EGLint* context_attributes,
std::unique_ptr<EGLOffscreenContext>* egl_offscreen_context);
// Binds the EGL context to the current rendering thread and to the pixel
// buffer surface. Note that this context must not be current in any other
// thread.
//
// Returns:
// A tensorflow::Status object storing tensorflow::Status::OK() on success,
// and an object of type tensorflow::errors otherwise.
tensorflow::Status MakeCurrent() const;
// Un-binds the current EGL rendering context from the current rendering
// thread and from the pixel buffer surface.
//
// Returns:
// A tensorflow::Status object storing tensorflow::Status::OK() on success,
// and an object of type tensorflow::errors otherwise.
tensorflow::Status Release();
private:
EGLOffscreenContext() = delete;
EGLOffscreenContext(EGLContext context, EGLDisplay display,
EGLSurface pixel_buffer_surface);
EGLOffscreenContext(const EGLOffscreenContext&) = delete;
EGLOffscreenContext(EGLOffscreenContext&&) = delete;
EGLOffscreenContext& operator=(const EGLOffscreenContext&) = delete;
EGLOffscreenContext& operator=(EGLOffscreenContext&&) = delete;
tensorflow::Status Destroy();
EGLContext context_;
EGLDisplay display_;
EGLSurface pixel_buffer_surface_;
};
#endif // THIRD_PARTY_PY_TENSORFLOW_GRAPHICS_RENDERING_OPENGL_EGL_OFFSCREEN_CONTEXT_H_
| /* Copyright 2020 The TensorFlow Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef THIRD_PARTY_PY_TENSORFLOW_GRAPHICS_RENDERING_OPENGL_EGL_OFFSCREEN_CONTEXT_H_
#define THIRD_PARTY_PY_TENSORFLOW_GRAPHICS_RENDERING_OPENGL_EGL_OFFSCREEN_CONTEXT_H_
#include <EGL/egl.h>
#include <memory>
#include "tensorflow/core/lib/core/status.h"
// EGL is an interface between OpenGL ES and the windowing system of the native
// platform. The following class provides functionality to manage an EGL
// off-screen context.
class EGLOffscreenContext {
public:
~EGLOffscreenContext();
// Creates an EGL display, pixel buffer surface, and context that can be used
  // for rendering. These objects are created with default parameters.
//
// Arguments:
// * egl_offscreen_context: if the method is successful, this object holds a
// valid offscreen context.
//
// Returns:
// A tensorflow::Status object storing tensorflow::Status::OK() on success,
// and an object of type tensorflow::errors otherwise.
static tensorflow::Status Create(
std::unique_ptr<EGLOffscreenContext>* egl_offscreen_context);
// Creates an EGL display, pixel buffer surface, and context that can be used
// for rendering.
//
  // Arguments:
  // * pixel_buffer_width: width of the pixel buffer surface.
  // * pixel_buffer_height: height of the pixel buffer surface.
  // * rendering_api: defines the rendering API for the current thread. The
  //   available APIs are EGL_OPENGL_API, EGL_OPENGL_ES_API, and
  //   EGL_OPENVG_API.
  // * configuration_attributes: attributes used to build frame buffer
  //   configurations.
  // * context_attributes: attributes used to create the EGL context.
  // * egl_offscreen_context: if the method is successful, this object holds a
  //   valid display, context, and pixel buffer surface.
//
// Returns:
// A tensorflow::Status object storing tensorflow::Status::OK() on success,
// and an object of type tensorflow::errors otherwise.
static tensorflow::Status Create(
const int pixel_buffer_width, const int pixel_buffer_height,
const EGLenum rendering_api, const EGLint* configuration_attributes,
const EGLint* context_attributes,
std::unique_ptr<EGLOffscreenContext>* egl_offscreen_context);
// Binds the EGL context to the current rendering thread and to the pixel
// buffer surface. Note that this context must not be current in any other
// thread.
//
// Returns:
// A tensorflow::Status object storing tensorflow::Status::OK() on success,
// and an object of type tensorflow::errors otherwise.
tensorflow::Status MakeCurrent() const;
  // Unbinds the current EGL rendering context from the current rendering
// thread and from the pixel buffer surface.
//
// Returns:
// A tensorflow::Status object storing tensorflow::Status::OK() on success,
// and an object of type tensorflow::errors otherwise.
tensorflow::Status Release();
private:
EGLOffscreenContext() = delete;
EGLOffscreenContext(EGLContext context, EGLDisplay display,
EGLSurface pixel_buffer_surface);
EGLOffscreenContext(const EGLOffscreenContext&) = delete;
EGLOffscreenContext(EGLOffscreenContext&&) = delete;
EGLOffscreenContext& operator=(const EGLOffscreenContext&) = delete;
EGLOffscreenContext& operator=(EGLOffscreenContext&&) = delete;
tensorflow::Status Destroy();
EGLContext context_;
EGLDisplay display_;
EGLSurface pixel_buffer_surface_;
};
#endif // THIRD_PARTY_PY_TENSORFLOW_GRAPHICS_RENDERING_OPENGL_EGL_OFFSCREEN_CONTEXT_H_
| -1 |
tensorflow/graphics | 489 | Enforce `Framebuffer` to accept only tensors with single batch dimension. | Enforce `Framebuffer` to accept only tensors with single batch dimension.
| copybara-service[bot] | "2021-02-03T21:06:22Z" | "2021-02-12T23:59:48Z" | 3a4f1952ed967fb884dc031eeda6dac3fbefbe52 | b7a2bf260d6fcf924fddcbb6dba36c72ece66990 | Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
| ./tensorflow_graphics/nn/layer/tests/__init__.py | # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| -1 |